Skip to content

Commit

Permalink
address review comments
Browse files Browse the repository at this point in the history
  • Loading branch information
arvidn committed Oct 31, 2024
1 parent f80a472 commit d451773
Show file tree
Hide file tree
Showing 3 changed files with 19 additions and 20 deletions.
2 changes: 1 addition & 1 deletion chia/_tests/plot_sync/test_sender.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ def test_set_connection_values(bt: BlockTools, seeded_random: random.Random) ->
# Test setting a valid connection works
sender.set_connection(farmer_connection) # type:ignore[arg-type]
assert sender._connection is not None
assert sender._connection == farmer_connection
assert id(sender._connection) == id(farmer_connection)


@pytest.mark.anyio
Expand Down
29 changes: 18 additions & 11 deletions chia/full_node/full_node.py
Original file line number Diff line number Diff line change
Expand Up @@ -1155,18 +1155,20 @@ async def fetch_blocks(output_queue: asyncio.Queue[Optional[tuple[WSChiaConnecti
start = time.monotonic()

# update the timestamp, now that we're sending a request
# it's OK for the timestamp to fall behind wall-clock
# time. It just means we're allowed to send more
# requests to catch up
if is_localhost(peer.peer_info.host):
# we don't apply rate limits to localhost, and our
# tests depend on it
new_peers_with_peak[idx] = (
new_peers_with_peak[idx][0],
new_peers_with_peak[idx][1] + 0.1,
)
bump = 0.1
else:
new_peers_with_peak[idx] = (
new_peers_with_peak[idx][0],
new_peers_with_peak[idx][1] + seconds_per_request,
)
bump = seconds_per_request

new_peers_with_peak[idx] = (
new_peers_with_peak[idx][0],
new_peers_with_peak[idx][1] + bump,
)
response = await peer.call_api(FullNodeAPI.request_blocks, request, timeout=30)
end = time.monotonic()
if response is None:
Expand All @@ -1176,7 +1178,12 @@ async def fetch_blocks(output_queue: asyncio.Queue[Optional[tuple[WSChiaConnecti
if end - start > 5:
self.log.info(f"peer took {end-start:.1f} s to respond to request_blocks")
# this isn't a great peer, reduce its priority
# to prefer any peers that had to wait for it
# to prefer any peers that had to wait for it.
# By setting the next allowed timestamp to now,
# means that any other peer that has waited for
# this will have its next allowed timestamp in
the past, and be preferred multiple times
# over this peer.
new_peers_with_peak[idx] = (
new_peers_with_peak[idx][0],
end,
Expand All @@ -1195,9 +1202,9 @@ async def fetch_blocks(output_queue: asyncio.Queue[Optional[tuple[WSChiaConnecti
self.log.error(f"failed fetching {start_height} to {end_height} from peers")
return
if self.sync_store.peers_changed.is_set():
existing_peers = {c: timestamp for c, timestamp in new_peers_with_peak}
existing_peers = {id(c): timestamp for c, timestamp in new_peers_with_peak}
peers = self.get_peers_with_peak(peak_hash)
new_peers_with_peak = [(c, existing_peers.get(c, end)) for c in peers]
new_peers_with_peak = [(c, existing_peers.get(id(c), end)) for c in peers]
random.shuffle(new_peers_with_peak)
self.sync_store.peers_changed.clear()
self.log.info(f"peers with peak: {len(new_peers_with_peak)}")
Expand Down
8 changes: 0 additions & 8 deletions chia/server/ws_connection.py
Original file line number Diff line number Diff line change
Expand Up @@ -751,11 +751,3 @@ def get_peer_logging(self) -> PeerInfo:

def has_capability(self, capability: Capability) -> bool:
    """Return True when this peer advertised *capability* during handshake.

    Membership is checked against the capability list recorded on the
    connection (``self.peer_capabilities``).
    """
    advertised = self.peer_capabilities
    return capability in advertised

def __hash__(self) -> int:
    """Hash by object identity, so each live connection hashes uniquely."""
    identity = id(self)
    return hash(identity)

def __eq__(self, other: object) -> bool:
    """Identity-based equality: two connections compare equal only when
    they are the very same object.

    Bug fix: the original guard was ``isinstance(object, WSChiaConnection)``
    — it tested the builtin ``object`` type instead of the ``other``
    argument. That check is always False, so the method unconditionally
    returned False, making even ``conn == conn`` fail.
    """
    if not isinstance(other, WSChiaConnection):
        # Not a connection at all — never equal.
        return False
    # Identity comparison; equivalent to `self is other`.
    return id(self) == id(other)

0 comments on commit d451773

Please sign in to comment.