diff --git a/chia/_tests/plot_sync/test_sender.py b/chia/_tests/plot_sync/test_sender.py index 0257697a5ee2..c04035f722f0 100644 --- a/chia/_tests/plot_sync/test_sender.py +++ b/chia/_tests/plot_sync/test_sender.py @@ -45,7 +45,7 @@ def test_set_connection_values(bt: BlockTools, seeded_random: random.Random) -> # Test setting a valid connection works sender.set_connection(farmer_connection) # type:ignore[arg-type] assert sender._connection is not None - assert sender._connection == farmer_connection + assert id(sender._connection) == id(farmer_connection) @pytest.mark.anyio diff --git a/chia/full_node/full_node.py b/chia/full_node/full_node.py index 347442e59084..dd0beee175c9 100644 --- a/chia/full_node/full_node.py +++ b/chia/full_node/full_node.py @@ -1155,18 +1155,20 @@ async def fetch_blocks(output_queue: asyncio.Queue[Optional[tuple[WSChiaConnecti start = time.monotonic() # update the timestamp, now that we're sending a request + # it's OK for the timestamp to fall behind wall-clock + # time. 
It just means we're allowed to send more + # requests to catch up if is_localhost(peer.peer_info.host): # we don't apply rate limits to localhost, and our # tests depend on it - new_peers_with_peak[idx] = ( - new_peers_with_peak[idx][0], - new_peers_with_peak[idx][1] + 0.1, - ) + bump = 0.1 else: - new_peers_with_peak[idx] = ( - new_peers_with_peak[idx][0], - new_peers_with_peak[idx][1] + seconds_per_request, - ) + bump = seconds_per_request + + new_peers_with_peak[idx] = ( + new_peers_with_peak[idx][0], + new_peers_with_peak[idx][1] + bump, + ) response = await peer.call_api(FullNodeAPI.request_blocks, request, timeout=30) end = time.monotonic() if response is None: @@ -1176,7 +1178,12 @@ async def fetch_blocks(output_queue: asyncio.Queue[Optional[tuple[WSChiaConnecti if end - start > 5: self.log.info(f"peer took {end-start:.1f} s to respond to request_blocks") # this isn't a great peer, reduce its priority - # to prefer any peers that had to wait for it + # to prefer any peers that had to wait for it. + # Setting the next allowed timestamp to now + # means that any other peer that has waited for + # this will have its next allowed timestamp in + # the past, and be preferred multiple times + # over this peer. 
new_peers_with_peak[idx] = ( new_peers_with_peak[idx][0], end, @@ -1195,9 +1202,9 @@ async def fetch_blocks(output_queue: asyncio.Queue[Optional[tuple[WSChiaConnecti self.log.error(f"failed fetching {start_height} to {end_height} from peers") return if self.sync_store.peers_changed.is_set(): - existing_peers = {c: timestamp for c, timestamp in new_peers_with_peak} + existing_peers = {id(c): timestamp for c, timestamp in new_peers_with_peak} peers = self.get_peers_with_peak(peak_hash) - new_peers_with_peak = [(c, existing_peers.get(c, end)) for c in peers] + new_peers_with_peak = [(c, existing_peers.get(id(c), end)) for c in peers] random.shuffle(new_peers_with_peak) self.sync_store.peers_changed.clear() self.log.info(f"peers with peak: {len(new_peers_with_peak)}") diff --git a/chia/server/ws_connection.py b/chia/server/ws_connection.py index cc018f359b41..40c634385481 100644 --- a/chia/server/ws_connection.py +++ b/chia/server/ws_connection.py @@ -751,11 +751,3 @@ def get_peer_logging(self) -> PeerInfo: def has_capability(self, capability: Capability) -> bool: return capability in self.peer_capabilities - - def __hash__(self) -> int: - return hash(id(self)) - - def __eq__(self, other: object) -> bool: - if not isinstance(object, WSChiaConnection): - return False - return id(self) == id(other)