Replace (check and) del pattern with pop method

NIC619
2019-11-20 23:06:37 +08:00
parent 74198c70b1
commit 19907e18ec
8 changed files with 20 additions and 34 deletions
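
The pattern being replaced is the two-step "check membership, then delete".
A minimal sketch of the before/after (the `connections` dict and `peer_id`
key are illustrative, taken from the Swarm diff below):

    # Before: two lookups, and racy under cooperative concurrency if
    # another coroutine removes the key between the check and the del.
    if peer_id in self.connections:
        del self.connections[peer_id]

    # After: a single call; the None default means no KeyError is
    # raised when the key is already gone.
    self.connections.pop(peer_id, None)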

View File

@@ -248,7 +248,7 @@ class Swarm(INetwork):
         # TODO: Should be changed to close multiple connections,
         # if we have several connections per peer in the future.
         connection = self.connections[peer_id]
-        # NOTE: `connection.close` will perform `del self.connections[peer_id]`
+        # NOTE: `connection.close` will delete `peer_id` from `self.connections`
         # and `notify_disconnected` for us.
         await connection.close()
@@ -270,11 +270,9 @@ class Swarm(INetwork):
         """Simply remove the connection from Swarm's records, without closing
         the connection."""
         peer_id = swarm_conn.muxed_conn.peer_id
-        if peer_id not in self.connections:
-            return
         # TODO: Should be changed to remove the exact connection,
         # if we have several connections per peer in the future.
-        del self.connections[peer_id]
+        self.connections.pop(peer_id, None)

     # Notifee

View File

@@ -144,8 +144,7 @@ class GossipSub(IPubsubRouter):
         elif peer_id in self.peers_floodsub:
             self.peers_floodsub.remove(peer_id)

-        if peer_id in self.peers_to_protocol:
-            del self.peers_to_protocol[peer_id]
+        self.peers_to_protocol.pop(peer_id, None)

     async def handle_rpc(self, rpc: rpc_pb2.RPC, sender_peer_id: ID) -> None:
         """
@@ -274,8 +273,7 @@ class GossipSub(IPubsubRouter):
             self.mesh[topic].append(peer)
             await self.emit_graft(topic, peer)

-        if topic_in_fanout:
-            del self.fanout[topic]
+        self.fanout.pop(topic, None)

     async def leave(self, topic: str) -> None:
         # Note: the comments here are the near-exact algorithm description from the spec
@@ -294,7 +292,7 @@ class GossipSub(IPubsubRouter):
             await self.emit_prune(topic, peer)

         # Forget mesh[topic]
-        del self.mesh[topic]
+        self.mesh.pop(topic, None)

     # Heartbeat
     async def heartbeat(self) -> None:
@@ -355,8 +353,8 @@ class GossipSub(IPubsubRouter):
             # TODO: there's no way time_since_last_publish gets set anywhere yet
             if self.time_since_last_publish[topic] > self.time_to_live:
                 # Remove topic from fanout
-                del self.fanout[topic]
-                del self.time_since_last_publish[topic]
+                self.fanout.pop(topic, None)
+                self.time_since_last_publish.pop(topic, None)
             else:
                 num_fanout_peers_in_topic = len(self.fanout[topic])

View File

@@ -96,8 +96,7 @@ class MessageCache:
         last_entries: List[CacheEntry] = self.history[len(self.history) - 1]

         for entry in last_entries:
-            if entry.mid in self.msgs:
-                del self.msgs[entry.mid]
+            self.msgs.pop(entry.mid)

         i: int = len(self.history) - 2
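
Note that this call site, unlike the others in this commit, passes no default
to `pop`, so a missing message ID raises `KeyError` instead of being ignored.
A minimal sketch of the difference, using a throwaway dict:

    d = {"a": 1}
    d.pop("b", None)  # missing key with default: returns None, no error
    d.pop("b")        # missing key without default: raises KeyError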

View File

@@ -232,8 +232,7 @@ class Pubsub:
         :param topic: the topic to remove validator from
         """
-        if topic in self.topic_validators:
-            del self.topic_validators[topic]
+        self.topic_validators.pop(topic, None)

     def get_msg_validators(self, msg: rpc_pb2.Message) -> Tuple[TopicValidator, ...]:
         """
@@ -283,7 +282,7 @@ class Pubsub:
             await stream.write(encode_varint_prefixed(hello.SerializeToString()))
         except StreamClosed:
             logger.debug("Fail to add new peer %s: stream closed", peer_id)
-            del self.peers[peer_id]
+            self.peers.pop(peer_id, None)
             return
         # TODO: Check EOF of this stream.
         # TODO: Check if the peer in black list.
@@ -291,7 +290,7 @@ class Pubsub:
             self.router.add_peer(peer_id, stream.get_protocol())
         except Exception as error:
             logger.debug("fail to add new peer %s, error %s", peer_id, error)
-            del self.peers[peer_id]
+            self.peers.pop(peer_id, None)
             return

         logger.debug("added new peer %s", peer_id)
@@ -299,7 +298,7 @@ class Pubsub:
     def _handle_dead_peer(self, peer_id: ID) -> None:
         if peer_id not in self.peers:
             return
-        del self.peers[peer_id]
+        self.peers.pop(peer_id, None)

         for topic in self.peer_topics:
             if peer_id in self.peer_topics[topic]:
@@ -411,7 +410,7 @@ class Pubsub:
         if topic_id not in self.my_topics:
             return
         # Remove topic_id from map if present
-        del self.my_topics[topic_id]
+        self.my_topics.pop(topic_id, None)

         # Create unsubscribe message
         packet: rpc_pb2.RPC = rpc_pb2.RPC()

View File

@@ -50,8 +50,7 @@ class SecurityMultistream(ABC):
         :param transport: the corresponding transportation to the ``protocol``.
         """
         # If protocol is already added before, remove it and add it again.
-        if protocol in self.transports:
-            del self.transports[protocol]
+        self.transports.pop(protocol, None)
         self.transports[protocol] = transport
         # Note: None is added as the handler for the given protocol since
         # we only care about selecting the protocol, not any handler function
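
The pop before the assignment is not redundant: re-inserting a key moves it to
the end of the dict's insertion order, while plain assignment to an existing
key keeps its original position. A minimal sketch of that dict behavior (the
"remove it and add it again" comment above suggests ordering matters here):

    d = {"a": 1, "b": 2}
    d["a"] = 10           # overwrite in place: order stays a, b
    d.pop("a", None)
    d["a"] = 10           # remove then re-add: order becomes b, a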

View File

@@ -297,8 +297,7 @@ class Mplex(IMuxedConn):
         # the entry of this stream, to avoid others from accessing it.
         if is_local_closed:
             async with self.streams_lock:
-                if stream_id in self.streams:
-                    del self.streams[stream_id]
+                self.streams.pop(stream_id, None)

     async def _handle_reset(self, stream_id: StreamID) -> None:
         async with self.streams_lock:
@@ -316,8 +315,7 @@ class Mplex(IMuxedConn):
         if not stream.event_local_closed.is_set():
             stream.event_local_closed.set()
         async with self.streams_lock:
-            if stream_id in self.streams:
-                del self.streams[stream_id]
+            self.streams.pop(stream_id, None)

     async def _cleanup(self) -> None:
         if not self.event_shutting_down.is_set():

View File

@@ -180,8 +180,7 @@ class MplexStream(IMuxedStream):
         if _is_remote_closed:
             # Both sides are closed, we can safely remove the buffer from the dict.
             async with self.muxed_conn.streams_lock:
-                if self.stream_id in self.muxed_conn.streams:
-                    del self.muxed_conn.streams[self.stream_id]
+                self.muxed_conn.streams.pop(self.stream_id, None)

     async def reset(self) -> None:
         """closes both ends of the stream tells this remote side to hang up."""
@@ -208,11 +207,8 @@ class MplexStream(IMuxedStream):
             self.event_remote_closed.set()

         async with self.muxed_conn.streams_lock:
-            if (
-                self.muxed_conn.streams is not None
-                and self.stream_id in self.muxed_conn.streams
-            ):
-                del self.muxed_conn.streams[self.stream_id]
+            if self.muxed_conn.streams is not None:
+                self.muxed_conn.streams.pop(self.stream_id, None)

     # TODO deadline not in use
     def set_deadline(self, ttl: int) -> bool:

View File

@@ -44,8 +44,7 @@ class MuxerMultistream:
         :param transport: the corresponding transportation to the ``protocol``.
         """
         # If protocol is already added before, remove it and add it again.
-        if protocol in self.transports:
-            del self.transports[protocol]
+        self.transports.pop(protocol, None)
         self.transports[protocol] = transport
         self.multiselect.add_handler(protocol, None)