idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
25,300 | def build_fname ( self , prefix , postfix , fmt , export_dir = None ) : if hasattr ( postfix , 'sm_lt_path' ) : fname = '%s-rlz-%03d.%s' % ( prefix , postfix . ordinal , fmt ) else : fname = prefix + ( '-%s' % postfix if postfix else '' ) + '.' + fmt return self . export_path ( fname , export_dir ) | Build a file name from a realization by using prefix and extension . |
25,301 | def flush ( self ) : if self . parent != ( ) : self . parent . flush ( ) if self . hdf5 : self . hdf5 . flush ( ) | Flush the underlying hdf5 file |
25,302 | def close ( self ) : if self . parent != ( ) : self . parent . flush ( ) self . parent . close ( ) if self . hdf5 : self . hdf5 . flush ( ) self . hdf5 . close ( ) self . hdf5 = ( ) | Close the underlying hdf5 file |
25,303 | def getsize ( self , key = None ) : if key is None : return os . path . getsize ( self . filename ) return hdf5 . ByteCounter . get_nbytes ( h5py . File . __getitem__ ( self . hdf5 , key ) ) | Return the size in byte of the output associated to the given key . If no key is given returns the total size of all files . |
25,304 | def maybe_encode ( value ) : if isinstance ( value , ( list , tuple ) ) and isinstance ( value [ 0 ] , str ) : return encode ( value ) return value | If value is a sequence of strings encode it |
25,305 | def extend ( dset , array , ** attrs ) : length = len ( dset ) if len ( array ) == 0 : return length newlength = length + len ( array ) if array . dtype . name == 'object' : shape = ( newlength , ) + preshape ( array [ 0 ] ) else : shape = ( newlength , ) + array . shape [ 1 : ] dset . resize ( shape ) dset [ length : newlength ] = array for key , val in attrs . items ( ) : dset . attrs [ key ] = val return newlength | Extend an extensible dataset with an array of a compatible dtype . |
25,306 | def extend3 ( filename , key , array , ** attrs ) : with h5py . File ( filename ) as h5 : try : dset = h5 [ key ] except KeyError : if array . dtype . name == 'object' : shape = ( None , ) + preshape ( array [ 0 ] ) else : shape = ( None , ) + array . shape [ 1 : ] dset = create ( h5 , key , array . dtype , shape ) length = extend ( dset , array ) for key , val in attrs . items ( ) : dset . attrs [ key ] = val h5 . flush ( ) return length | Extend an HDF5 file dataset with the given array |
25,307 | def get_nbytes ( dset ) : if 'nbytes' in dset . attrs : return dset . attrs [ 'nbytes' ] elif hasattr ( dset , 'dtype' ) : return dset . size * numpy . zeros ( 1 , dset . dtype ) . nbytes | If the dataset has an attribute nbytes return it . Otherwise get the size of the underlying array . Returns None if the dataset is actually a group . |
25,308 | def decode_array ( values ) : out = [ ] for val in values : try : out . append ( val . decode ( 'utf8' ) ) except AttributeError : out . append ( val ) return out | Decode the values which are bytestrings . |
25,309 | def temporary ( cls ) : fh , path = tempfile . mkstemp ( suffix = '.hdf5' ) os . close ( fh ) self = cls ( path , 'w' ) self . path = path return self | Returns a temporary hdf5 file open for writing . The temporary name is stored in the . path attribute . It is the user responsability to remove the file when closed . |
25,310 | def save_vlen ( self , key , data ) : shape = ( None , ) + data [ 0 ] . shape [ : - 1 ] try : dset = self [ key ] except KeyError : vdt = h5py . special_dtype ( vlen = data [ 0 ] . dtype ) dset = create ( self , key , vdt , shape , fillvalue = None ) nbytes = dset . attrs . get ( 'nbytes' , 0 ) totlen = dset . attrs . get ( 'totlen' , 0 ) for i , val in enumerate ( data ) : nbytes += val . nbytes totlen += len ( val ) length = len ( dset ) dset . resize ( ( length + len ( data ) , ) + shape [ 1 : ] ) for i , arr in enumerate ( data ) : dset [ length + i ] = arr dset . attrs [ 'nbytes' ] = nbytes dset . attrs [ 'totlen' ] = totlen | Save a sequence of variable - length arrays |
25,311 | def set_nbytes ( self , key , nbytes = None ) : obj = super ( ) . __getitem__ ( key ) if nbytes is not None : obj . attrs [ 'nbytes' ] = nbytes else : obj . attrs [ 'nbytes' ] = nbytes = ByteCounter . get_nbytes ( obj ) return nbytes | Set the nbytes attribute on the HDF5 object identified by key . |
25,312 | def _get_delta ( self , stds , dists ) : delta = np . maximum ( ( 0.1 - 0.001 * dists . repi ) , np . zeros_like ( dists . repi ) ) return delta | Computes the additional delta to be used for the computation of the upp and low models |
25,313 | def _get_mean_and_stddevs ( self , sites , rup , dists , imt , stddev_types ) : distsl = copy . copy ( dists ) distsl . rjb , distsl . rrup = utils . get_equivalent_distances_east ( rup . mag , dists . repi ) mean1 , stds1 = super ( ) . get_mean_and_stddevs ( sites , rup , distsl , imt , stddev_types ) mean1 = self . apply_correction_to_BC ( mean1 , imt , distsl ) gmpe = Atkinson2008prime ( ) mean2 , stds2 = gmpe . get_mean_and_stddevs ( sites , rup , distsl , imt , stddev_types ) gmpe = SilvaEtAl2002SingleCornerSaturation ( ) mean4 , stds4 = gmpe . get_mean_and_stddevs ( sites , rup , distsl , imt , stddev_types ) mean4 = self . apply_correction_to_BC ( mean4 , imt , distsl ) gmpe = SilvaEtAl2002DoubleCornerSaturation ( ) mean5 , stds5 = gmpe . get_mean_and_stddevs ( sites , rup , distsl , imt , stddev_types ) mean5 = self . apply_correction_to_BC ( mean5 , imt , distsl ) distsl . rjb , distsl . rrup = utils . get_equivalent_distances_east ( rup . mag , dists . repi , ab06 = True ) gmpe = AtkinsonBoore2006Modified2011 ( ) mean3 , stds3 = gmpe . get_mean_and_stddevs ( sites , rup , distsl , imt , stddev_types ) mean_adj = mean1 * 0.2 + mean2 * 0.2 + mean3 * 0.2 + mean4 * 0.2 + mean5 * 0.2 stds_adj = np . log ( np . exp ( stds1 ) * 0.2 + np . exp ( stds2 ) * 0.2 + np . exp ( stds3 ) * 0.2 + np . exp ( stds4 ) * 0.2 + np . exp ( stds5 ) * 0.2 ) return mean_adj , stds_adj | Returns only the mean values . |
25,314 | async def ensure_open ( self ) -> None : if self . state is State . OPEN : if self . transfer_data_task . done ( ) : await asyncio . shield ( self . close_connection_task ) raise ConnectionClosed ( self . close_code , self . close_reason ) from self . transfer_data_exc else : return if self . state is State . CLOSED : raise ConnectionClosed ( self . close_code , self . close_reason ) from self . transfer_data_exc if self . state is State . CLOSING : await asyncio . shield ( self . close_connection_task ) raise ConnectionClosed ( self . close_code , self . close_reason ) from self . transfer_data_exc assert self . state is State . CONNECTING raise InvalidState ( "WebSocket connection isn't established yet" ) | Check that the WebSocket connection is open . |
25,315 | async def transfer_data ( self ) -> None : try : while True : message = await self . read_message ( ) if message is None : break if self . max_queue is not None : while len ( self . messages ) >= self . max_queue : self . _put_message_waiter = self . loop . create_future ( ) try : await self . _put_message_waiter finally : self . _put_message_waiter = None self . messages . append ( message ) if self . _pop_message_waiter is not None : self . _pop_message_waiter . set_result ( None ) self . _pop_message_waiter = None except asyncio . CancelledError as exc : self . transfer_data_exc = exc raise except WebSocketProtocolError as exc : self . transfer_data_exc = exc self . fail_connection ( 1002 ) except ( ConnectionError , EOFError ) as exc : self . transfer_data_exc = exc self . fail_connection ( 1006 ) except UnicodeDecodeError as exc : self . transfer_data_exc = exc self . fail_connection ( 1007 ) except PayloadTooBig as exc : self . transfer_data_exc = exc self . fail_connection ( 1009 ) except Exception as exc : logger . error ( "Error in data transfer" , exc_info = True ) self . transfer_data_exc = exc self . fail_connection ( 1011 ) | Read incoming messages and put them in a queue . |
25,316 | async def read_message ( self ) -> Optional [ Data ] : frame = await self . read_data_frame ( max_size = self . max_size ) if frame is None : return None if frame . opcode == OP_TEXT : text = True elif frame . opcode == OP_BINARY : text = False else : raise WebSocketProtocolError ( "Unexpected opcode" ) if frame . fin : return frame . data . decode ( "utf-8" ) if text else frame . data chunks : List [ Data ] = [ ] max_size = self . max_size if text : decoder_factory = codecs . getincrementaldecoder ( "utf-8" ) decoder = decoder_factory ( errors = "strict" ) if max_size is None : def append ( frame : Frame ) -> None : nonlocal chunks chunks . append ( decoder . decode ( frame . data , frame . fin ) ) else : def append ( frame : Frame ) -> None : nonlocal chunks , max_size chunks . append ( decoder . decode ( frame . data , frame . fin ) ) max_size -= len ( frame . data ) else : if max_size is None : def append ( frame : Frame ) -> None : nonlocal chunks chunks . append ( frame . data ) else : def append ( frame : Frame ) -> None : nonlocal chunks , max_size chunks . append ( frame . data ) max_size -= len ( frame . data ) append ( frame ) while not frame . fin : frame = await self . read_data_frame ( max_size = max_size ) if frame is None : raise WebSocketProtocolError ( "Incomplete fragmented message" ) if frame . opcode != OP_CONT : raise WebSocketProtocolError ( "Unexpected opcode" ) append ( frame ) return ( "" if text else b"" ) . join ( chunks ) | Read a single message from the connection . |
25,317 | async def read_data_frame ( self , max_size : int ) -> Optional [ Frame ] : while True : frame = await self . read_frame ( max_size ) if frame . opcode == OP_CLOSE : self . close_code , self . close_reason = parse_close ( frame . data ) await self . write_close_frame ( frame . data ) return None elif frame . opcode == OP_PING : ping_hex = frame . data . hex ( ) or "[empty]" logger . debug ( "%s - received ping, sending pong: %s" , self . side , ping_hex ) await self . pong ( frame . data ) elif frame . opcode == OP_PONG : if frame . data in self . pings : ping_id = None ping_ids = [ ] while ping_id != frame . data : ping_id , pong_waiter = self . pings . popitem ( last = False ) ping_ids . append ( ping_id ) pong_waiter . set_result ( None ) pong_hex = binascii . hexlify ( frame . data ) . decode ( ) or "[empty]" logger . debug ( "%s - received solicited pong: %s" , self . side , pong_hex ) ping_ids = ping_ids [ : - 1 ] if ping_ids : pings_hex = ", " . join ( binascii . hexlify ( ping_id ) . decode ( ) or "[empty]" for ping_id in ping_ids ) plural = "s" if len ( ping_ids ) > 1 else "" logger . debug ( "%s - acknowledged previous ping%s: %s" , self . side , plural , pings_hex , ) else : pong_hex = binascii . hexlify ( frame . data ) . decode ( ) or "[empty]" logger . debug ( "%s - received unsolicited pong: %s" , self . side , pong_hex ) else : return frame | Read a single data frame from the connection . |
25,318 | async def read_frame ( self , max_size : int ) -> Frame : frame = await Frame . read ( self . reader . readexactly , mask = not self . is_client , max_size = max_size , extensions = self . extensions , ) logger . debug ( "%s < %r" , self . side , frame ) return frame | Read a single frame from the connection . |
25,319 | async def write_close_frame ( self , data : bytes = b"" ) -> None : if self . state is State . OPEN : self . state = State . CLOSING logger . debug ( "%s - state = CLOSING" , self . side ) await self . write_frame ( True , OP_CLOSE , data , _expected_state = State . CLOSING ) | Write a close frame if and only if the connection state is OPEN . |
25,320 | async def keepalive_ping ( self ) -> None : if self . ping_interval is None : return try : while True : await asyncio . sleep ( self . ping_interval , loop = self . loop ) ping_waiter = await self . ping ( ) if self . ping_timeout is not None : try : await asyncio . wait_for ( ping_waiter , self . ping_timeout , loop = self . loop ) except asyncio . TimeoutError : logger . debug ( "%s ! timed out waiting for pong" , self . side ) self . fail_connection ( 1011 ) break except asyncio . CancelledError : raise except Exception : logger . warning ( "Unexpected exception in keepalive ping task" , exc_info = True ) | Send a Ping frame and wait for a Pong frame at regular intervals . |
25,321 | async def close_connection ( self ) -> None : try : if hasattr ( self , "transfer_data_task" ) : try : await self . transfer_data_task except asyncio . CancelledError : pass if hasattr ( self , "keepalive_ping_task" ) : self . keepalive_ping_task . cancel ( ) if self . is_client and hasattr ( self , "transfer_data_task" ) : if await self . wait_for_connection_lost ( ) : return logger . debug ( "%s ! timed out waiting for TCP close" , self . side ) if self . writer . can_write_eof ( ) : logger . debug ( "%s x half-closing TCP connection" , self . side ) self . writer . write_eof ( ) if await self . wait_for_connection_lost ( ) : return logger . debug ( "%s ! timed out waiting for TCP close" , self . side ) finally : if self . connection_lost_waiter . done ( ) and not self . secure : return logger . debug ( "%s x closing TCP connection" , self . side ) self . writer . close ( ) if await self . wait_for_connection_lost ( ) : return logger . debug ( "%s ! timed out waiting for TCP close" , self . side ) logger . debug ( "%s x aborting TCP connection" , self . side ) self . writer . transport . abort ( ) await self . wait_for_connection_lost ( ) | 7 . 1 . 1 . Close the WebSocket Connection |
25,322 | async def wait_for_connection_lost ( self ) -> bool : if not self . connection_lost_waiter . done ( ) : try : await asyncio . wait_for ( asyncio . shield ( self . connection_lost_waiter ) , self . close_timeout , loop = self . loop , ) except asyncio . TimeoutError : pass return self . connection_lost_waiter . done ( ) | Wait until the TCP connection is closed or self . close_timeout elapses . |
25,323 | def fail_connection ( self , code : int = 1006 , reason : str = "" ) -> None : logger . debug ( "%s ! failing %s WebSocket connection with code %d" , self . side , self . state . name , code , ) if hasattr ( self , "transfer_data_task" ) : self . transfer_data_task . cancel ( ) if code != 1006 and self . state is State . OPEN : frame_data = serialize_close ( code , reason ) self . state = State . CLOSING logger . debug ( "%s - state = CLOSING" , self . side ) frame = Frame ( True , OP_CLOSE , frame_data ) logger . debug ( "%s > %r" , self . side , frame ) frame . write ( self . writer . write , mask = self . is_client , extensions = self . extensions ) if not hasattr ( self , "close_connection_task" ) : self . close_connection_task = self . loop . create_task ( self . close_connection ( ) ) | 7 . 1 . 7 . Fail the WebSocket Connection |
25,324 | def abort_keepalive_pings ( self ) -> None : assert self . state is State . CLOSED exc = ConnectionClosed ( self . close_code , self . close_reason ) exc . __cause__ = self . transfer_data_exc for ping in self . pings . values ( ) : ping . set_exception ( exc ) if self . pings : pings_hex = ", " . join ( binascii . hexlify ( ping_id ) . decode ( ) or "[empty]" for ping_id in self . pings ) plural = "s" if len ( self . pings ) > 1 else "" logger . debug ( "%s - aborted pending ping%s: %s" , self . side , plural , pings_hex ) | Raise ConnectionClosed in pending keepalive pings . |
25,325 | def connection_made ( self , transport : asyncio . BaseTransport ) -> None : logger . debug ( "%s - event = connection_made(%s)" , self . side , transport ) transport . set_write_buffer_limits ( self . write_limit ) super ( ) . connection_made ( transport ) | Configure write buffer limits . |
25,326 | def eof_received ( self ) -> bool : logger . debug ( "%s - event = eof_received()" , self . side ) super ( ) . eof_received ( ) return False | Close the transport after receiving EOF . |
25,327 | def connection_lost ( self , exc : Optional [ Exception ] ) -> None : logger . debug ( "%s - event = connection_lost(%s)" , self . side , exc ) self . state = State . CLOSED logger . debug ( "%s - state = CLOSED" , self . side ) if not hasattr ( self , "close_code" ) : self . close_code = 1006 if not hasattr ( self , "close_reason" ) : self . close_reason = "" logger . debug ( "%s x code = %d, reason = %s" , self . side , self . close_code , self . close_reason or "[no reason]" , ) self . abort_keepalive_pings ( ) self . connection_lost_waiter . set_result ( None ) super ( ) . connection_lost ( exc ) | 7 . 1 . 4 . The WebSocket Connection is Closed . |
25,328 | def build_request ( headers : Headers ) -> str : raw_key = bytes ( random . getrandbits ( 8 ) for _ in range ( 16 ) ) key = base64 . b64encode ( raw_key ) . decode ( ) headers [ "Upgrade" ] = "websocket" headers [ "Connection" ] = "Upgrade" headers [ "Sec-WebSocket-Key" ] = key headers [ "Sec-WebSocket-Version" ] = "13" return key | Build a handshake request to send to the server . |
25,329 | def check_request ( headers : Headers ) -> str : connection = sum ( [ parse_connection ( value ) for value in headers . get_all ( "Connection" ) ] , [ ] ) if not any ( value . lower ( ) == "upgrade" for value in connection ) : raise InvalidUpgrade ( "Connection" , ", " . join ( connection ) ) upgrade = sum ( [ parse_upgrade ( value ) for value in headers . get_all ( "Upgrade" ) ] , [ ] ) if not ( len ( upgrade ) == 1 and upgrade [ 0 ] . lower ( ) == "websocket" ) : raise InvalidUpgrade ( "Upgrade" , ", " . join ( upgrade ) ) try : s_w_key = headers [ "Sec-WebSocket-Key" ] except KeyError : raise InvalidHeader ( "Sec-WebSocket-Key" ) except MultipleValuesError : raise InvalidHeader ( "Sec-WebSocket-Key" , "more than one Sec-WebSocket-Key header found" ) try : raw_key = base64 . b64decode ( s_w_key . encode ( ) , validate = True ) except binascii . Error : raise InvalidHeaderValue ( "Sec-WebSocket-Key" , s_w_key ) if len ( raw_key ) != 16 : raise InvalidHeaderValue ( "Sec-WebSocket-Key" , s_w_key ) try : s_w_version = headers [ "Sec-WebSocket-Version" ] except KeyError : raise InvalidHeader ( "Sec-WebSocket-Version" ) except MultipleValuesError : raise InvalidHeader ( "Sec-WebSocket-Version" , "more than one Sec-WebSocket-Version header found" ) if s_w_version != "13" : raise InvalidHeaderValue ( "Sec-WebSocket-Version" , s_w_version ) return s_w_key | Check a handshake request received from the client . |
25,330 | def build_response ( headers : Headers , key : str ) -> None : headers [ "Upgrade" ] = "websocket" headers [ "Connection" ] = "Upgrade" headers [ "Sec-WebSocket-Accept" ] = accept ( key ) | Build a handshake response to send to the client . |
25,331 | def check_response ( headers : Headers , key : str ) -> None : connection = sum ( [ parse_connection ( value ) for value in headers . get_all ( "Connection" ) ] , [ ] ) if not any ( value . lower ( ) == "upgrade" for value in connection ) : raise InvalidUpgrade ( "Connection" , " " . join ( connection ) ) upgrade = sum ( [ parse_upgrade ( value ) for value in headers . get_all ( "Upgrade" ) ] , [ ] ) if not ( len ( upgrade ) == 1 and upgrade [ 0 ] . lower ( ) == "websocket" ) : raise InvalidUpgrade ( "Upgrade" , ", " . join ( upgrade ) ) try : s_w_accept = headers [ "Sec-WebSocket-Accept" ] except KeyError : raise InvalidHeader ( "Sec-WebSocket-Accept" ) except MultipleValuesError : raise InvalidHeader ( "Sec-WebSocket-Accept" , "more than one Sec-WebSocket-Accept header found" ) if s_w_accept != accept ( key ) : raise InvalidHeaderValue ( "Sec-WebSocket-Accept" , s_w_accept ) | Check a handshake response received from the server . |
25,332 | def decode ( self , frame : Frame , * , max_size : Optional [ int ] = None ) -> Frame : if frame . opcode in CTRL_OPCODES : return frame if frame . opcode == OP_CONT : if not self . decode_cont_data : return frame if frame . fin : self . decode_cont_data = False else : if not frame . rsv1 : return frame if not frame . fin : self . decode_cont_data = True if self . remote_no_context_takeover : self . decoder = zlib . decompressobj ( wbits = - self . remote_max_window_bits ) data = frame . data if frame . fin : data += _EMPTY_UNCOMPRESSED_BLOCK max_length = 0 if max_size is None else max_size data = self . decoder . decompress ( data , max_length ) if self . decoder . unconsumed_tail : raise PayloadTooBig ( f"Uncompressed payload length exceeds size limit (? > {max_size} bytes)" ) if frame . fin and self . remote_no_context_takeover : del self . decoder return frame . _replace ( data = data , rsv1 = False ) | Decode an incoming frame . |
25,333 | def encode ( self , frame : Frame ) -> Frame : if frame . opcode in CTRL_OPCODES : return frame if frame . opcode != OP_CONT : if self . local_no_context_takeover : self . encoder = zlib . compressobj ( wbits = - self . local_max_window_bits , ** self . compress_settings ) data = self . encoder . compress ( frame . data ) + self . encoder . flush ( zlib . Z_SYNC_FLUSH ) if frame . fin and data . endswith ( _EMPTY_UNCOMPRESSED_BLOCK ) : data = data [ : - 4 ] if frame . fin and self . local_no_context_takeover : del self . encoder return frame . _replace ( data = data , rsv1 = True ) | Encode an outgoing frame . |
25,334 | def get_request_params ( self ) -> List [ ExtensionParameter ] : return _build_parameters ( self . server_no_context_takeover , self . client_no_context_takeover , self . server_max_window_bits , self . client_max_window_bits , ) | Build request parameters . |
25,335 | def process_response_params ( self , params : Sequence [ ExtensionParameter ] , accepted_extensions : Sequence [ "Extension" ] , ) -> PerMessageDeflate : if any ( other . name == self . name for other in accepted_extensions ) : raise NegotiationError ( f"Received duplicate {self.name}" ) ( server_no_context_takeover , client_no_context_takeover , server_max_window_bits , client_max_window_bits , ) = _extract_parameters ( params , is_server = False ) if self . server_no_context_takeover : if not server_no_context_takeover : raise NegotiationError ( "Expected server_no_context_takeover" ) if self . client_no_context_takeover : if not client_no_context_takeover : client_no_context_takeover = True if self . server_max_window_bits is None : pass else : if server_max_window_bits is None : raise NegotiationError ( "Expected server_max_window_bits" ) elif server_max_window_bits > self . server_max_window_bits : raise NegotiationError ( "Unsupported server_max_window_bits" ) if self . client_max_window_bits is None : if client_max_window_bits is not None : raise NegotiationError ( "Unexpected client_max_window_bits" ) elif self . client_max_window_bits is True : pass else : if client_max_window_bits is None : client_max_window_bits = self . client_max_window_bits elif client_max_window_bits > self . client_max_window_bits : raise NegotiationError ( "Unsupported client_max_window_bits" ) return PerMessageDeflate ( server_no_context_takeover , client_no_context_takeover , server_max_window_bits or 15 , client_max_window_bits or 15 , self . compress_settings , ) | Process response parameters . |
25,336 | def process_request_params ( self , params : Sequence [ ExtensionParameter ] , accepted_extensions : Sequence [ "Extension" ] , ) -> Tuple [ List [ ExtensionParameter ] , PerMessageDeflate ] : if any ( other . name == self . name for other in accepted_extensions ) : raise NegotiationError ( f"Skipped duplicate {self.name}" ) ( server_no_context_takeover , client_no_context_takeover , server_max_window_bits , client_max_window_bits , ) = _extract_parameters ( params , is_server = True ) if self . server_no_context_takeover : if not server_no_context_takeover : server_no_context_takeover = True if self . client_no_context_takeover : if not client_no_context_takeover : client_no_context_takeover = True if self . server_max_window_bits is None : pass else : if server_max_window_bits is None : server_max_window_bits = self . server_max_window_bits elif server_max_window_bits > self . server_max_window_bits : server_max_window_bits = self . server_max_window_bits if self . client_max_window_bits is None : if client_max_window_bits is True : client_max_window_bits = self . client_max_window_bits else : if client_max_window_bits is None : raise NegotiationError ( "Required client_max_window_bits" ) elif client_max_window_bits is True : client_max_window_bits = self . client_max_window_bits elif self . client_max_window_bits < client_max_window_bits : client_max_window_bits = self . client_max_window_bits return ( _build_parameters ( server_no_context_takeover , client_no_context_takeover , server_max_window_bits , client_max_window_bits , ) , PerMessageDeflate ( client_no_context_takeover , server_no_context_takeover , client_max_window_bits or 15 , server_max_window_bits or 15 , self . compress_settings , ) , ) | Process request parameters . |
25,337 | def apply_mask ( data : bytes , mask : bytes ) -> bytes : if len ( mask ) != 4 : raise ValueError ( "mask must contain 4 bytes" ) return bytes ( b ^ m for b , m in zip ( data , itertools . cycle ( mask ) ) ) | Apply masking to the data of a WebSocket message . |
25,338 | def format_close ( code : int , reason : str ) -> str : if 3000 <= code < 4000 : explanation = "registered" elif 4000 <= code < 5000 : explanation = "private use" else : explanation = CLOSE_CODES . get ( code , "unknown" ) result = f"code = {code} ({explanation}), " if reason : result += f"reason = {reason}" else : result += "no reason" return result | Display a human - readable version of the close code and reason . |
25,339 | def start ( self ) : service_names = ', ' . join ( self . service_names ) _log . info ( 'starting services: %s' , service_names ) SpawningProxy ( self . containers ) . start ( ) _log . debug ( 'services started: %s' , service_names ) | Start all the registered services . |
25,340 | def wait ( self ) : try : SpawningProxy ( self . containers , abort_on_error = True ) . wait ( ) except Exception : self . stop ( ) raise | Wait for all running containers to stop . |
25,341 | def publish ( self , payload , ** kwargs ) : publish_kwargs = self . publish_kwargs . copy ( ) headers = publish_kwargs . pop ( 'headers' , { } ) . copy ( ) headers . update ( kwargs . pop ( 'headers' , { } ) ) headers . update ( kwargs . pop ( 'extra_headers' , { } ) ) use_confirms = kwargs . pop ( 'use_confirms' , self . use_confirms ) transport_options = kwargs . pop ( 'transport_options' , self . transport_options ) transport_options [ 'confirm_publish' ] = use_confirms delivery_mode = kwargs . pop ( 'delivery_mode' , self . delivery_mode ) mandatory = kwargs . pop ( 'mandatory' , self . mandatory ) priority = kwargs . pop ( 'priority' , self . priority ) expiration = kwargs . pop ( 'expiration' , self . expiration ) serializer = kwargs . pop ( 'serializer' , self . serializer ) compression = kwargs . pop ( 'compression' , self . compression ) retry = kwargs . pop ( 'retry' , self . retry ) retry_policy = kwargs . pop ( 'retry_policy' , self . retry_policy ) declare = self . declare [ : ] declare . extend ( kwargs . pop ( 'declare' , ( ) ) ) publish_kwargs . update ( kwargs ) with get_producer ( self . amqp_uri , use_confirms , self . ssl , transport_options , ) as producer : try : producer . publish ( payload , headers = headers , delivery_mode = delivery_mode , mandatory = mandatory , priority = priority , expiration = expiration , compression = compression , declare = declare , retry = retry , retry_policy = retry_policy , serializer = serializer , ** publish_kwargs ) except ChannelError as exc : if "NO_ROUTE" in str ( exc ) : raise UndeliverableMessage ( ) raise if mandatory : if not use_confirms : warnings . warn ( "Mandatory delivery was requested, but " "unroutable messages cannot be detected without " "publish confirms enabled." ) | Publish a message . |
25,342 | def stop ( self ) : if not self . _providers_registered : self . queue_consumer . unregister_provider ( self ) self . _unregistered_from_queue_consumer . send ( True ) | Stop the RpcConsumer . |
25,343 | def unregister_provider ( self , provider ) : self . _unregistering_providers . add ( provider ) remaining_providers = self . _providers - self . _unregistering_providers if not remaining_providers : _log . debug ( 'unregistering from queueconsumer %s' , self ) self . queue_consumer . unregister_provider ( self ) _log . debug ( 'unregistered from queueconsumer %s' , self ) self . _unregistered_from_queue_consumer . send ( True ) _log . debug ( 'waiting for unregister from queue consumer %s' , self ) self . _unregistered_from_queue_consumer . wait ( ) super ( RpcConsumer , self ) . unregister_provider ( provider ) | Unregister a provider . |
25,344 | def get_dependency ( self , worker_ctx ) : extra_headers = self . get_message_headers ( worker_ctx ) def dispatch ( event_type , event_data ) : self . publisher . publish ( event_data , exchange = self . exchange , routing_key = event_type , extra_headers = extra_headers ) return dispatch | Inject a dispatch method onto the service instance |
25,345 | def broadcast_identifier ( self ) : if self . handler_type is not BROADCAST : return None if self . reliable_delivery : raise EventHandlerConfigurationError ( "You are using the default broadcast identifier " "which is not compatible with reliable delivery. See " ":meth:`nameko.events.EventHandler.broadcast_identifier` " "for details." ) return uuid . uuid4 ( ) . hex | A unique string to identify a service instance for BROADCAST type handlers . |
25,346 | def get_module_path ( exc_type ) : module = inspect . getmodule ( exc_type ) return "{}.{}" . format ( module . __name__ , exc_type . __name__ ) | Return the dotted module path of exc_type including the class name . |
25,347 | def safe_for_serialization ( value ) : if isinstance ( value , six . string_types ) : return value if isinstance ( value , dict ) : return { safe_for_serialization ( key ) : safe_for_serialization ( val ) for key , val in six . iteritems ( value ) } if isinstance ( value , collections . Iterable ) : return list ( map ( safe_for_serialization , value ) ) try : return six . text_type ( value ) except Exception : return '[__unicode__ failed]' | Transform a value in preparation for serializing as json |
25,348 | def serialize ( exc ) : return { 'exc_type' : type ( exc ) . __name__ , 'exc_path' : get_module_path ( type ( exc ) ) , 'exc_args' : list ( map ( safe_for_serialization , exc . args ) ) , 'value' : safe_for_serialization ( exc ) , } | Serialize self . exc into a data dictionary representing it . |
25,349 | def deserialize ( data ) : key = data . get ( 'exc_path' ) if key in registry : exc_args = data . get ( 'exc_args' , ( ) ) return registry [ key ] ( * exc_args ) exc_type = data . get ( 'exc_type' ) value = data . get ( 'value' ) return RemoteError ( exc_type = exc_type , value = value ) | Deserialize data to an exception instance . |
25,350 | def import_from_path ( path ) : if path is None : return obj = locate ( path ) if obj is None : raise ImportError ( "`{}` could not be imported" . format ( path ) ) return obj | Import and return the object at path if it exists . |
25,351 | def sanitize_url ( url ) : parts = urlparse ( url ) if parts . password is None : return url host_info = parts . netloc . rsplit ( '@' , 1 ) [ - 1 ] parts = parts . _replace ( netloc = '{}:{}@{}' . format ( parts . username , REDACTED , host_info ) ) return parts . geturl ( ) | Redact password in urls . |
25,352 | def get_subscriptions ( self , socket_id ) : con = self . _get_connection ( socket_id , create = False ) if con is None : return [ ] return sorted ( con . subscriptions ) | Returns a list of all the subscriptions of a socket . |
25,353 | def subscribe ( self , socket_id , channel ) : con = self . _get_connection ( socket_id ) self . subscriptions . setdefault ( channel , set ( ) ) . add ( socket_id ) con . subscriptions . add ( channel ) | Subscribes a socket to a channel . |
25,354 | def unsubscribe ( self , socket_id , channel ) : con = self . _get_connection ( socket_id , create = False ) if con is not None : con . subscriptions . discard ( channel ) try : self . subscriptions [ channel ] . discard ( socket_id ) except KeyError : pass | Unsubscribes a socket from a channel . |
25,355 | def broadcast ( self , channel , event , data ) : payload = self . _server . serialize_event ( event , data ) for socket_id in self . subscriptions . get ( channel , ( ) ) : rv = self . _server . sockets . get ( socket_id ) if rv is not None : rv . socket . send ( payload ) | Broadcasts an event to all sockets listening on a channel . |
25,356 | def unicast ( self , socket_id , event , data ) : payload = self . _server . serialize_event ( event , data ) rv = self . _server . sockets . get ( socket_id ) if rv is not None : rv . socket . send ( payload ) return True return False | Sends an event to a single socket . Returns True if that worked or False if not . |
25,357 | def make_timing_logger ( logger , precision = 3 , level = logging . DEBUG ) : @ contextmanager def log_time ( msg , * args ) : start_time = time . time ( ) try : yield finally : message = "{} in %0.{}fs" . format ( msg , precision ) duration = time . time ( ) - start_time args = args + ( duration , ) logger . log ( level , message , * args ) return log_time | Return a timing logger . |
25,358 | def get_wsgi_server ( self , sock , wsgi_app , protocol = HttpOnlyProtocol , debug = False ) : return wsgi . Server ( sock , sock . getsockname ( ) , wsgi_app , protocol = protocol , debug = debug , log = getLogger ( __name__ ) ) | Get the WSGI server used to process requests . |
25,359 | def get_event_exchange ( service_name ) : exchange_name = "{}.events" . format ( service_name ) exchange = Exchange ( exchange_name , type = 'topic' , durable = True , delivery_mode = PERSISTENT ) return exchange | Get an exchange for service_name events . |
25,360 | def event_dispatcher ( nameko_config , ** kwargs ) : amqp_uri = nameko_config [ AMQP_URI_CONFIG_KEY ] serializer , _ = serialization . setup ( nameko_config ) serializer = kwargs . pop ( 'serializer' , serializer ) ssl = nameko_config . get ( AMQP_SSL_CONFIG_KEY ) publisher = Publisher ( amqp_uri , serializer = serializer , ssl = ssl , ** kwargs ) def dispatch ( service_name , event_type , event_data ) : exchange = get_event_exchange ( service_name ) publisher . publish ( event_data , exchange = exchange , routing_key = event_type ) return dispatch | Return a function that dispatches nameko events . |
25,361 | def _run ( self ) : def get_next_interval ( ) : start_time = time . time ( ) start = 0 if self . eager else 1 for count in itertools . count ( start = start ) : yield max ( start_time + count * self . interval - time . time ( ) , 0 ) interval = get_next_interval ( ) sleep_time = next ( interval ) while True : with Timeout ( sleep_time , exception = False ) : self . should_stop . wait ( ) break self . handle_timer_tick ( ) self . worker_complete . wait ( ) self . worker_complete . reset ( ) sleep_time = next ( interval ) | Runs the interval loop . |
25,362 | def stop ( self ) : if not self . _consumers_ready . ready ( ) : _log . debug ( 'stopping while consumer is starting %s' , self ) stop_exc = QueueConsumerStopped ( ) self . _gt . kill ( stop_exc ) self . wait_for_providers ( ) try : _log . debug ( 'waiting for consumer death %s' , self ) self . _gt . wait ( ) except QueueConsumerStopped : pass super ( QueueConsumer , self ) . stop ( ) _log . debug ( 'stopped %s' , self ) | Stop the queue - consumer gracefully . |
25,363 | def kill ( self ) : if self . _gt is not None and not self . _gt . dead : self . _providers = set ( ) self . _pending_remove_providers = { } self . should_stop = True try : self . _gt . wait ( ) except Exception as exc : _log . warn ( 'QueueConsumer %s raised `%s` during kill' , self , exc ) super ( QueueConsumer , self ) . kill ( ) _log . debug ( 'killed %s' , self ) | Kill the queue - consumer . |
25,364 | def connection ( self ) : heartbeat = self . container . config . get ( HEARTBEAT_CONFIG_KEY , DEFAULT_HEARTBEAT ) transport_options = self . container . config . get ( TRANSPORT_OPTIONS_CONFIG_KEY , DEFAULT_TRANSPORT_OPTIONS ) ssl = self . container . config . get ( AMQP_SSL_CONFIG_KEY ) conn = Connection ( self . amqp_uri , transport_options = transport_options , heartbeat = heartbeat , ssl = ssl ) return conn | Provide the connection parameters for kombu s ConsumerMixin . |
25,365 | def get_consumers ( self , consumer_cls , channel ) : _log . debug ( 'setting up consumers %s' , self ) for provider in self . _providers : callbacks = [ partial ( self . handle_message , provider ) ] consumer = consumer_cls ( queues = [ provider . queue ] , callbacks = callbacks , accept = self . accept ) consumer . qos ( prefetch_count = self . prefetch_count ) self . _consumers [ provider ] = consumer return self . _consumers . values ( ) | Kombu callback to set up consumers . |
25,366 | def on_iteration ( self ) : self . _cancel_consumers_if_requested ( ) if len ( self . _consumers ) == 0 : _log . debug ( 'requesting stop after iteration' ) self . should_stop = True | Kombu callback for each drain_events loop iteration . |
25,367 | def on_consume_ready ( self , connection , channel , consumers , ** kwargs ) : if not self . _consumers_ready . ready ( ) : _log . debug ( 'consumer started %s' , self ) self . _consumers_ready . send ( None ) | Kombu callback when consumers are ready to accept messages . |
25,368 | def make_nameko_helper ( config ) : module = ModuleType ( 'nameko' ) module . __doc__ = proxy = ClusterRpcProxy ( config ) module . rpc = proxy . start ( ) module . dispatch_event = event_dispatcher ( config ) module . config = config module . disconnect = proxy . stop return module | Create a fake module that provides some convenient access to nameko standalone functionality for interactive shell usage . |
25,369 | def iter_extensions ( extension ) : for _ , ext in inspect . getmembers ( extension , is_extension ) : for item in iter_extensions ( ext ) : yield item yield ext | Depth - first iterator over sub - extensions on extension . |
25,370 | def bind ( self , container ) : def clone ( prototype ) : if prototype . is_bound ( ) : raise RuntimeError ( 'Cannot `bind` a bound extension.' ) cls = type ( prototype ) args , kwargs = prototype . __params instance = cls ( * args , ** kwargs ) instance . container = weakref . proxy ( container ) return instance instance = clone ( self ) for name , ext in inspect . getmembers ( self , is_extension ) : setattr ( instance , name , ext . bind ( container ) ) return instance | Get an instance of this Extension to bind to container . |
25,371 | def bind ( self , container ) : shared = container . shared_extensions . get ( self . sharing_key ) if shared : return shared instance = super ( SharedExtension , self ) . bind ( container ) container . shared_extensions [ self . sharing_key ] = instance return instance | Bind implementation that supports sharing . |
25,372 | def bind ( self , container , attr_name ) : instance = super ( DependencyProvider , self ) . bind ( container ) instance . attr_name = attr_name self . attr_name = attr_name return instance | Get an instance of this Dependency to bind to container with attr_name . |
25,373 | def wait_for_providers ( self ) : if self . _providers_registered : _log . debug ( 'waiting for providers to unregister %s' , self ) self . _last_provider_unregistered . wait ( ) _log . debug ( 'all providers unregistered %s' , self ) | Wait for any providers registered with the collector to have unregistered . |
25,374 | def bind ( self , container , method_name ) : instance = super ( Entrypoint , self ) . bind ( container ) instance . method_name = method_name return instance | Get an instance of this Entrypoint to bind to container with method_name . |
25,375 | def start ( self ) : _log . debug ( 'starting %s' , self ) self . started = True with _log_time ( 'started %s' , self ) : self . extensions . all . setup ( ) self . extensions . all . start ( ) | Start a container by starting all of its extensions . |
25,376 | def stop ( self ) : if self . _died . ready ( ) : _log . debug ( 'already stopped %s' , self ) return if self . _being_killed : _log . debug ( 'already being killed %s' , self ) try : self . _died . wait ( ) except : pass return _log . debug ( 'stopping %s' , self ) with _log_time ( 'stopped %s' , self ) : self . entrypoints . all . stop ( ) self . _worker_pool . waitall ( ) self . dependencies . all . stop ( ) self . subextensions . all . stop ( ) self . _kill_managed_threads ( ) self . started = False if not self . _died . ready ( ) : self . _died . send ( None ) | Stop the container gracefully . |
25,377 | def kill ( self , exc_info = None ) : if self . _being_killed : _log . debug ( 'already killing %s ... waiting for death' , self ) try : self . _died . wait ( ) except : pass return self . _being_killed = True if self . _died . ready ( ) : _log . debug ( 'already stopped %s' , self ) return if exc_info is not None : _log . info ( 'killing %s due to %s' , self , exc_info [ 1 ] ) else : _log . info ( 'killing %s' , self ) def safely_kill_extensions ( ext_set ) : try : ext_set . kill ( ) except Exception as exc : _log . warning ( 'Extension raised `%s` during kill' , exc ) safely_kill_extensions ( self . entrypoints . all ) self . _kill_worker_threads ( ) safely_kill_extensions ( self . extensions . all ) self . _kill_managed_threads ( ) self . started = False if not self . _died . ready ( ) : self . _died . send ( None , exc_info ) | Kill the container in a semi - graceful way . |
25,378 | def spawn_worker ( self , entrypoint , args , kwargs , context_data = None , handle_result = None ) : if self . _being_killed : _log . info ( "Worker spawn prevented due to being killed" ) raise ContainerBeingKilled ( ) service = self . service_cls ( ) worker_ctx = WorkerContext ( self , service , entrypoint , args , kwargs , data = context_data ) _log . debug ( 'spawning %s' , worker_ctx ) gt = self . _worker_pool . spawn ( self . _run_worker , worker_ctx , handle_result ) gt . link ( self . _handle_worker_thread_exited , worker_ctx ) self . _worker_threads [ worker_ctx ] = gt return worker_ctx | Spawn a worker thread for running the service method decorated by entrypoint . |
25,379 | def _kill_worker_threads ( self ) : num_workers = len ( self . _worker_threads ) if num_workers : _log . warning ( 'killing %s active workers(s)' , num_workers ) for worker_ctx , gt in list ( self . _worker_threads . items ( ) ) : _log . warning ( 'killing active worker for %s' , worker_ctx ) gt . kill ( ) | Kill any currently executing worker threads . |
25,380 | def _kill_managed_threads ( self ) : num_threads = len ( self . _managed_threads ) if num_threads : _log . warning ( 'killing %s managed thread(s)' , num_threads ) for gt , identifier in list ( self . _managed_threads . items ( ) ) : _log . warning ( 'killing managed thread `%s`' , identifier ) gt . kill ( ) | Kill any currently executing managed threads . |
25,381 | def wait ( self ) : if self . exception : raise self . exception if self . queue_consumer . stopped : raise RuntimeError ( "This consumer has been stopped, and can no longer be used" ) if self . queue_consumer . connection . connected is False : raise RuntimeError ( "This consumer has been disconnected, and can no longer " "be used" ) try : self . queue_consumer . get_message ( self . correlation_id ) except socket . error as exc : self . exception = exc if self . exception : raise self . exception return self . body | Makes a blocking call to its queue_consumer until the message with the given correlation_id has been processed . |
25,382 | def select_form ( self , selector = "form" , nr = 0 ) : if isinstance ( selector , bs4 . element . Tag ) : if selector . name != "form" : raise LinkNotFoundError self . __state . form = Form ( selector ) else : found_forms = self . get_current_page ( ) . select ( selector , limit = nr + 1 ) if len ( found_forms ) != nr + 1 : if self . __debug : print ( 'select_form failed for' , selector ) self . launch_browser ( ) raise LinkNotFoundError ( ) self . __state . form = Form ( found_forms [ - 1 ] ) return self . get_current_form ( ) | Select a form in the current page . |
25,383 | def links ( self , url_regex = None , link_text = None , * args , ** kwargs ) : all_links = self . get_current_page ( ) . find_all ( 'a' , href = True , * args , ** kwargs ) if url_regex is not None : all_links = [ a for a in all_links if re . search ( url_regex , a [ 'href' ] ) ] if link_text is not None : all_links = [ a for a in all_links if a . text == link_text ] return all_links | Return links in the page as a list of bs4 . element . Tag objects . |
25,384 | def find_link ( self , * args , ** kwargs ) : links = self . links ( * args , ** kwargs ) if len ( links ) == 0 : raise LinkNotFoundError ( ) else : return links [ 0 ] | Find and return a link as a bs4 . element . Tag object . |
25,385 | def follow_link ( self , link = None , * args , ** kwargs ) : link = self . _find_link_internal ( link , args , kwargs ) referer = self . get_url ( ) headers = { 'Referer' : referer } if referer else None return self . open_relative ( link [ 'href' ] , headers = headers ) | Follow a link . |
25,386 | def set_input ( self , data ) : for ( name , value ) in data . items ( ) : i = self . form . find ( "input" , { "name" : name } ) if not i : raise InvalidFormMethod ( "No input field named " + name ) i [ "value" ] = value | Fill - in a set of fields in a form . |
25,387 | def new_control ( self , type , name , value , ** kwargs ) : old_input = self . form . find_all ( 'input' , { 'name' : name } ) for old in old_input : old . decompose ( ) old_textarea = self . form . find_all ( 'textarea' , { 'name' : name } ) for old in old_textarea : old . decompose ( ) control = BeautifulSoup ( "" , "html.parser" ) . new_tag ( 'input' ) control [ 'type' ] = type control [ 'name' ] = name control [ 'value' ] = value for k , v in kwargs . items ( ) : control [ k ] = v self . form . append ( control ) return control | Add a new input element to the form . |
25,388 | def print_summary ( self ) : for input in self . form . find_all ( ( "input" , "textarea" , "select" , "button" ) ) : input_copy = copy . copy ( input ) for subtag in input_copy . find_all ( ) + [ input_copy ] : if subtag . string : subtag . string = subtag . string . strip ( ) print ( input_copy ) | Print a summary of the form . |
25,389 | def __looks_like_html ( response ) : text = response . text . lstrip ( ) . lower ( ) return text . startswith ( '<html' ) or text . startswith ( '<!doctype' ) | Guesses entity type when Content - Type header is missing . Since Content - Type is not strictly required some servers leave it out . |
25,390 | def add_soup ( response , soup_config ) : if ( "text/html" in response . headers . get ( "Content-Type" , "" ) or Browser . __looks_like_html ( response ) ) : response . soup = bs4 . BeautifulSoup ( response . content , ** soup_config ) else : response . soup = None | Attaches a soup object to a requests response . |
25,391 | def set_user_agent ( self , user_agent ) : if user_agent is None : requests_ua = requests . utils . default_user_agent ( ) user_agent = '%s (%s/%s)' % ( requests_ua , __title__ , __version__ ) self . session . headers [ 'User-agent' ] = user_agent | Replaces the current user agent in the requests session headers . |
25,392 | def _request ( self , form , url = None , ** kwargs ) : method = str ( form . get ( "method" , "get" ) ) action = form . get ( "action" ) url = urllib . parse . urljoin ( url , action ) if url is None : raise ValueError ( 'no URL to submit to' ) data = kwargs . pop ( "data" , dict ( ) ) files = kwargs . pop ( "files" , dict ( ) ) data = [ ( k , v ) for k , v in data . items ( ) ] selector = "," . join ( "{}[name]" . format ( i ) for i in ( "input" , "button" , "textarea" , "select" ) ) for tag in form . select ( selector ) : name = tag . get ( "name" ) if tag . has_attr ( 'disabled' ) : continue if tag . name == "input" : if tag . get ( "type" , "" ) . lower ( ) in ( "radio" , "checkbox" ) : if "checked" not in tag . attrs : continue value = tag . get ( "value" , "on" ) else : value = tag . get ( "value" , "" ) if tag . get ( "type" , "" ) . lower ( ) == "file" : filename = value if filename != "" and isinstance ( filename , string_types ) : content = open ( filename , "rb" ) else : content = "" files [ name ] = ( filename , content ) else : data . append ( ( name , value ) ) elif tag . name == "button" : if tag . get ( "type" , "" ) . lower ( ) in ( "button" , "reset" ) : continue else : data . append ( ( name , tag . get ( "value" , "" ) ) ) elif tag . name == "textarea" : data . append ( ( name , tag . text ) ) elif tag . name == "select" : options = tag . select ( "option" ) selected_values = [ i . get ( "value" , i . text ) for i in options if "selected" in i . attrs ] if "multiple" in tag . attrs : for value in selected_values : data . append ( ( name , value ) ) elif selected_values : data . append ( ( name , selected_values [ - 1 ] ) ) elif options : first_value = options [ 0 ] . get ( "value" , options [ 0 ] . text ) data . append ( ( name , first_value ) ) if method . lower ( ) == "get" : kwargs [ "params" ] = data else : kwargs [ "data" ] = data return self . session . request ( method , url , files = files , ** kwargs ) | Extract input data from the form to pass to a Requests session . |
25,393 | def submit ( self , form , url = None , ** kwargs ) : if isinstance ( form , Form ) : form = form . form response = self . _request ( form , url , ** kwargs ) Browser . add_soup ( response , self . soup_config ) return response | Prepares and sends a form request . |
25,394 | def close ( self ) : if self . session is not None : self . session . cookies . clear ( ) self . session . close ( ) self . session = None | Close the current session if still open . |
25,395 | def requirements_from_file ( filename ) : return [ line . strip ( ) for line in open ( filename , 'r' ) if line . strip ( ) and not line . strip ( ) . startswith ( '--' ) ] | Parses a pip requirements file into a list . |
25,396 | def read ( fname , URL , URLImage ) : readme = open ( path . join ( path . dirname ( __file__ ) , fname ) ) . read ( ) if hasattr ( readme , 'decode' ) : readme = readme . decode ( 'utf8' ) readme = re . sub ( r'`<([^>]*)>`__' , r'`\1 <' + URL + r"/blob/master/\1>`__" , readme ) readme = re . sub ( r"\.\. image:: /" , ".. image:: " + URLImage + "/" , readme ) return readme | Read the content of a file . |
25,397 | def imscatter ( images , positions ) : positions = np . array ( positions ) bottoms = positions [ : , 1 ] - np . array ( [ im . shape [ 1 ] / 2.0 for im in images ] ) tops = bottoms + np . array ( [ im . shape [ 1 ] for im in images ] ) lefts = positions [ : , 0 ] - np . array ( [ im . shape [ 0 ] / 2.0 for im in images ] ) rigths = lefts + np . array ( [ im . shape [ 0 ] for im in images ] ) most_bottom = int ( np . floor ( bottoms . min ( ) ) ) most_top = int ( np . ceil ( tops . max ( ) ) ) most_left = int ( np . floor ( lefts . min ( ) ) ) most_right = int ( np . ceil ( rigths . max ( ) ) ) scatter_image = np . zeros ( [ most_right - most_left , most_top - most_bottom , 3 ] , dtype = imgs [ 0 ] . dtype ) positions -= [ most_left , most_bottom ] for im , pos in zip ( images , positions ) : xl = int ( pos [ 0 ] - im . shape [ 0 ] / 2 ) xr = xl + im . shape [ 0 ] yb = int ( pos [ 1 ] - im . shape [ 1 ] / 2 ) yt = yb + im . shape [ 1 ] scatter_image [ xl : xr , yb : yt , : ] = im return scatter_image | Creates a scatter plot where each plot is shown by corresponding image |
25,398 | def pauli ( qubo ) : from blueqat . pauli import qubo_bit h = 0.0 assert all ( len ( q ) == len ( qubo ) for q in qubo ) for i in range ( len ( qubo ) ) : h += qubo_bit ( i ) * qubo [ i ] [ i ] for j in range ( i + 1 , len ( qubo ) ) : h += qubo_bit ( i ) * qubo_bit ( j ) * ( qubo [ i ] [ j ] + qubo [ j ] [ i ] ) return h | Convert to pauli operators of universal gate model . Requires blueqat . |
25,399 | def plot ( self ) : import matplotlib . pyplot as plt plt . plot ( self . E ) plt . show ( ) | Draws energy chart using matplotlib . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.