idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
23,600
def match(self, message) -> bool:
    """Return True if *message* matches every filter set on this template.

    Falsy template attributes are ignored; set attributes and all
    metadata entries must compare equal on the message.
    """
    checks = (
        (self.to, message.to),
        (self.sender, message.sender),
        (self.body, message.body),
        (self.thread, message.thread),
    )
    for expected, actual in checks:
        if expected and actual != expected:
            return False
    # Every metadata key must exist on the message with an equal value.
    for key, value in self.metadata.items():
        if message.get_metadata(key) != value:
            return False
    logger.debug(f"message matched {self} == {message}")
    return True
Returns whether a message matches this template or not. The message can be a Message object or a Template object.
23,601
def make_reply(self):
    """Build a reply Message with sender and receiver swapped."""
    return Message(
        sender=str(self.to),
        to=str(self.sender),
        body=self.body,
        thread=self.thread,
        metadata=self.metadata,
    )
Creates a copy of the message exchanging sender and receiver
23,602
def prepare(self):
    """Return an aioxmpp.stanza.Message built from this Message, ready to send."""
    msg = aioxmpp.stanza.Message(
        to=self.to,
        from_=self.sender,
        type_=aioxmpp.MessageType.CHAT,
    )
    # None selects the default language slot for the body payload.
    msg.body[None] = self.body

    # Metadata (and the thread id) travel as an XEP-0004 data form.
    if len(self.metadata):
        data = forms_xso.Data(type_=forms_xso.DataType.FORM)
        for name, value in self.metadata.items():
            data.fields.append(
                forms_xso.Field(
                    var=name,
                    type_=forms_xso.FieldType.TEXT_SINGLE,
                    values=[value],
                )
            )
        # NOTE(review): the thread field is only attached when metadata is
        # non-empty -- confirm a thread with no metadata is meant to be dropped.
        if self.thread:
            data.fields.append(
                forms_xso.Field(
                    var="_thread_node",
                    type_=forms_xso.FieldType.TEXT_SINGLE,
                    values=[self.thread]
                )
            )
        data.title = SPADE_X_METADATA
        msg.xep0004_data = [data]
    return msg
Returns an aioxmpp . stanza . Message built from the Message and prepared to be sent .
23,603
def unused_port(hostname):
    """Return a TCP port number that is currently free on *hostname*."""
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    with probe:
        # Binding to port 0 makes the OS pick an unused port for us.
        probe.bind((hostname, 0))
        return probe.getsockname()[1]
Return a port that is unused on the current host .
23,604
async def start_server_in_loop(runner, hostname, port, agent):
    """Finish runner setup and start serving the agent's web app over HTTP."""
    await runner.setup()
    site = aioweb.TCPSite(runner, hostname, port)
    agent.web.server = site
    await site.start()
    logger.info(f"Serving on http://{hostname}:{port}/")
Listens to http requests and sends them to the webapp .
23,605
def start(self, hostname=None, port=None, templates_path=None):
    """Start the web interface on the given host/port (auto-picking a free port)."""
    self.hostname = hostname if hostname else "localhost"
    if port:
        self.port = port
    elif not self.port:
        # No port configured anywhere: let the OS choose one.
        self.port = unused_port(self.hostname)
    if templates_path:
        self.loaders.insert(0, jinja2.FileSystemLoader(templates_path))
        self._set_loaders()
    self.setup_routes()
    self.runner = aioweb.AppRunner(self.app)
    server_coro = start_server_in_loop(self.runner, self.hostname, self.port, self.agent)
    return self.agent.submit(server_coro)
Starts the web interface .
23,606
def add_get(self, path, controller, template, raw=False):
    """Register a GET route; unless *raw*, wrap the controller with *template*."""
    handler = controller if raw else self._prepare_controller(controller, template)
    self.app.router.add_get(path, handler)
Setup a route of type GET
23,607
def set_agent(self, agent):
    """Link this behaviour with its owner *agent*.

    Caches the agent's presence manager and web component and creates
    the behaviour's mailbox queue.
    """
    self.agent = agent
    # asyncio.Queue(loop=...) was deprecated in Python 3.8 and removed in
    # 3.10; the queue binds to the running loop when first awaited.
    self.queue = asyncio.Queue()
    self.presence = agent.presence
    self.web = agent.web
Links behaviour with its owner agent
23,608
def match(self, message: Message) -> bool:
    """Return True if *message* matches this behaviour's template (or there is none)."""
    if not self.template:
        # No template attached: every message matches.
        return True
    return self.template.match(message)
Matches a message against the behaviour's template.
23,609
def set(self, name: str, value: Any) -> None:
    """Store the knowledge item *name* -> *value* in the agent's knowledge base."""
    self.agent.set(name, value)
Stores a knowledge item in the agent knowledge base .
23,610
def start(self):
    """Schedule the behaviour's _start coroutine on the agent loop and mark it running."""
    self.agent.submit(self._start())
    self.is_running = True
Starts the behaviour in the event loop.
23,611
async def _start(self):
    """Startup coroutine: wait for the agent, run on_start, then run _step.

    If on_start raises, the exception is logged and the behaviour is
    killed with the exception as exit code; note _step is still awaited
    afterwards.
    """
    # NOTE(review): _alive.wait() is not awaited, so this presumably is a
    # threading.Event-style wait -- confirm it cannot block the event loop.
    self.agent._alive.wait()
    try:
        await self.on_start()
    except Exception as e:
        logger.error(
            "Exception running on_start in behaviour {}: {}".format(self, e)
        )
        self.kill(exit_code=e)
    await self._step()
    self._is_done.clear()
Start coroutine . runs on_start coroutine and then runs the _step coroutine where the body of the behaviour is called .
23,612
def kill(self, exit_code: Any = None):
    """Force-stop the behaviour, optionally recording *exit_code*."""
    self._force_kill.set()
    if exit_code is not None:
        self._exit_code = exit_code
    logger.info("Killing behavior {0} with exit code: {1}".format(self, exit_code))
Stops the behaviour
23,613
def exit_code(self) -> Any:
    """Return the behaviour's exit code.

    Raises BehaviourNotFinishedException unless the behaviour has
    finished or been killed.
    """
    # Guard clause instead of if/else: fail fast while still running.
    if not (self._done() or self.is_killed()):
        raise BehaviourNotFinishedException
    return self._exit_code
Returns the exit_code of the behaviour . It only works when the behaviour is done or killed otherwise it raises an exception .
23,614
async def send(self, msg: Message):
    """Send *msg* through the agent's container, filling in the sender if missing."""
    if not msg.sender:
        # Default the sender to this behaviour's agent.
        msg.sender = str(self.agent.jid)
        logger.debug(f"Adding agent's jid as sender to message: {msg}")
    await self.agent.container.send(msg, self)
    msg.sent = True
    self.agent.traces.append(msg, category=str(self))
Sends a message .
23,615
async def receive(self, timeout: float = None) -> Union[Message, None]:
    """Dequeue the next message for this behaviour.

    With a timeout, wait up to *timeout* seconds; otherwise poll without
    blocking. Returns None when nothing arrives in time.
    """
    msg = None
    if timeout:
        try:
            msg = await asyncio.wait_for(self.queue.get(), timeout=timeout)
        except asyncio.TimeoutError:
            msg = None
    else:
        try:
            msg = self.queue.get_nowait()
        except asyncio.QueueEmpty:
            msg = None
    return msg
Receives a message for this behaviour . If timeout is not None it returns the message or None after timeout is done .
23,616
def period(self, value: float):
    """Set the repeat period, in seconds; must be non-negative."""
    if value < 0:
        raise ValueError("Period must be greater or equal than zero.")
    self._period = timedelta(seconds=value)
Set the period .
23,617
def add_state(self, name: str, state: State, initial: bool = False):
    """Register *state* under *name*; optionally make it the FSM's initial state."""
    if not issubclass(state.__class__, State):
        raise AttributeError("state must be subclass of spade.behaviour.State")
    self._states[name] = state
    if initial:
        self.current_state = name
Adds a new state to the FSM .
23,618
def add_transition(self, source: str, dest: str):
    """Allow the FSM to move from state *source* to state *dest*."""
    self._transitions[source].append(dest)
Adds a transition from one state to another .
23,619
def is_valid_transition(self, source: str, dest: str) -> bool:
    """Return True if *source* -> *dest* is a registered FSM transition.

    Raises NotValidState for unknown states and NotValidTransition when
    the transition was never registered.
    """
    for state in (source, dest):
        if state not in self._states:
            raise NotValidState
    if dest not in self._transitions[source]:
        raise NotValidTransition
    return True
Checks if a transition is registered in the FSM.
23,620
def to_graphviz(self) -> str:
    """Render the FSM's transition structure as Graphviz digraph source."""
    parts = ["digraph finite_state_machine { rankdir=LR; node [fixedsize=true];"]
    for origin, destinations in self._transitions.items():
        # Graphviz identifiers cannot contain spaces.
        origin = origin.replace(" ", "_")
        parts.extend(
            "{0} -> {1};".format(origin, d.replace(" ", "_"))
            for d in destinations
        )
    parts.append("}")
    return "".join(parts)
Converts the FSM behaviour structure to Graphviz syntax
23,621
def resample(x, sr_orig, sr_new, axis=-1, filter='kaiser_best', **kwargs):
    """Resample signal *x* from rate *sr_orig* to *sr_new* along *axis*."""
    if sr_orig <= 0:
        raise ValueError('Invalid sample rate: sr_orig={}'.format(sr_orig))
    if sr_new <= 0:
        raise ValueError('Invalid sample rate: sr_new={}'.format(sr_new))
    sample_ratio = float(sr_new) / sr_orig
    # Output length scales with the rate ratio (truncated toward zero).
    shape = list(x.shape)
    shape[axis] = int(shape[axis] * sample_ratio)
    if shape[axis] < 1:
        raise ValueError('Input signal length={} is too small to '
                         'resample from {}->{}'.format(x.shape[axis], sr_orig, sr_new))
    y = np.zeros(shape, dtype=x.dtype)
    interp_win, precision, _ = get_filter(filter, **kwargs)
    if sample_ratio < 1:
        # Downsampling: scale the filter so it acts as an anti-aliasing low-pass.
        interp_win *= sample_ratio
    interp_delta = np.zeros_like(interp_win)
    interp_delta[:-1] = np.diff(interp_win)
    # Collapse to 2D so the resampling kernel only handles one memory layout.
    x_2d = x.swapaxes(0, axis).reshape((x.shape[axis], -1))
    y_2d = y.swapaxes(0, axis).reshape((y.shape[axis], -1))
    resample_f(x_2d, y_2d, sample_ratio, interp_win, interp_delta, precision)
    return y
Resample a signal x from sr_orig to sr_new along a given axis .
23,622
def sinc_window(num_zeros=64, precision=9, window=None, rolloff=0.945):
    """Construct a windowed sinc interpolation filter.

    Returns (interp_win, num_bits, rolloff): the right half of the
    tapered sinc, the number of samples per zero crossing, and the
    roll-off frequency.
    """
    if window is None:
        window = scipy.signal.blackmanharris
    elif not six.callable(window):
        raise TypeError('window must be callable, not type(window)={}'.format(type(window)))
    if not 0 < rolloff <= 1:
        raise ValueError('Invalid roll-off: rolloff={}'.format(rolloff))
    if num_zeros < 1:
        raise ValueError('Invalid num_zeros: num_zeros={}'.format(num_zeros))
    if precision < 0:
        raise ValueError('Invalid precision: precision={}'.format(precision))
    num_bits = 2 ** precision
    n = num_bits * num_zeros
    taps = np.linspace(0, num_zeros, num=n + 1, endpoint=True)
    sinc_win = rolloff * np.sinc(rolloff * taps)
    # Keep only the right half of the symmetric taper window.
    taper = window(2 * n + 1)[n:]
    return taper * sinc_win, num_bits, rolloff
Construct a windowed sinc interpolation filter
23,623
def get_filter(name_or_function, **kwargs):
    """Resolve a filter by builtin name, callable, or precomputed data file."""
    if name_or_function in FILTER_FUNCTIONS:
        # Builtin filter: look the function up on this module and call it.
        return getattr(sys.modules[__name__], name_or_function)(**kwargs)
    if six.callable(name_or_function):
        return name_or_function(**kwargs)
    try:
        return load_filter(name_or_function)
    except (IOError, ValueError):
        raise NotImplementedError('Cannot load filter definition for '
                                  '{}'.format(name_or_function))
Retrieve a window given its name or function handle .
23,624
def load_filter(filter_name):
    """Load a precomputed filter (half_window, precision, rolloff) from package data."""
    fname = os.path.join('data', os.path.extsep.join([filter_name, 'npz']))
    payload = np.load(pkg_resources.resource_filename(__name__, fname))
    return payload['half_window'], payload['precision'], payload['rolloff']
Retrieve a pre - computed filter .
23,625
def default_values_of(func):
    """Return names of *func*'s parameters that have a default value.

    Variadic and keyword-only parameters are included as well, so the
    result lines up with the tail of the positional argument list.
    """
    params = inspect.signature(func).parameters
    return [
        name
        for name, param in params.items()
        if param.default is not inspect.Parameter.empty
        or param.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD
    ]
Return the defaults of the function func .
23,626
def required_arguments(func):
    """Return the arguments of *func* that do not have a default value."""
    defaults = default_values_of(func)
    args = arguments_of(func)
    # Defaulted parameters always sit at the tail of the argument list.
    return args[:-len(defaults)] if defaults else args
Return all arguments of a function that do not have a default value .
23,627
def text(message: Text,
         default: Text = "",
         validate: Union[Type[Validator], Callable[[Text], bool], None] = None,
         qmark: Text = DEFAULT_QUESTION_PREFIX,
         style: Optional[Style] = None,
         **kwargs: Any) -> Question:
    """Prompt the user to enter a free-form text answer."""
    merged_style = merge_styles([DEFAULT_STYLE, style])
    validator = build_validator(validate)

    def get_prompt_tokens():
        return [
            ("class:qmark", qmark),
            ("class:question", ' {} '.format(message)),
        ]

    session = PromptSession(get_prompt_tokens,
                            style=merged_style,
                            validator=validator,
                            **kwargs)
    # Pre-fill the input buffer with the default answer.
    session.default_buffer.reset(Document(default))
    return Question(session.app)
Prompt the user to enter a free text message .
23,628
def skip_if(self, condition: bool, default: Any = None) -> 'Question':
    """Mark the question to be skipped (answering *default*) when *condition* holds.

    Returns self so calls can be chained.
    """
    self.default = default
    self.should_skip_question = condition
    return self
Skip the question if flag is set and return the default instead .
23,629
def _fix_unecessary_blank_lines(ps: PromptSession) -> None:
    """Work around extra blank lines prompt_toolkit adds below the prompt."""
    root = ps.layout.container
    # Drill down to the default buffer's window inside the layout tree.
    hsplit_children = root.get_children()[0].content.get_children()
    buffer_window = hsplit_children[1].content
    assert isinstance(buffer_window, Window)
    buffer_window.dont_extend_height = Always()
This is a fix for additional empty lines added by prompt toolkit .
23,630
def create_inquirer_layout(
        ic: InquirerControl,
        get_prompt_tokens: Callable[[], List[Tuple[Text, Text]]],
        **kwargs) -> Layout:
    """Build a layout stacking the question prompt above the selection list."""
    ps = PromptSession(get_prompt_tokens, reserve_space_for_menu=0, **kwargs)
    _fix_unecessary_blank_lines(ps)
    # Hide the selection list once the question has been answered.
    selection = ConditionalContainer(Window(ic), filter=~IsDone())
    return Layout(HSplit([ps.layout.container, selection]))
Create a layout combining question and inquirer selection .
23,631
def build(c: Union[Text, 'Choice', Dict[Text, Any]]) -> 'Choice':
    """Normalize a string, dict, or Choice into a Choice instance."""
    if isinstance(c, Choice):
        return c
    if isinstance(c, str):
        # A bare string is both the title and the value.
        return Choice(c, c)
    return Choice(c.get('name'),
                  c.get('value'),
                  c.get('disabled', None),
                  c.get('checked'),
                  c.get('key'))
Create a choice object from different representations .
23,632
def password(message: Text,
             default: Text = "",
             validate: Union[Type[Validator], Callable[[Text], bool], None] = None,
             qmark: Text = DEFAULT_QUESTION_PREFIX,
             style: Optional[Style] = None,
             **kwargs: Any) -> Question:
    """Ask for a secret: like text(), but the typed input is masked."""
    return text.text(message, default, validate, qmark, style,
                     is_password=True, **kwargs)
Question the user to enter a secret text not displayed in the prompt .
23,633
def form(**kwargs: Question):
    """Create a Form asking every given named question."""
    fields = [FormField(key, question) for key, question in kwargs.items()]
    return Form(*fields)
Create a form with multiple questions .
23,634
def prompt(questions: List[Dict[Text, Any]],
           answers: Optional[Dict[Text, Any]] = None,
           patch_stdout: bool = False,
           true_color: bool = False,
           kbi_msg: Text = DEFAULT_KBI_MESSAGE,
           **kwargs):
    """Ask every question in *questions* and return a dict of answers.

    Each question config needs at least 'type' and 'name'. Callable
    'choices', 'default', the 'when' predicate, and 'filter' are
    evaluated against the answers collected so far. A KeyboardInterrupt
    aborts the whole prompt and returns an empty dict.
    """
    # Accept a single question dict for convenience.
    if isinstance(questions, dict):
        questions = [questions]
    answers = answers or {}
    for question_config in questions:
        if 'type' not in question_config:
            raise PromptParameterException('type')
        if 'name' not in question_config:
            raise PromptParameterException('name')
        # Dynamic choices may depend on earlier answers.
        choices = question_config.get('choices')
        if choices is not None and callable(choices):
            question_config['choices'] = choices(answers)
        _kwargs = kwargs.copy()
        _kwargs.update(question_config)
        _type = _kwargs.pop('type')
        _filter = _kwargs.pop('filter', None)
        name = _kwargs.pop('name')
        when = _kwargs.pop('when', None)
        if true_color:
            _kwargs["color_depth"] = ColorDepth.TRUE_COLOR
        try:
            if when:
                # Skip questions whose 'when' predicate rejects the answers so far.
                if callable(question_config['when']):
                    try:
                        if not question_config['when'](answers):
                            continue
                    except Exception as e:
                        raise ValueError("Problem in 'when' check of {} "
                                         "question: {}".format(name, e))
                else:
                    raise ValueError("'when' needs to be function that "
                                     "accepts a dict argument")
            if _filter:
                if not callable(_filter):
                    raise ValueError("'filter' needs to be function that "
                                     "accepts an argument")
            if callable(question_config.get('default')):
                _kwargs['default'] = question_config['default'](answers)
            create_question_func = prompt_by_name(_type)
            if not create_question_func:
                raise ValueError("No question type '{}' found. "
                                 "Known question types are {}."
                                 "".format(_type, ", ".join(AVAILABLE_PROMPTS)))
            missing_args = list(utils.missing_arguments(create_question_func,
                                                        _kwargs))
            if missing_args:
                raise PromptParameterException(missing_args[0])
            question = create_question_func(**_kwargs)
            answer = question.unsafe_ask(patch_stdout)
            if answer is not None:
                if _filter:
                    try:
                        answer = _filter(answer)
                    except Exception as e:
                        raise ValueError("Problem processing 'filter' of {} "
                                         "question: {}".format(name, e))
                answers[name] = answer
        except KeyboardInterrupt:
            print('')
            print(kbi_msg)
            print('')
            return {}
    return answers
Prompt the user for input on all the questions .
23,635
def confirm(message: Text,
            default: bool = True,
            qmark: Text = DEFAULT_QUESTION_PREFIX,
            style: Optional[Style] = None,
            **kwargs: Any) -> Question:
    """Ask a yes/no question; Enter accepts *default*."""
    merged_style = merge_styles([DEFAULT_STYLE, style])
    # Mutable cell so the prompt-token callback can observe the chosen answer.
    status = {'answer': None}

    def get_prompt_tokens():
        tokens = []
        tokens.append(("class:qmark", qmark))
        tokens.append(("class:question", ' {} '.format(message)))
        if status['answer'] is not None:
            answer = ' {}'.format(YES if status['answer'] else NO)
            tokens.append(("class:answer", answer))
        else:
            instruction = ' {}'.format(YES_OR_NO if default else NO_OR_YES)
            tokens.append(("class:instruction", instruction))
        return to_formatted_text(tokens)

    bindings = KeyBindings()

    @bindings.add(Keys.ControlQ, eager=True)
    @bindings.add(Keys.ControlC, eager=True)
    def _(event):
        # Abort on Ctrl-C / Ctrl-Q.
        event.app.exit(exception=KeyboardInterrupt, style='class:aborting')

    @bindings.add('n')
    @bindings.add('N')
    def key_n(event):
        status['answer'] = False
        event.app.exit(result=False)

    @bindings.add('y')
    @bindings.add('Y')
    def key_y(event):
        status['answer'] = True
        event.app.exit(result=True)

    @bindings.add(Keys.ControlM, eager=True)
    def set_answer(event):
        # Enter accepts the default answer.
        status['answer'] = default
        event.app.exit(result=default)

    @bindings.add(Keys.Any)
    def other(event):
        # Swallow every other key press.
        pass

    return Question(PromptSession(get_prompt_tokens,
                                  key_bindings=bindings,
                                  style=merged_style,
                                  **kwargs).app)
Prompt the user to confirm or reject .
23,636
def rawselect(message: Text,
              choices: List[Union[Text, Choice, Dict[Text, Any]]],
              default: Optional[Text] = None,
              qmark: Text = DEFAULT_QUESTION_PREFIX,
              style: Optional[Style] = None,
              **kwargs: Any) -> Question:
    """Like select(), but items are chosen via keyboard shortcuts."""
    return select.select(message, choices, default, qmark, style,
                         use_shortcuts=True, **kwargs)
Ask the user to select one item from a list of choices using shortcuts .
23,637
def _discover_models(self):
    """Populate the per-app registry of cqlengine model classes."""
    alias = self.connection.connection.alias
    keyspace = self.connection.connection.keyspace
    for app in get_installed_apps():
        self._cql_models[app.__name__] = get_cql_models(
            app, connection=alias, keyspace=keyspace)
Return a dict mapping each installed app to the list of cassandra.cqlengine.Model classes it defines.
23,638
def django_table_names(self, only_existing=False, **kwargs):
    """Return the table name of every model registered in INSTALLED_APPS."""
    models = chain.from_iterable(self.cql_models.values())
    return [model.column_family_name(include_keyspace=False)
            for model in models]
Returns a list of all table names that have associated cqlengine models and are present in settings . INSTALLED_APPS .
23,639
def table_names(self, cursor=None, **kwargs):
    """Return all table names in the current keyspace (ignores *cursor*)."""
    if cursor:
        return []
    cluster = self.connection.connection.cluster
    keyspace_name = self.connection.connection.keyspace
    # Make sure schema metadata is populated before reading it.
    if (not cluster.schema_metadata_enabled
            and keyspace_name not in cluster.metadata.keyspaces):
        cluster.refresh_schema_metadata()
    return cluster.metadata.keyspaces[keyspace_name].tables
Returns all table names in current keyspace
23,640
def set_models_keyspace(self, keyspace):
    """Point every registered model of this connection at *keyspace*."""
    model_groups = self.connection.introspection.cql_models.values()
    for model_group in model_groups:
        for model in model_group:
            model.__keyspace__ = keyspace
Set keyspace for all connection models
23,641
def _import_management():
    """Import each installed app's .management module to register dispatcher events."""
    from importlib import import_module
    for app_name in settings.INSTALLED_APPS:
        try:
            import_module('.management', app_name)
        except SystemError:
            pass
        except ImportError as exc:
            msg = exc.args[0]
            # Ignore only the plain "module is absent" case; re-raise real
            # import errors raised from inside a management module.
            if not msg.startswith('No module named') or 'management' not in msg:
                raise
Import the management module within each installed app to register dispatcher events .
23,642
def get_installed_apps():
    """Return the models modules of all installed Django apps."""
    if django.VERSION >= (1, 7):
        # Modern app-registry API.
        from django.apps import apps
        return [cfg.models_module for cfg in apps.get_app_configs()
                if cfg.models_module is not None]
    # Legacy (< 1.7) fallback.
    from django.db import models
    return models.get_apps()
Return list of all installed apps
23,643
def handle(self, *args, **options):
    """Run the parent command while masquerading as the dummy backend.

    The cassandra engine has no migration support, so it is temporarily
    swapped for django.db.backends.dummy and always restored afterwards.
    """
    self._change_cassandra_engine_name('django.db.backends.dummy')
    try:
        super(Command, self).handle(*args, **options)
    finally:
        self._change_cassandra_engine_name('django_cassandra_engine')
Pretend django_cassandra_engine to be dummy database backend with no support for migrations .
23,644
def sql_flush(self, style, tables, sequences, allow_cascade=False):
    """Truncate every given table immediately; returns no deferred SQL."""
    execute = self.connection.connection.execute
    for table in tables:
        execute("TRUNCATE {}".format(table))
    # Everything already ran; no statements for the caller to execute.
    return []
Truncate all existing tables in current keyspace .
23,645
def add_field(self, field, **kwargs):
    """Register *field* as a private field and invalidate both meta caches."""
    private_fields = getattr(self, self._private_fields_name)
    private_fields.append(field)
    # Both the forward and reverse relation caches are now stale.
    self._expire_cache(reverse=True)
    self._expire_cache(reverse=False)
Add each field as a private field .
23,646
def _give_columns_django_field_attributes(self):
    """Graft Django Field API methods onto every cqlengine Column instance.

    Binds each listed django_field_methods function to the column so the
    column can stand in for a Django model field.
    """
    methods_to_add = (
        django_field_methods.value_from_object,
        django_field_methods.value_to_string,
        django_field_methods.get_attname,
        django_field_methods.get_cache_name,
        django_field_methods.pre_save,
        django_field_methods.get_prep_value,
        django_field_methods.get_choices,
        django_field_methods.get_choices_default,
        django_field_methods.save_form_data,
        django_field_methods.formfield,
        django_field_methods.get_db_prep_value,
        django_field_methods.get_db_prep_save,
        django_field_methods.db_type_suffix,
        django_field_methods.select_format,
        django_field_methods.get_internal_type,
        django_field_methods.get_attname_column,
        django_field_methods.check,
        django_field_methods._check_field_name,
        django_field_methods._check_db_index,
        django_field_methods.deconstruct,
        django_field_methods.run_validators,
        django_field_methods.clean,
        django_field_methods.get_db_converters,
        django_field_methods.get_prep_lookup,
        django_field_methods.get_db_prep_lookup,
        django_field_methods.get_filter_kwargs_for_object,
        django_field_methods.set_attributes_from_name,
        django_field_methods.db_parameters,
        django_field_methods.get_pk_value_on_save,
        django_field_methods.get_col,
    )
    for name, cql_column in six.iteritems(self._defined_columns):
        self._set_column_django_attributes(cql_column=cql_column, name=name)
        for method in methods_to_add:
            # Python 2 functions expose func_name; Python 3 uses __name__.
            try:
                method_name = method.func_name
            except AttributeError:
                method_name = method.__name__
            new_method = six.create_bound_method(method, cql_column)
            setattr(cql_column, method_name, new_method)
Add Django Field attributes to each cqlengine . Column instance .
23,647
def _get_column ( cls , name ) : if name == 'pk' : return cls . _meta . get_field ( cls . _meta . pk . name ) return cls . _columns [ name ]
Based on cqlengine . models . BaseModel . _get_column .
23,648
def _get_next ( request ) : next = request . POST . get ( 'next' , request . GET . get ( 'next' , request . META . get ( 'HTTP_REFERER' , None ) ) ) if not next : next = request . path return next
The part that's the least straightforward about views in this module is how they determine their redirects after they have finished computation.
23,649
def get_cache_key(user_or_username, size, prefix):
    """Build a cache key from a username (or user object), image size, and prefix."""
    if isinstance(user_or_username, get_user_model()):
        user_or_username = get_username(user_or_username)
    raw_key = six.u('%s_%s_%s') % (prefix, user_or_username, size)
    digest = hashlib.md5(force_bytes(raw_key)).hexdigest()
    # Slugify + truncate keeps the key cache-backend-safe; md5 keeps it unique.
    return six.u('%s_%s') % (slugify(raw_key)[:100], digest)
Returns a cache key consisting of a username and image size.
23,650
def cache_result(default_size=settings.AVATAR_DEFAULT_SIZE):
    """Decorator caching the result of a (user, size) function.

    Becomes a no-op when avatar caching is disabled.
    """
    if not settings.AVATAR_CACHE_ENABLED:
        def decorator(func):
            return func
        return decorator

    def decorator(func):
        def cached_func(user, size=None, **kwargs):
            prefix = func.__name__
            # Remember the prefix so invalidate_cache can find all entries.
            cached_funcs.add(prefix)
            key = get_cache_key(user, size or default_size, prefix=prefix)
            result = cache.get(key)
            if result is None:
                result = func(user, size or default_size, **kwargs)
                cache_set(key, result)
            return result
        return cached_func
    return decorator
Decorator to cache the result of functions that take a user and a size value .
23,651
def invalidate_cache(user, size=None):
    """Drop every cached avatar entry for *user* (all auto sizes plus *size*)."""
    sizes = set(settings.AVATAR_AUTO_GENERATE_SIZES)
    if size is not None:
        sizes.add(size)
    for prefix in cached_funcs:
        for cached_size in sizes:
            cache.delete(get_cache_key(user, cached_size, prefix))
Function to be called when saving or changing a user's avatars.
23,652
def create(self,
           institution_id,
           initial_products,
           _options=None,
           webhook=None,
           transactions__start_date=None,
           transactions__end_date=None,
           ):
    """Generate a sandbox public token for the given institution and products."""
    options = _options or {}
    if webhook is not None:
        options['webhook'] = webhook
    # Fold the dunder-style transaction arguments into a nested options dict.
    transaction_options = dict(options.get('transactions', {}))
    if transactions__start_date is not None:
        transaction_options['start_date'] = transactions__start_date
    if transactions__end_date is not None:
        transaction_options['end_date'] = transactions__end_date
    if transaction_options:
        options['transactions'] = transaction_options
    return self.client.post_public_key('/sandbox/public_token/create', {
        'institution_id': institution_id,
        'initial_products': initial_products,
        'options': options,
    })
Generate a public token for sandbox testing .
23,653
def from_response(response):
    """Instantiate the PlaidError subclass matching the response's error_type."""
    error_cls = PLAID_ERROR_TYPE_MAP.get(response['error_type'], PlaidError)
    return error_cls(response['error_message'],
                     response['error_type'],
                     response['error_code'],
                     response['display_message'],
                     response['request_id'],
                     response.get('causes'))
Create an error of the right class from an API response .
23,654
def create(self, access_tokens, days_requested, options=None):
    """Kick off creation of an asset report for the given access tokens."""
    payload = {
        'access_tokens': access_tokens,
        'days_requested': days_requested,
        'options': options or {},
    }
    return self.client.post('/asset_report/create', payload)
Create an asset report .
23,655
def refresh(self, asset_report_token, days_requested, options=None):
    """Request a refreshed asset report derived from an existing one."""
    payload = {
        'asset_report_token': asset_report_token,
        'days_requested': days_requested,
        'options': options or {},
    }
    return self.client.post('/asset_report/refresh', payload)
Create a new refreshed asset report based on an existing asset report .
23,656
def get(self, asset_report_token, include_insights=False):
    """Fetch an asset report, optionally with insights included."""
    payload = {
        'asset_report_token': asset_report_token,
        'include_insights': include_insights,
    }
    return self.client.post('/asset_report/get', payload)
Retrieves an asset report .
23,657
def post(self, path, data, is_json=True):
    """POST *data* plus this client's credentials to *path*."""
    payload = dict(client_id=self.client_id, secret=self.secret)
    payload.update(data)
    return self._post(path, payload, is_json)
Make a post request with client_id and secret key .
23,658
def post_public(self, path, data, is_json=True):
    """POST *data* to *path* with no authentication fields added."""
    return self._post(path, data, is_json)
Make a post request requiring no auth .
23,659
def post_public_key(self, path, data, is_json=True):
    """POST *data* to *path* authenticated only by the public key."""
    payload = {'public_key': self.public_key}
    payload.update(data)
    return self._post(path, payload, is_json)
Make a post request using a public key .
23,660
def get_by_id(self, institution_id, _options=None):
    """Fetch a single institution by its id."""
    return self.client.post_public_key('/institutions/get_by_id', {
        'institution_id': institution_id,
        'options': _options or {},
    })
Fetch a single institution by id .
23,661
def search(self, query, _options=None, products=None):
    """Search all institutions by name.

    *products* optionally restricts results to institutions supporting
    those products.

    Fix: the previous signature used a shared mutable default
    (_options={}); None is behavior-compatible here because the body
    already falls back with `_options or {}`.
    """
    options = _options or {}
    return self.client.post_public_key('/institutions/search', {
        'query': query,
        'products': products,
        'options': options,
    })
Search all institutions by name .
23,662
def read_avro(file_path_or_buffer, schema=None, **kwargs):
    """Read an Avro file (path or open binary buffer) into a DataFrame."""
    if isinstance(file_path_or_buffer, six.string_types):
        with open(file_path_or_buffer, 'rb') as source:
            return __file_to_dataframe(source, schema, **kwargs)
    return __file_to_dataframe(file_path_or_buffer, schema, **kwargs)
Avro file reader .
23,663
def to_avro(file_path_or_buffer, df, schema=None, codec='null', append=False):
    """Write DataFrame *df* to an Avro file (path or open binary buffer)."""
    if schema is None:
        schema = __schema_infer(df)
    records = df.to_dict('records')
    if isinstance(file_path_or_buffer, six.string_types):
        mode = 'a+b' if append else 'wb'
        with open(file_path_or_buffer, mode) as sink:
            fastavro.writer(sink, schema=schema, records=records, codec=codec)
    else:
        fastavro.writer(file_path_or_buffer, schema=schema,
                        records=records, codec=codec)
Avro file writer .
23,664
def update(self, x, w=1):
    """Update the t-digest with value *x* and weight *w*."""
    self.n += w
    if len(self) == 0:
        # First point: it becomes the first centroid.
        self._add_centroid(Centroid(x, w))
        return
    S = self._find_closest_centroids(x)
    # Spread the weight over the closest centroids (picked in random
    # order), respecting each centroid's size threshold; whatever weight
    # is left over becomes a new centroid.
    while len(S) != 0 and w > 0:
        j = choice(list(range(len(S))))
        c_j = S[j]
        q = self._compute_centroid_quantile(c_j)
        # Centroid already at capacity -- it cannot absorb any weight.
        if c_j.count + w > self._threshold(q):
            S.pop(j)
            continue
        delta_w = min(self._threshold(q) - c_j.count, w)
        self._update_centroid(c_j, x, delta_w)
        w -= delta_w
        S.pop(j)
    if w > 0:
        self._add_centroid(Centroid(x, w))
    if len(self) > self.K / self.delta:
        # Too many centroids: shrink the digest back down.
        self.compress()
    return
Update the t - digest with value x and weight w .
23,665
def batch_update(self, values, w=1):
    """Update the digest with each value in *values* (all weight *w*), then compress."""
    for value in values:
        self.update(value, w)
    self.compress()
    return
Update the t - digest with an iterable of values . This assumes all points have the same weight .
23,666
def trimmed_mean(self, p1, p2):
    """Mean of the distribution between percentiles *p1* and *p2*.

    Walks the centroids in order, counting only the weight that falls
    inside the [p1, p2] percentile window; boundary centroids are
    partially counted.
    """
    if not (p1 < p2):
        raise ValueError("p1 must be between 0 and 100 and less than p2.")
    min_count = p1 / 100. * self.n
    max_count = p2 / 100. * self.n
    trimmed_sum = trimmed_count = curr_count = 0
    for i, c in enumerate(self.C.values()):
        next_count = curr_count + c.count
        if next_count <= min_count:
            # Entirely below the window: skip.
            curr_count = next_count
            continue
        count = c.count
        if curr_count < min_count:
            # Straddles the lower bound: trim the leading part.
            count = next_count - min_count
        if next_count > max_count:
            # Straddles the upper bound: trim the trailing part.
            count -= next_count - max_count
        trimmed_sum += count * c.mean
        trimmed_count += count
        if next_count >= max_count:
            break
        curr_count = next_count
    if trimmed_count == 0:
        return 0
    return trimmed_sum / trimmed_count
Computes the mean of the distribution between the two percentiles p1 and p2 . This is a modified algorithm than the one presented in the original t - Digest paper .
23,667
def centroids_to_list(self):
    """Return the digest's centroids as a list of {'m': mean, 'c': count} dicts."""
    out = []
    for key in self.C.keys():
        node = self.C.get_value(key)
        out.append({'m': node.mean, 'c': node.count})
    return out
Returns a Python list of the TDigest object's Centroid values.
23,668
def update_from_dict(self, dict_values):
    """Apply K/delta settings and centroid data from *dict_values*; returns self."""
    self.K = dict_values.get('K', self.K)
    self.delta = dict_values.get('delta', self.delta)
    self.update_centroids_from_list(dict_values['centroids'])
    return self
Updates TDigest object with dictionary values .
23,669
def update_centroids_from_list(self, list_values):
    """Merge centroids given as [{'m': mean, 'c': count}, ...] into the digest.

    Returns self so calls can be chained.
    """
    # A plain loop, not a throwaway list comprehension, for the side effects.
    for value in list_values:
        self.update(value['m'], value['c'])
    return self
Add or update Centroids from a Python list . Any existing centroids in the digest object are appropriately updated .
23,670
def is_undirected(matrix):
    """Return True when *matrix* is symmetric, i.e. represents an undirected graph."""
    if not isspmatrix(matrix):
        return np.allclose(matrix, matrix.T)
    # Sparse matrices need the allclose variant that avoids densifying.
    return sparse_allclose(matrix, matrix.transpose())
Determine whether the matrix represents an undirected graph (i.e. is symmetric).
23,671
def convert_to_adjacency_matrix(matrix):
    """Convert a transition matrix into an (integer-valued) adjacency matrix.

    Each column is scaled by the largest denominator among its entries
    so the column becomes integral. The matrix is modified in place and
    also returned.
    """
    sparse = isspmatrix(matrix)
    for col_idx in range(matrix.shape[0]):
        if sparse:
            entries = find(matrix[:, col_idx])[2]
        else:
            entries = matrix[:, col_idx].T.tolist()[0]
        scale = max(Fraction(value).limit_denominator().denominator
                    for value in entries)
        matrix[:, col_idx] *= scale
    return matrix
Converts transition matrix into adjacency matrix
23,672
def modularity(matrix, clusters):
    """Compute the modularity Q of ``clusters`` over the graph ``matrix``.

    :param matrix: transition matrix (dense numpy matrix or scipy sparse);
        converted to an adjacency matrix in place first.
    :param clusters: clustering passed to ``delta_matrix`` (defined elsewhere
        in this module), which marks same-cluster pairs.
    :returns: modularity score Q.
    """
    matrix = convert_to_adjacency_matrix(matrix)
    # Total edge weight; expected-weight terms below are normalized by m.
    m = matrix.sum()

    if isspmatrix(matrix):
        # CSR copy gives efficient row slicing for the row sums used below.
        matrix_2 = matrix.tocsr(copy=True)
    else:
        matrix_2 = matrix

    if is_undirected(matrix):
        # Undirected: expected weight uses total (in + out) degree of each node.
        expected = lambda i, j: ((matrix_2[i, :].sum() + matrix[:, i].sum()) *
                                 (matrix[:, j].sum() + matrix_2[j, :].sum()))
    else:
        # Directed: expected weight is out-degree(i) * in-degree(j).
        expected = lambda i, j: (matrix_2[i, :].sum() * matrix[:, j].sum())

    # Only pairs sharing a cluster (nonzero in delta) contribute to Q.
    delta = delta_matrix(matrix, clusters)
    indices = np.array(delta.nonzero())

    Q = sum(matrix[i, j] - expected(i, j) / m for i, j in indices.T) / m

    return Q
Compute the modularity
23,673
def sparse_allclose(a, b, rtol=1e-5, atol=1e-8):
    """Version of ``np.allclose`` that also works with sparse matrices.

    Checks ``|a - b| - rtol * |b| <= atol`` element-wise via the maximum.
    """
    tolerance_gap = np.abs(a - b) - rtol * np.abs(b)
    return tolerance_gap.max() <= atol
Version of np . allclose for use with sparse matrices
23,674
def _guess_concat(data):
    """Return a concatenation function matching the type of ``data``.

    Text maps to string join, bytes to ``concat_bytes``; anything else
    falls back to ``list``.
    """
    concat_by_type = {
        type(u''): u''.join,
        type(b''): concat_bytes,
    }
    return concat_by_type.get(type(data), list)
Guess concat function from given data
23,675
def print_code_table(self, out=sys.stdout):
    """Write a human-readable overview of the code table to ``out``."""
    out.write(u'bits code (value) symbol\n')
    # One row per symbol, sorted for stable output.
    for symbol, (bitsize, value) in sorted(self._table.items()):
        padded_code = bin(value)[2:].rjust(bitsize, '0')
        out.write(u'{b:4d} {c:10} ({v:5d}) {s!r}\n'.format(
            b=bitsize, v=value, s=symbol, c=padded_code))
Print code table overview
23,676
def encode_streaming(self, data):
    """Encode ``data`` symbol by symbol, yielding output one byte at a time.

    Symbols are looked up in ``self._table`` (symbol -> (bitsize, value)),
    packed MSB-first into an integer bit buffer, and a byte is emitted
    whenever 8 or more bits are available.  After the input is exhausted,
    the end-of-file code is appended and the final partial byte (if any)
    is zero-padded on the right.

    :param data: iterable of symbols present in the code table.
    :returns: generator of single bytes (via ``to_byte``).
    """
    buffer = 0
    size = 0
    for s in data:
        b, v = self._table[s]
        # Append the codeword's bits at the low end of the buffer.
        buffer = (buffer << b) + v
        size += b
        while size >= 8:
            # Emit the most significant complete byte and strip it off.
            byte = buffer >> (size - 8)
            yield to_byte(byte)
            buffer = buffer - (byte << (size - 8))
            size -= 8

    # Flush: append the EOF codeword so the decoder knows where to stop.
    # (If the stream ended byte-aligned, nothing is flushed and the decoder
    # is expected to stop at end of data.)
    if size > 0:
        b, v = self._table[_EOF]
        buffer = (buffer << b) + v
        size += b
        if size >= 8:
            # NOTE(review): only the top byte is emitted here; if size
            # exceeds 8 the remaining low bits are dropped, seemingly
            # relying on the decoder stopping at end of data -- confirm.
            byte = buffer >> (size - 8)
        else:
            # Left-align the remaining bits and pad with zeros.
            byte = buffer << (8 - size)
        yield to_byte(byte)
Encode given data in streaming fashion .
23,677
def decode(self, data, as_list=False):
    """Decode the given byte stream.

    :param data: iterable of bytes to decode.
    :param as_list: when True, return the decoded symbols as a list instead
        of passing them through the configured concat function.
    :returns: decoded symbols, aggregated per ``as_list``.
    """
    # Fix: the `as_list` parameter was previously accepted but ignored.
    decoded = self.decode_streaming(data)
    if as_list:
        return list(decoded)
    return self._concat(decoded)
Decode given data .
23,678
def decode_streaming(self, data):
    """Decode ``data`` in streaming fashion, yielding one symbol at a time.

    Builds a reverse lookup mapping (bitsize, value) -> symbol, then consumes
    the input bit by bit (MSB first).  Whenever the accumulated bits match a
    codeword the symbol is emitted and the accumulator resets.  Decoding
    stops as soon as the end-of-file symbol is seen, so zero-padding bits in
    the final byte are ignored.

    :param data: iterable of bytes (as produced by ``encode_streaming``).
    :returns: generator of symbols.
    """
    # Invert the code table: (bitsize, value) -> symbol.
    lookup = dict(((b, v), s) for (s, (b, v)) in self._table.items())

    buffer = 0
    size = 0
    for byte in data:
        # Shift in bits from most significant to least significant.
        for m in [128, 64, 32, 16, 8, 4, 2, 1]:
            buffer = (buffer << 1) + bool(from_byte(byte) & m)
            size += 1
            if (size, buffer) in lookup:
                symbol = lookup[size, buffer]
                if symbol == _EOF:
                    # End-of-stream marker: stop decoding.
                    return
                yield symbol
                buffer = 0
                size = 0
Decode given data in streaming fashion
23,679
def from_data(cls, data):
    """Build a Huffman code table from the symbol frequencies found in ``data``.

    The concat function used for decoding is guessed from the type of ``data``.
    """
    symbol_frequencies = collections.Counter(data)
    return cls.from_frequencies(symbol_frequencies, concat=_guess_concat(data))
Build Huffman code table from symbol sequence
23,680
def _reset_state ( self ) : self . _uuid = None self . _columns = None self . _rownumber = 0 self . _state = self . _STATE_NONE self . _data = None self . _columns = None
Reset state about the previous query in preparation for running another query
23,681
def fetchone(self):
    """Fetch the next row of the query result set, or None when exhausted.

    :raises Exception: if no query has been executed yet.
    """
    # Guard clauses replace the original if/else ladder.
    if self._state == self._STATE_NONE:
        raise Exception("No query yet")
    if not self._data:
        return None
    self._rownumber += 1
    return self._data.pop(0)
Fetch the next row of a query result set returning a single sequence or None when no more data is available .
23,682
def _process_response ( self , response ) : assert self . _state == self . _STATE_RUNNING , "Should be running if processing response" cols = None data = [ ] for r in response : if not cols : cols = [ ( f , r . _fields [ f ] . db_type ) for f in r . _fields ] data . append ( [ getattr ( r , f ) for f in r . _fields ] ) self . _data = data self . _columns = cols self . _state = self . _STATE_FINISHED
Update the internal state with the data from the response
23,683
def _frequency_order_transform ( sets ) : logging . debug ( "Applying frequency order transform on tokens..." ) counts = reversed ( Counter ( token for s in sets for token in s ) . most_common ( ) ) order = dict ( ( token , i ) for i , ( token , _ ) in enumerate ( counts ) ) sets = [ np . sort ( [ order [ token ] for token in s ] ) for s in sets ] logging . debug ( "Done applying frequency order." ) return sets , order
Transform tokens to integers according to global frequency order. This step replaces all original tokens in the sets with integers and helps to speed up subsequent prefix filtering and similarity computation. See Section 4.3.2 in the paper "A Primitive Operator for Similarity Joins in Data Cleaning" by Chaudhuri et al.
23,684
def all_pairs(sets, similarity_func_name="jaccard", similarity_threshold=0.5):
    """Find all pairs of sets with similarity >= ``similarity_threshold``.

    Implementation of the All-Pair-Binary algorithm ("Scaling Up All Pairs
    Similarity Search", Bayardo et al.) with the position filter enhancement.

    :param sets: non-empty list of token collections.
    :param similarity_func_name: name of a registered symmetric similarity
        function (see ``_similarity_funcs`` / ``_symmetric_similarity_funcs``).
    :param similarity_threshold: float in [0, 1].
    :returns: generator of ``(x, y, similarity)`` tuples with ``x > y``,
        indices into ``sets``.
    :raises ValueError: on invalid input, function name, or threshold.
    """
    if not isinstance(sets, list) or len(sets) == 0:
        raise ValueError("Input parameter sets must be a non-empty list.")
    if similarity_func_name not in _similarity_funcs:
        raise ValueError("Similarity function {} is not supported.".format(similarity_func_name))
    if similarity_threshold < 0 or similarity_threshold > 1.0:
        raise ValueError("Similarity threshold must be in the range [0, 1].")
    if similarity_func_name not in _symmetric_similarity_funcs:
        raise ValueError("The similarity function must be symmetric "
                         "({})".format(", ".join(_symmetric_similarity_funcs)))
    similarity_func = _similarity_funcs[similarity_func_name]
    overlap_threshold_func = _overlap_threshold_funcs[similarity_func_name]
    position_filter_func = _position_filter_funcs[similarity_func_name]

    # Remap tokens to integers ordered by ascending global frequency; rare
    # tokens come first, keeping prefixes selective.
    sets, _ = _frequency_order_transform(sets)

    # Inverted index: token -> list of (set index, position in that set).
    index = defaultdict(list)
    logging.debug("Find all pairs with similarities >= {}...".format(similarity_threshold))
    count = 0
    # Process sets from smallest to largest.
    for x1 in np.argsort([len(s) for s in sets]):
        s1 = sets[x1]
        # Minimum overlap any set must share with s1 to reach the threshold.
        t = overlap_threshold_func(len(s1), similarity_threshold)
        prefix_size = len(s1) - t + 1
        prefix = s1[:prefix_size]
        # Candidates: previously indexed sets sharing a prefix token, kept
        # only when the position filter cannot rule them out.
        candidates = set([x2 for p1, token in enumerate(prefix)
                          for x2, p2 in index[token]
                          if position_filter_func(s1, sets[x2], p1, p2,
                                                  similarity_threshold)])
        for x2 in candidates:
            s2 = sets[x2]
            # Verify each candidate with the exact similarity function.
            sim = similarity_func(s1, s2)
            if sim < similarity_threshold:
                continue
            yield tuple(sorted([x1, x2], reverse=True) + [sim, ])
            count += 1
        # Index only the prefix tokens of the current set.
        for j, token in enumerate(prefix):
            index[token].append((x1, j))
    logging.debug("{} pairs found.".format(count))
Find all pairs of sets with similarity greater than a threshold . This is an implementation of the All - Pair - Binary algorithm in the paper Scaling Up All Pairs Similarity Search by Bayardo et al . with position filter enhancement .
23,685
def query(self, s):
    """Query the search index for sets similar to the query set ``s``.

    :param s: iterable of tokens.
    :returns: list of ``(index, similarity)`` tuples for indexed sets whose
        similarity with ``s`` is at least ``self.similarity_threshold``.
    """
    # Map tokens to their frequency-order integers; tokens never seen at
    # build time cannot contribute to any match and are dropped.
    s1 = np.sort([self.order[token] for token in s if token in self.order])
    logging.debug("{} original tokens and {} tokens after applying "
                  "frequency order.".format(len(s), len(s1)))
    prefix = self._get_prefix(s1)
    # Prefix filter + position filter produce the candidate set.
    candidates = set([i for p1, token in enumerate(prefix)
                      for i, p2 in self.index[token]
                      if self.position_filter_func(s1, self.sets[i], p1, p2,
                                                   self.similarity_threshold)])
    logging.debug("{} candidates found.".format(len(candidates)))
    results = deque([])
    for i in candidates:
        s2 = self.sets[i]
        # Exact verification of each candidate.
        sim = self.similarity_func(s1, s2)
        if sim < self.similarity_threshold:
            continue
        results.append((i, sim))
    logging.debug("{} verified sets found.".format(len(results)))
    return list(results)
Query the search index for sets similar to the query set .
23,686
def sample(self, bqm, num_reads=10):
    """Return ``num_reads`` uniformly random samples for a binary quadratic model.

    :param bqm: binary quadratic model to sample from.
    :param num_reads: number of random samples to draw.
    :returns: a SampleSet of the samples and their energies.
    """
    values = tuple(bqm.vartype.value)

    samples = []
    energies = []
    for _ in range(num_reads):
        assignment = {v: choice(values) for v in bqm.linear}
        samples.append(assignment)
        energies.append(bqm.energy(assignment))

    return SampleSet.from_samples(samples, bqm.vartype, energies)
Give random samples for a binary quadratic model .
23,687
def combinations(n, k, strength=1, vartype=BINARY):
    r"""Generate a bqm that is minimized when exactly k of n variables are selected.

    Encodes the penalty strength * (sum_i x_i - k)^2 over binary variables.

    :param n: collection of variables, or an int (variables become range(n)).
    :param k: number of variables to select, 0 <= k <= len(variables).
    :param strength: scale of the penalty.
    :param vartype: vartype of the returned bqm.
    :raises TypeError: when n is neither a collection nor an integer.
    :raises ValueError: when k is out of range.
    """
    if isinstance(n, abc.Sized) and isinstance(n, abc.Iterable):
        variables = n
    else:
        try:
            variables = range(n)
        except TypeError:
            raise TypeError('n should be a collection or an integer')

    if k > len(variables) or k < 0:
        raise ValueError("cannot select k={} from {} variables".format(k, len(variables)))

    # (sum_i x_i - k)^2 expands to linear bias (1 - 2k), quadratic bias 2 on
    # every pair, and constant offset k^2, each scaled by strength.
    lbias = float(strength * (1 - 2 * k))
    qbias = float(2 * strength)

    bqm = BinaryQuadraticModel.empty(vartype)
    bqm.add_variables_from(((v, lbias) for v in variables), vartype=BINARY)
    bqm.add_interactions_from(((u, v, qbias)
                               for u, v in itertools.combinations(variables, 2)),
                              vartype=BINARY)
    bqm.add_offset(strength * (k ** 2))
    return bqm
r Generate a bqm that is minimized when k of n variables are selected .
23,688
def uniform(graph, vartype, low=0.0, high=1.0, cls=BinaryQuadraticModel, seed=None):
    """Generate a bqm with biases and offset drawn uniformly from [low, high).

    :param graph: a (variables, edges) pair.
    :param vartype: vartype of the generated bqm.
    :param low: lower bound of the uniform distribution.
    :param high: upper bound of the uniform distribution.
    :param cls: binary quadratic model class used to build the result.
    :param seed: optional RNG seed; drawn randomly when None.
    :returns: a bqm of class ``cls``.
    """
    # Fix: consistently use the `np` alias (the original mixed bare `numpy`
    # with `np` in the same expressions).
    if seed is None:
        seed = np.random.randint(2 ** 32, dtype=np.uint32)
    rng = np.random.RandomState(seed)

    variables, edges = graph
    index = {v: idx for idx, v in enumerate(variables)}

    if edges:
        irow, icol = zip(*((index[u], index[v]) for u, v in edges))
    else:
        irow = icol = tuple()

    ldata = rng.uniform(low, high, size=len(variables))
    qdata = rng.uniform(low, high, size=len(irow))
    offset = rng.uniform(low, high)

    return cls.from_numpy_vectors(ldata, (irow, icol, qdata), offset, vartype,
                                  variable_order=variables)
Generate a bqm with random biases and offset .
23,689
def ran_r(r, graph, cls=BinaryQuadraticModel, seed=None):
    """Generate an Ising model for a RAN-r problem.

    Quadratic biases are drawn uniformly from {-r, ..., -1, 1, ..., r}
    (zero excluded); all linear biases and the offset are zero.

    :param r: positive int, magnitude bound on the couplings.
    :param graph: a (variables, edges) pair.
    :param cls: binary quadratic model class used to build the result.
    :param seed: optional RNG seed; drawn randomly when None.
    :returns: a SPIN-vartype bqm.
    :raises TypeError: when r is not an int.
    :raises ValueError: when r < 1.
    """
    if not isinstance(r, int):
        raise TypeError("r should be a positive integer")
    if r < 1:
        raise ValueError("r should be a positive integer")

    # Fix: consistently use the `np` alias (the original mixed bare `numpy`
    # with `np` in the same expressions).
    if seed is None:
        seed = np.random.randint(2 ** 32, dtype=np.uint32)
    rnd = np.random.RandomState(seed)

    variables, edges = graph
    index = {v: idx for idx, v in enumerate(variables)}

    if edges:
        irow, icol = zip(*((index[u], index[v]) for u, v in edges))
    else:
        irow = icol = tuple()

    ldata = np.zeros(len(variables))

    # The coupling values {-r..-1} ∪ {1..r}, with 0 excluded.
    rvals = np.empty(2 * r)
    rvals[0:r] = range(-r, 0)
    rvals[r:] = range(1, r + 1)
    qdata = rnd.choice(rvals, size=len(irow))

    offset = 0

    return cls.from_numpy_vectors(ldata, (irow, icol, qdata), offset,
                                  vartype='SPIN', variable_order=variables)
Generate an Ising model for a RAN-r problem.
23,690
def bqm_index_labels(f):
    """Decorator: present the wrapped sample method with an index-labelled bqm.

    If the bqm is not already labelled 0..len-1, its variables are relabelled
    to integer indices before calling ``f``, and the returned sample set is
    relabelled back to the original variables.
    """
    @wraps(f)
    def _index_label(sampler, bqm, **kwargs):
        if not hasattr(bqm, 'linear'):
            raise TypeError('expected input to be a BinaryQuadraticModel')
        linear = bqm.linear

        # Fast path: already index-labelled.
        if all(v in linear for v in range(len(bqm))):
            return f(sampler, bqm, **kwargs)

        # Deterministic mapping when labels are sortable; in Python 3 unlike
        # types cannot be sorted, so fall back to iteration order.
        try:
            inverse_mapping = dict(enumerate(sorted(linear)))
        except TypeError:
            inverse_mapping = dict(enumerate(linear))
        mapping = {v: i for i, v in iteritems(inverse_mapping)}

        response = f(sampler, bqm.relabel_variables(mapping, inplace=False), **kwargs)

        # Restore the caller's variable labels on the way out.
        return response.relabel_variables(inverse_mapping, inplace=True)

    return _index_label
Decorator to convert a bqm to index - labels and relabel the sample set output .
23,691
def bqm_index_labelled_input(var_labels_arg_name, samples_arg_names):
    """Decorator factory: ensure the bqm and sample-like inputs use index labels.

    :param var_labels_arg_name: keyword name of an optional
        {variable: index} mapping argument of the wrapped method.
    :param samples_arg_names: keyword names of sample-like arguments that are
        assumed to already be index-labelled when provided.
    :returns: decorator that relabels the bqm to integer indices before
        calling the wrapped method and relabels the response back afterwards.
    """
    def index_label_decorator(f):
        @wraps(f)
        def _index_label(sampler, bqm, **kwargs):
            if not hasattr(bqm, 'linear'):
                raise TypeError('expected input to be a BinaryQuadraticModel')
            linear = bqm.linear

            var_labels = kwargs.get(var_labels_arg_name, None)
            has_samples_input = any(kwargs.get(arg_name, None) is not None
                                    for arg_name in samples_arg_names)

            if var_labels is None:
                # Fast path: bqm already uses indices 0..len-1.
                if all(v in linear for v in range(len(bqm))):
                    return f(sampler, bqm, **kwargs)

                # Sample-like inputs are interpreted as index-labelled, so a
                # mapping cannot be invented for them after the fact.
                if has_samples_input:
                    err_str = ("Argument `{}` must be provided if any of the"
                               " samples arguments {} are provided and the "
                               "bqm is not already index-labelled".format(
                                   var_labels_arg_name,
                                   samples_arg_names))
                    raise ValueError(err_str)

                # Deterministic mapping when labels are sortable; otherwise
                # fall back to iteration order (unlike types in Python 3).
                try:
                    inverse_mapping = dict(enumerate(sorted(linear)))
                except TypeError:
                    inverse_mapping = dict(enumerate(linear))
                var_labels = {v: i for i, v in iteritems(inverse_mapping)}
            else:
                inverse_mapping = {i: v for v, i in iteritems(var_labels)}

            response = f(sampler,
                         bqm.relabel_variables(var_labels, inplace=False),
                         **kwargs)

            # Restore the original variable labels on the response.
            return response.relabel_variables(inverse_mapping, inplace=True)

        return _index_label

    return index_label_decorator
Returns a decorator which ensures bqm variable labeling and all other specified sample - like inputs are index labeled and consistent .
23,692
def bqm_structured(f):
    """Decorator: raise if the given bqm does not fit the sampler's structure.

    The wrapped sample method is only called when every bqm variable is a
    node of the sampler's structure and every interaction is one of its edges.

    :raises TypeError: when the sampler has no ``structure`` attribute.
    :raises RuntimeError: when the sampler is Structured but its structure
        is malformed.
    :raises BinaryQuadraticModelStructureError: on a structure mismatch.
    """
    @wraps(f)
    def new_f(sampler, bqm, **kwargs):
        try:
            structure = sampler.structure
            adjacency = structure.adjacency
        except AttributeError:
            if isinstance(sampler, Structured):
                raise RuntimeError("something is wrong with the structured sampler")
            else:
                raise TypeError("sampler does not have a structure property")

        # Every variable must be a node of the sampler's graph...
        if not all(v in adjacency for v in bqm.linear):
            raise BinaryQuadraticModelStructureError("given bqm does not match the sampler's structure")

        # ...and every interaction must be one of its edges.
        if not all(u in adjacency[v] for u, v in bqm.quadratic):
            raise BinaryQuadraticModelStructureError("given bqm does not match the sampler's structure")

        return f(sampler, bqm, **kwargs)

    return new_f
Decorator to raise an error if the given bqm does not match the sampler s structure .
23,693
def graph_argument(*arg_names, **options):
    """Decorator factory: coerce named graph arguments into (nodes, edges) form.

    Accepted input forms per argument: a networkx-style graph (anything with
    ``nodes`` and ``edges`` attributes), an integer n (complete graph on
    range(n)), or a 2-sequence ``(n, edges)``.

    :param arg_names: names of the arguments to coerce (default: ['G']).
    :param allow_None: keyword-only; when True, None passes through unchanged.
    :raises TypeError: on unexpected keyword options or a missing argument.
    :raises ValueError: on an unrecognized graph input form.
    """
    if not arg_names:
        # Default to the conventional graph argument name.
        arg_names = ['G']

    allow_None = options.pop("allow_None", False)
    if options:
        # Any remaining keyword option is rejected.
        key, _ = options.popitem()
        msg = "graph_argument() for an unexpected keyword argument '{}'".format(key)
        raise TypeError(msg)

    def _graph_arg(f):
        argspec = getargspec(f)

        def _enforce_single_arg(name, args, kwargs):
            # `args` is unused here because new_f below normalizes every
            # argument into keyword form before calling this.
            try:
                G = kwargs[name]
            except KeyError:
                raise TypeError('Graph argument missing')

            if hasattr(G, 'edges') and hasattr(G, 'nodes'):
                # networkx (or compatible) graph object.
                kwargs[name] = (list(G.nodes), list(G.edges))
            elif _is_integer(G):
                # Integer n: complete graph on range(n).
                kwargs[name] = (list(range(G)), list(itertools.combinations(range(G), 2)))
            elif isinstance(G, abc.Sequence) and len(G) == 2:
                if isinstance(G[0], integer_types):
                    # (n, edges) pair: expand n to range(n).
                    kwargs[name] = (list(range(G[0])), G[1])
                # NOTE(review): a 2-sequence whose first element is not an
                # integer is left unchanged here -- presumably already a
                # (nodes, edges) pair; confirm this is intended.
            elif allow_None and G is None:
                return G
            else:
                raise ValueError('Unexpected graph input form')

            return

        @wraps(f)
        def new_f(*args, **kwargs):
            # Normalize the call into keyword form so each graph argument can
            # be located by name regardless of how it was passed.
            bound_args = inspect.getcallargs(f, *args, **kwargs)
            final_args = list(bound_args.pop(argspec.varargs, ()))
            final_kwargs = bound_args.pop(argspec.keywords, {})
            final_kwargs.update(bound_args)

            for name in arg_names:
                _enforce_single_arg(name, final_args, final_kwargs)

            return f(*final_args, **final_kwargs)

        return new_f

    return _graph_arg
Decorator to coerce given graph arguments into a consistent form .
23,694
def array2bytes(arr, bytes_type=bytes):
    """Serialize a NumPy array to bytes using ``np.save``.

    :param arr: array to serialize.
    :param bytes_type: constructor applied to the raw byte string.
    :returns: the serialized array, wrapped with ``bytes_type``.
    """
    with io.BytesIO() as buffer:
        np.save(buffer, arr, allow_pickle=False)
        return bytes_type(buffer.getvalue())
Wraps NumPy s save function to return bytes .
23,695
def sample(self, bqm, **kwargs):
    """Sample ``bqm`` with the child sampler and truncate the output.

    Optionally aggregates duplicate samples first (``self._aggregate``),
    then applies ``self._truncate_kwargs`` to the result.
    """
    sampleset = self.child.sample(bqm, **kwargs)
    if self._aggregate:
        sampleset = sampleset.aggregate()
    return sampleset.truncate(**self._truncate_kwargs)
Sample from the problem provided by bqm and truncate output .
23,696
def sample(self, bqm):
    """Enumerate every sample of ``bqm`` exactly (brute force, 2**n states).

    States are visited in an order where consecutive states differ by a
    single flipped bit (the bit given by ``_ffs(i)``), so only one variable
    changes per step.

    :param bqm: binary quadratic model.
    :returns: SampleSet containing every sample and its energy, converted to
        the bqm's vartype.
    """
    M = bqm.binary.to_numpy_matrix()
    off = bqm.binary.offset

    if M.shape == (0, 0):
        return SampleSet.from_samples([], bqm.vartype, energy=[])

    # Fix: removed a dead `sample = np.zeros(...)` local that was shadowed
    # by the generator's own `sample` below and never read.
    def iter_samples():
        sample = np.zeros((len(bqm)), dtype=bool)
        energy = 0.0
        yield sample.copy(), energy + off

        for i in range(1, 1 << len(bqm)):
            v = _ffs(i)
            sample[v] = not sample[v]
            # NOTE(review): the energy is recomputed from scratch for each
            # state rather than updated incrementally for the single flip.
            energy = sample.dot(M).dot(sample.transpose())
            yield sample.copy(), float(energy) + off

    samples, energies = zip(*iter_samples())

    response = SampleSet.from_samples(np.array(samples, dtype='int8'),
                                      Vartype.BINARY, energies)
    response.change_vartype(bqm.vartype, inplace=True)
    return response
Sample from a binary quadratic model .
23,697
def as_vartype(vartype):
    """Coerce a Vartype member, name string, or value set into a Vartype.

    :param vartype: a Vartype, 'SPIN'/'BINARY', or {-1, 1}/{0, 1} (any
        iterable of the values is accepted).
    :raises TypeError: when the input cannot be interpreted as a vartype.
    """
    if isinstance(vartype, Vartype):
        return vartype

    try:
        if isinstance(vartype, str):
            return Vartype[vartype]
        if isinstance(vartype, frozenset):
            return Vartype(vartype)
        return Vartype(frozenset(vartype))
    except (ValueError, KeyError):
        raise TypeError(("expected input vartype to be one of: "
                         "Vartype.SPIN, 'SPIN', {-1, 1}, "
                         "Vartype.BINARY, 'BINARY', or {0, 1}."))
Cast various inputs to a valid vartype object .
23,698
def energy(self, sample_like, dtype=float):
    """Return the energy of a single sample.

    :param sample_like: one sample, in any form accepted by ``energies``.
    :param dtype: numpy dtype for the computation. Default is the builtin
        ``float``; the original ``np.float`` alias was deprecated and
        removed from NumPy (it was an alias for ``float``).
    :returns: scalar energy.
    """
    # Tuple-unpack of the single element; raises if `energies` does not
    # return exactly one value.
    energy, = self.energies(sample_like, dtype=dtype)
    return energy
The energy of the given sample .
23,699
def energies(self, samples_like, dtype=float):
    """Compute the energy of each given sample.

    :param samples_like: samples in any form accepted by ``as_samples``.
    :param dtype: numpy dtype of the returned array. Default is the builtin
        ``float``; the original ``np.float`` alias was deprecated and
        removed from NumPy (it was an alias for ``float``).
    :returns: 1-D numpy array with one energy per sample.
    """
    samples, labels = as_samples(samples_like)

    # Map each variable label to its column index in the samples array.
    if labels:
        idx, label = zip(*enumerate(labels))
        labeldict = dict(zip(label, idx))
    else:
        labeldict = {}

    num_samples = samples.shape[0]

    energies = np.zeros(num_samples, dtype=dtype)
    for term, bias in self.items():
        if len(term) == 0:
            # Empty term is the constant offset.
            energies += bias
        else:
            # Product of the involved variables' columns, scaled by the bias.
            energies += np.prod([samples[:, labeldict[v]] for v in term], axis=0) * bias

    return energies
The energies of the given samples .