idx int64 0 251k | question stringlengths 53 3.53k | target stringlengths 5 1.23k | len_question int64 20 893 | len_target int64 3 238 |
|---|---|---|---|---|
13,900 | def get_connections ( self , data = False ) : if data : return self . _connections return list ( self . _connections . keys ( ) ) | Get agent s current connections . | 35 | 6 |
13,901 | def publish ( self , artifact ) : self . env . add_artifact ( artifact ) self . _log ( logging . DEBUG , "Published {} to domain." . format ( artifact ) ) | Publish artifact to agent s environment . | 40 | 8 |
13,902 | async def ask_opinion ( self , addr , artifact ) : remote_agent = await self . env . connect ( addr ) return await remote_agent . evaluate ( artifact ) | Ask an agent s opinion about an artifact . | 38 | 9 |
13,903 | def localization_feature ( app ) : # apply app default to babel app . config [ 'BABEL_DEFAULT_LOCALE' ] = app . config [ 'DEFAULT_LOCALE' ] app . config [ 'BABEL_DEFAULT_TIMEZONE' ] = app . config [ 'DEFAULT_TIMEZONE' ] # init babel babel = Babel ( ) babel . init_app ( app ) | Localization feature This will initialize support for translations and localization of values such as numbers money dates and formatting timezones . | 94 | 24 |
13,904 | def enrich_json_objects_by_object_type ( request , value ) : time_start_globally = time ( ) if isinstance ( value , list ) : json = [ x . to_json ( ) if hasattr ( x , "to_json" ) else x for x in value ] else : if isinstance ( value , dict ) : json = value else : json = value . to_json ( ) objects , nested = _collect_json_objects ( json , by = 'object_type' ) for enricher_info in _get_OBJECT_TYPE_ENRICHER_ORDER ( ) : if len ( enricher_info [ 'object_types' ] ) > 0 : enricher_objects = flatten ( [ objects . get ( object_type , [ ] ) for object_type in enricher_info [ 'object_types' ] ] ) enricher_nested = any ( [ nested . get ( object_type , False ) for object_type in enricher_info [ 'object_types' ] ] ) else : enricher_objects = flatten ( objects . values ( ) ) enricher_nested = any ( nested . values ( ) ) if len ( enricher_objects ) > 0 : time_start = time ( ) enricher_info [ 'enricher' ] ( request , enricher_objects , enricher_nested ) LOGGER . debug ( 'enrichment "{}" took {} seconds' . format ( enricher_info [ 'enricher_name' ] , time ( ) - time_start ) ) if not enricher_info [ 'pure' ] : # if the enricher modified object types we must collect objects # again objects , nested = _collect_json_objects ( json , by = 'object_type' ) LOGGER . debug ( 'The whole enrichment of json objects by their object_type took {} seconds.' . format ( time ( ) - time_start_globally ) ) return json | Take the given value and start enrichment by object_type . The va | 444 | 14 |
13,905 | def enrich_by_predicate ( request , json , fun , predicate , skip_nested = False , * * kwargs ) : time_start = time ( ) collected = [ ] memory = { 'nested' : False } def _collect ( json_inner , nested ) : if nested and skip_nested : return if isinstance ( json_inner , list ) : list ( map ( lambda x : _collect ( x , nested ) , json_inner ) ) elif isinstance ( json_inner , dict ) : if predicate ( json_inner ) : collected . append ( json_inner ) if nested : memory [ 'nested' ] = True list ( map ( lambda x : _collect ( x , True ) , list ( json_inner . values ( ) ) ) ) _collect ( json , False ) if len ( collected ) > 0 : fun ( request , collected , memory [ 'nested' ] , * * kwargs ) LOGGER . debug ( "enrichment of JSON by predicate by '%s' function took %s seconds" , fun . __name__ , ( time ( ) - time_start ) ) return json | Take the JSON find all its subparts satisfying the given condition and them by the given function . Other key - word arguments are passed to the function . | 247 | 30 |
13,906 | def enrich_by_object_type ( request , json , fun , object_type , skip_nested = False , * * kwargs ) : if not isinstance ( object_type , list ) : object_type = [ object_type ] predicate = lambda x : 'object_type' in x and x [ 'object_type' ] in object_type return enrich_by_predicate ( request , json , fun , predicate , skip_nested = skip_nested , * * kwargs ) | Take the JSON find its subparts having the given object part and transform them by the given function . Other key - word arguments are passed to the function . | 111 | 31 |
13,907 | def change_parent ( sender , instance , * * kwargs ) : if instance . id is None : return if len ( { 'term' , 'term_id' } & set ( instance . changed_fields ) ) != 0 : diff = instance . diff parent = diff [ 'term' ] [ 0 ] if 'term' in diff else diff [ 'term_id' ] [ 0 ] child_id = instance . item_id if parent is not None : parent_id = parent . item_id if isinstance ( parent , Term ) else Term . objects . get ( pk = parent ) . item_id ItemRelation . objects . filter ( parent_id = parent_id , child_id = child_id ) . delete ( ) ItemRelation . objects . get_or_create ( parent_id = instance . term . item_id , child_id = child_id , visible = True ) if len ( { 'term_secondary' , 'term_secondary_id' } & set ( instance . changed_fields ) ) != 0 : diff = instance . diff child_id = instance . item_id parent = diff [ 'term_secondary' ] [ 0 ] if 'term_secondary' in diff else diff [ 'term_secondary_id' ] [ 0 ] if parent is not None : parent_id = parent . item_id if isinstance ( parent , Term ) else Term . objects . get ( pk = parent ) . item_id ItemRelation . objects . filter ( parent_id = parent_id , child_id = child_id ) . delete ( ) if instance . term_secondary is not None or instance . term_secondary_id is not None : ItemRelation . objects . get_or_create ( parent_id = instance . term_secondary . item_id , child_id = child_id , visible = True ) if len ( { 'context' , 'context_id' } & set ( instance . changed_fields ) ) != 0 : diff = instance . diff parent = diff [ 'context' ] [ 0 ] if 'context' in diff else diff [ 'context_id' ] [ 0 ] child_id = instance . item_id if parent is not None : parent_id = parent . item_id if isinstance ( parent , Context ) else Context . objects . get ( pk = parent ) . item_id ItemRelation . objects . filter ( parent_id = parent_id , child_id = child_id ) . delete ( ) ItemRelation . objects . get_or_create ( parent_id = instance . context . item_id , child_id = child_id , visible = True ) | When the given flashcard has changed . 
Look at term and context and change the corresponding item relation . | 576 | 20 |
13,908 | def example ( ) : # 1a8d is an example from the loops benchmark # 1lfa contains hydrogens b = Bonsai . retrieve ( '1lfa' , cache_dir = '/tmp' ) search_radius = 10.0 atom_of_interest = b . get_atom ( 1095 ) nearby_atoms = b . find_atoms_near_atom ( atom_of_interest , search_radius ) for na in nearby_atoms : assert ( na - atom_of_interest <= search_radius ) for fa in b . get_atom_set_complement ( nearby_atoms ) : assert ( fa - atom_of_interest > search_radius ) # Get all heavy atoms within the radius (including HETATM) nearby_heavy_atoms = b . find_heavy_atoms_near_atom ( atom_of_interest , search_radius ) # Get all C-alpha atoms within the radius nearby_ca_atoms = b . find_atoms_near_atom ( atom_of_interest , search_radius , atom_names_to_include = [ 'CA' ] ) # Get all carbon atoms within the radius nearby_c_atoms = b . find_atoms_near_atom ( atom_of_interest , search_radius , atom_names_to_include = b . get_atom_names_by_group ( [ 'C' ] ) ) | This section gives examples of how to use the module . | 310 | 11 |
13,909 | def from_non_aligned_residue_IDs ( Chain , StartResidueID , EndResidueID , Sequence = None ) : return PDBSection ( Chain , PDB . ResidueID2String ( StartResidueID ) , PDB . ResidueID2String ( EndResidueID ) , Sequence = Sequence ) | A more forgiving method that does not care about the padding of the residue IDs . | 77 | 16 |
13,910 | def bin_atoms ( self ) : # Create the atom bins low_point = numpy . array ( [ self . min_x , self . min_y , self . min_z ] ) high_point = numpy . array ( [ self . max_x , self . max_y , self . max_z ] ) atom_bin_dimensions = numpy . ceil ( ( high_point - low_point ) / self . bin_size ) self . atom_bin_dimensions = ( int ( atom_bin_dimensions [ 0 ] ) - 1 , int ( atom_bin_dimensions [ 1 ] ) - 1 , int ( atom_bin_dimensions [ 2 ] ) - 1 ) atom_bins = [ ] for x in range ( int ( atom_bin_dimensions [ 0 ] ) ) : atom_bins . append ( [ ] ) for y in range ( int ( atom_bin_dimensions [ 1 ] ) ) : atom_bins [ x ] . append ( [ ] ) for z in range ( int ( atom_bin_dimensions [ 2 ] ) ) : atom_bins [ x ] [ y ] . append ( Bin ( x , y , z ) ) # Assign each Atom to a bin for serial_number , atom in self . atoms . iteritems ( ) : bin_location = numpy . trunc ( ( atom . point - low_point ) / self . bin_size ) bin = atom_bins [ int ( bin_location [ 0 ] ) ] [ int ( bin_location [ 1 ] ) ] [ int ( bin_location [ 2 ] ) ] bin . append ( atom ) atom . set_bin ( bin ) # Sanity_check if self . safe_mode : num_atoms = 0 for x in range ( int ( atom_bin_dimensions [ 0 ] ) ) : for y in range ( int ( atom_bin_dimensions [ 1 ] ) ) : for z in range ( int ( atom_bin_dimensions [ 2 ] ) ) : num_atoms += len ( atom_bins [ x ] [ y ] [ z ] ) assert ( num_atoms == len ( self . atoms ) ) # Snip empty sections (saves a little space after garbage collection - space savings increase with the number of empty arrays in the matrix) blank_section = ( ) for x in range ( int ( atom_bin_dimensions [ 0 ] ) ) : for y in range ( int ( atom_bin_dimensions [ 1 ] ) ) : for z in range ( int ( atom_bin_dimensions [ 2 ] ) ) : if not atom_bins [ x ] [ y ] [ z ] : atom_bins [ x ] [ y ] [ z ] = blank_section self . atom_bins = atom_bins | This function bins the Atoms into fixed - size sections of the protein space in 3D . | 612 | 19 |
13,911 | def find_heavy_atoms_near_atom ( self , source_atom , search_radius , atom_hit_cache = set ( ) , restrict_to_CA = False ) : #todo: Benchmark atom_hit_cache to see if it actually speeds up the search non_heavy_atoms = self . get_atom_names_by_group ( set ( [ 'H' , 'D' , 'T' ] ) ) return self . find_atoms_near_atom ( source_atom , search_radius , atom_names_to_exclude = non_heavy_atoms , atom_hit_cache = atom_hit_cache , restrict_to_CA = restrict_to_CA ) | atom_hit_cache is a set of atom serial numbers which have already been tested . We keep track of these to avoid recalculating the distance . | 157 | 31 |
13,912 | def get ( self , attr_name , * args ) : if not isinstance ( attr_name , six . string_types ) : raise TypeError ( 'attr_name must be a str.' ) # allow retrieval of data with alias or normalized name if '-' in attr_name : attr_name = attr_name . replace ( '-' , '_' ) parent_attr = self attr = getattr ( parent_attr , attr_name , None ) for arg in args : if not isinstance ( arg , six . string_types ) : raise TypeError ( 'each additional argument must be a string. {0} was not a string' . format ( arg ) ) if hasattr ( parent_attr , arg ) : parent_attr = getattr ( parent_attr , arg ) if hasattr ( parent_attr , attr_name ) : attr = getattr ( parent_attr , attr_name ) else : pass return attr | Get the most retrieval attribute in the configuration file . This method will recursively look through the configuration file for the attribute specified and return the last found value or None . The values can be referenced by the key name provided in the configuration file or that value normalized with snake_casing . | 210 | 58 |
13,913 | def service_references ( self ) : services_blue_print = self . _scheme_references . get ( 'services' ) if services_blue_print is None : raise LookupError ( 'unable to find any services in the config.' ) # TODO: this needs to be cleaned up and made solid. maybe when creating the blueprint ref normalize the damn keys return { key . replace ( '-' , '_' ) : key for key in services_blue_print [ 'keys' ] } | returns a list of service names | 110 | 7 |
13,914 | def validate ( self ) : if not isinstance ( self . _data , dict ) : raise TypeError ( 'freight forwarder configuration file must be a dict.' ) current_log_level = logger . get_level ( ) if self . _verbose : logger . set_level ( 'DEBUG' ) else : logger . set_level ( 'ERROR' ) logger . info ( 'Starting configuration validation' , extra = { "formatter" : 'config-start' } ) # copy config dict to allow config data to stay in its original state. config_data = self . _data . copy ( ) try : self . _walk_tree ( config_data , ROOT_SCHEME ) except ConfigValidationException as e : e . log_error ( ) raise logger . info ( "Config validation passed." , extra = { 'formatter' : 'config-success' } ) logger . set_level ( current_log_level ) | Validate the contents of the configuration file . Will return None if validation is successful or raise an error if not . | 204 | 23 |
13,915 | def _load ( self ) : if os . path . isdir ( self . _path ) : for file_ext in ( 'yml' , 'yaml' , 'json' ) : test_path = os . path . join ( self . _path , 'freight-forwarder.{0}' . format ( file_ext ) ) if os . path . isfile ( test_path ) : self . _path = test_path break if os . path . isfile ( self . _path ) : file_name , file_extension = os . path . splitext ( self . _path ) with open ( self . _path , 'r' ) as config_file : if file_extension in ( '.yaml' , '.yml' ) : self . _load_yml_config ( config_file . read ( ) ) elif file_extension == '.json' : try : config_data = json . loads ( config_file . read ( ) ) self . _data = normalize_keys ( config_data ) except Exception : raise SyntaxError ( "There is a syntax error in your freight-forwarder config." ) else : raise TypeError ( "Configuration file most be yaml or json." ) else : raise LookupError ( "Was unable to find a freight-forwarder configuration file." ) | Load a configuration file . This method will be called when the Config class is instantiated . The configuration file can be json or yaml . | 291 | 28 |
13,916 | def _load_yml_config ( self , config_file ) : if not isinstance ( config_file , six . string_types ) : raise TypeError ( 'config_file must be a str.' ) try : def construct_yaml_int ( self , node ) : obj = SafeConstructor . construct_yaml_int ( self , node ) data = ConfigInt ( obj , node . start_mark , node . end_mark ) return data def construct_yaml_float ( self , node ) : obj , = SafeConstructor . construct_yaml_float ( self , node ) data = ConfigFloat ( obj , node . start_mark , node . end_mark ) return data def construct_yaml_str ( self , node ) : # Override the default string handling function # to always return unicode objects obj = SafeConstructor . construct_scalar ( self , node ) assert isinstance ( obj , six . string_types ) data = ConfigUnicode ( obj , node . start_mark , node . end_mark ) return data def construct_yaml_mapping ( self , node ) : obj , = SafeConstructor . construct_yaml_map ( self , node ) data = ConfigDict ( obj , node . start_mark , node . end_mark ) return data def construct_yaml_seq ( self , node ) : obj , = SafeConstructor . construct_yaml_seq ( self , node ) data = ConfigSeq ( obj , node . start_mark , node . end_mark ) return data # SafeConstructor.add_constructor(u'tag:yaml.org,2002:bool', construct_yaml_bool) SafeConstructor . add_constructor ( u'tag:yaml.org,2002:float' , construct_yaml_float ) SafeConstructor . add_constructor ( u'tag:yaml.org,2002:int' , construct_yaml_int ) SafeConstructor . add_constructor ( u'tag:yaml.org,2002:map' , construct_yaml_mapping ) SafeConstructor . add_constructor ( u'tag:yaml.org,2002:seq' , construct_yaml_seq ) SafeConstructor . add_constructor ( u'tag:yaml.org,2002:str' , construct_yaml_str ) data = SafeLoader ( config_file ) . get_data ( ) if data is None : raise AttributeError ( 'The configuration file needs to have data in it.' ) self . _data = normalize_keys ( data , snake_case = False ) except YAMLError as e : if hasattr ( e , 'problem_mark' ) : mark = e . 
problem_mark raise SyntaxError ( "There is a syntax error in your freight-forwarder config file line: {0} column: {1}" . format ( mark . line + 1 , mark . column + 1 ) ) else : raise SyntaxError ( "There is a syntax error in your freight-forwarder config." ) | loads a yaml str creates a few constructs for pyaml serializes and normalized the config data . Then assigns the config data to self . _data . | 669 | 31 |
13,917 | def _create_attr ( self , property_key , data , ancestors ) : if not isinstance ( property_key , six . string_types ) : raise TypeError ( "property_key must be a string. type: {0} was passed." . format ( type ( property_key ) ) ) if not isinstance ( ancestors , OrderedDict ) : raise TypeError ( "ancestors must be an OrderedDict. type: {0} was passed." . format ( type ( ancestors ) ) ) previous_element = self normalized_key = normalize_value ( property_key ) . replace ( '-' , '_' ) normalized_ancestor_key = None # TODO: clean up and validation if ancestors : for ancestor_key , ancestors_value in six . iteritems ( ancestors ) : normalized_ancestor_key = normalize_value ( ancestor_key ) . replace ( '-' , '_' ) if normalized_ancestor_key . lower ( ) == 'root' : continue if not hasattr ( previous_element , normalized_ancestor_key ) : config_attr = ConfigDict ( { } , ancestors_value . start_mark , ancestors_value . end_mark ) config_attr . name = normalized_ancestor_key config_attr . alias = ancestor_key setattr ( previous_element , normalized_ancestor_key , config_attr ) previous_element = getattr ( previous_element , normalized_ancestor_key ) if normalized_key == normalized_ancestor_key : pass else : if isinstance ( data , ConfigNode ) : data . name = normalized_key data . alias = property_key setattr ( previous_element , normalized_key , data ) | Dynamically Creates attributes on for a Config . Also adds name and alias to each Config object . | 379 | 21 |
13,918 | def _collect_unrecognized_values ( self , scheme , data , ancestors ) : if not isinstance ( ancestors , OrderedDict ) : raise TypeError ( "ancestors must be an OrderedDict. type: {0} was passed." . format ( type ( ancestors ) ) ) if not isinstance ( scheme , dict ) : raise TypeError ( 'scheme must be a dict. type: {0} was passed' . format ( type ( scheme ) ) ) unrecognized_values = { } if isinstance ( data , dict ) : pruned_scheme = [ key for key in scheme . keys ( ) if key not in RESERVED_SCHEME_KEYS and key [ 0 ] not in RESERVED_SCHEME_KEYS ] for key , value in six . iteritems ( data ) : if key in pruned_scheme : continue unrecognized_values [ key ] = value validations = scheme . get ( 'is' ) if validations and 'one_of' in validations : for nested_scheme in validations [ 'one_of' ] : if isinstance ( nested_scheme , dict ) : updated_scheme = self . _update_scheme ( nested_scheme , ancestors ) pruned_scheme = [ key for key in updated_scheme . keys ( ) if key not in RESERVED_SCHEME_KEYS and key [ 0 ] not in RESERVED_SCHEME_KEYS ] for key in pruned_scheme : if key in unrecognized_values : del unrecognized_values [ key ] else : # TODO: maybe return an error? pass return unrecognized_values | Looks for values that aren t defined in the scheme and returns a dict with any unrecognized values found . | 373 | 21 |
13,919 | def _update_scheme ( self , scheme , ancestors ) : if not isinstance ( ancestors , OrderedDict ) : raise TypeError ( "ancestors must be an OrderedDict. type: {0} was passed." . format ( type ( ancestors ) ) ) if not isinstance ( scheme , dict ) : raise TypeError ( 'scheme must be a dict. type: {0} was passed' . format ( type ( scheme ) ) ) # TODO: what if we have more than one scheme :P need to fix this. definitions = ROOT_SCHEME . get ( '_' ) if 'inherit' in scheme : scheme = self . _scheme_propagation ( scheme , definitions ) updated_scheme = { } for scheme_key in six . iterkeys ( scheme ) : if not isinstance ( scheme_key , six . string_types ) : raise TypeError ( 'scheme keys are required to be strings. type: {0} was passed.' . format ( scheme_key ) ) if '@' in scheme_key : ref = scheme_key [ 1 : ] scheme_reference = self . _scheme_references . get ( ref ) if not scheme_reference : raise ConfigValidationException ( ancestors , ref , scheme_reference , 'required' , scheme ) for reference_key in scheme_reference [ 'keys' ] : scheme_reference [ 'scheme' ] . update ( scheme [ scheme_key ] ) updated_scheme [ reference_key ] = scheme_reference [ 'scheme' ] elif '~' in scheme_key : ref = scheme_key [ 1 : ] scheme_reference = self . _scheme_references . get ( ref ) if not scheme_reference : raise LookupError ( "was unable to find {0} in scheme reference." . format ( ref ) ) for reference_key in scheme_reference [ 'keys' ] : updated_scheme [ reference_key ] = scheme [ scheme_key ] scheme . update ( updated_scheme ) return scheme | Updates the current scheme based off special pre - defined keys and retruns a new updated scheme . | 444 | 20 |
13,920 | def _walk_tree ( self , data , scheme , ancestors = None , property_name = None , prefix = None ) : if property_name is None : property_name = 'root' # hack until i add this to references # reorder validates putting required first. If the data doesn't exist there is no need to continue. order = [ 'registries' ] + [ key for key in scheme . keys ( ) if key not in ( 'registries' , ) ] scheme = OrderedDict ( sorted ( scheme . items ( ) , key = lambda x : order . index ( x [ 0 ] ) ) ) if data is None : return elif not isinstance ( property_name , six . string_types ) : raise TypeError ( 'property_name must be a string.' ) ancestors = self . _update_ancestors ( data , property_name , ancestors ) if isinstance ( ancestors , OrderedDict ) : if list ( ancestors ) [ 0 ] != 'root' : raise LookupError ( 'root must be the first item in ancestors.' ) else : raise TypeError ( 'ancestors must be an OrderedDict. {0} was passed' . format ( type ( ancestors ) ) ) if not isinstance ( scheme , dict ) : raise TypeError ( 'scheme must be a dict. {0} was passed.' . format ( type ( scheme ) ) ) scheme = self . _update_scheme ( scheme , ancestors ) if property_name is not None and data : data = self . _get_cascading_attr ( property_name , * list ( ancestors ) [ 1 : ] ) if scheme . get ( 'cascading' , False ) else data for err in self . __execute_validations ( scheme . get ( 'is' , { } ) , data , property_name , ancestors , prefix = prefix ) : if err : raise err else : self . _create_attr ( property_name , data , ancestors ) self . __validate_unrecognized_values ( scheme , data , ancestors , prefix ) self . __populate_scheme_references ( scheme , property_name ) self . __validate_config_properties ( scheme , data , ancestors , prefix ) | This function takes configuration data and a validation scheme then walk the configuration tree validating the configuraton data agenst the scheme provided . Will raise error on failure otherwise return None . | 480 | 37 |
13,921 | def _update_ancestors ( self , config_data , property_name , ancestors = None ) : if not isinstance ( property_name , six . string_types ) : raise TypeError ( "property_key must be a string. type: {0} was passed." . format ( type ( property_name ) ) ) if ancestors is None : ancestors = OrderedDict ( [ ( 'root' , config_data ) ] ) elif not isinstance ( ancestors , OrderedDict ) : raise TypeError ( "ancestors must be an OrderedDict. type: {0} was passed." . format ( type ( ancestors ) ) ) elif 'root' not in ancestors : raise LookupError ( 'root must be in ancestors. currently in the ancestors chain {0}' . format ( ', ' . join ( ancestors . keys ( ) ) ) ) ancestors = ancestors . copy ( ) for previous_key in list ( ancestors ) [ : : - 1 ] : previous_item = ancestors [ previous_key ] if isinstance ( config_data , dict ) : if property_name in previous_item : ancestors [ property_name ] = config_data break return ancestors | Update ancestors for a specific property . | 254 | 7 |
13,922 | def _reference_keys ( self , reference ) : if not isinstance ( reference , six . string_types ) : raise TypeError ( 'When using ~ to reference dynamic attributes ref must be a str. a {0} was provided.' . format ( type ( reference ) . __name__ ) ) if '~' in reference : reference = reference [ 1 : ] scheme = self . _scheme_references . get ( reference ) if not scheme : # TODO: need to create nice error here as well and print pretty message. raise LookupError ( "Was unable to find {0} in the scheme references. " "available references {1}" . format ( reference , ', ' . join ( self . _scheme_references . keys ( ) ) ) ) return scheme [ 'keys' ] else : raise AttributeError ( 'references must start with ~. Please update {0} and retry.' . format ( reference ) ) | Returns a list of all of keys for a given reference . | 200 | 12 |
13,923 | def __execute_validations ( self , validations , data , property_name , ancestors , negation = False , prefix = None ) : if not isinstance ( ancestors , OrderedDict ) : raise TypeError ( "ancestors must be an OrderedDict. type: {0} was passed." . format ( type ( ancestors ) ) ) if not isinstance ( validations , dict ) : raise TypeError ( 'validations is required to be a dict. type: {1} was passed.' . format ( type ( validations ) ) ) if not isinstance ( property_name , six . string_types ) : raise TypeError ( "property_key must be a string. type: {0} was passed." . format ( type ( property_name ) ) ) # reorder validates putting required first. If the data doesn't exist there is no need to continue. order = [ 'type' , 'required' ] + [ key for key in validations . keys ( ) if key not in ( 'required' , 'type' ) ] ordered_validations = OrderedDict ( sorted ( validations . items ( ) , key = lambda x : order . index ( x [ 0 ] ) ) ) for validation , value in six . iteritems ( ordered_validations ) : if validation in VALIDATORS : if validation == 'not' : # TODO: need to test to make sure this works for err in self . __execute_validations ( value , data , property_name , ancestors , negation , prefix ) : yield err continue for err in getattr ( self , '_{0}' . format ( validation ) ) ( value , data , property_name , ancestors , negation , prefix ) : yield err else : raise LookupError ( "{0} isn't a validator or reserved scheme key." . format ( validation ) ) | Validate the data for a specific configuration value . This method will look up all of the validations provided and dynamically call any validation methods . If a validation fails a error will be thrown . If no errors are found a attributes will be dynamically created on the Config object for the configuration value . | 398 | 58 |
13,924 | def create ( name , url , tournament_type = "single elimination" , * * params ) : params . update ( { "name" : name , "url" : url , "tournament_type" : tournament_type , } ) return api . fetch_and_parse ( "POST" , "tournaments" , "tournament" , * * params ) | Create a new tournament . | 78 | 5 |
13,925 | def users_feature ( app ) : # check we have jwt secret configures if not app . config . get ( 'USER_JWT_SECRET' , None ) : raise x . JwtSecretMissing ( 'Please set USER_JWT_SECRET in config' ) # use custom session interface app . session_interface = BoilerSessionInterface ( ) # init user service user_service . init ( app ) # init login manager login_manager . init_app ( app ) login_manager . login_view = 'user.login' login_manager . login_message = None @ login_manager . user_loader def load_user ( id ) : return user_service . get ( id ) # init OAuth oauth . init_app ( app ) registry = OauthProviders ( app ) providers = registry . get_providers ( ) with app . app_context ( ) : for provider in providers : if provider not in oauth . remote_apps : oauth . remote_app ( provider , * * providers [ provider ] ) registry . register_token_getter ( provider ) # init principal principal . init_app ( app ) @ principal . identity_loader def load_identity ( ) : if current_user . is_authenticated : return Identity ( current_user . id ) session . pop ( 'identity.name' , None ) session . pop ( 'identity.auth_type' , None ) return AnonymousIdentity ( ) @ identity_loaded . connect_via ( app ) def on_identity_loaded ( sender , identity ) : identity . user = current_user if not current_user . is_authenticated : return identity . provides . add ( UserNeed ( current_user . id ) ) for role in current_user . roles : identity . provides . add ( RoleNeed ( role . handle ) ) | Add users feature Allows to register users and assign groups instantiates flask login flask principal and oauth integration | 395 | 20 |
13,926 | def rename_document ( self , did , name ) : payload = { 'name' : name } return self . _api . request ( 'post' , '/api/documents/' + did , body = payload ) | Renames the specified document . | 47 | 6 |
13,927 | def copy_workspace ( self , uri , new_name ) : payload = { 'isPublic' : True , 'newName' : new_name } return self . _api . request ( 'post' , '/api/documents/' + uri [ 'did' ] + '/workspaces/' + uri [ 'wvm' ] + '/copy' , body = payload ) | Copy the current workspace . | 86 | 5 |
13,928 | def create_workspace ( self , did , name , version_id = None ) : payload = { 'isPublic' : True , 'name' : name , } if version_id : payload [ 'versionId' ] = version_id return self . _api . request ( 'post' , '/api/documents/d/' + did + '/workspaces' , body = payload ) | Create a workspace in the specified document . | 85 | 8 |
13,929 | def get_partstudio_tessellatededges ( self , did , wid , eid ) : return self . _api . request ( 'get' , '/api/partstudios/d/' + did + '/w/' + wid + '/e/' + eid + '/tessellatededges' ) | Gets the tessellation of the edges of all parts in a part studio . | 71 | 18 |
13,930 | def upload_blob ( self , did , wid , filepath = './blob.json' ) : chars = string . ascii_letters + string . digits boundary_key = '' . join ( random . choice ( chars ) for i in range ( 8 ) ) mimetype = mimetypes . guess_type ( filepath ) [ 0 ] encoded_filename = os . path . basename ( filepath ) file_content_length = str ( os . path . getsize ( filepath ) ) blob = open ( filepath ) req_headers = { 'Content-Type' : 'multipart/form-data; boundary="%s"' % boundary_key } # build request body payload = '--' + boundary_key + '\r\nContent-Disposition: form-data; name="encodedFilename"\r\n\r\n' + encoded_filename + '\r\n' payload += '--' + boundary_key + '\r\nContent-Disposition: form-data; name="fileContentLength"\r\n\r\n' + file_content_length + '\r\n' payload += '--' + boundary_key + '\r\nContent-Disposition: form-data; name="file"; filename="' + encoded_filename + '"\r\n' payload += 'Content-Type: ' + mimetype + '\r\n\r\n' payload += blob . read ( ) payload += '\r\n--' + boundary_key + '--' return self . _api . request ( 'post' , '/api/blobelements/d/' + did + '/w/' + wid , headers = req_headers , body = payload ) | Uploads a file to a new blob element in the specified doc . | 385 | 14 |
13,931 | def part_studio_stl ( self , did , wid , eid ) : req_headers = { 'Accept' : 'application/vnd.onshape.v1+octet-stream' } return self . _api . request ( 'get' , '/api/partstudios/d/' + did + '/w/' + wid + '/e/' + eid + '/stl' , headers = req_headers ) | Exports STL export from a part studio | 98 | 8 |
13,932 | def create_assembly_instance ( self , assembly_uri , part_uri , configuration ) : payload = { "documentId" : part_uri [ "did" ] , "elementId" : part_uri [ "eid" ] , # could be added if needed: # "partId": "String", # "featureId": "String", # "microversionId": "String", "versionId" : part_uri [ "wvm" ] , # "microversionId": "String", "isAssembly" : False , "isWholePartStudio" : True , "configuration" : self . encode_configuration ( part_uri [ "did" ] , part_uri [ "eid" ] , configuration ) } return self . _api . request ( 'post' , '/api/assemblies/d/' + assembly_uri [ "did" ] + '/' + assembly_uri [ "wvm_type" ] + '/' + assembly_uri [ "wvm" ] + '/e/' + assembly_uri [ "eid" ] + '/instances' , body = payload ) | Insert a configurable part into an assembly . | 242 | 9 |
13,933 | def encode_configuration ( self , did , eid , parameters ) : # change to the type of list the API is expecting parameters = [ { "parameterId" : k , "parameterValue" : v } for ( k , v ) in parameters . items ( ) ] payload = { 'parameters' : parameters } req_headers = { 'Accept' : 'application/vnd.onshape.v1+json' , 'Content-Type' : 'application/json' } res = self . _api . request ( 'post' , '/api/elements/d/' + did + '/e/' + eid + '/configurationencodings' , body = payload , headers = req_headers ) return json . loads ( res . content . decode ( "utf-8" ) ) [ "encodedId" ] | Encode parameters as a URL - ready string | 183 | 9 |
13,934 | def get_configuration ( self , uri ) : req_headers = { 'Accept' : 'application/vnd.onshape.v1+json' , 'Content-Type' : 'application/json' } return self . _api . request ( 'get' , '/api/partstudios/d/' + uri [ "did" ] + '/' + uri [ "wvm_type" ] + '/' + uri [ "wvm" ] + '/e/' + uri [ "eid" ] + '/configuration' , headers = req_headers ) | get the configuration of a PartStudio | 131 | 7 |
13,935 | def update_configuration ( self , did , wid , eid , payload ) : req_headers = { 'Accept' : 'application/vnd.onshape.v1+json' , 'Content-Type' : 'application/json' } res = self . _api . request ( 'post' , '/api/partstudios/d/' + did + '/w/' + wid + '/e/' + eid + '/configuration' , body = payload , headers = req_headers ) return res | Update the configuration specified in the payload | 113 | 7 |
13,936 | def set_routing ( app , view_data ) : routing_modules = convert_routing_module ( view_data ) for module in routing_modules : view = import_string ( module . import_path ) app . add_url_rule ( module . url , view_func = view . as_view ( module . endpoint ) ) | apply the routing configuration you ve described | 74 | 7 |
13,937 | def retrieve_commands ( self , module ) : commands = [ ] for name , obj in inspect . getmembers ( module ) : if name != 'Command' and 'Command' in name : if name != 'GlimCommand' : cobject = getattr ( module , name ) commands . append ( cobject ) return commands | Function smartly imports Command type classes given module | 69 | 9 |
13,938 | def match ( self , args ) : command = None for c in self . commands : if c . name == args . which : c . args = args command = c break return command | Function dispatches the active command line utility . | 38 | 9 |
13,939 | def dispatch ( self , command , app ) : if self . is_glimcommand ( command ) : command . run ( app ) else : command . run ( ) | Function runs the active command . | 35 | 6 |
13,940 | def replace_sequence ( self , pdb_ID , chain_id , replacement_sequence ) : old_sequences = self . sequences old_unique_sequences = self . unique_sequences self . sequences = [ ] self . unique_sequences = { } for s in old_sequences : if s [ 0 ] == pdb_ID and s [ 1 ] == chain_id : self . _add_sequence ( pdb_ID , chain_id , replacement_sequence ) else : self . _add_sequence ( s [ 0 ] , s [ 1 ] , s [ 2 ] ) self . _find_identical_sequences ( ) | Replaces a sequence with another . Typically not useful but I use it in the ResidueRelatrix to make sure that the FASTA and SEQRES sequences match . | 141 | 37 |
13,941 | def retrieve ( pdb_id , cache_dir = None , bio_cache = None ) : pdb_id = pdb_id . upper ( ) if bio_cache : return FASTA ( bio_cache . get_fasta_contents ( pdb_id ) ) # Check to see whether we have a cached copy if cache_dir : filename = os . path . join ( cache_dir , "%s.fasta" % pdb_id ) if os . path . exists ( filename ) : return FASTA ( read_file ( filename ) ) else : filename += ".txt" if os . path . exists ( filename ) : return FASTA ( read_file ( filename ) ) # Get a copy from the RCSB contents = rcsb . retrieve_fasta ( pdb_id ) # Create a cached copy if appropriate if cache_dir : write_file ( os . path . join ( cache_dir , "%s.fasta" % pdb_id ) , contents ) # Return the object return FASTA ( contents ) | Creates a FASTA object by using a cached copy of the file if it exists or by retrieving the file from the RCSB . | 229 | 29 |
13,942 | def get_sequences ( self , pdb_id = None ) : sequences = { } if pdb_id : for chain_id , sequence in self . get ( pdb_id , { } ) . iteritems ( ) : sequences [ chain_id ] = Sequence . from_sequence ( chain_id , sequence ) else : for pdb_id , v in self . iteritems ( ) : sequences [ pdb_id ] = { } for chain_id , sequence in v . iteritems ( ) : sequences [ pdb_id ] [ chain_id ] = Sequence . from_sequence ( chain_id , sequence ) return sequences | Create Sequence objects for each FASTA sequence . | 138 | 10 |
13,943 | def get_chain_ids ( self , pdb_id = None , safe_call = False ) : if pdb_id == None and len ( self . keys ( ) ) == 1 : return self [ self . keys ( ) [ 0 ] ] . keys ( ) pdbID = pdbID . upper ( ) if not self . get ( pdbID ) : if not safe_call : raise Exception ( "FASTA object does not contain sequences for PDB %s." % pdbID ) else : return [ ] return self [ pdbID ] . keys ( ) | If the FASTA file only has one PDB ID pdb_id does not need to be specified . Otherwise the list of chains identifiers for pdb_id is returned . | 125 | 37 |
13,944 | def match ( self , other ) : colortext . message ( "FASTA Match" ) for frompdbID , fromchains in sorted ( self . iteritems ( ) ) : matched_pdbs = { } matched_chains = { } for fromchain , fromsequence in fromchains . iteritems ( ) : for topdbID , tochains in other . iteritems ( ) : for tochain , tosequence in tochains . iteritems ( ) : if fromsequence == tosequence : matched_pdbs [ topdbID ] = matched_pdbs . get ( topdbID , set ( ) ) matched_pdbs [ topdbID ] . add ( fromchain ) matched_chains [ fromchain ] = matched_chains . get ( fromchain , [ ] ) matched_chains [ fromchain ] . append ( ( topdbID , tochain ) ) foundmatches = [ ] colortext . printf ( " %s" % frompdbID , color = "silver" ) for mpdbID , mchains in matched_pdbs . iteritems ( ) : if mchains == set ( fromchains . keys ( ) ) : foundmatches . append ( mpdbID ) colortext . printf ( " PDB %s matched PDB %s on all chains" % ( mpdbID , frompdbID ) , color = "white" ) if foundmatches : for fromchain , fromsequence in fromchains . iteritems ( ) : colortext . printf ( " %s" % ( fromchain ) , color = "silver" ) colortext . printf ( " %s" % ( fromsequence ) , color = self . unique_sequences [ fromsequence ] ) mstr = [ ] for mchain in matched_chains [ fromchain ] : if mchain [ 0 ] in foundmatches : mstr . append ( "%s chain %s" % ( mchain [ 0 ] , mchain [ 1 ] ) ) colortext . printf ( " Matches: %s" % ", " . join ( mstr ) ) else : colortext . error ( " No matches found." ) | This is a noisy terminal - printing function at present since there is no need to make it a proper API function . | 454 | 23 |
13,945 | def _process_json_data ( person_data ) : person = SwsPerson ( ) if person_data [ "BirthDate" ] : person . birth_date = parse ( person_data [ "BirthDate" ] ) . date ( ) person . directory_release = person_data [ "DirectoryRelease" ] person . email = person_data [ "Email" ] person . employee_id = person_data [ "EmployeeID" ] person . first_name = person_data [ "FirstName" ] person . gender = person_data [ "Gender" ] person . last_name = person_data [ "LastName" ] person . student_name = person_data [ "StudentName" ] if person_data [ "LastEnrolled" ] is not None : last_enrolled = LastEnrolled ( ) last_enrolled . href = person_data [ "LastEnrolled" ] [ "Href" ] last_enrolled . quarter = person_data [ "LastEnrolled" ] [ "Quarter" ] last_enrolled . year = person_data [ "LastEnrolled" ] [ "Year" ] person . last_enrolled = last_enrolled if person_data [ "LocalAddress" ] is not None : address_data = person_data [ "LocalAddress" ] local_address = StudentAddress ( ) local_address . city = address_data [ "City" ] local_address . country = address_data [ "Country" ] local_address . street_line1 = address_data [ "Line1" ] local_address . street_line2 = address_data [ "Line2" ] local_address . postal_code = address_data [ "PostalCode" ] local_address . state = address_data [ "State" ] local_address . zip_code = address_data [ "Zip" ] person . local_address = local_address person . local_phone = person_data [ "LocalPhone" ] if person_data [ "PermanentAddress" ] is not None : perm_address_data = person_data [ "PermanentAddress" ] permanent_address = StudentAddress ( ) permanent_address . city = perm_address_data [ "City" ] permanent_address . country = perm_address_data [ "Country" ] permanent_address . street_line1 = perm_address_data [ "Line1" ] permanent_address . street_line2 = perm_address_data [ "Line2" ] permanent_address . postal_code = perm_address_data [ "PostalCode" ] permanent_address . state = perm_address_data [ "State" ] permanent_address . 
zip_code = perm_address_data [ "Zip" ] person . permanent_address = permanent_address person . permanent_phone = person_data [ "PermanentPhone" ] person . uwregid = person_data [ "RegID" ] person . student_number = person_data [ "StudentNumber" ] person . student_system_key = person_data [ "StudentSystemKey" ] person . uwnetid = person_data [ "UWNetID" ] person . visa_type = person_data [ "VisaType" ] return person | Returns a uw_sws . models . SwsPerson object | 703 | 14 |
13,946 | def _make_notice_date ( self , response ) : today = date . today ( ) yesterday = today - timedelta ( days = 1 ) tomorrow = today + timedelta ( days = 1 ) week = today + timedelta ( days = 2 ) next_week = today + timedelta ( weeks = 1 ) future = today + timedelta ( weeks = 3 ) future_end = today + timedelta ( weeks = 5 ) json_data = json . loads ( response . data ) for notice in json_data [ "Notices" ] : if notice [ "NoticeAttributes" ] and len ( notice [ "NoticeAttributes" ] ) > 0 : for attr in notice [ "NoticeAttributes" ] : if attr [ "DataType" ] == "date" : if attr [ "Value" ] == "yesterday" : attr [ "Value" ] = yesterday . strftime ( "%Y%m%d" ) elif attr [ "Value" ] == "today" : attr [ "Value" ] = today . strftime ( "%Y%m%d" ) elif attr [ "Value" ] == "tomorrow" : attr [ "Value" ] = tomorrow . strftime ( "%Y%m%d" ) elif attr [ "Value" ] == "future" : attr [ "Value" ] = future . strftime ( "%Y%m%d" ) elif attr [ "Value" ] == "future_end" : attr [ "Value" ] = future_end . strftime ( "%Y%m%d" ) elif attr [ "Value" ] == "next_week" : attr [ "Value" ] = next_week . strftime ( "%Y%m%d" ) elif attr [ "Value" ] == "week" : attr [ "Value" ] = week . strftime ( "%Y%m%d" ) else : pass # use original response . data = json . dumps ( json_data ) | Set the date attribute value in the notice mock data | 435 | 12
13,947 | def relative_symlink ( target , link_name ) : link_name = os . path . abspath ( link_name ) abs_target = os . path . abspath ( target ) rel_target = os . path . relpath ( target , os . path . dirname ( link_name ) ) if os . path . exists ( link_name ) : os . remove ( link_name ) os . symlink ( rel_target , link_name ) | Make a symlink to target using the shortest possible relative path . | 101 | 14 |
13,948 | def params ( self , dict ) : self . _configuration . update ( dict ) self . _measurements . update ( ) | Set configuration variables for an OnShape part . | 28 | 9 |
13,949 | def update ( self , params = None , client = c ) : uri = self . parent . uri if not params or not self . res : self . get_params ( ) return d = self . payload for k , v in params . items ( ) : m = d [ "currentConfiguration" ] [ self . parameter_map [ k ] ] [ "message" ] if isinstance ( v , bool ) or isinstance ( v , str ) : m [ "value" ] = v else : try : m [ "expression" ] = str ( v ) except KeyError : m [ "value" ] = str ( v ) res = client . update_configuration ( uri . did , uri . wvm , uri . eid , json . dumps ( d ) ) # If it was a good request, update config to be consistent with online. if res . status_code == 200 : self . res = res | Push params to OnShape and synchronize the local copy | 198 | 11 |
13,950 | def get_params ( self ) : self . res = c . get_configuration ( self . parent . uri . as_dict ( ) ) | Manually pull params defined in config from OnShape and return a python representation of the params . Quantities are converted to pint quantities Bools are converted to python bools and Enums are converted to strings . Note that Enum names are autogenerated by OnShape and do not match the name on the OnShape UI . | 32 | 67 |
13,951 | def params ( self ) : payload = self . payload d = { } for i , p in enumerate ( payload [ "currentConfiguration" ] ) : type_name = p [ "typeName" ] cp = payload [ "configurationParameters" ] [ i ] [ "message" ] name = cp [ "parameterName" ] if type_name == "BTMParameterQuantity" : try : v = q ( p [ "message" ] [ "expression" ] ) except : v = q ( p [ "message" ] [ "value" ] , p [ "message" ] [ "units" ] ) elif type_name == "BTMParameterBoolean" : v = p [ "message" ] [ "value" ] elif type_name == "BTMParameterEnum" : enum = p [ "message" ] [ "value" ] enum_map = { d [ 'message' ] [ 'option' ] : i for i , d in enumerate ( cp [ 'options' ] ) } v = cp [ 'options' ] [ enum_map [ enum ] ] [ 'message' ] [ 'optionName' ] d [ name ] = v return d | Get the params of response data from the API . | 257 | 10 |
13,952 | def update ( self ) : uri = self . parent . uri script = r""" function(context, queries) { return getVariable(context, "measurements"); } """ self . res = c . evaluate_featurescript ( uri . as_dict ( ) , script ) | Update all local variable names to match OnShape . | 61 | 10 |
13,953 | def getFailedJobIDs ( self , extraLapse = TYPICAL_LAPSE ) : scriptsRun = self . scriptsRun failedJobTimestamps = [ ] nodata = [ ] for name , details in sorted ( scriptsRun . iteritems ( ) ) : if details [ "lastSuccess" ] and expectedScripts . get ( name ) : if not expectedScripts . check ( name , details [ "lastSuccess" ] , extraLapse ) : if details [ "lastRun" ] : failedJobTimestamps . append ( details [ "lastRun" ] ) else : nodata . append ( name ) continue else : if details [ "lastRun" ] : failedJobTimestamps . append ( details [ "lastRun" ] ) else : nodata . append ( name ) continue if details [ "status" ] & RETROSPECT_FAIL : failedJobTimestamps . append ( details [ "lastRun" ] ) elif details [ "status" ] & RETROSPECT_WARNING : failedJobTimestamps . append ( details [ "lastRun" ] ) return failedJobTimestamps , nodata | Returns a list of which identify failed jobs in the scriptsRun table . If a time stamp for a job can be found we return this . The time stamp can be used to index the log . If no time stamp was found return the name of the script instead . | 244 | 53 |
13,954 | def to_csv ( args ) : result_file = args . result_file output_file = args . output_file delimiter = args . delimiter if not os . path . isfile ( result_file ) : raise OSError ( "Results file does not exists" ) headers = [ 'elapsed' , 'epoch' , 'turret_name' , 'scriptrun_time' , 'error' ] headers_row = { } set_database ( result_file , db , { } ) results = Result . select ( ) for item in results : result_item = item . to_dict ( ) for k in result_item [ 'custom_timers' ] . keys ( ) : if k not in headers : headers . append ( k ) headers_row [ k ] = k with open ( output_file , "w+" ) as f : writer = csv . DictWriter ( f , fieldnames = headers , delimiter = delimiter ) headers_row . update ( { 'elapsed' : 'elapsed time' , 'epoch' : 'epoch (in seconds)' , 'turret_name' : 'turret name' , 'scriptrun_time' : 'transaction time' , 'error' : 'error' } ) writer . writerow ( headers_row ) for result_item in results : line = result_item . to_dict ( ) for key , value in line [ 'custom_timers' ] . items ( ) : line [ key ] = value del line [ 'custom_timers' ] writer . writerow ( line ) | Take a sqlite filled database of results and return a csv file | 345 | 14 |
13,955 | def fraction_correct_fuzzy_linear_create_vector ( z , z_cutoff , z_fuzzy_range ) : assert ( z_fuzzy_range * 2 < z_cutoff ) if ( z == None or numpy . isnan ( z ) ) : # todo: and ignore_null_values: # If we are missing values then we either discount the case or consider it as incorrect depending on ignore_null_values return None elif ( z >= z_cutoff + z_fuzzy_range ) : # positive e.g. z >= 1.1 return [ 0 , 0 , 1 ] elif ( z <= - z_cutoff - z_fuzzy_range ) : # negative e.g. z <= -1.1 return [ 1 , 0 , 0 ] elif ( - z_cutoff + z_fuzzy_range <= z <= z_cutoff - z_fuzzy_range ) : # neutral e.g. -0.9 <= z <= 0.9 return [ 0 , 1 , 0 ] elif ( - z_cutoff - z_fuzzy_range < z < - z_cutoff + z_fuzzy_range ) : # negative/neutral e.g. -1.1 < z < 0.9 neutrality = ( z + z_cutoff + z_fuzzy_range ) / ( z_fuzzy_range * 2 ) zvec = [ 1 - neutrality , neutrality , 0 ] elif ( z_cutoff - z_fuzzy_range < z < z_cutoff + z_fuzzy_range ) : # neutral/positive e.g. 0.9 < z < 1.1 positivity = ( z - z_cutoff + z_fuzzy_range ) / ( z_fuzzy_range * 2 ) zvec = [ 0 , 1 - positivity , positivity ] else : raise Exception ( 'Logical error.' ) # normalize the vector length = math . sqrt ( numpy . dot ( zvec , zvec ) ) return numpy . divide ( zvec , length ) | A helper function for fraction_correct_fuzzy_linear . | 470 | 14 |
13,956 | def apply_quality_control_checks ( seq , check_gen9_seqs = True , check_short_length = True , check_local_gc_content = True , check_global_gc_content = True ) : seq = seq . upper ( ) failure_reasons = [ ] # Minimum length if check_short_length : if len ( seq ) < min_gene_length : failure_reasons . append ( 'minimum_length: Sequence is %d bp long and needs to be at least %d bp' % ( len ( seq ) , min_gene_length ) ) # Maximum length if len ( seq ) > max_gene_length : failure_reasons . append ( 'maximum_length: Sequence is %d bp long and needs to be shorter than %d bp' % ( len ( seq ) , max_gene_length ) ) # Gen9 restricted sequences if check_gen9_seqs : for site in reserved_restriction_sites : pattern = dna . dna_to_re ( site ) reverse_site = dna . reverse_complement ( site ) reverse_pattern = dna . dna_to_re ( reverse_site ) if pattern . match ( seq ) : failure_reasons . append ( 'gen9_restricted_sequences: Reserved sequence %s is present' % ( site ) ) if reverse_pattern . match ( seq ) : failure_reasons . append ( 'gen9_restricted_sequences: Reverse-complement of reserved sequence %s is present' % ( site ) ) # Global GC content if check_global_gc_content : gc_content = dna . gc_content ( seq ) if gc_content < global_gc_content_min : failure_reasons . append ( 'global_gc_content_min: Global GC content is %.3f%% and must be at least %.3f%%' % ( gc_content , global_gc_content_min ) ) if gc_content > global_gc_content_max : failure_reasons . append ( 'global_gc_content_max: Global GC content is %.3f%% and must be less than %.3f%%' % ( gc_content , global_gc_content_max ) ) # Local GC content (windows) if check_local_gc_content : windows = [ seq ] if local_gc_window_size < len ( seq ) : windows = dna . sliding_window ( seq , local_gc_window_size ) for seq_window in windows : lgc_content = dna . gc_content ( seq_window ) if lgc_content < local_gc_content_min : failure_reasons . 
append ( 'local_gc_content_min: Local GC content is %.3f%% and must be at least %.3f%%' % ( lgc_content , local_gc_content_min ) ) break if lgc_content > local_gc_content_max : failure_reasons . append ( 'local_gc_content_max: Local GC content is %.3f%% and must be less than %.3f%%' % ( lgc_content , local_gc_content_max ) ) break # Homopolymers for base in dna . dna_bases : homopolymer = base * homopolymer_max_lengths [ base ] if homopolymer in seq : failure_reasons . append ( 'max_%s_homopolymer: %s' % ( base . lower ( ) , dna . case_highlight ( seq , a_homopolymer ) ) ) # Make sure all the checks passed. if failure_reasons : intro = "The given sequence fails following Gen9 design guidelines:" raise ValueError ( '\n' . join ( [ intro ] + failure_reasons ) ) | Raise a ValueError if the given sequence doesn t pass all of the Gen9 quality control design guidelines . Certain checks can be enabled or disabled via the command line . | 848 | 34 |
13,957 | def get_default_values ( self ) : out = dict ( dx = 0 , dy = 0 , dz = 0 , theta = 0 , phi = 0 , psi = 0 ) dx , dy , dz , _ = np . mean ( self . coord1 - self . coord2 , axis = 1 ) out [ 'dx' ] = dx out [ 'dy' ] = dy out [ 'dz' ] = dz # C->N vector vec1 = self . coord1 [ : - 1 , 1 ] - self . coord1 [ : - 1 , - 1 ] vec2 = self . coord2 [ : - 1 , 1 ] - self . coord2 [ : - 1 , - 1 ] vec1 /= np . linalg . norm ( vec1 ) vec2 /= np . linalg . norm ( vec2 ) # Find the rotation matrix that converts vec1 to vec2: # http://math.stackexchange.com/questions/180418/#476311 v = np . cross ( vec1 , vec2 ) s = np . linalg . norm ( v ) + np . finfo ( DTYPE ) . eps c = vec1 . dot ( vec2 ) vx = np . array ( [ [ 0 , - v [ 2 ] , v [ 1 ] ] , [ v [ 2 ] , 0 , - v [ 0 ] ] , [ - v [ 1 ] , v [ 0 ] , 0 ] ] , dtype = DTYPE ) rotation_matrix = np . eye ( 3 ) + vx + vx . dot ( vx ) * ( 1 - c ) / ( s * s ) # Recover the angles from the matrix as seen here: # http://nghiaho.com/?page_id=846 out [ 'theta' ] = math . atan2 ( rotation_matrix [ 2 , 1 ] , rotation_matrix [ 2 , 2 ] ) out [ 'phi' ] = math . atan2 ( - rotation_matrix [ 2 , 0 ] , math . hypot ( rotation_matrix [ 2 , 1 ] , rotation_matrix [ 2 , 2 ] ) ) out [ 'psi' ] = math . atan2 ( rotation_matrix [ 1 , 0 ] , rotation_matrix [ 0 , 0 ] ) return out | Make a crude estimation of the alignment using the center of mass and general C - > N orientation . | 506 | 20 |
13,958 | def get_matrix ( theta , phi , psi , dx , dy , dz , matrix = np . zeros ( ( 4 , 4 ) , dtype = DTYPE ) , angles = np . zeros ( 3 , dtype = DTYPE ) ) : # NB!: matrix and angles by default are being overwritten on each call # thus, only created once at compile time. angles [ 0 ] = theta angles [ 1 ] = phi angles [ 2 ] = psi cx , cy , cz = np . cos ( angles ) sx , sy , sz = np . sin ( angles ) rotation = matrix [ : 3 , : 3 ] rotation . flat = ( cx * cz - sx * cy * sz , cx * sz + sx * cy * cz , sx * sy , - sx * cz - cx * cy * sz , - sx * sz + cx * cy * cz , cx * sy , sy * sz , - sy * cz , cy ) # Translation component matrix [ : 3 , 3 ] = dx , dy , dz matrix [ 3 , 3 ] = 1. return matrix | Build the rotation - translation matrix . | 243 | 7 |
13,959 | def _tm ( self , theta , phi , psi , dx , dy , dz ) : matrix = self . get_matrix ( theta , phi , psi , dx , dy , dz ) coord = matrix . dot ( self . coord2 ) dist = coord - self . coord1 d_i2 = ( dist * dist ) . sum ( axis = 0 ) tm = - ( 1 / ( 1 + ( d_i2 / self . d02 ) ) ) return tm | Compute the minimisation target not normalised . | 108 | 10 |
13,960 | def write ( self , outputfile = 'out.pdb' , appended = False ) : # FIXME some cases don't work. matrix = self . get_matrix ( * * self . get_current_values ( ) ) out = open ( outputfile , 'w' ) atomid = 1 if appended : for line in open ( self . pdb1 ) : if not line . startswith ( 'ATOM' ) or ( line [ 21 ] != self . chain_1 and line [ 21 ] != ' ' ) : continue out . write ( line [ : 7 ] ) out . write ( '{: >4}' . format ( atomid ) ) atomid += 1 out . write ( line [ 11 : 21 ] ) out . write ( 'A' ) out . write ( line [ 22 : ] ) for line in open ( self . pdb2 ) : if not line . startswith ( 'ATOM' ) or ( line [ 21 ] != self . chain_2 and line [ 21 ] != ' ' ) : continue x = float ( line [ 32 : 38 ] ) y = float ( line [ 39 : 46 ] ) z = float ( line [ 48 : 54 ] ) vec = np . array ( [ x , y , z , 1 ] ) x , y , z , _ = matrix . dot ( vec ) out . write ( line [ : 7 ] ) out . write ( '{: >4}' . format ( atomid ) ) atomid += 1 out . write ( line [ 11 : 21 ] ) out . write ( 'B' ) out . write ( line [ 22 : 30 ] ) out . write ( '{:>8.3f}{:>8.3f}{:>8.3f}' . format ( x , y , z ) ) out . write ( line [ 54 : ] ) out . close ( ) | Save the second PDB file aligned to the first . | 405 | 11 |
13,961 | def _load_data_alignment ( self , chain1 , chain2 ) : parser = PDB . PDBParser ( QUIET = True ) ppb = PDB . PPBuilder ( ) structure1 = parser . get_structure ( chain1 , self . pdb1 ) structure2 = parser . get_structure ( chain2 , self . pdb2 ) seq1 = str ( ppb . build_peptides ( structure1 ) [ 0 ] . get_sequence ( ) ) seq2 = str ( ppb . build_peptides ( structure2 ) [ 0 ] . get_sequence ( ) ) # Alignment parameters taken from PconsFold renumbering script. align = pairwise2 . align . globalms ( seq1 , seq2 , 2 , - 1 , - 0.5 , - 0.1 ) [ 0 ] indexes = set ( i for i , ( s1 , s2 ) in enumerate ( zip ( align [ 0 ] , align [ 1 ] ) ) if s1 != '-' and s2 != '-' ) coord1 = np . hstack ( [ np . concatenate ( ( r [ 'CA' ] . get_coord ( ) , ( 1 , ) ) ) [ : , None ] for i , r in enumerate ( structure1 . get_residues ( ) ) if i in indexes and 'CA' in r ] ) . astype ( DTYPE , copy = False ) coord2 = np . hstack ( [ np . concatenate ( ( r [ 'CA' ] . get_coord ( ) , ( 1 , ) ) ) [ : , None ] for i , r in enumerate ( structure2 . get_residues ( ) ) if i in indexes and 'CA' in r ] ) . astype ( DTYPE , copy = False ) self . coord1 = coord1 self . coord2 = coord2 self . N = len ( seq1 ) | Extract the sequences from the PDB file perform the alignment and load the coordinates of the CA of the common residues . | 417 | 24 |
13,962 | def _load_data_index ( self , chain1 , chain2 ) : parser = PDB . PDBParser ( QUIET = True ) structure1 = parser . get_structure ( chain1 , self . pdb1 ) structure2 = parser . get_structure ( chain2 , self . pdb2 ) residues1 = list ( structure1 . get_residues ( ) ) residues2 = list ( structure2 . get_residues ( ) ) indexes1 = set ( r . id [ 1 ] for r in residues1 ) indexes2 = set ( r . id [ 1 ] for r in residues2 ) indexes = indexes1 . intersection ( indexes2 ) self . indexes = indexes . copy ( ) self . N = len ( indexes ) coord1 = [ ] indexes1 = indexes . copy ( ) for r in residues1 : if r . id [ 1 ] in indexes1 and 'CA' in r : coord1 . append ( np . concatenate ( ( r [ 'CA' ] . get_coord ( ) , ( 1 , ) ) ) [ : , None ] ) # Remove from index to avoid repeated residues indexes1 . remove ( r . id [ 1 ] ) coord1 = np . hstack ( coord1 ) . astype ( DTYPE , copy = False ) coord2 = [ ] for r in residues2 : if r . id [ 1 ] in indexes and 'CA' in r : coord2 . append ( np . concatenate ( ( r [ 'CA' ] . get_coord ( ) , ( 1 , ) ) ) [ : , None ] ) indexes . remove ( r . id [ 1 ] ) coord2 = np . hstack ( coord2 ) . astype ( DTYPE , copy = False ) self . coord1 = coord1 self . coord2 = coord2 | Load the coordinates of the CA of the common residues . | 389 | 11 |
13,963 | def _json_to_sectionstatus ( section_data ) : section_status = SectionStatus ( ) if section_data [ "AddCodeRequired" ] == 'true' : section_status . add_code_required = True else : section_status . add_code_required = False section_status . current_enrollment = int ( section_data [ "CurrentEnrollment" ] ) current_period = int ( section_data [ "CurrentRegistrationPeriod" ] ) section_status . current_registration_period = current_period if section_data [ "FacultyCodeRequired" ] == 'true' : section_status . faculty_code_required = True else : section_status . faculty_code_required = False limit_estimate = int ( section_data [ "LimitEstimateEnrollment" ] ) section_status . limit_estimated_enrollment = limit_estimate indicator = section_data [ "LimitEstimateEnrollmentIndicator" ] section_status . limit_estimate_enrollment_indicator = indicator section_status . room_capacity = int ( section_data [ "RoomCapacity" ] ) section_status . sln = int ( section_data [ "SLN" ] ) section_status . space_available = int ( section_data [ "SpaceAvailable" ] ) if section_data [ "Status" ] == "open" : section_status . is_open = True else : section_status . is_open = False return section_status | Returns a uw_sws . models . SectionStatus object created from the passed json . | 323 | 19 |
13,964 | def renumber_atoms ( lines ) : new_lines = [ ] current_number = 1 for line in lines : if line . startswith ( 'ATOM' ) or line . startswith ( 'HETATM' ) : new_lines . append ( line [ : 6 ] + string . rjust ( '%d' % current_number , 5 ) + line [ 11 : ] ) current_number += 1 else : if line . startswith ( 'TER' ) : current_number += 1 new_lines . append ( line ) return new_lines | Takes in a list of PDB lines and renumbers the atoms appropriately | 124 | 15 |
13,965 | def clean_alternate_location_indicators ( lines ) : new_lines = [ ] previously_seen_alt_atoms = set ( ) for line in lines : if line . startswith ( 'ATOM' ) : alt_loc_id = line [ 16 ] if alt_loc_id != ' ' : atom_name = line [ 12 : 16 ] . strip ( ) res_name = line [ 17 : 20 ] . strip ( ) chain = line [ 21 ] resnum = long ( line [ 22 : 26 ] . strip ( ) ) loc_tup = ( atom_name , res_name , chain , resnum ) if loc_tup in previously_seen_alt_atoms : # Continue main for loop continue else : previously_seen_alt_atoms . add ( loc_tup ) line = line [ : 16 ] + ' ' + line [ 17 : ] new_lines . append ( line ) return new_lines | Keeps only the first atom if alternated location identifiers are being used Removes alternate location ID charactor | 206 | 21 |
13,966 | def parse_pdb_ligand_info ( self , pdb_ligand_info ) : mtchs = re . findall ( '(<ligand.*?</ligand>)' , pdb_ligand_info , re . DOTALL ) for m in mtchs : if m . upper ( ) . find ( 'CHEMICALID="{0}"' . format ( self . PDBCode . upper ( ) ) ) != - 1 : ligand_type = re . match ( '<ligand.*?\stype="(.*?)".*?>' , m , re . DOTALL ) if ligand_type : self . LigandType = ligand_type . group ( 1 ) | This only parses the ligand type as all the other information should be in the . cif file . The XML file has proper capitalization whereas the . cif file uses all caps for the ligand type . | 157 | 44 |
13,967 | def add_code_mapping ( self , from_pdb_code , to_pdb_code ) : # Consistency check - make sure that we always map the same code e.g. 'LIG' to the same code e.g. 'GTP' if from_pdb_code in self . code_map : assert ( self . code_map [ from_pdb_code ] == to_pdb_code ) else : self . code_map [ from_pdb_code ] = to_pdb_code | Add a code mapping without a given instance . | 119 | 9 |
13,968 | def reset_password ( self , action_token , signed_data ) : try : action = "reset-password" user = get_user_by_action_token ( action , action_token ) if not user or not user . signed_data_match ( signed_data , action ) : raise mocha_exc . AppError ( "Verification Invalid!" ) if request . method == "POST" : password = request . form . get ( "password" , "" ) . strip ( ) password_confirm = request . form . get ( "password_confirm" , "" ) . strip ( ) if not password or password != password_confirm : raise exceptions . AuthError ( "Password is missing or passwords don't match" ) user . change_password ( password ) user . set_email_verified ( True ) session_set_require_password_change ( False ) flash_success ( "Password updated successfully!" ) return redirect ( __options__ . get ( "login_view" ) or self . login ) return { "action_token" : action_token , "signed_data" : signed_data } except ( mocha_exc . AppError , exceptions . AuthError ) as ex : flash_error ( str ( ex ) ) except Exception as e : logging . exception ( e ) flash_error ( "Unable to reset password" ) return redirect ( self . login ) | Reset the user password . It was triggered by LOST - PASSWORD | 298 | 16 |
13,969 | def verify_email ( self , action_token , signed_data ) : try : action = "verify-email" user = get_user_by_action_token ( action , action_token ) if not user or not user . signed_data_match ( signed_data , action ) : raise mocha_exc . AppError ( "Verification Invalid!" ) else : user . set_email_verified ( True ) flash_success ( "Account verified. You can now login" ) username = user . username if user . login_method == "email" : username = user . email return redirect ( self . login , username = username ) except Exception as e : logging . exception ( e ) flash_error ( "Verification Failed!" ) return redirect ( self . login ) | Verify email account in which a link was sent to | 166 | 11 |
13,970 | def oauth_connect ( self , provider , action ) : valid_actions = [ "connect" , "authorized" , "test" ] _redirect = views . auth . Account . account_settings if is_authenticated ( ) else self . login if action not in valid_actions or "oauth" not in __options__ . get ( "registration_methods" ) or not __options__ . get ( "allow_registration" ) or not hasattr ( oauth , provider ) : return redirect ( _redirect ) client = getattr ( oauth , provider ) params = client . __params__ me_args = params . get ( "me" ) user_id = params . get ( "user_id" ) oauth_user_id = None oauth_name = None oauth_email = None if action == "test" : session_data = { "provider" : "ensure" , "user_id" : "1234" , "name" : "Mardix" , "email" : "mardix@email.com" , } set_oauth_session ( session_data ) return redirect ( url_for ( self . register , oauth = 1 ) ) if action == "connect" : _next = request . args . get ( 'next' ) authorized_url = url_for ( self , provider = provider , action = "authorized" , next = _next or request . referrer or None , _external = True ) return client . authorize ( callback = authorized_url ) elif action == "authorized" : resp = client . authorized_response ( ) if resp is None : pass elif isinstance ( resp , OAuthException ) : flash_error ( "Access Denied" ) else : if not me_args : oauth_user_id = resp . get ( user_id ) else : me = client . get ( me_args ) if action == "authorized" and oauth_user_id : if is_authenticated ( ) : try : # Add federated login to current_user current_user . add_federated_login ( provider = provider , federated_id = oauth_user_id ) flash_success ( "You can now login with your %s account" % provider . upper ( ) ) except Exception as e : logging . exception ( e ) return redirect ( views . auth . Account . account_settings ) # User not logged in else : # Existing user user = with_federation ( provider , oauth_user_id ) if user : create_session ( user ) return redirect ( request . args . get ( "next" ) or __options__ . 
get ( "login_view" ) ) # New User else : session_data = { "provider" : provider , "user_id" : oauth_user_id , "name" : oauth_name , "email" : oauth_email , } set_oauth_session ( session_data ) else : return redirect ( _redirect ) return { "action" : action , "provider" : provider , "authorized_url" : "" } return redirect ( _redirect ) | This endpoint doesn t check if user is logged in because it has two functions | 681 | 15 |
13,971 | def log ( self , message , level = None ) : if level is None : level = logging . INFO current_app . logger . log ( msg = message , level = level ) | Write a message to log | 38 | 5 |
13,972 | def is_instance ( self , model ) : result = isinstance ( model , self . __model__ ) if result is True : return True err = 'Object {} is not of type {}' raise ValueError ( err . format ( model , self . __model__ ) ) | Is instance? Checks if provided object is instance of this service s model . | 58 | 15 |
13,973 | def create ( self , * * kwargs ) : model = self . new ( * * kwargs ) return self . save ( model ) | Create Instantiates and persists new model populated from provided arguments | 31 | 12 |
13,974 | def save ( self , model , commit = True ) : self . is_instance ( model ) db . session . add ( model ) if commit : db . session . commit ( ) return model | Save Puts model into unit of work for persistence . Can optionally commit transaction . Returns persisted model as a result . | 40 | 23 |
13,975 | def delete ( self , model , commit = True ) : self . is_instance ( model ) db . session . delete ( model ) if commit : db . session . commit ( ) return model | Delete Puts model for deletion into unit of work and optionall commits transaction | 40 | 15 |
13,976 | def is_connectable ( host : str , port : Union [ int , str ] ) -> bool : socket_ = None try : socket_ = socket . create_connection ( ( host , port ) , 1 ) result = True except socket . timeout : result = False finally : if socket_ : socket_ . close ( ) return result | Tries to connect to the device to see if it is connectable . | 70 | 15 |
13,977 | def group_lines ( lines ) : groups = [ ] group = [ ] for line in lines : if line . strip ( ) == "" : groups . append ( group [ : ] ) group = [ ] continue group . append ( line ) if group : groups . append ( group [ : ] ) return groups | Split a list of lines using empty lines as separators . | 64 | 12 |
13,978 | async def set_neighbors ( self ) : t = time . time ( ) self . logger . debug ( "Settings grid neighbors for the multi-environments." ) tasks = [ ] for i in range ( len ( self . grid ) ) : for j in range ( len ( self . grid [ 0 ] ) ) : addr = self . grid [ i ] [ j ] N , E , S , W = None , None , None , None if i != 0 : W = self . grid [ i - 1 ] [ j ] if i != len ( self . grid ) - 1 : E = self . grid [ i + 1 ] [ j ] if j != 0 : N = self . grid [ i ] [ j - 1 ] if j != len ( self . grid [ 0 ] ) - 1 : S = self . grid [ i ] [ j + 1 ] task = asyncio . ensure_future ( self . _set_node_neighbors ( addr , N , E , S , W ) ) tasks . append ( task ) await asyncio . gather ( * tasks ) self . logger . debug ( "Setting grid neighbors for the slave environments " "and their agents." ) tasks = [ ] for addr in self . addrs : task = asyncio . ensure_future ( self . _set_neighbors ( addr ) ) tasks . append ( task ) await asyncio . gather ( * tasks ) self . logger . debug ( "All grid neighbors set in {} seconds." . format ( time . time ( ) - t ) ) x = self . _ngs [ 0 ] * self . _gs [ 0 ] * self . _n_slaves y = self . _ngs [ 1 ] * self . _gs [ 1 ] self . logger . info ( "Initialized a distributed grid with overall size " "({}, {}). Total of {} agents." . format ( x , y , x * y ) ) | Set neighbors for multi - environments their slave environments and agents . | 408 | 12 |
13,979 | async def ssh_exec ( server , cmd , timeout = 10 , * * ssh_kwargs ) : conn = await asyncio . wait_for ( asyncssh . connect ( server , * * ssh_kwargs ) , timeout = timeout ) ret = await conn . run ( cmd ) conn . close ( ) return ret | Execute a command on a given server using asynchronous SSH - connection . | 68 | 14 |
13,980 | async def spawn_slaves ( self , spawn_cmd , ports = None , * * ssh_kwargs ) : pool = multiprocessing . Pool ( len ( self . nodes ) ) rets = [ ] for i , node in enumerate ( self . nodes ) : server , server_port = node port = ports [ node ] if ports is not None else self . port mgr_addr = "tcp://{}:{}/0" . format ( server , port ) self . _manager_addrs . append ( mgr_addr ) if type ( spawn_cmd ) in [ list , tuple ] : cmd = spawn_cmd [ i ] else : cmd = spawn_cmd args = [ server , cmd ] ssh_kwargs_cp = ssh_kwargs . copy ( ) ssh_kwargs_cp [ 'port' ] = server_port ret = pool . apply_async ( ssh_exec_in_new_loop , args = args , kwds = ssh_kwargs_cp , error_callback = logger . warning ) rets . append ( ret ) self . _pool = pool self . _r = rets | Spawn multi - environments on the nodes through SSH - connections . | 248 | 12 |
13,981 | def get_slave_managers ( self , as_coro = False ) : async def slave_task ( addr ) : r_manager = await self . env . connect ( addr ) return await r_manager . get_slave_managers ( ) tasks = create_tasks ( slave_task , self . addrs ) return run_or_coro ( tasks , as_coro ) | Return all slave environment manager addresses . | 85 | 7 |
13,982 | def nodes ( self , type = None , failed = False ) : if type is None : type = Node if not issubclass ( type , Node ) : raise ( TypeError ( "{} is not a valid node type." . format ( type ) ) ) if failed not in [ "all" , False , True ] : raise ValueError ( "{} is not a valid node failed" . format ( failed ) ) if failed == "all" : return type . query . filter_by ( participant_id = self . id ) . all ( ) else : return type . query . filter_by ( failed = failed , participant_id = self . id ) . all ( ) | Get nodes associated with this participant . | 142 | 7 |
13,983 | def print_verbose ( self ) : print "Nodes: " for a in ( self . nodes ( failed = "all" ) ) : print a print "\nVectors: " for v in ( self . vectors ( failed = "all" ) ) : print v print "\nInfos: " for i in ( self . infos ( failed = "all" ) ) : print i print "\nTransmissions: " for t in ( self . transmissions ( failed = "all" ) ) : print t print "\nTransformations: " for t in ( self . transformations ( failed = "all" ) ) : print t | Print a verbose representation of a network . | 135 | 9 |
13,984 | def vectors ( self , direction = "all" , failed = False ) : # check direction if direction not in [ "all" , "incoming" , "outgoing" ] : raise ValueError ( "{} is not a valid vector direction. " "Must be all, incoming or outgoing." . format ( direction ) ) if failed not in [ "all" , False , True ] : raise ValueError ( "{} is not a valid vector failed" . format ( failed ) ) # get the vectors if failed == "all" : if direction == "all" : return Vector . query . filter ( or_ ( Vector . destination_id == self . id , Vector . origin_id == self . id ) ) . all ( ) if direction == "incoming" : return Vector . query . filter_by ( destination_id = self . id ) . all ( ) if direction == "outgoing" : return Vector . query . filter_by ( origin_id = self . id ) . all ( ) else : if direction == "all" : return Vector . query . filter ( and_ ( Vector . failed == failed , or_ ( Vector . destination_id == self . id , Vector . origin_id == self . id ) ) ) . all ( ) if direction == "incoming" : return Vector . query . filter_by ( destination_id = self . id , failed = failed ) . all ( ) if direction == "outgoing" : return Vector . query . filter_by ( origin_id = self . id , failed = failed ) . all ( ) | Get vectors that connect at this node . | 332 | 8 |
13,985 | def transmissions ( self , direction = "outgoing" , status = "all" , failed = False ) : # check parameters if direction not in [ "incoming" , "outgoing" , "all" ] : raise ( ValueError ( "You cannot get transmissions of direction {}." . format ( direction ) + "Type can only be incoming, outgoing or all." ) ) if status not in [ "all" , "pending" , "received" ] : raise ( ValueError ( "You cannot get transmission of status {}." . format ( status ) + "Status can only be pending, received or all" ) ) if failed not in [ "all" , False , True ] : raise ValueError ( "{} is not a valid transmission failed" . format ( failed ) ) # get transmissions if direction == "all" : if status == "all" : return Transmission . query . filter ( and_ ( Transmission . failed == False , or_ ( Transmission . destination_id == self . id , Transmission . origin_id == self . id ) ) ) . all ( ) else : return Transmission . query . filter ( and_ ( Transmission . failed == False , Transmission . status == status , or_ ( Transmission . destination_id == self . id , Transmission . origin_id == self . id ) ) ) . all ( ) if direction == "incoming" : if status == "all" : return Transmission . query . filter_by ( failed = False , destination_id = self . id ) . all ( ) else : return Transmission . query . filter ( and_ ( Transmission . failed == False , Transmission . destination_id == self . id , Transmission . status == status ) ) . all ( ) if direction == "outgoing" : if status == "all" : return Transmission . query . filter_by ( failed = False , origin_id = self . id ) . all ( ) else : return Transmission . query . filter ( and_ ( Transmission . failed == False , Transmission . origin_id == self . id , Transmission . status == status ) ) . all ( ) | Get transmissions sent to or from this node . | 437 | 9 |
13,986 | def receive ( self , what = None ) : # check self is not failed if self . failed : raise ValueError ( "{} cannot receive as it has failed." . format ( self ) ) received_transmissions = [ ] if what is None : pending_transmissions = self . transmissions ( direction = "incoming" , status = "pending" ) for transmission in pending_transmissions : transmission . status = "received" transmission . receive_time = timenow ( ) received_transmissions . append ( transmission ) elif isinstance ( what , Transmission ) : if what in self . transmissions ( direction = "incoming" , status = "pending" ) : transmission . status = "received" what . receive_time = timenow ( ) received_transmissions . append ( what ) else : raise ( ValueError ( "{} cannot receive {} as it is not " "in its pending_transmissions" . format ( self , what ) ) ) else : raise ValueError ( "Nodes cannot receive {}" . format ( what ) ) self . update ( [ t . info for t in received_transmissions ] ) | Receive some transmissions . | 239 | 5 |
13,987 | def replicate ( self , info_in ) : # check self is not failed if self . failed : raise ValueError ( "{} cannot replicate as it has failed." . format ( self ) ) from transformations import Replication info_out = type ( info_in ) ( origin = self , contents = info_in . contents ) Replication ( info_in = info_in , info_out = info_out ) | Replicate an info . | 87 | 5 |
13,988 | def mutate ( self , info_in ) : # check self is not failed if self . failed : raise ValueError ( "{} cannot mutate as it has failed." . format ( self ) ) from transformations import Mutation info_out = type ( info_in ) ( origin = self , contents = info_in . _mutated_contents ( ) ) Mutation ( info_in = info_in , info_out = info_out ) | Replicate an info + mutation . | 96 | 7 |
13,989 | def transmissions ( self , status = "all" ) : if status not in [ "all" , "pending" , "received" ] : raise ( ValueError ( "You cannot get {} transmissions." . format ( status ) + "Status can only be pending, received or all" ) ) if status == "all" : return Transmission . query . filter_by ( vector_id = self . id , failed = False ) . all ( ) else : return Transmission . query . filter_by ( vector_id = self . id , status = status , failed = False ) . all ( ) | Get transmissions sent along this Vector . | 125 | 7 |
13,990 | def serve_forever ( self , banner = None ) : if hasattr ( readline , "read_history_file" ) : try : readline . read_history_file ( self . histfile ) except IOError : pass atexit . register ( self . _save_history ) super ( Shell , self ) . serve_forever ( banner ) | Interact with the user . | 76 | 6 |
13,991 | def complete ( self , word , state ) : try : import rl # TODO: doing this manually right now, but may make sense to # exploit rl . completion . suppress_append = True except ImportError : pass word = transform ( word , self . transforms , word = True ) if state == 0 : self . matches = self . get_matches ( word ) try : match = self . matches [ state ] except IndexError : return None else : return transform ( match , self . transforms , word = True , inverse = True ) | Return the next possible completion for word . | 114 | 8 |
13,992 | def exclude_matches ( self , matches ) : for match in matches : for exclude_pattern in self . exclude_patterns : if re . match ( exclude_pattern , match ) is not None : break else : yield match | Filter any matches that match an exclude pattern . | 48 | 9 |
13,993 | def gen_filename_completions ( self , word , filenames ) : if not word : return filenames else : trie = pygtrie . CharTrie ( ) for filename in filenames : trie [ filename ] = filename return trie . iterkeys ( prefix = word ) | Generate a sequence of filenames that match word . | 65 | 12 |
13,994 | def gen_matches ( self , word ) : if word . startswith ( "$" ) : for match in self . gen_variable_completions ( word , os . environ ) : yield match else : head , tail = os . path . split ( word ) filenames = os . listdir ( head or '.' ) completions = self . gen_filename_completions ( tail , filenames ) for match in completions : yield os . path . join ( head , match ) for extension in self . extensions : for match in extension ( word ) : yield match | Generate a sequence of possible completions for word . | 126 | 11 |
13,995 | def gen_variable_completions ( self , word , env ) : # ignore the first character, which is a dollar sign var = word [ 1 : ] for k in env : if k . startswith ( var ) : yield "$" + k | Generate a sequence of possible variable completions for word . | 54 | 12 |
13,996 | def inflect ( self , filename ) : suffix = ( "/" if os . path . isdir ( filename ) else " " ) return self . _escape ( filename ) + suffix | Inflect a filename to indicate its type . | 38 | 9 |
13,997 | def state ( self , time = None ) : if time is None : return max ( self . infos ( type = State ) , key = attrgetter ( 'creation_time' ) ) else : states = [ s for s in self . infos ( type = State ) if s . creation_time < time ] return max ( states , key = attrgetter ( 'creation_time' ) ) | The most recently - created info of type State at the specfied time . | 88 | 16 |
13,998 | def sentry_feature ( app ) : # get keys sentry_public_key = app . config . get ( 'SENTRY_PUBLIC_KEY' ) sentry_project_id = app . config . get ( 'SENTRY_PROJECT_ID' ) if not sentry_public_key or not sentry_project_id : return # prepare dsn dsn = 'https://{key}@sentry.io/{project_id}' dsn = dsn . format ( key = sentry_public_key , project_id = sentry_project_id ) # init sentry sentry . init_app ( app = app , dsn = dsn ) | Sentry feature Adds basic integration with Sentry via the raven library | 152 | 13 |
13,999 | def new_plugin ( self , config , * args , * * kwargs ) : typ = None obj = None # if type is defined, create a new instance if 'type' in config : typ = config [ 'type' ] # single key is overriding an existing plugin instance elif isinstance ( config , collections . Mapping ) and len ( config ) == 1 : # get type name and shift out config to parent level ( typ , config ) = list ( config . items ( ) ) [ 0 ] obj = self . _ctor ( typ , config , * args , * * kwargs ) # store if named if 'name' in config : self . _instance [ config [ 'name' ] ] = obj else : # this could dupe on .name, make name=''? config [ 'name' ] = typ return obj | instantiate a plugin creates the object stores it in _instance | 178 | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.