idx (int64, 0–252k) | question (string, 48–5.28k chars) | target (string, 5–1.23k chars) |
|---|---|---|
7,500 | async def wait ( self ) -> None : if self . triggered_token is not None : return futures = [ asyncio . ensure_future ( self . _triggered . wait ( ) , loop = self . loop ) ] for token in self . _chain : futures . append ( asyncio . ensure_future ( token . wait ( ) , loop = self . loop ) ) def cancel_not_done ( fut : 'as... | Coroutine which returns when this token has been triggered |
7,501 | async def cancellable_wait ( self , * awaitables : Awaitable [ _R ] , timeout : float = None ) -> _R : futures = [ asyncio . ensure_future ( a , loop = self . loop ) for a in awaitables + ( self . wait ( ) , ) ] try : done , pending = await asyncio . wait ( futures , timeout = timeout , return_when = asyncio . FIRST_CO... | Wait for the first awaitable to complete unless we timeout or the token is triggered . |
7,502 | def parent ( self ) -> Optional [ 'CtsReference' ] : if self . start . depth == 1 and ( self . end is None or self . end . depth <= 1 ) : return None else : if self . start . depth > 1 and ( self . end is None or self . end . depth == 0 ) : return CtsReference ( "{0}{1}" . format ( "." . join ( self . start . list [ : ... | Parent of the actual URN for example 1 . 1 for 1 . 1 . 1 |
7,503 | def highest ( self ) -> CtsSinglePassageId : if not self . end : return self . start elif len ( self . start ) < len ( self . end ) and len ( self . start ) : return self . start elif len ( self . start ) > len ( self . end ) and len ( self . end ) : return self . end elif len ( self . start ) : return self . start | Return highest reference level |
7,504 | def upTo ( self , key ) : middle = [ component for component in [ self . __parsed [ "textgroup" ] , self . __parsed [ "work" ] , self . __parsed [ "version" ] ] if component is not None ] if key == URN . COMPLETE : return self . __str__ ( ) elif key == URN . NAMESPACE : return ":" . join ( [ "urn" , self . __parsed [ "... | Returns the urn up to given level using URN Constants |
7,505 | def attribute ( self ) : refs = re . findall ( "\@([a-zA-Z:]+)=\\\?[\'\"]\$" + str ( self . refsDecl . count ( "$" ) ) + "\\\?[\'\"]" , self . refsDecl ) return refs [ - 1 ] | Attribute that serves as a reference getter |
7,506 | def match ( self , passageId ) : if not isinstance ( passageId , CtsReference ) : passageId = CtsReference ( passageId ) if self . is_root ( ) : return self [ passageId . depth - 1 ] return self . root . match ( passageId ) | Given a passageId matches a citation level |
7,507 | def fill ( self , passage = None , xpath = None ) : if xpath is True : xpath = self . xpath replacement = r"\1" if isinstance ( passage , str ) : replacement = r"\1\2'" + passage + "'" return REFERENCE_REPLACER . sub ( replacement , xpath ) else : if isinstance ( passage , CtsReference ) : passage = passage . start . l... | Fill the xpath with given informations |
7,508 | def ingest ( resource , xpath = ".//tei:cRefPattern" ) : if len ( resource ) == 0 and isinstance ( resource , list ) : return None elif isinstance ( resource , list ) : resource = resource [ 0 ] elif not isinstance ( resource , _Element ) : return None resource = resource . xpath ( xpath , namespaces = XPATH_NAMESPACES... | Ingest a resource and store data in its instance |
7,509 | def get_tweets_count_times ( twitter , count , query = None ) : r oldest_id , newest_id = _get_oldest_id ( query = query ) newest_id = newest_id or oldest_id all_tweets = [ ] i = 0 while i < count : i += 1 if oldest_id <= newest_id : tweets = get_tweets ( query = query , max_id = oldest_id - 1 , count = TWEETS_PER_SEAR... | r hits the twitter api count times and grabs tweets for the indicated query |
7,510 | def parse ( self , ** kwargs ) : try : output_folder = self . retrieved except exceptions . NotExistent : return self . exit_codes . ERROR_NO_RETRIEVED_FOLDER filename_stdout = self . node . get_attribute ( 'output_filename' ) filename_stderr = self . node . get_attribute ( 'error_filename' ) try : with output_folder .... | Parse the contents of the output files retrieved in the FolderData . |
7,511 | def parse_stdout ( self , filelike ) : from CifFile import StarError if not filelike . read ( ) . strip ( ) : return self . exit_codes . ERROR_EMPTY_OUTPUT_FILE try : filelike . seek ( 0 ) cif = CifData ( file = filelike ) except StarError : self . logger . exception ( 'Failed to parse a `CifData` from the stdout file\... | Parse the content written by the script to standard out into a CifData object . |
7,512 | def parse_stderr ( self , filelike ) : marker_error = 'ERROR,' marker_warning = 'WARNING,' messages = { 'errors' : [ ] , 'warnings' : [ ] } for line in filelike . readlines ( ) : if marker_error in line : messages [ 'errors' ] . append ( line . split ( marker_error ) [ - 1 ] . strip ( ) ) if marker_warning in line : me... | Parse the content written by the script to standard err . |
7,513 | def reset ( cls ) : cls . _func_from_name = { } cls . _func_from_hash = { } cls . _func_hash = { } register = cls . _do_register for ( func , hash_name , hash_new ) in cls . _std_func_data : register ( func , func . name , hash_name , hash_new ) assert set ( cls . _func_hash ) == set ( Func ) | Reset the registry to the standard multihash functions . |
7,514 | def get ( cls , func_hint ) : try : return Func ( func_hint ) except ValueError : pass if func_hint in cls . _func_from_name : return cls . _func_from_name [ func_hint ] if func_hint in cls . _func_hash : return func_hint raise KeyError ( "unknown hash function" , func_hint ) | Return a registered hash function matching the given hint . |
7,515 | def _do_register ( cls , code , name , hash_name = None , hash_new = None ) : cls . _func_from_name [ name . replace ( '-' , '_' ) ] = code cls . _func_from_name [ name . replace ( '_' , '-' ) ] = code if hash_name : cls . _func_from_hash [ hash_name ] = code cls . _func_hash [ code ] = cls . _hash ( hash_name , hash_n... | Add hash function data to the registry without checks . |
7,516 | def register ( cls , code , name , hash_name = None , hash_new = None ) : if not _is_app_specific_func ( code ) : raise ValueError ( "only application-specific functions can be registered" ) name_mapping_data = [ ( cls . _func_from_name , name , "function name is already registered for a different function" ) , ( cls .... | Add an application - specific function to the registry . |
7,517 | def unregister ( cls , code ) : if code in Func : raise ValueError ( "only application-specific functions can be unregistered" ) func_names = { n for ( n , f ) in cls . _func_from_name . items ( ) if f == code } for func_name in func_names : del cls . _func_from_name [ func_name ] hash = cls . _func_hash . pop ( code )... | Remove an application - specific function from the registry . |
7,518 | def hash_from_func ( cls , func ) : new = cls . _func_hash [ func ] . new return new ( ) if new else None | Return a hashlib - compatible object for the multihash func . |
7,519 | def thermal_data ( data , figsize = ( 12 , 4 ) , ms_data = 50 , v_label = 'Unit-cell volume $(\mathrm{\AA}^3)$' , pdf_filen = None , title = 'P-V-T data' ) : f , ax = plt . subplots ( 1 , 2 , figsize = figsize , sharex = True ) if isuncertainties ( [ data [ 'p' ] , data [ 'v' ] , data [ 'temp' ] ] ) : p = unp . nominal... | plot P - V - T data before fitting |
7,520 | def _do_digest ( data , func ) : func = FuncReg . get ( func ) hash = FuncReg . hash_from_func ( func ) if not hash : raise ValueError ( "no available hash function for hash" , func ) hash . update ( data ) return bytes ( hash . digest ( ) ) | Return the binary digest of data with the given func . |
7,521 | def digest ( data , func ) : digest = _do_digest ( data , func ) return Multihash ( func , digest ) | Hash the given data into a new Multihash . |
7,522 | def decode ( mhash , encoding = None ) : r mhash = bytes ( mhash ) if encoding : mhash = CodecReg . get_decoder ( encoding ) ( mhash ) try : func = mhash [ 0 ] length = mhash [ 1 ] digest = mhash [ 2 : ] except IndexError as ie : raise ValueError ( "multihash is too short" ) from ie if length != len ( digest ) : raise ... | r Decode a multihash - encoded digest into a Multihash . |
7,523 | def from_hash ( self , hash ) : try : func = FuncReg . func_from_hash ( hash ) except KeyError as ke : raise ValueError ( "no matching multihash function" , hash . name ) from ke digest = hash . digest ( ) return Multihash ( func , digest ) | Create a Multihash from a hashlib - compatible hash object . |
7,524 | def encode ( self , encoding = None ) : r try : fc = self . func . value except AttributeError : fc = self . func mhash = bytes ( [ fc , len ( self . digest ) ] ) + self . digest if encoding : mhash = CodecReg . get_encoder ( encoding ) ( mhash ) return mhash | r Encode into a multihash - encoded digest . |
7,525 | def verify ( self , data ) : r digest = _do_digest ( data , self . func ) return digest [ : len ( self . digest ) ] == self . digest | r Does the given data hash to the digest in this Multihash ? |
7,526 | def truncate ( self , length ) : if length > len ( self . digest ) : raise ValueError ( "cannot enlarge the original digest by %d bytes" % ( length - len ( self . digest ) ) ) return self . __class__ ( self . func , self . digest [ : length ] ) | Return a new Multihash with a shorter digest length . |
7,527 | def set ( self , key : URIRef , value : Union [ Literal , BNode , URIRef , str , int ] , lang : Optional [ str ] = None ) : if not isinstance ( value , Literal ) and lang is not None : value = Literal ( value , lang = lang ) elif not isinstance ( value , ( BNode , URIRef ) ) : value , _type = term . _castPythonToLitera... | Set the VALUE for KEY predicate in the Metadata Graph |
7,528 | def add ( self , key , value , lang = None ) : if not isinstance ( value , Literal ) and lang is not None : value = Literal ( value , lang = lang ) elif not isinstance ( value , ( BNode , URIRef ) ) : value , _type = term . _castPythonToLiteral ( value ) if _type is None : value = Literal ( value ) else : value = Liter... | Add a triple to the graph related to this node |
7,529 | def get ( self , key , lang = None ) : if lang is not None : for o in self . graph . objects ( self . asNode ( ) , key ) : if o . language == lang : yield o else : for o in self . graph . objects ( self . asNode ( ) , key ) : yield o | Returns triple related to this node . Can filter on lang |
7,530 | def get_single ( self , key , lang = None ) : if not isinstance ( key , URIRef ) : key = URIRef ( key ) if lang is not None : default = None for o in self . graph . objects ( self . asNode ( ) , key ) : default = o if o . language == lang : return o return default else : for o in self . graph . objects ( self . asNode ... | Returns a single triple related to this node . |
7,531 | def remove ( self , predicate = None , obj = None ) : self . graph . remove ( ( self . asNode ( ) , predicate , obj ) ) | Remove triple matching the predicate or the object |
7,532 | def unlink ( self , subj = None , predicate = None ) : self . graph . remove ( ( subj , predicate , self . asNode ( ) ) ) | Remove triple where Metadata is the object |
7,533 | def getOr ( subject , predicate , * args , ** kwargs ) : if ( subject , predicate , None ) in get_graph ( ) : return Metadata ( node = get_graph ( ) . objects ( subject , predicate ) . __next__ ( ) ) return Metadata ( * args , ** kwargs ) | Retrieve a metadata node or generate a new one |
7,534 | def forwards_func ( apps , schema_editor ) : print ( "\n" ) create_count = 0 BackupRun = apps . get_model ( "backup_app" , "BackupRun" ) backup_runs = BackupRun . objects . all ( ) for backup_run in backup_runs : temp = OriginBackupRun ( name = backup_run . name , backup_datetime = backup_run . backup_datetime ) try : ... | manage migrate backup_app 0004_BackupRun_ini_file_20160203_1415 |
7,535 | def reverse_func ( apps , schema_editor ) : print ( "\n" ) remove_count = 0 BackupRun = apps . get_model ( "backup_app" , "BackupRun" ) backup_runs = BackupRun . objects . all ( ) for backup_run in backup_runs : temp = OriginBackupRun ( name = backup_run . name , backup_datetime = backup_run . backup_datetime ) config_... | manage migrate backup_app 0003_auto_20160127_2002 |
7,536 | def speziale_grun ( v , v0 , gamma0 , q0 , q1 ) : if isuncertainties ( [ v , v0 , gamma0 , q0 , q1 ] ) : gamma = gamma0 * unp . exp ( q0 / q1 * ( ( v / v0 ) ** q1 - 1. ) ) else : gamma = gamma0 * np . exp ( q0 / q1 * ( ( v / v0 ) ** q1 - 1. ) ) return gamma | calculate Gruneisen parameter for the Speziale equation |
7,537 | def speziale_debyetemp ( v , v0 , gamma0 , q0 , q1 , theta0 ) : if isuncertainties ( [ v , v0 , gamma0 , q0 , q1 , theta0 ] ) : f_vu = np . vectorize ( uct . wrap ( integrate_gamma ) , excluded = [ 1 , 2 , 3 , 4 , 5 , 6 ] ) integ = f_vu ( v , v0 , gamma0 , q0 , q1 , theta0 ) theta = unp . exp ( unp . log ( theta0 ) - i... | calculate Debye temperature for the Speziale equation |
7,538 | def integrate_gamma ( v , v0 , gamma0 , q0 , q1 , theta0 ) : def f_integrand ( v ) : gamma = gamma0 * np . exp ( q0 / q1 * ( ( v / v0 ) ** q1 - 1. ) ) return gamma / v theta_term = quad ( f_integrand , v0 , v ) [ 0 ] return theta_term | internal function to calculate Debye temperature |
7,539 | def speziale_pth ( v , temp , v0 , gamma0 , q0 , q1 , theta0 , n , z , t_ref = 300. , three_r = 3. * constants . R ) : v_mol = vol_uc2mol ( v , z ) gamma = speziale_grun ( v , v0 , gamma0 , q0 , q1 ) theta = speziale_debyetemp ( v , v0 , gamma0 , q0 , q1 , theta0 ) xx = theta / temp debye = debye_E ( xx ) if t_ref == 0... | calculate thermal pressure for the Speziale equation |
7,540 | def text ( self ) -> str : return self . export ( output = Mimetypes . PLAINTEXT , exclude = self . default_exclude ) | String representation of the text |
7,541 | def set_creator ( self , value : Union [ Literal , Identifier , str ] , lang : str = None ) : self . metadata . add ( key = DC . creator , value = value , lang = lang ) | Set the DC Creator literal value |
7,542 | def set_title ( self , value : Union [ Literal , Identifier , str ] , lang : str = None ) : return self . metadata . add ( key = DC . title , value = value , lang = lang ) | Set the DC Title literal value |
7,543 | def get_description ( self , lang : str = None ) -> Literal : return self . metadata . get_single ( key = DC . description , lang = lang ) | Get the description of the object |
7,544 | def set_description ( self , value : Union [ Literal , Identifier , str ] , lang : str = None ) : return self . metadata . add ( key = DC . description , value = value , lang = lang ) | Set the DC Description literal value |
7,545 | def set_subject ( self , value : Union [ Literal , Identifier , str ] , lang : str = None ) : return self . metadata . add ( key = DC . subject , value = value , lang = lang ) | Set the DC Subject literal value |
7,546 | def childIds ( self ) -> BaseReferenceSet : if self . _childIds is None : self . _childIds = self . getReffs ( ) return self . _childIds | Identifiers of children |
7,547 | def firstId ( self ) -> BaseReference : if self . childIds is not None : if len ( self . childIds ) > 0 : return self . childIds [ 0 ] return None else : raise NotImplementedError | First child s id of current TextualNode |
7,548 | def lastId ( self ) -> BaseReference : if self . childIds is not None : if len ( self . childIds ) > 0 : return self . childIds [ - 1 ] return None else : raise NotImplementedError | Last child s id of current TextualNode |
7,549 | def compile_vocab ( docs , limit = 1e6 , verbose = 0 , tokenizer = Tokenizer ( stem = None , lower = None , strip = None ) ) : tokenizer = make_tokenizer ( tokenizer ) d = Dictionary ( ) try : limit = min ( limit , docs . count ( ) ) docs = docs . iterator ( ) except ( AttributeError , TypeError ) : pass for i , doc in... | Get the set of words used anywhere in a sequence of documents and assign an integer id |
7,550 | def gen_file_lines ( path , mode = 'rUb' , strip_eol = True , ascii = True , eol = '\n' ) : if isinstance ( path , str ) : path = open ( path , mode ) with path : for line in path : if ascii : line = str ( line ) if strip_eol : line = line . rstrip ( '\n' ) yield line | Generate a sequence of documents from the lines in a file |
7,551 | def inventory ( self , inventory_name ) : def decorator ( f ) : self . add ( func = f , inventory_name = inventory_name ) return f return decorator | Decorator to register filters for given inventory . For a function abc it has the same effect |
7,552 | def dispatch ( self , collection , ** kwargs ) : for inventory , method in self . methods [ : : - 1 ] : if method ( collection , ** kwargs ) is True : collection . parent = self . collection . children [ inventory ] return raise UndispatchedTextError ( "CapitainsCtsText not dispatched %s" % collection . id ) | Dispatch a collection using internal filters |
7,553 | def generate_tokens ( doc , regex = CRE_TOKEN , strip = True , nonwords = False ) : r if isinstance ( regex , basestring ) : regex = re . compile ( regex ) for w in regex . finditer ( doc ) : if w : w = w . group ( ) if strip : w = w . strip ( r'-_*`()}{' + r"'" ) if w and ( nonwords or not re . match ( r'^' + RE_NONWO... | r Return a sequence of words or tokens using a re . match iteratively through the str |
7,554 | def financial_float ( s , scale_factor = 1 , typ = float , ignore = FINANCIAL_WHITESPACE , percent_str = PERCENT_SYMBOLS , replace = FINANCIAL_MAPPING , normalize_case = str . lower ) : percent_scale_factor = 1 if isinstance ( s , basestring ) : s = normalize_case ( s ) . strip ( ) for i in ignore : s = s . replace ( n... | Strip dollar signs and commas from financial numerical string |
7,555 | def is_invalid_date ( d ) : if not isinstance ( d , DATE_TYPES ) : return False if d . year < 1970 or d . year >= 2100 : return True | Return boolean to indicate whether date is invalid None if valid False if not a date |
7,556 | def vocab_freq ( docs , limit = 1e6 , verbose = 1 , tokenizer = generate_tokens ) : total = Counter ( ) try : limit = min ( limit , docs . count ( ) ) docs = docs . iterator ( ) except : pass for i , doc in enumerate ( docs ) : try : doc = doc . values ( ) except AttributeError : if not isinstance ( doc , basestring ) ... | Get the set of words used anywhere in a sequence of documents and count occurrences |
7,557 | def make_filename ( s , allow_whitespace = False , allow_underscore = False , allow_hyphen = False , limit = 255 , lower = False ) : r s = stringify ( s ) s = CRE_BAD_FILENAME . sub ( '' , s ) if not allow_whitespace : s = CRE_WHITESPACE . sub ( '' , s ) if lower : s = str . lower ( s ) if not allow_hyphen : s = s . re... | r Make sure the provided string is a valid filename and optionally remove whitespace |
7,558 | def stem ( self , s ) : if self . _stemmer is None : return passthrough ( s ) try : return getattr ( getattr ( self , '_stemmer' , None ) , 'stem' , None ) ( s ) except ( AttributeError , TypeError ) : return getattr ( getattr ( self , '_stemmer' , self ) , 'lemmatize' , passthrough ) ( s ) | This should make the Stemmer picklable and unpicklable by not using bound methods |
7,559 | def assoc ( self , index , value ) : newvec = ImmutableVector ( ) newvec . tree = self . tree . assoc ( index , value ) if index >= self . _length : newvec . _length = index + 1 else : newvec . _length = self . _length return newvec | Return a new vector with value associated at index . The implicit parameter is not modified . |
7,560 | def concat ( self , tailvec ) : newvec = ImmutableVector ( ) vallist = [ ( i + self . _length , tailvec [ i ] ) for i in range ( 0 , tailvec . _length ) ] newvec . tree = self . tree . multi_assoc ( vallist ) newvec . _length = self . _length + tailvec . _length return newvec | Returns the result of concatenating tailvec to the implicit parameter |
7,561 | def pop ( self ) : if self . _length == 0 : raise IndexError ( ) newvec = ImmutableVector ( ) newvec . tree = self . tree . remove ( self . _length - 1 ) newvec . _length = self . _length - 1 return newvec | Return a new ImmutableVector with the last item removed . |
7,562 | def read ( self , identifier , path ) : with open ( path ) as f : o = self . classes [ "text" ] ( urn = identifier , resource = self . xmlparse ( f ) ) return o | Retrieve and parse a text given an identifier |
7,563 | def _parse_textgroup ( self , cts_file ) : with io . open ( cts_file ) as __xml__ : return self . classes [ "textgroup" ] . parse ( resource = __xml__ ) , cts_file | Parses a textgroup from a cts file |
7,564 | def _parse_work ( self , cts_file , textgroup ) : with io . open ( cts_file ) as __xml__ : work , texts = self . classes [ "work" ] . parse ( resource = __xml__ , parent = textgroup , _with_children = True ) return work , texts , os . path . dirname ( cts_file ) | Parses a work from a cts file |
7,565 | def _parse_text ( self , text , directory ) : text_id , text_metadata = text . id , text text_metadata . path = "{directory}/{textgroup}.{work}.{version}.xml" . format ( directory = directory , textgroup = text_metadata . urn . textgroup , work = text_metadata . urn . work , version = text_metadata . urn . version ) if... | Complete the TextMetadata object with its citation scheme by parsing the original text |
7,566 | def _dispatch ( self , textgroup , directory ) : if textgroup . id in self . dispatcher . collection : self . dispatcher . collection [ textgroup . id ] . update ( textgroup ) else : self . dispatcher . dispatch ( textgroup , path = directory ) for work_urn , work in textgroup . works . items ( ) : if work_urn in self ... | Run the dispatcher over a textgroup . |
7,567 | def parse ( self , resource ) : textgroups = [ ] texts = [ ] invalids = [ ] for folder in resource : cts_files = glob ( "{base_folder}/data/*/__cts__.xml" . format ( base_folder = folder ) ) for cts_file in cts_files : textgroup , cts_file = self . _parse_textgroup ( cts_file ) textgroups . append ( ( textgroup , cts_f... | Parse a list of directories and reads it into a collection |
7,568 | def velocities_to_moduli ( rho , v_phi , v_s ) : return v_phi * v_phi * rho , v_s * v_s * rho | convert velocities to moduli mainly to support Burnman operations |
7,569 | def moduli_to_velocities ( rho , K_s , G ) : return np . sqrt ( K_s / rho ) , np . sqrt ( G / rho ) | convert moduli to velocities mainly to support Burnman operations |
7,570 | def jamieson_pst ( v , v0 , c0 , s , gamma0 , q , theta0 , n , z , mass , c_v , three_r = 3. * constants . R , t_ref = 300. ) : rho = mass / vol_uc2mol ( v , z ) * 1.e-6 rho0 = mass / vol_uc2mol ( v0 , z ) * 1.e-6 p_h = hugoniot_p ( rho , rho0 , c0 , s ) p_th_h = jamieson_pth ( v , v0 , c0 , s , gamma0 , q , theta0 , n... | calculate static pressure at 300 K from Hugoniot data using the constq formulation |
7,571 | def jamieson_pth ( v , v0 , c0 , s , gamma0 , q , theta0 , n , z , mass , c_v , three_r = 3. * constants . R , t_ref = 300. ) : rho = mass / vol_uc2mol ( v , z ) * 1.e-6 rho0 = mass / vol_uc2mol ( v0 , z ) * 1.e-6 temp = hugoniot_t ( rho , rho0 , c0 , s , gamma0 , q , theta0 , n , mass , three_r = three_r , t_ref = t_r... | calculate thermal pressure from Hugoniot data using the constq formulation |
7,572 | def hugoniot_p_nlin ( rho , rho0 , a , b , c ) : eta = 1. - ( rho0 / rho ) Up = np . zeros_like ( eta ) if isuncertainties ( [ rho , rho0 , a , b , c ] ) : Up [ eta != 0. ] = ( ( b * eta - 1. ) + unp . sqrt ( np . power ( ( 1. - b * eta ) , 2. ) - 4. * np . power ( eta , 2. ) * a * c ) ) / ( - 2. * eta * c ) else : Up ... | calculate pressure along a Hugoniot throug nonlinear equations presented in Jameison 1982 |
7,573 | def generate_address_label ( self ) : if self . organisation_name : self . address_label . append ( self . organisation_name ) if self . department_name : self . address_label . append ( self . department_name ) if self . po_box_number : self . address_label . append ( 'PO Box ' + self . po_box_number ) elements = [ se... | Construct a list for address label . |
7,574 | def _is_exception_rule ( self , element ) : if element [ 0 ] . isdigit ( ) and element [ - 1 ] . isdigit ( ) : return True if len ( element ) > 1 and element [ 0 ] . isdigit ( ) and element [ - 2 ] . isdigit ( ) and element [ - 1 ] . isalpha ( ) : return True if len ( element ) == 1 and element . isalpha ( ) : return T... | Check for exception rule . |
7,575 | def _append_to_label ( self , element ) : if len ( self . address_label ) > 0 and self . _is_exception_rule ( self . address_label [ - 1 ] ) : self . address_label [ - 1 ] += ( ' ' + element ) else : self . address_label . append ( element ) | Append address element to the label . |
7,576 | def load_template_source ( template_name , template_dirs = None ) : template_zipfiles = getattr ( settings , "TEMPLATE_ZIP_FILES" , [ ] ) for fname in template_zipfiles : try : z = zipfile . ZipFile ( fname ) source = z . read ( template_name ) except ( IOError , KeyError ) : continue z . close ( ) template_path = "%s:... | Template loader that loads templates from a ZIP file . |
7,577 | def sanitize_capabilities ( caps ) : platform = caps [ "platform" ] upper_platform = platform . upper ( ) if upper_platform . startswith ( "WINDOWS 8" ) : caps [ "platform" ] = "WIN8" elif upper_platform . startswith ( "OS X " ) : caps [ "platform" ] = "MAC" elif upper_platform == "WINDOWS 10" : del caps [ "platform" ]... | Sanitize the capabilities we pass to Selenic so that they can be consumed by Browserstack . |
7,578 | def my_func ( version ) : class MyClass ( object ) : if version == 2 : import docs . support . python2_module as pm else : import docs . support . python3_module as pm def __init__ ( self , value ) : self . _value = value def _get_value ( self ) : return self . _value value = property ( _get_value , pm . _set_value , N... | Enclosing function . |
7,579 | def get_subscriptions ( self , publication_id = None , owner_id = None , since_when = None , limit_to = 200 , max_calls = None , start_record = 0 , verbose = False ) : query = "SELECT Objects() FROM Subscription" where_params = [ ] if owner_id : where_params . append ( ( 'owner' , '=' , "'%s'" % owner_id ) ) if publica... | Fetches all subscriptions from Membersuite of a particular publication_id if set . |
7,580 | def get_prep_value ( self , value ) : if isinstance ( value , JSON . JsonDict ) : return json . dumps ( value , cls = JSON . Encoder ) if isinstance ( value , JSON . JsonList ) : return value . json_string if isinstance ( value , JSON . JsonString ) : return json . dumps ( value ) return value | The psycopg adaptor returns Python objects but we also have to handle conversion ourselves |
7,581 | def registry ( attr , base = type ) : class Registry ( base ) : def __init__ ( cls , name , bases , attrs ) : super ( Registry , cls ) . __init__ ( name , bases , attrs ) if not hasattr ( cls , '__registry__' ) : cls . __registry__ = { } key = getattr ( cls , attr ) if key is not NotImplemented : assert key not in cls ... | Generates a meta class to index sub classes by their keys . |
7,582 | def debug_generate ( self , debug_generator , * gen_args , ** gen_kwargs ) : if self . isEnabledFor ( logging . DEBUG ) : message = debug_generator ( * gen_args , ** gen_kwargs ) if message is not None : return self . debug ( message ) | Used for efficient debug logging where the actual message isn t evaluated unless it will actually be accepted by the logger . |
7,583 | def verify_token ( token , public_key_or_address , signing_algorithm = "ES256K" ) : decoded_token = decode_token ( token ) decoded_token_payload = decoded_token [ "payload" ] if "subject" not in decoded_token_payload : raise ValueError ( "Token doesn't have a subject" ) if "publicKey" not in decoded_token_payload [ "su... | A function for validating an individual token . |
7,584 | def verify_token_record ( token_record , public_key_or_address , signing_algorithm = "ES256K" ) : if "token" not in token_record : raise ValueError ( "Token record must have a token inside it" ) token = token_record [ "token" ] decoded_token = verify_token ( token , public_key_or_address , signing_algorithm = signing_a... | A function for validating an individual token record and extracting the decoded token . |
7,585 | def get_profile_from_tokens ( token_records , public_key_or_address , hierarchical_keys = False ) : if hierarchical_keys : raise NotImplementedError ( "Hierarchical key support not implemented" ) profile = { } for token_record in token_records : try : decoded_token = verify_token_record ( token_record , public_key_or_a... | A function for extracting a profile from a list of tokens . |
7,586 | def resolve_zone_file_to_profile ( zone_file , address_or_public_key ) : if is_profile_in_legacy_format ( zone_file ) : return zone_file try : token_file_url = get_token_file_url_from_zone_file ( zone_file ) except Exception as e : raise Exception ( "Token file URL could not be extracted from zone file" ) try : r = req... | Resolves a zone file to a profile and checks to makes sure the tokens are signed with a key that corresponds to the address or public key provided . |
7,587 | def __dog_started ( self ) : if self . __task is not None : raise RuntimeError ( 'Unable to start task. In order to start a new task - at first stop it' ) self . __task = self . record ( ) . task ( ) if isinstance ( self . __task , WScheduleTask ) is False : task_class = self . __task . __class__ . __qualname__ raise R... | Prepare watchdog for scheduled task starting |
7,588 | def __thread_started ( self ) : if self . __task is None : raise RuntimeError ( 'Unable to start thread without "start" method call' ) self . __task . start ( ) self . __task . start_event ( ) . wait ( self . __scheduled_task_startup_timeout__ ) | Start a scheduled task |
7,589 | def _polling_iteration ( self ) : if self . __task is None : self . ready_event ( ) . set ( ) elif self . __task . check_events ( ) is True : self . ready_event ( ) . set ( ) self . registry ( ) . task_finished ( self ) | Poll for scheduled task stop events |
7,590 | def thread_stopped ( self ) : if self . __task is not None : if self . __task . stop_event ( ) . is_set ( ) is False : self . __task . stop ( ) self . __task = None | Stop scheduled task beacuse of watchdog stop |
7,591 | def stop_running_tasks ( self ) : for task in self . __running_registry : task . stop ( ) self . __running_registry . clear ( ) | Terminate all the running tasks |
7,592 | def add_source ( self , task_source ) : next_start = task_source . next_start ( ) self . __sources [ task_source ] = next_start self . __update ( task_source ) | Add new tasks source |
7,593 | def __update_all ( self ) : self . __next_start = None self . __next_sources = [ ] for source in self . __sources : self . __update ( source ) | Recheck next start of records from all the sources |
7,594 | def __update ( self , task_source ) : next_start = task_source . next_start ( ) if next_start is not None : if next_start . tzinfo is None or next_start . tzinfo != timezone . utc : raise ValueError ( 'Invalid timezone information' ) if self . __next_start is None or next_start < self . __next_start : self . __next_sta... | Recheck next start of tasks from the given one only |
7,595 | def check ( self ) : if self . __next_start is not None : utc_now = utc_datetime ( ) if utc_now >= self . __next_start : result = [ ] for task_source in self . __next_sources : records = task_source . has_records ( ) if records is not None : result . extend ( records ) self . __update_all ( ) if len ( result ) > 0 : re... | Check if there are records that are ready to start and return them if there are any |
7,596 | def thread_started ( self ) : self . __running_record_registry . start ( ) self . __running_record_registry . start_event ( ) . wait ( ) WPollingThreadTask . thread_started ( self ) | Start required registries and start this scheduler |
7,597 | def dir_contains ( dirname , path , exists = True ) : if exists : dirname = osp . abspath ( dirname ) path = osp . abspath ( path ) if six . PY2 or six . PY34 : return osp . exists ( path ) and osp . samefile ( osp . commonprefix ( [ dirname , path ] ) , dirname ) else : return osp . samefile ( osp . commonpath ( [ dir... | Check if a file of directory is contained in another . |
7,598 | def get_next_name ( old , fmt = '%i' ) : nums = re . findall ( '\d+' , old ) if not nums : raise ValueError ( "Could not get the next name because the old name " "has no numbers in it" ) num0 = nums [ - 1 ] num1 = str ( int ( num0 ) + 1 ) return old [ : : - 1 ] . replace ( num0 [ : : - 1 ] , num1 [ : : - 1 ] , 1 ) [ : ... | Return the next name that numerically follows old |
7,599 | def go_through_dict ( key , d , setdefault = None ) : patt = re . compile ( r'(?<!\\)\.' ) sub_d = d splitted = patt . split ( key ) n = len ( splitted ) for i , k in enumerate ( splitted ) : if i < n - 1 : if setdefault is not None : sub_d = sub_d . setdefault ( k , setdefault ( ) ) else : sub_d = sub_d [ k ] else : r... | Split up the key by . and get the value from the base dictionary d |
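The header row above describes each record's schema: an integer `idx`, a `question` field holding the function source (displayed here as space-separated tokens, truncated for preview), and a `target` field holding its one-line summary. Below is a minimal sketch of consuming records with that schema; the record shape is inferred from the header, and the sample data is hypothetical.

```python
# Minimal sketch: iterate over records with the schema shown above
# (idx: int, question: source-code string, target: summary string).
# The sample record below is hypothetical; a real dataset would supply them.

from typing import Iterable, TypedDict


class Record(TypedDict):
    idx: int
    question: str   # function source, displayed as space-separated tokens
    target: str     # one-line natural-language summary of the function


def summarize_lengths(records: Iterable[Record]) -> None:
    """Print basic length statistics for the question/target columns."""
    n = 0
    q_total = t_total = 0
    for rec in records:
        n += 1
        q_total += len(rec["question"])
        t_total += len(rec["target"])
    if n:
        print(f"{n} records; avg question length {q_total / n:.0f} chars, "
              f"avg target length {t_total / n:.0f} chars")


if __name__ == "__main__":
    sample: list[Record] = [
        {"idx": 7500,
         "question": "async def wait ( self ) -> None : ...",
         "target": "Coroutine which returns when this token has been triggered"},
    ]
    summarize_lengths(sample)
```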