idx int64 0 251k | question stringlengths 53 3.53k | target stringlengths 5 1.23k | len_question int64 20 893 | len_target int64 3 238 |
|---|---|---|---|---|
15,600 | def create_command_history_subscription ( self , on_data = None , timeout = 60 ) : return self . _client . create_command_history_subscription ( issued_command = self , on_data = on_data , timeout = timeout ) | Create a new command history subscription for this command . | 56 | 10 |
15,601 | def acknowledged_by ( self ) : if ( self . is_acknowledged and self . _proto . acknowledgeInfo . HasField ( 'acknowledgedBy' ) ) : return self . _proto . acknowledgeInfo . acknowledgedBy return None | Username of the acknowledger . | 53 | 7 |
15,602 | def acknowledge_message ( self ) : if ( self . is_acknowledged and self . _proto . acknowledgeInfo . HasField ( 'acknowledgeMessage' ) ) : return self . _proto . acknowledgeInfo . acknowledgeMessage return None | Comment provided when acknowledging the alarm . | 53 | 7 |
15,603 | def acknowledge_time ( self ) : if ( self . is_acknowledged and self . _proto . acknowledgeInfo . HasField ( 'acknowledgeTime' ) ) : return parse_isostring ( self . _proto . acknowledgeInfo . acknowledgeTime ) return None | Processor time when the alarm was acknowledged . | 60 | 9 |
15,604 | def name ( self ) : if self . _proto . id . namespace : return self . _proto . id . namespace + '/' + self . _proto . id . name return self . _proto . id . name | An identifying name for the parameter value . Typically this is the fully - qualified XTCE name but it may also be an alias depending on how the parameter update was requested . | 50 | 35 |
15,605 | def validity_duration ( self ) : if self . _proto . HasField ( 'expireMillis' ) : return timedelta ( milliseconds = self . _proto . expireMillis ) return None | How long this parameter value is valid . | 44 | 8 |
15,606 | def range_condition ( self ) : if self . _proto . HasField ( 'rangeCondition' ) : return pvalue_pb2 . RangeCondition . Name ( self . _proto . rangeCondition ) return None | If the value is out of limits this indicates LOW or HIGH . | 47 | 13 |
15,607 | def reversebait ( self , maskmiddle = 'f' , k = 19 ) : logging . info ( 'Performing reverse kmer baiting of targets with FASTQ files' ) if self . kmer_size is None : kmer = k else : kmer = self . kmer_size with progressbar ( self . runmetadata ) as bar : for sample in bar : if sample . general . bestassemblyfile != 'NA' and sample [ self . analysistype ] . runanalysis : outfile = os . path . join ( sample [ self . analysistype ] . outputdir , 'baitedtargets.fa' ) sample [ self . analysistype ] . revbbdukcmd = 'bbduk.sh -Xmx{mem} ref={ref} in={in1} k={kmer} threads={cpus} mincovfraction={mcf} ' 'maskmiddle={mm} outm={outm}' . format ( mem = self . mem , ref = sample [ self . analysistype ] . baitedfastq , in1 = sample [ self . analysistype ] . baitfile , kmer = kmer , cpus = str ( self . cpus ) , mcf = self . cutoff , mm = maskmiddle , outm = outfile ) # Run the system call (if necessary) if not os . path . isfile ( outfile ) : out , err = run_subprocess ( sample [ self . analysistype ] . revbbdukcmd ) write_to_logfile ( sample [ self . analysistype ] . bbdukcmd , sample [ self . analysistype ] . bbdukcmd , self . logfile , sample . general . logout , sample . general . logerr , sample [ self . analysistype ] . logout , sample [ self . analysistype ] . logerr ) write_to_logfile ( out , err , self . logfile , sample . general . logout , sample . general . logerr , sample [ self . analysistype ] . logout , sample [ self . analysistype ] . logerr ) # Set the baitfile to use in the mapping steps as the newly created outfile sample [ self . analysistype ] . baitfile = outfile | Use the freshly - baited FASTQ files to bait out sequence from the original target files . This will reduce the number of possibly targets against which the baited reads must be aligned | 493 | 37 |
15,608 | def clipper ( self ) : for sample in self . runmetadata : # Create a dictionary to store all the samples that do not have features replacementresults = dict ( ) try : # SixteenS analyses seem to fail if results are filtered out if self . analysistype != 'sixteens_full' and self . analysistype != 'resfinder' : # Iterate through all the baited genes for gene in sample [ self . analysistype ] . faidict : try : percentidentity = sample [ self . analysistype ] . results [ gene ] try : # Create a list to store whether a feature is present in enough reads to discard the # sample passingfeature = list ( ) for location , feature in sample [ self . analysistype ] . features [ gene ] . items ( ) : # If the feature is present in under 30% of the reads, set the passing variable # to true if len ( feature ) < int ( float ( sample [ self . analysistype ] . avgdepth [ gene ] ) ) * 0.3 : passingfeature . append ( True ) # Otherwise set it to false else : passingfeature . append ( False ) # If all the features are 'true' (present in fewer than 30% of the reads), add this # contig to the list of passing results if all ( passingfeature ) : replacementresults [ gene ] = percentidentity # If the allele does not have any features, it is added to the passing list except KeyError : replacementresults [ gene ] = percentidentity except KeyError : pass # Update the .results attribute with the filtered dictionary sample [ self . analysistype ] . results = replacementresults except AttributeError : pass | Filter out results based on the presence of cigar features such as internal soft - clipping | 355 | 16 |
15,609 | def main ( ) : reporter = BugReporter ( ) print ( "JSON report:" ) print ( reporter . as_json ( ) ) print ( ) print ( "Markdown report:" ) print ( reporter . as_markdown ( ) ) print ( "SQL report:" ) print ( reporter . as_sql ( ) ) print ( "Choose the appropriate format (if you're submitting a Github Issue " "please chose the Markdown report) and paste it!" ) | Pretty - print the bug information as JSON | 97 | 8 |
15,610 | def get_platform_info ( ) : try : system_name = platform . system ( ) release_name = platform . release ( ) except : system_name = "Unknown" release_name = "Unknown" return { 'system' : system_name , 'release' : release_name , } | Gets platform info | 64 | 4 |
15,611 | def get_bug_report ( ) : platform_info = BugReporter . get_platform_info ( ) module_info = { 'version' : hal_version . __version__ , 'build' : hal_version . __build__ } return { 'platform' : platform_info , 'pyhal' : module_info } | Generate information for a bug report | 72 | 7 |
15,612 | def to_isostring ( dt ) : if dt . tzinfo is not None and dt . tzinfo . utcoffset ( dt ) > timedelta ( 0 ) : logging . warn ( 'Warning: aware datetimes are interpreted as if they were naive' ) # -3 to change microseconds to milliseconds return dt . strftime ( '%Y-%m-%dT%H:%M:%S.%f' ) [ : - 3 ] + 'Z' | Converts the given datetime to an ISO String . This assumes the datetime is UTC . | 111 | 19 |
15,613 | def parse_value ( proto ) : if proto . HasField ( 'floatValue' ) : return proto . floatValue elif proto . HasField ( 'doubleValue' ) : return proto . doubleValue elif proto . HasField ( 'sint32Value' ) : return proto . sint32Value elif proto . HasField ( 'uint32Value' ) : return proto . uint32Value elif proto . HasField ( 'binaryValue' ) : return proto . binaryValue elif proto . HasField ( 'timestampValue' ) : # Don't use the actual 'timestampValue' field, it contains a number # that is difficult to interpret on the client. Instead parse from # the ISO String also set by Yamcs. return parse_isostring ( proto . stringValue ) elif proto . HasField ( 'stringValue' ) : return proto . stringValue elif proto . HasField ( 'uint64Value' ) : return proto . uint64Value elif proto . HasField ( 'sint64Value' ) : return proto . sint64Value elif proto . HasField ( 'booleanValue' ) : return proto . booleanValue elif proto . HasField ( 'arrayValue' ) : return [ parse_value ( v ) for v in proto . arrayValue ] elif proto . HasField ( 'aggregateValue' ) : return OrderedDict ( zip ( proto . aggregateValue . name , proto . aggregateValue . value ) ) else : logging . warn ( 'Unrecognized value type for update %s' , proto ) return None | Convers a Protobuf Value from the API into a python native value | 336 | 15 |
15,614 | def create_correlation_matrix_plot ( correlation_matrix , title , feature_list ) : chart = SimpleChart ( title ) ax1 = chart . get_ax ( ) ax1 . set_xticks ( list ( range ( len ( feature_list ) ) ) ) ax1 . set_xticklabels ( [ feature_list [ i ] for i in range ( len ( feature_list ) ) ] , rotation = 90 ) ax1 . set_yticks ( list ( range ( len ( feature_list ) ) ) ) ax1 . set_yticklabels ( [ feature_list [ i ] for i in range ( len ( feature_list ) ) ] ) cax = ax1 . imshow ( correlation_matrix , interpolation = "nearest" , cmap = cm . get_cmap ( "jet" , 30 ) ) chart . get_fig ( ) . colorbar ( cax , ticks = np . linspace ( - 1 , 1 , 21 ) ) plt . gcf ( ) . subplots_adjust ( bottom = 0.25 ) | Creates plot for correlation matrix | 237 | 6 |
15,615 | def log_every_x_times ( logger , counter , x , msg , * args , * * kwargs ) : if counter == 1 or counter % x == 0 : #msg = msg + (' (counter %i)' % counter) logdebug ( logger , msg , * args , * * kwargs ) | Works like logdebug but only prints first and and every xth message . | 68 | 15 |
15,616 | def get_dataframe ( self , * args , * * kwargs ) : columns = kwargs . get ( 'columns' ) if columns : del kwargs [ 'columns' ] else : columns = self . default_titles return pd . DataFrame ( self . get_data ( * args , * * kwargs ) , columns = columns ) | Retrieve data as a Pandas dataframe . | 81 | 10 |
15,617 | def grabImage ( self , index ) : # grab an image of the cell we are moving # assume all rows same height row_height = self . rowHeight ( 0 ) # -5 becuase it a a little off y = ( row_height * index . row ( ) ) + row_height - 5 x = self . width ( ) rect = QtCore . QRect ( 5 , y , x , row_height ) pixmap = QtGui . QPixmap ( ) pixmap = pixmap . grabWidget ( self , rect ) return pixmap | Returns an image of the parameter row . | 124 | 8 |
15,618 | def mousePressEvent ( self , event ) : index = self . indexAt ( event . pos ( ) ) if index . isValid ( ) : self . selectRow ( index . row ( ) ) # selecting the row sets the current index to 0,0 for tab # order to work correctly, we must set the current index self . setCurrentIndex ( index ) self . parameterChanged . emit ( self . model ( ) . selection ( index ) ) self . edit ( index , QtGui . QAbstractItemView . DoubleClicked , event ) super ( AutoParameterTableView , self ) . mousePressEvent ( event ) | Begins edit on cell clicked if allowed and passes event to super class | 131 | 14 |
15,619 | def request ( self , path , method , data = None , * * kwargs ) : if self . api_token : self . request_headers [ 'X-Cachet-Token' ] = self . api_token if not path . startswith ( 'http://' ) and not path . startswith ( 'https://' ) : url = "%s/%s" % ( self . api_endpoint , path ) else : url = path if data is None : data = { } response = self . r_session . request ( method , url , data = json . dumps ( data ) , headers = self . request_headers , timeout = self . timeout , verify = self . verify , * * kwargs ) # If API returns an error, we simply raise and let caller handle it response . raise_for_status ( ) try : return response . json ( ) except ValueError : return { 'data' : response . text } | Handle requests to API | 204 | 4 |
15,620 | def paginate_request ( self , path , method , data = None , * * kwargs ) : next_page = path while next_page : response = self . request ( next_page , method , data = data , * * kwargs ) if not isinstance ( response . get ( 'data' ) , list ) : next_page = None yield response [ 'data' ] else : for entry in response [ 'data' ] : yield entry # Get next page if it exists try : links = response [ 'meta' ] [ 'pagination' ] [ 'links' ] next_page = links . get ( 'next_page' ) except KeyError : next_page = None | Handle paginated requests to API | 150 | 6 |
15,621 | def maybe_open ( infile , mode = 'r' ) : # ENH: Exception safety? if isinstance ( infile , basestring ) : handle = open ( infile , mode ) do_close = True else : handle = infile do_close = False yield handle if do_close : handle . close ( ) | Take a file name or a handle and return a handle . | 71 | 12 |
15,622 | def _get_row_tag ( row , tag ) : is_empty = True data = [ ] for column_label in row . find_all ( tag ) : # cycle through all labels data . append ( String ( column_label . text ) . strip_bad_html ( ) ) if data [ - 1 ] : is_empty = False if not is_empty : return data return None | Parses row and gets columns matching tag | 84 | 9 |
15,623 | def _parse_row ( row ) : data = [ ] labels = HtmlTable . _get_row_tag ( row , "th" ) if labels : data += labels columns = HtmlTable . _get_row_tag ( row , "td" ) if columns : data += columns return data | Parses HTML row | 65 | 5 |
15,624 | def parse ( self ) : data = [ ] # add name of section for row in self . soup . find_all ( "tr" ) : # cycle through all rows parsed = self . _parse_row ( row ) if parsed : data . append ( parsed ) return data | Parses data in table | 58 | 6 |
15,625 | def delete_module ( modname ) : try : _ = sys . modules [ modname ] except KeyError : raise ValueError ( "Module not found in sys.modules: '{}'" . format ( modname ) ) for module in list ( sys . modules . keys ( ) ) : if module and module . startswith ( modname ) : del sys . modules [ module ] | Delete module and sub - modules from sys . module | 82 | 10 |
15,626 | def reload_module ( module ) : try : # For Python 2.x reload ( module ) except ( ImportError , NameError ) : # For <= Python3.3: import imp imp . reload ( module ) except ( ImportError , NameError ) : # For >= Python3.4 import importlib importlib . reload ( module ) | Reload the Python module | 71 | 5 |
15,627 | def lazy_load_modules ( * modules ) : def decorator ( function ) : def wrapper ( * args , * * kwargs ) : module_dict = { } for module_string in modules : module = __import__ ( module_string ) # Add `module` entry in `sys.modules`. After deleting the module # from `sys.modules` and re-importing the module don't update # the module entry in `sys.modules` dict sys . modules [ module . __package__ ] = module reload_module ( module ) module_dict [ module_string ] = module func_response = function ( * args , * * kwargs ) for module_string , module in module_dict . items ( ) : # delete idna module delete_module ( module_string ) del module # delete reference to idna return func_response return wrapper return decorator | Decorator to load module to perform related operation for specific function and delete the module from imports once the task is done . GC frees the memory related to module during clean - up . | 186 | 38 |
15,628 | def format ( self , record ) : if record . levelno == DEBUG : return self . debug_formatter . format ( record ) if record . levelno == INFO : return self . info_formatter . format ( record ) if record . levelno == ERROR : return self . error_formatter . format ( record ) if record . levelno == WARNING : return self . warning_formatter . format ( record ) if record . levelno == CRITICAL : return self . critical_formatter . format ( record ) | Format the record using the corresponding formatter . | 110 | 9 |
15,629 | def load_stylesheet ( self , id , path ) : self . add_child ( HeadLink ( id = id , link_type = "stylesheet" , path = path ) ) | Proper way to dynamically inject a stylesheet in a page . | 40 | 13 |
15,630 | def add_child ( self , widget ) : li_itm = _li ( id = self . id + str ( self . _count ) ) li_itm . add_child ( widget ) super ( List , self ) . add_child ( li_itm ) self . _items . append ( ( widget , li_itm ) ) self . _count += 1 | Append a widget to the list . | 81 | 8 |
15,631 | def remove_child ( self , widget ) : raw = list ( filter ( lambda x : x [ 0 ] == widget , self . _items ) ) if raw : itm , wrapped = raw [ 0 ] self . _items . remove ( raw [ 0 ] ) super ( List , self ) . remove_child ( wrapped ) else : raise ValueError ( "Child not in list." ) | Remove a widget from the list . | 82 | 7 |
15,632 | def move_page ( request , page_id , extra_context = None ) : page = Page . objects . get ( pk = page_id ) target = request . POST . get ( 'target' , None ) position = request . POST . get ( 'position' , None ) if target is not None and position is not None : try : target = Page . objects . get ( pk = target ) except Page . DoesNotExist : pass # TODO: should use the django message system # to display this message # _('Page could not been moved.') else : page . invalidate ( ) target . invalidate ( ) from mptt . exceptions import InvalidMove invalid_move = False try : page . move_to ( target , position ) except InvalidMove : invalid_move = True return list_pages_ajax ( request , invalid_move ) return HttpResponseRedirect ( '../../' ) | Move the page to the requested target at the given position . | 197 | 12 |
15,633 | def reloc_var ( var_name , reloc_delta , pointer , var_type ) : template = '{0} {3}{1} = RELOC_VAR(_{1}, {2}, {0});\n' return template . format ( var_type , var_name , reloc_delta , '*' if pointer else '' ) | Build C source code to relocate a variable . | 80 | 9 |
15,634 | def make_c_args ( arg_pairs ) : logging . debug ( arg_pairs ) c_args = [ '{} {}' . format ( arg_type , arg_name ) if arg_name else arg_type for dummy_number , arg_type , arg_name in sorted ( arg_pairs ) ] return ', ' . join ( c_args ) | Build a C argument list from return type and arguments pairs . | 82 | 12 |
15,635 | def interop_parse ( self ) : # Parse the files and load the data try : run_metrics = py_interop_run_metrics . run_metrics ( ) valid_to_load = py_interop_run . uchar_vector ( py_interop_run . MetricCount , 0 ) py_interop_run_metrics . list_summary_metrics_to_load ( valid_to_load ) run_metrics . read ( self . path , valid_to_load ) summary = py_interop_summary . run_summary ( ) py_interop_summary . summarize_run_metrics ( run_metrics , summary ) # PhiX error rate for run over all "usable cycles" errorrate = summary . total_summary ( ) . error_rate ( ) # Percent aligned PhiX pctaligned = summary . total_summary ( ) . percent_aligned ( ) # Add the error rate and the percent of reads that align to PhiX to the metadata object for sample in self . metadata : sample . run . error_rate = '{:.2f}' . format ( errorrate ) sample . run . phix_aligned = '{:.2f}' . format ( pctaligned ) except : for sample in self . metadata : sample . run . error_rate = 'ND' sample . run . phix_aligned = 'ND' | Use interop to parse the files in the InterOp folder to extract the number of reads mapping to PhiX as well as the error rate | 306 | 28 |
15,636 | def make_inc ( incs ) : inc_args = [ [ '/I' , inc ] for inc in incs ] return list ( chain . from_iterable ( inc_args ) ) | Make include directory for link . exe . | 42 | 9 |
15,637 | def make_objs ( names , out_dir = '' ) : objs = [ replace_ext ( name , '.obj' ) for name in names ] if out_dir : objs = [ os . path . join ( out_dir , obj ) for obj in objs ] return objs | Make object file names for cl . exe and link . exe . | 64 | 15 |
15,638 | def examples ( ) : sci = InterLexClient ( api_key = os . environ . get ( 'INTERLEX_API_KEY' ) , base_url = 'https://beta.scicrunch.org/api/1/' , # NEVER CHANGE ) entity = { 'label' : 'brain115' , 'type' : 'fde' , # broken at the moment NEEDS PDE HARDCODED 'definition' : 'Part of the central nervous system' , 'comment' : 'Cannot live without it' , 'superclass' : { 'ilx_id' : 'ilx_0108124' , # ILX ID for Organ } , 'synonyms' : [ { 'literal' : 'Encephalon' } , { 'literal' : 'Cerebro' } , ] , 'existing_ids' : [ { 'iri' : 'http://uri.neuinfo.org/nif/nifstd/birnlex_796' , 'curie' : 'BIRNLEX:796' , } , ] , } simple_entity = { 'label' : entity [ 'label' ] , 'type' : entity [ 'type' ] , # broken at the moment NEEDS PDE HARDCODED 'definition' : entity [ 'definition' ] , 'comment' : entity [ 'comment' ] , 'superclass' : entity [ 'superclass' ] [ 'ilx_id' ] , 'synonyms' : [ syn [ 'literal' ] for syn in entity [ 'synonyms' ] ] , 'predicates' : { 'tmp_0381624' : 'http://example_dbxref' } } annotation = { 'term_ilx_id' : 'ilx_0101431' , # brain ILX ID 'annotation_type_ilx_id' : 'tmp_0381624' , # hasDbXref ILX ID 'annotation_value' : 'PMID:12345' , } relationship = { 'entity1_ilx' : 'ilx_0101431' , # brain 'relationship_ilx' : 'ilx_0115023' , # Related to 'entity2_ilx' : 'ilx_0108124' , #organ } update_entity_data = { 'ilx_id' : 'ilx_0101431' , 'label' : 'Brain' , 'definition' : 'update_test!!' 
, 'type' : 'fde' , 'comment' : 'test comment' , 'superclass' : 'ilx_0108124' , 'synonyms' : [ 'test' , 'test2' , 'test2' ] , } # resp = sci.delete_annotation(**{ # 'term_ilx_id': 'ilx_0101431', # brain ILX ID # 'annotation_type_ilx_id': 'ilx_0115071', # hasConstraint ILX ID # 'annotation_value': 'test_12345', # }) relationship = { 'entity1_ilx' : 'http://uri.interlex.org/base/ilx_0100001' , # (R)N6 chemical ILX ID 'relationship_ilx' : 'http://uri.interlex.org/base/ilx_0112772' , # Afferent projection ILX ID 'entity2_ilx' : 'http://uri.interlex.org/base/ilx_0100000' , #1,2-Dibromo chemical ILX ID } | Examples of how to use . Default are that some functions are commented out in order to not cause harm to existing metadata within the database . | 798 | 27 |
15,639 | def process_response ( self , response : requests . models . Response ) -> dict : try : output = response . json ( ) except json . JSONDecodeError : # Server is having a bad day and crashed. raise self . BadResponseError ( 'Json not returned with status code [' + str ( response . status_code ) + ']' ) if response . status_code == 400 : return output if response . status_code not in [ 200 , 201 ] : # Safety catch. raise self . BadResponseError ( str ( output ) + ': with status code [' + str ( response . status_code ) + '] and params:' + str ( output ) ) return output [ 'data' ] | Checks for correct data response and status codes | 149 | 9 |
15,640 | def process_superclass ( self , entity : List [ dict ] ) -> List [ dict ] : superclass = entity . pop ( 'superclass' ) label = entity [ 'label' ] if not superclass . get ( 'ilx_id' ) : raise self . SuperClassDoesNotExistError ( f'Superclass not given an interlex ID for label: {label}' ) superclass_data = self . get_entity ( superclass [ 'ilx_id' ] ) if not superclass_data [ 'id' ] : raise self . SuperClassDoesNotExistError ( 'Superclass ILX ID: ' + superclass [ 'ilx_id' ] + ' does not exist in SciCrunch' ) # BUG: only excepts superclass_tid entity [ 'superclasses' ] = [ { 'superclass_tid' : superclass_data [ 'id' ] } ] return entity | Replaces ILX ID with superclass ID | 202 | 9 |
15,641 | def check_scicrunch_for_label ( self , label : str ) -> dict : list_of_crude_matches = self . crude_search_scicrunch_via_label ( label ) for crude_match in list_of_crude_matches : # If labels match if crude_match [ 'label' ] . lower ( ) . strip ( ) == label . lower ( ) . strip ( ) : complete_data_of_crude_match = self . get_entity ( crude_match [ 'ilx' ] ) crude_match_label = crude_match [ 'label' ] crude_match_user_id = complete_data_of_crude_match [ 'uid' ] # If label was created by you if str ( self . user_id ) == str ( crude_match_user_id ) : return complete_data_of_crude_match # You created the entity already # No label AND user id match return { } | Sees if label with your user ID already exists | 214 | 10 |
15,642 | def add_raw_entity ( self , entity : dict ) -> dict : needed_in_entity = set ( [ 'label' , 'type' , ] ) options_in_entity = set ( [ 'label' , 'type' , 'definition' , 'comment' , 'superclass' , 'synonyms' , 'existing_ids' ] ) prime_entity_url = self . base_url + 'ilx/add' add_entity_url = self . base_url + 'term/add' ### Checking if key/value format is correct ### # Seeing if you are missing a needed key if ( set ( entity ) & needed_in_entity ) != needed_in_entity : raise self . MissingKeyError ( 'You need key(s): ' + str ( needed_in_entity - set ( entity ) ) ) # Seeing if you have other options not included in the description elif ( set ( entity ) | options_in_entity ) != options_in_entity : raise self . IncorrectKeyError ( 'Unexpected key(s): ' + str ( set ( entity ) - options_in_entity ) ) entity [ 'type' ] = entity [ 'type' ] . lower ( ) # BUG: server only takes lowercase if entity [ 'type' ] not in [ 'term' , 'relationship' , 'annotation' , 'cde' , 'fde' , 'pde' ] : raise TypeError ( 'Entity should be one of the following: ' + 'term, relationship, annotation, cde, fde, pde' ) if entity . get ( 'superclass' ) : entity = self . process_superclass ( entity ) if entity . get ( 'synonyms' ) : entity = self . process_synonyms ( entity ) if entity . get ( 'existing_ids' ) : entity = self . process_existing_ids ( entity ) entity [ 'uid' ] = self . user_id # BUG: php lacks uid update ### Adding entity to SciCrunch ### entity [ 'term' ] = entity . pop ( 'label' ) # ilx/add nuance ilx_data = self . post ( url = prime_entity_url , data = entity . copy ( ) , ) # requesting spot in server for entity if ilx_data . get ( 'ilx' ) : ilx_id = ilx_data [ 'ilx' ] else : ilx_id = ilx_data [ 'fragment' ] # beta.scicrunch.org entity [ 'label' ] = entity . pop ( 'term' ) # term/add nuance entity [ 'ilx' ] = ilx_id # need entity ilx_id to place entity in db output = self . post ( url = add_entity_url , data = entity . 
copy ( ) , ) # data represented in SciCrunch interface ### Checking if label already exisits ### if output . get ( 'errormsg' ) : if 'already exists' in output [ 'errormsg' ] . lower ( ) : prexisting_data = self . check_scicrunch_for_label ( entity [ 'label' ] ) if prexisting_data : print ( 'You already added entity' , entity [ 'label' ] , 'with ILX ID:' , prexisting_data [ 'ilx' ] ) return prexisting_data self . Error ( output ) # FIXME what is the correct error here? self . Error ( output ) # FIXME what is the correct error here? # BUG: server output incomplete compared to search via ilx ids output = self . get_entity ( output [ 'ilx' ] ) return output | Adds entity if it does not already exist under your user ID . | 803 | 13 |
15,643 | def add_annotation ( self , term_ilx_id : str , annotation_type_ilx_id : str , annotation_value : str ) -> dict : url = self . base_url + 'term/add-annotation' term_data = self . get_entity ( term_ilx_id ) if not term_data [ 'id' ] : exit ( 'term_ilx_id: ' + term_ilx_id + ' does not exist' ) anno_data = self . get_entity ( annotation_type_ilx_id ) if not anno_data [ 'id' ] : exit ( 'annotation_type_ilx_id: ' + annotation_type_ilx_id + ' does not exist' ) data = { 'tid' : term_data [ 'id' ] , 'annotation_tid' : anno_data [ 'id' ] , 'value' : annotation_value , 'term_version' : term_data [ 'version' ] , 'annotation_term_version' : anno_data [ 'version' ] , 'orig_uid' : self . user_id , # BUG: php lacks orig_uid update } output = self . post ( url = url , data = data , ) ### If already exists, we return the actual annotation properly ### if output . get ( 'errormsg' ) : if 'already exists' in output [ 'errormsg' ] . lower ( ) : term_annotations = self . get_annotation_via_tid ( term_data [ 'id' ] ) for term_annotation in term_annotations : if str ( term_annotation [ 'annotation_tid' ] ) == str ( anno_data [ 'id' ] ) : if term_annotation [ 'value' ] == data [ 'value' ] : print ( 'Annotation: [' + term_data [ 'label' ] + ' -> ' + anno_data [ 'label' ] + ' -> ' + data [ 'value' ] + '], already exists.' ) return term_annotation exit ( output ) exit ( output ) return output | Adding an annotation value to a prexisting entity | 476 | 10 |
15,644 | def delete_annotation ( self , term_ilx_id : str , annotation_type_ilx_id : str , annotation_value : str ) -> dict : term_data = self . get_entity ( term_ilx_id ) if not term_data [ 'id' ] : exit ( 'term_ilx_id: ' + term_ilx_id + ' does not exist' ) anno_data = self . get_entity ( annotation_type_ilx_id ) if not anno_data [ 'id' ] : exit ( 'annotation_type_ilx_id: ' + annotation_type_ilx_id + ' does not exist' ) entity_annotations = self . get_annotation_via_tid ( term_data [ 'id' ] ) annotation_id = '' for annotation in entity_annotations : if str ( annotation [ 'tid' ] ) == str ( term_data [ 'id' ] ) : if str ( annotation [ 'annotation_tid' ] ) == str ( anno_data [ 'id' ] ) : if str ( annotation [ 'value' ] ) == str ( annotation_value ) : annotation_id = annotation [ 'id' ] break if not annotation_id : print ( '''WARNING: Annotation you wanted to delete does not exist ''' ) return None url = self . base_url + 'term/edit-annotation/{annotation_id}' . format ( annotation_id = annotation_id ) data = { 'tid' : ' ' , # for delete 'annotation_tid' : ' ' , # for delete 'value' : ' ' , # for delete 'term_version' : ' ' , 'annotation_term_version' : ' ' , } output = self . post ( url = url , data = data , ) # check output return output | If annotation doesnt exist add it | 412 | 6 |
15,645 | def main ( self ) : self . secret_finder ( ) self . parse_access_token ( ) self . get_session_token ( ) self . parse_session_token ( ) self . get_route ( ) self . download_profile ( ) self . find_loci ( ) self . download_loci ( ) | Run the appropriate methods in the correct order | 70 | 8 |
15,646 | def secret_finder ( self ) : secretlist = list ( ) if os . path . isfile ( self . secret_file ) : # Open the file, and put the contents into a list with open ( self . secret_file , 'r' ) as secret : for line in secret : secretlist . append ( line . rstrip ( ) ) # Extract the key and secret from the list self . consumer_key = secretlist [ 0 ] self . consumer_secret = secretlist [ 1 ] else : print ( '"Cannot find the secret.txt file required for authorization. ' 'Please ensure that this file exists, and that the supplied consumer key is on the ' 'first line, and the consumer secret is on he second line. ' 'Contact keith.jolley@zoo.ox.ac.uk for an account, and the necessary keys' ) quit ( ) | Parses the supplied secret . txt file for the consumer key and secrets | 187 | 16 |
15,647 | def parse_access_token ( self ) : access_file = os . path . join ( self . file_path , 'access_token' ) # Ensure that the access_token file exists if os . path . isfile ( access_file ) : # Initialise a list to store the secret and token access_list = list ( ) with open ( access_file , 'r' ) as access_token : for line in access_token : value , data = line . split ( '=' ) access_list . append ( data . rstrip ( ) ) # Set the variables appropriately self . access_secret = access_list [ 0 ] self . access_token = access_list [ 1 ] else : print ( 'Missing access_token' ) self . get_request_token ( ) self . get_access_token ( ) | Extract the secret and token values from the access_token file | 177 | 13 |
15,648 | def get_request_token ( self ) : print ( 'Obtaining request token' ) try : os . remove ( os . path . join ( self . file_path , 'request_token' ) ) except FileNotFoundError : pass # Create a new session session = OAuth1Session ( consumer_key = self . consumer_key , consumer_secret = self . consumer_secret ) # Use the test URL in the GET request r = session . request ( method = 'GET' , url = self . request_token_url , params = { 'oauth_callback' : 'oob' } ) # If the status code is '200' (OK), proceed if r . status_code == 200 : # Save the JSON-decoded token secret and token self . request_token = r . json ( ) [ 'oauth_token' ] self . request_secret = r . json ( ) [ 'oauth_token_secret' ] # Write the token and secret to file self . write_token ( 'request_token' , self . request_token , self . request_secret ) | Obtain a request token | 235 | 5 |
15,649 | def get_session_token ( self ) : # self.logging.info('Getting session token') # Rather than testing any previous session tokens to see if they are still valid, simply delete old tokens in # preparation of the creation of new ones try : os . remove ( os . path . join ( self . file_path , 'session_token' ) ) except FileNotFoundError : pass # Create a new session session_request = OAuth1Session ( self . consumer_key , self . consumer_secret , access_token = self . access_token , access_token_secret = self . access_secret ) # Perform a GET request with the appropriate keys and tokens r = session_request . get ( self . session_token_url ) # If the status code is '200' (OK), proceed if r . status_code == 200 : # Save the JSON-decoded token secret and token self . session_token = r . json ( ) [ 'oauth_token' ] self . session_secret = r . json ( ) [ 'oauth_token_secret' ] # Write the token and secret to file self . write_token ( 'session_token' , self . session_token , self . session_secret ) # Any other status than 200 is considered a failure else : print ( 'Failed:' ) print ( r . json ( ) [ 'message' ] ) | Use the accession token to request a new session token | 295 | 11 |
15,650 | def parse_session_token ( self ) : session_file = os . path . join ( self . file_path , 'session_token' ) # Only try to extract the strings if the file exists if os . path . isfile ( session_file ) : # Create a list to store the data from the file session_list = list ( ) with open ( session_file , 'r' ) as session_token : for line in session_token : # Split the description e.g. secret= from the line value , data = line . split ( '=' ) # Add each string to the list session_list . append ( data . rstrip ( ) ) # Extract the appropriate variable from the list self . session_secret = session_list [ 0 ] self . session_token = session_list [ 1 ] | Extract the session secret and token strings from the session token file | 173 | 13 |
15,651 | def get_route ( self ) : # Create a new session session = OAuth1Session ( self . consumer_key , self . consumer_secret , access_token = self . session_token , access_token_secret = self . session_secret ) # Use the test URL in the GET request r = session . get ( self . test_rest_url ) if r . status_code == 200 or r . status_code == 201 : if re . search ( 'json' , r . headers [ 'content-type' ] , flags = 0 ) : decoded = r . json ( ) else : decoded = r . text # Extract the URLs from the returned data self . loci = decoded [ 'loci' ] self . profile = decoded [ 'schemes' ] | Creates a session to find the URL for the loci and schemes | 169 | 14 |
15,652 | def download_profile ( self ) : # Set the name of the profile file profile_file = os . path . join ( self . output_path , 'profile.txt' ) size = 0 # Ensure that the file exists, and that it is not too small; likely indicating a failed download try : stats = os . stat ( profile_file ) size = stats . st_size except FileNotFoundError : pass # Only download the profile if the file doesn't exist, or is likely truncated if not os . path . isfile ( profile_file ) or size <= 100 : # Create a new session session = OAuth1Session ( self . consumer_key , self . consumer_secret , access_token = self . session_token , access_token_secret = self . session_secret ) # The profile file is called profiles_csv on the server. Updated the URL appropriately r = session . get ( self . profile + '/1/profiles_csv' ) # On a successful GET request, parse the returned data appropriately if r . status_code == 200 or r . status_code == 201 : if re . search ( 'json' , r . headers [ 'content-type' ] , flags = 0 ) : decoded = r . json ( ) else : decoded = r . text # Write the profile file to disk with open ( profile_file , 'w' ) as profile : profile . write ( decoded ) | Download the profile from the database | 302 | 6 |
15,653 | def find_loci ( self ) : session = OAuth1Session ( self . consumer_key , self . consumer_secret , access_token = self . session_token , access_token_secret = self . session_secret ) # Use the URL for all loci determined above r = session . get ( self . loci ) if r . status_code == 200 or r . status_code == 201 : if re . search ( 'json' , r . headers [ 'content-type' ] , flags = 0 ) : decoded = r . json ( ) else : decoded = r . text # Extract all the URLs in the decoded dictionary under the key 'loci' for locus in decoded [ 'loci' ] : # Add each URL to the list self . loci_url . append ( locus ) | Finds the URLs for all allele files | 178 | 8 |
15,654 | def download_loci ( self ) : # Setup the multiprocessing pool. pool = multiprocessing . Pool ( processes = self . threads ) # Map the list of loci URLs to the download method pool . map ( self . download_threads , self . loci_url ) pool . close ( ) pool . join ( ) | Uses a multi - threaded approach to download allele files | 73 | 11 |
15,655 | def download_threads ( self , url ) : # Set the name of the allele file - split the gene name from the URL output_file = os . path . join ( self . output_path , '{}.tfa' . format ( os . path . split ( url ) [ - 1 ] ) ) # Check to see whether the file already exists, and if it is unusually small size = 0 try : stats = os . stat ( output_file ) size = stats . st_size except FileNotFoundError : pass # If the file doesn't exist, or is truncated, proceed with the download if not os . path . isfile ( output_file ) or size <= 100 : # Create a new session session = OAuth1Session ( self . consumer_key , self . consumer_secret , access_token = self . session_token , access_token_secret = self . session_secret ) # The allele file on the server is called alleles_fasta. Update the URL appropriately r = session . get ( url + '/alleles_fasta' ) if r . status_code == 200 or r . status_code == 201 : if re . search ( 'json' , r . headers [ 'content-type' ] , flags = 0 ) : decoded = r . json ( ) else : decoded = r . text # Write the allele to disk with open ( output_file , 'w' ) as allele : allele . write ( decoded ) | Download the allele files | 312 | 4 |
15,656 | def dumps ( obj , * args , * * kwargs ) : kwargs [ 'default' ] = object2dict return json . dumps ( obj , * args , * * kwargs ) | Serialize an object to string | 43 | 6 |
15,657 | def dump ( obj , fp , * args , * * kwargs ) : kwargs [ 'default' ] = object2dict json . dump ( obj , fp , * args , * * kwargs ) | Serialize an object to a file object . | 48 | 9 |
15,658 | def calc_delay ( remainingDrops ) : global sameDelay , lastDelay # Reset lastDelay for new appids if remainingDrops > 1 : lastDelay = 5 sameDelay = 0 if remainingDrops > 2 : return 15 * 60 # Check every 15 minutes elif remainingDrops == 2 : return 10 * 60 # Check every 10 minutes else : # decrease delay by one minute every two calls if lastDelay > 1 : if sameDelay == 2 : sameDelay = 0 lastDelay -= 1 sameDelay += 1 return lastDelay * 60 | Calculate the idle delay Minimum play time for cards to drop is ~20min again . Except for accounts that requested a refund? | 123 | 27 |
15,659 | def configfilepopulator ( self ) : # Set the number of cycles for each read and index using the number of reads specified in the sample sheet self . forwardlength = self . metadata . header . forwardlength self . reverselength = self . metadata . header . reverselength # Create a list of lists containing [cycle start, cycle end, and :runid] for each of forward reads, index 1 # index 2, and reverse reads cycles = [ [ 1 , self . forwardlength , self . runid ] , [ self . forwardlength + 1 , self . forwardlength + 8 , self . runid ] , [ self . forwardlength + 9 , self . forwardlength + 16 , self . runid ] , [ self . forwardlength + 17 , self . forwardlength + 16 + self . reverselength , self . runid ] ] # A dictionary of parameters (keys) and the values to use when repopulating the config file parameters = { 'RunFolder' : self . runid , 'RunFolderDate' : self . metadata . date . replace ( "-" , "" ) , 'RunFolderId' : self . metadata . runnumber , 'RunFlowcellId' : self . metadata . flowcell } # Load the xml file using element tree config = ElementTree . parse ( "{}/config.xml" . format ( self . homepath ) ) # Get the root of the tree configroot = config . getroot ( ) # The run node is the only child node of the root for run in configroot : # Iterate through the child nodes. There are three nodes sections that must be populated for child in run : # Find the cycles tag if child . tag == 'Cycles' : # Set the attributes with a dictionary containing the total reads child . attrib = { 'Last' : '{}' . format ( self . forwardlength + 16 + self . reverselength ) , 'Number' : '{}' . format ( self . totalreads ) , 'First' : '1' } elif child . tag == 'RunParameters' : # Name the child as runparameter for easier coding runparameters = child for runparameter in runparameters : # This replaces data in both 'ImagingReads' and 'Reads' nodes if 'Reads' in runparameter . 
tag : # Enumerate through the run parameters for indexcount , reads in enumerate ( runparameter ) : # The values for the index are 1, 2, 3, 4. Subtract one to get the index of the first # list in cycles index = int ( runparameter . attrib [ 'Index' ] ) - 1 # Set the text value as the appropriate value from cycles reads . text = str ( cycles [ index ] [ indexcount ] ) # Populate the instrument value if runparameter . tag == 'Instrument' : runparameter . text = self . instrument # Iterate through the parameters in the parameter dictionary for parameter in parameters : # If the key is encountered if runparameter . tag == parameter : # Replace the text with the value runparameter . text = parameters [ parameter ] if 'Barcode' in runparameter . tag : for cycle , barcode in enumerate ( runparameter ) : # Add the barcode cycles. These are the number of forward reads (+ 1 as the barcode # starts 1 cycle after the first run) plus the current iterator barcode . text = str ( self . forwardlength + 1 + cycle ) # Write the modified config file to the desired location config . write ( '{}Data/Intensities/BaseCalls/config.xml' . format ( self . miseqfolder ) ) | Populates an unpopulated config . xml file with run - specific values and creates the file in the appropriate location | 781 | 22 |
15,660 | def subscribe_param ( ) : def print_data ( data ) : for parameter in data . parameters : print ( parameter ) processor . create_parameter_subscription ( '/YSS/SIMULATOR/BatteryVoltage2' , on_data = print_data ) | Print value of parameter | 59 | 4 |
15,661 | def _check_holiday_structure ( self , times ) : if not isinstance ( times , list ) : raise TypeError ( "an list is required" ) for time in times : if not isinstance ( time , tuple ) : raise TypeError ( "a tuple is required" ) if len ( time ) > 5 : raise TypeError ( "Target time takes at most 5 arguments" " ('%d' given)" % len ( time ) ) if len ( time ) < 5 : raise TypeError ( "Required argument '%s' (pos '%d')" " not found" % ( TIME_LABEL [ len ( time ) ] , len ( time ) ) ) self . _check_time_format ( TIME_LABEL , time ) | To check the structure of the HolidayClass | 160 | 8 |
15,662 | def _check_time_format ( self , labels , values ) : for label , value in zip ( labels , values ) : if value == "*" : continue if label == "day_of_week" : if isinstance ( value , string_types ) : if value not in ORDER_WEEK : raise ParseError ( "'%s' is not day of the week. " "character is the only '%s'" % ( value , ', ' . join ( ORDER_WEEK ) ) ) elif not isinstance ( value , int ) : raise TypeError ( "'%s' is not an int" % value ) if label in [ "year" , "month" , "day" , "num_of_week" ] : if not isinstance ( value , int ) : raise TypeError ( "'%s' is not an int" % value ) if isinstance ( value , int ) : start , end = TIME_INFO [ label ] if not start <= value <= end : raise PeriodRangeError ( "'%d' is outside the scope of the period " "'%s' range: '%d' to '%d'" % ( value , label , start , end ) ) | To check the format of the times | 256 | 7 |
15,663 | def is_holiday ( self , date ) : time = [ date . year , date . month , date . day , date . isoweekday ( ) , _extract_week_number ( date ) ] target = [ ] for key , data in list ( zip ( TIME_LABEL , time ) ) : d = getattr ( self , key ) asterisk = d . get ( "*" , set ( ) ) s = asterisk . union ( d . get ( data , set ( ) ) ) target . append ( list ( s ) ) for result in map ( set , product ( * target ) ) : if len ( result ) == 1 : return True return False | Whether holiday judges | 144 | 3 |
15,664 | def create ( self , python = None , system_site = False , always_copy = False ) : command = 'virtualenv' if python : command = '{0} --python={1}' . format ( command , python ) if system_site : command = '{0} --system-site-packages' . format ( command ) if always_copy : command = '{0} --always-copy' . format ( command ) command = '{0} {1}' . format ( command , self . path ) self . _execute ( command ) | Create a new virtual environment . | 120 | 6 |
15,665 | def epcrparse ( self ) : logging . info ( 'Parsing ePCR results' ) for sample in self . metadata : if sample . general . bestassemblyfile != 'NA' : if 'stx' in sample . general . datastore : # Initialise count - this allows for the population of vtyperresults with unique values uniquecount = 0 # This populates vtyperresults with the verotoxin subtypes toxinlist = [ ] if os . path . isfile ( sample [ self . analysistype ] . resultsfile ) : epcrresults = open ( sample [ self . analysistype ] . resultsfile , 'r' ) for result in epcrresults : # Only the lines without a # contain results if "#" not in result : uniquecount += 1 # Split on \t data = result . split ( '\t' ) # The subtyping primer pair is the first entry on lines with results vttype = data [ 0 ] . split ( '_' ) [ 0 ] # Push the name of the primer pair - stripped of anything after a _ to the dictionary if vttype not in toxinlist : toxinlist . append ( vttype ) # Create a string of the entries in list1 joined with ";" toxinstring = ";" . join ( sorted ( toxinlist ) ) # Save the string to the metadata sample [ self . analysistype ] . toxinprofile = toxinstring else : setattr ( sample , self . analysistype , GenObject ( ) ) sample [ self . analysistype ] . toxinprofile = 'NA' else : setattr ( sample , self . analysistype , GenObject ( ) ) sample [ self . analysistype ] . toxinprofile = 'NA' | Parse the ePCR text file outputs | 374 | 9 |
15,666 | def populate ( cls , graph ) : [ graph . bind ( k , v ) for k , v in cls . _dict . items ( ) ] | populate an rdflib graph with these curies | 33 | 12 |
15,667 | def authorized ( route ) : @ wraps ( route ) def authorized_route ( * args , * * kwargs ) : if 'mwoauth_access_token' in flask . session : return route ( * args , * * kwargs ) else : return flask . redirect ( flask . url_for ( 'mwoauth.mwoauth_initiate' ) + "?next=" + flask . request . endpoint ) return authorized_route | Wrap a flask route . Ensure that the user has authorized via OAuth or redirect the user to the authorization endpoint with a delayed redirect back to the originating endpoint . | 99 | 33 |
15,668 | def blum_blum_shub ( seed , amount , prime0 , prime1 ) : if amount == 0 : return [ ] assert ( prime0 % 4 == 3 and prime1 % 4 == 3 ) # primes must be congruent 3 mod 4 mod = prime0 * prime1 rand = [ seed ] for _ in range ( amount - 1 ) : last_num = rand [ len ( rand ) - 1 ] next_num = ( last_num * last_num ) % mod rand . append ( next_num ) return rand | Creates pseudo - number generator | 116 | 6 |
15,669 | async def setup ( self ) : try : engine = await self . db created = False if not await engine . has_table ( self . table_name ) : # create table logger . info ( "Creating SQL table [{}]" . format ( self . table_name ) ) items = self . _get_table ( ) await engine . execute ( CreateTable ( items ) ) # create indeces conn = await engine . connect ( ) await conn . execute ( "CREATE INDEX `lb_last_updated` ON `{}` (`source_id` DESC,`updated` DESC);" . format ( self . table_name ) ) await conn . execute ( "CREATE INDEX `lb_post` ON `{}` (`target_id` DESC,`post_id` DESC);" . format ( self . table_name ) ) await conn . close ( ) created = True # create control table if not already created. if self . control_table_name and not await engine . has_table ( self . control_table_name ) : # create table logger . info ( "Creating SQL control table [{}]" . format ( self . control_table_name ) ) items = self . _get_control_table ( ) await engine . execute ( CreateTable ( items ) ) created = True return created except Exception as exc : logger . error ( "[DB] Error when setting up SQL table: {}" . format ( exc ) ) return False | Setting up SQL table if it not exists . | 318 | 9 |
15,670 | def is_dst ( zonename ) : tz = pytz . timezone ( zonename ) now = pytz . utc . localize ( datetime . utcnow ( ) ) return now . astimezone ( tz ) . dst ( ) != timedelta ( 0 ) | Find out whether it s Daylight Saving Time in this timezone | 64 | 12 |
15,671 | def load_datetime ( value , dt_format ) : if dt_format . endswith ( '%z' ) : dt_format = dt_format [ : - 2 ] offset = value [ - 5 : ] value = value [ : - 5 ] if offset != offset . replace ( ':' , '' ) : # strip : from HHMM if needed (isoformat() adds it between HH and MM) offset = '+' + offset . replace ( ':' , '' ) value = value [ : - 1 ] return OffsetTime ( offset ) . localize ( datetime . strptime ( value , dt_format ) ) return datetime . strptime ( value , dt_format ) | Create timezone - aware datetime object | 156 | 8 |
15,672 | def list_to_json ( source_list ) : result = [ ] for item in source_list : result . append ( item . to_json ( ) ) return result | Serialise all the items in source_list to json | 37 | 11 |
15,673 | def list_from_json ( source_list_json ) : result = [ ] if source_list_json == [ ] or source_list_json == None : return result for list_item in source_list_json : item = json . loads ( list_item ) try : if item [ 'class_name' ] == 'Departure' : temp = Departure ( ) elif item [ 'class_name' ] == 'Disruption' : temp = Disruption ( ) elif item [ 'class_name' ] == 'Station' : temp = Station ( ) elif item [ 'class_name' ] == 'Trip' : temp = Trip ( ) elif item [ 'class_name' ] == 'TripRemark' : temp = TripRemark ( ) elif item [ 'class_name' ] == 'TripStop' : temp = TripStop ( ) elif item [ 'class_name' ] == 'TripSubpart' : temp = TripSubpart ( ) else : print ( 'Unrecognised Class ' + item [ 'class_name' ] + ', skipping' ) continue temp . from_json ( list_item ) result . append ( temp ) except KeyError : print ( 'Unrecognised item with no class_name, skipping' ) continue return result | Deserialise all the items in source_list from json | 284 | 12 |
15,674 | def list_diff ( list_a , list_b ) : result = [ ] for item in list_b : if not item in list_a : result . append ( item ) return result | Return the items from list_b that differ from list_a | 41 | 13 |
15,675 | def list_same ( list_a , list_b ) : result = [ ] for item in list_b : if item in list_a : result . append ( item ) return result | Return the items from list_b that are also on list_a | 40 | 14 |
15,676 | def list_merge ( list_a , list_b ) : #return list(collections.OrderedDict.fromkeys(list_a + list_b)) #result = list(list_b) result = [ ] for item in list_a : if not item in result : result . append ( item ) for item in list_b : if not item in result : result . append ( item ) return result | Merge two lists without duplicating items | 91 | 8 |
15,677 | def delay ( self ) : delay = { 'departure_time' : None , 'departure_delay' : None , 'requested_differs' : None , 'remarks' : self . trip_remarks , 'parts' : [ ] } if self . departure_time_actual > self . departure_time_planned : delay [ 'departure_delay' ] = self . departure_time_actual - self . departure_time_planned delay [ 'departure_time' ] = self . departure_time_actual if self . requested_time != self . departure_time_actual : delay [ 'requested_differs' ] = self . departure_time_actual for part in self . trip_parts : if part . has_delay : delay [ 'parts' ] . append ( part ) return delay | Return the delay of the train for this instance | 181 | 9 |
15,678 | def get_actual ( cls , trip_list , time ) : for trip in trip_list : if simple_time ( trip . departure_time_planned ) == time : return trip return None | Look for the train actually leaving at time | 42 | 8 |
15,679 | def parse_disruptions ( self , xml ) : obj = xmltodict . parse ( xml ) disruptions = { } disruptions [ 'unplanned' ] = [ ] disruptions [ 'planned' ] = [ ] if obj [ 'Storingen' ] [ 'Ongepland' ] : raw_disruptions = obj [ 'Storingen' ] [ 'Ongepland' ] [ 'Storing' ] if isinstance ( raw_disruptions , collections . OrderedDict ) : raw_disruptions = [ raw_disruptions ] for disruption in raw_disruptions : newdis = Disruption ( disruption ) #print(newdis.__dict__) disruptions [ 'unplanned' ] . append ( newdis ) if obj [ 'Storingen' ] [ 'Gepland' ] : raw_disruptions = obj [ 'Storingen' ] [ 'Gepland' ] [ 'Storing' ] if isinstance ( raw_disruptions , collections . OrderedDict ) : raw_disruptions = [ raw_disruptions ] for disruption in raw_disruptions : newdis = Disruption ( disruption ) #print(newdis.__dict__) disruptions [ 'planned' ] . append ( newdis ) return disruptions | Parse the NS API xml result into Disruption objects | 280 | 11 |
15,680 | def parse_departures ( self , xml ) : obj = xmltodict . parse ( xml ) departures = [ ] for departure in obj [ 'ActueleVertrekTijden' ] [ 'VertrekkendeTrein' ] : newdep = Departure ( departure ) departures . append ( newdep ) #print('-- dep --') #print(newdep.__dict__) #print(newdep.to_json()) print ( newdep . delay ) return departures | Parse the NS API xml result into Departure objects | 107 | 12 |
15,681 | def parse_trips ( self , xml , requested_time ) : obj = xmltodict . parse ( xml ) trips = [ ] if 'error' in obj : print ( 'Error in trips: ' + obj [ 'error' ] [ 'message' ] ) return None try : for trip in obj [ 'ReisMogelijkheden' ] [ 'ReisMogelijkheid' ] : newtrip = Trip ( trip , requested_time ) trips . append ( newtrip ) except TypeError : # If no options are found, obj['ReisMogelijkheden'] is None return None return trips | Parse the NS API xml result into Trip objects | 137 | 10 |
15,682 | def get_stations ( self ) : url = 'http://webservices.ns.nl/ns-api-stations-v2' raw_stations = self . _request ( 'GET' , url ) return self . parse_stations ( raw_stations ) | Fetch the list of stations | 62 | 6 |
15,683 | def stop ( self ) : self . log . debug ( 'Stopping bot {}' . format ( self . _name ) ) self . _stop = True for t in self . _threads : t . join ( ) self . log . debug ( 'Stopping bot {} finished. All threads joined.' . format ( self . _name ) ) | Stops this bot . | 73 | 5 |
15,684 | def _listen_comments ( self ) : # Collect comments in a queue comments_queue = Queue ( maxsize = self . _n_jobs * 4 ) threads = [ ] # type: List[BotQueueWorker] try : # Create n_jobs CommentsThreads for i in range ( self . _n_jobs ) : t = BotQueueWorker ( name = 'CommentThread-t-{}' . format ( i ) , jobs = comments_queue , target = self . _process_comment ) t . start ( ) threads . append ( t ) # Iterate over all comments in the comment stream for comment in self . _reddit . subreddit ( '+' . join ( self . _subs ) ) . stream . comments ( ) : # Check for stopping if self . _stop : self . _do_stop ( comments_queue , threads ) break comments_queue . put ( comment ) self . log . debug ( 'Listen comments stopped' ) except Exception as e : self . _do_stop ( comments_queue , threads ) self . log . error ( 'Exception while listening to comments:' ) self . log . error ( str ( e ) ) self . log . error ( 'Waiting for 10 minutes and trying again.' ) time . sleep ( 10 * 60 ) # Retry self . _listen_comments ( ) | Start listening to comments using a separate thread . | 286 | 9 |
15,685 | def _listen_submissions ( self ) : # Collect submissions in a queue subs_queue = Queue ( maxsize = self . _n_jobs * 4 ) threads = [ ] # type: List[BotQueueWorker] try : # Create n_jobs SubmissionThreads for i in range ( self . _n_jobs ) : t = BotQueueWorker ( name = 'SubmissionThread-t-{}' . format ( i ) , jobs = subs_queue , target = self . _process_submission ) t . start ( ) self . _threads . append ( t ) # Iterate over all comments in the comment stream for submission in self . _reddit . subreddit ( '+' . join ( self . _subs ) ) . stream . submissions ( ) : # Check for stopping if self . _stop : self . _do_stop ( subs_queue , threads ) break subs_queue . put ( submission ) self . log . debug ( 'Listen submissions stopped' ) except Exception as e : self . _do_stop ( subs_queue , threads ) self . log . error ( 'Exception while listening to submissions:' ) self . log . error ( str ( e ) ) self . log . error ( 'Waiting for 10 minutes and trying again.' ) time . sleep ( 10 * 60 ) # Retry: self . _listen_submissions ( ) | Start listening to submissions using a separate thread . | 295 | 9 |
15,686 | def _listen_inbox_messages ( self ) : # Collect messages in a queue inbox_queue = Queue ( maxsize = self . _n_jobs * 4 ) threads = [ ] # type: List[BotQueueWorker] try : # Create n_jobs inbox threads for i in range ( self . _n_jobs ) : t = BotQueueWorker ( name = 'InboxThread-t-{}' . format ( i ) , jobs = inbox_queue , target = self . _process_inbox_message ) t . start ( ) self . _threads . append ( t ) # Iterate over all messages in the messages stream for message in self . _reddit . inbox . stream ( ) : # Check for stopping if self . _stop : self . _do_stop ( inbox_queue , threads ) break inbox_queue . put ( message ) self . log . debug ( 'Listen inbox stopped' ) except Exception as e : self . _do_stop ( inbox_queue , threads ) self . log . error ( 'Exception while listening to inbox:' ) self . log . error ( str ( e ) ) self . log . error ( 'Waiting for 10 minutes and trying again.' ) time . sleep ( 10 * 60 ) # Retry: self . _listen_inbox_messages ( ) | Start listening to messages using a separate thread . | 286 | 9 |
15,687 | def to_struct ( self ) : structobj = self . struct_type ( ) for k in structobj . attributes ( ) : self . log . info ( "Setting attribute %s to %r" % ( k , getattr ( self , k ) ) ) setattr ( structobj , k , getattr ( self , k ) ) return structobj | Initialize properties of the appropriate struct class from this model class . | 75 | 13 |
15,688 | def move ( self , group , index = None ) : return self . group . db . move_entry ( self , group , index = index ) | This method moves the entry to another group . | 31 | 9 |
15,689 | def dump_data ( data , filename = None , file_type = 'json' , klazz = YapconfError , open_kwargs = None , dump_kwargs = None ) : _check_file_type ( file_type , klazz ) open_kwargs = open_kwargs or { 'encoding' : 'utf-8' } dump_kwargs = dump_kwargs or { } if filename : with open ( filename , 'w' , * * open_kwargs ) as conf_file : _dump ( data , conf_file , file_type , * * dump_kwargs ) else : _dump ( data , sys . stdout , file_type , * * dump_kwargs ) | Dump data given to file or stdout in file_type . | 159 | 14 |
15,690 | def load_file ( filename , file_type = 'json' , klazz = YapconfError , open_kwargs = None , load_kwargs = None ) : _check_file_type ( file_type , klazz ) open_kwargs = open_kwargs or { 'encoding' : 'utf-8' } load_kwargs = load_kwargs or { } data = None with open ( filename , * * open_kwargs ) as conf_file : if str ( file_type ) . lower ( ) == 'json' : data = json . load ( conf_file , * * load_kwargs ) elif str ( file_type ) . lower ( ) == 'yaml' : data = yaml . safe_load ( conf_file . read ( ) ) else : raise NotImplementedError ( 'Someone forgot to implement how to ' 'load a %s file_type.' % file_type ) if not isinstance ( data , dict ) : raise klazz ( 'Successfully loaded %s, but the result was ' 'not a dictionary.' % filename ) return data | Load a file with the given file type . | 244 | 9 |
15,691 | def flatten ( dictionary , separator = '.' , prefix = '' ) : new_dict = { } for key , value in dictionary . items ( ) : new_key = prefix + separator + key if prefix else key if isinstance ( value , collections . MutableMapping ) : new_dict . update ( flatten ( value , separator , new_key ) ) elif isinstance ( value , list ) : new_value = [ ] for item in value : if isinstance ( item , collections . MutableMapping ) : new_value . append ( flatten ( item , separator , new_key ) ) else : new_value . append ( item ) new_dict [ new_key ] = new_value else : new_dict [ new_key ] = value return new_dict | Flatten the dictionary keys are separated by separator | 173 | 10 |
15,692 | def relocate ( source , destination , move = False ) : venv = api . VirtualEnvironment ( source ) if not move : venv . relocate ( destination ) return None venv . move ( destination ) return None | Adjust the virtual environment settings and optional move it . | 44 | 10 |
15,693 | def main ( ) : parser = argparse . ArgumentParser ( description = 'Relocate a virtual environment.' ) parser . add_argument ( '--source' , help = 'The existing virtual environment.' , required = True , ) parser . add_argument ( '--destination' , help = 'The location for which to configure the virtual environment.' , required = True , ) parser . add_argument ( '--move' , help = 'Move the virtual environment to the destination.' , default = False , action = 'store_true' , ) args = parser . parse_args ( ) relocate ( args . source , args . destination , args . move ) | Relocate a virtual environment . | 138 | 6 |
15,694 | def confirm ( prompt = 'Really?' , color = 'warning' , yes_values = ( 'y' , 'yes' ) , abort_on_unconfirmed = False , abort_options = None ) : if isinstance ( yes_values , str ) : yes_values = ( yes_values , ) prompt = '{prompt} [{yes_value}/N] ' . format ( prompt = prompt , yes_value = yes_values [ 0 ] ) if color : prompt = printer . colorize ( prompt , color = color ) try : answer = input ( prompt ) except KeyboardInterrupt : print ( ) confirmed = False else : answer = answer . strip ( ) . lower ( ) confirmed = answer in yes_values # NOTE: The abort-on-unconfirmed logic is somewhat convoluted # because of the special case for return code 0. do_abort_on_unconfirmed = not confirmed and ( # True, non-zero return code, non-empty string, or any other # truthy value (in the manner of typical Python duck-typing) bool ( abort_on_unconfirmed ) or # Zero return code (special case) ( abort_on_unconfirmed == 0 and abort_on_unconfirmed is not False ) ) if do_abort_on_unconfirmed : if abort_options is None : abort_options = { } if abort_on_unconfirmed is True : abort_options . setdefault ( 'return_code' , 0 ) elif isinstance ( abort_on_unconfirmed , int ) : abort_options . setdefault ( 'return_code' , abort_on_unconfirmed ) elif isinstance ( abort_on_unconfirmed , str ) : abort_options . setdefault ( 'message' , abort_on_unconfirmed ) else : abort_options . setdefault ( 'return_code' , 0 ) abort ( * * abort_options ) return confirmed | Prompt for confirmation . | 411 | 5 |
15,695 | def get_mime_message ( self ) : message = MIMEText ( "<html>" + self . get_email_header ( ) + get_email_content ( self . content_file ) + self . get_email_footer ( ) + "</html>" , "html" ) message [ "subject" ] = self . email_subject return message | Gets email MIME message | 79 | 6 |
15,696 | def increment ( name , tags = None ) : def wrap ( f ) : @ wraps ( f ) def decorator ( * args , * * kwargs ) : stats = client ( ) ret = f ( * args , * * kwargs ) stats . incr ( name , tags = tags ) return ret return decorator return wrap | Function decorator for incrementing a statsd stat whenever a function is invoked . | 71 | 16 |
15,697 | def decrement ( name , tags = None ) : def wrap ( f ) : @ wraps ( f ) def decorator ( * args , * * kwargs ) : stats = client ( ) ret = f ( * args , * * kwargs ) stats . decr ( name , tags = tags ) return ret return decorator return wrap | Function decorator for decrementing a statsd stat whenever a function is invoked . | 72 | 17 |
15,698 | def timed ( name , tags = None ) : def wrap ( f ) : @ wraps ( f ) def decorator ( * args , * * kwargs ) : stats = client ( ) with stats . timer ( name , tags = tags ) : return f ( * args , * * kwargs ) return decorator return wrap | Function decorator for tracking timing information on a function s invocation . | 69 | 13 |
15,699 | def move_file_to_directory ( file_path , directory_path ) : file_name = os . path . basename ( file_path ) # get name of file if not os . path . exists ( directory_path ) : os . makedirs ( directory_path ) # create directory if necessary os . rename ( file_path , os . path . join ( directory_path , file_name ) ) | Moves file to given directory | 89 | 6 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.