idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
11,700
def get_sample_size(self, key=None):
    """Return the number of samples in the input data.

    @param key: optional partition key; when given, return the size of
        that partition at the current persistence level instead.
    @return: int sample count
    """
    if key is None:
        return len(self.Y)
    return len(self.get_partitions(self.persistence)[key])
Returns the number of samples in the input data
46
9
11,701
def to_json(self):
    """Serialize the complete Morse-Smale merge hierarchy to a JSON string."""
    hierarchy = [
        {
            "Dying": dying,
            "Persistence": persistence,
            "Surviving": surviving,
            "Saddle": saddle,
        }
        for dying, (persistence, surviving, saddle) in self.merge_sequence.items()
    ]
    # One (min, max) extremum pair per sample, filled in from the partitions.
    labels = np.array([None, None] * len(self.Y)).reshape(-1, 2)
    for (min_index, max_index), sample_ids in self.base_partitions.items():
        labels[sample_ids, :] = [min_index, max_index]
    return json.dumps({"Hierarchy": hierarchy, "Partitions": labels.tolist()})
Writes the complete Morse-Smale merge hierarchy to a string object.
188
15
11,702
def dict_to_numpy_array(d):
    """Convert a dict of 1-d arrays into a numpy recarray (one field per key)."""
    record_dtype = np.dtype([(str(key), arr.dtype) for key, arr in d.items()])
    return fromarrays(d.values(), record_dtype)
Convert a dict of 1d array to a numpy recarray
54
14
11,703
def concatenate_1d(arrays):
    """Concatenate 1-D numpy arrays.

    Like ``np.concatenate`` but tolerates empty input and preserves masks
    when any input is a masked array.
    """
    if len(arrays) == 0:
        return np.array([])
    if len(arrays) == 1:
        return np.asanyarray(arrays[0])
    if any(np.ma.is_masked(a) for a in arrays):
        return np.ma.concatenate(arrays)
    return np.concatenate(arrays)
Concatenate 1D numpy arrays . Similar to np . concatenate but work with empty input and masked arrays .
85
26
11,704
def formula_html(self, reversed_=False):
    """Return the chemical formula fragment as HTML (symbol, H count, charge)."""
    if self.H_count == 1:
        hydrogen = "H"
    elif self.H_count > 1:
        hydrogen = "H<sub>{}</sub>".format(self.H_count)
    else:
        hydrogen = ""
    parts = [self.symbol, hydrogen, self.charge_sign_html()]
    if reversed_:
        parts = reversed(parts)
    return "".join(parts)
Chemical formula HTML
99
4
11,705
def charge_sign(self):
    """Return the charge sign text.

    Empty string for neutral; otherwise the magnitude (only when > 1)
    followed by "+" or the minus sign.
    """
    if self.charge > 0:
        sign = "+"
    elif self.charge < 0:
        sign = "–"  # en dash, not hyphen-minus
    else:
        return ""
    ab = abs(self.charge)
    if ab > 1:
        return str(ab) + sign
    return sign
Charge sign text
66
3
11,706
def send_message(self, message):
    """Dispatch a message using 0mq.

    Validates that the message and its sender, receiver, and body are all
    present (logging an error and dropping the message otherwise), then
    publishes it on the socket as (sender topic, pickled message).
    """
    with self._instance_lock:
        if message is None:
            # fixed grammar: was "a null messages"
            Global.LOGGER.error("can't deliver a null message")
            return
        if message.sender is None:
            Global.LOGGER.error(f"can't deliver anonymous messages with body {message.body}")
            return
        if message.receiver is None:
            Global.LOGGER.error(f"can't deliver message from {message.sender}: recipient not specified")
            return
        if message.message is None:
            Global.LOGGER.error(f"can't deliver message with no body from {message.sender}")
            return
        # Topic-style envelope so subscribers can filter on "*name*".
        sender = "*" + message.sender + "*"
        self.socket.send_multipart([bytes(sender, 'utf-8'), pickle.dumps(message)])
        if Global.CONFIG_MANAGER.tracing_mode:
            Global.LOGGER.debug("dispatched : " + message.sender + "-" + message.message + "-" + message.receiver)
        self.dispatched = self.dispatched + 1
Dispatch a message using 0mq
233
7
11,707
def update_properties_cache(sender, instance, action, reverse, model, pk_set, **kwargs):
    """m2m-changed handler: refresh the POI property cache after additions.

    NOTE(review): removal events are not handled yet (matches the original
    behavior).
    """
    if action != 'post_add':
        return
    instance.save_properties_cache()
Property cache update at POI save. It does not yet handle property removal.
47
18
11,708
def to_json(self):
    """Serialize the complete Morse complex merge hierarchy to a compact JSON string."""
    capsule = {
        "Hierarchy": [
            {
                "Persistence": persistence,
                "Dying": dying,
                "Surviving": surviving,
                "Saddle": saddle,
            }
            for dying, (persistence, surviving, saddle) in self.merge_sequence.items()
        ]
    }
    # One extremum label per sample, filled in from the base partitions.
    labels = np.array([None] * len(self.Y))
    for label, sample_ids in self.base_partitions.items():
        labels[sample_ids] = label
    capsule["Partitions"] = labels.tolist()
    return json.dumps(capsule, separators=(",", ":"))
Writes the complete Morse complex merge hierarchy to a string object.
170
13
11,709
def iter(context, sequence, limit=10):
    """Yield every jobs event of *sequence*, paging through the API."""
    params = {'limit': limit, 'offset': 0}
    uri = '%s/%s/%s' % (context.dci_cs_api, RESOURCE, sequence)
    while True:
        payload = context.session.get(uri, params=params).json()
        events = payload['jobs_events']
        if not len(events):
            break
        for event in events:
            yield event
        params['offset'] += params['limit']
Iter to list all the jobs events .
121
8
11,710
def delete(context, sequence):
    """Delete the jobs events of the given *sequence*; return the response."""
    endpoint = '%s/%s/%s' % (context.dci_cs_api, RESOURCE, sequence)
    return context.session.delete(endpoint)
Delete jobs events from a given sequence
47
7
11,711
def get_ldap(cls, global_options=None):
    """Return the ldap module, importing and configuring it on first use.

    The unit test harness may assign a mock object to _LDAPConfig.ldap, so
    the ldap module must not be imported anywhere else.
    """
    if cls.ldap is None:
        import ldap.filter

        # Support for python-ldap < 2.0.6
        try:
            import ldap.dn
        except ImportError:
            from django_auth_ldap import dn
            ldap.dn = dn

        cls.ldap = ldap

    # Apply global LDAP options once
    if (not cls._ldap_configured) and (global_options is not None):
        for opt, value in global_options.items():
            cls.ldap.set_option(opt, value)
        cls._ldap_configured = True

    return cls.ldap
Returns the ldap module . The unit test harness will assign a mock object to _LDAPConfig . ldap . It is imperative that the ldap module not be imported anywhere else so that the unit tests will pass in the absence of python - ldap .
164
57
11,712
def _begin(self, connection, filterargs=(), escape=True):
    """Begin an asynchronous LDAP search.

    Returns the message id for retrieving results later, or None on error.
    """
    if escape:
        filterargs = self._escape_filterargs(filterargs)
    try:
        filterstr = self.filterstr % filterargs
        msgid = connection.search(force_str(self.base_dn), self.scope, force_str(filterstr))
    except ldap.LDAPError as e:
        msgid = None
        logger.error(u"search('%s', %d, '%s') raised %s" %
                     (self.base_dn, self.scope, filterstr, pprint.pformat(e)))
    return msgid
Begins an asynchronous search and returns the message id to retrieve the results .
143
15
11,713
def _results(self, connection, msgid):
    """Return the processed result of a previous asynchronous query."""
    try:
        kind, results = connection.result(msgid)
        # anything other than a search result is discarded
        if kind != ldap.RES_SEARCH_RESULT:
            results = []
    except ldap.LDAPError as e:
        results = []
        logger.error(u"result(%d) raised %s" % (msgid, pprint.pformat(e)))
    return self._process_results(results)
Returns the result of a previous asynchronous query .
99
9
11,714
def _escape_filterargs ( self , filterargs ) : if isinstance ( filterargs , tuple ) : filterargs = tuple ( self . ldap . filter . escape_filter_chars ( value ) for value in filterargs ) elif isinstance ( filterargs , dict ) : filterargs = dict ( ( key , self . ldap . filter . escape_filter_chars ( value ) ) for key , value in filterargs . items ( ) ) else : raise TypeError ( "filterargs must be a tuple or dict." ) return filterargs
Escapes values in filterargs .
120
7
11,715
def _process_results(self, results):
    """Return a sanitized copy of raw LDAP results.

    Drops referrals (entries with a None DN), decodes utf-8, and
    lower-cases DNs.
    """
    results = [r for r in results if r[0] is not None]
    results = _DeepStringCoder('utf-8').decode(results)
    # The normal form of a DN is lower case.
    results = [(dn.lower(), attrs) for dn, attrs in results]
    result_dns = [dn for dn, _ in results]
    logger.debug(u"search_s('%s', %d, '%s') returned %d objects: %s" %
                 (self.base_dn, self.scope, self.filterstr, len(result_dns), "; ".join(result_dns)))
    return results
Returns a sanitized copy of raw LDAP results . This scrubs out references decodes utf8 normalizes DNs etc .
164
27
11,716
def get_connection_string(params, hide_password=True):
    """Build a database connection string from a parameter dict.

    Required keys: 'driver' and 'database'; optional: 'user', 'password',
    'host', 'port'. A missing password is looked up in the keyring;
    *hide_password* masks it in the returned string.
    """
    url = params['driver'] + '://'
    user = params.get('user', None)
    password = params.get('password', None)
    host = params.get('host', None)
    port = params.get('port', None)
    database = params.get('database', None)
    if database is None:
        raise ValueError("Field 'database' of connection parameters cannot be None.")
    # if password is not set, try to get it from keyring
    if password is None and user is not None:
        # noinspection PyTypeChecker
        password = Client._get_password(params)
        if password is None:
            raise RuntimeError("Password not defined and not available in keyring.")
    # don't add host/port/user/password if no host given
    if host is not None:
        # don't add user/password if user not given
        if user is not None:
            url += user
            # omit zero-length passwords
            if len(password) > 0:
                url += ":[password hidden]" if hide_password else ":" + password
            url += "@"
        url += host
        if port is not None:
            url += ':' + str(port)
    # noinspection PyTypeChecker
    url += '/' + database
    return url
Get a database connection string
305
5
11,717
def pubticker(self, symbol='btcusd'):
    """Request the latest ticker info for *symbol*; return the response."""
    return requests.get(self.base_url + '/v1/pubticker/' + symbol)
Send a request for latest ticker info return the response .
42
12
11,718
def book(self, symbol='btcusd', limit_bids=0, limit_asks=0):
    """Request the public order book for *symbol*; return the response.

    A limit of 0 means "no limit" on that side of the book.
    """
    query = {'limit_bids': limit_bids, 'limit_asks': limit_asks}
    return requests.get(self.base_url + '/v1/book/' + symbol, query)
Send a request to get the public order book return the response .
78
13
11,719
def trades(self, symbol='btcusd', since=0, limit_trades=50, include_breaks=0):
    """Request the public trade history for *symbol*; return the response."""
    query = {'since': since,
             'limit_trades': limit_trades,
             'include_breaks': include_breaks}
    return requests.get(self.base_url + '/v1/trades/' + symbol, query)
Send a request to get all public trades return the response .
89
12
11,720
def auction(self, symbol='btcusd'):
    """Request the latest auction info for *symbol*; return the response."""
    return requests.get(self.base_url + '/v1/auction/' + symbol)
Send a request for latest auction info return the response .
39
11
11,721
def auction_history(self, symbol='btcusd', since=0, limit_auction_results=50, include_indicative=1):
    """Request auction history info for *symbol*; return the response."""
    query = {'since': since,
             'limit_auction_results': limit_auction_results,
             'include_indicative': include_indicative}
    return requests.get(self.base_url + '/v1/auction/' + symbol + '/history', query)
Send a request for auction history info return the response .
104
11
11,722
def new_order(self, amount, price, side, client_order_id=None, symbol='btcusd', type='exchange limit', options=None):
    """Place a new order; return the response.

    Optional client_order_id and options are only sent when provided.
    """
    request = '/v1/order/new'
    payload = {
        'request': request,
        'nonce': self.get_nonce(),
        'symbol': symbol,
        'amount': amount,
        'price': price,
        'side': side,
        'type': type,
    }
    if client_order_id is not None:
        payload['client_order_id'] = client_order_id
    if options is not None:
        payload['options'] = options
    return requests.post(self.base_url + request, headers=self.prepare(payload))
Send a request to place an order return the response .
171
11
11,723
def cancel_order(self, order_id):
    """Cancel the order identified by *order_id*; return the response."""
    request = '/v1/order/cancel'
    payload = {'request': request, 'nonce': self.get_nonce(), 'order_id': order_id}
    return requests.post(self.base_url + request, headers=self.prepare(payload))
Send a request to cancel an order return the response .
81
11
11,724
def past_trades(self, symbol='btcusd', limit_trades=50, timestamp=0):
    """Request the account's trade history; return the response."""
    request = '/v1/mytrades'
    payload = {'request': request,
               'nonce': self.get_nonce(),
               'symbol': symbol,
               'limit_trades': limit_trades,
               'timestamp': timestamp}
    return requests.post(self.base_url + request, headers=self.prepare(payload))
Send a trade history request return the response .
112
9
11,725
def tradevolume(self):
    """Request the account's trade volume; return the response."""
    request = '/v1/tradevolume'
    payload = {'request': request, 'nonce': self.get_nonce()}
    return requests.post(self.base_url + request, headers=self.prepare(payload))
Send a request to get your trade volume return the response .
64
12
11,726
def newAddress(self, currency='btc', label=''):
    """Request a new cryptocurrency deposit address, optionally labelled.

    Returns the response.
    """
    request = '/v1/deposit/' + currency + '/newAddress'
    payload = {'request': request, 'nonce': self.get_nonce()}
    if label != '':
        payload['label'] = label
    return requests.post(self.base_url + request, headers=self.prepare(payload))
Send a request for a new cryptocurrency deposit address with an optional label . Return the response .
96
18
11,727
def prepare(self, params):
    """Return the authentication HTTP headers required by the Gemini API.

    The params dict is JSON-encoded, base64-wrapped, and HMAC-SHA384
    signed with the account's secret key.
    """
    payload = base64.b64encode(json.dumps(params).encode())
    signature = hmac.new(self.secret_key.encode(), payload, hashlib.sha384).hexdigest()
    return {
        'X-GEMINI-APIKEY': self.api_key,
        'X-GEMINI-PAYLOAD': payload,
        'X-GEMINI-SIGNATURE': signature,
    }
Prepare return the required HTTP headers .
115
8
11,728
def merge(cls, source_blocks):
    """Merge multiple SourceBlocks into one.

    Blocks are combined in order of start_line_number; the earliest block
    supplies boot_lines, directive, language, and roles. A single block is
    returned unchanged.
    """
    if len(source_blocks) == 1:
        return source_blocks[0]
    # Sort a copy so the caller's list is not reordered as a side effect
    # (the original called list.sort() on the argument).
    ordered = sorted(source_blocks, key=operator.attrgetter('start_line_number'))
    main_block = ordered[0]
    source_lines = [line
                    for block in ordered
                    for line in block.source_lines]
    return cls(main_block.boot_lines,
               source_lines,
               directive=main_block.directive,
               language=main_block.language,
               roles=main_block.roles)
Merge multiple SourceBlocks together
138
6
11,729
def character_summary_table():
    """Export a table listing all characters and their data (CSV and Excel)."""
    # a database client/session to run queries in
    cl = client.get_client()
    session = cl.create_session()
    # Rename the two joined-in name columns so the labels are intelligible
    # and not duplicated; left-outer-join the place of birth so characters
    # without one still appear.
    query = session.query(
        models.Character,
        models.Universe.name.label('universe'),
        models.Place.name.label('place_of_birth'),
    ).join(models.Character.universe).outerjoin(models.Character.place_of_birth)
    # all character data as a DataFrame, indexed by character ID
    characters = cl.df_query(query).set_index('id')
    # number of movie appearances per character
    query = session.query(
        sa.func.count(models.MovieAppearance.id).label('movie_appearances'),
        models.MovieAppearance.character_id,
    ).group_by(models.MovieAppearance.character_id)
    appearances = cl.df_query(query).set_index('character_id')
    # join both tables, sort by name
    df = characters.join(appearances, how='left').sort_values(by='name')
    # drop the foreign key columns (have no meaning outside our DB)
    df = df.drop(['universe_id', 'place_of_birth_id'], axis=1)
    # write output as both CSV and Excel; do not include index column
    df.to_csv(path.join(out_dir, "characters.csv"), encoding='utf-8', index=False)
    df.to_excel(path.join(out_dir, "characters.xlsx"), encoding='utf-8', index=False)
    session.close()
Export a table listing all characters and their data
463
9
11,730
def fig_to_svg(fig):
    """Render a matplotlib figure to an SVG string."""
    buffer = io.StringIO()
    fig.savefig(buffer, format='svg')
    buffer.seek(0)
    return buffer.getvalue()
Helper function to convert matplotlib figure to SVG string
46
11
11,731
def movie_network():
    """Generate an interactive network graph of movie appearances."""
    # page template and its context
    template = jenv.get_template("movie_network.html")
    context = dict()
    # a database client/session to run queries in
    cl = client.get_client()
    session = cl.create_session()
    # --- query data ---
    query = session.query(models.Movie.id, models.Movie.name, models.Movie.url,
                          models.Movie.budget_inflation_adjusted, models.Movie.imdb_rating)
    movies = cl.df_query(query)
    query = session.query(models.MovieAppearance.movie_id,
                          models.MovieAppearance.character_id)
    appearances = cl.df_query(query)
    # only characters that actually have movie appearances
    query = session.query(models.Character.id, models.Character.url, models.Character.name).filter(
        models.Character.id.in_([int(i) for i in appearances['character_id'].unique()]))
    characters = cl.df_query(query)
    # --- transform to network graph ---
    graph = dict(nodes=[], graph=[], links=[], directed=False, multigraph=True)
    # lookups from movie/character IDs to node IDs
    movie_node_id = dict()
    character_node_id = dict()
    # normalization for movie node size: 100 = max budget
    movie_size_factor = 100. / movies['budget_inflation_adjusted'].max()
    # nodes for movies (squares, sized by budget, scored by IMDB rating)
    for _, data in movies.iterrows():
        movie_node_id[data['id']] = len(graph['nodes'])
        graph['nodes'].append(dict(
            id=data['name'],
            size=max(5., data['budget_inflation_adjusted'] * movie_size_factor),
            score=data['imdb_rating'] / 10.,
            type='square',
            url="http://marvel.wikia.com" + data['url']))
    # nodes for characters (fixed-size circles)
    for _, data in characters.iterrows():
        character_node_id[data['id']] = len(graph['nodes'])
        graph['nodes'].append(dict(
            id=data['name'],
            size=10,
            type='circle',
            url="http://marvel.wikia.com" + data['url']))
    # links: movie appearances
    for _, data in appearances.iterrows():
        graph['links'].append(dict(
            source=movie_node_id[data['movie_id']],
            target=character_node_id[data['character_id']]))
    context['graph'] = json.dumps(graph, indent=4)
    # --- render template ---
    out_file = path.join(out_dir, "movie_network.html")
    html_content = template.render(**context)
    with open(out_file, 'w') as f:
        f.write(html_content)
    # done, clean up
    plt.close('all')
    session.close()
Generate interactive network graph of movie appearances
758
8
11,732
def unpack2D(_x):
    """Split 2-D data into its x and y columns (keeps equations simple)."""
    arr = np.atleast_2d(_x)
    return arr[:, 0], arr[:, 1]
Helper function for splitting 2D data into x and y component to make equations simpler
47
16
11,733
def is_at_exit():
    """Heuristically detect whether the Python interpreter is shutting down."""
    main = _threading_main_thread
    if main is not None:
        if not hasattr(threading, "main_thread"):
            return True
        if threading.main_thread() != main:
            return True
        if not main.is_alive():
            return True
    return False
Some heuristics to figure out whether this is called at a stage where the Python interpreter is shutting down .
79
22
11,734
def better_exchook(etype, value, tb, debugshell=False, autodebugshell=True, file=None, with_color=None):
    """Replacement for sys.excepthook: colored traceback with locals, and an
    optional debug shell (enabled via the DEBUG environment variable)."""
    if file is None:
        file = sys.stderr

    def output(ln):
        """
        :param str ln:
        :return: nothing, prints to ``file``
        """
        file.write(ln + "\n")

    color = Color(enable=with_color)
    output(color("EXCEPTION", color.fg_colors[1], bold=True))
    all_locals, all_globals = {}, {}
    if tb is not None:
        print_tb(tb, allLocals=all_locals, allGlobals=all_globals,
                 file=file, withTitle=True, with_color=color.enable)
    else:
        output(color("better_exchook: traceback unknown", color.fg_colors[1]))
    import types

    # noinspection PyShadowingNames
    def _some_str(value):
        """
        :param object value:
        :rtype: str
        """
        # noinspection PyBroadException
        try:
            return str(value)
        except Exception:
            return '<unprintable %s object>' % type(value).__name__

    # noinspection PyShadowingNames
    def _format_final_exc_line(etype, value):
        value_str = _some_str(value)
        if value is None or not value_str:
            line = color("%s" % etype, color.fg_colors[1])
        else:
            line = color("%s" % etype, color.fg_colors[1]) + ": %s" % (value_str,)
        return line

    # noinspection PyUnresolvedReferences
    if (isinstance(etype, BaseException)
            or (hasattr(types, "InstanceType") and isinstance(etype, types.InstanceType))
            or etype is None or type(etype) is str):
        output(_format_final_exc_line(etype, value))
    else:
        output(_format_final_exc_line(etype.__name__, value))
    if autodebugshell:
        # noinspection PyBroadException
        try:
            debugshell = int(os.environ["DEBUG"]) != 0
        except Exception:
            pass
    if debugshell:
        output("---------- DEBUG SHELL -----------")
        debug_shell(user_ns=all_locals, user_global_ns=all_globals, traceback=tb)
    file.flush()
Replacement for sys . excepthook .
582
10
11,735
def dump_all_thread_tracebacks(exclude_thread_ids=None, file=None):
    """Print the traceback of all threads to *file* (default: stdout).

    Threads listed in *exclude_thread_ids* are skipped. Requires
    sys._current_frames; prints a notice when it is unavailable.
    """
    if exclude_thread_ids is None:
        exclude_thread_ids = []
    if not file:
        file = sys.stdout
    import threading

    if hasattr(sys, "_current_frames"):
        print("", file=file)
        threads = {t.ident: t for t in threading.enumerate()}
        # noinspection PyProtectedMember
        for tid, stack in sys._current_frames().items():
            if tid in exclude_thread_ids:
                continue
            # This is a bug in earlier Python versions.
            # http://bugs.python.org/issue17094
            # Note that this leaves out all threads not created via the threading module.
            if tid not in threads:
                continue
            tags = []
            thread = threads.get(tid)
            if thread:
                assert isinstance(thread, threading.Thread)
                # current_thread() replaces the deprecated currentThread()
                if thread is threading.current_thread():
                    tags += ["current"]
                # noinspection PyProtectedMember,PyUnresolvedReferences
                if isinstance(thread, threading._MainThread):
                    tags += ["main"]
                tags += [str(thread)]
            else:
                tags += ["unknown with id %i" % tid]
            print("Thread %s:" % ", ".join(tags), file=file)
            print_tb(stack, file=file)
            print("", file=file)
        print("That were all threads.", file=file)
    else:
        print("Does not have sys._current_frames, cannot get thread tracebacks.", file=file)
Prints the traceback of all threads .
354
9
11,736
def _main():
    """Demo driver: runs self-tests or shells on request, otherwise shows
    example tracebacks and finally installs the hook and fails on purpose."""
    if sys.argv[1:] == ["test"]:
        # run every module-level test_* function
        for k, v in sorted(globals().items()):
            if not k.startswith("test_"):
                continue
            print("running: %s()" % k)
            v()
        print("ok.")
        sys.exit()
    elif sys.argv[1:] == ["debug_shell"]:
        debug_shell(locals(), globals())
        sys.exit()
    elif sys.argv[1:] == ["debug_shell_exception"]:
        try:
            raise Exception("demo exception")
        except Exception:
            better_exchook(*sys.exc_info(), debugshell=True)
        sys.exit()
    elif sys.argv[1:]:
        print("Usage: %s (test|...)" % sys.argv[0])
        sys.exit(1)
    # some examples
    # this code produces this output: https://gist.github.com/922622
    try:
        x = {1: 2, "a": "b"}

        # noinspection PyMissingOrEmptyDocstring
        def f():
            y = "foo"
            # noinspection PyUnresolvedReferences,PyStatementEffect
            x, 42, sys.stdin.__class__, sys.exc_info, y, z

        f()
    except Exception:
        better_exchook(*sys.exc_info())
    try:
        # noinspection PyArgumentList
        (lambda _x: None)(__name__, 42)  # multiline
    except Exception:
        better_exchook(*sys.exc_info())
    try:
        class Obj:
            def __repr__(self):
                return ("<Obj multi-\n" + " line repr>")

        obj = Obj()
        assert not obj
    except Exception:
        better_exchook(*sys.exc_info())

    # noinspection PyMissingOrEmptyDocstring
    def f1(a):
        f2(a + 1, 2)

    # noinspection PyMissingOrEmptyDocstring
    def f2(a, b):
        f3(a + b)

    # noinspection PyMissingOrEmptyDocstring
    def f3(a):
        b = ("abc" * 100) + "-interesting"  # some long demo str
        a(b)  # error, not callable

    try:
        f1(13)
    except Exception:
        better_exchook(*sys.exc_info())
    # use this to overwrite the global exception handler
    install()
    # and fail
    # noinspection PyUnresolvedReferences
    finalfail(sys)
Some demo .
584
3
11,737
def verify_mid_signature(certificate_data, sp_challenge, response_challenge, signature):
    """Verify that a Mobile-ID authentication signature is valid.

    The response challenge must extend the SP challenge; the certificate is
    tried as RSA first, falling back to EC on import failure.
    """
    if not response_challenge.startswith(sp_challenge):
        return False
    try:
        key = RSA.importKey(certificate_data)
        verifier = PKCS1_v1_5.new(key)
    except ValueError:
        key = ECC.import_key(certificate_data)
        verifier = DSS.new(key, 'deterministic-rfc6979')
    digest = PrehashedMessageData(response_challenge)
    try:
        verifier.verify(digest, signature)
        return True
    except ValueError:
        return False
Verify mobile id Authentication signature is valid
143
8
11,738
def drive(self, event, *args):
    """Dispatch *event* to its registered handles, then to pool observers.

    Control-flow exceptions: Stop aborts dispatch, StopIteration is
    ignored, Kill propagates, Erase unregisters the raising handle, and
    anything else is reported via debug().
    """
    maps = self.base.get(event, self.step)
    # iterate a copy so Erase can remove entries safely
    for handle, data in maps[:]:
        params = args + data
        try:
            handle(self, *params)
        except Stop:
            break
        except StopIteration:
            pass
        except Kill:  # was "except Kill as Root" — the binding was never used
            raise
        except Erase:
            maps.remove((handle, data))
        except Exception:  # was "as e" — the binding was never used
            debug(event, params)
    # renamed from "handle" to avoid shadowing the loop variable above
    for observer in self.pool:
        observer(self, event, args)
Used to dispatch events .
105
5
11,739
def send(self, data):
    """Write *data* to the child process's stdin and flush immediately."""
    stdin = self.stdin
    stdin.write(data)
    stdin.flush()
Send data to the child process through .
25
8
11,740
def _simplify_arguments ( arguments ) : if len ( arguments . args ) == 0 : return arguments . kwargs elif len ( arguments . kwargs ) == 0 : return arguments . args else : return arguments
If positional or keyword arguments are empty return only one or the other .
49
14
11,741
def load(self):
    """Load this step's result from its dump directory.

    Prefers result.h5 (pandas HDFStore): a single '/df' key loads as one
    DataFrame, purely integer keys load as a list (keys are not
    necessarily ordered), anything else loads as a dict. Falls back to
    the joblib pickle dump otherwise.
    """
    hdf_filename = os.path.join(self._dump_dirname, 'result.h5')
    if os.path.isfile(hdf_filename):
        store = pd.HDFStore(hdf_filename, mode='r')
        keys = store.keys()
        if keys == ['/df']:
            self.result = store['df']
        elif set(keys) == {'/%s' % i for i in range(len(keys))}:
            # keys are not necessarily ordered
            self.result = [store[str(k)] for k in range(len(keys))]
        else:
            self.result = {k[1:]: store[k] for k in keys}
    else:
        self.result = joblib.load(os.path.join(self._output_dirname, 'dump', 'result.pkl'))
Load this step s result from its dump directory
212
9
11,742
def setup_dump(self):
    """Set up the dump: create directories and (re)write step.yaml when it
    is missing or no longer matches this step's yaml dump."""
    dumpdir = self._dump_dirname
    if not os.path.isdir(dumpdir):
        os.makedirs(dumpdir)
    yaml_filename = self._yaml_filename
    dump = not os.path.isfile(yaml_filename)
    if not dump:
        with open(yaml_filename) as f:
            if f.read() != yaml.dump(self):
                logging.warning('Existing step.yaml does not match hash, regenerating')
                dump = True
    if dump:
        with open(yaml_filename, 'w') as f:
            yaml.dump(self, f)
Set up dump creating directories and writing step . yaml file containing yaml dump of this step .
151
20
11,743
def main(ctx, root_dir, verbose):
    """package-docs CLI: build single-package previews of Stack documentation."""
    root_dir = discover_package_doc_dir(root_dir)
    # Subcommands should use the click.pass_obj decorator to get this
    # ctx.obj object as the first argument.
    ctx.obj = {'root_dir': root_dir, 'verbose': verbose}
    # Set up application logging. Only documenteer's logger is activated;
    # add other apps' loggers here if necessary.
    log_level = logging.DEBUG if verbose else logging.INFO
    logger = logging.getLogger('documenteer')
    logger.addHandler(logging.StreamHandler())
    logger.setLevel(log_level)
package - docs is a CLI for building single - package previews of documentation in the LSST Stack .
167
20
11,744
def apply_and_name(self, aggregator):
    """Fetch the row-aggregated input columns for this ColumnFunction and
    relabel them with self.names.

    Raises IndexError when the function yields more columns than names.
    """
    frame = self._apply(aggregator)
    if len(frame.columns) != len(self.names):
        raise IndexError("ColumnFunction creates more columns than it has names for.")
    frame.columns = self.names
    return frame
Fetches the row - aggregated input columns for this ColumnFunction .
72
15
11,745
def aggregate(self, index):
    """Group the unique columns by *index* (a column name or an index /
    MultiIndex spec) and build the final reduced DataFrame."""
    # deal with index as a string vs index as a index/MultiIndex
    if isinstance(index, string_types):
        col_df_grouped = self.col_df.groupby(self.df[index])
    else:
        self.col_df.index = pd.MultiIndex.from_arrays([self.df[i] for i in index])
        col_df_grouped = self.col_df.groupby(level=index)
        self.col_df.index = self.df.index
    # perform the actual aggregation
    self.reduced_df = pd.DataFrame({
        colred: col_df_grouped[colred.column].agg(colred.agg_func)
        for colred in self.column_reductions
    })
    # apply the functions to produce the final dataframe; each
    # apply_and_name() calls get_reduced() with the reductions it wants
    reduced_dfs = [cf.apply_and_name(self) for cf in self.column_functions]
    return pd.concat(reduced_dfs, axis=1)
Performs a groupby of the unique Columns by index as constructed from self . df .
261
19
11,746
def _apply ( self , aggregator ) : reduced_dfs = [ ] if self . include_fraction : n_df = self . numerator . apply_and_name ( aggregator ) d_df = self . denominator . apply_and_name ( aggregator ) reduced_dfs . extend ( [ n_df [ cn ] / d_df [ cd ] for cn , cd in product ( n_df . columns , d_df . columns ) ] ) if self . include_numerator : reduced_dfs . append ( self . numerator . apply_and_name ( aggregator ) ) if self . include_denominator : reduced_dfs . append ( self . denominator . apply_and_name ( aggregator ) ) return pd . concat ( reduced_dfs , axis = 1 )
Returns a dataframe with the requested ColumnReductions .
183
11
11,747
def clone(self, **kwargs):
    """Create a clone of the Table, optionally with some properties changed
    via keyword overrides."""
    settings = {
        "name": self.__name,
        "dataframe": self.__df,
        "include_columns": self.__include_columns,
        "include_index": self.__include_index,
        "style": self.__style,
        "column_styles": self.__col_styles,
        "column_widths": self.__column_widths,
        "row_styles": self.__row_styles,
        "header_style": self.header_style,
        "index_style": self.index_style,
    }
    settings.update(kwargs)
    return self.__class__(**settings)
Create a clone of the Table optionally with some properties changed
164
11
11,748
def inspect(lines):
    """Scan SDFile lines; return (list of data labels found, compound count)."""
    labels = set()
    count = 0
    exp = re.compile(r">.*?<([\w ]+)>")  # Space should be accepted
    valid = False
    for line in lines:
        if line.startswith("M END\n"):
            valid = True
        elif line.startswith("$$$$"):
            count += 1
            valid = False
        else:
            matched = exp.match(line)
            if matched:
                labels.add(matched.group(1))
    # a trailing record without its "$$$$" terminator still counts
    if valid:
        count += 1
    return list(labels), count
Inspect SDFile list of string
119
8
11,749
def inspect_file(path):
    """Open an SDFile and inspect its structure (labels, compound count)."""
    with open(path, 'rb') as f:
        labels, count = inspect(tx.decode(line) for line in f)
    return labels, count
Inspect SDFile structure
41
6
11,750
def optional_data(lines):
    """Parse the data part of an SDFile record into a label -> value dict."""
    exp = re.compile(r">.*?<([\w ]+)>")  # Space should be accepted
    data = {}
    for i, line in enumerate(lines):
        matched = exp.match(line)
        if matched:
            # the value sits on the line following its "> <label>" header
            data[matched.group(1)] = lines[i + 1]
    return data
Parse SDFile data part into dict
75
9
11,751
def atoms(lines):
    """Parse an SDFile atom block into {1-based index: {"atom": Atom}}.

    Raises ValueError for an unknown element symbol.
    """
    # Convert sdf style charge to actual charge
    conv_charge_table = {0: 0, 1: 3, 2: 2, 3: 1, 4: 0, 5: -1, 6: -2, 7: -3}
    results = {}
    for i, line in enumerate(lines):
        symbol = line[31:34].rstrip()
        try:
            atom = Atom(symbol)
        except KeyError:
            raise ValueError(symbol)
        xpos = float(line[0:10])
        ypos = float(line[10:20])
        zpos = float(line[20:30])
        atom.coords = (xpos, ypos, zpos)
        atom.mass_diff = int(line[34:37])
        old_sdf_charge = int(line[37:40])
        atom.charge = conv_charge_table[old_sdf_charge]
        # sdf charge code 4 means "doublet radical"
        if old_sdf_charge == 4:
            atom.radical = 1
        # atom.stereo_flag = int(line[40:43])  # Not used
        # valence = int(line[46:49])
        # if valence:
        #     atom.valence = valence
        results[i + 1] = {"atom": atom}
    return results
Parse atom block into atom objects
277
7
11,752
def bonds(lines, atoms):
    """Parse an SDFile bond block into a nested adjacency dict of Bonds."""
    # Convert sdf style stereobond (see chem.model.bond.Bond)
    conv_stereo_table = {0: 0, 1: 1, 3: 3, 4: 3, 6: 2}
    results = {a: {} for a in atoms}
    for line in lines:
        bond = Bond()
        first = int(line[0:3])
        second = int(line[3:6])
        if first > second:
            bond.is_lower_first = 0
        order = int(line[6:9])
        if order < 4:
            bond.order = order
        bond.type = conv_stereo_table[int(line[9:12])]
        # undirected: register the same bond from both endpoints
        results[first][second] = {"bond": bond}
        results[second][first] = {"bond": bond}
    return results
Parse bond block into bond objects
190
7
11,753
def properties(lines):
    """Parse the molfile property block.

    Recognizes ``M  CHG`` (charge), ``M  RAD`` (radical) and ``M  ISO``
    (isotope) lines and returns ``{tag: [(atom_index, value), ...]}``.
    """
    parsed = {}
    supported = ("CHG", "RAD", "ISO")
    for line in lines:
        tag = line[3:6]
        if tag not in supported:
            # Other properties are not supported yet
            continue
        entry_count = int(line[6:9])
        # Entries are packed in fixed 8-column pairs after column 9
        parsed[tag] = [
            (int(line[10 + 8 * j:13 + 8 * j]), int(line[14 + 8 * j:17 + 8 * j]))
            for j in range(entry_count)
        ]
    return parsed
Parse properties block
142
4
11,754
def add_properties(props, mol):
    """Apply parsed property-block values to the molecule in place.

    Does nothing when *props* is empty; otherwise the property block
    supersedes all charge/radical/isotope values from the atom block.
    """
    if not props:
        return
    # The properties supersedes all charge and radical values in the atom block
    for _, atom in mol.atoms_iter():
        atom.charge = 0
        atom.multi = 1
        atom.mass = None
    for index, value in props.get("CHG", []):
        mol.atom(index).charge = value
    for index, value in props.get("RAD", []):
        mol.atom(index).multi = value
    for index, value in props.get("ISO", []):
        mol.atom(index).mass = value
apply properties to the molecule object
153
6
11,755
def molecule(lines):
    """Build a Compound from the molfile part of an SDF record.

    *lines* is the full molfile: 3 header lines, the counts line, then the
    atom block, bond block, and property block in fixed positions.
    """
    count_line = lines[3]
    num_atoms = int(count_line[0:3])
    num_bonds = int(count_line[3:6])
    # chiral_flag = int(count_line[12:15])  # Not used
    # num_prop = int(count_line[30:33])  # "No longer supported"
    compound = Compound()
    # Atom block follows the counts line; bond block follows the atoms
    compound.graph._node = atoms(lines[4:num_atoms + 4])
    compound.graph._adj = bonds(
        lines[num_atoms + 4:num_atoms + num_bonds + 4],
        compound.graph._node.keys())
    # Everything after the bond block is the property block
    props = properties(lines[num_atoms + num_bonds + 4:])
    add_properties(props, compound)
    return compound
Parse molfile part into molecule object
185
9
11,756
def mol_supplier(lines, no_halt, assign_descriptors):
    """Yield Compound objects parsed from a stream of SDFile lines.

    Args:
        lines: iterable of text lines covering one or more SDF records.
        no_halt: when True, parse errors produce a null molecule and a
            console message instead of raising.
        assign_descriptors: when True, run descriptor assignment on each
            parsed molecule.

    Yields:
        Compound objects; each carries the record's optional data in ``.data``.
    """
    def sdf_block(lns):
        """Split the line stream into (molfile_lines, optional_data_lines)."""
        mol = []
        opt = []
        is_mol = True
        for line in lns:
            if line.startswith("$$$$"):
                yield mol[:], opt[:]
                is_mol = True
                mol.clear()
                opt.clear()
            # FIX: the molfile terminator is "M  END" (two spaces) per the
            # CTfile spec; properties() above indexing line[3:6] for the tag
            # confirms the two-space form. A single-space "M END" never matches.
            elif line.startswith("M  END"):
                is_mol = False
            elif is_mol:
                mol.append(line.rstrip())
            else:
                opt.append(line.rstrip())
        if mol:
            # Final record without a trailing $$$$ delimiter
            yield mol, opt

    for i, (mol, opt) in enumerate(sdf_block(lines)):
        try:
            c = molecule(mol)
            if assign_descriptors:
                molutil.assign_descriptors(c)
        except ValueError as err:
            if not no_halt:
                raise ValueError("Unsupported symbol: {}".format(err))
            print("Unsupported symbol: {} (#{} in v2000reader)".format(
                err, i + 1))
            c = molutil.null_molecule(assign_descriptors)
        except RuntimeError as err:
            if not no_halt:
                raise RuntimeError("Failed to minimize ring: {}".format(err))
            print("Failed to minimize ring: {} (#{} in v2000reader)".format(
                err, i + 1))
            # BUG FIX: the original fell through without assigning ``c`` here,
            # yielding a stale molecule from the previous record (or raising
            # NameError on the first one). Substitute a null molecule instead.
            c = molutil.null_molecule(assign_descriptors)
        except Exception:
            # Was a bare ``except:``; narrowed so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            if not no_halt:
                print(traceback.format_exc())
                raise Exception("Unsupported Error")
            print("Unexpected error (#{} in v2000reader)".format(i + 1))
            c = molutil.null_molecule(assign_descriptors)
        c.data = optional_data(opt)
        yield c
Yields molecules generated from CTAB text
418
10
11,757
def mols_from_text(text, no_halt=True, assign_descriptors=True):
    """Yield molecules parsed from SDFile text (str or bytes)."""
    decoded = tx.decode(text) if isinstance(text, bytes) else text
    # Lazy line splitter. More efficient memory usage than str.split.
    splitter = re.compile(r"[^\n]*\n|.")
    pieces = (m.group(0) for m in splitter.finditer(decoded))
    for compound in mol_supplier(pieces, no_halt, assign_descriptors):
        yield compound
Returns molecules generated from sdfile text
128
8
11,758
def mol_from_text(text, assign_descriptors=True):
    """Parse CTAB text and return the first record as a Compound.

    Errors are not suppressed (``no_halt=False``).
    """
    return next(mols_from_text(text, False, assign_descriptors))
Parse CTAB text and return first one as a Compound object .
44
16
11,759
def mol_from_file(path, assign_descriptors=True):
    """Parse a CTAB file and return the first record as a Compound.

    Errors are not suppressed (``no_halt=False``).
    """
    return next(mols_from_file(path, False, assign_descriptors))
Parse CTAB file and return first one as a Compound object .
42
16
11,760
def load_from_resource(name):
    """Return the contents of a named file resource as text.

    A file in the user directory takes precedence; otherwise the bundled
    package resource under ``wdiffhtml/data/`` is returned.
    """
    filepath = Path(USER_DIR) / name
    if filepath.exists():
        # User override found on disk
        with filepath.open() as fh:
            return fh.read()
    else:
        # Fall back to the packaged default resource
        return resource_string('wdiffhtml', 'data/' + name).decode('utf-8')
Returns the contents of a file resource .
77
8
11,761
def connect(token, protocol=RtmProtocol, factory=WebSocketClientFactory,
            factory_kwargs=None, api_url=None, debug=False):
    """Create a new connection to the Slack Real-Time API.

    Requests an RTM session for *token*, builds a WebSocket factory for the
    session URL, and opens the connection. Returns the connector object
    produced by ``connectWS``.
    """
    if factory_kwargs is None:
        factory_kwargs = dict()
    metadata = request_session(token, api_url)
    wsfactory = factory(metadata.url, **factory_kwargs)
    if debug:
        warnings.warn('debug=True has been deprecated in autobahn 0.14.0')
    # Seed each protocol instance with the session metadata at construction
    wsfactory.protocol = lambda *a, **k: protocol(*a, **k)._seedMetadata(metadata)
    connection = connectWS(wsfactory)
    return connection
Creates a new connection to the Slack Real - Time API .
149
13
11,762
def next_block(self):
    """Produce the next compressed block close to the target size ``self.bs``.

    Starts by overshooting the desired compressed block size, then shrinks
    the input one byte at a time until the compressed output fits. Returns
    ``(crc32, input_size, compressed_data)`` or None when input is exhausted
    (or when the input cannot be reduced to fit).
    """
    assert self.pos <= self.input_len
    if self.pos == self.input_len:
        return None
    # Overshoot
    i = self.START_OVERSHOOT
    while True:
        try_size = int(self.bs * i)
        size = self.check_request_size(try_size)
        c, d = self.compress_next_chunk(size)
        if size != try_size:
            # Request was clamped (end of input); stop growing
            break
        if len(d) < self.bs:
            i += self.OVERSHOOT_INCREASE
        else:
            break
    # Reduce by one byte until we hit the target
    while True:
        if len(d) <= self.bs:
            # Commit: keep the compressor state that produced this block
            self.c = c
            # self.c = self.factory()
            crc32 = zlib.crc32(self.get_input(size), 0xffffffff) & 0xffffffff
            self.pos += size
            self.compressed_bytes += len(d)
            return crc32, size, d
        size -= 1
        if size == 0:
            return None
        c, d = self.compress_next_chunk(size)
This could probably be improved ; at the moment it starts by trying to overshoot the desired compressed block size then it reduces the input bytes one by one until it has met the required block size
241
38
11,763
def set_up_logging(log_file, console_log_level):
    """Configure root logging and return the root logger.

    Installs a DEBUG-level file handler writing to *log_file* and a console
    handler at *console_log_level*, both using a ``{}``-style formatter.
    """
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        "{asctime} {levelname} ({name}): {message}", style='{')
    file_handler = logging.FileHandler(str(log_file))
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(formatter)
    console_handler = logging.StreamHandler()
    console_handler.setLevel(console_log_level)
    console_handler.setFormatter(formatter)
    for handler in (file_handler, console_handler):
        root.addHandler(handler)
    return root
Configure logging settings and return a logger object .
148
10
11,764
def main():
    """Run Chronophore based on the command line arguments.

    Sets up directories, logging and the SQLite database, then launches
    either the Qt UI (default) or the Tk UI (``--tk``).
    """
    args = get_args()
    # Make Chronophore's directories and files in $HOME
    DATA_DIR = pathlib.Path(appdirs.user_data_dir(__title__))
    LOG_FILE = pathlib.Path(appdirs.user_log_dir(__title__), 'debug.log')
    os.makedirs(str(DATA_DIR), exist_ok=True)
    os.makedirs(str(LOG_FILE.parent), exist_ok=True)
    if args.version:
        print('{} {}'.format(__title__, __version__))
        raise SystemExit
    # Console verbosity: --debug > --verbose > default (warnings only)
    if args.debug:
        CONSOLE_LOG_LEVEL = logging.DEBUG
    elif args.verbose:
        CONSOLE_LOG_LEVEL = logging.INFO
    else:
        CONSOLE_LOG_LEVEL = logging.WARNING
    logger = set_up_logging(LOG_FILE, CONSOLE_LOG_LEVEL)
    logger.debug('-' * 80)
    logger.info('{} {}'.format(__title__, __version__))
    logger.debug('Log File: {}'.format(LOG_FILE))
    logger.debug('Data Directory: {}'.format(DATA_DIR))
    # --testdb uses a separate throwaway database seeded with test users below
    if args.testdb:
        DATABASE_FILE = DATA_DIR.joinpath('test.sqlite')
        logger.info('Using test database.')
    else:
        DATABASE_FILE = DATA_DIR.joinpath('chronophore.sqlite')
    logger.debug('Database File: {}'.format(DATABASE_FILE))
    engine = create_engine('sqlite:///{}'.format(str(DATABASE_FILE)))
    Base.metadata.create_all(engine)
    Session.configure(bind=engine)
    if args.log_sql:
        logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
    if args.testdb:
        add_test_users(session=Session())
    controller.flag_forgotten_entries(session=Session())
    if args.tk:
        from chronophore.tkview import TkChronophoreUI
        TkChronophoreUI()
    else:
        try:
            from PyQt5.QtWidgets import QApplication
        except ImportError:
            print('Error: PyQt5, which chronophore uses for its'
                  + ' graphical interface, is not installed.'
                  + "\nInstall it with 'pip install PyQt5'"
                  + " or use the old Tk ui with 'chronophore --tk'.")
            raise SystemExit
        else:
            from chronophore.qtview import QtChronophoreUI
            app = QApplication(sys.argv)
            chrono_ui = QtChronophoreUI()
            chrono_ui.show()
            # Qt path exits here; the trailing debug line is Tk-path only
            sys.exit(app.exec_())
    logger.debug('{} stopping'.format(__title__))
Run Chronophore based on the command line arguments .
667
11
11,765
def filter(self, record):
    """Inject context info into *record*; always returns True (never drops).

    When a context format is configured, ``record.context`` is set to the
    sorted ``key=val`` pairs of the current context (empty string if the
    context has no data).
    """
    fmt = LogManager.spec.context_format
    if fmt:
        data = self.context.to_dict()
        if data:
            # Skip pairs with falsy key or value
            record.context = fmt % ",".join(
                "%s=%s" % (key, val)
                for key, val in sorted(data.items())
                if key and val)
        else:
            record.context = ""
    return True
Determines if the record should be logged and injects context info into the record . Always returns True
85
21
11,766
def enable_faulthandler(cls, signum=signal.SIGUSR1):
    """Enable dumping thread stack traces on *signum* (Java SIGQUIT style).

    Passing a falsy *signum* disables the handler instead. No-op when no
    file handler is configured or the faulthandler module is unavailable.
    """
    with cls._lock:
        if not signum:
            cls._disable_faulthandler()
            return
        if not cls.file_handler or faulthandler is None:
            return
        cls.faulthandler_signum = signum
        # Dumps go to the same stream as the log file
        dump_file = cls.file_handler.stream
        faulthandler.enable(file=dump_file, all_threads=True)
        faulthandler.register(signum, file=dump_file, all_threads=True, chain=False)
Enable dumping thread stack traces when specified signals are received similar to java s handling of SIGQUIT
142
19
11,767
def override_spec(cls, **kwargs):
    """Override both ``spec`` and ``_default_spec`` with the given values."""
    for spec in (cls._default_spec, cls.spec):
        spec.set(**kwargs)
Override spec and _default_spec with given values
44
12
11,768
def _fix_logging_shortcuts(cls):
    """Patch the stdlib logging shortcuts to report the caller's module name.

    ``logging.info()`` etc. normally log under the root logger; the wrappers
    installed here resolve the real caller and log through its own logger.
    Also disables unused record fields (process/thread/source location) when
    the configured format does not reference them.
    """
    if cls.is_using_format("%(pathname)s %(filename)s %(funcName)s %(module)s"):
        logging._srcfile = cls._logging_snapshot._srcfile
    else:
        # Source-location fields unused: skip the costly frame inspection
        logging._srcfile = None
    logging.logProcesses = cls.is_using_format("%(process)d")
    logging.logThreads = cls.is_using_format("%(thread)d %(threadName)s")

    def getframe():
        # Depth 4 reaches past the wrapper layers to the real caller
        return sys._getframe(4)

    def log(level, msg, *args, **kwargs):
        """Wrapper to make logging.info() etc report the right module %(name)"""
        name = get_caller_name()
        logger = logging.getLogger(name)
        try:
            # Temporarily swap frame lookup so %(module)s etc. are correct
            logging.currentframe = getframe
            logger.log(level, msg, *args, **kwargs)
        finally:
            logging.currentframe = ORIGINAL_CF

    def wrap(level, **kwargs):
        """Wrap corresponding logging shortcut function"""
        original = getattr(logging, logging.getLevelName(level).lower())
        f = partial(log, level, **kwargs)
        f.__doc__ = original.__doc__
        return f

    logging.critical = wrap(logging.CRITICAL)
    logging.fatal = logging.critical
    logging.error = wrap(logging.ERROR)
    logging.exception = partial(logging.error, exc_info=True)
    logging.warning = wrap(logging.WARNING)
    logging.info = wrap(logging.INFO)
    logging.debug = wrap(logging.DEBUG)
    logging.log = log
Fix standard logging shortcuts to correctly report logging module .
382
10
11,769
def _parse_single ( self , text , tagname ) : return minidom . parseString ( text ) . getElementsByTagName ( tagname ) [ 0 ] . firstChild . data
A hack to get the content of the XML responses from the CAS server .
43
15
11,770
def quick(self, q, context=None, task_name="quickie", system=False):
    """Run a quick (synchronous) job and return its string result."""
    params = {
        "qry": q,
        # Fall back to the client's default context
        "context": context or self.context,
        "taskname": task_name,
        "isSystem": system,
    }
    response = self._send_request("ExecuteQuickJob", params=params)
    return self._parse_single(response.text, "string")
Run a quick job .
102
5
11,771
def submit(self, q, context=None, task_name="casjobs", estimate=30):
    """Submit an asynchronous job and return its job id (int)."""
    params = {
        "qry": q,
        # Fall back to the client's default context
        "context": context or self.context,
        "taskname": task_name,
        "estimate": estimate,
    }
    response = self._send_request("SubmitJob", params=params)
    return int(self._parse_single(response.text, "long"))
Submit a job to CasJobs .
110
8
11,772
def status(self, job_id):
    """Check a job's status; returns ``(code, description)``."""
    response = self._send_request("GetJobStatus", params={"jobid": job_id})
    code = int(self._parse_single(response.text, "int"))
    return code, self.status_codes[code]
Check the status of a job .
72
7
11,773
def monitor(self, job_id, timeout=5):
    """Poll a job until it reaches a terminal state; return that status.

    Terminal status codes are 3, 4 and 5; *timeout* is the poll interval
    in seconds.
    """
    while True:
        current = self.status(job_id)
        logging.info("Monitoring job: %d - Status: %d, %s"
                     % (job_id, current[0], current[1]))
        if current[0] in [3, 4, 5]:
            return current
        time.sleep(timeout)
Monitor the status of a job .
86
7
11,774
def request_output(self, table, outtype):
    """Request extraction of *table*; returns the extraction job id.

    *outtype* must be one of CSV, DataSet, FITS or VOTable.
    """
    job_types = ["CSV", "DataSet", "FITS", "VOTable"]
    assert outtype in job_types
    response = self._send_request(
        "SubmitExtractJob", params={"tableName": table, "type": outtype})
    return int(self._parse_single(response.text, "long"))
Request the output for a given table .
109
8
11,775
def get_output(self, job_id, outfn):
    """Download the output file of a finished extraction job.

    *outfn* may be a writable file-like object (its ``write`` is used) or a
    filename to write to. Raises Exception when the job is not finished or
    the HTTP download fails. Uses the third-party ``requests`` library.
    """
    job_info = self.job_info(jobid=job_id)[0]
    # Make sure that the job is finished.
    status = int(job_info["Status"])
    if status != 5:
        raise Exception("The status of job %d is %d (%s)"
                        % (job_id, status, self.status_codes[status]))
    # Try to download the output file.
    remotefn = job_info["OutputLoc"]
    r = requests.get(remotefn)
    # Make sure that the request went through.
    code = r.status_code
    if code != 200:
        raise Exception("Getting file %s yielded status: %d"
                        % (remotefn, code))
    # Save the data to a file.
    try:
        outfn.write(r.content)
    except AttributeError:
        # *outfn* is a path, not a file object.
        # FIX: use a context manager so the handle is closed even if the
        # write raises (the original open/write/close leaked on error).
        with open(outfn, "wb") as f:
            f.write(r.content)
Download an output file given the id of the output request job .
222
13
11,776
def request_and_get_output(self, table, outtype, outfn):
    """Request an output file, wait for it, then download it to *outfn*."""
    job_id = self.request_output(table, outtype)
    result = self.monitor(job_id)
    if result[0] != 5:
        raise Exception("Output request failed.")
    self.get_output(job_id, outfn)
Shorthand for requesting an output file and then downloading it when ready .
75
15
11,777
def drop_table(self, table):
    """Drop *table* from the MyDB context; raises if the job fails."""
    job_id = self.submit("DROP TABLE %s" % table, context="MYDB")
    result = self.monitor(job_id)
    if result[0] != 5:
        raise Exception("Couldn't drop table %s" % table)
Drop a table from the MyDB context .
68
9
11,778
def count(self, q):
    """Run ``SELECT COUNT(*)`` over the query fragment *q* and return the int.

    *q* is the part of the query after the select list, e.g. ``"FROM t"``.
    """
    full_query = "SELECT COUNT(*) %s" % q
    # First response line is a header; the count is on the second line
    return int(self.quick(full_query).split("\n")[1])
Shorthand for counting the results of a specific query .
41
12
11,779
def list_tables(self):
    """Return the names of the tables in MyDB (quotes stripped)."""
    q = 'SELECT Distinct TABLE_NAME FROM information_schema.TABLES'
    res = self.quick(q, context='MYDB', task_name='listtables', system=True)
    # the first line is a header and the last is always empty
    # also, the table names have " as the first and last characters
    return [row[1:-1] for row in res.split('\n')[1:-1]]
Lists the tables in mydb .
112
8
11,780
def multiply_and_add(n):
    """Multiply *n* by the configured multiplier, then add the configured offset.

    The (multiplier, offset) pair is resolved through the ``di`` dependency
    injector, keyed on this function object itself.
    """
    multiplier, offset = di.resolver.unpack(multiply_and_add)
    return (multiplier * n) + offset
Multiply the given number n by some configured multiplier and then add a configured offset .
36
18
11,781
def flush_buffer(self):
    """Join the buffered lines into one message, send it, and clear the buffer."""
    if len(self.buffer) > 0:
        return_value = ''.join(self.buffer)
        self.buffer.clear()
        self.send_message(return_value)
        # NOTE(review): indentation reconstructed from a flattened source —
        # the timestamp update may originally have been unconditional (outside
        # this if); confirm against upstream before relying on flush timing.
        self.last_flush_date = datetime.datetime.now()
Flush the buffer of the tail
64
7
11,782
def set(self, *args, **kwargs):
    """Conveniently set one or more fields at a time.

    Each positional *arg* (when not None) is treated as a source object:
    every ``__slots__`` field is copied from it (missing attributes become
    UNSET). Keyword arguments then set individual fields by name.
    """
    if args:
        for arg in args:
            if arg is not None:
                for name in self.__slots__:
                    self._set(name, getattr(arg, name, UNSET))
    # Keyword overrides are applied after (and independently of) *args
    for name in kwargs:
        self._set(name, kwargs.get(name, UNSET))
Conveniently set one or more fields at a time .
83
12
11,783
def enable(self):
    """Enable contextual logging by installing the filter (idempotent)."""
    with self._lock:
        if self.filter is not None:
            return  # already enabled
        self.filter = self._filter_type(self)
Enable contextual logging
32
3
11,784
def set_threadlocal(self, **values):
    """Replace the current thread's logging context with the given values."""
    with self._lock:
        self._ensure_threadlocal()
        self._tpayload.context = values
Set the current thread's logging context to specified values
39
9
11,785
def add_threadlocal(self, **values):
    """Merge the given values into the current thread's logging context."""
    with self._lock:
        self._ensure_threadlocal()
        self._tpayload.context.update(**values)
Add values to the current thread's logging context
44
8
11,786
def add_global(self, **values):
    """Merge the given values into the global (all-thread) logging context."""
    with self._lock:
        self._ensure_global()
        self._gpayload.update(**values)
Add values to global logging context
40
6
11,787
def display_terminal_carbon(mol):
    """Mark terminal atoms (exactly one neighbor) as visible for drawing."""
    terminals = (atom for index, atom in mol.atoms_iter()
                 if mol.neighbor_count(index) == 1)
    for atom in terminals:
        atom.visible = True
Set visible = True to the terminal carbon atoms .
41
10
11,788
def equalize_terminal_double_bond(mol):
    """Draw double bonds to terminal atoms in the equalized style (type 2)."""
    for index, _ in mol.atoms_iter():
        if mol.neighbor_count(index) != 1:
            continue
        # Terminal atom: its single incident bond
        bond = list(mol.neighbors(index).values())[0]
        if bond.order == 2:
            bond.type = 2
Show equalized double bond if it is connected to terminal atom .
73
13
11,789
def spine_to_terminal_wedge(mol):
    """Flip stereo wedges so they point from the spine toward terminal atoms."""
    for i, a in mol.atoms_iter():
        if mol.neighbor_count(i) == 1:
            # Terminal atom: its single neighbor and connecting bond
            ni, nb = list(mol.neighbors(i).items())[0]
            # NB: chained comparison — (ni > i) and (i != nb.is_lower_first)
            if nb.order == 1 and nb.type in (1, 2) and ni > i != nb.is_lower_first:
                # Reverse the wedge direction and swap up/down type
                nb.is_lower_first = not nb.is_lower_first
                nb.type = {1: 2, 2: 1}[nb.type]
Arrange stereo wedge direction from spine to terminal atom
130
10
11,790
def format_ring_double_bond(mol):
    """Orient double bonds around rings (inner line drawn inside the ring).

    Requires Topology and ScaleAndCenter descriptors. Larger rings are
    processed first so shared bonds take the bigger ring's orientation.
    """
    mol.require("Topology")
    mol.require("ScaleAndCenter")
    for r in sorted(mol.rings, key=len, reverse=True):
        vertices = [mol.atom(n).coords for n in r]
        try:
            # Walk the ring in a consistent (counter-clockwise) direction
            if geometry.is_clockwise(vertices):
                cpath = iterator.consecutive(itertools.cycle(r), 2)
            else:
                cpath = iterator.consecutive(itertools.cycle(reversed(r)), 2)
        except ValueError:
            # Degenerate ring geometry: skip it
            continue
        for _ in r:
            u, v = next(cpath)
            b = mol.bond(u, v)
            if b.order == 2:
                b.type = int((u > v) == b.is_lower_first)
Set double bonds around the ring .
179
7
11,791
def ready_to_draw ( mol ) : copied = molutil . clone ( mol ) # display_terminal_carbon(mol) equalize_terminal_double_bond ( copied ) # spine_to_terminal_wedge(copied) scale_and_center ( copied ) format_ring_double_bond ( copied ) return copied
Shortcut function to prepare molecule to draw . Overwrite this function for customized appearance . It is recommended to clone the molecule before draw because all the methods above are destructive .
77
34
11,792
def update_from_object(self, obj, criterion=lambda key: key.isupper()):
    """Update the dict from the attributes of a module, class or other object.

    *obj* may also be a dotted import path string, in which case it is
    imported first. Only attribute names passing *criterion* (by default,
    all-uppercase names) are copied.
    """
    log.debug('Loading config from {0}'.format(obj))
    # NOTE(review): ``basestring`` is Python 2 only — this module appears to
    # target Python 2; confirm before running under Python 3.
    if isinstance(obj, basestring):
        if '.' in obj:
            # Dotted path: import the module, then take the final attribute
            path, name = obj.rsplit('.', 1)
            mod = __import__(path, globals(), locals(), [name], 0)
            obj = getattr(mod, name)
        else:
            obj = __import__(obj, globals(), locals(), [], 0)
    self.update((key, getattr(obj, key)) for key in filter(criterion, dir(obj)))
Update dict from the attributes of a module class or other object .
157
13
11,793
def update_from_env_namespace(self, namespace):
    """Update the dict from environment variables with the given prefix.

    The prefix (namespace) is stripped from the keys.
    """
    self.update(ConfigLoader(os.environ).namespace(namespace))
Update dict from any environment variables that have a given prefix .
33
12
11,794
def update_from(self, obj=None, yaml_env=None, yaml_file=None,
                json_env=None, json_file=None, env_namespace=None):
    """Update the dict from several configuration sources at once.

    Sources are applied in a fixed order (object, YAML env/file, JSON
    env/file, env namespace); later sources override earlier keys.
    """
    sources = (
        (obj, self.update_from_object),
        (yaml_env, self.update_from_yaml_env),
        (yaml_file, self.update_from_yaml_file),
        (json_env, self.update_from_json_env),
        (json_file, self.update_from_json_file),
        (env_namespace, self.update_from_env_namespace),
    )
    for value, updater in sources:
        if value:
            updater(value)
Update dict from several sources at once .
163
8
11,795
def namespace(self, namespace, key_transform=lambda key: key):
    """Return a copy containing only keys from *namespace*, prefix stripped.

    The namespace is normalized to end with exactly one underscore;
    *key_transform* is applied to each stripped key.
    """
    prefix = namespace.rstrip('_') + '_'
    plen = len(prefix)
    return ConfigLoader(
        (key_transform(key[plen:]), value)
        for key, value in self.items()
        if key.startswith(prefix))
Return a copy with only the keys from a given namespace .
75
12
11,796
def namespace_lower(self, namespace):
    """Return a copy with only the keys from *namespace*, lower-cased."""
    return self.namespace(namespace, key_transform=lambda key: key.lower())
Return a copy with only the keys from a given namespace lower - cased .
30
16
11,797
def sanitize(s, normalize_whitespace=True, normalize_unicode=True,
             form='NFKC', enforce_encoding=True, encoding='utf-8'):
    """Normalize a string.

    Optionally drops characters that do not survive *encoding*, applies
    Unicode normalization *form*, and collapses whitespace runs to single
    spaces (trimming the ends).
    """
    if enforce_encoding:
        # Round-trip through the target encoding, silently dropping
        # characters it cannot represent
        s = s.encode(encoding, errors='ignore').decode(encoding, errors='ignore')
    if normalize_unicode:
        s = unicodedata.normalize(form, s)
    if normalize_whitespace:
        s = re.sub(r'\s+', ' ', s).strip()
    return s
Normalize a string
130
4
11,798
def get_ticket_for_sns_token(self):
    """Build the sns_token ticket (request-body post data).

    Shortcut returning the openid and persistent_code for the current
    request; logs the request method and URL first.
    """
    self.logger.info("%s\t%s" % (self.request_method, self.request_url))
    return {
        "openid": self.get_openid(),
        "persistent_code": self.get_persistent_code(),
    }
This is a shortcut for getting the sns_token as a post data of request body .
78
19
11,799
def reinitialize ( ) : from ozelot import client # import all additional models needed in this project # noinspection PyUnresolvedReferences from ozelot . orm . target import ORMTargetMarker client = client . get_client ( ) base . Base . drop_all ( client ) base . Base . create_all ( client )
Drop all tables for all models then re - create them
75
11