idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
10,500
def _pathway_side_information ( pathway_positive_series , pathway_negative_series , index ) : positive_series_label = pd . Series ( [ "pos" ] * len ( pathway_positive_series ) ) negative_series_label = pd . Series ( [ "neg" ] * len ( pathway_negative_series ) ) side_information = positive_series_label . append ( negative_series_label ) side_information . index = index side_information . name = "side" return side_information
Create the pandas . Series containing the side labels that correspond to each pathway based on the user - specified gene signature definition .
114
25
10,501
def _significant_pathways_dataframe ( pvalue_information , side_information , alpha ) : significant_pathways = pd . concat ( [ pvalue_information , side_information ] , axis = 1 ) # fdr_bh: false discovery rate, Benjamini & Hochberg (1995, 2000) below_alpha , qvalues , _ , _ = multipletests ( significant_pathways [ "p-value" ] , alpha = alpha , method = "fdr_bh" ) below_alpha = pd . Series ( below_alpha , index = pvalue_information . index , name = "pass" ) qvalues = pd . Series ( qvalues , index = pvalue_information . index , name = "q-value" ) significant_pathways = pd . concat ( [ significant_pathways , below_alpha , qvalues ] , axis = 1 ) significant_pathways = significant_pathways [ significant_pathways [ "pass" ] ] significant_pathways . drop ( "pass" , axis = 1 , inplace = True ) significant_pathways . loc [ : , "pathway" ] = significant_pathways . index return significant_pathways
Create the significant pathways pandas . DataFrame . Given the p - values corresponding to each pathway in a feature apply the FDR correction for multiple testing and remove those that do not have a q - value of less than alpha .
261
45
10,502
async def flush(self, request: Request, stacks: List[Stack]):
    """For all stacks to be sent, append a pause after each text layer.

    Layers are expanded (pauses inserted), re-split at text boundaries,
    cleaned, then forwarded to the next middleware.
    """
    ns = await self.expand_stacks(request, stacks)
    ns = self.split_stacks(ns)
    ns = self.clean_stacks(ns)
    await self.next(request, [Stack(x) for x in ns])
For all stacks to be sent append a pause after each text layer .
73
14
10,503
def split_stacks(self, stacks: List[List[BaseLayer]]) -> List[List[BaseLayer]]:
    """Split every stack at each RawText layer.

    A text layer marks the beginning of a new sub-stack. All layers are
    preserved; only the grouping changes, and empty groups are never
    emitted.
    """
    out: List[List[BaseLayer]] = []
    for stack in stacks:
        group: List[BaseLayer] = []
        for layer in stack:
            # A text layer opens a new group, except when the current
            # group is still empty (start of the stack).
            if isinstance(layer, lyr.RawText) and group:
                out.append(group)
                group = [layer]
            else:
                group.append(layer)
        if group:
            out.append(group)
    return out
First step of the stacks cleanup process. We consider that if a text layer shows up inside a stack, then it's the beginning of a new stack, and we split upon that.
103
36
10,504
async def expand(self, request: Request, layer: BaseLayer):
    """Expand a layer into a sequence of layers including reading pauses.

    Text-bearing layers are rendered (when needed) and each emitted text
    is followed by a Sleep layer sized by reading_time(); any other
    layer passes through unchanged.
    """
    if isinstance(layer, lyr.RawText):
        t = self.reading_time(layer.text)
        yield layer
        yield lyr.Sleep(t)
    elif isinstance(layer, lyr.MultiText):
        # Multi-line rendering: one RawText + Sleep pair per line.
        texts = await render(layer.text, request, True)
        for text in texts:
            t = self.reading_time(text)
            yield lyr.RawText(text)
            yield lyr.Sleep(t)
    elif isinstance(layer, lyr.Text):
        text = await render(layer.text, request)
        t = self.reading_time(text)
        yield lyr.RawText(text)
        yield lyr.Sleep(t)
    else:
        yield layer
Expand a layer into a list of layers including the pauses .
158
13
10,505
def reading_time(self, text: TextT):
    """Seconds the user needs to read a bubble containing *text*.

    Word count times the per-word period (settings give words/minute),
    plus a fixed per-bubble startup delay.
    """
    wc = re.findall(r'\w+', text)
    period = 60.0 / settings.USERS_READING_SPEED
    return float(len(wc)) * period + settings.USERS_READING_BUBBLE_START
Computes the time in seconds that the user will need to read a bubble containing the text passed as parameter .
72
22
10,506
async def flush(self, request: Request, stacks: List[Stack]):
    """Add a typing stack after each stack.

    If the last stack ends up being a lone Typing stack, deactivate it
    so the conversation does not end on a dangling typing indicator.
    """
    ns: List[Stack] = []
    for stack in stacks:
        ns.extend(self.typify(stack))
    if len(ns) > 1 and ns[-1] == Stack([lyr.Typing()]):
        ns[-1].get_layer(lyr.Typing).active = False
    await self.next(request, ns)
Add a typing stack after each stack .
99
8
10,507
async def pre_handle(self, request: Request, responder: 'Responder'):
    """Show the typing indicator as soon as the message is received."""
    responder.send([lyr.Typing()])
    await responder.flush(request)
    # Clear so the typing stack is not sent again with the real reply.
    responder.clear()
    await self.next(request, responder)
Start typing right when the message is received .
60
9
10,508
async def get_friendly_name(self) -> Text:
    """Return the user's first name as friendly name.

    The cached user object may be incomplete; in that case the full
    user is fetched first.
    """
    if 'first_name' not in self._user:
        user = await self._get_full_user()
    else:
        user = self._user
    return user.get('first_name')
Let's use the first name of the user as the friendly name. In some cases the user object is incomplete, and in those cases the full user is fetched.
59
32
10,509
def _get_chat(self) -> Dict:
    """Locate the chat object wherever Telegram put it in this update.

    Callback queries without a message fall back to a synthetic dict
    holding the chat_instance id; inline queries return the sender
    dict marked with is_inline_query=True.
    """
    if 'callback_query' in self._update:
        query = self._update['callback_query']
        if 'message' in query:
            return query['message']['chat']
        else:
            return {'id': query['chat_instance']}
    elif 'inline_query' in self._update:
        return patch_dict(
            self._update['inline_query']['from'],
            is_inline_query=True,
        )
    elif 'message' in self._update:
        return self._update['message']['chat']
As Telegram changes where the chat object is located in the response this method tries to be smart about finding it in the right place .
143
26
10,510
def send(self, stack: Layers):
    """Queue a stack for sending, intercepting Telegram-specific layers.

    Any AnswerCallbackQuery layer is stripped from the stack and kept
    aside (answered at flush time); Update/Reply/AnswerInlineQuery
    layers get their ids filled in from the incoming update.
    """
    if not isinstance(stack, Stack):
        stack = Stack(stack)

    if 'callback_query' in self._update and stack.has_layer(Update):
        layer = stack.get_layer(Update)
        try:
            msg = self._update['callback_query']['message']
        except KeyError:
            # Inline messages carry no chat/message id, only an inline id.
            layer.inline_message_id = self._update['callback_query']['inline_message_id']
        else:
            layer.chat_id = msg['chat']['id']
            layer.message_id = msg['message_id']

    if stack.has_layer(AnswerCallbackQuery):
        # Keep the ACQ for flush() and drop it from the outgoing stack.
        self._acq = stack.get_layer(AnswerCallbackQuery)
        stack = Stack([l for l in stack.layers if not isinstance(l, AnswerCallbackQuery)])

    if stack.has_layer(Reply):
        layer = stack.get_layer(Reply)
        if 'message' in self._update:
            layer.message = self._update['message']
        elif 'callback_query' in self._update:
            layer.message = self._update['callback_query']['message']

    if 'inline_query' in self._update and stack.has_layer(AnswerInlineQuery):
        a = stack.get_layer(AnswerInlineQuery)
        a.inline_query_id = self._update['inline_query']['id']

    # An ACQ-only stack may now be empty; nothing left to buffer then.
    if stack.layers:
        return super(TelegramResponder, self).send(stack)
Intercept any potential AnswerCallbackQuery before adding the stack to the output buffer .
352
16
10,511
async def flush(self, request: BernardRequest):
    """Answer any stored callback query, then flush the buffered stacks."""
    if self._acq and 'callback_query' in self._update:
        try:
            cbq_id = self._update['callback_query']['id']
        except KeyError:
            pass  # no query id to answer; skip silently
        else:
            await self.platform.call(
                'answerCallbackQuery',
                **(await self._acq.serialize(cbq_id))
            )
    return await super(TelegramResponder, self).flush(request)
If there's an AnswerCallbackQuery scheduled for reply, place the call before actually flushing the buffer.
111
20
10,512
async def receive_updates(self, request: Request):
    """Webhook handler for updates from Telegram.

    Decodes the JSON body, dispatches message + responder to the
    notification chain and acknowledges with a JSON response.
    """
    body = await request.read()
    try:
        content = ujson.loads(body)
    except ValueError:
        return json_response({
            'error': True,
            'message': 'Cannot decode body',
        }, status=400)
    logger.debug('Received from Telegram: %s', content)
    message = TelegramMessage(content, self)
    responder = TelegramResponder(content, self)
    await self._notify(message, responder)
    return json_response({
        'error': False,
    })
Handle updates from Telegram
129
4
10,513
def make_url(self, method):
    """Build the Telegram API URL for *method*, URL-escaping the token."""
    conf = self.settings()
    return TELEGRAM_URL.format(
        token=quote(conf['token']),
        method=quote(method),
    )
Generate a Telegram URL for this bot .
47
9
10,514
async def call(self, method: Text, _ignore: Set[Text] = None, **params: Any):
    """POST a JSON-encoded call to the Telegram Bot API *method*.

    Error descriptions listed in *_ignore* are tolerated silently.
    Returns the decoded API response.
    """
    logger.debug('Calling Telegram %s(%s)', method, params)
    url = self.make_url(method)
    headers = {
        'content-type': 'application/json',
    }
    post = self.session.post(
        url,
        data=ujson.dumps(params),
        headers=headers,
    )
    async with post as r:
        out = await self._handle_telegram_response(r, _ignore)
        logger.debug('Telegram replied: %s', out)
        return out
Call a telegram method
138
5
10,515
async def _handle_telegram_response(self, response, ignore=None):
    """Parse a Telegram API response, raising on errors.

    Errors whose description appears in *ignore* return None instead of
    raising; undecodable/malformed payloads raise a generic error.
    """
    if ignore is None:
        ignore = set()
    ok = response.status == 200
    try:
        data = await response.json()
        if not ok:
            desc = data['description']
            if desc in ignore:
                return
            raise PlatformOperationError('Telegram replied with an error: {}'.format(desc))
    except (ValueError, TypeError, KeyError):
        raise PlatformOperationError('An unknown Telegram error occurred')
    return data
Parse a response from Telegram. If there's an error, an exception will be raised with an explanatory message.
113
23
10,516
def make_hook_path(self):
    """Derive the webhook path from a SHA-256 digest of the bot token.

    Hashing keeps the secret token itself out of the URL while still
    making the path unguessable.
    """
    token = self.settings()['token']
    digest = sha256(token.encode()).hexdigest()
    return f'/hooks/telegram/{digest}'
Compute the path to the hook URL
64
8
10,517
async def _deferred_init(self):
    """Register the webhook URL onto which Telegram should send updates."""
    hook_path = self.make_hook_path()
    url = urljoin(settings.BERNARD_BASE_URL, hook_path)
    await self.call('setWebhook', url=url)
    logger.info('Setting Telegram webhook to "%s"', url)
Register the web hook onto which Telegram should send its messages .
76
12
10,518
async def _send_text(self, request: Request, stack: Stack, parse_mode: Optional[Text] = None):
    """Base function for sending text.

    All but the last text part are sent as plain messages; the last one
    carries the reply markup and optional parse mode, and turns into an
    editMessageText call when the stack has an Update layer.
    """
    parts = []
    chat_id = request.message.get_chat_id()

    for layer in stack.layers:
        if isinstance(layer, (lyr.Text, lyr.RawText, lyr.Markdown)):
            text = await render(layer.text, request)
            parts.append(text)

    for part in parts[:-1]:
        await self.call(
            'sendMessage',
            text=part,
            chat_id=chat_id,
        )

    msg = {
        'text': parts[-1],
        'chat_id': chat_id,
    }

    if parse_mode is not None:
        msg['parse_mode'] = parse_mode

    await set_reply_markup(msg, request, stack)

    if stack.has_layer(Reply):
        reply = stack.get_layer(Reply)
        if reply.message:
            msg['reply_to_message_id'] = reply.message['message_id']

    if stack.has_layer(Update):
        update = stack.get_layer(Update)
        if update.inline_message_id:
            # Inline messages are addressed by inline id, not chat id.
            msg['inline_message_id'] = update.inline_message_id
            del msg['chat_id']
        else:
            msg['message_id'] = update.message_id
        # "message is not modified" is an expected, ignorable API error.
        await self.call('editMessageText', {'Bad Request: message is not modified'}, **msg)
    else:
        await self.call('sendMessage', **msg)
Base function for sending text
350
5
10,519
async def _send_sleep(self, request: Request, stack: Stack):
    """Pause for the duration carried by the stack's Sleep layer."""
    duration = stack.get_layer(lyr.Sleep).duration
    await sleep(duration)
Sleep for the amount of time specified in the Sleep layer
38
11
10,520
async def _send_typing(self, request: Request, stack: Stack):
    """Send the 'typing' chat action when the Typing layer is active.

    Telegram stops the indicator by itself when a message arrives, so
    there is no stop message to send; only the active case needs a call.
    """
    t = stack.get_layer(lyr.Typing)
    if t.active:
        await self.call(
            'sendChatAction',
            chat_id=request.message.get_chat_id(),
            action='typing',
        )
In telegram the typing stops when the message is received . Thus there is no typing stops messages to send . The API is only called when typing must start .
73
32
10,521
def create_app ( metadata , processors = None , pipes = None ) : instance = Application ( metadata ) # In order to avoid code duplication, we use existing built-in import # processor to import and register built-in processors on the application # instance. This is, to be honest, the main purpose of this factory # function, because otherwise one must create an Application instance # directly. import_processors . process ( instance , [ ] , imports = [ "archive = holocron.processors.archive:process" , "commonmark = holocron.processors.commonmark:process" , "feed = holocron.processors.feed:process" , "frontmatter = holocron.processors.frontmatter:process" , "import-processors = holocron.processors.import_processors:process" , "jinja2 = holocron.processors.jinja2:process" , "markdown = holocron.processors.markdown:process" , "metadata = holocron.processors.metadata:process" , "pipe = holocron.processors.pipe:process" , "prettyuri = holocron.processors.prettyuri:process" , "restructuredtext = holocron.processors.restructuredtext:process" , "save = holocron.processors.save:process" , "sitemap = holocron.processors.sitemap:process" , "source = holocron.processors.source:process" , "todatetime = holocron.processors.todatetime:process" , "when = holocron.processors.when:process" , ] ) for name , processor in ( processors or { } ) . items ( ) : instance . add_processor ( name , processor ) for name , pipeline in ( pipes or { } ) . items ( ) : instance . add_pipe ( name , pipeline ) return instance
Return an application instance with processors & pipes setup .
425
10
10,522
async def page_view(self, url: str, title: str, user_id: str, user_lang: str = '') -> None:
    """Log a page view hit with Google Analytics.

    Uses the Measurement Protocol /collect endpoint; the user id is
    anonymized through hash_user_id() before being sent as client id.
    """
    ga_url = 'https://www.google-analytics.com/collect'
    args = {
        'v': '1',
        'ds': 'web',
        'de': 'UTF-8',
        'tid': self.ga_id,
        'cid': self.hash_user_id(user_id),  # anonymized client id
        't': 'pageview',
        'dh': self.ga_domain,
        'dp': url,
        'dt': title,
    }
    if user_lang:
        args['ul'] = user_lang
    logger.debug('GA settings = %s', urlencode(args))
    async with self.session.post(ga_url, data=args) as r:
        if r.status == 200:
            logger.debug(f'Sent to GA {url} ({title}) for user {user_id}')
        else:
            logger.warning(f'Could not contact GA')
Log a page view .
240
5
10,523
def get(self, username=None, password=None, headers=None):
    """Factory method to get the correct AuthInfo object.

    Both credentials present -> BasicAuth; both absent -> AnonymousAuth;
    exactly one present raises ValueError naming the missing one.
    """
    if headers is None:  # avoid the shared mutable default dict
        headers = {}
    if all((username, password)):
        return BasicAuth(username, password, headers)
    if not any((username, password)):
        return AnonymousAuth(headers)
    # Exactly one credential was supplied: report the missing one.
    # Truthiness (not an ``is None`` check) is used here so that falsy
    # values such as "" count as missing, matching the checks above.
    if not username:
        data = ("username", username)
    else:
        data = ("password", password)
    msg = "%s must have a value (instead of '%s')" % (data[0], data[1])
    raise ValueError(msg)
Factory method to get the correct AuthInfo object .
122
10
10,524
def populate_request_data(self, request_args):
    """Add HTTP basic-auth credentials to *request_args* and return it."""
    request_args['auth'] = HTTPBasicAuth(self._username, self._password)
    return request_args
Add the authentication info to the supplied dictionary .
41
9
10,525
def register_workflow(self, name, workflow):
    """Register *workflow* under *name* so it shows in the workflows list.

    Registering the same name twice is a programming error.
    """
    assert name not in self.workflows
    self.workflows[name] = workflow
Register a workflow to be shown in the workflows list.
30
12
10,526
def add_header(self, name, value):
    """Append an additional response header, keeping existing duplicates."""
    self._headers.setdefault(_hkey(name), []).append(_hval(value))
Add an additional response header not removing duplicates .
40
10
10,527
def load_module(self, path, squash=True):
    """Load UPPERCASE attributes of a Python module into this config.

    With squash=True the values go through load_dict() (namespaced keys
    are flattened); otherwise they are copied in as-is with update().
    Returns self to allow chaining.
    """
    module = load(path)
    values = {}
    for name in dir(module):
        if name.isupper():
            values[name] = getattr(module, name)
    if squash:
        self.load_dict(values)
    else:
        self.update(values)
    return self
Load values from a Python module .
74
7
10,528
def _set_virtual(self, key, value):
    """Recursively set or update a virtual key here and in all overlays.

    A key that already exists but is not marked virtual is left alone.
    """
    if key in self and key not in self._virtual_keys:
        return  # Do nothing for non-virtual keys.
    self._virtual_keys.add(key)
    if key in self and self[key] is not value:
        self._on_change(key, value)
    dict.__setitem__(self, key, value)
    for overlay in self._iter_overlays():
        overlay._set_virtual(key, value)
Recursively set or update virtual keys . Do nothing if non - virtual value is present .
111
19
10,529
def _delete_virtual(self, key):
    """Recursively delete a virtual entry here and in all overlays.

    Keys that are not virtual are ignored.
    """
    if key not in self._virtual_keys:
        return  # Do nothing for non-virtual keys.
    if key in self:
        self._on_change(key, None)
    dict.__delitem__(self, key)
    self._virtual_keys.discard(key)
    for overlay in self._iter_overlays():
        overlay._delete_virtual(key)
Recursively delete virtual entry . Do nothing if key is not virtual .
93
15
10,530
def meta_set(self, key, metafield, value):
    """Set *metafield* for *key*, creating the per-key meta dict on demand."""
    try:
        fields = self._meta[key]
    except KeyError:
        fields = self._meta[key] = {}
    fields[metafield] = value
Set the meta field for a key to a new value .
36
12
10,531
def filename(self):
    """Client-side file name, normalized for file-system compatibility.

    Transliterated to ASCII, stripped of path components and special
    characters, whitespace/dash runs collapsed to single dashes, capped
    at 255 characters. Returns 'empty' when nothing is left.
    """
    fname = self.raw_filename
    # NOTE(review): `unicode` is a py2 name — presumably aliased for
    # py3 compat elsewhere in this module; confirm.
    if not isinstance(fname, unicode):
        fname = fname.decode('utf8', 'ignore')
    fname = normalize('NFKD', fname)
    fname = fname.encode('ASCII', 'ignore').decode('ASCII')
    fname = os.path.basename(fname.replace('\\', os.path.sep))
    fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip()
    fname = re.sub(r'[-\s]+', '-', fname).strip('.-')
    return fname[:255] or 'empty'
Name of the file on the client file system but normalized to ensure file system compatibility . An empty filename is returned as empty .
184
25
10,532
async def render(text: TransText, request: Optional['Request'], multi_line=False) -> Union[Text, List[Text]]:
    """Render a plain string or a StringToTranslate for *request*.

    Returns the list of lines when *multi_line* is set, otherwise the
    lines joined with single spaces. Raises TypeError for other types.
    """
    if isinstance(text, str):
        out = [text]
    elif isinstance(text, StringToTranslate):
        out = await text.render_list(request)
    else:
        raise TypeError('Provided text cannot be rendered')
    if multi_line:
        return out
    else:
        return ' '.join(out)
Render either a normal string or a string to translate into an actual string for the specified request.
106
19
10,533
def score(self, flags: "Flags") -> int:
    """Count how many entries of *flags* this item matches exactly.

    A flag only counts as matched when its key is actually present in
    ``self.flags``: previously ``dict.get`` returned None for missing
    keys, so a flag whose value was None spuriously matched absence.
    """
    _missing = object()  # sentinel: a None value must not match a missing key
    score = 0
    for k, v in flags.items():
        if self.flags.get(k, _missing) == v:
            score += 1
    return score
Counts how many of the flags can be matched
43
10
10,534
def best_for_flags(self, flags: Flags) -> List[TransItem]:
    """Collect every item tied for the highest matching score on *flags*.

    Returns an empty list when the sentence has no items.
    """
    top_score: int = 0
    winners: List[TransItem] = []
    for candidate in self.items:
        s = candidate.score(flags)
        if s > top_score:
            # Strictly better: restart the winners list.
            top_score, winners = s, [candidate]
        elif s == top_score:
            winners.append(candidate)
    return winners
Given flags find all items of this sentence that have an equal matching score and put them in a list .
93
21
10,535
def render(self, flags: Flags) -> Text:
    """Return the value of a randomly chosen best-matching item."""
    return random.choice(self.best_for_flags(flags)).value
Chooses a random sentence from the list and returns it .
30
12
10,536
def update(self, new: 'Sentence', flags: Flags):
    """Replace all items carrying exactly *flags* with the items of *new*."""
    kept = [item for item in self.items if item.flags != flags]
    self.items = kept + list(new.items)
Erase items with the specified flags and insert the new items from the other sentence instead .
47
18
10,537
def render(self, flags: Flags) -> List[Text]:
    """Return one randomly chosen outcome for each sentence of the list."""
    return [x.render(flags) for x in self.sentences]
Returns a list of randomly chosen outcomes for each sentence of the list .
30
14
10,538
def append(self, item: TransItem):
    """Insert *item* at its 1-based sentence index, growing the list if needed.

    Items whose index falls outside the configured bounds are dropped.
    """
    if not (1 <= item.index <= settings.I18N_MAX_SENTENCES_PER_GROUP):
        return
    if len(self.sentences) < item.index:
        # Pad with empty sentences up to the item's slot.
        for _ in range(len(self.sentences), item.index):
            self.sentences.append(Sentence())
    self.sentences[item.index - 1].append(item)
Append an item to the list . If there is not enough sentences in the list then the list is extended as needed .
94
25
10,539
def update(self, group: 'SentenceGroup', flags: Flags) -> None:
    """Overwrite all *flags*-specific data with the content of *group*.

    This object is the global group, *group* the flags-specific one.
    Missing sentences on either side are padded with empty Sentence
    instances; newly created ones are appended at the end.
    """
    to_append = []
    for old, new in zip_longest(self.sentences, group.sentences):
        if old is None:
            old = Sentence()
            to_append.append(old)
        if new is None:
            new = Sentence()
        old.update(new, flags)
    self.sentences.extend(to_append)
This object is considered to be a global sentence group while the other one is flags - specific . All data related to the specified flags will be overwritten by the content of the specified group .
93
38
10,540
def extract(self):
    """Return the collected sentence groups as a plain (shallow-copied) dict."""
    return dict(self.data)
Extract only the valid sentence groups into a dictionary .
31
11
10,541
def append(self, item: TransItem):
    """File *item* under its key in the internal dictionary."""
    self.data[item.key].append(item)
Append an item to the internal dictionary .
24
9
10,542
def _init_loaders(self) -> None:
    """Create the configured loader instances and subscribe to their updates."""
    for loader in settings.I18N_TRANSLATION_LOADERS:
        loader_class = import_class(loader['loader'])
        instance = loader_class()
        instance.on_update(self.update)
        run(instance.load(**loader['params']))
This creates the loaders instances and subscribes to their updates .
77
13
10,543
def update_lang(self, lang: Optional[Text], data: List[Tuple[Text, Text]], flags: Flags):
    """Update the translations for one specific lang.

    Items are parsed, bucketed by key through a SortingDict and merged
    into this lang's sentence groups under *flags*.
    """
    sd = SortingDict()
    for item in (self.parse_item(x[0], x[1], flags) for x in data):
        if item:
            sd.append(item)
    if lang not in self.dict:
        self.dict[lang] = {}
    d = self.dict[lang]
    for k, v in sd.extract().items():
        if k not in d:
            d[k] = SentenceGroup()
        d[k].update(v, flags)
Update translations for one specific lang
144
6
10,544
def update(self, data: TransDict, flags: Flags):
    """Apply *data* to every language it contains, under *flags*."""
    for locale, entries in data.items():
        self.update_lang(locale, entries, flags)
Update all langs at once
43
6
10,545
def get(self,
        key: Text,
        count: Optional[int] = None,
        formatter: Formatter = None,
        locale: Text = None,
        params: Optional[Dict[Text, Any]] = None,
        flags: Optional[Flags] = None) -> List[Text]:
    """Get the appropriate translation given the specified parameters.

    Raises TranslationError for the unsupported *count* option,
    MissingTranslationError for unknown keys, and MissingParamError when
    a format parameter is absent from *params*.
    """
    if params is None:
        params = {}
    if count is not None:
        raise TranslationError('Count parameter is not supported yet')
    locale = self.choose_locale(locale)
    try:
        group: SentenceGroup = self.dict[locale][key]
    except KeyError:
        raise MissingTranslationError('Translation "{}" does not exist'.format(key))
    try:
        trans = group.render(flags or {})
        out = []
        for line in trans:
            if not formatter:
                out.append(line.format(**params))
            else:
                out.append(formatter.format(line, **params))
    except KeyError as e:
        # str.format raised on a missing substitution parameter.
        raise MissingParamError(
            'Parameter "{}" missing to translate "{}"'.format(e.args[0], key)
        )
    else:
        return out
Get the appropriate translation given the specified parameters .
237
9
10,546
async def _resolve_params(self, params: Dict[Text, Any], request: Optional['Request']):
    """Render any StringToTranslate parameter values for *request*.

    Non-translatable values pass through unchanged.
    """
    out = {}
    for k, v in params.items():
        if isinstance(v, StringToTranslate):
            out[k] = await render(v, request)
        else:
            out[k] = v
    return out
If any StringToTranslate was passed as parameter then it is rendered at this moment .
80
18
10,547
async def render_list(self, request=None) -> List[Text]:
    """Render this translation key as a list of strings.

    Timezone, locale and flags come from *request* when available,
    otherwise the first known locale and empty flags are used.
    Parameter resolution can be intercepted by the
    'resolve_trans_params' middleware.
    """
    from bernard.middleware import MiddlewareManager
    if request:
        tz = await request.user.get_timezone()
        locale = await request.get_locale()
        flags = await request.get_trans_flags()
    else:
        tz = None
        locale = self.wd.list_locales()[0]
        flags = {}
    rp = MiddlewareManager.instance().get('resolve_trans_params', self._resolve_params)
    resolved_params = await rp(self.params, request)
    f = I18nFormatter(self.wd.choose_locale(locale), tz)
    return self.wd.get(
        self.key,
        self.count,
        f,
        locale,
        resolved_params,
        flags,
    )
Render the translation as a list if there is multiple strings for this single key .
191
16
10,548
def send(self, stack: Layers):
    """Add a message stack to the send list, validating acceptance first.

    Raises UnacceptableStack when the platform rejects the stack.
    """
    if not isinstance(stack, Stack):
        stack = Stack(stack)
    if not self.platform.accept(stack):
        raise UnacceptableStack('The platform does not allow "{}"'.format(stack.describe()))
    self._stacks.append(stack)
Add a message stack to the send list .
72
9
10,549
async def flush(self, request: 'Request'):
    """Send all queued messages, converting media and honoring middleware."""
    from bernard.middleware import MiddlewareManager
    for stack in self._stacks:
        await stack.convert_media(self.platform)
    func = MiddlewareManager.instance().get('flush', self._flush)
    await func(request, self._stacks)
Send all queued messages .
77
6
10,550
async def make_transition_register(self, request: 'Request'):
    """Fold all queued stacks into the next transition register dict."""
    register = {}
    for stack in self._stacks:
        register = await stack.patch_register(register, request)
    return register
Use all underlying stacks to generate the next transition register .
47
11
10,551
def preloop(self):
    """Print the dedented banner, line by line, before the command loop."""
    lines = textwrap.dedent(self.banner).split("\n")
    for line in lines:
        Console._print("BLUE", "", line)
adds the banner to the preloop
45
8
10,552
def getDebt(self):
    """Sum all balances of the account: principal, interest, fees, penalty.

    Values are cast with float() — presumably they arrive as strings
    from the API; confirm against the account schema.
    """
    balance_fields = (
        'principalBalance',
        'interestBalance',
        'feesBalance',
        'penaltyBalance',
    )
    return sum(float(self[field]) for field in balance_fields)
Sums up all the balances of the account and returns them .
60
13
10,553
def setRepayments(self, *args, **kwargs):
    """Add the repayments for this loan, sorted by due date, to self['repayments'].

    The MambuRepayments class is imported lazily on first use. Returns 1.
    """
    def duedate(repayment):
        """Util function used for sorting repayments according to due Date"""
        try:
            return repayment['dueDate']
        except KeyError as kerr:
            # No due date: sort it as "now" rather than failing.
            return datetime.now()
    try:
        reps = self.mamburepaymentsclass(entid=self['id'], *args, **kwargs)
    except AttributeError as ae:
        # First call: lazily import and cache the repayments class.
        from .mamburepayment import MambuRepayments
        self.mamburepaymentsclass = MambuRepayments
        reps = self.mamburepaymentsclass(entid=self['id'], *args, **kwargs)
    reps.attrs = sorted(reps.attrs, key=duedate)
    self['repayments'] = reps
    return 1
Adds the repayments for this loan to a repayments field .
192
13
10,554
def setTransactions(self, *args, **kwargs):
    """Add the transactions for this loan, sorted by id, to self['transactions'].

    The MambuTransactions class is imported lazily on first use. Returns 1.
    """
    def transactionid(transaction):
        """Util function used for sorting transactions according to id"""
        try:
            return transaction['transactionId']
        except KeyError as kerr:
            return None
    try:
        trans = self.mambutransactionsclass(entid=self['id'], *args, **kwargs)
    except AttributeError as ae:
        # First call: lazily import and cache the transactions class.
        from .mambutransaction import MambuTransactions
        self.mambutransactionsclass = MambuTransactions
        trans = self.mambutransactionsclass(entid=self['id'], *args, **kwargs)
    trans.attrs = sorted(trans.attrs, key=transactionid)
    self['transactions'] = trans
    return 1
Adds the transactions for this loan to a transactions field .
181
11
10,555
def setCentre(self, *args, **kwargs):
    """Add the assigned centre (and its name) for this loan.

    Stores it under self['assignedCentre'] / self['assignedCentreName'].
    The MambuCentre class is imported lazily on first use. Returns 1.
    """
    try:
        centre = self.mambucentreclass(entid=self['assignedCentreKey'], *args, **kwargs)
    except AttributeError as ae:
        from .mambucentre import MambuCentre
        self.mambucentreclass = MambuCentre
        centre = self.mambucentreclass(entid=self['assignedCentreKey'], *args, **kwargs)
    self['assignedCentreName'] = centre['name']
    self['assignedCentre'] = centre
    return 1
Adds the centre for this loan to a assignedCentre field .
152
13
10,556
def setUser(self, *args, **kwargs):
    """Add the assigned user for this loan to self['user'].

    Raises MambuError (with ``noUser = True``) when the account has no
    assignedUserKey. The MambuUser class is imported lazily. Returns 1.
    """
    try:
        user = self.mambuuserclass(entid=self['assignedUserKey'], *args, **kwargs)
    except KeyError as kerr:
        err = MambuError("La cuenta %s no tiene asignado un usuario" % self['id'])
        err.noUser = True
        raise err
    except AttributeError as ae:
        # First call: lazily import and cache the user class, then retry.
        from .mambuuser import MambuUser
        self.mambuuserclass = MambuUser
        try:
            user = self.mambuuserclass(entid=self['assignedUserKey'], *args, **kwargs)
        except KeyError as kerr:
            err = MambuError("La cuenta %s no tiene asignado un usuario" % self['id'])
            err.noUser = True
            raise err
    self['user'] = user
    return 1
Adds the user for this loan to a user field .
219
11
10,557
def setProduct(self, cache=False, *args, **kwargs):
    """Add the product for this loan to self['product'].

    With cache=True the product is searched in the AllMambuProducts
    collection; returns 1 when the collection had no ``noinit`` marker
    (i.e. it was freshly fetched), else 0. Without cache, the product
    is fetched directly and 1 is returned.
    """
    if cache:
        try:
            prods = self.allmambuproductsclass(*args, **kwargs)
        except AttributeError as ae:
            from .mambuproduct import AllMambuProducts
            self.allmambuproductsclass = AllMambuProducts
            prods = self.allmambuproductsclass(*args, **kwargs)
        for prod in prods:
            if prod['encodedKey'] == self['productTypeKey']:
                self['product'] = prod
        try:
            # asked for cache, but cache was originally empty
            prods.noinit
        except AttributeError:
            return 1
        return 0
    try:
        product = self.mambuproductclass(entid=self['productTypeKey'], *args, **kwargs)
    except AttributeError as ae:
        from .mambuproduct import MambuProduct
        self.mambuproductclass = MambuProduct
        product = self.mambuproductclass(entid=self['productTypeKey'], *args, **kwargs)
    self['product'] = product
    return 1
Adds the product for this loan to a product field .
276
11
10,558
def getClientDetails(self, *args, **kwargs):
    """Build one detail dict per client holding this account.

    Expects a 'holder' keyword argument with a 'clients' list; each
    client is paired with this account's full loanAmount.
    """
    holder = kwargs['holder']
    amount = self['loanAmount']
    return [
        {
            'id': client['id'],
            'name': client['name'],
            'client': client,
            'amount': amount,
        }
        for client in holder['clients']
    ]
Gets the loan details for every client holder of the account .
97
13
10,559
def get_fields_for_keyword(self, keyword, mode='a'):
    """Convert *keyword* to its list of fields.

    Mapping values may be a per-mode dict, an explicit list/tuple, or a
    single field name; unknown keywords map to themselves.
    """
    mapped = self.keyword_to_fields.get(keyword, keyword)
    if isinstance(mapped, dict):
        # Per-mode mapping: pick the entry for the requested mode.
        return mapped[mode]
    if isinstance(mapped, (list, tuple)):
        return mapped
    return [mapped]
Convert keyword to fields .
71
6
10,560
def merge_dict(a, b, path=None):
    """Recursively merge dict *b* into dict *a* (in place) and return *a*.

    Nested dicts are merged; for any other conflicting key the value
    already in *a* wins. *path* tracks the key trail of the recursion.
    """
    path = path or []
    for key, value in b.items():
        if key not in a:
            a[key] = value
        elif isinstance(a[key], dict) and isinstance(value, dict):
            merge_dict(a[key], value, path + [str(key)])
        # otherwise: conflicting non-dict values — keep a's value
    return a
Merge dict b into a
94
6
10,561
def make_date(obj: Union[date, datetime, Text], timezone: tzinfo = None):
    """A flexible coercion of a datetime, date or date string to a date.

    Datetimes are shifted into *timezone* (when given) before the time
    part is dropped; strings are parsed and handled recursively. Any
    other type falls through and yields None.
    """
    if isinstance(obj, datetime):
        # Convert to the target timezone before dropping the time part.
        if timezone and hasattr(obj, 'astimezone'):
            obj = obj.astimezone(timezone)
        return obj.date()
    if isinstance(obj, date):
        return obj
    if isinstance(obj, str):
        return make_date(parse_date(obj), timezone)
A flexible method to get a date object .
107
9
10,562
def format_date(self, value, format_):
    """Format *value* as a date with Babel, in this formatter's locale."""
    date_ = make_date(value)
    return dates.format_date(date_, format_, locale=self.lang)
Format the date using Babel
41
5
10,563
def format_datetime(self, value, format_):
    """Format *value* as a datetime with Babel, in this formatter's locale."""
    date_ = make_datetime(value)
    return dates.format_datetime(date_, format_, locale=self.lang)
Format the datetime using Babel
44
6
10,564
def format_field(self, value, spec):
    """Provide the additional localization format specs.

    Supports 'date:FMT', 'datetime:FMT' and 'number'; anything else
    falls through to the standard Formatter behavior.
    """
    if spec.startswith('date:'):
        _, format_ = spec.split(':', 1)
        return self.format_date(value, format_)
    elif spec.startswith('datetime:'):
        _, format_ = spec.split(':', 1)
        return self.format_datetime(value, format_)
    elif spec == 'number':
        return self.format_number(value)
    else:
        return super(I18nFormatter, self).format_field(value, spec)
Provide the additional formatters for localization .
130
9
10,565
def _decode(cls, value):
    """Revert %-encoded hex groups in *value*, then parse it as JSON."""
    value = cls._DEC_RE.sub(lambda x: '%c' % int(x.group(1), 16), value)
    return json.loads(value)
Decode the given value reverting % - encoded groups .
52
12
10,566
def decode(cls, key):
    """Decode a bucket key string into a BucketKey instance.

    Raises ValueError for unrecognized prefixes or malformed
    parameter expressions.
    """
    # Determine bucket key version
    prefix, sep, param_str = key.partition(':')
    if sep != ':' or prefix not in cls._prefix_to_version:
        raise ValueError("%r is not a bucket key" % key)
    version = cls._prefix_to_version[prefix]

    # Take the parameters apart...
    parts = param_str.split('/')
    uuid = parts.pop(0)
    params = {}
    for part in parts:
        name, sep, value = part.partition('=')

        # Make sure it's well-formed
        if sep != '=':
            raise ValueError("Cannot interpret parameter expression %r" % part)

        params[name] = cls._decode(value)

    # Return a BucketKey
    return cls(uuid, params, version=version)
Decode a bucket key into a BucketKey instance .
188
11
10,567
def need_summary(self, now, max_updates, max_age):
    """Decide whether a summarize record should be added now.

    True when an existing summary is older than *max_age*, or when no
    summary exists yet and at least *max_updates* updates accumulated.
    """
    if self.summarized is True:
        # A summary exists: refresh only once it has gone stale.
        return self.last_summarize_ts + max_age <= now
    return self.summarized is False and self.updates >= max_updates
Helper method to determine if a summarize record should be added .
69
12
10,568
def dehydrate(self):
    """Serialize this bucket to a dict of its declared attrs only."""
    return {name: getattr(self, name) for name in self.attrs}
Return a dict representing this bucket .
43
7
10,569
def delay(self, params, now=None):
    """Determine the delay until the next request is allowed.

    Leaky-bucket algorithm: the level drains one unit per second of
    elapsed wall time, and an allowed request raises it by
    ``limit.cost``.

    :param params: Request parameters (unused here; presumably kept for
                   subclass overrides — TODO confirm).
    :param now: Timestamp to evaluate at; defaults to ``time.time()``.
    :returns: None if the request fits, otherwise the number of seconds
              the caller must wait.
    """
    if now is None:
        now = time.time()

    # Initialize last...
    if not self.last:
        self.last = now
    elif now < self.last:
        # Clock appears to have gone backwards; clamp so the bucket
        # never drains by a negative amount
        now = self.last

    # How much has leaked out?
    leaked = now - self.last

    # Update the last message time
    self.last = now

    # Update the water level
    self.level = max(self.level - leaked, 0)

    # Are we too full?
    difference = self.level + self.limit.cost - self.limit.unit_value
    if difference >= self.eps:
        self.next = now + difference
        return difference

    # OK, raise the water level and set next to an appropriate
    # value
    self.level += self.limit.cost
    self.next = now
    return None
Determine delay until next request .
170
8
10,570
def messages(self):
    """Return the number of messages remaining before limiting applies."""
    remaining_fraction = ((self.limit.unit_value - self.level) /
                          self.limit.unit_value)
    return int(math.floor(remaining_fraction * self.limit.value))
Return remaining messages before limiting .
44
6
10,571
def dehydrate(self):
    """Return a dict representing this limit.

    Includes the fully-qualified limit class name plus every declared
    attribute; ``getattr`` is used so properties participate too.
    """
    result = {'limit_class': self._limit_full_name}
    result.update((attr, getattr(self, attr)) for attr in self.attrs)
    return result
Return a dict representing this limit .
67
7
10,572
def load(self, key):
    """Given a bucket key, load and return the corresponding bucket.

    :param key: A ``BucketKey`` instance or its encoded string form.
    :returns: A bucket object for this limit.
    :raises ValueError: If the key belongs to a different limit (uuid
                        mismatch).
    """
    # Turn the key into a BucketKey
    if isinstance(key, basestring):
        key = BucketKey.decode(key)

    # Make sure the uuids match
    if key.uuid != self.uuid:
        raise ValueError("%s is not a bucket corresponding to this limit" %
                         key)

    # If the key is a version 1 key, load it straight from the
    # database
    if key.version == 1:
        raw = self.db.get(str(key))
        if raw is None:
            # No stored state yet: hand back a fresh, empty bucket
            return self.bucket_class(self.db, self, str(key))
        # Stored state is msgpack-serialized bucket attributes
        return self.bucket_class.hydrate(self.db, msgpack.loads(raw),
                                         self, str(key))

    # OK, use a BucketLoader
    records = self.db.lrange(str(key), 0, -1)
    loader = BucketLoader(self.bucket_class, self.db, self,
                          str(key), records)
    return loader.bucket
Given a bucket key load the corresponding bucket .
216
9
10,573
def decode(self, key):
    """Return the parameters encoded in ``key``.

    :raises ValueError: If the key's uuid does not match this limit's.
    """
    # Parse the bucket key
    bucket_key = BucketKey.decode(key)

    # Make sure the uuids match
    if bucket_key.uuid != self.uuid:
        raise ValueError("%s is not a bucket corresponding to this limit" %
                         bucket_key)
    return bucket_key.params
Given a bucket key compute the parameters used to compute that key .
64
13
10,574
def format(self, status, headers, environ, bucket, delay):
    """Format a rate-limit response.

    Returns a ``(status, entity)`` tuple and sets the Content-Type
    header; subclasses may override to customize the entity.
    """
    retry_at = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(bucket.next))
    # This is a default response entity, which can be overridden
    # by limit subclasses.
    entity = ("This request was rate-limited. "
              "Please retry your request after %s." % retry_at)
    headers['Content-Type'] = 'text/plain'
    return status, entity
Formats a response entity . Returns a tuple of the desired status code and the formatted entity . The default status code is passed in as is a dictionary of headers .
114
33
10,575
def drop_prefix(strings):
    """Remove the common prefix (and file extensions) from *strings*.

    If trimming would make two entries collide, the originals are
    returned unchanged.
    """
    stems = [s.split(".", 2)[0] for s in strings]
    if len(stems) == 1:
        return [os.path.basename(stems[0])]
    cut = len(os.path.commonprefix(stems))
    trimmed = [stem[cut:] for stem in stems]
    if len(set(trimmed)) != len(strings):
        # If these operations resulted in a collision, just return the
        # original strings.
        return strings
    return trimmed
Removes common prefix from a collection of strings
141
9
10,576
def count(self):
    """Return the number of nodes this subtree contains.

    A leaf (no children) counts as one; an internal node is the sum of
    its children's counts.
    """
    if self._nodes is None:
        return 1
    total = 0
    for child in self._nodes:
        total += child.count()
    return total
Return how many nodes this contains including self .
34
9
10,577
def app_size(self):
    """Return the total apparent size, summed over children if any."""
    if self._nodes is None:
        return self._app_size
    total = 0
    for child in self._nodes:
        total += child.app_size()
    return total
Return the total apparent size including children .
43
8
10,578
def use_size(self):
    """Return the total used (on-disk) size, summed over children if any."""
    if self._nodes is None:
        return self._use_size
    total = 0
    for child in self._nodes:
        total += child.use_size()
    return total
Return the total used size including children .
43
8
10,579
def _prune_all_if_small ( self , small_size , a_or_u ) : if self . _nodes is None : return True total_size = ( self . app_size ( ) if a_or_u else self . use_size ( ) ) if total_size < small_size : if a_or_u : self . _set_size ( total_size , self . use_size ( ) ) else : self . _set_size ( self . app_size ( ) , total_size ) return True return False
Return True and delete children if small enough .
122
9
10,580
def _prune_some_if_small(self, small_size, a_or_u):
    """Merge "small" child nodes whilst keeping the larger ones.

    Children below ``small_size`` (apparent size if ``a_or_u`` else
    used size) are removed and their sizes folded either into the
    parent itself or into a synthetic "leftovers" node.  Asserts that
    the subtree totals are unchanged afterwards.

    NOTE(review): if only use-size is pruned while prune_app_size stays
    0, the pruned nodes are dropped without being re-accounted — the
    final assertion would then fire; confirm inputs cannot hit this.
    """
    # Assert that we're not messing things up.
    prev_app_size = self.app_size()
    prev_use_size = self.use_size()

    keep_nodes = []
    prune_app_size = 0
    prune_use_size = 0
    for node in self._nodes:
        node_size = node.app_size() if a_or_u else node.use_size()
        if node_size < small_size:
            # Accumulate both sizes of the pruned node, reusing the
            # already-computed one for the selected dimension
            if a_or_u:
                prune_app_size += node_size
                prune_use_size += node.use_size()
            else:
                prune_app_size += node.app_size()
                prune_use_size += node_size
        else:
            keep_nodes.append(node)

    # Last "leftover" node? Merge with parent.
    if len(keep_nodes) == 1 and keep_nodes[-1]._isdir is None:
        prune_app_size += keep_nodes[-1]._app_size
        prune_use_size += keep_nodes[-1]._use_size
        keep_nodes = []

    if prune_app_size:
        if not keep_nodes:
            # The only node to keep, no "leftovers" here. Move data
            # to the parent.
            keep_nodes = None
            assert self._isdir and self._nodes is not None
            self._set_size(prune_app_size, prune_use_size)
        elif keep_nodes and keep_nodes[-1]._isdir is None:
            # There was already a leftover node. Add the new leftovers.
            keep_nodes[-1]._add_size(prune_app_size, prune_use_size)
        else:
            # Create a new leftover node.
            keep_nodes.append(DuNode.new_leftovers(
                self._path, prune_app_size, prune_use_size))

    # Update nodes and do the actual assertion.
    self._nodes = keep_nodes
    assert prev_app_size == self.app_size(), (
        prev_app_size, self.app_size())
    assert prev_use_size == self.use_size(), (
        prev_use_size, self.use_size())
Merge some nodes in the directory whilst keeping others .
545
11
10,581
def merge_upwards_if_smaller_than(self, small_size, a_or_u):
    """Fold remaining small nodes into an ancestor's leftovers node.

    After prune_if_smaller_than is run, we may still have excess
    nodes: any node below ``small_size`` whose grandparent ends in an
    ``isdir=None`` ("leftovers") node is merged into that node and
    removed from its parent.  Totals are asserted to be unchanged.
    """
    # Assert that we're not messing things up.
    prev_app_size = self.app_size()
    prev_use_size = self.use_size()

    small_nodes = self._find_small_nodes(small_size, (), a_or_u)
    for node, parents in small_nodes:
        # Check immediate grandparent for isdir=None and if it
        # exists, move this there. The isdir=None node is always
        # last.
        if len(parents) >= 2:
            tail = parents[-2]._nodes[-1]
            if tail._isdir is None:
                assert tail._app_size is not None, tail
                tail._add_size(node.app_size(), node.use_size())
                parents[-1]._nodes.remove(node)
                # The parent must never be left completely empty
                assert len(parents[-1]._nodes)

    # The actual assertion.
    assert prev_app_size == self.app_size(), (
        prev_app_size, self.app_size())
    assert prev_use_size == self.use_size(), (
        prev_use_size, self.use_size())
After prune_if_smaller_than is run we may still have excess nodes .
291
19
10,582
def as_tree(self):
    """Return this subtree as nested lists: ``[self, child_tree, ...]``."""
    result = [self]
    if self._nodes is not None:
        for child in self._nodes:
            result.append(child.as_tree())
    return result
Return the nodes as a list of lists .
49
9
10,583
def _check_path ( self ) : if not path . isdir ( self . _path or '/' ) : raise OSError ( 'Path {!r} is not a directory' . format ( self . _path ) )
Immediately check if we can access path . Otherwise bail .
51
12
10,584
def version(command='dmenu'):
    """Return the dmenu command's version message.

    :raises DmenuCommandError: If the command cannot be started or
        exits with a non-zero status.
    """
    args = [command, '-v']
    try:
        # start the dmenu process
        proc = subprocess.Popen(args,
                                universal_newlines=True,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
    except OSError as err:
        # something went wrong with starting the process
        raise DmenuCommandError(args, err)
    if proc.wait() != 0:
        # error from dmenu
        raise DmenuCommandError(args, proc.stderr.read())
    # version information from stdout
    return proc.stdout.read().rstrip('\n')
The dmenu command s version message .
149
8
10,585
def show(items, command='dmenu', bottom=None, fast=None,
         case_insensitive=None, lines=None, monitor=None, prompt=None,
         font=None, background=None, foreground=None,
         background_selected=None, foreground_selected=None):
    """Present a dmenu to the user.

    :param items: Iterable of strings offered for selection.
    :param command: The dmenu executable to run.
    :returns: The selected string, or None if the user hit escape.
    :raises DmenuCommandError: If dmenu cannot be started or fails.
    :raises DmenuUsageError: If dmenu rejects the arguments.
    """
    # construct args
    args = [command]
    if bottom:
        args.append('-b')
    if fast:
        args.append('-f')
    if case_insensitive:
        args.append('-i')
    if lines is not None:
        args.extend(('-l', str(lines)))
    if monitor is not None:
        args.extend(('-m', str(monitor)))
    if prompt is not None:
        args.extend(('-p', prompt))
    if font is not None:
        args.extend(('-fn', font))
    if background is not None:
        args.extend(('-nb', background))
    if foreground is not None:
        args.extend(('-nf', foreground))
    if background_selected is not None:
        args.extend(('-sb', background_selected))
    if foreground_selected is not None:
        args.extend(('-sf', foreground_selected))

    try:
        # start the dmenu process
        proc = subprocess.Popen(args,
                                universal_newlines=True,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
    except OSError as err:
        # something went wrong with starting the process
        raise DmenuCommandError(args, err)

    # write items over to dmenu, one per line
    with proc.stdin:
        for item in items:
            proc.stdin.write(item)
            proc.stdin.write('\n')

    if proc.wait() == 0:
        # user made a selection
        return proc.stdout.read().rstrip('\n')

    stderr = proc.stderr.read()
    if stderr == '':
        # user hit escape
        return None
    if re.match('usage', stderr, re.I):
        # usage error
        raise DmenuUsageError(args, stderr)
    # other error from dmenu
    raise DmenuCommandError(args, stderr)
Present a dmenu to the user .
508
8
10,586
def get_upregulated_genes_network(self) -> Graph:
    """Return a copy of the graph restricted to up-regulated genes."""
    logger.info("In get_upregulated_genes_network()")
    subgraph = self.graph.copy()  # deep copy graph
    # delete genes which are not differentially expressed or have no
    # connections to others
    unwanted = self.graph.vs(up_regulated_eq=False)
    subgraph.delete_vertices(unwanted.indices)
    subgraph.delete_vertices(subgraph.vs.select(_degree_eq=0))
    return subgraph
Get the graph of up - regulated genes .
130
9
10,587
def get_downregulated_genes_network(self) -> Graph:
    """Return a copy of the graph restricted to down-regulated genes."""
    logger.info("In get_downregulated_genes_network()")
    subgraph = self.graph.copy()  # deep copy graph
    # delete genes which are not differentially expressed or have no
    # connections to others
    unwanted = self.graph.vs(down_regulated_eq=False)
    subgraph.delete_vertices(unwanted.indices)
    subgraph.delete_vertices(subgraph.vs.select(_degree_eq=0))
    return subgraph
Get the graph of down - regulated genes .
130
9
10,588
def make_parser():
    """Build the argument parser covering all BERNARD sub-commands."""
    root = argparse.ArgumentParser(description='BERNARD CLI utility')
    subs = root.add_subparsers(help='Sub-command')

    # "run": start the server
    run_p = subs.add_parser('run', help='Run the BERNARD server')
    run_p.set_defaults(action='run')

    # "sheet": Google Sheets import, with OAuth helper flags
    sheet_p = subs.add_parser('sheet', help='Import files from Google '
                                            'Sheets')
    sheet_p.set_defaults(action='sheet')
    sheet_p.add_argument('--auth_host_name', default='localhost',
                         help='Hostname when running a local web server.')
    sheet_p.add_argument('--noauth_local_webserver', action='store_true',
                         default=False,
                         help='Do not run a local web server.')
    sheet_p.add_argument('--auth_host_port', default=[8080, 8090], type=int,
                         nargs='*',
                         help='Port web server should listen on.')
    sheet_p.add_argument('--logging_level', default='ERROR',
                         choices=['DEBUG', 'INFO', 'WARNING', 'ERROR',
                                  'CRITICAL'],
                         help='Set the logging level of detail.')

    # "start_project": scaffold a new project
    start_p = subs.add_parser('start_project', help='Starts a project')
    start_p.set_defaults(action='start_project')
    start_p.add_argument('project_name',
                         help='A snake-case name for your project')
    start_p.add_argument('dir', help='Directory to store the project')

    return root
Generate the parser for all sub - commands
407
9
10,589
def main():
    """Dispatch to the sub-command's main function per the parsed args."""
    parser = make_parser()
    args = parser.parse_args()
    if not hasattr(args, 'action'):
        # No sub-command given: show usage and bail out
        parser.print_help()
        exit(1)
    action = args.action
    # Imports are deferred so each sub-command only loads what it needs
    if action == 'run':
        from bernard.cli import main as main_run
        main_run()
    elif action == 'sheet':
        from bernard.misc.sheet_sync import main as main_sheet
        main_sheet(args)
    elif action == 'start_project':
        from bernard.misc.start_project import main as main_sp
        main_sp(args)
Run the appropriate main function according to the output of the parser .
137
13
10,590
def load_dotenv(dotenv_path, verbose=False):
    """Read a .env file and merge its values into ``os.environ``.

    Pre-existing environment variables win (``setdefault``).  Returns
    True on success, or None if the file does not exist.
    """
    if not os.path.exists(dotenv_path):
        if verbose:
            warnings.warn(f"Not loading {dotenv_path}, it doesn't exist.")
        return None
    for key, value in dotenv_values(dotenv_path).items():
        os.environ.setdefault(key, value)
    return True
Read a . env file and load into os . environ .
91
13
10,591
def get_key(dotenv_path, key_to_get, verbose=False):
    """Return the value of ``key_to_get`` from the given .env file.

    Returns None (optionally warning) when the file or key is missing.
    """
    key_to_get = str(key_to_get)
    if not os.path.exists(dotenv_path):
        if verbose:
            warnings.warn(f"Can't read {dotenv_path}, it doesn't exist.")
        return None
    values = dotenv_values(dotenv_path)
    if key_to_get not in values:
        if verbose:
            warnings.warn(f"key {key_to_get} not found in {dotenv_path}.")
        return None
    return values[key_to_get]
Gets the value of a given key from the given . env
155
13
10,592
def _get_format ( value , quote_mode = 'always' ) : formats = { 'always' : '{key}="{value}"\n' , 'auto' : '{key}={value}\n' } if quote_mode not in formats . keys ( ) : return KeyError ( f'quote_mode {quote_mode} is invalid' ) _mode = quote_mode if quote_mode == 'auto' and ' ' in value : _mode = 'always' return formats . get ( _mode )
Returns the quote format depending on the quote_mode . This determines if the key value will be quoted when written to the env file .
114
27
10,593
def find_dotenv(filename='.env', raise_error_if_not_found=False,
                usecwd=False):
    """Search in increasingly higher folders for the given file.

    Returns the path found, '' when absent, or raises IOError when
    ``raise_error_if_not_found`` is set.
    """
    if usecwd or '__file__' not in globals():
        # should work without __file__, e.g. in REPL or IPython notebook
        start_dir = os.getcwd()
    else:
        # will work for .py files; start from the caller's directory
        caller_file = sys._getframe().f_back.f_code.co_filename
        start_dir = os.path.dirname(os.path.abspath(caller_file))
    for dirname in _walk_to_root(start_dir):
        candidate = os.path.join(dirname, filename)
        if os.path.exists(candidate):
            return candidate
    if raise_error_if_not_found:
        raise IOError('File not found')
    return ''
Search in increasingly higher folders for the given file
196
9
10,594
def reducer(*tokens):
    """Decorator registering a reduction token sequence on a method.

    Each application appends its token list to the method's
    ``reducers`` attribute (created on first use).
    """
    sequence = list(tokens)

    def wrapper(func):
        # Lazily create the registry on first decoration
        if not hasattr(func, 'reducers'):
            func.reducers = []
        func.reducers.append(sequence)
        return func

    return wrapper
Decorator for reduction methods .
74
7
10,595
def parse_rule(rule: str, raise_error=False):
    """Parse a policy rule string into a tree of Check objects."""
    return Parser(raise_error).parse(rule)
Parses policy to a tree of Check objects .
32
11
10,596
def _reduce(self):
    """Perform a greedy reduction of the token stream.

    Tries each registered reducer sequence against the tail of
    ``self.tokens``; on a match, replaces the matched tokens/values
    with the reduction method's output and recurses until no rule
    applies.
    """
    for reduction, methname in self.reducers:
        token_num = len(reduction)
        if (len(self.tokens) >= token_num and
                self.tokens[-token_num:] == reduction):
            # Get the reduction method
            meth = getattr(self, methname)

            # Reduce the token stream
            results = meth(*self.values[-token_num:])
            self.tokens[-token_num:] = [r[0] for r in results]
            self.values[-token_num:] = [r[1] for r in results]

            # Check for any more reductions
            return self._reduce()
Perform a greedy reduction of token stream .
146
9
10,597
def _parse_check(self, rule):
    """Parse a single base check rule into an appropriate Check object.

    Unknown or malformed rules fail closed (``FalseCheck``), unless
    ``self.raise_error`` is set, in which case
    ``InvalidRuleException`` is raised instead.
    """
    # Handle the special constant-type checks
    for check_cls in (checks.FalseCheck, checks.TrueCheck):
        check = check_cls()
        if rule == str(check):
            return check

    try:
        kind, match = rule.split(':', 1)
    except Exception:
        if self.raise_error:
            raise InvalidRuleException(rule)
        else:
            LOG.exception('Failed to understand rule %r', rule)
            # If the rule is invalid, we'll fail closed
            return checks.FalseCheck()

    if kind in checks.registered_checks:
        return checks.registered_checks[kind](kind, match)
    elif None in checks.registered_checks:
        # A None key acts as a catch-all check factory
        return checks.registered_checks[None](kind, match)
    elif self.raise_error:
        raise InvalidRuleException(rule)
    else:
        LOG.error('No handler for matches of kind %r', kind)
        # If the rule is invalid, we'll fail closed
        return checks.FalseCheck()
Parse a single base check rule into an appropriate Check object .
222
13
10,598
def _parse_tokenize(self, rule):
    """Tokenizer for the policy language.

    Yields ``(token, value)`` pairs: parentheses, the keywords
    'and'/'or'/'not', quoted strings, and parsed base checks.
    """
    for token in self._TOKENIZE_RE.split(rule):
        # Skip empty tokens
        if not token or token.isspace():
            continue

        # Handle leading parens on the token
        clean = token.lstrip('(')
        for i in range(len(token) - len(clean)):
            yield '(', '('

        # If it was only parentheses, continue
        if not clean:
            continue
        else:
            token = clean

        # Handle trailing parens on the token
        clean = token.rstrip(')')
        trail = len(token) - len(clean)

        # Yield the cleaned token
        lowered = clean.lower()
        if lowered in ('and', 'or', 'not'):
            # Special tokens
            yield lowered, clean
        elif clean:
            # Not a special token, but not composed solely of ')'
            if len(token) >= 2 and ((token[0], token[-1]) in
                                    [('"', '"'), ("'", "'")]):
                # It's a quoted string
                yield 'string', token[1:-1]
            else:
                yield 'check', self._parse_check(clean)

        # Yield the trailing parens
        for i in range(trail):
            yield ')', ')'
Tokenizer for the policy language .
285
7
10,599
def parse(self, rule: str):
    """Parse ``rule`` into a Check tree.

    An empty rule always accepts; an unparseable rule fails closed.
    """
    # Empty rule means always accept
    if not rule:
        return checks.TrueCheck()
    for token, value in self._parse_tokenize(rule):
        self._shift(token, value)
    try:
        return self.result
    except ValueError:
        LOG.exception('Failed to understand rule %r', rule)
        # Fail closed
        return checks.FalseCheck()
Parses policy to tree .
88
7