idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
12,800
def generate_output_path(args, project_path):
    """Generate a default, timestamped output directory path under <project_path>/results."""
    microsec = datetime.now().microsecond
    timestamp = time.strftime('%Y.%m.%d_%H.%M.%S', time.localtime())
    dirname = 'results_{}_{}'.format(timestamp, str(microsec))
    return os.path.join(project_path, 'results', dirname)
Generate default output directory
96
5
12,801
def run(args):
    """Start an oct project run: configure, launch HQ, then process the results."""
    options = vars(args)
    options.pop('func', None)
    project_path = options.pop('project_path')
    config = configure(project_path, options.get('config_file'))
    output_dir = options.pop('output_dir', None) or generate_output_path(args, project_path)
    stats_handler.init_stats(output_dir, config)
    # Use the configured channel, or a fresh random topic.
    topic = args.publisher_channel or uuid.uuid4().hex
    print("External publishing topic is %s" % topic)
    start_hq(output_dir, config, topic, **options)
    if not args.no_results:
        process_results(output_dir, config)
    copy_config(project_path, output_dir)
    print('done.\n')
Start an oct project
204
4
12,802
def guest_access(func):
    """Guest access decorator.

    Checks if public profiles are enabled in config; when they are not,
    requires authentication (401) and restricts access to the profile
    owner (403) based on the 'id' keyword argument.
    """
    import functools

    # Bug fix: without functools.wraps the wrapper hid the view's
    # __name__/__doc__, which breaks flask endpoint naming and introspection.
    @functools.wraps(func)
    def decorated(*_, **kwargs):
        public_profiles = current_app.config['USER_PUBLIC_PROFILES']
        if not public_profiles:
            if not current_user.is_authenticated:
                abort(401)
            elif current_user.id != kwargs['id']:
                abort(403)
        return func(**kwargs)
    return decorated
Guest access decorator Checks if public profiles option is enabled in config and checks access to profile pages based on that .
98
23
12,803
def only_owner(func):
    """Only owner decorator.

    Restricts access to the view to the profile owner: unauthenticated
    users get 401, authenticated non-owners get 403.
    """
    import functools

    # Bug fix: preserve the wrapped view's metadata (flask endpoint naming).
    @functools.wraps(func)
    def decorated(*_, **kwargs):
        id = kwargs['id']
        if not current_user.is_authenticated:
            abort(401)
        elif current_user.id != id:
            abort(403)
        return func(**kwargs)
    return decorated
Only owner decorator Restricts access to view only to profile owner
71
14
12,804
def load(config_file):
    """Process and load a config file.

    The file is rendered as a template with os.environ as the context,
    then parsed as YAML and wrapped in a Config object.
    """
    with open(config_file, "r") as f:
        tmpl = Template(f.read())
    # Bug fix: yaml.load without an explicit Loader is unsafe and deprecated
    # (PyYAML >= 5.1); safe_load avoids arbitrary object construction.
    return Config(yaml.safe_load(tmpl.render(**dict(os.environ))))
Processes and loads config file .
73
7
12,805
def get_term_by_year_and_quarter(year, quarter):
    """Return a uw_sws.models.Term object for the passed year and quarter."""
    resource_url = "{prefix}/{y},{q}.json".format(
        prefix=term_res_url_prefix, y=year, q=quarter.lower())
    return _json_to_term_model(get_resource(resource_url))
Returns a uw_sws . models . Term object for the passed year and quarter .
65
19
12,806
def get_current_term():
    """Return a uw_sws.models.Term object for the current term."""
    term = _json_to_term_model(
        get_resource("{}/current.json".format(term_res_url_prefix)))
    # A term doesn't become "current" until 2 days before the start of
    # classes, which is too late to be useful; once past the grade
    # submission deadline, advance to the next term resource instead.
    if datetime.now() > term.grade_submission_deadline:
        return get_next_term()
    return term
Returns a uw_sws . models . Term object for the current term .
122
17
12,807
def get_term_before(aterm):
    """Return a uw_sws.models.Term object for the term before the given term."""
    quarter = QUARTER_SEQ[QUARTER_SEQ.index(aterm.quarter) - 1]
    # Stepping back past the first quarter wraps to autumn of the previous year.
    year = aterm.year - 1 if quarter == "autumn" else aterm.year
    return get_term_by_year_and_quarter(year, quarter)
Returns a uw_sws . models . Term object for the term before the term given .
83
20
12,808
def get_term_after(aterm):
    """Return a uw_sws.models.Term object for the term after the given term."""
    if aterm.quarter == "autumn":
        quarter = QUARTER_SEQ[0]
    else:
        quarter = QUARTER_SEQ[QUARTER_SEQ.index(aterm.quarter) + 1]
    # Rolling past autumn into winter advances the year.
    year = aterm.year + 1 if quarter == "winter" else aterm.year
    return get_term_by_year_and_quarter(year, quarter)
Returns a uw_sws . models . Term object for the term after the term given .
108
20
12,809
def get_term_by_date(date):
    """Return a Term for the datetime.date object given."""
    year = date.year
    term = None
    # Walk quarters from latest to earliest until one has started by `date`.
    for quarter in ('autumn', 'summer', 'spring', 'winter'):
        term = get_term_by_year_and_quarter(year, quarter)
        if date >= term.first_day_quarter:
            break
    # If we're in a year, before the start of winter quarter, we need to go
    # to the previous year's autumn term.
    if date < term.first_day_quarter:
        term = get_term_by_year_and_quarter(year - 1, 'autumn')
    # Autumn quarter should always last through the end of the year, with
    # winter of the next year starting in January -- but this makes sure we
    # catch it if not.
    term_after = get_term_after(term)
    if term_after.first_day_quarter > date:
        return term
    return term_after
    # Bug fix: removed the unreachable trailing `pass` after the returns.
Returns a term for the datetime . date object given .
204
12
12,810
def logging_feature(app):
    """Register logging handlers on the given flask application."""
    # This is important because otherwise only warn, err and crit are logged.
    app.logger.setLevel(logging.INFO)
    email_exceptions = app.config.get('LOGGING_EMAIL_EXCEPTIONS_TO_ADMINS')
    # Only mail exceptions in real deployments (config.debug=False).
    if email_exceptions and not app.debug and not app.testing:
        app.logger.addHandler(mail_logger(app))
    if not app.testing:
        app.logger.addHandler(file_logger(app))
Add logging Accepts flask application and registers logging functionality within it
138
12
12,811
async def rtm(self) -> AsyncIterator[Event]:
    """Connect to the realtime event API and start yielding events."""
    response = cast(RTMStart, await self.api("rtm.start"))
    # Populate identity and rosters from the rtm.start payload.
    self.me = Auto.generate(response.self_, "Me", recursive=False)
    self.team = Auto.generate(response.team, "Team", recursive=False)
    self.channels.fill(Channel.build(item) for item in response.channels)
    self.users.fill(User.build(item) for item in response.users)
    self.groups.fill(Group.build(item) for item in response.groups)
    log.debug(
        f"received {len(self.users)} users, {len(self.channels)} channels "
        f"and {len(self.groups)} groups from rtm.start"
    )
    # Stream events from the websocket until the server says goodbye.
    async with self.session.ws_connect(response["url"]) as ws:
        async for msg in ws:
            event: Event = Event.generate(msg.json(), recursive=False)
            if event.type == "goodbye":
                break
            yield event
Connect to the realtime event API and start yielding events .
239
12
12,812
def _apply(self, method_name, *args, **kwargs):
    """Call *method_name* with *args* and *kwargs* on each member form, collecting the results."""
    results = []
    for member in self.forms:
        bound_method = getattr(member, method_name)
        results.append(bound_method(*args, **kwargs))
    return results
Call method_name with args and kwargs on each member .
48
14
12,813
def html_id(self, field_name, form=None):
    """Return the html ID for the given field_name (using *form*, or self by default)."""
    target = self if form is None else form
    return target.auto_id % (target.add_prefix(field_name),)
Return the html ID for the given field_name .
44
11
12,814
def save(self):
    """Save the changes to the instance and any related objects."""
    # First pass: save all plain forms without committing.
    for form in self._forms:
        if isinstance(form, BaseForm):
            form.save(commit=False)
    # Commit the instance itself.
    self.instance.save()
    # Run any post-commit hooks that have been stashed on forms.
    # NOTE(review): this pass iterates self.forms while the others use
    # self._forms -- presumably an alias; confirm they are the same sequence.
    for form in self.forms:
        if isinstance(form, BaseForm):
            if hasattr(form, 'save_m2m'):
                form.save_m2m()
            if hasattr(form, 'save_related'):
                form.save_related()
    # Finally commit any formsets.
    for form in self._forms:
        if isinstance(form, BaseFormSet):
            form.save(commit=True)
    return self.instance
Save the changes to the instance and any related objects .
169
11
12,815
def make_csv(self):
    """Build the csv text representation of this report element into self.csv."""
    import csv
    try:
        from StringIO import StringIO  # Python 2.7
    except ImportError:
        from io import StringIO

    out = StringIO()
    writer = csv.writer(out, delimiter='|', lineterminator='\n',
                        quoting=csv.QUOTE_MINIMAL)
    if self.function == 'total':
        writer.writerows(self.results)
    elif self.function == 'top':
        rows = [['Value', self.headers.strip('"')]]
        if self.results[0] is not None:
            for res in self.results:
                if res is not None:
                    rows.append(tuple([res[0], ','.join(res[1])]))
        writer.writerows(rows)
    elif self.function == 'table':
        rows = [[header.strip('"') for header in re.split('\s*,\s*', self.headers)]]
        for res in sorted(self.results, key=lambda x: x[0]):
            row = list(res[:-1])
            lastcol = get_fmt_results(res[-1], limit=10)
            # Keep a bracketed summary cell separate from the joined values.
            if lastcol[-1][0] == '[' and lastcol[-1][-1] == ']':
                row.append(u'{0} {1}'.format(u', '.join(lastcol[:-1]), lastcol[-1]))
            else:
                row.append(u', '.join(lastcol))
            rows.append(row)
        writer.writerows(rows)
    self.csv = out.getvalue()
Get the text representation of a report element as csv .
380
12
12,816
def make(self, apps):
    """Make subreport items from per-application results."""
    # Process apps in (priority, name) order for deterministic output.
    for (appname, app) in sorted(apps.items(), key=lambda x: (x[1].priority, x[0])):
        logger.info('Getting report results from %r', appname)
        for report_data in app.report_data:
            if report_data.subreport != self.name:
                continue
            if report_data.function == 'total':
                for opt in report_data:
                    match = report_data.parse_report_data(opt)
                    cond = match.group('condition')
                    valfld = match.group('valfld')
                    unit = match.group('unit')
                    itemtitle = match.group('fields').strip('"')
                    total = report_data.rules[opt].total_events(cond, valfld)
                    if total == 0:
                        continue
                    if unit is not None:
                        total, unit = get_value_unit(total, unit, 'T')
                        total = '{0} {1}'.format(total, unit)
                    else:
                        total = str(total)
                    report_data.results.append(tuple([total, itemtitle]))
            elif report_data.function == 'top':
                k = int(report_data.topnum)
                for opt in report_data:
                    match = report_data.parse_report_data(opt)
                    valfld = match.group('valfld')
                    field = match.group('fields')
                    usemax = match.group('add2res') is None
                    toplist = report_data.rules[opt].top_events(k, valfld, usemax, field)
                    report_data.results.extend(toplist)
            elif report_data.function == 'table':
                cols = len(re.split('\s*,\s*', report_data.headers))
                for opt in report_data:
                    match = report_data.parse_report_data(opt)
                    cond = match.group('condition')
                    fields = re.split('\s*,\s*', match.group('fields'))
                    tablelist = report_data.rules[opt].list_events(cond, cols, fields)
                    report_data.results.extend(tablelist)
            if report_data.results:
                self.report_data.append(report_data)

    # Sort and rewrite results as strings with units
    for report_data in self.report_data:
        if report_data.function == 'top':
            # Sort values
            report_data.results = sorted(report_data.results,
                                         key=lambda x: x[0], reverse=True)
            # Get the unit if any and convert numeric results to strings
            unit = None
            for opt in report_data:
                match = report_data.parse_report_data(opt)
                unit = match.group('unit')
                if unit is not None:
                    break
            for res in report_data.results:
                if unit is not None:
                    v, u = get_value_unit(res[0], unit, 'T')
                    res[0] = '{0} {1}'.format(v, u)
                else:
                    res[0] = str(res[0])
Make subreport items from results .
714
7
12,817
def make_format(self, fmt, width):
    """Render every populated report item in the requested format ('text', 'html' or 'csv')."""
    if not self.report_data:
        return
    for data_item in self.report_data:
        if not data_item.results:
            continue
        if fmt is None or fmt == 'text':
            data_item.make_text(width)
        elif fmt == 'html':
            data_item.make_html()
        elif fmt == 'csv':
            data_item.make_csv()
Make subreport text in a specified format
94
8
12,818
def compact_tables(self):
    """Merge identical 'table' report items within this subreport into one."""
    removable = set()
    count = len(self.report_data)
    for i in range(count):
        if i in removable:
            continue
        if self.report_data[i].function[0:5] != 'table':
            continue
        for j in range(i + 1, count):
            if (self.report_data[j].function[0:5] == 'table'
                    and self.report_data[i] == self.report_data[j]):
                logger.debug('Merge of 2 identical report tables: {0}'
                             .format(self.report_data[i].title))
                removable.add(j)
                self.report_data[i].results.extend(self.report_data[j].results)
    # Pop from the end so earlier indexes remain valid.
    for i in sorted(removable, reverse=True):
        self.report_data.pop(i)
Compact report items of type table with same results type . Report items of type tables in the same subreport are merged into one . The data are ordered by 1st column .
228
36
12,819
def make(self, apps):
    """Create the report from application results, then compact duplicate tables."""
    # Build every subreport first, then merge identical tables.
    for sub in self.subreports:
        logger.debug('Make subreport "{0}"'.format(sub.name))
        sub.make(apps)
    for sub in self.subreports:
        sub.compact_tables()
Create the report from application results
63
6
12,820
def get_report_parts(self, apps, formats):
    """Render subreports in each requested format and assemble the report parts."""
    for fmt in formats:
        # Fixed width for file output; terminal width for console output.
        width = 100 if fmt is not None else tui.get_terminal_size()[0]
        for sr in self.subreports:
            sr.make_format(fmt, width)

    logger.debug('Build a map for arguments and run\'s statistics ...')
    value_mapping = {
        'title': self.title,
        'patterns': ', '.join([repr(pattern) for pattern in self.args.patterns]) or None,
        'pattern_files': ', '.join(self.args.pattern_files) or None,
        'hosts': ', '.join(self.args.hosts) or None,
        'apps': u', '.join([u'%s(%d)' % (app.name, app.matches)
                            for app in apps.values() if app.matches > 0]),
        'version': __version__,
    }
    filters = []
    for flt in self.args.filters:
        filters.append(' AND '.join(['%s=%r' % (k, v.pattern) for k, v in flt.items()]))
    if filters:
        value_mapping['filters'] = ' OR '.join(['(%s)' % item for item in filters])
    else:
        # NOTE(review): filters is empty here, so this is always None.
        value_mapping['filters'] = filters[0] if filters else None
    value_mapping.update(self.stats)

    report = []
    for fmt in formats:
        if fmt == 'text':
            logger.info('appends a text page report')
            report.append(self.make_text_page(value_mapping))
        elif fmt == 'html':
            logger.info('appends a html page report')
            report.append(self.make_html_page(value_mapping))
        elif fmt == 'csv':
            logger.info('extends with a list of csv subreports')
            report.extend(self.make_csv_tables())
    return report
Make report item texts in a specified format .
462
9
12,821
def set_stats(self, run_stats):
    """Store run statistics for the report, flattening list values to strings."""
    stats = run_stats.copy()
    stats['tot_files'] = len(stats['files'])
    stats['files'] = ', '.join(run_stats['files'])
    stats['extra_tags'] = ', '.join(run_stats['extra_tags'])
    self.stats = stats
Set run statistics for the report .
96
7
12,822
def make_html_page(self, valumap):
    """Build the report as an html page using the template page from file."""
    logger.info('Making an html report using template %r.', self.html_template)
    # Bug fix: use a context manager so the template file is closed even if
    # read() raises (the original used open/read/close without try/finally).
    with open(self.html_template) as fh:
        template = fh.read()

    parts = []
    for sr in self.subreports:
        report_data = [item.html for item in sr.report_data if item.html]
        if report_data:
            # NOTE(review): sr.title ({0}) is unused by this format string;
            # only sr.reptext is rendered -- confirm this is intentional.
            parts.append('\n<h2>{1}</h2>\n'.format(sr.title, sr.reptext))
            parts.extend(report_data)
            parts.append('\n<hr/>')
    valumap['subreports'] = '\n'.join(parts)  # or "\n<<NO SUBREPORT RELATED EVENTS>>\n"
    html_page = Template(template).safe_substitute(valumap)
    return TextPart(fmt='html', text=html_page, ext='html')
Builds the report as html page using the template page from file .
227
14
12,823
def make_text_page(self, valumap):
    """Build the report as a text page using the template page from file."""
    logger.info('Making a text report page using template %r.', self.text_template)
    # Bug fix: use a context manager so the template file is closed even if
    # read() raises (the original used open/read/close without try/finally).
    with open(self.text_template) as fh:
        template = fh.read()

    parts = []
    for sr in self.subreports:
        report_data = [item.text for item in sr.report_data if item.text]
        if report_data:
            parts.append('\n{1}\n***** {0} *****\n{1}'.format(
                sr.title, '*' * (len(sr.title) + 12)))
            parts.extend(report_data)
    valumap['subreports'] = '\n'.join(parts)  # "\n<<NO SUBREPORT RELATED EVENTS>>\n"
    text_page = Template(template).safe_substitute(valumap)
    return TextPart(fmt='text', text=text_page, ext='txt')
Builds the report as text page using the template page from file .
227
14
12,824
def make_csv_tables(self):
    """Build the report as a list of csv TextPart tables."""
    logger.info('Generate csv report tables')
    return [
        TextPart(fmt='csv', text=data_item.csv, ext='csv')
        for sr in self.subreports
        for data_item in sr.report_data
    ]
Builds the report as a list of csv tables with titles .
84
14
12,825
def fetch_items(self):
    """Fetch the current page slice of items using the stored query and pagination settings."""
    skip = self.per_page * (self.page - 1)
    return self._query.limit(self.per_page).offset(skip).all()
Fetch items Performs a query to retrieve items based on current query and pagination settings .
49
19
12,826
def next_page(self):
    """Advance to the next item slice; return False when already on the last page."""
    if self.is_last_page():
        return False
    self.page += 1
    self.items = self.fetch_items()
    return True
Next page Uses query object to fetch next slice of items unless on last page in which case does nothing
39
20
12,827
def previous_page(self):
    """Step back to the previous item slice; return False when already on the first page."""
    if self.is_first_page():
        return False
    self.page -= 1
    self.items = self.fetch_items()
    return True
Previous page Uses query object to fetch previous slice of items unless on first page in which case does nothing
39
20
12,828
def _make_spec_file(self):
    """Customize the rpm spec file by appending %config(noreplace) entries."""
    spec_file = setuptools.command.bdist_rpm.bdist_rpm._make_spec_file(self)
    spec_file.extend([
        '%config(noreplace) /etc/lograptor/lograptor.conf',
        '%config(noreplace) /etc/lograptor/report_template.*',
        '%config(noreplace) /etc/lograptor/conf.d/*.conf',
    ])
    return spec_file
Customize spec file inserting %config section
132
8
12,829
def user_save_event(user):
    """Log a persist (update/save) event for a user entity."""
    message = 'User ({}){} updated/saved'.format(user.id, user.email)
    current_app.logger.info(message)
Handle persist event for user entities
44
6
12,830
def user_got_role_event(user, role):
    """Log that a user was granted a new role."""
    current_app.logger.info(
        'User ({}){} got new role [{}]'.format(user.id, user.email, role.handle))
User got new role
54
4
12,831
def generate_hash(self, length=30):
    """Return a cryptographically random alphanumeric string of the given length."""
    import random
    import string
    alphabet = string.ascii_letters + string.digits
    pick = random.SystemRandom().choice
    return ''.join(pick(alphabet) for _ in range(length))
Generate random string of given length
59
7
12,832
def gravatar(self, size):
    """Return the gravatar URL for this user's email at the given pixel size."""
    digest = md5(self.email.encode('utf-8')).hexdigest()
    return 'http://www.gravatar.com/avatar/{}?d=mm&s={}'.format(digest, size)
Get url to gravatar
69
5
12,833
def is_locked(self):
    """Check whether the account is locked, auto-unlocking if the lock has timed out."""
    now = datetime.datetime.utcnow()
    if not self.locked_until:
        return False
    if self.locked_until >= now:
        return True
    # Lock period elapsed: clear the lock.
    self.unlock_account()
    return False
Is locked? Checks locking and possibly unlocks upon timeout if account was previously locked .
67
16
12,834
def lock_account(self, minutes=30):
    """Lock the user account for the given number of minutes from now."""
    self.locked_until = datetime.datetime.utcnow() + datetime.timedelta(minutes=minutes)
Lock user account for a period
43
6
12,835
def increment_failed_logins(self):
    """Bump the failed-login counter, locking the account once the limit is hit."""
    if not self.failed_logins:
        self.failed_logins = 1
        return
    if self.failed_login_limit_reached():
        # Limit hit: start over and lock the account for 30 minutes.
        self.reset_login_counter()
        self.lock_account(30)
        return
    self.failed_logins += 1
Increment failed logins counter
71
6
12,836
def failed_login_limit_reached(self):
    """Return True when the failed-login count has reached the limit (10)."""
    login_limit = 10
    return bool(self.failed_logins and self.failed_logins >= login_limit)
A boolean method to check for failed login limit being reached
43
11
12,837
def email_secure ( self ) : email = self . _email if not email : return '' address , host = email . split ( '@' ) if len ( address ) <= 2 : return ( '*' * len ( address ) ) + '@' + host import re host = '@' + host obfuscated = re . sub ( r'[a-zA-z0-9]' , '*' , address [ 1 : - 1 ] ) return address [ : 1 ] + obfuscated + address [ - 1 : ] + host
Obfuscated email used for display
117
7
12,838
def email(self, email):
    """Set the (lowercased) email, deferring the change until confirmed for existing users."""
    if email == self.email:
        return
    email = email.lower()
    if self._email is None:
        # First assignment: set directly.
        self._email = email
    else:
        # Existing address: stage the new one until it is confirmed.
        self.email_new = email
    self.require_email_confirmation()
Set email and generate confirmation
66
5
12,839
def require_email_confirmation(self):
    """Flag the email as unconfirmed and issue a fresh confirmation link valid for 24 hours."""
    self.email_confirmed = False
    self.email_link = self.generate_hash(50)
    self.email_link_expires = datetime.datetime.utcnow() + datetime.timedelta(hours=24)
Mark email as unconfirmed
66
5
12,840
def cancel_email_change(self):
    """Cancel a pending email change and roll the confirmation state back."""
    if not self.email_new:
        return
    # Drop the staged address and restore the confirmed state.
    # (Bug fix: the original assigned email_new = None twice; once suffices.)
    self.email_new = None
    self.email_confirmed = True
    self.email_link = None
    self.email_link_expires = None
Cancel email change for new users and roll back data
57
11
12,841
def email_link_expired(self, now=None):
    """Check whether the email confirmation link has expired (as of *now*, default utcnow)."""
    reference = now or datetime.datetime.utcnow()
    return self.email_link_expires < reference
Check if email link expired
43
5
12,842
def password(self, password):
    """Hash the given string with passlib and store it as the password."""
    from boiler.user.util.passlib import passlib_context
    self._password = passlib_context.encrypt(str(password))
Encode a string and set as password
45
8
12,843
def verify_password(self, password):
    """Check a candidate string against the stored password hash."""
    if self.password is None:
        return False
    from boiler.user.util.passlib import passlib_context
    return passlib_context.verify(str(password), self.password)
Verify a given string for being valid password
50
9
12,844
def generate_password_link(self):
    """Issue a password reset link hash valid for 24 hours."""
    self.password_link = self.generate_hash(50)
    self.password_link_expires = datetime.datetime.utcnow() + datetime.timedelta(hours=24)
Generates a link to reset password
58
7
12,845
def password_link_expired(self, now=None):
    """Check whether the password reset link has expired (as of *now*, default utcnow)."""
    reference = now or datetime.datetime.utcnow()
    return self.password_link_expires < reference
Check if password link expired
43
5
12,846
def add_role(self, role):
    """Add a role to the user; the role must be valid and saved or x.UserException is raised."""
    schema = RoleSchema()
    valid = schema.process(role)
    if not valid or not role.id:
        raise x.UserException('Role must be valid and saved before adding to user')
    self.__roles.append(role)
Add role to user Role must be valid and saved first otherwise will raise an exception .
66
17
12,847
def has_role(self, role_or_handle):
    """Check whether the user has the given role (role object or handle string)."""
    if not isinstance(role_or_handle, str):
        return role_or_handle in self.roles
    return any(role.handle == role_or_handle for role in self.roles)
Checks if user has role
71
6
12,848
def push(remote='origin', branch='master'):
    """git push to the given remote and branch."""
    # Bug fix: the status message said "Pulling changes from repo" although
    # this command performs a push.
    print(cyan("Pushing changes to repo ( %s / %s)..." % (remote, branch)))
    local("git push %s %s" % (remote, branch))
git push commit
59
3
12,849
def pull(remote='origin', branch='master'):
    """git pull from the given remote and branch."""
    message = "Pulling changes from repo ( %s / %s)..." % (remote, branch)
    print(cyan(message))
    local("git pull %s %s" % (remote, branch))
git pull commit
59
3
12,850
def sync(remote='origin', branch='master'):
    """git pull then git push to sync the local branch with the remote."""
    # Bug fix: the original passed (branch, remote) positionally into
    # pull(remote, branch) and push(remote, branch), swapping the arguments;
    # pass by keyword so the order cannot be confused.
    pull(remote=remote, branch=branch)
    push(remote=remote, branch=branch)
    print(cyan("Git Synced!"))
git pull and push commit
40
5
12,851
def update(tournament, match, attachment, **params):
    """Update the attributes of a match attachment via the API."""
    path = "tournaments/%s/matches/%s/attachments/%s" % (tournament, match, attachment)
    api.fetch("PUT", path, "match_attachment", **params)
Update the attributes of a match attachment .
61
8
12,852
def count_row(engine, table):
    """Return the number of rows in *table* using a COUNT(*) query."""
    query = select([func.count()]).select_from(table)
    return engine.execute(query).fetchone()[0]
Return number of rows in a table .
41
8
12,853
def get_providers(self):
    """Build (and cache) a dictionary of configured OAuth providers ready for registration."""
    if self.providers:
        return self.providers

    providers = dict()
    for provider in self.config:
        # Each provider must have a matching <name>_config builder method.
        configurator_name = provider.lower() + '_config'
        if not hasattr(self, configurator_name):
            raise ValueError('Provider [{}] not recognized'.format(provider))
        provider_config = self.config[provider]
        configurator = getattr(self, configurator_name)
        providers[provider] = configurator(
            id=provider_config.get('id'),
            secret=provider_config.get('secret'),
            scope=provider_config.get('scope'),
            offline=provider_config.get('offline'),
        )
    self.providers = providers
    return self.providers
Get OAuth providers Returns a dictionary of oauth applications ready to be registered with flask oauth extension at application bootstrap .
172
25
12,854
def token_getter(provider, token=None):
    """Generic token getter for all providers; falls back to the session token."""
    if token is not None:
        return token
    return session.get(provider + '_token')
Generic token getter for all the providers
40
8
12,855
def register_token_getter(self, provider):
    """Register a callback on the provider's remote app to fetch its token from the session."""
    remote_app = oauth.remote_apps[provider]

    def getter(token=None):
        return self.token_getter(provider, token)

    remote_app.tokengetter(getter)
Register callback to retrieve token from session
65
7
12,856
def vkontakte_config(self, id, secret, scope=None, offline=False, **_):
    """Build the oauth remote-app config dictionary for vkontakte."""
    if scope is None:
        scope = 'email,offline'
    if offline:
        # NOTE(review): the default scope already contains 'offline', so this
        # can yield 'email,offline,offline' -- confirm intended upstream.
        scope += ',offline'
    return dict(
        request_token_url=None,
        access_token_url='https://oauth.vk.com/access_token',
        authorize_url='https://oauth.vk.com/authorize',
        base_url='https://api.vk.com/method/',
        consumer_key=id,
        consumer_secret=secret,
        request_token_params=dict(scope=scope),
    )
Get config dictionary for vkontakte oauth
155
11
12,857
def instagram_config(self, id, secret, scope=None, **_):
    """Build the oauth remote-app config dictionary for instagram."""
    effective_scope = scope if scope else 'basic'
    return dict(
        # request_token_url=None,
        access_token_url='/oauth/access_token/',
        authorize_url='/oauth/authorize/',
        base_url='https://api.instagram.com/',
        consumer_key=id,
        consumer_secret=secret,
        request_token_params=dict(scope=effective_scope),
    )
Get config dictionary for instagram oauth
122
8
12,858
def convert(self, chain_id, residue_id, from_scheme, to_scheme):
    """The API conversion function between residue ID schemes.

    At the cost of three function calls, scheme names are case-insensitive
    to be more user-friendly.
    """
    from_scheme = from_scheme.lower()
    to_scheme = to_scheme.lower()
    assert (from_scheme in ResidueRelatrix.schemes)
    assert (to_scheme in ResidueRelatrix.schemes)
    return self._convert(chain_id, residue_id, from_scheme, to_scheme)
The API conversion function . This converts between the different residue ID schemes .
131
14
12,859
def _convert(self, chain_id, residue_id, from_scheme, to_scheme):
    """The actual private conversion function.

    There are 12 valid combinations but rather than write them all out
    explicitly, we use recursion through adjacent schemes, sacrificing
    speed for brevity.
    """
    if from_scheme == 'rosetta':
        atom_id = self.rosetta_to_atom_sequence_maps.get(chain_id, {})[residue_id]
        if to_scheme == 'atom':
            return atom_id
        return self._convert(chain_id, atom_id, 'atom', to_scheme)
    if from_scheme == 'atom':
        if to_scheme == 'rosetta':
            return self.atom_to_rosetta_sequence_maps.get(chain_id, {})[residue_id]
        seqres_id = self.atom_to_seqres_sequence_maps.get(chain_id, {})[residue_id]
        if to_scheme == 'seqres':
            return seqres_id
        return self.convert(chain_id, seqres_id, 'seqres', to_scheme)
    if from_scheme == 'seqres':
        if to_scheme == 'uniparc':
            return self.seqres_to_uniparc_sequence_maps.get(chain_id, {})[residue_id]
        atom_id = self.seqres_to_atom_sequence_maps.get(chain_id, {})[residue_id]
        if to_scheme == 'atom':
            return atom_id
        return self.convert(chain_id, atom_id, 'atom', to_scheme)
    if from_scheme == 'uniparc':
        seqres_id = self.uniparc_to_seqres_sequence_maps.get(chain_id, {})[residue_id]
        if to_scheme == 'seqres':
            return seqres_id
        return self._convert(chain_id, seqres_id, 'seqres', to_scheme)
    raise Exception("We should never reach this line.")
The actual private conversion function .
485
6
12,860
def convert_from_rosetta(self, residue_id, to_scheme):
    """Convert from Rosetta numbering without a chain ID by scanning all chains for the residue."""
    assert (type(residue_id) == types.IntType)
    # Scan *all* sequences without breaking out early so that a duplicate
    # mapping would trip the assertion below.
    chain_id = None
    for c, sequence in self.rosetta_sequences.iteritems():
        for id, r in sequence:
            if r.ResidueID == residue_id:
                assert (chain_id == None)
                chain_id = c
    if chain_id:
        return self.convert(chain_id, residue_id, 'rosetta', to_scheme)
    return None
A simpler conversion function to convert from Rosetta numbering without requiring the chain identifier .
156
16
12,861
def _validate(self):
    """Run every mapping validation pass in order."""
    for check in (self._validate_fasta_vs_seqres,
                  self._validate_mapping_signature,
                  self._validate_id_types,
                  self._validate_residue_types):
        check()
Validate the mappings .
60
6
12,862
def _validate_id_types(self):
    """Check that sequence IDs are ints for UniParc/FASTA/SEQRES/Rosetta and strings for ATOM."""
    # Integer-keyed sequence collections.
    for sequences in [self.uniparc_sequences, self.fasta_sequences,
                      self.seqres_sequences, self.rosetta_sequences]:
        for chain_id, sequence in sequences.iteritems():
            sequence_id_types = set(map(type, sequence.ids()))
            if sequence_id_types:
                assert (len(sequence_id_types) == 1)
                assert (sequence_id_types.pop() == types.IntType)
    # ATOM sequences use (6-character) string IDs instead.
    for chain_id, sequence in self.atom_sequences.iteritems():
        sequence_id_types = set(map(type, sequence.ids()))
        assert (len(sequence_id_types) == 1)
        sequence_id_type = sequence_id_types.pop()
        assert (sequence_id_type == types.StringType or sequence_id_type == types.UnicodeType)
Check that the ID types are integers for Rosetta SEQRES and UniParc sequences and 6 - character PDB IDs for the ATOM sequences .
213
31
12,863
def _validate_residue_types(self):
    """Make sure all the residue types map consistently through each translation layer."""
    for chain_id, sequence_map in self.rosetta_to_atom_sequence_maps.iteritems():
        rosetta_sequence = self.rosetta_sequences[chain_id]
        atom_sequence = self.atom_sequences[chain_id]
        for rosetta_id, atom_id, _ in sequence_map:
            assert (rosetta_sequence[rosetta_id].ResidueAA == atom_sequence[atom_id].ResidueAA)
    for chain_id, sequence_map in self.atom_to_seqres_sequence_maps.iteritems():
        atom_sequence = self.atom_sequences[chain_id]
        seqres_sequence = self.seqres_sequences[chain_id]
        for atom_id, seqres_id, _ in sorted(sequence_map):
            assert (atom_sequence[atom_id].ResidueAA == seqres_sequence[seqres_id].ResidueAA)
    for chain_id, sequence_map in self.seqres_to_uniparc_sequence_maps.iteritems():
        if self.pdb_chain_to_uniparc_chain_mapping.get(chain_id):
            seqres_sequence = self.seqres_sequences[chain_id]
            uniparc_sequence = self.uniparc_sequences[
                self.pdb_chain_to_uniparc_chain_mapping[chain_id]]
            for seqres_id, uniparc_id_resid_pair, substitution_match in sequence_map:
                uniparc_id = uniparc_id_resid_pair[1]
                # Some of the matches may not be identical but all the '*'
                # Clustal Omega matches should be identical.
                if substitution_match and substitution_match.clustal == 1:
                    assert (seqres_sequence[seqres_id].ResidueAA ==
                            uniparc_sequence[uniparc_id].ResidueAA)
Make sure all the residue types map through translation .
464
10
12,864
def search(cls, query_string, options=None, enable_facet_discovery=False,
           return_facets=None, facet_options=None, facet_refinements=None,
           deadline=None, **kwargs):
    """Search the index, restricted to documents that belong to instances of this class."""
    # Narrow results to the most-derived class name.
    search_class = cls.search_get_class_names()[-1]
    query_string += ' ' + 'class_name:%s' % (search_class,)
    query = search.Query(
        query_string=query_string,
        options=options,
        enable_facet_discovery=enable_facet_discovery,
        return_facets=return_facets,
        facet_options=facet_options,
        facet_refinements=facet_refinements,
    )
    return cls.search_get_index().search(query, deadline=deadline, **kwargs)
Searches the index . Conveniently searches only for documents that belong to instances of this class .
191
21
12,865
def search_update_index ( self ) : doc_id = self . search_get_document_id ( self . key ) fields = [ search . AtomField ( 'class_name' , name ) for name in self . search_get_class_names ( ) ] index = self . search_get_index ( ) if self . searchable_fields is None : searchable_fields = [ ] for field , prop in self . _properties . items ( ) : if field == 'class' : continue for class_ , field_type in SEARCHABLE_PROPERTY_TYPES . items ( ) : if isinstance ( prop , class_ ) : searchable_fields . append ( field ) else : searchable_fields = self . searchable_fields for f in set ( searchable_fields ) : prop = self . _properties [ f ] value = getattr ( self , f ) field = None field_found = False for class_ , field_type in SEARCHABLE_PROPERTY_TYPES . items ( ) : if isinstance ( prop , class_ ) : field_found = True if value is not None : if isinstance ( value , list ) or isinstance ( value , tuple ) or isinstance ( value , set ) : for v in value : field = field_type ( name = f , value = v ) elif isinstance ( value , ndb . Key ) : field = field_type ( name = f , value = value . urlsafe ( ) ) else : field = field_type ( name = f , value = value ) if not field_found : raise ValueError ( 'Cannot find field type for %r on %r' % ( prop , self . __class__ ) ) if field is not None : fields . append ( field ) document = search . Document ( doc_id , fields = fields ) index . put ( document )
Updates the search index for this instance .
406
9
12,866
def search_get_class_names ( cls ) : if hasattr ( cls , '_class_key' ) : class_names = [ ] for n in cls . _class_key ( ) : class_names . append ( n ) return class_names else : return [ cls . __name__ ]
Returns class names for use in document indexing .
70
10
12,867
def from_urlsafe ( cls , urlsafe ) : try : key = ndb . Key ( urlsafe = urlsafe ) except : return None obj = key . get ( ) if obj and isinstance ( obj , cls ) : return obj
Returns an instance of the model from a urlsafe string .
56
13
12,868
def get_from_search_doc ( cls , doc_id ) : # If the document was passed instead of the doc_id, get the document. if hasattr ( doc_id , 'doc_id' ) : doc_id = doc_id . doc_id return cls . from_urlsafe ( doc_id )
Returns an instance of the model from a search document id .
73
12
12,869
def _pre_delete_hook ( cls , key ) : if cls . searching_enabled : doc_id = cls . search_get_document_id ( key ) index = cls . search_get_index ( ) index . delete ( doc_id )
Removes instance from index .
59
6
12,870
def process_answer ( self , user , item , asked , answered , time , answer , response_time , guess , * * kwargs ) : pass
This method is used during the answer streaming and is called after the predictive model for each answer .
33
19
12,871
def _get_sz_info ( self ) : if 'None' == self . _state : return None cmd = 'show virtual-service detail name guestshell+' got = self . cli ( cmd ) got = got [ 'TABLE_detail' ] [ 'ROW_detail' ] sz_cpu = int ( got [ 'cpu_reservation' ] ) sz_disk = int ( got [ 'disk_reservation' ] ) sz_memory = int ( got [ 'memory_reservation' ] ) self . sz_has = _guestshell . Resources ( cpu = sz_cpu , memory = sz_memory , disk = sz_disk )
Obtains the current resource allocations assumes that the guestshell is in an Activated state
150
17
12,872
def fractal_dimension ( image ) : pixels = [ ] for i in range ( image . shape [ 0 ] ) : for j in range ( image . shape [ 1 ] ) : if image [ i , j ] > 0 : pixels . append ( ( i , j ) ) lx = image . shape [ 1 ] ly = image . shape [ 0 ] pixels = np . array ( pixels ) if len ( pixels ) < 2 : return 0 scales = np . logspace ( 1 , 4 , num = 20 , endpoint = False , base = 2 ) Ns = [ ] for scale in scales : H , edges = np . histogramdd ( pixels , bins = ( np . arange ( 0 , lx , scale ) , np . arange ( 0 , ly , scale ) ) ) H_sum = np . sum ( H > 0 ) if H_sum == 0 : H_sum = 1 Ns . append ( H_sum ) coeffs = np . polyfit ( np . log ( scales ) , np . log ( Ns ) , 1 ) hausdorff_dim = - coeffs [ 0 ] return hausdorff_dim
Estimates the fractal dimension of an image with box counting . Counts pixels with value 0 as empty and everything else as non - empty . Input image has to be grayscale .
248
38
12,873
def channel_portion ( image , channel ) : # Separate color channels rgb = [ ] for i in range ( 3 ) : rgb . append ( image [ : , : , i ] . astype ( int ) ) ch = rgb . pop ( channel ) relative_values = ch - np . sum ( rgb , axis = 0 ) / 2 relative_values = np . maximum ( np . zeros ( ch . shape ) , relative_values ) return float ( np . average ( relative_values ) / 255 )
Estimates the amount of a color relative to other colors .
108
12
12,874
def intensity ( image ) : if len ( image . shape ) > 2 : # Convert to grayscale image = cv2 . cvtColor ( image , cv2 . COLOR_RGB2GRAY ) / 255 elif issubclass ( image . dtype . type , np . integer ) : image /= 255 return float ( np . sum ( image ) / np . prod ( image . shape ) )
Calculates the average intensity of the pixels in an image . Accepts both RGB and grayscale images .
89
23
12,875
def sliding_window ( sequence , win_size , step = 1 ) : # Verify the inputs try : it = iter ( sequence ) except TypeError : raise ValueError ( "sequence must be iterable." ) if not isinstance ( win_size , int ) : raise ValueError ( "type(win_size) must be int." ) if not isinstance ( step , int ) : raise ValueError ( "type(step) must be int." ) if step > win_size : raise ValueError ( "step must not be larger than win_size." ) if win_size > len ( sequence ) : raise ValueError ( "win_size must not be larger than sequence length." ) # Pre-compute number of chunks to emit num_chunks = ( ( len ( sequence ) - win_size ) / step ) + 1 # Do the work for i in range ( 0 , num_chunks * step , step ) : yield sequence [ i : i + win_size ]
Returns a generator that will iterate through the defined chunks of input sequence . Input sequence must be iterable .
209
22
12,876
def dna_to_re ( seq ) : seq = seq . replace ( 'K' , '[GT]' ) seq = seq . replace ( 'M' , '[AC]' ) seq = seq . replace ( 'R' , '[AG]' ) seq = seq . replace ( 'Y' , '[CT]' ) seq = seq . replace ( 'S' , '[CG]' ) seq = seq . replace ( 'W' , '[AT]' ) seq = seq . replace ( 'B' , '[CGT]' ) seq = seq . replace ( 'V' , '[ACG]' ) seq = seq . replace ( 'H' , '[ACT]' ) seq = seq . replace ( 'D' , '[AGT]' ) seq = seq . replace ( 'X' , '[GATC]' ) seq = seq . replace ( 'N' , '[GATC]' ) return re . compile ( seq )
Return a compiled regular expression that will match anything described by the input sequence . For example a sequence that contains a N matched any base at that position .
193
30
12,877
def case_highlight ( seq , subseq ) : return re . subs ( subseq . lower ( ) , subseq . upper ( ) , seq . lower ( ) )
Highlights all instances of subseq in seq by making them uppercase and everything else lowercase .
37
21
12,878
def index_relations ( sender , pid_type , json = None , record = None , index = None , * * kwargs ) : if not json : json = { } pid = PersistentIdentifier . query . filter ( PersistentIdentifier . object_uuid == record . id , PersistentIdentifier . pid_type == pid_type , ) . one_or_none ( ) relations = None if pid : relations = serialize_relations ( pid ) if relations : json [ 'relations' ] = relations return json
Add relations to the indexed record .
113
7
12,879
def index_siblings ( pid , include_pid = False , children = None , neighbors_eager = False , eager = False , with_deposits = True ) : assert not ( neighbors_eager and eager ) , """Only one of the 'eager' and 'neighbors_eager' flags can be set to True, not both""" if children is None : parent_pid = PIDNodeVersioning ( pid = pid ) . parents . first ( ) children = PIDNodeVersioning ( pid = parent_pid ) . children . all ( ) objid = str ( pid . object_uuid ) children = [ str ( p . object_uuid ) for p in children ] idx = children . index ( objid ) if objid in children else len ( children ) # Split children (which can include the pid) into left and right siblings # If 'pid' is not in children, idx is the length of list, so 'left' # will be all children, and 'right' will be an empty list # [X X X] X [X X X] if include_pid : # [X X X X] [X X X] Includes pid to the 'left' set left = children [ : idx + 1 ] else : # [X X X] X [X X X] left = children [ : idx ] right = children [ idx + 1 : ] if eager : eager_uuids = left + right bulk_uuids = [ ] elif neighbors_eager : # neighbors are last of 'left' and first or 'right' siblings # X X [X] X [X] X X eager_uuids = left [ - 1 : ] + right [ : 1 ] # all of the siblings, except the neighbours # [X X] X X X [X X] bulk_uuids = left [ : - 1 ] + right [ 1 : ] else : eager_uuids = [ ] bulk_uuids = left + right def get_dep_uuids ( rec_uuids ) : """Get corresponding deposit UUIDs from record's UUIDs.""" return [ str ( PersistentIdentifier . get ( 'depid' , Record . get_record ( id_ ) [ '_deposit' ] [ 'id' ] ) . object_uuid ) for id_ in rec_uuids ] if with_deposits : eager_uuids += get_dep_uuids ( eager_uuids ) bulk_uuids += get_dep_uuids ( bulk_uuids ) for id_ in eager_uuids : RecordIndexer ( ) . index_by_id ( id_ ) if bulk_uuids : RecordIndexer ( ) . bulk_index ( bulk_uuids )
Send sibling records of the passed pid for indexing .
592
11
12,880
def iter_paths ( self , pathnames = None , mapfunc = None ) : pathnames = pathnames or self . _pathnames if self . recursive and not pathnames : pathnames = [ '.' ] elif not pathnames : yield [ ] if mapfunc is not None : for mapped_paths in map ( mapfunc , pathnames ) : for path in mapped_paths : if self . recursive and ( os . path . isdir ( path ) or os . path . islink ( path ) ) : for t in os . walk ( path , followlinks = self . follow_symlinks ) : for filename , values in self . iglob ( os . path . join ( t [ 0 ] , '*' ) ) : yield filename , values else : empty_glob = True for filename , values in self . iglob ( path ) : yield filename , values empty_glob = False if empty_glob : yield path , None else : for path in pathnames : if self . recursive and ( os . path . isdir ( path ) or os . path . islink ( path ) ) : for t in os . walk ( path , followlinks = self . follow_symlinks ) : for filename , values in self . iglob ( os . path . join ( t [ 0 ] , '*' ) ) : yield filename , values else : empty_glob = True for filename , values in self . iglob ( path ) : yield filename , values empty_glob = False if empty_glob : yield path , None
Special iteration on paths . Yields couples of path and items . If a expanded path doesn t match with any files a couple with path and None is returned .
335
33
12,881
def check_stat ( self , path ) : statinfo = os . stat ( path ) st_mtime = datetime . fromtimestamp ( statinfo . st_mtime ) if platform . system ( ) == 'Linux' : check = st_mtime >= self . start_dt else : st_ctime = datetime . fromtimestamp ( statinfo . st_ctime ) check = st_mtime >= self . start_dt and st_ctime <= self . end_dt if not check : logger . info ( "file %r not in datetime period!" , path ) return check
Checks logfile stat information for excluding files not in datetime period . On Linux it s possible to checks only modification time because file creation info are not available so it s possible to exclude only older files . In Unix BSD systems and windows information about file creation date and times are available so is possible to exclude too newer files .
130
67
12,882
def add ( self , files , items ) : if isinstance ( files , ( str , bytes ) ) : files = iter ( [ files ] ) for pathname in files : try : values = self . _filemap [ pathname ] except KeyError : self . _filemap [ pathname ] = items else : values . extend ( items )
Add a list of files with a reference to a list of objects .
73
14
12,883
def recruit ( self ) : participants = Participant . query . with_entities ( Participant . status ) . all ( ) # if all networks are full, close recruitment, if not self . networks ( full = False ) : print "All networks are full, closing recruitment." self . recruiter ( ) . close_recruitment ( ) # if anyone is still working, don't recruit elif [ p for p in participants if p . status < 100 ] : print "People are still participating: not recruiting." # we only need to recruit if the current generation is complete elif ( len ( [ p for p in participants if p . status == 101 ] ) % self . generation_size ) == 0 : print "Recruiting another generation." self . recruiter ( ) . recruit_participants ( n = self . generation_size ) # otherwise do nothing else : print "not recruiting."
Recruit more participants .
186
5
12,884
def data_check ( self , participant ) : participant_id = participant . uniqueid nodes = Node . query . filter_by ( participant_id = participant_id ) . all ( ) if len ( nodes ) != self . experiment_repeats + self . practice_repeats : print ( "Error: Participant has {} nodes. Data check failed" . format ( len ( nodes ) ) ) return False nets = [ n . network_id for n in nodes ] if len ( nets ) != len ( set ( nets ) ) : print "Error: Participant participated in the same network \ multiple times. Data check failed" return False if None in [ n . fitness for n in nodes ] : print "Error: some of participants nodes are missing a fitness. \ Data check failed." return False if None in [ n . score for n in nodes ] : print "Error: some of participants nodes are missing a score. \ Data check failed" return False return True
Check a participants data .
200
5
12,885
def add_node_to_network ( self , node , network ) : network . add_node ( node ) node . receive ( ) environment = network . nodes ( type = Environment ) [ 0 ] environment . connect ( whom = node ) gene = node . infos ( type = LearningGene ) [ 0 ] . contents if ( gene == "social" ) : prev_agents = RogersAgent . query . filter ( and_ ( RogersAgent . failed == False , RogersAgent . network_id == network . id , RogersAgent . generation == node . generation - 1 ) ) . all ( ) parent = random . choice ( prev_agents ) parent . connect ( whom = node ) parent . transmit ( what = Meme , to_whom = node ) elif ( gene == "asocial" ) : environment . transmit ( to_whom = node ) else : raise ValueError ( "{} has invalid learning gene value of {}" . format ( node , gene ) ) node . receive ( )
Add participant s node to a network .
209
8
12,886
def create_state ( self , proportion ) : if random . random ( ) < 0.5 : proportion = 1 - proportion State ( origin = self , contents = proportion )
Create an environmental state .
36
5
12,887
def step ( self ) : current_state = max ( self . infos ( type = State ) , key = attrgetter ( 'creation_time' ) ) current_contents = float ( current_state . contents ) new_contents = 1 - current_contents info_out = State ( origin = self , contents = new_contents ) transformations . Mutation ( info_in = current_state , info_out = info_out )
Prompt the environment to change .
98
7
12,888
def print_subprocess_output ( subp ) : if subp : if subp . errorcode != 0 : print ( '<error errorcode="%s">' % str ( subp . errorcode ) ) print ( subp . stderr ) print ( "</error>" ) print_tag ( 'stdout' , '\n%s\n' % subp . stdout ) else : print_tag ( 'success' , '\n%s\n' % subp . stdout ) print_tag ( 'warnings' , '\n%s\n' % subp . stderr )
Prints the stdout and stderr output .
129
11
12,889
def get_all ( self , force_download = False ) : cl = self . client return [ cl . get_item ( item , force_download ) for item in self . item_urls ]
Retrieve the metadata for all items in this list from the server as Item objects
43
16
12,890
def get_item ( self , item_index , force_download = False ) : return self . client . get_item ( self . item_urls [ item_index ] , force_download )
Retrieve the metadata for a specific item in this ItemGroup
43
12
12,891
def refresh ( self ) : refreshed = self . client . get_item_list ( self . url ( ) ) self . item_urls = refreshed . urls ( ) self . list_name = refreshed . name ( ) return self
Update this ItemList by re - downloading it from the server
50
12
12,892
def append ( self , items ) : resp = self . client . add_to_item_list ( items , self . url ( ) ) self . refresh ( ) return resp
Add some items to this ItemList and save the changes to the server
37
14
12,893
def get_document ( self , index = 0 ) : try : return Document ( self . metadata ( ) [ 'alveo:documents' ] [ index ] , self . client ) except IndexError : raise ValueError ( 'No document exists for this item with index: ' + str ( index ) )
Return the metadata for the specified document as a Document object
65
11
12,894
def get_primary_text ( self , force_download = False ) : return self . client . get_primary_text ( self . url ( ) , force_download )
Retrieve the primary text for this item from the server
37
11
12,895
def get_annotations ( self , atype = None , label = None ) : return self . client . get_item_annotations ( self . url ( ) , atype , label )
Retrieve the annotations for this item from the server
41
10
12,896
def get_content ( self , force_download = False ) : return self . client . get_document ( self . url ( ) , force_download )
Retrieve the content for this Document from the server
33
10
12,897
def download_content ( self , dir_path = '' , filename = None , force_download = False ) : if filename is None : filename = self . get_filename ( ) path = os . path . join ( dir_path , filename ) data = self . client . get_document ( self . url ( ) , force_download ) with open ( path , 'wb' ) as f : f . write ( data ) return path
Download the content for this document to a file
92
9
12,898
def generic_ref_formatter ( view , context , model , name , lazy = False ) : try : if lazy : rel_model = getattr ( model , name ) . fetch ( ) else : rel_model = getattr ( model , name ) except ( mongoengine . DoesNotExist , AttributeError ) as e : # custom_field_type_formatters seems to fix the issue of stale references # crashing pages, since it intercepts the display of all ReferenceField's. return Markup ( '<span class="label label-danger">Error</span> <small>%s</small>' % e ) if rel_model is None : return '' try : return Markup ( '<a href="%s">%s</a>' % ( url_for ( # Flask-Admin creates URL's namespaced w/ model class name, lowercase. '%s.details_view' % rel_model . __class__ . __name__ . lower ( ) , id = rel_model . id , ) , rel_model , ) ) except werkzeug . routing . BuildError as e : return Markup ( '<span class="label label-danger">Error</span> <small>%s</small>' % e )
For GenericReferenceField and LazyGenericReferenceField
272
10
12,899
def generic_document_type_formatter ( view , context , model , name ) : _document_model = model . get ( 'document' ) . document_type url = _document_model . get_admin_list_url ( ) return Markup ( '<a href="%s">%s</a>' % ( url , _document_model . __name__ ) )
Return AdminLog . document field wrapped in URL to its list view .
84
14