Dataset columns:
idx: int64 (0 to 251k)
question: string (lengths 53 to 3.53k)
target: string (lengths 5 to 1.23k)
len_question: int64 (20 to 893)
len_target: int64 (3 to 238)
248,000
def parse_vals(cfg, section, option):
    try:
        vals = cfg.get(section, option)
    except ConfigParser.NoSectionError as err:
        if section != 'DEFAULT':
            vals = cfg.get('DEFAULT', option)
        else:
            raise err
    vals = vals.split('#')[0]
    vals = vals.strip()
    vals = vals.split(',')
    vals = [v.strip() for v in vals]
    vals = [v for v in vals if len(v)]
    return vals
Parse comma-separated values, Debian control file style, from a .cfg file.
133
13
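A minimal usage sketch of the comment-stripping and comma-splitting logic above, ported to Python 3's configparser (the section and option names are made up):

import configparser

cfg = configparser.ConfigParser()
cfg.read_string("[mypkg]\ndepends = numpy, scipy,  # trailing comment\n")
raw = cfg.get("mypkg", "depends")
# same comment-stripping and comma-splitting as parse_vals
vals = [v.strip() for v in raw.split("#")[0].split(",") if v.strip()]
print(vals)  # ['numpy', 'scipy']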
248,001
def parse_val(cfg, section, option):
    vals = parse_vals(cfg, section, option)
    if len(vals) == 0:
        return ''
    else:
        assert len(vals) == 1, (section, option, vals, type(vals))
        return vals[0]
Extract a single value from a .cfg file.
69
9
248,002
def check_cfg_files(cfg_files, module_name):
    cfg = ConfigParser.SafeConfigParser()
    cfg.read(cfg_files)
    if cfg.has_section(module_name):
        section_items = cfg.items(module_name)
    else:
        section_items = []
    default_items = cfg.items('DEFAULT')
    n_items = len(section_items) + len(default_items)
    if n_items == 0:
        log.warn('configuration files were specified, but no options were '
                 'found in "%s" or "DEFAULT" sections.' % (module_name,))
Check whether the configuration files actually specify something.
147
8
248,003
def _build_url(self, host, handler):
    scheme = 'https' if self.use_https else 'http'
    return '%s://%s/%s' % (scheme, host, handler)
Build a URL for our request based on the host, handler, and use_https property.
47
16
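What _build_url produces, as a standalone sketch (the host and handler values are hypothetical):

use_https = True
scheme = 'https' if use_https else 'http'
print('%s://%s/%s' % (scheme, 'api.example.com', 'RPC2'))  # https://api.example.com/RPC2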
248,004
def setting(self, opt, val):
    opt = opt.encode()
    if isinstance(val, basestring):
        fluid_settings_setstr(self.settings, opt, val)
    elif isinstance(val, int):
        fluid_settings_setint(self.settings, opt, val)
    elif isinstance(val, float):
        fluid_settings_setnum(self.settings, opt, val)
Change an arbitrary synth setting, type-smart.
93
8
248,005
def start(self, driver=None, device=None, midi_driver=None):
    if driver is not None:
        assert (driver in ['alsa', 'oss', 'jack', 'portaudio', 'sndmgr',
                           'coreaudio', 'Direct Sound', 'pulseaudio'])
        fluid_settings_setstr(self.settings, b'audio.driver', driver.encode())
        if device is not None:
            fluid_settings_setstr(self.settings,
                                  str('audio.%s.device' % (driver)).encode(),
                                  device.encode())
    self.audio_driver = new_fluid_audio_driver(self.settings, self.synth)
    if midi_driver is not None:
        assert (midi_driver in ['alsa_seq', 'alsa_raw', 'oss', 'winmidi',
                                'midishare', 'coremidi'])
        fluid_settings_setstr(self.settings, b'midi.driver', midi_driver.encode())
        self.router = new_fluid_midi_router(self.settings,
                                            fluid_synth_handle_midi_event,
                                            self.synth)
        fluid_synth_set_midi_router(self.synth, self.router)
        self.midi_driver = new_fluid_midi_driver(self.settings,
                                                 fluid_midi_router_handle_midi_event,
                                                 self.router)
Start the audio output driver in a separate background thread.
338
8
248,006
def sfload(self, filename, update_midi_preset=0):
    return fluid_synth_sfload(self.synth, filename.encode(), update_midi_preset)
Load a SoundFont and return its ID.
47
7
248,007
def channel_info(self, chan):
    info = fluid_synth_channel_info_t()
    fluid_synth_get_channel_info(self.synth, chan, byref(info))
    return (info.sfont_id, info.bank, info.program, info.name)
Get the (soundfont id, bank, program, preset name) of a channel.
70
9
248,008
def decompress_messages(self, partitions_offmsgs):
    for pomsg in partitions_offmsgs:
        if pomsg['message']:
            pomsg['message'] = self.decompress_fun(pomsg['message'])
        yield pomsg
Decompress pre-defined compressed fields for each message.
66
12
248,009
def _init_offsets(self, batchsize):
    upper_offsets = previous_lower_offsets = self._lower_offsets
    if not upper_offsets:
        upper_offsets = self.latest_offsets
    self._upper_offsets = {p: o for p, o in upper_offsets.items()
                           if o > self._min_lower_offsets[p]}
    # remove db dupes not used anymore
    if self._dupes:
        for p in list(six.iterkeys(self._dupes)):
            if p not in self._upper_offsets:
                db = self._dupes.pop(p)
                db.close()
                os.remove(db.filename)
    partition_batchsize = 0
    if self._upper_offsets:
        partition_batchsize = max(int(batchsize * self.__scan_excess), batchsize)
        self._lower_offsets = self._upper_offsets.copy()
        total_offsets_run = 0
        for p in sorted(self._upper_offsets.keys()):
            # readjust partition_batchsize when a partition scan starts from latest offset
            if total_offsets_run > 0 and partition_batchsize > batchsize:
                partition_batchsize = batchsize
            if partition_batchsize > 0:
                self._lower_offsets[p] = max(self._upper_offsets[p] - partition_batchsize,
                                             self._min_lower_offsets[p])
                offsets_run = self._upper_offsets[p] - self._lower_offsets[p]
                total_offsets_run += offsets_run
                partition_batchsize = partition_batchsize - offsets_run
            else:
                break
        log.info('Offset run: %d', total_offsets_run)
    # create new consumer if partition list changes
    if previous_lower_offsets is not None and \
            set(previous_lower_offsets.keys()) != set(self._lower_offsets):
        self._create_scan_consumer(self._lower_offsets.keys())
    # consumer must restart from newly computed lower offsets
    self._update_offsets(self._lower_offsets)
    log.info('Initial offsets for topic %s: %s', self._topic, repr(self._lower_offsets))
    log.info('Target offsets for topic %s: %s', self._topic, repr(self._upper_offsets))
    return batchsize
Compute new initial and target offsets and do other maintenance tasks.
562
12
248,010
def _filter_deleted_records(self, batches):
    for batch in batches:
        for record in batch:
            if not self.must_delete_record(record):
                yield record
Filter out deleted records.
40
4
248,011
def get_catalog(mid):
    if isinstance(mid, _uuid.UUID):
        mid = mid.hex
    return _get_catalog(mid)
Return the catalog entry for the specified ID.
37
8
248,012
def _convert_entry(self, entry):
    result = {}
    for key, value in entry.items():
        if isinstance(value, list):
            result[key] = [self._convert_field(key, val) for val in value]
        else:
            result[key] = self._convert_field(key, value)
    return result
Convert an entire journal entry utilising _convert_field.
80
13
248,013
def add_match(self, *args, **kwargs):
    args = list(args)
    args.extend(_make_line(key, val) for key, val in kwargs.items())
    for arg in args:
        super(Reader, self).add_match(arg)
Add one or more matches to filter journal log entries.
67
12
248,014
def get_next(self, skip=1):
    if super(Reader, self)._next(skip):
        entry = super(Reader, self)._get_all()
        if entry:
            entry['__REALTIME_TIMESTAMP'] = self._get_realtime()
            entry['__MONOTONIC_TIMESTAMP'] = self._get_monotonic()
            entry['__CURSOR'] = self._get_cursor()
            return self._convert_entry(entry)
    return dict()
Return the next log entry as a dictionary.
129
10
248,015
def query_unique(self, field):
    return set(self._convert_field(field, value)
               for value in super(Reader, self).query_unique(field))
Return the set of unique values appearing in the journal for the given field.
42
15
248,016
def wait(self, timeout=None):
    us = -1 if timeout is None else int(timeout * 1000000)
    return super(Reader, self).wait(us)
Wait for a change in the journal.
38
8
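The timeout conversion used by wait(), shown standalone: seconds become microseconds, and None maps to -1 (block indefinitely):

for timeout in (None, 0.5, 2):
    us = -1 if timeout is None else int(timeout * 1000000)
    print(timeout, '->', us)  # None -> -1, 0.5 -> 500000, 2 -> 2000000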
248,017
def seek_realtime(self, realtime):
    if isinstance(realtime, _datetime.datetime):
        realtime = int(float(realtime.strftime("%s.%f")) * 1000000)
    elif not isinstance(realtime, int):
        realtime = int(realtime * 1000000)
    return super(Reader, self).seek_realtime(realtime)
Seek to a matching journal entry nearest to the given realtime timestamp.
91
12
248,018
def seek_monotonic(self, monotonic, bootid=None):
    if isinstance(monotonic, _datetime.timedelta):
        monotonic = monotonic.total_seconds()
    monotonic = int(monotonic * 1000000)
    if isinstance(bootid, _uuid.UUID):
        bootid = bootid.hex
    return super(Reader, self).seek_monotonic(monotonic, bootid)
Seek to a matching journal entry nearest to the given monotonic time.
104
14
248,019
def log_level(self, level):
    if 0 <= level <= 7:
        for i in range(level + 1):
            self.add_match(PRIORITY="%d" % i)
    else:
        raise ValueError("Log level must be 0 <= level <= 7")
Set the maximum log level by setting matches for PRIORITY.
61
12
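The PRIORITY matches that log_level() generates, shown standalone; syslog priorities run from 0 (emergency) to 7 (debug), so level=3 matches priorities 0 through 3:

level = 3
print(['PRIORITY=%d' % i for i in range(level + 1)])
# ['PRIORITY=0', 'PRIORITY=1', 'PRIORITY=2', 'PRIORITY=3']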
248,020
def messageid_match(self, messageid):
    if isinstance(messageid, _uuid.UUID):
        messageid = messageid.hex
    self.add_match(MESSAGE_ID=messageid)
Add a match for log entries with the specified message ID.
50
10
248,021
def this_boot(self, bootid=None):
    if bootid is None:
        bootid = _id128.get_boot().hex
    else:
        bootid = getattr(bootid, 'hex', bootid)
    self.add_match(_BOOT_ID=bootid)
Add a match for _BOOT_ID for the current boot or the specified boot ID.
66
17
248,022
def this_machine(self, machineid=None):
    if machineid is None:
        machineid = _id128.get_machine().hex
    else:
        machineid = getattr(machineid, 'hex', machineid)
    self.add_match(_MACHINE_ID=machineid)
Add a match for _MACHINE_ID equal to the ID of this machine.
67
17
248,023
def emit(self, record):
    try:
        msg = self.format(record)
        pri = self.map_priority(record.levelno)
        # defaults
        extras = self._extra.copy()
        # higher priority
        if record.exc_text:
            extras['EXCEPTION_TEXT'] = record.exc_text
        if record.exc_info:
            extras['EXCEPTION_INFO'] = record.exc_info
        if record.args:
            extras['CODE_ARGS'] = str(record.args)
        # explicit arguments - highest priority
        extras.update(record.__dict__)
        self.send(msg,
                  PRIORITY=format(pri),
                  LOGGER=record.name,
                  THREAD_NAME=record.threadName,
                  PROCESS_NAME=record.processName,
                  CODE_FILE=record.pathname,
                  CODE_LINE=record.lineno,
                  CODE_FUNC=record.funcName,
                  **extras)
    except Exception:
        self.handleError(record)
Write the record as a journal event.
214
7
248,024
def listen_fds(unset_environment=True):
    num = _listen_fds(unset_environment)
    return list(range(LISTEN_FDS_START, LISTEN_FDS_START + num))
Return a list of socket-activated file descriptors.
54
8
248,025
def connect(self):
    self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self._socket.settimeout(self._connect_timeout)
    SocketError.wrap(self._socket.connect, (self.host, self.port))
    self._socket.settimeout(None)
    self._socket_file = self._socket.makefile('rb')
Connect to the beanstalkd server.
96
8
248,026
def close(self):
    try:
        self._socket.sendall('quit\r\n')
    except socket.error:
        pass
    try:
        self._socket.close()
    except socket.error:
        pass
Close the connection to the server.
46
5
248,027
def put(self, body, priority=DEFAULT_PRIORITY, delay=0, ttr=DEFAULT_TTR):
    assert isinstance(body, str), 'Job body must be a str instance'
    jid = self._interact_value(
        'put %d %d %d %d\r\n%s\r\n' % (priority, delay, ttr, len(body), body),
        ['INSERTED'],
        ['JOB_TOO_BIG', 'BURIED', 'DRAINING'])
    return int(jid)
Put a job into the current tube. Returns the job id.
136
12
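The beanstalkd wire command that put() builds, shown standalone with made-up values:

priority, delay, ttr, body = 1000, 0, 120, 'hello'
print(repr('put %d %d %d %d\r\n%s\r\n' % (priority, delay, ttr, len(body), body)))
# 'put 1000 0 120 5\r\nhello\r\n'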
248,028
def reserve(self, timeout=None):
    if timeout is not None:
        command = 'reserve-with-timeout %d\r\n' % timeout
    else:
        command = 'reserve\r\n'
    try:
        return self._interact_job(command,
                                  ['RESERVED'],
                                  ['DEADLINE_SOON', 'TIMED_OUT'])
    except CommandFailed:
        exc = sys.exc_info()[1]
        _, status, results = exc.args
        if status == 'TIMED_OUT':
            return None
        elif status == 'DEADLINE_SOON':
            raise DeadlineSoon(results)
Reserve a job from one of the watched tubes, with an optional timeout in seconds. Returns a Job object, or None if the request times out.
145
28
248,029
def release(self, jid, priority=DEFAULT_PRIORITY, delay=0):
    self._interact('release %d %d %d\r\n' % (jid, priority, delay),
                   ['RELEASED', 'BURIED'],
                   ['NOT_FOUND'])
Release a reserved job back into the ready queue.
73
10
248,030
def delete(self):
    self.conn.delete(self.jid)
    self.reserved = False
Delete this job.
22
4
248,031
def release(self, priority=None, delay=0):
    if self.reserved:
        self.conn.release(self.jid, priority or self._priority(), delay)
        self.reserved = False
Release this job back into the ready queue.
46
9
248,032
def bury(self, priority=None):
    if self.reserved:
        self.conn.bury(self.jid, priority or self._priority())
        self.reserved = False
Bury this job.
40
5
248,033
def abspath(self):
    return Path(os.path.abspath(os.path.expanduser(str(self.path))))
Absolute path to the local storage.
33
7
248,034
def fetch(self, fname, processor=None):
    self._assert_file_in_registry(fname)
    # Create the local data directory if it doesn't already exist
    if not self.abspath.exists():
        os.makedirs(str(self.abspath))
    full_path = self.abspath / fname
    in_storage = full_path.exists()
    if not in_storage:
        action = "download"
    elif in_storage and file_hash(str(full_path)) != self.registry[fname]:
        action = "update"
    else:
        action = "fetch"
    if action in ("download", "update"):
        action_word = dict(download="Downloading", update="Updating")
        warn("{} data file '{}' from remote data store '{}' to '{}'.".format(
            action_word[action], fname, self.get_url(fname), str(self.path)))
        self._download_file(fname)
    if processor is not None:
        return processor(str(full_path), action, self)
    return str(full_path)
Get the absolute path to a file in the local storage.
261
12
248,035
def get_url(self, fname):
    self._assert_file_in_registry(fname)
    return self.urls.get(fname, "".join([self.base_url, fname]))
Get the full URL to download a file in the registry.
53
12
248,036
def _download_file(self, fname):
    destination = self.abspath / fname
    source = self.get_url(fname)
    # Stream the file to a temporary so that we can safely check its hash before
    # overwriting the original
    fout = tempfile.NamedTemporaryFile(delete=False, dir=str(self.abspath))
    try:
        with fout:
            response = requests.get(source, stream=True)
            response.raise_for_status()
            for chunk in response.iter_content(chunk_size=1024):
                if chunk:
                    fout.write(chunk)
        tmphash = file_hash(fout.name)
        if tmphash != self.registry[fname]:
            raise ValueError(
                "Hash of downloaded file '{}' doesn't match the entry in the registry:"
                " Expected '{}' and got '{}'.".format(
                    fout.name, self.registry[fname], tmphash))
        # Make sure the parent directory exists in case the file is in a subdirectory.
        # Otherwise, move will cause an error.
        if not os.path.exists(str(destination.parent)):
            os.makedirs(str(destination.parent))
        shutil.move(fout.name, str(destination))
    except Exception:
        os.remove(fout.name)
        raise
Download a file from the remote data storage to the local storage.
297
13
248,037
def load_registry(self, fname):
    with open(fname) as fin:
        for linenum, line in enumerate(fin):
            elements = line.strip().split()
            if len(elements) > 3 or len(elements) < 2:
                raise IOError(
                    "Expected 2 or 3 elements in line {} but got {}.".format(
                        linenum, len(elements)))
            file_name = elements[0]
            file_sha256 = elements[1]
            if len(elements) == 3:
                file_url = elements[2]
                self.urls[file_name] = file_url
            self.registry[file_name] = file_sha256
Load entries from a file and add them to the registry.
148
12
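Each registry line that load_registry() parses holds a relative path, a SHA256 hash, and an optional URL, whitespace-separated. A hypothetical two-line registry (file names invented, hashes abbreviated):

data/gravity.csv 7f3d...e21a
data/topo.nc 9c1b...04f7 https://example.com/mirror/topo.nc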
248,038
def is_available(self, fname):
    self._assert_file_in_registry(fname)
    source = self.get_url(fname)
    response = requests.head(source, allow_redirects=True)
    return bool(response.status_code == 200)
Check the availability of a remote file without downloading it.
64
10
248,039
def file_hash(fname):
    # Calculate the hash in chunks to avoid overloading the memory
    chunksize = 65536
    hasher = hashlib.sha256()
    with open(fname, "rb") as fin:
        buff = fin.read(chunksize)
        while buff:
            hasher.update(buff)
            buff = fin.read(chunksize)
    return hasher.hexdigest()
Calculate the SHA256 hash of a given file.
88
12
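A quick self-check for the chunked hashing above (assumes file_hash is in scope): the result must equal a one-shot hashlib digest.

import hashlib
import os
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b'x' * 200000)  # spans several 64 KiB chunks
try:
    with open(f.name, 'rb') as fin:
        expected = hashlib.sha256(fin.read()).hexdigest()
    assert file_hash(f.name) == expected
finally:
    os.remove(f.name)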
248,040
def check_version(version, fallback="master"):
    parse = Version(version)
    if parse.local is not None:
        return fallback
    return version
Check that a version string is PEP440 compliant and that there are no unreleased changes.
34
18
248,041
def make_registry(directory, output, recursive=True):
    directory = Path(directory)
    if recursive:
        pattern = "**/*"
    else:
        pattern = "*"
    files = sorted([str(path.relative_to(directory))
                    for path in directory.glob(pattern)
                    if path.is_file()])
    hashes = [file_hash(str(directory / fname)) for fname in files]
    with open(output, "w") as outfile:
        for fname, fhash in zip(files, hashes):
            # Only use Unix separators for the registry so that we don't go insane
            # dealing with file paths.
            outfile.write("{} {}\n".format(fname.replace("\\", "/"), fhash))
Make a registry of files and hashes for the given directory.
173
12
248,042
def loads(s, **kwargs):
    try:
        return _engine[0](s)
    except _engine[2]:
        # except_clause: 'except' [test ['as' NAME]]          # grammar for py3x
        # except_clause: 'except' [test [('as' | ',') test]]  # grammar for py2x
        why = sys.exc_info()[1]
        raise JSONError(why)
Load a JSON object.
97
5
248,043
def dumps(o, **kwargs):
    try:
        return _engine[1](o)
    except:
        ExceptionClass, why = sys.exc_info()[:2]
        if any([issubclass(ExceptionClass, e) for e in _engine[2]]):
            raise JSONError(why)
        else:
            raise why
Dump a JSON object.
77
5
248,044
def from_table(table, engine, limit=None):
    sql = select([table])
    if limit is not None:
        sql = sql.limit(limit)
    result_proxy = engine.execute(sql)
    return from_db_cursor(result_proxy.cursor)
Select data in a database table and put it into a PrettyTable.
60
12
248,045
def from_data(data):
    if len(data) == 0:  # pragma: no cover
        return None
    else:
        ptable = PrettyTable()
        ptable.field_names = data[0].keys()
        for row in data:
            ptable.add_row(row)
        return ptable
Construct a PrettyTable from a list of rows.
65
9
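A hypothetical usage of from_data with an in-memory SQLite table; it assumes prettytable is installed and a SQLAlchemy <2.0 engine, whose result rows iterate as values and expose .keys():

from sqlalchemy import create_engine

engine = create_engine('sqlite://')
engine.execute('CREATE TABLE t (id INTEGER, name TEXT)')
engine.execute("INSERT INTO t VALUES (1, 'a'), (2, 'b')")
rows = engine.execute('SELECT * FROM t').fetchall()
print(from_data(rows))  # renders a two-row table with columns id and name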
248,046
def generate_table(self, rows):
    table = PrettyTable(**self.kwargs)
    for row in self.rows:
        if len(row[0]) < self.max_row_width:
            appends = self.max_row_width - len(row[0])
            for i in range(1, appends):
                row[0].append("-")
        if row[1] is True:
            self.make_fields_unique(row[0])
            table.field_names = row[0]
        else:
            table.add_row(row[0])
    return table
Generate a PrettyTable object from a list of rows.
132
12
248,047
def sql_to_csv(sql, engine, filepath, chunksize=1000, overwrite=False):
    # refuse to clobber an existing file unless overwrite is requested
    if not overwrite:  # pragma: no cover
        if os.path.exists(filepath):
            raise Exception("'%s' already exists!" % filepath)
    import pandas as pd
    columns = [str(column.name) for column in sql.columns]
    with open(filepath, "w") as f:
        # write header
        df = pd.DataFrame([], columns=columns)
        df.to_csv(f, header=True, index=False)
        # iterate over the big database table in chunks
        result_proxy = engine.execute(sql)
        while True:
            data = result_proxy.fetchmany(chunksize)
            if len(data) == 0:
                break
            else:
                df = pd.DataFrame(data, columns=columns)
                df.to_csv(f, header=False, index=False)
Export a SQL query result to a CSV file.
199
8
248,048
def table_to_csv(table, engine, filepath, chunksize=1000, overwrite=False):
    sql = select([table])
    sql_to_csv(sql, engine, filepath, chunksize, overwrite)
Export an entire table to a CSV file.
48
9
248,049
def update_all(engine, table, data, upsert=False):
    data = ensure_list(data)
    ins = table.insert()
    upd = table.update()
    # Find all primary key columns
    pk_cols = OrderedDict()
    for column in table._columns:
        if column.primary_key:
            pk_cols[column.name] = column
    data_to_insert = list()
    # Multiple primary key columns
    if len(pk_cols) >= 2:
        for row in data:
            result = engine.execute(
                upd.where(and_(*[col == row[name] for name, col in pk_cols.items()]))
                   .values(**row))
            if result.rowcount == 0:
                data_to_insert.append(row)
    # Single primary key column
    elif len(pk_cols) == 1:
        for row in data:
            result = engine.execute(
                upd.where([col == row[name] for name, col in pk_cols.items()][0])
                   .values(**row))
            if result.rowcount == 0:
                data_to_insert.append(row)
    else:  # pragma: no cover
        data_to_insert = data
    # Insert the rest of the data
    if upsert:
        if len(data_to_insert):
            engine.execute(ins, data_to_insert)
Update data by its primary_key column.
311
9
248,050
def upsert_all(engine, table, data):
    update_all(engine, table, data, upsert=True)
Update data by primary key columns. If not able to update, do insert.
28
15
248,051
def pk_names(cls):
    if cls._cache_pk_names is None:
        cls._cache_pk_names = cls._get_primary_key_names()
    return cls._cache_pk_names
Primary key column name list.
59
6
248,052
def id_field_name(cls):
    if cls._cache_id_field_name is None:
        pk_names = cls.pk_names()
        if len(pk_names) == 1:
            cls._cache_id_field_name = pk_names[0]
        else:  # pragma: no cover
            raise ValueError(
                "{classname} has more than 1 primary key!".format(
                    classname=cls.__name__))
    return cls._cache_id_field_name
If there is only one primary_key, return it. Otherwise, raise ValueError.
119
15
248,053
def values(self):
    return [getattr(self, c.name, None) for c in self.__table__._columns]
Return a list of the values of all declared columns.
32
9
248,054
def items(self):
    return [(c.name, getattr(self, c.name, None)) for c in self.__table__._columns]
Return a list of (name, value) pairs for all declared columns.
38
13
248,055
def to_dict(self, include_null=True):
    if include_null:
        return dict(self.items())
    else:
        return {attr: value for attr, value in self.__dict__.items()
                if not attr.startswith("_sa_")}
Convert to a dict.
66
5
248,056
def to_OrderedDict(self, include_null=True):
    if include_null:
        return OrderedDict(self.items())
    else:
        items = list()
        for c in self.__table__._columns:
            try:
                items.append((c.name, self.__dict__[c.name]))
            except KeyError:
                pass
        return OrderedDict(items)
Convert to an OrderedDict.
90
8
248,057
def by_id(cls, _id, engine_or_session):
    ses, auto_close = ensure_session(engine_or_session)
    obj = ses.query(cls).get(_id)
    if auto_close:
        ses.close()
    return obj
Get one object by its primary_key value.
64
9
248,058
def by_sql(cls, sql, engine_or_session):
    ses, auto_close = ensure_session(engine_or_session)
    result = ses.query(cls).from_statement(sql).all()
    if auto_close:
        ses.close()
    return result
Query with a SQL statement or textual SQL.
68
8
248,059
def fixcode(**kwargs):
    # repository directory
    repo_dir = Path(__file__).parent.absolute()
    # source code directory
    source_dir = Path(repo_dir, package.__name__)
    if source_dir.exists():
        print("Source code locate at: '%s'." % source_dir)
        print("Auto pep8 all python file ...")
        source_dir.autopep8(**kwargs)
    else:
        print("Source code directory not found!")
    # unittest code directory
    unittest_dir = Path(repo_dir, "tests")
    if unittest_dir.exists():
        print("Unittest code locate at: '%s'." % unittest_dir)
        print("Auto pep8 all python file ...")
        unittest_dir.autopep8(**kwargs)
    else:
        print("Unittest code directory not found!")
    print("Complete!")
Auto-pep8-format every Python file in the source code and tests directories.
218
15
248,060
def _get_rows(self, options):
    if options["oldsortslice"]:
        rows = copy.deepcopy(self._rows[options["start"]:options["end"]])
    else:
        rows = copy.deepcopy(self._rows)
    # Sort
    if options["sortby"]:
        sortindex = self._field_names.index(options["sortby"])
        # Decorate
        rows = [[row[sortindex]] + row for row in rows]
        # Sort
        rows.sort(reverse=options["reversesort"], key=options["sort_key"])
        # Undecorate
        rows = [row[1:] for row in rows]
    # Slice if necessary
    if not options["oldsortslice"]:
        rows = rows[options["start"]:options["end"]]
    return rows
Return only those data rows that should be printed, based on slicing and sorting.
197
15
248,061
def create_postgresql_pg8000(username, password, host, port, database, **kwargs):  # pragma: no cover
    return create_engine(_create_postgresql_pg8000(username, password, host, port, database), **kwargs)
Create an engine connected to a PostgreSQL database using pg8000.
65
14
248,062
def create_postgresql_pygresql(username, password, host, port, database, **kwargs):  # pragma: no cover
    return create_engine(_create_postgresql_pygresql(username, password, host, port, database), **kwargs)
Create an engine connected to a PostgreSQL database using pygresql.
67
15
248,063
def create_postgresql_psycopg2cffi(username, password, host, port, database, **kwargs):  # pragma: no cover
    return create_engine(_create_postgresql_psycopg2cffi(username, password, host, port, database), **kwargs)
Create an engine connected to a PostgreSQL database using psycopg2cffi.
75
19
248,064
def create_postgresql_pypostgresql(username, password, host, port, database, **kwargs):  # pragma: no cover
    return create_engine(_create_postgresql_pypostgresql(username, password, host, port, database), **kwargs)
Create an engine connected to a PostgreSQL database using pypostgresql.
71
17
248,065
def create_mysql_mysqlconnector(username, password, host, port, database, **kwargs):  # pragma: no cover
    return create_engine(_create_mysql_mysqlconnector(username, password, host, port, database), **kwargs)
Create an engine connected to a MySQL database using mysqlconnector.
67
13
248,066
def create_mysql_oursql(username, password, host, port, database, **kwargs):  # pragma: no cover
    return create_engine(_create_mysql_oursql(username, password, host, port, database), **kwargs)
Create an engine connected to a MySQL database using oursql.
63
12
248,067
def create_mysql_pymysql(username, password, host, port, database, **kwargs):  # pragma: no cover
    return create_engine(_create_mysql_pymysql(username, password, host, port, database), **kwargs)
Create an engine connected to a MySQL database using pymysql.
67
14
248,068
def create_mysql_cymysql(username, password, host, port, database, **kwargs):  # pragma: no cover
    return create_engine(_create_mysql_cymysql(username, password, host, port, database), **kwargs)
Create an engine connected to a MySQL database using cymysql.
65
13
248,069
def create_mssql_pyodbc(username, password, host, port, database, **kwargs):  # pragma: no cover
    return create_engine(_create_mssql_pyodbc(username, password, host, port, database), **kwargs)
Create an engine connected to a MSSQL database using pyodbc.
67
15
248,070
def create_mssql_pymssql(username, password, host, port, database, **kwargs):  # pragma: no cover
    return create_engine(_create_mssql_pymssql(username, password, host, port, database), **kwargs)
Create an engine connected to a MSSQL database using pymssql.
69
16
248,071
def titleize(text):
    if len(text) == 0:  # if empty string, return it
        return text
    else:
        text = text.lower()  # lower all chars
        # delete redundant empty spaces and capitalize each chunk
        chunks = [chunk[0].upper() + chunk[1:]
                  for chunk in text.split(" ") if len(chunk) >= 1]
        return " ".join(chunks)
Capitalize all the words and replace some characters in the string to create a nicer-looking title.
85
19
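Expected behaviour of titleize, assuming the function above is in scope; redundant spaces collapse because empty chunks are filtered out:

print(titleize('  hello   WORLD '))  # 'Hello World'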
248,072
def grouper_list(l, n):
    chunk = list()
    counter = 0
    for item in l:
        counter += 1
        chunk.append(item)
        if counter == n:
            yield chunk
            chunk = list()
            counter = 0
    if len(chunk) > 0:
        yield chunk
Evenly divide a list into fixed-length chunks; no fill value is added if the last chunk is smaller than the fixed length.
58
21
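A usage sketch for grouper_list (function above assumed in scope); the final chunk is simply shorter when the input length is not a multiple of n:

print(list(grouper_list(range(7), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]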
248,073
def convert_query_to_sql_statement(query):
    context = query._compile_context()
    context.statement.use_labels = False
    return context.statement
Convert a Query object created from an ORM query into an executable SQL statement.
39
15
248,074
def execute_query_return_result_proxy(query):
    context = query._compile_context()
    context.statement.use_labels = False
    if query._autoflush and not query._populate_existing:
        query.session._autoflush()
    conn = query._get_bind_args(context,
                                query._connection_from_session,
                                close_with_result=True)
    return conn.execute(context.statement, query._params)
Execute a query and yield the result proxy.
108
8
248,075
def find_state(self, state, best_match=True, min_similarity=70):
    result_state_short_list = list()
    # check if it is an abbreviated name
    if state.upper() in STATE_ABBR_SHORT_TO_LONG:
        result_state_short_list.append(state.upper())
    # if not, find out which state the user is looking for
    else:
        if best_match:
            state_long, confidence = extractOne(state, self.state_list)
            if confidence >= min_similarity:
                result_state_short_list.append(STATE_ABBR_LONG_TO_SHORT[state_long])
        else:
            for state_long, confidence in extract(state, self.state_list):
                if confidence >= min_similarity:
                    result_state_short_list.append(STATE_ABBR_LONG_TO_SHORT[state_long])
    if len(result_state_short_list) == 0:
        message = ("'%s' is not a valid state name, use 2 letter "
                   "short name or correct full name please.")
        raise ValueError(message % state)
    return result_state_short_list
Fuzzy search for the correct state.
267
7
248,076
def find_city(self, city, state=None, best_match=True, min_similarity=70):
    # find out which city the user is looking for
    if state:
        state_sort = self.find_state(state, best_match=True)[0]
        city_pool = self.state_to_city_mapper[state_sort.upper()]
    else:
        city_pool = self.city_list
    result_city_list = list()
    if best_match:
        city, confidence = extractOne(city, city_pool)
        if confidence >= min_similarity:
            result_city_list.append(city)
    else:
        for city, confidence in extract(city, city_pool):
            if confidence >= min_similarity:
                result_city_list.append(city)
    if len(result_city_list) == 0:
        raise ValueError("'%s' is not a valid city name" % city)
    return result_city_list
Fuzzy search for the correct city.
211
7
248,077
def _resolve_sort_by(sort_by, flag_radius_query):
    if sort_by is None:
        if flag_radius_query:
            sort_by = SORT_BY_DIST
    elif isinstance(sort_by, string_types):
        if sort_by.lower() == SORT_BY_DIST:
            if flag_radius_query is False:
                msg = "`sort_by` arg can be 'dist' only under distance based query!"
                raise ValueError(msg)
            sort_by = SORT_BY_DIST
        elif sort_by not in SimpleZipcode.__table__.columns:
            msg = "`sort_by` arg has to be one of the Zipcode attribute or 'dist'!"
            raise ValueError(msg)
    else:
        sort_by = sort_by.name
    return sort_by
Resolve the sort_by argument.
183
6
248,078
def by_zipcode(self, zipcode, zipcode_type=None, zero_padding=True):
    if zero_padding:
        zipcode = str(zipcode).zfill(5)
    else:  # pragma: no cover
        zipcode = str(zipcode)
    res = self.query(
        zipcode=zipcode,
        sort_by=None,
        returns=1,
        zipcode_type=zipcode_type,
    )
    if len(res):
        return res[0]
    else:
        return self.zip_klass()
Search zipcode by exact 5-digit zipcode. No zero padding is needed.
115
16
248,079
def by_prefix(self, prefix, zipcode_type=ZipcodeType.Standard,
              sort_by=SimpleZipcode.zipcode.name, ascending=True,
              returns=DEFAULT_LIMIT):
    return self.query(prefix=prefix, sort_by=sort_by,
                      zipcode_type=zipcode_type, ascending=ascending,
                      returns=returns)
Search zipcode information by the first N digits.
83
9
248,080
def by_pattern(self, pattern, zipcode_type=ZipcodeType.Standard,
               sort_by=SimpleZipcode.zipcode.name, ascending=True,
               returns=DEFAULT_LIMIT):
    return self.query(pattern=pattern, sort_by=sort_by,
                      zipcode_type=zipcode_type, ascending=ascending,
                      returns=returns)
Search zipcode by wildcard pattern.
83
7
248,081
def by_state(self, state, zipcode_type=ZipcodeType.Standard,
             sort_by=SimpleZipcode.zipcode.name, ascending=True,
             returns=DEFAULT_LIMIT):
    return self.query(state=state, sort_by=sort_by,
                      zipcode_type=zipcode_type, ascending=ascending,
                      returns=returns)
Search zipcode information by fuzzy state name.
83
9
248,082
def by_coordinates(self, lat, lng, radius=25.0,
                   zipcode_type=ZipcodeType.Standard, sort_by=SORT_BY_DIST,
                   ascending=True, returns=DEFAULT_LIMIT):
    return self.query(lat=lat, lng=lng, radius=radius, sort_by=sort_by,
                      zipcode_type=zipcode_type, ascending=ascending,
                      returns=returns)
Search zipcode information near a coordinate on a map.
102
11
248,083
def by_population(self, lower=-1, upper=2 ** 31,
                  zipcode_type=ZipcodeType.Standard,
                  sort_by=SimpleZipcode.population.name, ascending=False,
                  returns=DEFAULT_LIMIT):
    return self.query(population_lower=lower, population_upper=upper,
                      sort_by=sort_by, zipcode_type=zipcode_type,
                      ascending=ascending, returns=returns)
Search zipcode information by population range.
99
8
248,084
def by_population_density(self, lower=-1, upper=2 ** 31,
                          zipcode_type=ZipcodeType.Standard,
                          sort_by=SimpleZipcode.population_density.name,
                          ascending=False, returns=DEFAULT_LIMIT):
    return self.query(population_density_lower=lower,
                      population_density_upper=upper, sort_by=sort_by,
                      zipcode_type=zipcode_type, ascending=ascending,
                      returns=returns)
Search zipcode information by population density range.
107
9
248,085
def by_housing_units(self, lower=-1, upper=2 ** 31,
                     zipcode_type=ZipcodeType.Standard,
                     sort_by=SimpleZipcode.housing_units.name,
                     ascending=False, returns=DEFAULT_LIMIT):
    return self.query(housing_units_lower=lower, housing_units_upper=upper,
                      sort_by=sort_by, zipcode_type=zipcode_type,
                      ascending=ascending, returns=returns)
Search zipcode information by number of housing units.
107
9
248,086
def by_occupied_housing_units(self, lower=-1, upper=2 ** 31,
                              zipcode_type=ZipcodeType.Standard,
                              sort_by=SimpleZipcode.occupied_housing_units.name,
                              ascending=False, returns=DEFAULT_LIMIT):
    return self.query(occupied_housing_units_lower=lower,
                      occupied_housing_units_upper=upper, sort_by=sort_by,
                      zipcode_type=zipcode_type, ascending=ascending,
                      returns=returns)
Search zipcode information by number of occupied housing units.
115
10
248,087
def by_median_home_value(self, lower=-1, upper=2 ** 31,
                         zipcode_type=ZipcodeType.Standard,
                         sort_by=SimpleZipcode.median_home_value.name,
                         ascending=False, returns=DEFAULT_LIMIT):
    return self.query(median_home_value_lower=lower,
                      median_home_value_upper=upper, sort_by=sort_by,
                      zipcode_type=zipcode_type, ascending=ascending,
                      returns=returns)
Search zipcode information by median home value.
116
9
248,088
def by_median_household_income(self, lower=-1, upper=2 ** 31,
                               zipcode_type=ZipcodeType.Standard,
                               sort_by=SimpleZipcode.median_household_income.name,
                               ascending=False, returns=DEFAULT_LIMIT):
    return self.query(median_household_income_lower=lower,
                      median_household_income_upper=upper, sort_by=sort_by,
                      zipcode_type=zipcode_type, ascending=ascending,
                      returns=returns)
Search zipcode information by median household income.
120
9
248,089
def select_single_column(engine, column):
    s = select([column])
    return column.name, [row[0] for row in engine.execute(s)]
Select data from a single column.
40
6
248,090
def select_many_column(engine, *columns):
    if isinstance(columns[0], Column):
        pass
    elif isinstance(columns[0], (list, tuple)):
        columns = columns[0]
    s = select(columns)
    headers = [str(column) for column in columns]
    data = [tuple(row) for row in engine.execute(s)]
    return headers, data
Select data from multiple columns.
88
6
248,091
def select_random(engine, table_or_columns, limit=5):
    s = select(table_or_columns).order_by(func.random()).limit(limit)
    return engine.execute(s).fetchall()
Randomly select some rows from a table.
58
8
248,092
def smart_insert(engine, table, data, minimal_size=5):
    insert = table.insert()
    if isinstance(data, list):
        # first, try a bulk insert
        try:
            engine.execute(insert, data)
        # the bulk insert failed
        except IntegrityError:
            # check how much data we have
            n = len(data)
            # if there are more rows than a certain threshold
            if n >= minimal_size ** 2:
                # split the batch into chunks and retry each recursively
                n_chunk = math.floor(math.sqrt(n))
                for chunk in grouper_list(data, n_chunk):
                    smart_insert(engine, table, chunk, minimal_size)
            # otherwise, insert the rows one by one
            else:
                for row in data:
                    try:
                        engine.execute(insert, row)
                    except IntegrityError:
                        pass
    else:
        try:
            engine.execute(insert, data)
        except IntegrityError:
            pass
An optimized insert strategy that guarantees successful insertion at the highest speed. But ATOMIC WRITES ARE NOT ENSURED IF THE PROGRAM IS INTERRUPTED.
251
33
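A hypothetical round-trip for smart_insert with an in-memory SQLite table (assumes SQLAlchemy <2.0 and that smart_insert plus its helpers are in scope); the duplicate primary key is skipped rather than aborting the whole batch:

import sqlalchemy as sa

metadata = sa.MetaData()
t = sa.Table('t', metadata, sa.Column('id', sa.Integer, primary_key=True))
engine = sa.create_engine('sqlite://')
metadata.create_all(engine)
smart_insert(engine, t, [{'id': 1}, {'id': 1}, {'id': 2}])
print(engine.execute(sa.select([t])).fetchall())  # [(1,), (2,)]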
248,093
def load_keys():
    consumer_key = os.environ.get('CONSUMER_KEY')
    consumer_secret = os.environ.get('CONSUMER_SECRET')
    access_token = os.environ.get('ACCESS_TOKEN')
    access_token_secret = os.environ.get('ACCESS_TOKEN_SECRET')
    return consumer_key, consumer_secret, access_token, access_token_secret
Load Twitter API keys from environment variables.
109
5
248,094
def search(self, q):
    results = self._api.search(q=q)
    return results
Search tweets by keyword.
23
5
248,095
def search_by_user(self, screen_name, count=100):
    results = self._api.user_timeline(screen_name=screen_name, count=count)
    return results
Search tweets by user.
44
5
248,096
def on_successful_login(self, subject, authc_token, account_id):
    # always clear any previous identity:
    self.forget_identity(subject)
    # now save the new identity:
    if authc_token.is_remember_me:
        self.remember_identity(subject, authc_token, account_id)
    else:
        msg = ("AuthenticationToken did not indicate that RememberMe is "
               "requested. RememberMe functionality will not be executed "
               "for corresponding account.")
        logger.debug(msg)
Reacts to a successful login attempt by first always forgetting any previously stored identity. Then, if the authc_token is a RememberMe type of token, the associated identity will be remembered for later retrieval during a new user session.
115
45
248,097
def remember_identity(self, subject, authc_token, account_id):
    try:
        identifiers = self.get_identity_to_remember(subject, account_id)
    except AttributeError:
        msg = "Neither account_id nor identifier arguments passed"
        raise AttributeError(msg)
    encrypted = self.convert_identifiers_to_bytes(identifiers)
    self.remember_encrypted_identity(subject, encrypted)
Yosai consolidates rememberIdentity, an overloaded method in Java, into a single method that uses identifier-else-account logic.
93
27
248,098
def convert_bytes_to_identifiers(self, encrypted, subject_context):
    # unlike Shiro, Yosai assumes that the message is encrypted:
    decrypted = self.decrypt(encrypted)
    return self.serialization_manager.deserialize(decrypted)
If a cipher_service is available, it will be used to first decrypt the serialized message. Then the bytes are deserialized and returned.
58
29
248,099
def encrypt(self, serialized):
    fernet = Fernet(self.encryption_cipher_key)
    return fernet.encrypt(serialized)
Encrypt the serialized message using Fernet.
35
10
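A Fernet round-trip sketch using the cryptography package (assumed installed), mirroring what encrypt() does with self.encryption_cipher_key:

from cryptography.fernet import Fernet

key = Fernet.generate_key()  # stands in for encryption_cipher_key
token = Fernet(key).encrypt(b'serialized-identity')
assert Fernet(key).decrypt(token) == b'serialized-identity'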