idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
225,300
def execute ( self , request ) : handle = None if request : request [ 0 ] = command = to_string ( request [ 0 ] ) . lower ( ) info = COMMANDS_INFO . get ( command ) if info : handle = getattr ( self . store , info . method_name ) # if self . channels or self . patterns : if command not in self . store . SUBSCRIBE_COMMANDS : return self . reply_error ( self . store . PUBSUB_ONLY ) if self . blocked : return self . reply_error ( 'Blocked client cannot request' ) if self . transaction is not None and command not in 'exec' : self . transaction . append ( ( handle , request ) ) return self . connection . write ( self . store . QUEUED ) self . execute_command ( handle , request )
Execute a new request .
183
6
225,301
def handle_cookies ( response , exc = None ) : if exc : return headers = response . headers request = response . request client = request . client response . _cookies = c = SimpleCookie ( ) if 'set-cookie' in headers or 'set-cookie2' in headers : for cookie in ( headers . get ( 'set-cookie2' ) , headers . get ( 'set-cookie' ) ) : if cookie : c . load ( cookie ) if client . store_cookies : client . cookies . extract_cookies ( response , request )
Handle response cookies .
121
4
225,302
def on_headers ( self , response , exc = None ) : if response . status_code == 101 : connection = response . connection request = response . request handler = request . websocket_handler if not handler : handler = WS ( ) parser = request . client . frame_parser ( kind = 1 ) consumer = partial ( WebSocketClient . create , response , handler , parser ) connection . upgrade ( consumer ) response . event ( 'post_request' ) . fire ( ) websocket = connection . current_consumer ( ) response . request_again = lambda r : websocket
Websocket upgrade as on_headers event .
121
10
225,303
def add2python ( self , module = None , up = 0 , down = None , front = False , must_exist = True ) : if module : try : return import_module ( module ) except ImportError : pass dir = self . dir ( ) . ancestor ( up ) if down : dir = dir . join ( * down ) if dir . isdir ( ) : if dir not in sys . path : if front : sys . path . insert ( 0 , dir ) else : sys . path . append ( dir ) elif must_exist : raise ImportError ( 'Directory {0} not available' . format ( dir ) ) else : return None if module : try : return import_module ( module ) except ImportError : if must_exist : raise
Add a directory to the python path .
160
8
225,304
def get ( self , request ) : ul = Html ( 'ul' ) for router in sorted ( self . routes , key = lambda r : r . creation_count ) : a = router . link ( escape ( router . route . path ) ) a . addClass ( router . name ) for method in METHODS : if router . getparam ( method ) : a . addClass ( method ) li = Html ( 'li' , a , ' %s' % router . getparam ( 'title' , '' ) ) ul . append ( li ) title = 'Pulsar' html = request . html_document html . head . title = title html . head . links . append ( 'httpbin.css' ) html . head . links . append ( 'favicon.ico' , rel = "icon" , type = 'image/x-icon' ) html . head . scripts . append ( 'httpbin.js' ) ul = ul . to_string ( request ) templ = asset ( 'template.html' ) body = templ % ( title , JAPANESE , CHINESE , version , pyversion , ul ) html . body . append ( body ) return html . http_response ( request )
The home page of this router
264
6
225,305
def stats ( self , request ) : # scheme = 'wss' if request.is_secure else 'ws' # host = request.get('HTTP_HOST') # address = '%s://%s/stats' % (scheme, host) doc = HtmlDocument ( title = 'Live server stats' , media_path = '/assets/' ) # docs.head.scripts return doc . http_response ( request )
Live stats for the server .
94
6
225,306
def get_preparation_data ( name ) : d = dict ( name = name , sys_path = sys . path , sys_argv = sys . argv , log_to_stderr = _log_to_stderr , orig_dir = process . ORIGINAL_DIR , authkey = process . current_process ( ) . authkey , ) if _logger is not None : d [ 'log_level' ] = _logger . getEffectiveLevel ( ) if not WINEXE : main_path = getattr ( sys . modules [ '__main__' ] , '__file__' , None ) if not main_path and sys . argv [ 0 ] not in ( '' , '-c' ) : main_path = sys . argv [ 0 ] if main_path is not None : if ( not os . path . isabs ( main_path ) and process . ORIGINAL_DIR is not None ) : main_path = os . path . join ( process . ORIGINAL_DIR , main_path ) if not main_path . endswith ( '.exe' ) : d [ 'main_path' ] = os . path . normpath ( main_path ) return d
Return info about parent needed by child to unpickle process object . Monkey - patch from
266
17
225,307
def remote_call ( request , cls , method , args , kw ) : actor = request . actor name = 'remote_%s' % cls . __name__ if not hasattr ( actor , name ) : object = cls ( actor ) setattr ( actor , name , object ) else : object = getattr ( actor , name ) method_name = '%s%s' % ( PREFIX , method ) return getattr ( object , method_name ) ( request , * args , * * kw )
Command for executing remote calls on a remote object
113
9
225,308
def clear ( self ) : self . _size = 0 self . _level = 1 self . _head = Node ( 'HEAD' , None , [ None ] * SKIPLIST_MAXLEVEL , [ 1 ] * SKIPLIST_MAXLEVEL )
Clear the container from all data .
55
7
225,309
def extend ( self , iterable ) : i = self . insert for score_values in iterable : i ( * score_values )
Extend this skiplist with an iterable over score value pairs .
29
15
225,310
def remove_range ( self , start , end , callback = None ) : N = len ( self ) if start < 0 : start = max ( N + start , 0 ) if start >= N : return 0 if end is None : end = N elif end < 0 : end = max ( N + end , 0 ) else : end = min ( end , N ) if start >= end : return 0 node = self . _head index = 0 chain = [ None ] * self . _level for i in range ( self . _level - 1 , - 1 , - 1 ) : while node . next [ i ] and ( index + node . width [ i ] ) <= start : index += node . width [ i ] node = node . next [ i ] chain [ i ] = node node = node . next [ 0 ] initial = self . _size while node and index < end : next = node . next [ 0 ] self . _remove_node ( node , chain ) index += 1 if callback : callback ( node . score , node . value ) node = next return initial - self . _size
Remove a range by rank .
232
6
225,311
def remove_range_by_score ( self , minval , maxval , include_min = True , include_max = True , callback = None ) : node = self . _head chain = [ None ] * self . _level if include_min : for i in range ( self . _level - 1 , - 1 , - 1 ) : while node . next [ i ] and node . next [ i ] . score < minval : node = node . next [ i ] chain [ i ] = node else : for i in range ( self . _level - 1 , - 1 , - 1 ) : while node . next [ i ] and node . next [ i ] . score <= minval : node = node . next [ i ] chain [ i ] = node node = node . next [ 0 ] initial = self . _size while node and node . score >= minval : if ( ( include_max and node . score > maxval ) or ( not include_max and node . score >= maxval ) ) : break next = node . next [ 0 ] self . _remove_node ( node , chain ) if callback : callback ( node . score , node . value ) node = next return initial - self . _size
Remove a range with scores between minval and maxval .
260
12
225,312
def count ( self , minval , maxval , include_min = True , include_max = True ) : rank1 = self . rank ( minval ) if rank1 < 0 : rank1 = - rank1 - 1 elif not include_min : rank1 += 1 rank2 = self . rank ( maxval ) if rank2 < 0 : rank2 = - rank2 - 1 elif include_max : rank2 += 1 return max ( rank2 - rank1 , 0 )
Returns the number of elements in the skiplist with a score between min and max .
104
18
225,313
def quality ( self , key ) : for item , quality in self : if self . _value_matches ( key , item ) : return quality return 0
Returns the quality of the key .
33
7
225,314
def to_header ( self ) : result = [ ] for value , quality in self : if quality != 1 : value = '%s;q=%s' % ( value , quality ) result . append ( value ) return ',' . join ( result )
Convert the header set into an HTTP header string .
55
11
225,315
def best_match ( self , matches , default = None ) : if matches : best_quality = - 1 result = default for client_item , quality in self : for server_item in matches : if quality <= best_quality : break if self . _value_matches ( server_item , client_item ) : best_quality = quality result = server_item return result else : return self . best
Returns the best match from a list of possible matches based on the quality of the client . If two items have the same quality the one is returned that comes first .
86
33
225,316
def convert_bytes ( b ) : if b is None : return '#NA' for s in reversed ( memory_symbols ) : if b >= memory_size [ s ] : value = float ( b ) / memory_size [ s ] return '%.1f%sB' % ( value , s ) return "%sB" % b
Convert a number of bytes into a human readable memory usage bytes kilo mega giga tera peta exa zetta yotta
75
28
225,317
def process_info ( pid = None ) : if psutil is None : # pragma nocover return { } pid = pid or os . getpid ( ) try : p = psutil . Process ( pid ) # this fails on platforms which don't allow multiprocessing except psutil . NoSuchProcess : # pragma nocover return { } else : mem = p . memory_info ( ) return { 'memory' : convert_bytes ( mem . rss ) , 'memory_virtual' : convert_bytes ( mem . vms ) , 'cpu_percent' : p . cpu_percent ( ) , 'nice' : p . nice ( ) , 'num_threads' : p . num_threads ( ) }
Returns a dictionary of system information for the process pid .
159
11
225,318
async def start_serving ( self , address = None , sockets = None , backlog = 100 , sslcontext = None ) : if self . _server : raise RuntimeError ( 'Already serving' ) create_server = self . _loop . create_server server = None if sockets : for sock in sockets : srv = await create_server ( self . create_protocol , sock = sock , backlog = backlog , ssl = sslcontext ) if server : server . sockets . extend ( srv . sockets ) else : server = srv elif isinstance ( address , tuple ) : server = await create_server ( self . create_protocol , host = address [ 0 ] , port = address [ 1 ] , backlog = backlog , ssl = sslcontext ) else : raise RuntimeError ( 'sockets or address must be supplied' ) self . _set_server ( server )
Start serving .
190
3
225,319
def _close_connections ( self , connection = None , timeout = 5 ) : all = [ ] if connection : waiter = connection . event ( 'connection_lost' ) . waiter ( ) if waiter : all . append ( waiter ) connection . close ( ) else : connections = list ( self . _concurrent_connections ) self . _concurrent_connections = set ( ) for connection in connections : waiter = connection . event ( 'connection_lost' ) . waiter ( ) if waiter : all . append ( waiter ) connection . close ( ) if all : self . logger . info ( '%s closing %d connections' , self , len ( all ) ) return asyncio . wait ( all , timeout = timeout , loop = self . _loop )
Close connection if specified otherwise close all connections .
161
9
225,320
async def start_serving ( self , address = None , sockets = None , * * kw ) : if self . _server : raise RuntimeError ( 'Already serving' ) server = DGServer ( self . _loop ) loop = self . _loop if sockets : for sock in sockets : transport , _ = await loop . create_datagram_endpoint ( self . create_protocol , sock = sock ) server . transports . append ( transport ) elif isinstance ( address , tuple ) : transport , _ = await loop . create_datagram_endpoint ( self . create_protocol , local_addr = address ) server . transports . append ( transport ) else : raise RuntimeError ( 'sockets or address must be supplied' ) self . _set_server ( server )
create the server endpoint .
167
5
225,321
async def monitor_start ( self , monitor ) : cfg = self . cfg if ( not platform . has_multiprocessing_socket or cfg . concurrency == 'thread' ) : cfg . set ( 'workers' , 0 ) servers = await self . binds ( monitor ) if not servers : raise ImproperlyConfigured ( 'Could not open a socket. ' 'No address to bind to' ) addresses = [ ] for server in servers . values ( ) : addresses . extend ( server . addresses ) self . cfg . addresses = addresses
Create the socket listening to the bind address .
121
9
225,322
async def create_server ( self , worker , protocol_factory , address = None , sockets = None , idx = 0 ) : cfg = self . cfg max_requests = cfg . max_requests if max_requests : max_requests = int ( lognormvariate ( log ( max_requests ) , 0.2 ) ) server = self . server_factory ( protocol_factory , loop = worker . _loop , max_requests = max_requests , keep_alive = cfg . keep_alive , name = self . name , logger = self . logger , server_software = cfg . server_software , cfg = cfg , idx = idx ) for event in ( 'connection_made' , 'pre_request' , 'post_request' , 'connection_lost' ) : callback = getattr ( cfg , event ) if callback != pass_through : server . event ( event ) . bind ( callback ) await server . start_serving ( sockets = sockets , address = address , backlog = cfg . backlog , sslcontext = self . sslcontext ( ) ) return server
Create the Server which will listen for requests .
253
9
225,323
def channels ( self , pattern = None ) : if pattern : return self . store . execute ( 'PUBSUB' , 'CHANNELS' , pattern ) else : return self . store . execute ( 'PUBSUB' , 'CHANNELS' )
Lists the currently active channels matching pattern
55
8
225,324
def lock ( self , name , * * kwargs ) : return self . pubsub . store . client ( ) . lock ( self . prefixed ( name ) , * * kwargs )
Global distributed lock
42
3
225,325
async def publish ( self , channel , event , data = None ) : msg = { 'event' : event , 'channel' : channel } if data : msg [ 'data' ] = data try : await self . pubsub . publish ( self . prefixed ( channel ) , msg ) except ConnectionRefusedError : self . connection_error = True self . logger . critical ( '%s cannot publish on "%s" channel - connection error' , self , channel ) else : self . connection_ok ( )
Publish a new event on a channel
109
8
225,326
async def close ( self ) : push_connection = self . pubsub . push_connection self . status = self . statusType . closed if push_connection : push_connection . event ( 'connection_lost' ) . unbind ( self . _connection_lost ) await self . pubsub . close ( )
Close channels and underlying pubsub handler
67
7
225,327
def origin_req_host ( self ) : if self . history : return self . history [ 0 ] . request . origin_req_host else : return scheme_host_port ( self . url ) [ 1 ]
Required by Cookies handlers
46
4
225,328
def get_header ( self , header_name , default = None ) : return self . headers . get ( header_name , self . unredirected_headers . get ( header_name , default ) )
Retrieve header_name from this request headers .
45
10
225,329
def remove_header ( self , header_name ) : val1 = self . headers . pop ( header_name , None ) val2 = self . unredirected_headers . pop ( header_name , None ) return val1 or val2
Remove header_name from this request .
53
8
225,330
def raw ( self ) : if self . _raw is None : self . _raw = HttpStream ( self ) return self . _raw
A raw asynchronous Http response
30
6
225,331
def links ( self ) : headers = self . headers or { } header = headers . get ( 'link' ) li = { } if header : links = parse_header_links ( header ) for link in links : key = link . get ( 'rel' ) or link . get ( 'url' ) li [ key ] = link return li
Returns the parsed header links of the response if any
73
10
225,332
def text ( self ) : data = self . content return data . decode ( self . encoding or 'utf-8' ) if data else ''
Decode content as a string .
30
7
225,333
def decode_content ( self ) : ct = self . headers . get ( 'content-type' ) if ct : ct , options = parse_options_header ( ct ) charset = options . get ( 'charset' ) if ct in JSON_CONTENT_TYPES : return self . json ( ) elif ct . startswith ( 'text/' ) : return self . text elif ct == FORM_URL_ENCODED : return parse_qsl ( self . content . decode ( charset ) , keep_blank_values = True ) return self . content
Return the best possible representation of the response body .
133
10
225,334
def request ( self , method , url , * * params ) : response = self . _request ( method , url , * * params ) if not self . _loop . is_running ( ) : return self . _loop . run_until_complete ( response ) else : return response
Constructs and sends a request to a remote server .
60
11
225,335
def ssl_context ( self , verify = True , cert_reqs = None , check_hostname = False , certfile = None , keyfile = None , cafile = None , capath = None , cadata = None , * * kw ) : assert ssl , 'SSL not supported' cafile = cafile or DEFAULT_CA_BUNDLE_PATH if verify is True : cert_reqs = ssl . CERT_REQUIRED check_hostname = True if isinstance ( verify , str ) : cert_reqs = ssl . CERT_REQUIRED if os . path . isfile ( verify ) : cafile = verify elif os . path . isdir ( verify ) : capath = verify return ssl . _create_unverified_context ( cert_reqs = cert_reqs , check_hostname = check_hostname , certfile = certfile , keyfile = keyfile , cafile = cafile , capath = capath , cadata = cadata )
Create a SSL context object .
224
6
225,336
async def create_tunnel_connection ( self , req ) : tunnel_address = req . tunnel_address connection = await self . create_connection ( tunnel_address ) response = connection . current_consumer ( ) for event in response . events ( ) . values ( ) : event . clear ( ) response . start ( HttpTunnel ( self , req ) ) await response . event ( 'post_request' ) . waiter ( ) if response . status_code != 200 : raise ConnectionRefusedError ( 'Cannot connect to tunnel: status code %s' % response . status_code ) raw_sock = connection . transport . get_extra_info ( 'socket' ) if raw_sock is None : raise RuntimeError ( 'Transport without socket' ) # duplicate socket so we can close transport raw_sock = raw_sock . dup ( ) connection . transport . close ( ) await connection . event ( 'connection_lost' ) . waiter ( ) self . sessions -= 1 self . requests_processed -= 1 # connection = await self . create_connection ( sock = raw_sock , ssl = req . ssl ( self ) , server_hostname = req . netloc ) return connection
Create a tunnel connection
262
4
225,337
def python_path ( self , script ) : if not script : try : import __main__ script = getfile ( __main__ ) except Exception : # pragma nocover return script = os . path . realpath ( script ) if self . cfg . get ( 'python_path' , True ) : path = os . path . dirname ( script ) if path not in sys . path : sys . path . insert ( 0 , path ) return script
Called during initialisation to obtain the script name .
98
11
225,338
def start ( self , exit = True ) : on_start = self ( ) actor = arbiter ( ) if actor and on_start : actor . start ( exit = exit ) if actor . exit_code is not None : return actor . exit_code return on_start
Invoked the application callable method and start the arbiter if it wasn t already started .
58
19
225,339
def stop ( self , actor = None ) : if actor is None : actor = get_actor ( ) if actor and actor . is_arbiter ( ) : monitor = actor . get_actor ( self . name ) if monitor : return monitor . stop ( ) raise RuntimeError ( 'Cannot stop application' )
Stop the application
67
3
225,340
def set_owner_process ( uid , gid ) : if gid : try : os . setgid ( gid ) except OverflowError : # versions of python < 2.6.2 don't manage unsigned int for # groups like on osx or fedora os . setgid ( - ctypes . c_int ( - gid ) . value ) if uid : os . setuid ( uid )
set user and group of workers processes
92
7
225,341
def wait ( value , must_be_child = False ) : current = getcurrent ( ) parent = current . parent if must_be_child and not parent : raise MustBeInChildGreenlet ( 'Cannot wait on main greenlet' ) return parent . switch ( value ) if parent else value
Wait for a possible asynchronous value to complete .
64
9
225,342
def run_in_greenlet ( callable ) : @ wraps ( callable ) async def _ ( * args , * * kwargs ) : green = greenlet ( callable ) # switch to the new greenlet result = green . switch ( * args , * * kwargs ) # back to the parent while isawaitable ( result ) : # keep on switching back to the greenlet if we get an awaitable try : result = green . switch ( ( await result ) ) except Exception : exc_info = sys . exc_info ( ) result = green . throw ( * exc_info ) return green . switch ( result ) return _
Decorator to run a callable on a new greenlet .
138
14
225,343
def build_response ( content , code = 200 ) : response = make_response ( jsonify ( content ) , content [ 'code' ] ) response . headers [ 'Access-Control-Allow-Origin' ] = '*' response . headers [ 'Access-Control-Allow-Headers' ] = 'Origin, X-Requested-With, Content-Type, Accept, Authorization' return response
Build response add headers
85
4
225,344
def post ( self ) : ## format sql data = request . get_json ( ) options , sql_raw = data . get ( 'options' ) , data . get ( 'sql_raw' ) if options == 'format' : sql_formmated = sqlparse . format ( sql_raw , keyword_case = 'upper' , reindent = True ) return build_response ( dict ( data = sql_formmated , code = 200 ) ) elif options in ( 'all' , 'selected' ) : conn = SQL ( config . sql_host , config . sql_port , config . sql_user , config . sql_pwd , config . sql_db ) result = conn . run ( sql_raw ) return build_response ( dict ( data = result , code = 200 ) ) else : pass pass
return executed sql result to client .
178
7
225,345
def get ( self , page = 0 , size = 10 ) : dash_list = r_db . zrevrange ( config . DASH_ID_KEY , 0 , - 1 , True ) id_list = dash_list [ page * size : page * size + size ] dash_meta = [ ] data = [ ] if id_list : dash_meta = r_db . hmget ( config . DASH_META_KEY , [ i [ 0 ] for i in id_list ] ) data = [ json . loads ( i ) for i in dash_meta ] return build_response ( dict ( data = data , code = 200 ) )
Get dashboard meta info from in page page and page size is size .
142
14
225,346
def get ( self ) : keys = r_kv . keys ( ) keys . sort ( ) return build_response ( dict ( data = keys , code = 200 ) )
Get key list in storage .
37
6
225,347
def get ( self , key ) : data = r_kv . get ( key ) # data = json.dumps(data) if isinstance(data, str) else data # data = json.loads(data) if data else {} return build_response ( dict ( data = data , code = 200 ) )
Get a key - value from storage according to the key name .
68
13
225,348
def get ( self , dash_id ) : return make_response ( render_template ( 'dashboard.html' , dash_id = dash_id , api_root = config . app_host ) )
Just return the dashboard id in the rendering html .
45
10
225,349
def get ( self , dash_id ) : data = json . loads ( r_db . hmget ( config . DASH_CONTENT_KEY , dash_id ) [ 0 ] ) return build_response ( dict ( data = data , code = 200 ) )
Read dashboard content .
58
4
225,350
def put ( self , dash_id = 0 ) : data = request . get_json ( ) updated = self . _update_dash ( dash_id , data ) return build_response ( dict ( data = updated , code = 200 ) )
Update a dash meta and content return updated dash content .
52
11
225,351
def delete ( self , dash_id ) : removed_info = dict ( time_modified = r_db . zscore ( config . DASH_ID_KEY , dash_id ) , meta = r_db . hget ( config . DASH_META_KEY , dash_id ) , content = r_db . hget ( config . DASH_CONTENT_KEY , dash_id ) ) r_db . zrem ( config . DASH_ID_KEY , dash_id ) r_db . hdel ( config . DASH_META_KEY , dash_id ) r_db . hdel ( config . DASH_CONTENT_KEY , dash_id ) return { 'removed_info' : removed_info }
Delete a dash meta and content return updated dash content .
164
11
225,352
def main ( lang = 'deu' , n = 900 , epochs = 50 , batch_size = 64 , num_neurons = 256 , encoder_input_data = None , decoder_input_data = None , decoder_target_data = None , checkpoint_dir = os . path . join ( BIGDATA_PATH , 'checkpoints' ) , ) : mkdir_p ( checkpoint_dir ) encoder_input_path = os . path . join ( checkpoint_dir , 'nlpia-ch10-translate-input-{}.npy' . format ( lang ) ) decoder_input_path = os . path . join ( checkpoint_dir , 'nlpia-ch10-translate-decoder-input-{}.npy' . format ( lang ) ) decoder_target_path = os . path . join ( checkpoint_dir , 'nlpia-ch10-translate-target-{}.npy' . format ( 'eng' ) ) data_paths = ( encoder_input_path , decoder_input_path , decoder_target_path ) encoder_input_data = [ ] if all ( [ os . path . isfile ( p ) for p in data_paths ] ) : encoder_input_data = np . load ( encoder_input_path ) decoder_input_data = np . load ( decoder_input_path ) decoder_target_data = np . load ( decoder_target_path ) if len ( encoder_input_data ) < n : encoder_input_data , decoder_input_data , decoder_target_data = onehot_char_training_data ( lang = lang , n = n , data_paths = data_paths ) encoder_input_data = encoder_input_data [ : n ] decoder_input_data = decoder_input_data [ : n ] decoder_target_data = decoder_target_data [ : n ] model = fit ( data_paths = data_paths , epochs = epochs , batch_size = batch_size , num_neurons = num_neurons ) return model
Train an LSTM encoder - decoder squence - to - sequence model on Anki flashcards for international translation
487
25
225,353
def energy ( self , v , h = None ) : h = np . zeros ( self . Nh ) if h is None else h negE = np . dot ( v , self . bv ) negE += np . dot ( h , self . bh ) for j in range ( self . Nv ) : for i in range ( j ) : negE += v [ i ] * v [ j ] * self . Wvv [ i ] [ j ] for i in range ( self . Nv ) : for k in range ( self . Nh ) : negE += v [ i ] * h [ k ] * self . Wvh [ i ] [ k ] for l in range ( self . Nh ) : for k in range ( l ) : negE += h [ k ] * h [ l ] * self . Whh [ k ] [ l ] return - negE
Compute the global energy for the current joint state of all nodes
192
13
225,354
def energy ( self ) : s , b , W , N = self . state , self . b , self . W , self . N self . E = - sum ( s * b ) - sum ( [ s [ i ] * s [ j ] * W [ i , j ] for ( i , j ) in product ( range ( N ) , range ( N ) ) if i < j ] ) self . low_energies [ - 1 ] = self . E self . low_energies . sort ( ) self . high_energies [ - 1 ] = self . E self . high_energies . sort ( ) self . high_energies = self . high_energies [ : : - 1 ] return self . E
r Compute the global energy for the current joint state of all nodes
162
14
225,355
def translate ( self , text , to_template = '{name} ({url})' , from_template = None , name_matcher = None , url_matcher = None ) : return self . replace ( text , to_template = to_template , from_template = from_template , name_matcher = name_matcher , url_matcher = url_matcher )
Translate hyperinks into printable book style for Manning Publishing
84
12
225,356
def main ( dialogpath = None ) : if dialogpath is None : args = parse_args ( ) dialogpath = os . path . abspath ( os . path . expanduser ( args . dialogpath ) ) else : dialogpath = os . path . abspath ( os . path . expanduser ( args . dialogpath ) ) return clean_csvs ( dialogpath = dialogpath )
Parse the state transition graph for a set of dialog - definition tables to find an fix deadends
82
20
225,357
def prepare_data_maybe_download ( directory ) : filename = 'ubuntu_dialogs.tgz' url = 'http://cs.mcgill.ca/~jpineau/datasets/ubuntu-corpus-1.0/ubuntu_dialogs.tgz' dialogs_path = os . path . join ( directory , 'dialogs' ) # test it there are some dialogs in the path if not os . path . exists ( os . path . join ( directory , "10" , "1.tst" ) ) : # dialogs are missing archive_path = os . path . join ( directory , filename ) if not os . path . exists ( archive_path ) : # archive missing, download it print ( "Downloading %s to %s" % ( url , archive_path ) ) filepath , _ = urllib . request . urlretrieve ( url , archive_path ) print "Successfully downloaded " + filepath # unpack data if not os . path . exists ( dialogs_path ) : print ( "Unpacking dialogs ..." ) with tarfile . open ( archive_path ) as tar : tar . extractall ( path = directory ) print ( "Archive unpacked." ) return
Download and unpack dialogs if necessary .
268
9
225,358
def fib ( n ) : assert n > 0 a , b = 1 , 1 for i in range ( n - 1 ) : a , b = b , a + b return a
Fibonacci example function
38
6
225,359
def main ( args ) : args = parse_args ( args ) setup_logging ( args . loglevel ) _logger . debug ( "Starting crazy calculations..." ) print ( "The {}-th Fibonacci number is {}" . format ( args . n , fib ( args . n ) ) ) _logger . info ( "Script ends here" )
Main entry point allowing external calls
78
6
225,360
def optimize_feature_power ( df , output_column_name = None , exponents = [ 2. , 1. , .8 , .5 , .25 , .1 , .01 ] ) : output_column_name = list ( df . columns ) [ - 1 ] if output_column_name is None else output_column_name input_column_names = [ colname for colname in df . columns if output_column_name != colname ] results = np . zeros ( ( len ( exponents ) , len ( input_column_names ) ) ) for rownum , exponent in enumerate ( exponents ) : for colnum , column_name in enumerate ( input_column_names ) : results [ rownum , colnum ] = ( df [ output_column_name ] ** exponent ) . corr ( df [ column_name ] ) results = pd . DataFrame ( results , columns = input_column_names , index = pd . Series ( exponents , name = 'power' ) ) # results.plot(logx=True) return results
Plot the correlation coefficient for various exponential scalings of input features
236
12
225,361
def representative_sample ( X , num_samples , save = False ) : X = X . values if hasattr ( X , 'values' ) else np . array ( X ) N , M = X . shape rownums = np . arange ( N ) np . random . shuffle ( rownums ) idx = AnnoyIndex ( M ) for i , row in enumerate ( X ) : idx . add_item ( i , row ) idx . build ( int ( np . log2 ( N ) ) + 1 ) if save : if isinstance ( save , basestring ) : idxfilename = save else : idxfile = tempfile . NamedTemporaryFile ( delete = False ) idxfile . close ( ) idxfilename = idxfile . name idx . save ( idxfilename ) idx = AnnoyIndex ( M ) idx . load ( idxfile . name ) samples = - 1 * np . ones ( shape = ( num_samples , ) , dtype = int ) samples [ 0 ] = rownums [ 0 ] # FIXME: some integer determined by N and num_samples and distribution j , num_nns = 0 , min ( 1000 , int ( num_samples / 2. + 1 ) ) for i in rownums : if i in samples : continue nns = idx . get_nns_by_item ( i , num_nns ) # FIXME: pick vector furthest from past K (K > 1) points or outside of a hypercube # (sized to uniformly fill the space) around the last sample samples [ j + 1 ] = np . setdiff1d ( nns , samples ) [ - 1 ] if len ( num_nns ) < num_samples / 3. : num_nns = min ( N , 1.3 * num_nns ) j += 1 return samples
Sample vectors in X preferring edge cases and vectors farthest from other vectors in sample set
409
17
225,362
def cosine_sim ( vec1 , vec2 ) : vec1 = [ val for val in vec1 . values ( ) ] vec2 = [ val for val in vec2 . values ( ) ] dot_prod = 0 for i , v in enumerate ( vec1 ) : dot_prod += v * vec2 [ i ] mag_1 = math . sqrt ( sum ( [ x ** 2 for x in vec1 ] ) ) mag_2 = math . sqrt ( sum ( [ x ** 2 for x in vec2 ] ) ) return dot_prod / ( mag_1 * mag_2 )
Since our vectors are dictionaries lets convert them to lists for easier mathing .
134
16
225,363
def fit ( self , X , y ) : # initial sums n = float ( len ( X ) ) sum_x = X . sum ( ) sum_y = y . sum ( ) sum_xy = ( X * y ) . sum ( ) sum_xx = ( X ** 2 ) . sum ( ) # formula for w0 self . slope = ( sum_xy - ( sum_x * sum_y ) / n ) / ( sum_xx - ( sum_x * sum_x ) / n ) # formula for w1 self . intercept = sum_y / n - self . slope * ( sum_x / n ) return self
Compute average slope and intercept for all X y pairs
138
11
225,364
def looks_like_url ( url ) : if not isinstance ( url , basestring ) : return False if not isinstance ( url , basestring ) or len ( url ) >= 1024 or not cre_url . match ( url ) : return False return True
Simplified check to see if the text appears to be a URL .
57
15
225,365
def try_parse_url(url):
    """Parse a URL string with urlparse, retrying with an assumed http:// scheme.

    Returns the ParseResult, or None when the string is too short, unparseable,
    or no scheme can be guessed.
    """
    if len(url.strip()) < 4:
        logger.info('URL too short: {}'.format(url))
        return None
    try:
        result = urlparse(url)
    except ValueError:
        logger.info('Parse URL ValueError: {}'.format(url))
        return None
    if result.scheme:
        return result
    # no scheme found -- retry with http:// prepended
    retry = 'http://' + result.geturl()
    try:
        result = urlparse(retry)
    except ValueError:
        logger.info('Invalid URL for assumed http scheme: urlparse("{}") from "{}" '.format(retry, url))
        return None
    if result.scheme:
        return result
    logger.info('Unable to guess a scheme for URL: {}'.format(url))
    return None
Use urlparse to try to parse URL returning None on exception
195
12
225,366
def get_url_filemeta(url):
    """Request headers for the page at ``url`` and return its url, filename and remote size.

    Returns a dict on success, None when the URL cannot be parsed or the
    request fails.  FTP URLs are delegated to ``get_ftp_filemeta``.
    """
    parsed_url = try_parse_url(url)
    if parsed_url is None:
        return None
    if parsed_url.scheme.startswith('ftp'):
        return get_ftp_filemeta(parsed_url)
    url = parsed_url.geturl()
    try:
        r = requests.get(url, stream=True, allow_redirects=True, timeout=5)
        try:
            remote_size = r.headers.get('Content-Length', -1)
        finally:
            # bug fix: the streamed response was never closed, leaking the connection
            r.close()
        return dict(url=url, hostname=parsed_url.hostname, path=parsed_url.path,
                    username=parsed_url.username, remote_size=remote_size,
                    filename=os.path.basename(parsed_url.path))
    except ConnectionError:
        return None
    except (InvalidURL, InvalidSchema, InvalidHeader, MissingSchema):
        return None
    # (removed an unreachable trailing `return None`)
Request HTML for the page at the URL indicated and return the url filename and remote size
201
17
225,367
def save_response_content(response, filename='data.csv', destination=os.path.curdir, chunksize=32768):
    """Stream a requests response to disk one chunk at a time.

    Args:
        response: a streaming ``requests`` response object.
        filename (str): file name, or full path when it contains a path separator.
        destination (str): directory to write into when ``filename`` is bare.
        chunksize (int): bytes per chunk read from the response.

    Returns:
        str: the (expanded) path the content was written to.
    """
    chunksize = chunksize or 32768
    if os.path.sep in filename:
        full_destination_path = filename
    else:
        full_destination_path = os.path.join(destination, filename)
    full_destination_path = expand_filepath(full_destination_path)
    with open(full_destination_path, "wb") as f:
        # bug fix: the chunksize argument was ignored in favor of the CHUNK_SIZE module constant
        for chunk in tqdm(response.iter_content(chunksize)):
            if chunk:  # filter out keep-alive new chunks
                f.write(chunk)
    return full_destination_path
For streaming response from requests download the content one CHUNK at a time
159
15
225,368
def download_file_from_google_drive(driveid, filename=None, destination=os.path.curdir):
    """Download a file shared on Google Drive, given its share URL or bare drive id.

    Returns the absolute path of the destination directory.
    """
    if '&id=' in driveid:
        # https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfM1BxdkxVaTY2bWs  # dailymail_stories.tgz
        driveid = driveid.split('&id=')[-1]
    if '?id=' in driveid:
        # 'https://drive.google.com/open?id=14mELuzm0OvXnwjb0mzAiG-Ake9_NP_LQ'  # SSD pretrainined keras model
        driveid = driveid.split('?id=')[-1]
    URL = "https://docs.google.com/uc?export=download"
    session = requests.Session()
    response = session.get(URL, params={'id': driveid}, stream=True)
    # large files require a confirmation token round-trip
    token = get_response_confirmation_token(response)
    if token:
        params = {'id': driveid, 'confirm': token}
        response = session.get(URL, params=params, stream=True)
    filename = filename or get_url_filename(driveid=driveid)
    # bug fix: `fileanme` typo raised NameError before anything was written to disk
    save_response_content(response, filename=filename, destination=destination)
    return os.path.abspath(destination)
Download script for google drive shared links
323
7
225,369
def find_greeting(s):
    """Return the greeting string 'Hi', 'Hello', or 'Yo' if it begins the string, else None.

    Bug fix: guard against empty and single-character input, which previously
    raised IndexError on s[0] / s[1].
    """
    if not s:
        return None
    if s[0] == 'H':
        if s[:3] in ['Hi', 'Hi ', 'Hi,', 'Hi!']:
            return s[:2]
        elif s[:6] in ['Hello', 'Hello ', 'Hello,', 'Hello!']:
            return s[:5]
    elif s[0] == 'Y':
        # s[1:2] is '' (not an IndexError) when s is just 'Y'
        if s[1:2] == 'o' and s[:3] in ['Yo', 'Yo,', 'Yo ', 'Yo!']:
            return s[:2]
    return None
Return the the greeting string Hi Hello or Yo if it occurs at the beginning of a string
136
18
225,370
def file_to_list(in_file):
    """Read an open file (or any iterable of strings) into a list of lines.

    Newlines are stripped; blank lines and '#' comment lines are skipped.
    """
    stripped = (raw.strip('\n') for raw in in_file)
    return [line for line in stripped if line != '' and line[0] != '#']
Reads file into list
68
5
225,371
def add_flag_values(self, entry, flag):
    """Record `entry` under `flag` in self.flags, ignoring flags this rule does not track."""
    if flag not in self.flags:
        return
    self.flags[flag].append(entry)
Adds flag value to applicable compounds
32
6
225,372
def get_regex(self):
    """Build and return the compound regular expression.

    Each flag in self.compound expands to an alternation group of its values;
    the regex quantifiers '?' and '*' pass through unchanged.
    """
    parts = []
    for flag in self.compound:
        if flag in ('?', '*'):
            parts.append(flag)
        else:
            parts.append('(' + '|'.join(self.flags[flag]) + ')')
    return ''.join(parts)
Generates and returns compound regular expression
59
7
225,373
def __parse_dict(self):
    """Parse the loaded dictionary lines, expanding each base word via its affix/compound flag rules.

    Populates self.words (base word -> derivative entries), self.keys,
    self.num_words and self.regex_compounds.
    """
    i = 0
    lines = self.lines
    for line in lines:
        # each dictionary line is "word/FLAGS" (flags optional)
        line = line.split('/')
        word = line[0]
        flags = line[1] if len(line) > 1 else None
        # Base Word
        self.num_words += 1
        if flags != None:
            # Derivatives possible
            for flag in flags:
                # Compound?
                if flag in self.aff.compound_flags or flag == self.aff.only_in_compound_flag:
                    # register the word with every compound rule that tracks this flag
                    for rule in self.aff.compound_rules:
                        rule.add_flag_values(word, flag)
                else:
                    # No Suggest flags
                    if self.aff.no_suggest_flag == flag:
                        pass
                    else:
                        affix_rule_entries = self.aff.affix_rules[flag]
                        # Get flag that meets condition
                        for i in range(len(affix_rule_entries)):
                            rule = affix_rule_entries[i]
                            if rule.meets_condition(word):
                                # Add word to list if does not already exist
                                if word not in self.words:
                                    self.words[word] = []
                                # Derivatives
                                self.num_words += 1
                                if self.format == "addsub":
                                    add_sub = rule.generate_add_sub()
                                    # Add to list of keys
                                    if add_sub not in self.keys:
                                        self.keys.append(add_sub)
                                    # Check if key is to be generated
                                    if self.key:
                                        # store the key index instead of the add/sub string itself
                                        self.words[word].append(str(self.keys.index(add_sub)))
                                    else:
                                        # Generate addsub next to base word
                                        self.words[word].append(rule.generate_add_sub())
                                else:
                                    # Default, insert complete derivative word
                                    self.words[word].append(rule.create_derivative(word))
        else:
            # No derivatives.
            self.words[word] = []
    # Create regular expression from compounds
    for rule in self.aff.compound_rules:
        # Add to list
        self.regex_compounds.append(rule.get_regex())
Parses dictionary with according rules
452
7
225,374
def load_imdb_df(dirpath=os.path.join(BIGDATA_PATH, 'aclImdb'), subdirectories=(('train', 'test'), ('pos', 'neg', 'unsup'))):
    """Walk the IMDB review directory tree and compile a DataFrame of review texts labeled with 1-10 star ratings.

    For each (split, label) combination (train/test x pos/neg/unsup) the review
    URLs are read from urls_<label>.txt and each review body from the matching
    text subdirectory.  Review filenames encode "<id>_<rating>.txt".
    Returns the concatenation of all per-combination DataFrames, indexed by
    (index0=split, index1=label, index2=id).
    """
    dfs = {}
    for subdirs in tqdm(list(product(*subdirectories))):
        urlspath = os.path.join(dirpath, subdirs[0], 'urls_{}.txt'.format(subdirs[1]))
        if not os.path.isfile(urlspath):
            if subdirs != ('test', 'unsup'):  # test/ dir doesn't usually have an unsup subdirectory
                logger.warning('Unable to find expected IMDB review list of URLs: {}'.format(urlspath))
            continue
        df = pd.read_csv(urlspath, header=None, names=['url'])
        # df.index.name = 'id'
        df['url'] = series_strip(df.url, endswith='/usercomments')
        textsdir = os.path.join(dirpath, subdirs[0], subdirs[1])
        if not os.path.isdir(textsdir):
            logger.warning('Unable to find expected IMDB review text subdirectory: {}'.format(textsdir))
            continue
        filenames = [fn for fn in os.listdir(textsdir) if fn.lower().endswith('.txt')]
        # NOTE(review): assumes os.listdir order lines up row-for-row with the urls file -- TODO confirm
        df['index0'] = subdirs[0]  # TODO: column names more generic so will work on other datasets
        df['index1'] = subdirs[1]
        df['index2'] = np.array([int(fn[:-4].split('_')[0]) for fn in filenames])
        df['rating'] = np.array([int(fn[:-4].split('_')[1]) for fn in filenames])
        texts = []
        for fn in filenames:
            with ensure_open(os.path.join(textsdir, fn)) as f:
                texts.append(f.read())
        df['text'] = np.array(texts)
        del texts
        df.set_index('index0 index1 index2'.split(), inplace=True)
        df.sort_index(inplace=True)
        dfs[subdirs] = df
    return pd.concat(dfs.values())
Walk directory tree starting at path to compile a DataFrame of movie review text labeled with their 1 - 10 star ratings
589
23
225,375
def load_glove(filepath, batch_size=1000, limit=None, verbose=True):
    r"""Load a pretrained GloVe word vector model into a gensim KeyedVectors object.

    Args:
        filepath: path to a GloVe-format text file (one word + floats per line).
        batch_size: retained for API compatibility; the batching code below is
            currently dead (the appends that fed it are commented out).
        limit: maximum number of vectors to load (None loads the whole file).
        verbose: show a tqdm progress bar while loading.
    """
    num_dim = isglove(filepath)
    tqdm_prog = tqdm if verbose else no_tqdm
    wv = KeyedVectors(num_dim)
    if limit:
        vocab_size = int(limit)
    else:
        # count lines to pre-size the vectors array
        with ensure_open(filepath) as fin:
            for i, line in enumerate(fin):
                pass
        vocab_size = i + 1
    wv.vectors = np.zeros((vocab_size, num_dim), REAL)
    with ensure_open(filepath) as fin:
        batch, words = [], []
        for i, line in enumerate(tqdm_prog(fin, total=vocab_size)):
            line = line.split()
            word = line[0]
            vector = np.array(line[1:]).astype(float)
            # words.append(word)
            # batch.append(vector)
            wv.index2word.append(word)
            # count=vocab_size - i gives earlier (more frequent) words a higher count
            wv.vocab[word] = Vocab(index=i, count=vocab_size - i)
            wv.vectors[i] = vector
            # NOTE(review): `words` is never appended to, so this flush branch never fires
            if len(words) >= batch_size:
                # wv[words] = np.array(batch)
                batch, words = [], []
            if i >= vocab_size - 1:
                break
        if words:
            wv[words] = np.array(batch)
    return wv
Load a pretrained GloVE word vector model
333
11
225,376
def load_glove_df(filepath, **kwargs):
    """Load a GloVe-format text file into a DataFrame (words as index, floats as columns).

    The first line (a word2vec-style "<count> <dim>" header) is skipped;
    keyword arguments override the pandas defaults used here.
    """
    options = dict(index_col=0, header=None, sep=r'\s', skiprows=[0],
                   verbose=False, engine='python')
    options.update(kwargs)
    return pd.read_csv(filepath, **options)
Load a GloVE - format text file into a dataframe
90
13
225,377
def get_en2fr(url='http://www.manythings.org/anki/fra-eng.zip'):
    """Download and parse the English->French translation pairs used in the Keras seq2seq example."""
    download_unzip(url)
    # NOTE(review): reads the zip straight from the URL rather than the downloaded copy -- confirm intended
    return pd.read_table(url, compression='zip', header=None,
                         skip_blank_lines=True, sep='\t', skiprows=0,
                         names=['en', 'fr'])
Download and parse English - > French translation dataset used in Keras seq2seq example
90
17
225,378
def load_anki_df(language='deu'):
    """Load an Anki translation file as a DataFrame of sentence pairs with English.

    `language` may be either a 3-letter language code (file found under
    BIGDATA_PATH/<lang>-eng/<lang>.txt) or a direct path to such a file.
    """
    if os.path.isfile(language):
        filepath = language
        match = re.search('[a-z]{3}-eng/', filepath)
        lang = match.group()[:3].lower()
    else:
        lang = (language or 'deu').lower()[:3]
        filepath = os.path.join(BIGDATA_PATH, '{}-eng'.format(lang), '{}.txt'.format(lang))
    frame = pd.read_table(filepath, skiprows=1, header=None)
    frame.columns = ['eng', lang]
    return frame
Load into a DataFrame statements in one language along with their translation into English
156
15
225,379
def generate_big_urls_glove(bigurls=None):
    """Generate BIG_URLS-style entries for every GloVe size/dimensionality alias.

    Each alias (e.g. 'glove-sm300') reuses the URL of its canonical entry and
    points at the corresponding w2v-converted text file inside the archive.
    """
    bigurls = bigurls or {}
    size_groups = (
        ('sm -sm _sm -small _small'.split(), 6),
        ('med -med _med -medium _medium'.split(), 42),
        ('lg -lg _lg -large _large'.split(), 840),
    )
    # not all of these dimensionality and training set size combinations were trained by Stanford
    for num_dim in (50, 100, 200, 300):
        for suffixes, num_words in size_groups:
            dirname = 'glove.{num_words}B'.format(num_words=num_words)
            # glove.42B.300d.w2v.txt
            filename = dirname + '.{num_dim}d.w2v.txt'.format(num_dim=num_dim)
            # seed each alias with the URL registered under the size's canonical (last) suffix
            seed = BIG_URLS['glove' + suffixes[-1]]
            for suf in suffixes[:-1]:
                alias = 'glove' + suf + str(num_dim)
                entry = list(seed[:2])
                entry.append(os.path.join(dirname, filename))
                entry.append(load_glove)
                bigurls[alias] = tuple(entry)
    return bigurls
Generate a dictionary of URLs for various combinations of GloVe training set sizes and dimensionality
343
19
225,380
def normalize_ext_rename(filepath):
    """Normalize a file's extension (e.g. .tgz -> .tar.gz, 300d.txt -> 300d.glove.txt) and rename it on disk."""
    logger.debug('normalize_ext.filepath=' + str(filepath))
    target_path = normalize_ext(filepath)
    logger.debug('download_unzip.new_filepaths=' + str(target_path))
    # FIXME: fails when name is a url filename
    renamed_path = rename_file(filepath, target_path)
    logger.debug('download_unzip.filepath=' + str(renamed_path))
    return renamed_path
normalize file ext like . tgz - > . tar . gz and 300d . txt - > 300d . glove . txt and rename the file
123
34
225,381
def untar(fname, verbose=True):
    """Unzip and untar a .tar.gz archive into a subdirectory of BIGDATA_PATH.

    Returns the extracted directory path, or None when the archive name does
    not end in .tar.gz or the expected directory is missing after extraction.
    """
    if not fname.lower().endswith(".tar.gz"):
        logger.warning("Not a tar.gz file: {}".format(fname))
        return
    # skip extraction when the target directory already exists
    dirpath = os.path.join(BIGDATA_PATH, os.path.basename(fname)[:-7])
    if os.path.isdir(dirpath):
        return dirpath
    with tarfile.open(fname) as archive:
        members = archive.getmembers()
        for member in tqdm(members, total=len(members)):
            archive.extract(member, path=BIGDATA_PATH)
    dirpath = os.path.join(BIGDATA_PATH, members[0].name)
    if os.path.isdir(dirpath):
        return dirpath
Unzip and untar a tar . gz file into a subdir of the BIGDATA_PATH directory
186
23
225,382
def endswith_strip(s, endswith='.txt', ignorecase=True):
    """Strip a suffix from the end of a string; return s unchanged when the suffix is absent."""
    haystack = s.lower() if ignorecase else s
    needle = endswith.lower() if ignorecase else endswith
    if haystack.endswith(needle):
        return s[:-len(endswith)]
    return s
Strip a suffix from the end of a string
87
10
225,383
def startswith_strip(s, startswith='http://', ignorecase=True):
    """Strip a prefix from the beginning of a string; return s unchanged when the prefix is absent."""
    if ignorecase:
        if s.lower().startswith(startswith.lower()):
            return s[len(startswith):]
    else:
        # bug fix: this branch tested s.endswith(), so case-sensitive prefixes were never stripped
        if s.startswith(startswith):
            return s[len(startswith):]
    return s
Strip a prefix from the beginning of a string
86
10
225,384
def get_longest_table(url='https://www.openoffice.org/dev_docs/source/file_extensions.html', header=0):
    """Retrieve all HTML tables at a URL and return the longest one as a DataFrame."""
    tables = pd.read_html(url, header=header)
    return longest_table(tables)
Retrieve the HTML tables from a URL and return the longest DataFrame found
63
15
225,385
def get_filename_extensions(url='https://www.webopedia.com/quick_ref/fileextensionsfull.asp'):
    """Load a DataFrame of filename extensions from the indicated URL.

    The first columns are renamed to 'ext', 'description' and (when present) 'details'.
    """
    df = get_longest_table(url)
    renamed = list(df.columns)
    renamed[0] = 'ext'
    renamed[1] = 'description'
    if len(renamed) > 2:
        renamed[2] = 'details'
    df.columns = renamed
    return df
Load a DataFrame of filename extensions from the indicated url
92
11
225,386
def create_big_url(name):
    """If `name` looks like an http URL, register it in the global BIG_URLS table.

    Returns the normalized dataset key, or None when the URL yields no metadata.
    """
    # BIG side effect
    global BIG_URLS
    filemeta = get_url_filemeta(name)
    if not filemeta:
        return None
    filename = filemeta['filename']
    remote_size = filemeta['remote_size']
    url = filemeta['url']
    # derive the key from the first non-empty filename segment
    segments = filename.split('.')
    key = (segments[0] if segments[0] not in ('', '.') else segments[1]).replace(' ', '-')
    key = key.lower().strip()
    BIG_URLS[key] = (url, int(remote_size or -1), filename)
    return key
If name looks like a url with an http add an entry for it in BIG_URLS
149
19
225,387
def get_data(name='sms-spam', nrows=None, limit=None):
    """Load a dataset by name from the data dir, a local path, or a registered download URL.

    Dispatches on where the name is registered (BIG_URLS, DATASET_NAME2FILENAME,
    DATA_NAMES) or on the file extension of the resolved path.  Raises IOError
    when the dataset cannot be found anywhere.
    """
    nrows = nrows or limit
    if name in BIG_URLS:
        logger.info('Downloading {}'.format(name))
        filepaths = download_unzip(name, normalize_filenames=True)
        logger.debug('nlpia.loaders.get_data.filepaths=' + str(filepaths))
        filepath = filepaths[name][0] if isinstance(filepaths[name], (list, tuple)) else filepaths[name]
        logger.debug('nlpia.loaders.get_data.filepath=' + str(filepath))
        filepathlow = filepath.lower()
        # a 4th BIG_URLS element is a custom loader callable; a 5th is its kwargs
        if len(BIG_URLS[name]) >= 4:
            kwargs = BIG_URLS[name][4] if len(BIG_URLS[name]) >= 5 else {}
            return BIG_URLS[name][3](filepath, **kwargs)
        if filepathlow.endswith('.w2v.txt'):
            try:
                return KeyedVectors.load_word2vec_format(filepath, binary=False, limit=nrows)
            except (TypeError, UnicodeError):
                pass
        if filepathlow.endswith('.w2v.bin') or filepathlow.endswith('.bin.gz') or filepathlow.endswith('.w2v.bin.gz'):
            try:
                return KeyedVectors.load_word2vec_format(filepath, binary=True, limit=nrows)
            except (TypeError, UnicodeError):
                pass
        if filepathlow.endswith('.gz'):
            # best effort: hand subsequent readers an open (decompressed) file object
            try:
                filepath = ensure_open(filepath)
            except:  # noqa
                pass
        # NOTE(review): re.match anchors at position 0, so this only fires for paths
        # whose 2nd char starts 'json' -- presumably '.*\.json' was intended; confirm
        if re.match(r'.json([.][a-z]{0,3}){0,2}', filepathlow):
            return read_json(filepath)
        if filepathlow.endswith('.tsv.gz') or filepathlow.endswith('.tsv'):
            try:
                return pd.read_table(filepath)
            except:  # noqa
                pass
        if filepathlow.endswith('.csv.gz') or filepathlow.endswith('.csv'):
            try:
                return read_csv(filepath)
            except:  # noqa
                pass
        if filepathlow.endswith('.txt'):
            try:
                return read_txt(filepath)
            except (TypeError, UnicodeError):
                pass
        # no reader matched: return the raw downloaded path(s)
        return filepaths[name]
    elif name in DATASET_NAME2FILENAME:
        return read_named_csv(name, nrows=nrows)
    elif name in DATA_NAMES:
        return read_named_csv(DATA_NAMES[name], nrows=nrows)
    elif os.path.isfile(name):
        return read_named_csv(name, nrows=nrows)
    elif os.path.isfile(os.path.join(DATA_PATH, name)):
        return read_named_csv(os.path.join(DATA_PATH, name), nrows=nrows)
    msg = 'Unable to find dataset "{}"" in {} or {} (*.csv.gz, *.csv, *.json, *.zip, or *.txt)\n'.format(name, DATA_PATH, BIGDATA_PATH)
    msg += 'Available dataset names include:\n{}'.format('\n'.join(DATASET_NAMES))
    logger.error(msg)
    raise IOError(msg)
Load data from a json csv or txt file if it exists in the data dir .
825
19
225,388
def get_wikidata_qnum(wikiarticle, wikisite):
    """Retrieve the Wikidata Q-number for an article on the given wiki site."""
    params = {
        'action': 'wbgetentities',
        'titles': wikiarticle,
        'sites': wikisite,
        'props': '',
        'format': 'json',
    }
    payload = requests.get('https://www.wikidata.org/w/api.php',
                           timeout=5, params=params).json()
    return list(payload['entities'])[0]
Retrieve the Query number for a wikidata database of metadata about a particular article
108
17
225,389
def normalize_column_names(df):
    """Lowercase column names and replace spaces with underscores.

    Accepts either a DataFrame (its .columns are used) or a plain iterable of names.
    Returns a list of normalized names.
    """
    names = getattr(df, 'columns', df)
    return [name.lower().replace(' ', '_') for name in names]
r Clean up whitespace in column names . See better version at pugnlp . clean_columns
54
22
225,390
def clean_column_values(df, inplace=True):
    r"""Convert string columns of dollar values, comma-grouped numbers and percents into floats.

    Only object/unicode columns are touched, and a converted column replaces the
    original only when most of its content survived stripping and parsed cleanly.
    Returns the (possibly copied) DataFrame.
    """
    dollars_percents = re.compile(r'[%$,;\s]+')
    if not inplace:
        df = df.copy()
    for c in df.columns:
        values = None
        # only attempt conversion on string-like (unicode/bytes/object) columns
        if df[c].dtype.char in '<U S O'.split():
            try:
                values = df[c].copy()
                values = values.fillna('')
                values = values.astype(str).str.replace(dollars_percents, '')
                # values = values.str.strip().str.replace(dollars_percents, '').str.strip()
                # require that stripping kept > 20% of the characters before parsing as float
                if values.str.len().sum() > .2 * df[c].astype(str).str.len().sum():
                    values[values.isnull()] = np.nan
                    values[values == ''] = np.nan
                    values = values.astype(float)
            except ValueError:
                # column isn't numeric after stripping; leave it untouched
                values = None
            except:  # noqa
                logger.error('Error on column {} with dtype {}'.format(c, df[c].dtype))
                raise
        if values is not None:
            # replace only when under 60% NaN and at least one truthy value remains
            if values.isnull().sum() < .6 * len(values) and values.any():
                df[c] = values
    return df
Convert dollar value strings and numbers with commas and percents into floating point values
300
17
225,391
def isglove(filepath):
    """Check whether a file looks like a GloVe/word2vec vector file.

    Returns the vector dimensionality (int) when the first data line parses as
    a plausible vector, otherwise False.
    """
    with ensure_open(filepath, 'r') as f:
        header_line = f.readline()
        vector_line = f.readline()
    try:
        # word2vec-style header: "<num_vectors> <num_dim>"
        num_vectors, num_dim = header_line.split()
        return int(num_dim)
    except (ValueError, TypeError):
        pass
    vector = vector_line.split()[1:]
    if len(vector) % 10:
        # fix: debug print()s left in library code replaced with logger.debug
        logger.debug(vector)
        logger.debug(len(vector) % 10)
        return False
    try:
        vector = np.array([float(x) for x in vector])
    except (ValueError, TypeError):
        return False
    # plausible word-vector components are small in magnitude
    if np.all(np.abs(vector) < 12.):
        return len(vector)
    return False
Get the first word vector in a GloVE file and return its dimensionality or False if not a vector
168
22
225,392
def nlp(texts, lang='en', linesep=None, verbose=True):
    r"""Use the SpaCy parser to parse and tag one string or a sequence of strings.

    A single string is split on `linesep` (when given) or wrapped in a list and
    re-dispatched.  The SpaCy model is loaded once into the module-global
    `_parse`; when loading and downloading both fail, plain str.split is used.
    Returns a parsed doc, a list of them, None for an empty sequence, or a
    generator for length-less iterables.
    """
    # doesn't let you load a different model anywhere else in the module
    linesep = os.linesep if linesep in ('default', True, 1, 'os') else linesep
    # suppress the progress bar for tiny inputs or when not verbose
    tqdm_prog = no_tqdm if (not verbose or (hasattr(texts, '__len__') and len(texts) < 3)) else tqdm
    global _parse
    if not _parse:
        try:
            _parse = spacy.load(lang)
        except (OSError, IOError):
            try:
                spacy.cli.download(lang)
            except URLError:
                logger.warning("Unable to download Spacy language model '{}' so nlp(text) just returns text.split()".format(lang))
    parse = _parse or str.split
    # TODO: reverse this recursion (str first then sequence) to allow for sequences of sequences of texts
    if isinstance(texts, str):
        if linesep:
            return nlp(texts.split(linesep))
        else:
            return nlp([texts])
    if hasattr(texts, '__len__'):
        if len(texts) == 1:
            return parse(texts[0])
        elif len(texts) > 1:
            return [(parse or str.split)(text) for text in tqdm_prog(texts)]
        else:
            return None
    else:
        # return generator if sequence of strings doesn't have __len__ which means its an iterable or generator itself
        return (parse(text) for text in tqdm_prog(texts))
Use the SpaCy parser to parse and tag natural language strings .
367
14
225,393
def get_decoder(libdir=None, modeldir=None, lang='en-us'):
    """Create a pocketsphinx Decoder configured with the requested language model.

    Args:
        libdir: pocketsphinx install dir; its 'model' subdir is used when modeldir is None.
        modeldir: directory holding the acoustic model, LM and dictionary (defaults to MODELDIR).
        lang: language/model name, e.g. 'en-us'.
    """
    modeldir = modeldir or (os.path.join(libdir, 'model') if libdir else MODELDIR)
    libdir = os.path.dirname(modeldir)
    config = ps.Decoder.default_config()
    config.set_string('-hmm', os.path.join(modeldir, lang))
    config.set_string('-lm', os.path.join(modeldir, lang + '.lm.bin'))
    config.set_string('-dict', os.path.join(modeldir, 'cmudict-' + lang + '.dict'))
    # fix: a bare print(config) debug statement was left in library code
    logger.debug(config)
    return ps.Decoder(config)
Create a decoder with the requested language model
183
9
225,394
def transcribe(decoder, audio_file, libdir=None):
    """Decode streaming audio data from a raw binary file on disk.

    Bug fixes: the passed-in decoder was previously discarded and a fresh one
    built on every call, and the audio stream was never closed.
    """
    if decoder is None:
        decoder = get_decoder()
    decoder.start_utt()
    with open(audio_file, 'rb') as stream:
        while True:
            buf = stream.read(1024)
            if not buf:
                break
            decoder.process_raw(buf, False, False)
    decoder.end_utt()
    return evaluate_results(decoder)
Decode streaming audio data from raw binary file on disk .
92
12
225,395
def pre_process_data(filepath):
    """Load labeled review texts from <filepath>/pos and <filepath>/neg.

    Returns a shuffled list of (label, text) tuples with label 1 for positive
    and 0 for negative reviews.
    """
    dataset = []
    for label, subdir in ((1, 'pos'), (0, 'neg')):
        pattern = os.path.join(filepath, subdir, '*.txt')
        for fn in glob.glob(pattern):
            with open(fn, 'r') as fin:
                dataset.append((label, fin.read()))
    shuffle(dataset)
    return dataset
This is dependent on your training data source but we will try to generalize it as best as possible .
169
21
225,396
def pad_trunc(data, maxlen):
    """For a given dataset, pad each sample with zero vectors or truncate to maxlen.

    Bug fix: the padding branch previously appended zero vectors directly onto
    the caller's sample list, mutating the input data in place.
    """
    new_data = []
    # a zero vector matching the dimensionality of the word vectors
    zero_vector = [0.0] * len(data[0][0])
    for sample in data:
        if len(sample) > maxlen:
            temp = sample[:maxlen]
        elif len(sample) < maxlen:
            # copy before padding so the input sample is not mutated
            temp = list(sample)
            temp.extend([zero_vector] * (maxlen - len(sample)))
        else:
            temp = sample
        new_data.append(temp)
    return new_data
For a given dataset pad with zero vectors or truncate to maxlen
147
14
225,397
def clean_data(data):
    """Lowercase each text and listify it, replacing out-of-vocabulary characters with 'UNK'.

    `data` is a sequence of (label, text) pairs; only the text is used.
    """
    VALID = 'abcdefghijklmnopqrstuvwxyz123456789"\'?!.,:; '
    cleaned = []
    for sample in data:
        text = sample[1].lower()  # element 0 is the label
        cleaned.append([ch if ch in VALID else 'UNK' for ch in text])
    return cleaned
Shift to lower case replace unknowns with UNK and listify
116
13
225,398
def char_pad_trunc(data, maxlen):
    """Truncate each token sequence to maxlen, or right-pad it with 'PAD' tokens."""
    padded = []
    for sample in data:
        deficit = maxlen - len(sample)
        if deficit < 0:
            padded.append(sample[:maxlen])
        elif deficit > 0:
            padded.append(sample + ['PAD'] * deficit)
        else:
            padded.append(sample)
    return padded
We truncate to maxlen or add in PAD tokens
104
12
225,399
def create_dicts(data):
    """Build char->index and index->char lookup tables from an iterable of token sequences.

    Modified from the Keras LSTM example; index assignment follows set
    iteration order and is therefore arbitrary but mutually consistent.
    """
    vocabulary = set()
    for sample in data:
        vocabulary.update(sample)
    char_indices = {c: i for i, c in enumerate(vocabulary)}
    indices_char = {i: c for i, c in enumerate(vocabulary)}
    return char_indices, indices_char
Modified from Keras LSTM example
82
9