idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
236,900
def delete(self):
  """Delete the table.

  Returns:
    True if the table no longer exists after the delete attempt; False otherwise.
  """
  try:
    self._api.table_delete(self._name_parts)
  except google.datalab.utils.RequestException:
    # TODO(gram): May want to check the error reasons here and if it is not
    # because the file didn't exist, return an error.
    pass
  # NOTE: the original `except Exception as e: raise e` was a no-op that only
  # mangled the traceback; other exceptions now propagate naturally.
  return not self.exists()
Delete the table .
83
4
236,901
def create(self, schema, overwrite=False):
  """Create the table with the specified schema.

  Args:
    schema: the schema to use; either a Schema object or the raw list/dict form.
    overwrite: if True, delete any existing table first.

  Returns:
    The Table instance.

  Raises:
    Exception if the table already exists and overwrite was not requested.
  """
  if overwrite and self.exists():
    self.delete()
  if not isinstance(schema, _schema.Schema):
    # Convert to a Schema object
    schema = _schema.Schema(schema)
  # The original `try: ... except Exception as e: raise e` was a no-op; API
  # errors now propagate with their original tracebacks.
  response = self._api.tables_insert(self._name_parts, schema=schema._bq_schema)
  if 'selfLink' in response:
    self._schema = schema
    return self
  raise Exception("Table %s could not be created as it already exists" % self._full_name)
Create the table with the specified schema .
138
8
236,902
def _init_job_from_response(self, response):
  """Helper to build a Job instance from an API response, when one is present.

  Returns None when the response carries no job reference.
  """
  if not (response and 'jobReference' in response):
    return None
  job_id = response['jobReference']['jobId']
  return _job.Job(job_id=job_id, context=self._context)
Helper function to create a Job instance from a response .
62
11
236,903
def extract_async(self, destination, format='csv', csv_delimiter=None, csv_header=True,
                  compress=False):
  """Starts a job to export the table to GCS.

  Args:
    destination: the GCS URI(s) to export to.
    format: 'csv' or 'json' (case-insensitive).
    csv_delimiter: field delimiter for CSV; defaults to ',' when format is CSV.
    csv_header: whether to emit a header row for CSV output.
    compress: whether to compress the output.

  Returns:
    A Job for the export, or None.

  Raises:
    google.datalab.JobError wrapping any failure.
  """
  export_format = format.upper()
  if export_format == 'JSON':
    export_format = 'NEWLINE_DELIMITED_JSON'
  if export_format == 'CSV' and csv_delimiter is None:
    csv_delimiter = ','
  try:
    response = self._api.table_extract(self._name_parts, destination, export_format,
                                       compress, csv_delimiter, csv_header)
    return self._init_job_from_response(response)
  except Exception as e:
    raise google.datalab.JobError(location=traceback.format_exc(), message=str(e),
                                  reason=str(type(e)))
Starts a job to export the table to GCS .
186
12
236,904
def extract(self, destination, format='csv', csv_delimiter=None, csv_header=True,
            compress=False):
  """Exports the table to GCS; blocks until complete.

  Delegates to extract_async() and waits for the resulting job, if any.
  """
  job = self.extract_async(destination, format=format, csv_delimiter=csv_delimiter,
                           csv_header=csv_header, compress=compress)
  if job is None:
    return None
  job.wait()
  return job
Exports the table to GCS ; blocks until complete .
90
12
236,905
def load_async(self, source, mode='create', source_format='csv', csv_options=None,
               ignore_unknown_values=False, max_bad_records=0):
  """Starts importing a table from GCS and returns a Job for completion.

  Args:
    source: the GCS URI(s) to load from.
    mode: one of 'create', 'append' or 'overwrite'.
    source_format: 'csv' or 'json'.
    csv_options: a CSVOptions object; a default one is used when None.
    ignore_unknown_values: whether to ignore unrecognized fields.
    max_bad_records: how many bad records to tolerate before failing.

  Returns:
    A Job for the load, or None.

  Raises:
    Exception for an invalid source_format or mode.
  """
  if source_format == 'csv':
    source_format = 'CSV'
  elif source_format == 'json':
    source_format = 'NEWLINE_DELIMITED_JSON'
  else:
    raise Exception("Invalid source format %s" % source_format)
  # Membership test replaces the original chained `or` comparison.
  if mode not in ('create', 'append', 'overwrite'):
    raise Exception("Invalid mode %s" % mode)
  if csv_options is None:
    csv_options = _csv_options.CSVOptions()
  # The original `try: ... except Exception as e: raise e` was a no-op and
  # has been removed; errors propagate with their original tracebacks.
  response = self._api.jobs_insert_load(
      source, self._name_parts,
      append=(mode == 'append'),
      overwrite=(mode == 'overwrite'),
      create=(mode == 'create'),
      source_format=source_format,
      field_delimiter=csv_options.delimiter,
      allow_jagged_rows=csv_options.allow_jagged_rows,
      allow_quoted_newlines=csv_options.allow_quoted_newlines,
      encoding=csv_options.encoding.upper(),
      ignore_unknown_values=ignore_unknown_values,
      max_bad_records=max_bad_records,
      quote=csv_options.quote,
      skip_leading_rows=csv_options.skip_leading_rows)
  return self._init_job_from_response(response)
Starts importing a table from GCS and returns a Future.
368
13
236,906
def load(self, source, mode='create', source_format='csv', csv_options=None,
         ignore_unknown_values=False, max_bad_records=0):
  """Load the table from GCS, blocking until the load job completes.

  Delegates to load_async() and waits on the resulting job, if any.
  """
  job = self.load_async(source,
                        mode=mode,
                        source_format=source_format,
                        csv_options=csv_options,
                        ignore_unknown_values=ignore_unknown_values,
                        max_bad_records=max_bad_records)
  if job is None:
    return None
  job.wait()
  return job
Load the table from GCS .
118
7
236,907
def _get_row_fetcher(self, start_row=0, max_rows=None, page_size=_DEFAULT_PAGE_SIZE):
  """Get a function that can retrieve a page of rows.

  The returned function takes (page_token, count) and returns a
  (rows, next_page_token) pair, where next_page_token is None when done.

  Raises:
    Exception when a negative start_row is used on a table of unknown length.
  """
  if not start_row:
    start_row = 0
  elif start_row < 0:  # We are measuring from the table end
    if self.length >= 0:
      start_row += self.length
    else:
      raise Exception('Cannot use negative indices for table of unknown length')

  schema = self.schema._bq_schema
  name_parts = self._name_parts

  def _retrieve_rows(page_token, count):
    page_rows = []
    if max_rows and count >= max_rows:
      page_token = None
    else:
      # Clamp the page size so we never fetch past max_rows.
      if max_rows and page_size > (max_rows - count):
        max_results = max_rows - count
      else:
        max_results = page_size
      # The original wrapped this call in `except Exception as e: raise e`,
      # a no-op that has been removed.
      if page_token:
        response = self._api.tabledata_list(name_parts, page_token=page_token,
                                            max_results=max_results)
      else:
        response = self._api.tabledata_list(name_parts, start_index=start_row,
                                            max_results=max_results)
      page_token = response['pageToken'] if 'pageToken' in response else None
      if 'rows' in response:
        page_rows = response['rows']

    rows = [_parser.Parser.parse_row(schema, row_dict) for row_dict in page_rows]
    return rows, page_token

  return _retrieve_rows
Get a function that can retrieve a page of rows .
356
11
236,908
def schema(self):
  """Retrieves the schema of the table, loading table metadata on first access."""
  if self._schema:
    return self._schema
  try:
    self._load_info()
    self._schema = _schema.Schema(self._info['schema']['fields'])
  except KeyError:
    raise Exception('Unexpected table response: missing schema')
  return self._schema
Retrieves the schema of the table .
76
9
236,909
def snapshot(self, at):
  """Return a new Table which is a snapshot of this table at the specified time.

  Args:
    at: the snapshot time, in whatever forms _convert_decorator_time accepts.

  Raises:
    Exception when called on a table that already carries a decorator.
  """
  if self._name_parts.decorator != '':
    raise Exception("Cannot use snapshot() on an already decorated table")
  stamp = Table._convert_decorator_time(at)
  return Table("%s@%s" % (self._full_name, str(stamp)), context=self._context)
Return a new Table which is a snapshot of this table at the specified time .
84
16
236,910
def window(self, begin, end=None):
  """Return a new Table limited to the rows added during the specified time range.

  Args:
    begin: start of the window.
    end: end of the window; defaults to "now" (relative zero offset when begin
      was relative, otherwise the current absolute UTC time).

  Raises:
    Exception on a decorated table, mixed absolute/relative bounds, or
    begin after end.
  """
  if self._name_parts.decorator != '':
    raise Exception("Cannot use window() on an already decorated table")
  start = Table._convert_decorator_time(begin)
  if end is None:
    if isinstance(begin, datetime.timedelta):
      end = datetime.timedelta(0)
    else:
      end = datetime.datetime.utcnow()
  stop = Table._convert_decorator_time(end)
  # Both values must have the same sign
  if (start > 0 >= stop) or (stop > 0 >= start):
    raise Exception("window: Between arguments must both be absolute or relative: %s, %s" %
                    (str(begin), str(end)))
  # start must be less than stop
  if start > stop:
    raise Exception("window: Between arguments: begin must be before end: %s, %s" %
                    (str(begin), str(end)))
  return Table("%s@%s-%s" % (self._full_name, str(start), str(stop)),
               context=self._context)
Return a new Table limited to the rows added to this Table during the specified time range .
260
18
236,911
def serialize_example(transformed_json_data, features, feature_indices, target_name):
  """Makes an instance of data in libsvm format.

  Args:
    transformed_json_data: dict of transformed data, column name -> list of values.
    features: dict of feature configs, keyed by feature name, each with a
      'transform' entry.
    feature_indices: iterable of (name, info) pairs where info carries
      'index_start' and (for multi-valued transforms) 'size'.
    target_name: name of the target column; its first value becomes the label.

  Returns:
    The libsvm-formatted line: "<label> <index>:<value> ...".
  """
  # The original imported six and tensorflow here but never used them; the
  # unused imports have been removed.
  from trainer import feature_transforms

  line = str(transformed_json_data[target_name][0])
  for name, info in feature_indices:
    transform = features[name]['transform']
    if transform in [feature_transforms.IDENTITY_TRANSFORM,
                     feature_transforms.SCALE_TRANSFORM]:
      # Single numeric value at the feature's base index.
      line += ' %d:%s' % (info['index_start'], str(transformed_json_data[name][0]))
    elif transform in [feature_transforms.ONE_HOT_TRANSFORM,
                       feature_transforms.MULTI_HOT_TRANSFORM]:
      # Emit a 1 for every index present in the transformed data.
      for i in range(info['size']):
        if i in transformed_json_data[name]:
          line += ' %d:1' % (info['index_start'] + i)
    elif transform in [feature_transforms.IMAGE_TRANSFORM]:
      # Dense embedding: one value per slot.
      for i in range(info['size']):
        line += ' %d:%s' % (info['index_start'] + i, str(transformed_json_data[name][i]))
  return line
Makes an instance of data in libsvm format .
308
12
236,912
def delete(self, delete_contents=False):
  """Issues a request to delete the dataset.

  Args:
    delete_contents: if True, delete the dataset even if it contains tables.

  Returns:
    None.

  Raises:
    Exception when the dataset does not exist.
  """
  if not self.exists():
    raise Exception('Cannot delete non-existent dataset %s' % self._full_name)
  # The original `try: ... except Exception as e: raise e` was a no-op and
  # has been removed; API errors propagate with their original tracebacks.
  self._api.datasets_delete(self._name_parts, delete_contents=delete_contents)
  # Invalidate the cached metadata.
  self._info = None
  return None
Issues a request to delete the dataset .
86
9
236,913
def create(self, friendly_name=None, description=None):
  """Creates the Dataset with the specified friendly name and description.

  No-op when the dataset already exists.

  Returns:
    The Dataset instance.

  Raises:
    Exception when the create request did not succeed.
  """
  if not self.exists():
    # The original `try: ... except Exception as e: raise e` was a no-op and
    # has been removed; API errors propagate with their original tracebacks.
    response = self._api.datasets_insert(self._name_parts,
                                         friendly_name=friendly_name,
                                         description=description)
    if 'selfLink' not in response:
      raise Exception("Could not create dataset %s" % self._full_name)
  return self
Creates the Dataset with the specified friendly name and description .
93
14
236,914
def update(self, friendly_name=None, description=None):
  """Selectively updates Dataset information.

  Args:
    friendly_name: new friendly name, or None to leave unchanged.
    description: new description, or None to leave unchanged.
  """
  self._get_info()
  if self._info:
    if friendly_name:
      self._info['friendlyName'] = friendly_name
    if description:
      self._info['description'] = description
    # The original `except Exception as e: raise e` was a no-op and has been
    # removed; the finally clause still invalidates the cached metadata.
    try:
      self._api.datasets_update(self._name_parts, self._info)
    finally:
      self._info = None
Selectively updates Dataset information .
101
8
236,915
def query(self):
  """The Query that defines the view, or None when unavailable."""
  if not self.exists():
    return None
  self._table._load_info()
  info = self._table._info
  if 'view' in info and 'query' in info['view']:
    return _query.Query(info['view']['query'])
  return None
The Query that defines the view .
83
7
236,916
def run_numerical_categorical_analysis(args, schema_list):
  """Makes the numerical and categorical analysis files.

  Scans the matched CSV input files once, accumulating min/max/mean for
  numeric columns and the unique-value vocabulary for string columns, then
  writes the numerical stats as JSON and one vocab file per string column.
  """
  header = [column['name'] for column in schema_list]
  input_files = file_io.get_matching_files(args.input_file_pattern)

  # Check the schema is valid
  for col_schema in schema_list:
    col_type = col_schema['type'].lower()
    if col_type != 'string' and col_type != 'integer' and col_type != 'float':
      raise ValueError('Schema contains an unsupported type %s.' % col_type)

  # initialize the results
  def _init_numerical_results():
    return {'min': float('inf'), 'max': float('-inf'), 'count': 0, 'sum': 0.0}
  numerical_results = collections.defaultdict(_init_numerical_results)
  categorical_results = collections.defaultdict(set)

  # for each file, update the numerical stats from that file, and update the set
  # of unique labels.
  for input_file in input_files:
    with file_io.FileIO(input_file, 'r') as f:
      for line in f:
        parsed_line = dict(zip(header, line.strip().split(',')))
        for col_schema in schema_list:
          col_name = col_schema['name']
          col_type = col_schema['type']
          if col_type.lower() == 'string':
            categorical_results[col_name].update([parsed_line[col_name]])
          else:
            # numerical column. if empty, skip
            if not parsed_line[col_name].strip():
              continue
            value = float(parsed_line[col_name])
            stats = numerical_results[col_name]
            stats['min'] = min(stats['min'], value)
            stats['max'] = max(stats['max'], value)
            stats['count'] += 1
            stats['sum'] += value

  # Update numerical_results to just have min/max/mean
  for col_schema in schema_list:
    if col_schema['type'].lower() != 'string':
      col_name = col_schema['name']
      stats = numerical_results[col_name]
      mean = stats['sum'] / stats['count']
      del stats['sum']
      del stats['count']
      stats['mean'] = mean

  # Write the numerical_results to a json file.
  file_io.write_string_to_file(
      os.path.join(args.output_dir, NUMERICAL_ANALYSIS_FILE),
      json.dumps(numerical_results, indent=2, separators=(',', ': ')))

  # Write the vocab files. Each label is on its own line.
  for name, unique_labels in six.iteritems(categorical_results):
    labels = '\n'.join(list(unique_labels))
    file_io.write_string_to_file(
        os.path.join(args.output_dir, CATEGORICAL_ANALYSIS_FILE % name), labels)
Makes the numerical and categorical analysis files .
818
10
236,917
def run_analysis(args):
  """Builds the analysis files for training."""
  schema_path = args.schema_file
  # Read the schema and input feature types
  schema_list = json.loads(file_io.read_file_to_string(schema_path))
  run_numerical_categorical_analysis(args, schema_list)
  # Also save a copy of the schema in the output folder.
  file_io.copy(schema_path, os.path.join(args.output_dir, SCHEMA_FILE), overwrite=True)
Builds the analysis files for training.
108
8
236,918
def _repr_html_(self):
  """Generates the HTML representation: a div plus an optional require() script."""
  if self._class:
    markup = '<div id="hh_%s" class="%s">%s</div>' % (self._id, self._class, self._markup)
  else:
    markup = '<div id="hh_%s">%s</div>' % (self._id, self._markup)
  parts = [markup]
  if len(self._script) != 0:
    # Wire the script through require(), passing each dependency by name.
    module_names = ','.join('"%s"' % d[0] for d in self._dependencies)
    arg_names = ','.join(d[1] for d in self._dependencies)
    parts.append('<script>')
    parts.append('require([')
    parts.append(module_names)
    parts.append('], function(')
    parts.append(arg_names)
    parts.append(') {')
    parts.append(self._script)
    parts.append('});')
    parts.append('</script>')
  return ''.join(parts)
Generates the HTML representation .
239
6
236,919
def _render_objects(self, items, attributes=None, datatype='object'):
  """Renders an HTML table with the specified list of objects.

  Supports plain objects, dicts, and Google chart-data structures, selected
  via `datatype` ('object', 'dict' or 'chartdata').
  """
  if not items:
    return
  if datatype == 'chartdata':
    # Chart data carries its own column labels; flatten to its row list.
    if not attributes:
      attributes = [items['cols'][i]['label'] for i in range(0, len(items['cols']))]
    items = items['rows']
    indices = {attributes[i]: i for i in range(0, len(attributes))}
  num_segments = len(self._segments)
  self._segments.append('<table>')
  first = True
  for o in items:
    if first:
      first = False
      if datatype == 'dict' and not attributes:
        attributes = list(o.keys())
      if attributes is not None:
        # Emit the header row once, from the first item.
        self._segments.append('<tr>')
        for attr in attributes:
          self._segments.append('<th>%s</th>' % attr)
        self._segments.append('</tr>')
    self._segments.append('<tr>')
    if attributes is None:
      self._segments.append('<td>%s</td>' % HtmlBuilder._format(o))
    else:
      for attr in attributes:
        if datatype == 'dict':
          self._segments.append('<td>%s</td>' %
                                HtmlBuilder._format(o.get(attr, None), nbsp=True))
        elif datatype == 'chartdata':
          self._segments.append('<td>%s</td>' %
                                HtmlBuilder._format(o['c'][indices[attr]]['v'], nbsp=True))
        else:
          self._segments.append('<td>%s</td>' %
                                HtmlBuilder._format(o.__getattribute__(attr), nbsp=True))
    self._segments.append('</tr>')
  self._segments.append('</table>')
  if first:
    # The table was empty; drop it from the segments.
    self._segments = self._segments[:num_segments]
Renders an HTML table with the specified list of objects .
509
12
236,920
def _render_list(self, items, empty='<pre>&lt;empty&gt;</pre>'):
  """Renders an HTML list with the specified list of strings.

  Emits the `empty` placeholder when there is nothing to show.
  """
  if not items or len(items) == 0:
    self._segments.append(empty)
    return
  segments = self._segments
  segments.append('<ul>')
  for entry in items:
    segments.append('<li>')
    segments.append(str(entry))
    segments.append('</li>')
  segments.append('</ul>')
Renders an HTML list with the specified list of strings .
125
12
236,921
def sample(self, fields=None, count=5, sampling=None, use_cache=True, dialect=None,
           billing_tier=None):
  """Retrieves a sampling of data from the table."""
  # Do import here to avoid top-level circular dependencies.
  from . import _query
  sql = self._repr_sql_()
  sampling_query = _query.Query.sampling_query(sql, context=self._context, count=count,
                                               fields=fields, sampling=sampling)
  return sampling_query.results(use_cache=use_cache, dialect=dialect,
                                billing_tier=billing_tier)
Retrieves a sampling of data from the table .
117
11
236,922
def _encode_dict_as_row(record, column_name_map):
  """Encode a dictionary representing a table row in a form suitable for streaming to BQ.

  Mutates `record` in place: converts datetime-like values to ISO strings and
  renames keys containing invalid column-name characters (the mapping is
  cached in `column_name_map`). Returns the same dict.
  """
  for key in list(record.keys()):
    value = record[key]
    # If the column is a date, convert to ISO string.
    if isinstance(value, (pandas.Timestamp, datetime.datetime)):
      value = record[key] = record[key].isoformat()
    # If the key has invalid characters clean it up (and cache the result).
    if key not in column_name_map:
      sanitized = ''.join(c for c in key if c in Table._VALID_COLUMN_NAME_CHARACTERS)
      column_name_map[key] = sanitized
    clean_key = column_name_map[key]
    if clean_key != key:
      record[clean_key] = value
      del record[key]
  return record
Encode a dictionary representing a table row in a form suitable for streaming to BQ .
182
18
236,923
def range(self, start_row=0, max_rows=None):
  """Get an iterator to iterate through a set of table rows."""
  row_fetcher = self._get_row_fetcher(start_row=start_row, max_rows=max_rows)
  return iter(datalab.utils.Iterator(row_fetcher))
Get an iterator to iterate through a set of table rows .
64
13
236,924
def to_file_async(self, destination, format='csv', csv_delimiter=',', csv_header=True):
  """Start saving the results to a local file in CSV format and return a Job for completion.

  NOTE: currently delegates to the synchronous to_file() and returns nothing.
  """
  options = dict(format=format, csv_delimiter=csv_delimiter, csv_header=csv_header)
  self.to_file(destination, **options)
Start saving the results to a local file in CSV format and return a Job for completion .
72
18
236,925
def update(self, friendly_name=None, description=None, expiry=None, schema=None):
  """Selectively updates Table information.

  Args:
    friendly_name: new friendly name, or None to leave unchanged.
    description: new description, or None to leave unchanged.
    expiry: new expiration; a datetime is converted to epoch milliseconds.
    schema: new schema; a Schema object or the raw list form.
  """
  self._load_info()
  if friendly_name is not None:
    self._info['friendlyName'] = friendly_name
  if description is not None:
    self._info['description'] = description
  if expiry is not None:
    if isinstance(expiry, datetime.datetime):
      # The API expects milliseconds since the epoch.
      expiry = calendar.timegm(expiry.utctimetuple()) * 1000
    self._info['expirationTime'] = expiry
  if schema is not None:
    if isinstance(schema, _schema.Schema):
      schema = schema._bq_schema
    self._info['schema'] = {'fields': schema}
  # The original trailing `except Exception as e: raise e` was a no-op and
  # has been removed; other exceptions propagate with original tracebacks.
  try:
    self._api.table_update(self._name_parts, self._info)
  except datalab.utils.RequestException:
    # The cached metadata is out of sync now; abandon it.
    self._info = None
Selectively updates Table information .
235
6
236,926
def to_query(self, fields=None):
  """Return a Query selecting the given fields (default all) from this Table."""
  # Do import here to avoid top-level circular dependencies.
  from . import _query
  if fields is None:
    projection = '*'
  elif isinstance(fields, list):
    projection = ','.join(fields)
  else:
    projection = fields
  return _query.Query('SELECT %s FROM %s' % (projection, self._repr_sql_()),
                      context=self._context)
Return a Query for this Table .
95
7
236,927
def copy_to(self, new_key, bucket=None):
  """Copies this item to the specified new key.

  Args:
    new_key: the key to copy to.
    bucket: the destination bucket; defaults to this item's bucket.

  Returns:
    An Item for the newly created copy.
  """
  if bucket is None:
    bucket = self._bucket
  # The original `try: ... except Exception as e: raise e` was a no-op and
  # has been removed; API errors propagate with their original tracebacks.
  new_info = self._api.objects_copy(self._bucket, self._key, bucket, new_key)
  return Item(bucket, new_key, new_info, context=self._context)
Copies this item to the specified new key .
87
10
236,928
def exists(self):
  """Checks if the item exists.

  Returns:
    True when metadata could be retrieved, False when the request failed
    (e.g. the object is missing).
  """
  try:
    return self.metadata is not None
  except datalab.utils.RequestException:
    return False
  # The original trailing `except Exception as e: raise e` was a no-op and
  # has been removed; other exceptions propagate with original tracebacks.
Checks if the item exists .
35
7
236,929
def delete(self):
  """Deletes this item from its bucket (no-op when it does not exist)."""
  if self.exists():
    # The original `try: ... except Exception as e: raise e` was a no-op and
    # has been removed; API errors propagate with their original tracebacks.
    self._api.objects_delete(self._bucket, self._key)
Deletes this item from its bucket .
42
8
236,930
def write_to(self, content, content_type):
  """Writes text content to this item.

  Args:
    content: the content to upload.
    content_type: the MIME type of the content.
  """
  # The original `try: ... except Exception as e: raise e` was a no-op and
  # has been removed; API errors propagate with their original tracebacks.
  self._api.object_upload(self._bucket, self._key, content, content_type)
Writes text content to this item .
49
8
236,931
def contains(self, key):
  """Checks if the specified item exists in this bucket.

  Returns:
    True when the object exists; False on a 404.

  Raises:
    The underlying RequestException for any non-404 failure.
  """
  try:
    self._api.objects_get(self._bucket, key)
  except datalab.utils.RequestException as e:
    if e.status == 404:
      return False
    # Bare `raise` (instead of `raise e`) preserves the original traceback.
    raise
  # The original trailing `except Exception as e: raise e` was a no-op and
  # has been removed.
  return True
Checks if the specified item exists .
60
8
236,932
def request(url, args=None, data=None, headers=None, method=None, credentials=None,
            raw_response=False, stats=None):
  """Issues HTTP requests.

  Args:
    url: the URL to request.
    args: optional dict of querystring arguments.
    data: optional request body; non-empty data without a Content-Type is
      JSON-serialized.
    headers: optional dict of request headers.
    method: HTTP method; defaults to POST when data is given, else GET.
    credentials: optional credentials used to authorize the request.
    raw_response: when True, return the raw content rather than parsed JSON.
    stats: optional dict that receives duration/data_size/status info.

  Returns:
    The parsed JSON response (or raw content when raw_response is True).

  Raises:
    RequestException on a non-2xx status; Exception on transport/parse errors.
  """
  if headers is None:
    headers = {}
  headers['user-agent'] = 'GoogleCloudDataLab/1.0'

  # Add querystring to the URL if there are any arguments.
  if args is not None:
    qs = urllib.parse.urlencode(args)
    url = url + '?' + qs

  # Setup method to POST if unspecified, and appropriate request headers
  # if there is data to be sent within the request.
  if data is not None:
    if method is None:
      method = 'POST'
    if data != '':
      # If there is a content type specified, use it (and the data) as-is.
      # Otherwise, assume JSON, and serialize the data object.
      if 'Content-Type' not in headers:
        data = json.dumps(data)
        headers['Content-Type'] = 'application/json'
    headers['Content-Length'] = str(len(data))
  else:
    if method == 'POST':
      headers['Content-Length'] = '0'

  # If the method is still unset, i.e. it was unspecified, and there
  # was no data to be POSTed, then default to GET request.
  if method is None:
    method = 'GET'

  http = Http.http
  # Authorize with credentials if given
  if credentials is not None:
    # Make a copy of the shared http instance before we modify it.
    http = copy.copy(http)
    http = google_auth_httplib2.AuthorizedHttp(credentials)

  if stats is not None:
    stats['duration'] = datetime.datetime.utcnow()

  response = None
  try:
    log.debug('request: method[%(method)s], url[%(url)s], body[%(data)s]' % locals())
    response, content = http.request(url, method=method, body=data, headers=headers)
    if 200 <= response.status < 300:
      if raw_response:
        return content
      if type(content) == str:
        return json.loads(content)
      else:
        return json.loads(str(content, encoding='UTF-8'))
    else:
      raise RequestException(response.status, content)
  except ValueError:
    raise Exception('Failed to process HTTP response.')
  except httplib2.HttpLib2Error:
    raise Exception('Failed to send HTTP request.')
  finally:
    if stats is not None:
      # BUGFIX: previously `len(data)` raised TypeError when data was None, and
      # `response.status` raised AttributeError when the request failed before a
      # response arrived — both inside `finally`, masking the real exception.
      stats['data_size'] = len(data) if data is not None else 0
      stats['status'] = response.status if response is not None else None
      stats['duration'] = (datetime.datetime.utcnow() - stats['duration']).total_seconds()
Issues HTTP requests .
610
5
236,933
def _add_command(parser, subparser_fn, handler, cell_required=False, cell_prohibited=False):
  """Create and initialize a pipeline subcommand handler."""
  sub_parser = subparser_fn(parser)

  # Bind the dispatcher as the subcommand's default func (named function
  # instead of the original lambda).
  def _invoke(args, cell):
    return _dispatch_handler(args, cell, sub_parser, handler,
                             cell_required=cell_required,
                             cell_prohibited=cell_prohibited)

  sub_parser.set_defaults(func=_invoke)
Create and initialize a pipeline subcommand handler .
91
9
236,934
def pipeline(line, cell=None):
  """Implements the pipeline cell magic for ipython notebooks."""
  handle = google.datalab.utils.commands.handle_magic_line
  return handle(line, cell, _pipeline_parser)
Implements the pipeline cell magic for ipython notebooks .
39
12
236,935
def _dispatch_handler(args, cell, parser, handler, cell_required=False, cell_prohibited=False):
  """Makes sure cell magics include a cell and line magics don't, then dispatches.

  Line magics (cell_prohibited) are invoked as handler(args); cell magics as
  handler(args, cell).
  """
  if cell_prohibited:
    has_cell = bool(cell and len(cell.strip()))
    if has_cell:
      parser.print_help()
      raise Exception('Additional data is not supported with the %s command.' % parser.prog)
    return handler(args)
  if cell_required and not cell:
    parser.print_help()
    raise Exception('The %s command requires additional data' % parser.prog)
  return handler(args, cell)
Makes sure cell magics include a cell and line magics don't, before dispatching to the handler.
118
20
236,936
def expand_defaults(schema, features):
  """Add to features any default transformations.

  Fills in missing 'source_column' entries, validates that every source
  column and schema type is supported, and adds a default transform for each
  schema column not already consumed by a feature. Mutates `features`.
  """
  schema_names = [col['name'] for col in schema]

  # Add missing source columns
  for feature_name, transform in six.iteritems(features):
    if 'source_column' not in transform:
      transform['source_column'] = feature_name

  # Check source columns are in the schema and collect which are used.
  used_schema_columns = []
  for feature_name, transform in six.iteritems(features):
    source = transform['source_column']
    if source not in schema_names:
      raise ValueError('source column %s is not in the schema for transform %s' %
                       (source, feature_name))
    used_schema_columns.append(source)

  # Update default transformation based on schema.
  for col_schema in schema:
    schema_name = col_schema['name']
    schema_type = col_schema['type'].lower()

    if schema_type not in constant.NUMERIC_SCHEMA + [constant.STRING_SCHEMA]:
      raise ValueError('Only the following schema types are supported: %s' %
                       ' '.join(constant.NUMERIC_SCHEMA + [constant.STRING_SCHEMA]))

    if schema_name not in used_schema_columns:
      # add the default transform to the features
      if schema_type in constant.NUMERIC_SCHEMA:
        features[schema_name] = {'transform': constant.DEFAULT_NUMERIC_TRANSFORM,
                                 'source_column': schema_name}
      elif schema_type == constant.STRING_SCHEMA:
        features[schema_name] = {'transform': constant.DEFAULT_CATEGORICAL_TRANSFORM,
                                 'source_column': schema_name}
      else:
        raise NotImplementedError('Unknown type %s' % schema_type)
Add to features any default transformations .
432
7
236,937
def _sample_cell(args, cell_body):
  """Implements the bigquery sample cell magic for ipython notebooks.

  Resolves the sampling source (query, table or view), builds the requested
  Sampling strategy, and returns the sampled results (or a profile of them).
  """
  env = datalab.utils.commands.notebook_environment()
  query = None
  table = None
  view = None

  # Resolve the source: explicit query/table/view argument, else the cell body.
  if args['query']:
    query = _get_query_argument(args, cell_body, env)
  elif args['table']:
    table = _get_table(args['table'])
  elif args['view']:
    view = datalab.utils.commands.get_notebook_item(args['view'])
    if not isinstance(view, datalab.bigquery.View):
      raise Exception('%s is not a view' % args['view'])
  else:
    query = datalab.bigquery.Query(cell_body, values=env)

  count = args['count']
  method = args['method']
  if method == 'random':
    sampling = datalab.bigquery.Sampling.random(percent=args['percent'], count=count)
  elif method == 'hashed':
    sampling = datalab.bigquery.Sampling.hashed(field_name=args['field'],
                                                percent=args['percent'],
                                                count=count)
  elif method == 'sorted':
    ascending = args['order'] == 'ascending'
    sampling = datalab.bigquery.Sampling.sorted(args['field'],
                                                ascending=ascending,
                                                count=count)
  elif method == 'limit':
    sampling = datalab.bigquery.Sampling.default(count=count)
  else:
    sampling = datalab.bigquery.Sampling.default(count=count)

  if query:
    results = query.sample(sampling=sampling, dialect=args['dialect'],
                           billing_tier=args['billing'])
  elif view:
    results = view.sample(sampling=sampling)
  else:
    results = table.sample(sampling=sampling)

  if args['verbose']:
    print(results.sql)
  if args['profile']:
    return datalab.utils.commands.profile_df(results.to_dataframe())
  else:
    return results
Implements the bigquery sample cell magic for ipython notebooks .
483
14
236,938
def _create_cell(args, cell_body):
  """Implements the BigQuery cell magic used to create datasets and tables.

  Errors are reported via print() rather than raised, matching magic behavior.
  """
  name = args['name']
  if args['command'] == 'dataset':
    try:
      datalab.bigquery.Dataset(name).create(friendly_name=args['friendly'],
                                            description=cell_body)
    except Exception as e:
      print('Failed to create dataset %s: %s' % (name, e))
  elif cell_body is None:
    # Tables require a schema in the cell body.
    print('Failed to create %s: no schema specified' % name)
  else:
    try:
      env = datalab.utils.commands.notebook_environment()
      record = datalab.utils.commands.parse_config(cell_body, env, as_dict=False)
      schema = datalab.bigquery.Schema(record)
      datalab.bigquery.Table(name).create(schema=schema, overwrite=args['overwrite'])
    except Exception as e:
      print('Failed to create table %s: %s' % (name, e))
Implements the BigQuery cell magic used to create datasets and tables .
251
15
236,939
def _delete_cell(args, _):
  """Implements the BigQuery cell magic used to delete datasets and tables."""
  # TODO(gram): add support for wildchars and multiple arguments at some point. The latter is
  # easy, the former a bit more tricky if non-default projects are involved.
  name = args['name']
  if args['command'] == 'dataset':
    try:
      datalab.bigquery.Dataset(name).delete()
    except Exception as e:
      print('Failed to delete dataset %s: %s' % (name, e))
  else:
    try:
      datalab.bigquery.Table(name).delete()
    except Exception as e:
      print('Failed to delete table %s: %s' % (name, e))
Implements the BigQuery cell magic used to delete datasets and tables .
174
15
236,940
def _udf_cell(args, js):
  """Implements the bigquery_udf cell magic for ipython notebooks.

  Parses the JavaScript cell for jsdoc-style input/output specs, optional
  @import directives and support code, then binds a UDF object to the
  requested module variable in the notebook environment.
  """
  variable_name = args['module']
  if not variable_name:
    raise Exception('Declaration must be of the form %%bigquery udf --module <variable name>')

  # Parse out the input and output specification
  spec_pattern = r'\{\{([^}]+)\}\}'
  spec_part_pattern = r'[a-z_][a-z0-9_]*'

  specs = re.findall(spec_pattern, js)
  if len(specs) < 2:
    raise Exception('The JavaScript must declare the input row and output emitter parameters '
                    'using valid jsdoc format comments.\n'
                    'The input row param declaration must be typed as {{field:type, field2:type}} '
                    'and the output emitter param declaration must be typed as '
                    'function({{field:type, field2:type}}.')

  inputs = []
  input_spec_parts = re.findall(spec_part_pattern, specs[0], flags=re.IGNORECASE)
  if len(input_spec_parts) % 2 != 0:
    raise Exception('Invalid input row param declaration. The jsdoc type expression must '
                    'define an object with field and type pairs.')
  for n, t in zip(input_spec_parts[0::2], input_spec_parts[1::2]):
    inputs.append((n, t))

  outputs = []
  output_spec_parts = re.findall(spec_part_pattern, specs[1], flags=re.IGNORECASE)
  if len(output_spec_parts) % 2 != 0:
    raise Exception('Invalid output emitter param declaration. The jsdoc type expression must '
                    'define a function accepting an an object with field and type pairs.')
  for n, t in zip(output_spec_parts[0::2], output_spec_parts[1::2]):
    outputs.append((n, t))

  # Look for imports. We use a non-standard @import keyword; we could alternatively use @requires.
  # Object names can contain any characters except \r and \n.
  import_pattern = r'@import[\s]+(gs://[a-z\d][a-z\d_\.\-]*[a-z\d]/[^\n\r]+)'
  imports = re.findall(import_pattern, js)

  # Split the cell if necessary. We look for a 'function(' with no name and a header comment
  # block with @param and assume this is the primary function, up to a closing '}' at the start
  # of the line. The remaining cell content is used as support code.
  split_pattern = r'(.*)(/\*.*?@param.*?@param.*?\*/\w*\n\w*function\w*\(.*?^}\n?)(.*)'
  parts = re.match(split_pattern, js, re.MULTILINE | re.DOTALL)
  support_code = ''
  if parts:
    support_code = (parts.group(1) + parts.group(3)).strip()
    if len(support_code):
      js = parts.group(2)

  # Finally build the UDF object
  udf = datalab.bigquery.UDF(inputs, outputs, variable_name, js, support_code, imports)
  datalab.utils.commands.notebook_environment()[variable_name] = udf
Implements the bigquery_udf cell magic for ipython notebooks .
776
16
236,941
def _pipeline_cell(args, cell_body):
  """Implements the BigQuery cell magic used to validate, execute or deploy BQ pipelines."""
  if args['action'] == 'deploy':
    raise Exception('Deploying a pipeline is not yet supported')

  # Expose only UDF values from the notebook environment to the query.
  env = {key: value
         for key, value in datalab.utils.commands.notebook_environment().items()
         if isinstance(value, datalab.bigquery._udf.UDF)}
  query = _get_query_argument(args, cell_body, env)

  if args['verbose']:
    print(query.sql)
  if args['action'] == 'dryrun':
    print(query.sql)
    result = query.execute_dry_run()
    return datalab.bigquery._query_stats.QueryStats(
        total_bytes=result['totalBytesProcessed'], is_cached=result['cacheHit'])
  if args['action'] == 'run':
    return query.execute(args['target'], table_mode=args['mode'],
                         use_cache=not args['nocache'],
                         allow_large_results=args['large'],
                         dialect=args['dialect'],
                         billing_tier=args['billing']).results
Implements the BigQuery cell magic used to validate execute or deploy BQ pipelines .
288
18
236,942
def _table_line(args):
  """Implements the BigQuery table magic used to display tables."""
  # TODO(gram): It would be good to turn _table_viewer into a class that has a registered
  # renderer. That would allow this to return a table viewer object which is easier to test.
  name = args['table']
  table = _get_table(name)
  if not (table and table.exists()):
    raise Exception('Table %s does not exist; cannot display' % name)
  fields = args['cols'].split(',') if args['cols'] else None
  html = _table_viewer(table, rows_per_page=args['rows'], fields=fields)
  return IPython.core.display.HTML(html)
Implements the BigQuery table magic used to display tables .
162
13
236,943
def _get_schema(name):
  """Given a variable or table name, get the Schema if it exists."""
  obj = datalab.utils.commands.get_notebook_item(name)
  if not obj:
    obj = _get_table(name)

  if isinstance(obj, datalab.bigquery.Schema):
    return obj
  # Objects such as tables expose their schema as an attribute.
  if hasattr(obj, 'schema') and isinstance(obj.schema, datalab.bigquery._schema.Schema):
    return obj.schema
  return None
Given a variable or table name get the Schema if it exists .
102
14
236,944
def _render_table(data, fields=None):
  """Helper to render a list of dictionaries as an HTML display object."""
  html = datalab.utils.commands.HtmlBuilder.render_table(data, fields)
  return IPython.core.display.HTML(html)
Helper to render a list of dictionaries as an HTML display object .
45
14
236,945
def _datasets_line(args):
  """Implements the BigQuery datasets magic used to display datasets in a project."""
  pattern = args['filter'] if args['filter'] else '*'
  matching = [str(dataset)
              for dataset in datalab.bigquery.Datasets(args['project'])
              if fnmatch.fnmatch(str(dataset), pattern)]
  return _render_list(matching)
Implements the BigQuery datasets magic used to display datasets in a project .
80
16
236,946
def _tables_line(args):
  """Implements the BigQuery tables magic used to display tables in a dataset."""
  pattern = args['filter'] if args['filter'] else '*'
  if args['dataset']:
    if args['project'] is None:
      datasets = [datalab.bigquery.Dataset(args['dataset'])]
    else:
      datasets = [datalab.bigquery.Dataset((args['project'], args['dataset']))]
  else:
    datasets = datalab.bigquery.Datasets(args['project'])

  tables = []
  for dataset in datasets:
    tables.extend(str(table) for table in dataset if fnmatch.fnmatch(str(table), pattern))
  return _render_list(tables)
Implements the BigQuery tables magic used to display tables in a dataset .
178
16
236,947
def _extract_line(args):
  """Implements the BigQuery extract magic used to extract table data to GCS."""
  name = args['source']
  source = datalab.utils.commands.get_notebook_item(name)
  if not source:
    source = _get_table(name)

  if not source:
    raise Exception('No source named %s found' % name)
  if isinstance(source, datalab.bigquery.Table) and not source.exists():
    raise Exception('Table %s does not exist' % name)

  job = source.extract(args['destination'],
                       format='CSV' if args['format'] == 'csv' else 'NEWLINE_DELIMITED_JSON',
                       compress=args['compress'],
                       csv_delimiter=args['delimiter'],
                       csv_header=args['header'])
  if job.failed:
    raise Exception('Extract failed: %s' % str(job.fatal_error))
  elif job.errors:
    raise Exception('Extract completed with errors: %s' % str(job.errors))
Implements the BigQuery extract magic used to extract table data to GCS .
244
17
236,948
def bigquery(line, cell=None):
  """Implements the bigquery cell magic for ipython notebooks."""
  namespace = {}
  if '$' in line:
    # We likely have variables to expand; get the appropriate context.
    namespace = datalab.utils.commands.notebook_environment()
  return datalab.utils.commands.handle_magic_line(line, cell, _bigquery_parser,
                                                  namespace=namespace)
Implements the bigquery cell magic for ipython notebooks .
86
13
236,949
def table(name=None, mode='create', use_cache=True, priority='interactive',
          allow_large_results=False):
  """Construct a query output object where the result is a table."""
  out = QueryOutput()
  out._output_type = 'table'
  out._table_name = name
  out._table_mode = mode
  out._use_cache = use_cache
  out._priority = priority
  out._allow_large_results = allow_large_results
  return out
Construct a query output object where the result is a table
99
11
236,950
def file(path, format='csv', csv_delimiter=',', csv_header=True, compress=False,
         use_cache=True):
  """Construct a query output object where the result is either a local file or a GCS path."""
  out = QueryOutput()
  out._output_type = 'file'
  out._file_path = path
  out._file_format = format
  out._csv_delimiter = csv_delimiter
  out._csv_header = csv_header
  out._compress_file = compress
  return out
Construct a query output object where the result is either a local file or a GCS path
108
18
236,951
def dataframe(start_row=0, max_rows=None, use_cache=True):
  """Construct a query output object where the result is a dataframe."""
  out = QueryOutput()
  out._output_type = 'dataframe'
  out._dataframe_start_row = start_row
  out._dataframe_max_rows = max_rows
  out._use_cache = use_cache
  return out
Construct a query output object where the result is a dataframe
78
12
236,952
def list():
  """List running TensorBoard instances."""
  parser = argparse.ArgumentParser()
  parser.add_argument('--logdir')
  parser.add_argument('--port')

  running_list = []
  for p in psutil.process_iter():
    if p.name() != 'tensorboard' or p.status() == psutil.STATUS_ZOMBIE:
      continue
    cmd_args = p.cmdline()[2:]  # remove 'python' and 'tensorboard'
    args = parser.parse_args(cmd_args)
    running_list.append({'pid': p.pid, 'logdir': args.logdir, 'port': args.port})
  return pd.DataFrame(running_list)
List running TensorBoard instances .
173
7
236,953
def start(logdir):
  """Start a TensorBoard instance."""
  if logdir.startswith('gs://'):
    # Check user does have access. TensorBoard will start successfully regardless
    # the user has read permissions or not so we check permissions here to
    # give user alerts if needed.
    datalab.storage._api.Api.verify_permitted_to_read(logdir)

  port = datalab.utils.pick_unused_port()
  args = ['tensorboard', '--logdir=' + logdir, '--port=' + str(port)]
  p = subprocess.Popen(args)

  # Poll up to ten times (one second apart) for the server to come up.
  for _ in range(10):
    if datalab.utils.is_http_running_on(port):
      basepath = os.environ.get('DATALAB_ENDPOINT_URL', '')
      url = '%s/_proxy/%d/' % (basepath.rstrip('/'), port)
      html = '<p>TensorBoard was started successfully with pid %d. ' % p.pid
      html += 'Click <a href="%s" target="_blank">here</a> to access it.</p>' % url
      IPython.display.display_html(html, raw=True)
      return p.pid
    time.sleep(1)
  raise Exception('Cannot start TensorBoard.')
Start a TensorBoard instance .
312
7
236,954
def stop(pid):
  """Shut down a specific process."""
  if psutil.pid_exists(pid):
    try:
      psutil.Process(pid).kill()
    except Exception:
      # Best effort: the process may have exited already or be owned by
      # another user; ignore failures.
      pass
Shut down a specific process .
38
6
236,955
def build_graph(self):
  """Forms the core by building a wrapper around the inception graph.

  Returns:
    input_jpeg: a placeholder for a single encoded JPEG string.
    embedding: the 'PreLogits' tensor of the inception_v3 network.
  """
  import tensorflow as tf
  input_jpeg = tf.placeholder(tf.string, shape=None)
  image = tf.image.decode_jpeg(input_jpeg, channels=self.CHANNELS)

  # Note resize expects a batch_size, but we are feeding a single image.
  # So we have to expand then squeeze. Resize returns float32 in the
  # range [0, uint8_max]
  image = tf.expand_dims(image, 0)

  # convert_image_dtype also scales [0, uint8_max] -> [0 ,1).
  image = tf.image.convert_image_dtype(image, dtype=tf.float32)
  image = tf.image.resize_bilinear(image, [self.HEIGHT, self.WIDTH], align_corners=False)

  # Then rescale range to [-1, 1) for Inception.
  image = tf.subtract(image, 0.5)
  inception_input = tf.multiply(image, 2.0)

  # Build Inception layers, which expect a tensor of type float from [-1, 1)
  # and shape [batch_size, height, width, channels].
  with tf.contrib.slim.arg_scope(_inceptionlib.inception_v3_arg_scope()):
    _, end_points = _inceptionlib.inception_v3(inception_input, is_training=False)

  embedding = end_points['PreLogits']
  return input_jpeg, embedding
Forms the core by building a wrapper around the inception graph .
342
13
236,956
def restore_from_checkpoint(self, checkpoint_path):
  """Restore inception model variables from the checkpoint file."""
  import tensorflow as tf
  # Get all variables to restore. Exclude Logits and AuxLogits because they
  # depend on the input data and we do not need to initialize them from
  # checkpoint.
  all_vars = tf.contrib.slim.get_variables_to_restore(
      exclude=['InceptionV3/AuxLogits', 'InceptionV3/Logits', 'global_step'])
  saver = tf.train.Saver(all_vars)
  saver.restore(self.tf_session, checkpoint_path)
To restore inception model variables from the checkpoint file .
142
10
236,957
def calculate_embedding(self, batch_image_bytes):
  """Get the embeddings for a given JPEG image.

  Runs the batch of encoded JPEG bytes through the graph's embedding
  tensor in the wrapped session.
  """
  return self.tf_session.run(
      self.embedding, feed_dict={self.input_jpeg: batch_image_bytes})
Get the embeddings for a given JPEG image .
48
11
236,958
def add_final_training_ops(self, embeddings, all_labels_count, bottleneck_tensor_size,
                           hidden_layer_size=BOTTLENECK_TENSOR_SIZE / 4,
                           dropout_keep_prob=None):
  """Adds a new softmax and fully-connected layer for training.

  Args:
    embeddings: the embedding (bottleneck) tensor to train on top of.
    all_labels_count: total number of output labels.
    bottleneck_tensor_size: width of the embedding input.
    hidden_layer_size: size of the intermediate fully-connected layer.
    dropout_keep_prob: keep probability for dropout; no dropout if falsy.

  Returns:
    The (softmax, logits) tensor pair.
  """
  with tf.name_scope('input'):
    bottleneck_input = tf.placeholder_with_default(
        embeddings, shape=[None, bottleneck_tensor_size], name='ReshapeSqueezed')
    bottleneck_with_no_gradient = tf.stop_gradient(bottleneck_input)

    with tf.name_scope('Wx_plus_b'):
      hidden = layers.fully_connected(bottleneck_with_no_gradient, hidden_layer_size)
      # We need a dropout when the size of the dataset is rather small.
      if dropout_keep_prob:
        hidden = tf.nn.dropout(hidden, dropout_keep_prob)
      logits = layers.fully_connected(hidden, all_labels_count, activation_fn=None)

  softmax = tf.nn.softmax(logits, name='softmax')
  return softmax, logits
Adds a new softmax and fully - connected layer for training .
265
13
236,959
def build_inception_graph(self):
  """Builds an inception graph and add the necessary input & output tensors.

  Returns:
    image_str_tensor: a placeholder for a batch of encoded JPEG strings.
    inception_embeddings: the squeezed 'PreLogits' embedding tensor.
  """
  image_str_tensor = tf.placeholder(tf.string, shape=[None])

  # The CloudML Prediction API always "feeds" the Tensorflow graph with
  # dynamic batch sizes e.g. (?,). decode_jpeg only processes scalar
  # strings because it cannot guarantee a batch of images would have
  # the same output size. We use tf.map_fn to give decode_jpeg a scalar
  # string from dynamic batches.
  image = tf.map_fn(_util.decode_and_resize, image_str_tensor, back_prop=False,
                    dtype=tf.uint8)
  # convert_image_dtype, also scales [0, uint8_max] -> [0 ,1).
  image = tf.image.convert_image_dtype(image, dtype=tf.float32)

  # Then shift images to [-1, 1) for Inception.
  image = tf.subtract(image, 0.5)
  image = tf.multiply(image, 2.0)

  # Build Inception layers, which expect a tensor of type float from [-1, 1)
  # and shape [batch_size, height, width, channels].
  with slim.arg_scope(_inceptionlib.inception_v3_arg_scope()):
    _, end_points = _inceptionlib.inception_v3(image, is_training=False)

  inception_embeddings = end_points['PreLogits']
  inception_embeddings = tf.squeeze(inception_embeddings, [1, 2], name='SpatialSqueeze')
  return image_str_tensor, inception_embeddings
Builds an inception graph and add the necessary input & output tensors .
374
15
236,960
def build_graph(self, data_paths, batch_size, graph_mod):
  """Builds generic graph for training or eval."""
  tensors = GraphReferences()
  is_training = graph_mod == GraphMod.TRAIN
  if data_paths:
    _, tensors.examples = _util.read_examples(
        data_paths, batch_size, shuffle=is_training,
        num_epochs=None if is_training else 2)
  else:
    tensors.examples = tf.placeholder(tf.string, name='input', shape=(None,))

  if graph_mod == GraphMod.PREDICT:
    inception_input, inception_embeddings = self.build_inception_graph()
    # Build the Inception graph. We later add final training layers
    # to this graph. This is currently used only for prediction.
    # For training, we use pre-processed data, so it is not needed.
    embeddings = inception_embeddings
    tensors.input_jpeg = inception_input
  else:
    # For training and evaluation we assume data is preprocessed, so the
    # inputs are tf-examples.
    # Generate placeholders for examples.
    with tf.name_scope('inputs'):
      feature_map = {
          'image_uri': tf.FixedLenFeature(shape=[], dtype=tf.string, default_value=['']),
          # Some images may have no labels. For those, we assume a default
          # label. So the number of labels is label_count+1 for the default
          # label.
          'label': tf.FixedLenFeature(shape=[1], dtype=tf.int64,
                                      default_value=[len(self.labels)]),
          'embedding': tf.FixedLenFeature(shape=[BOTTLENECK_TENSOR_SIZE], dtype=tf.float32)
      }
      parsed = tf.parse_example(tensors.examples, features=feature_map)
      labels = tf.squeeze(parsed['label'])
      uris = tf.squeeze(parsed['image_uri'])
      embeddings = parsed['embedding']

  # We assume a default label, so the total number of labels is equal to
  # label_count+1.
  all_labels_count = len(self.labels) + 1
  with tf.name_scope('final_ops'):
    softmax, logits = self.add_final_training_ops(
        embeddings, all_labels_count, BOTTLENECK_TENSOR_SIZE,
        dropout_keep_prob=self.dropout if is_training else None)

  # Prediction is the index of the label with the highest score. We are
  # interested only in the top score.
  prediction = tf.argmax(softmax, 1)
  tensors.predictions = [prediction, softmax, embeddings]

  if graph_mod == GraphMod.PREDICT:
    return tensors

  with tf.name_scope('evaluate'):
    loss_value = loss(logits, labels)

  # Add to the Graph the Ops that calculate and apply gradients.
  if is_training:
    tensors.train, tensors.global_step = training(loss_value)
  else:
    tensors.global_step = tf.Variable(0, name='global_step', trainable=False)
    tensors.uris = uris

  # Add means across all batches.
  loss_updates, loss_op = _util.loss(loss_value)
  accuracy_updates, accuracy_op = _util.accuracy(logits, labels)

  if not is_training:
    tf.summary.scalar('accuracy', accuracy_op)
    tf.summary.scalar('loss', loss_op)

  tensors.metric_updates = loss_updates + accuracy_updates
  tensors.metric_values = [loss_op, accuracy_op]
  return tensors
Builds generic graph for training or eval .
855
9
236,961
def restore_from_checkpoint(self, session, inception_checkpoint_file, trained_checkpoint_file):
  """To restore model variables from the checkpoint file.

  Restores the pre-trained inception variables from one checkpoint and the
  trained final layers from another.

  Args:
    session: the session to restore the variables into.
    inception_checkpoint_file: path to the pre-trained inception checkpoint.
    trained_checkpoint_file: path to the checkpoint holding the remaining
        (trained) variables.
  """
  inception_exclude_scopes = [
      'InceptionV3/AuxLogits', 'InceptionV3/Logits', 'global_step', 'final_ops']
  reader = tf.train.NewCheckpointReader(inception_checkpoint_file)
  var_to_shape_map = reader.get_variable_to_shape_map()

  # Get all variables to restore. Exclude Logits and AuxLogits because they
  # depend on the input data and we do not need to initialize them.
  all_vars = tf.contrib.slim.get_variables_to_restore(exclude=inception_exclude_scopes)
  # Remove variables that do not exist in the inception checkpoint (for
  # example the final softmax and fully-connected layers).
  inception_vars = {var.op.name: var
                    for var in all_vars if var.op.name in var_to_shape_map}
  inception_saver = tf.train.Saver(inception_vars)
  inception_saver.restore(session, inception_checkpoint_file)

  # Restore the rest of the variables from the trained checkpoint.
  # NOTE: dict.keys() is wrapped in list() because on Python 3 it returns a
  # view which cannot be concatenated to a list with '+' (the original code
  # raised TypeError there under Python 3).
  trained_vars = tf.contrib.slim.get_variables_to_restore(
      exclude=inception_exclude_scopes + list(inception_vars.keys()))
  trained_saver = tf.train.Saver(trained_vars)
  trained_saver.restore(session, trained_checkpoint_file)
To restore model variables from the checkpoint file .
354
9
236,962
def build_prediction_graph(self):
  """Builds prediction graph and registers appropriate endpoints.

  Returns:
    The (inputs, outputs) tensor dictionaries for the serving signature.
  """
  tensors = self.build_graph(None, 1, GraphMod.PREDICT)

  keys_placeholder = tf.placeholder(tf.string, shape=[None])
  inputs = {
      'key': keys_placeholder,
      'image_bytes': tensors.input_jpeg
  }

  # To extract the id, we need to add the identity function.
  keys = tf.identity(keys_placeholder)
  labels = self.labels + ['UNKNOWN']
  labels_tensor = tf.constant(labels)
  labels_table = tf.contrib.lookup.index_to_string_table_from_tensor(mapping=labels_tensor)
  predicted_label = labels_table.lookup(tensors.predictions[0])

  # Need to duplicate the labels by num_of_instances so the output is one batch
  # (all output members share the same outer dimension).
  # The labels are needed for client to match class scores list.
  labels_tensor = tf.expand_dims(tf.constant(labels), 0)
  num_instance = tf.shape(keys)
  labels_tensors_n = tf.tile(labels_tensor, tf.concat(axis=0, values=[num_instance, [1]]))

  outputs = {
      'key': keys,
      'prediction': predicted_label,
      'labels': labels_tensors_n,
      'scores': tensors.predictions[1],
  }
  return inputs, outputs
Builds prediction graph and registers appropriate endpoints .
335
10
236,963
def export(self, last_checkpoint, output_dir):
  """Builds a prediction graph and exports the model."""
  logging.info('Exporting prediction graph to %s', output_dir)
  with tf.Session(graph=tf.Graph()) as sess:
    # Build and save prediction meta graph and trained variable values.
    inputs, outputs = self.build_prediction_graph()
    signature_def_map = {
        'serving_default': signature_def_utils.predict_signature_def(inputs, outputs)
    }
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    self.restore_from_checkpoint(sess, self.inception_checkpoint_file, last_checkpoint)
    init_op_serving = control_flow_ops.group(
        variables.local_variables_initializer(), tf.tables_initializer())

    builder = saved_model_builder.SavedModelBuilder(output_dir)
    builder.add_meta_graph_and_variables(
        sess, [tag_constants.SERVING],
        signature_def_map=signature_def_map,
        legacy_init_op=init_op_serving)
    builder.save(False)
Builds a prediction graph and exports the model .
265
11
236,964
def format_metric_values(self, metric_values):
  """Formats metric values - used for logging purpose."""
  # Early in training, metric_values may actually be None, and either entry
  # may be missing; fall back to 'N/A' in those cases.
  formatted_loss = 'N/A'
  formatted_accuracy = 'N/A'
  try:
    formatted_loss = 'loss: %.3f' % metric_values[0]
    formatted_accuracy = 'accuracy: %.3f' % metric_values[1]
  except (TypeError, IndexError):
    pass
  return '%s, %s' % (formatted_loss, formatted_accuracy)
Formats metric values - used for logging purpose .
115
10
236,965
def package_and_copy(package_root_dir, setup_py, output_tar_path):
  """Repackage a CloudML package and copy it to a staging dir.

  Args:
    package_root_dir: the package root directory containing the sources.
    setup_py: path to the setup.py to build with.
    output_tar_path: GCS destination for the built tarball.

  Raises:
    ValueError: if output_tar_path is not a GCS path or setup_py is missing.
  """
  if not output_tar_path.startswith('gs://'):
    raise ValueError('output_tar_path needs to be a GCS path.')
  if not os.path.isfile(setup_py):
    raise ValueError('Supplied file "%s" does not exist.' % setup_py)

  dest_setup_py = os.path.join(package_root_dir, 'setup.py')
  if dest_setup_py != setup_py:
    # setuptools requires a "setup.py" in the current dir, so copy setup.py there.
    # Also check if there is an existing setup.py. If so, back it up.
    if os.path.isfile(dest_setup_py):
      os.rename(dest_setup_py, dest_setup_py + '._bak_')
    shutil.copyfile(setup_py, dest_setup_py)

  tempdir = tempfile.mkdtemp()
  previous_cwd = os.getcwd()
  os.chdir(package_root_dir)
  try:
    # Repackage.
    sdist = ['python', dest_setup_py, 'sdist', '--format=gztar', '-d', tempdir]
    subprocess.check_call(sdist)

    # Copy to GCS.
    source = os.path.join(tempdir, '*.tar.gz')
    gscopy = ['gsutil', 'cp', source, output_tar_path]
    subprocess.check_call(gscopy)
    return
  finally:
    # Restore the working directory and the original setup.py, and clean up.
    os.chdir(previous_cwd)
    if dest_setup_py != setup_py:
      os.remove(dest_setup_py)
      if os.path.isfile(dest_setup_py + '._bak_'):
        os.rename(dest_setup_py + '._bak_', dest_setup_py)
    shutil.rmtree(tempdir)
Repackage an CloudML package and copy it to a staging dir .
458
15
236,966
def read_file_to_string(path):
  """Read a file into a string."""
  raw_bytes = tf.gfile.Open(path, 'r').read()
  return dlutils.python_portable_string(raw_bytes)
Read a file into a string .
49
7
236,967
def _date ( val , offset = None ) : if val is None : return val if val == '' or val == 'now' : when = datetime . datetime . utcnow ( ) elif val == 'today' : dt = datetime . datetime . utcnow ( ) when = datetime . datetime ( dt . year , dt . month , dt . day ) elif val == 'yesterday' : dt = datetime . datetime . utcnow ( ) - datetime . timedelta ( 1 ) when = datetime . datetime ( dt . year , dt . month , dt . day ) else : when = datetime . datetime . strptime ( val , "%Y%m%d" ) if offset is not None : for part in offset . split ( ',' ) : unit = part [ - 1 ] quantity = int ( part [ : - 1 ] ) # We can use timedelta for days and under, but not for years and months if unit == 'y' : when = datetime . datetime ( year = when . year + quantity , month = when . month , day = when . day , hour = when . hour , minute = when . minute ) elif unit == 'm' : new_year = when . year new_month = when . month + quantity if new_month < 1 : new_month = - new_month new_year += 1 + ( new_month // 12 ) new_month = 12 - new_month % 12 elif new_month > 12 : new_year += ( new_month - 1 ) // 12 new_month = 1 + ( new_month - 1 ) % 12 when = datetime . datetime ( year = new_year , month = new_month , day = when . day , hour = when . hour , minute = when . minute ) elif unit == 'd' : when += datetime . timedelta ( days = quantity ) elif unit == 'h' : when += datetime . timedelta ( hours = quantity ) elif unit == 'M' : when += datetime . timedelta ( minutes = quantity ) return when
A special pseudo - type for pipeline arguments .
464
9
236,968
def _make_string_formatter(f, offset=None):
  """A closure-izer for string arguments that include a format and possibly an offset."""
  fmt = f
  delta = offset

  def _formatter(v):
    # Resolve the (possibly offset) date, then render it with the format.
    return time.strftime(fmt, _date(v, delta).timetuple())

  return _formatter
A closure - izer for string arguments that include a format and possibly an offset .
48
17
236,969
def _make_table_formatter(f, offset=None):
  """A closure-izer for table arguments that include a format and possibly an offset."""
  fmt = f
  delta = offset

  def _formatter(v):
    return _resolve_table(v, fmt, delta)

  return _formatter
A closure - izer for table arguments that include a format and possibly an offset .
38
17
236,970
def _arguments(code, module):
  """Define pipeline arguments.

  Executes the cell code (one or more arg() calls) in an environment that
  exposes the special pseudo-types, then builds an argument parser from the
  resulting definitions.

  Args:
    code: the Python code defining the arguments.
    module: the module in which the code is being defined (currently unused
        beyond the commented-out import support below).

  Returns:
    The populated CommandParser.
  """
  arg_parser = CommandParser.create('')
  try:
    # Define our special argument 'types' and add them to the environment.
    builtins = {'source': _table, 'datestring': _datestring}
    env = {}
    env.update(builtins)

    # Execute the cell which should be one or more calls to arg().
    exec(code, env)

    # Iterate through the module dictionary. For any newly defined objects,
    # add args to the parser.
    for key in env:

      # Skip internal/private stuff.
      if key in builtins or key[0] == '_':
        continue
      # If we want to support importing query modules into other query modules, uncomment next 4
      # Skip imports but add them to the module
      # if isinstance(env[key], types.ModuleType):
      #   module.__dict__[key] = env[key]
      #   continue
      val = env[key]
      key = '--%s' % key

      if isinstance(val, bool):
        if val:
          arg_parser.add_argument(key, default=val, action='store_true')
        else:
          arg_parser.add_argument(key, default=val, action='store_false')
      # NOTE: the original repeated `isinstance(val, int)` twice here (a
      # leftover from a Python 2 `long` check); the redundant test is removed.
      elif isinstance(val, basestring) or isinstance(val, int) or isinstance(val, float):
        arg_parser.add_argument(key, default=val)
      elif isinstance(val, list):
        arg_parser.add_argument(key, default=val, nargs='+')
      elif isinstance(val, tuple):
        arg_parser.add_argument(key, default=list(val), nargs='+')

      # Is this one of our pseudo-types for dates/tables?
      elif isinstance(val, dict) and 'type' in val:
        if val['type'] == 'datestring':
          arg_parser.add_argument(key, default='',
                                  type=_make_string_formatter(val['format'],
                                                              offset=val['offset']))
        elif val['type'] == 'table':
          if val['format'] is not None:
            arg_parser.add_argument(key, default='',
                                    type=_make_table_formatter(val['format'],
                                                               offset=val['offset']))
          else:
            arg_parser.add_argument(key, default=val['name'], type=_make_table)
        else:
          raise Exception('Cannot generate argument for %s of type %s' % (key, type(val)))
      else:
        raise Exception('Cannot generate argument for %s of type %s' % (key, type(val)))

  except Exception as e:
    print("%%sql arguments: %s from code '%s'" % (str(e), str(code)))
  return arg_parser
Define pipeline arguments .
659
5
236,971
def sql_cell(args, cell):
  """Implements the SQL cell magic for ipython notebooks."""
  name = args['module'] if args['module'] else '_sql_cell'
  module = imp.new_module(name)
  query = _split_cell(cell, module)
  ipy = IPython.get_ipython()
  if not args['module']:
    # Execute now
    if query:
      return datalab.bigquery.Query(query, values=ipy.user_ns).execute(
          dialect=args['dialect'], billing_tier=args['billing']).results
  else:
    # Add it as a module
    sys.modules[name] = module
    exec('import %s' % name, ipy.user_ns)
Implements the SQL cell magic for ipython notebooks .
162
12
236,972
def get_reader_input_fn(train_config, preprocess_output_dir, model_type,
                        data_paths, batch_size, shuffle, num_epochs=None):
  """Builds input layer for training.

  Returns a zero-argument function that reads, parses and preprocesses
  examples from data_paths, yielding a (features, target) pair.
  """

  def get_input_features():
    """Read the input features from the given data paths."""
    _, examples = util.read_examples(
        input_files=data_paths,
        batch_size=batch_size,
        shuffle=shuffle,
        num_epochs=num_epochs)
    features = util.parse_example_tensor(examples=examples,
                                         train_config=train_config,
                                         keep_target=True)

    target_name = train_config['target_column']
    target = features.pop(target_name)
    features, target = util.preprocess_input(
        features=features,
        target=target,
        train_config=train_config,
        preprocess_output_dir=preprocess_output_dir,
        model_type=model_type)

    return features, target

  # Return a function to input the features into the model from a data path.
  return get_input_features
Builds input layer for training .
239
7
236,973
def main(argv=None):
  """Run a Tensorflow model on the Iris dataset."""
  args = parse_arguments(sys.argv if argv is None else argv)

  tf.logging.set_verbosity(tf.logging.INFO)
  learn_runner.run(
      experiment_fn=get_experiment_fn(args),
      output_dir=args.job_dir)
Run a Tensorflow model on the Iris dataset .
74
11
236,974
def sd(line, cell=None):
  """Implements the stackdriver cell magic for ipython notebooks."""
  parser = google.datalab.utils.commands.CommandParser(
      prog='%sd',
      description=('Execute various Stackdriver related operations. Use "%sd '
                   '<stackdriver_product> -h" for help on a specific Stackdriver product.'))

  # %%sd monitoring
  _create_monitoring_subparser(parser)
  return google.datalab.utils.commands.handle_magic_line(line, cell, parser)
Implements the stackdriver cell magic for ipython notebooks .
109
13
236,975
def make_prediction_output_tensors(args, features, input_ops, model_fn_ops, keep_target):
  """Makes the final prediction output layer."""
  target_name = feature_transforms.get_target_name(features)
  key_names = get_key_names(features)

  outputs = {}
  outputs.update({key_name: tf.squeeze(input_ops.features[key_name])
                  for key_name in key_names})

  if is_classification_model(args.model):

    # build maps from ints to the original categorical strings.
    class_names = read_vocab(args, target_name)
    table = tf.contrib.lookup.index_to_string_table_from_tensor(
        mapping=class_names, default_value='UNKNOWN')

    # Get the label of the input target.
    if keep_target:
      input_target_label = table.lookup(input_ops.features[target_name])
      outputs[PG_TARGET] = tf.squeeze(input_target_label)

    # TODO(brandondutra): get the score of the target label too.
    probabilities = model_fn_ops.predictions['probabilities']

    # if top_n == 0, this means use all the classes. We will use class names as
    # probabilities labels.
    if args.top_n == 0:
      predicted_index = tf.argmax(probabilities, axis=1)
      predicted = table.lookup(predicted_index)
      outputs.update({PG_CLASSIFICATION_FIRST_LABEL: predicted})

      probabilities_list = tf.unstack(probabilities, axis=1)
      for class_name, p in zip(class_names, probabilities_list):
        outputs[class_name] = p
    else:
      top_n = args.top_n

      # get top k labels and their scores.
      (top_k_values, top_k_indices) = tf.nn.top_k(probabilities, k=top_n)
      top_k_labels = table.lookup(tf.to_int64(top_k_indices))

      # Write the top_k values using 2*top_n columns.
      num_digits = int(math.ceil(math.log(top_n, 10)))
      if num_digits == 0:
        num_digits = 1

      for i in range(0, top_n):
        # Pad i based on the size of k. So if k = 100, i = 23 -> i = '023'. This
        # makes sorting the columns easy.
        padded_i = str(i + 1).zfill(num_digits)

        if i == 0:
          label_alias = PG_CLASSIFICATION_FIRST_LABEL
        else:
          label_alias = PG_CLASSIFICATION_LABEL_TEMPLATE % padded_i

        label_tensor_name = (tf.squeeze(
            tf.slice(top_k_labels, [0, i], [tf.shape(top_k_labels)[0], 1])))

        if i == 0:
          score_alias = PG_CLASSIFICATION_FIRST_SCORE
        else:
          score_alias = PG_CLASSIFICATION_SCORE_TEMPLATE % padded_i

        score_tensor_name = (tf.squeeze(
            tf.slice(top_k_values, [0, i], [tf.shape(top_k_values)[0], 1])))

        outputs.update({label_alias: label_tensor_name,
                        score_alias: score_tensor_name})
  else:
    if keep_target:
      outputs[PG_TARGET] = tf.squeeze(input_ops.features[target_name])

    scores = model_fn_ops.predictions['scores']
    outputs[PG_REGRESSION_PREDICTED_TARGET] = tf.squeeze(scores)

  return outputs
Makes the final prediction output layer .
860
8
236,976
def read_vocab(args, column_name):
  """Reads a vocab file if it exists.

  Args:
    args: command line flags (uses args.analysis as the analysis output dir).
    column_name: name of the column whose vocab file should be read.

  Returns:
    The list of vocab words, or [] if the vocab file does not exist.
  """
  vocab_path = os.path.join(args.analysis,
                            feature_transforms.VOCAB_ANALYSIS_FILE % column_name)
  if not file_io.file_exists(vocab_path):
    return []
  vocab, _ = feature_transforms.read_vocab_file(vocab_path)
  return vocab
Reads a vocab file if it exists .
93
10
236,977
def get_item(env, name, default=None):
  """Get an item from a dictionary, handling nested lookups with dotted notation.

  Args:
    env: the dictionary (or module) to look the name up in.
    name: the key; dots separate nested lookups.
    default: value returned when any lookup step fails.
  """
  # TODO: handle attributes
  current = env
  for part in name.split('.'):
    if isinstance(current, dict) and part in current:
      current = current[part]
    elif isinstance(current, types.ModuleType) and part in current.__dict__:
      current = current.__dict__[part]
    else:
      return default
  return current
Get an item from a dictionary handling nested lookups with dotted notation .
87
14
236,978
def predict(model_dir, images):
  """Local instant prediction."""
  raw_results = _tf_predict(model_dir, images)
  # Pair each predicted label with its own score from the per-label scores.
  return [(label, scores[list(all_labels).index(label)])
          for label, all_labels, scores in raw_results]
Local instant prediction .
70
4
236,979
def configure_pipeline(p, dataset, model_dir, output_csv, output_bq_table):
  """Configures a dataflow pipeline for batch prediction."""
  data = _util.get_sources_from_dataset(p, dataset, 'predict')
  # A two-column dataset includes a target column, so the output carries
  # target columns too.
  if len(dataset.schema) == 2:
    output_schema = [
        {'name': 'image_url', 'type': 'STRING'},
        {'name': 'target', 'type': 'STRING'},
        {'name': 'predicted', 'type': 'STRING'},
        {'name': 'target_prob', 'type': 'FLOAT'},
        {'name': 'predicted_prob', 'type': 'FLOAT'},
    ]
  else:
    output_schema = [
        {'name': 'image_url', 'type': 'STRING'},
        {'name': 'predicted', 'type': 'STRING'},
        {'name': 'predicted_prob', 'type': 'FLOAT'},
    ]
  results = (data
             | 'Load Images' >> beam.ParDo(LoadImagesDoFn())
             | 'Batch Inputs' >> beam.ParDo(EmitAsBatchDoFn(20))
             | 'Batch Predict' >> beam.ParDo(PredictBatchDoFn(model_dir))
             | 'Unbatch' >> beam.ParDo(UnbatchDoFn())
             | 'Process Results' >> beam.ParDo(ProcessResultsDoFn()))

  if output_csv is not None:
    schema_file = output_csv + '.schema.json'
    results_save = (results
                    | 'Prepare For Output' >> beam.ParDo(MakeCsvLineDoFn())
                    | 'Write Csv Results' >> beam.io.textio.WriteToText(
                        output_csv, shard_name_template=''))
    (results_save
     | 'Sample One' >> beam.transforms.combiners.Sample.FixedSizeGlobally(1)
     | 'Serialize Schema' >> beam.Map(lambda path: json.dumps(output_schema))
     | 'Write Schema' >> beam.io.textio.WriteToText(schema_file, shard_name_template=''))

  if output_bq_table is not None:
    # BigQuery sink takes schema in the form of 'field1:type1,field2:type2...'
    bq_schema_string = ','.join(x['name'] + ':' + x['type'] for x in output_schema)
    sink = beam.io.BigQuerySink(output_bq_table,
                                schema=bq_schema_string,
                                write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE)
    results | 'Write BQ Results' >> beam.io.Write(sink)
Configures a dataflow pipeline for batch prediction .
667
10
236,980
def sampling_query ( sql , context , fields = None , count = 5 , sampling = None , udfs = None , data_sources = None ) : return Query ( _sampling . Sampling . sampling_query ( sql , fields , count , sampling ) , context = context , udfs = udfs , data_sources = data_sources )
Returns a sampling Query for the SQL object .
80
9
236,981
def results ( self , use_cache = True , dialect = None , billing_tier = None ) : if not use_cache or ( self . _results is None ) : self . execute ( use_cache = use_cache , dialect = dialect , billing_tier = billing_tier ) return self . _results . results
Retrieves table of results for the query . May block if the query must be executed first .
68
20
236,982
def extract ( self , storage_uris , format = 'csv' , csv_delimiter = ',' , csv_header = True , compress = False , use_cache = True , dialect = None , billing_tier = None ) : return self . results ( use_cache = use_cache , dialect = dialect , billing_tier = billing_tier ) . extract ( storage_uris , format = format , csv_delimiter = csv_delimiter , csv_header = csv_header , compress = compress )
Exports the query results to GCS .
119
9
236,983
def to_dataframe ( self , start_row = 0 , max_rows = None , use_cache = True , dialect = None , billing_tier = None ) : return self . results ( use_cache = use_cache , dialect = dialect , billing_tier = billing_tier ) . to_dataframe ( start_row = start_row , max_rows = max_rows )
Exports the query results to a Pandas dataframe .
84
12
236,984
def sample ( self , count = 5 , fields = None , sampling = None , use_cache = True , dialect = None , billing_tier = None ) : return Query . sampling_query ( self . _sql , self . _context , count = count , fields = fields , sampling = sampling , udfs = self . _udfs , data_sources = self . _data_sources ) . results ( use_cache = use_cache , dialect = dialect , billing_tier = billing_tier )
Retrieves a sampling of rows for the query .
109
11
236,985
def execute ( self , table_name = None , table_mode = 'create' , use_cache = True , priority = 'interactive' , allow_large_results = False , dialect = None , billing_tier = None ) : job = self . execute_async ( table_name = table_name , table_mode = table_mode , use_cache = use_cache , priority = priority , allow_large_results = allow_large_results , dialect = dialect , billing_tier = billing_tier ) self . _results = job . wait ( ) return self . _results
Initiate the query blocking until complete and then return the results .
127
14
236,986
def to_view ( self , view_name ) : # Do the import here to avoid circular dependencies at top-level. from . import _view return _view . View ( view_name , self . _context ) . create ( self . _sql )
Create a View from this Query .
54
7
236,987
def format_help ( self ) : if not self . _cell_args : return super ( CommandParser , self ) . format_help ( ) else : # Print the standard argparse info, the cell arg block, and then the epilog # If we don't remove epilog before calling the super, then epilog will # be printed before the 'Cell args' block. epilog = self . epilog self . epilog = None orig_help = super ( CommandParser , self ) . format_help ( ) cell_args_help = '\nCell args:\n\n' for cell_arg , v in six . iteritems ( self . _cell_args ) : required = 'Required' if v [ 'required' ] else 'Optional' cell_args_help += '%s: %s. %s.\n\n' % ( cell_arg , required , v [ 'help' ] ) orig_help += cell_args_help if epilog : orig_help += epilog + '\n\n' return orig_help
Override help doc to add cell args .
235
8
236,988
def _get_subparsers ( self ) : subparsers = [ ] for action in self . _actions : if isinstance ( action , argparse . _SubParsersAction ) : for _ , subparser in action . choices . items ( ) : subparsers . append ( subparser ) ret = subparsers for sp in subparsers : ret += sp . _get_subparsers ( ) return ret
Recursively get subparsers .
93
8
236,989
def _get_subparser_line_args ( self , subparser_prog ) : subparsers = self . _get_subparsers ( ) for subparser in subparsers : if subparser_prog == subparser . prog : # Found the subparser. args_to_parse = [ ] for action in subparser . _actions : if action . option_strings : for argname in action . option_strings : if argname . startswith ( '--' ) : args_to_parse . append ( argname [ 2 : ] ) return args_to_parse return None
Get line args of a specified subparser by its prog .
131
12
236,990
def _get_subparser_cell_args ( self , subparser_prog ) : subparsers = self . _get_subparsers ( ) for subparser in subparsers : if subparser_prog == subparser . prog : return subparser . _cell_args return None
Get cell args of a specified subparser by its prog .
65
12
236,991
def add_cell_argument ( self , name , help , required = False ) : for action in self . _actions : if action . dest == name : raise ValueError ( 'Arg "%s" was added by add_argument already.' % name ) self . _cell_args [ name ] = { 'required' : required , 'help' : help }
Add a cell only argument .
76
6
236,992
def parse ( self , line , cell , namespace = None ) : if namespace is None : ipy = IPython . get_ipython ( ) namespace = ipy . user_ns # Find which subcommand in the line by comparing line with subcommand progs. # For example, assuming there are 3 subcommands with their progs # %bq tables # %bq tables list # %bq datasets # and the line is "tables list --dataset proj.myds" # it will find the second one --- "tables list" because it matches the prog and # it is the longest. args = CommandParser . create_args ( line , namespace ) # "prog" is a ArgumentParser's path splitted by namspace, such as '%bq tables list'. sub_parsers_progs = [ x . prog for x in self . _get_subparsers ( ) ] matched_progs = [ ] for prog in sub_parsers_progs : # Remove the leading magic such as "%bq". match = prog . split ( ) [ 1 : ] for i in range ( len ( args ) ) : if args [ i : i + len ( match ) ] == match : matched_progs . append ( prog ) break matched_prog = None if matched_progs : # Get the longest match. matched_prog = max ( matched_progs , key = lambda x : len ( x . split ( ) ) ) # Line args can be provided in cell too. If they are in cell, move them to line # so we can parse them all together. line_args = self . _get_subparser_line_args ( matched_prog ) if line_args : cell_config = None try : cell_config , cell = google . datalab . utils . commands . parse_config_for_selected_keys ( cell , line_args ) except : # It is okay --- probably because cell is not in yaml or json format. pass if cell_config : google . datalab . utils . commands . replace_vars ( cell_config , namespace ) for arg_name in cell_config : arg_value = cell_config [ arg_name ] if arg_value is None : continue if '--' + arg_name in args : raise ValueError ( 'config item "%s" is specified in both cell and line.' % arg_name ) if isinstance ( arg_value , bool ) : if arg_value : line += ' --%s' % arg_name else : line += ' --%s %s' % ( arg_name , str ( cell_config [ arg_name ] ) ) # Parse args again with the new line. 
args = CommandParser . create_args ( line , namespace ) args = vars ( self . parse_args ( args ) ) # Parse cell args. cell_config = None cell_args = self . _get_subparser_cell_args ( matched_prog ) if cell_args : try : cell_config , _ = google . datalab . utils . commands . parse_config_for_selected_keys ( cell , cell_args ) except : # It is okay --- probably because cell is not in yaml or json format. pass if cell_config : google . datalab . utils . commands . replace_vars ( cell_config , namespace ) for arg in cell_args : if ( cell_args [ arg ] [ 'required' ] and ( cell_config is None or cell_config . get ( arg , None ) is None ) ) : raise ValueError ( 'Cell config "%s" is required.' % arg ) if cell_config : args . update ( cell_config ) return args , cell
Parses a line and cell into a dictionary of arguments expanding variables from a namespace .
813
18
236,993
def _glob_events_files ( self , paths , recursive ) : event_files = [ ] for path in paths : dirs = tf . gfile . Glob ( path ) dirs = filter ( lambda x : tf . gfile . IsDirectory ( x ) , dirs ) for dir in dirs : if recursive : dir_files_pair = [ ( root , filenames ) for root , _ , filenames in tf . gfile . Walk ( dir ) ] else : dir_files_pair = [ ( dir , tf . gfile . ListDirectory ( dir ) ) ] for root , filenames in dir_files_pair : file_names = fnmatch . filter ( filenames , '*.tfevents.*' ) file_paths = [ os . path . join ( root , x ) for x in file_names ] file_paths = filter ( lambda x : not tf . gfile . IsDirectory ( x ) , file_paths ) event_files += file_paths return event_files
Find all tf events files under a list of paths recursively .
222
14
236,994
def list_events ( self ) : event_dir_dict = collections . defaultdict ( set ) for event_file in self . _glob_events_files ( self . _paths , recursive = True ) : dir = os . path . dirname ( event_file ) try : for record in tf_record . tf_record_iterator ( event_file ) : event = event_pb2 . Event . FromString ( record ) if event . summary is None or event . summary . value is None : continue for value in event . summary . value : if value . simple_value is None or value . tag is None : continue event_dir_dict [ value . tag ] . add ( dir ) except tf . errors . DataLossError : # DataLossError seems to happen sometimes for small logs. # We want to show good records regardless. continue return dict ( event_dir_dict )
List all scalar events in the directory .
192
9
236,995
def from_storage ( source , source_format = 'csv' , csv_options = None , ignore_unknown_values = False , max_bad_records = 0 , compressed = False , schema = None ) : result = FederatedTable ( ) # Do some sanity checking and concert some params from friendly form to form used by BQ. if source_format == 'csv' : result . _bq_source_format = 'CSV' if csv_options is None : csv_options = _csv_options . CSVOptions ( ) # use defaults elif source_format == 'json' : if csv_options : raise Exception ( 'CSV options are not support for JSON tables' ) result . _bq_source_format = 'NEWLINE_DELIMITED_JSON' else : raise Exception ( "Invalid source format %s" % source_format ) result . _source = source if isinstance ( source , list ) else [ source ] result . _source_format = source_format result . _csv_options = csv_options result . _ignore_unknown_values = ignore_unknown_values result . _max_bad_records = max_bad_records result . _compressed = compressed result . _schema = schema return result
Create an external table for a GCS object .
276
10
236,996
def get_query_parameters ( args , cell_body , date_time = datetime . datetime . now ( ) ) : env = google . datalab . utils . commands . notebook_environment ( ) config = google . datalab . utils . commands . parse_config ( cell_body , env = env , as_dict = False ) sql = args [ 'query' ] if sql is None : raise Exception ( 'Cannot extract query parameters in non-query cell' ) # Validate query_params if config : jsonschema . validate ( config , BigQuerySchema . QUERY_PARAMS_SCHEMA ) config = config or { } config_parameters = config . get ( 'parameters' , [ ] ) return bigquery . Query . get_query_parameters ( config_parameters , date_time = date_time )
Extract query parameters from cell body if provided Also validates the cell body schema using jsonschema to catch errors before sending the http request . This validation isn t complete however ; it does not validate recursive schemas but it acts as a good filter against most simple schemas
189
56
236,997
def _udf_cell ( args , cell_body ) : udf_name = args [ 'name' ] if not udf_name : raise Exception ( 'Declaration must be of the form %%bq udf --name <variable name>' ) # Parse out parameters, return type, and imports param_pattern = r'^\s*\/\/\s*@param\s+([<>\w]+)\s+([<>\w,\s]+)\s*$' returns_pattern = r'^\s*\/\/\s*@returns\s+([<>\w,\s]+)\s*$' import_pattern = r'^\s*\/\/\s*@import\s+(\S+)\s*$' params = re . findall ( param_pattern , cell_body , re . MULTILINE ) return_type = re . findall ( returns_pattern , cell_body , re . MULTILINE ) imports = re . findall ( import_pattern , cell_body , re . MULTILINE ) if len ( return_type ) < 1 : raise Exception ( 'UDF return type must be defined using // @returns <type>' ) if len ( return_type ) > 1 : raise Exception ( 'Found more than one return type definition' ) return_type = return_type [ 0 ] # Finally build the UDF object udf = bigquery . UDF ( udf_name , cell_body , return_type , params , args [ 'language' ] , imports ) google . datalab . utils . commands . notebook_environment ( ) [ udf_name ] = udf
Implements the Bigquery udf cell magic for ipython notebooks .
367
15
236,998
def _datasource_cell ( args , cell_body ) : name = args [ 'name' ] paths = args [ 'paths' ] data_format = ( args [ 'format' ] or 'CSV' ) . lower ( ) compressed = args [ 'compressed' ] or False # Get the source schema from the cell body record = google . datalab . utils . commands . parse_config ( cell_body , google . datalab . utils . commands . notebook_environment ( ) , as_dict = False ) jsonschema . validate ( record , BigQuerySchema . TABLE_SCHEMA_SCHEMA ) schema = bigquery . Schema ( record [ 'schema' ] ) # Finally build the datasource object datasource = bigquery . ExternalDataSource ( source = paths , source_format = data_format , compressed = compressed , schema = schema ) google . datalab . utils . commands . notebook_environment ( ) [ name ] = datasource
Implements the BigQuery datasource cell magic for ipython notebooks .
216
15
236,999
def _query_cell ( args , cell_body ) : name = args [ 'name' ] udfs = args [ 'udfs' ] datasources = args [ 'datasources' ] subqueries = args [ 'subqueries' ] # Finally build the query object query = bigquery . Query ( cell_body , env = IPython . get_ipython ( ) . user_ns , udfs = udfs , data_sources = datasources , subqueries = subqueries ) # if no name is specified, execute this query instead of defining it if name is None : return query . execute ( ) . result ( ) else : google . datalab . utils . commands . notebook_environment ( ) [ name ] = query
Implements the BigQuery cell magic for used to build SQL objects .
164
15