idx int64 0 251k | question stringlengths 53 3.53k | target stringlengths 5 1.23k | len_question int64 20 893 | len_target int64 3 238 |
|---|---|---|---|---|
236,800 | def load_images ( image_files , resize = True ) : images = [ ] for image_file in image_files : with file_io . FileIO ( image_file , 'r' ) as ff : images . append ( ff . read ( ) ) if resize is False : return images # To resize, run a tf session so we can reuse 'decode_and_resize()' # which is used in prediction graph. This makes sure we don't lose # any quality in prediction, while decreasing the size of the images # submitted to the model over network. image_str_tensor = tf . placeholder ( tf . string , shape = [ None ] ) image = tf . map_fn ( resize_image , image_str_tensor , back_prop = False ) feed_dict = collections . defaultdict ( list ) feed_dict [ image_str_tensor . name ] = images with tf . Session ( ) as sess : images_resized = sess . run ( image , feed_dict = feed_dict ) return images_resized | Load images from files and optionally resize it . | 228 | 9 |
236,801 | def process_prediction_results ( results , show_image ) : import pandas as pd if ( is_in_IPython ( ) and show_image is True ) : import IPython for image_url , image , label_and_score in results : IPython . display . display_html ( '<p style="font-size:28px">%s(%.5f)</p>' % label_and_score , raw = True ) IPython . display . display ( IPython . display . Image ( data = image ) ) result_dict = [ { 'image_url' : url , 'label' : r [ 0 ] , 'score' : r [ 1 ] } for url , _ , r in results ] return pd . DataFrame ( result_dict ) | Create DataFrames out of prediction results and display images in IPython if requested . | 170 | 16 |
236,802 | def repackage_to_staging ( output_path ) : import google . datalab . ml as ml # Find the package root. __file__ is under [package_root]/mltoolbox/image/classification. package_root = os . path . join ( os . path . dirname ( __file__ ) , '../../../' ) # We deploy setup.py in the same dir for repackaging purpose. setup_py = os . path . join ( os . path . dirname ( __file__ ) , 'setup.py' ) staging_package_url = os . path . join ( output_path , 'staging' , 'image_classification.tar.gz' ) ml . package_and_copy ( package_root , setup_py , staging_package_url ) return staging_package_url | Repackage it from local installed location and copy it to GCS . | 184 | 15 |
236,803 | def generate_airflow_spec ( name , pipeline_spec ) : task_definitions = '' up_steam_statements = '' parameters = pipeline_spec . get ( 'parameters' ) for ( task_id , task_details ) in sorted ( pipeline_spec [ 'tasks' ] . items ( ) ) : task_def = PipelineGenerator . _get_operator_definition ( task_id , task_details , parameters ) task_definitions = task_definitions + task_def dependency_def = PipelineGenerator . _get_dependency_definition ( task_id , task_details . get ( 'up_stream' , [ ] ) ) up_steam_statements = up_steam_statements + dependency_def schedule_config = pipeline_spec . get ( 'schedule' , { } ) default_args = PipelineGenerator . _get_default_args ( schedule_config , pipeline_spec . get ( 'emails' , { } ) ) dag_definition = PipelineGenerator . _get_dag_definition ( name , schedule_config . get ( 'interval' , '@once' ) , schedule_config . get ( 'catchup' , False ) ) return PipelineGenerator . _imports + default_args + dag_definition + task_definitions + up_steam_statements | Gets the airflow python spec for the Pipeline object . | 290 | 11 |
236,804 | def _get_dependency_definition ( task_id , dependencies ) : set_upstream_statements = '' for dependency in dependencies : set_upstream_statements = set_upstream_statements + '{0}.set_upstream({1})' . format ( task_id , dependency ) + '\n' return set_upstream_statements | Internal helper collects all the dependencies of the task and returns the Airflow equivalent python sytax for specifying them . | 80 | 22 |
236,805 | def _get_operator_class_name ( task_detail_type ) : # TODO(rajivpb): Rename this var correctly. task_type_to_operator_prefix_mapping = { 'pydatalab.bq.execute' : ( 'Execute' , 'google.datalab.contrib.bigquery.operators._bq_execute_operator' ) , 'pydatalab.bq.extract' : ( 'Extract' , 'google.datalab.contrib.bigquery.operators._bq_extract_operator' ) , 'pydatalab.bq.load' : ( 'Load' , 'google.datalab.contrib.bigquery.operators._bq_load_operator' ) , 'Bash' : ( 'Bash' , 'airflow.operators.bash_operator' ) } ( operator_class_prefix , module ) = task_type_to_operator_prefix_mapping . get ( task_detail_type , ( None , __name__ ) ) format_string = '{0}Operator' operator_class_name = format_string . format ( operator_class_prefix ) if operator_class_prefix is None : return format_string . format ( task_detail_type ) , module return operator_class_name , module | Internal helper gets the name of the Airflow operator class . We maintain this in a map so this method really returns the enum name concatenated with the string Operator . | 300 | 34 |
236,806 | def _get_operator_param_name_and_values ( operator_class_name , task_details ) : # We make a clone and then remove 'type' and 'up_stream' since these aren't needed for the # the operator's parameters. operator_task_details = task_details . copy ( ) if 'type' in operator_task_details . keys ( ) : del operator_task_details [ 'type' ] if 'up_stream' in operator_task_details . keys ( ) : del operator_task_details [ 'up_stream' ] # We special-case certain operators if we do some translation of the parameter names. This is # usually the case when we use syntactic sugar to expose the functionality. # TODO(rajivpb): It should be possible to make this a lookup from the modules mapping via # getattr() or equivalent. Avoid hard-coding these class-names here. if ( operator_class_name == 'BigQueryOperator' ) : return PipelineGenerator . _get_bq_execute_params ( operator_task_details ) if ( operator_class_name == 'BigQueryToCloudStorageOperator' ) : return PipelineGenerator . _get_bq_extract_params ( operator_task_details ) if ( operator_class_name == 'GoogleCloudStorageToBigQueryOperator' ) : return PipelineGenerator . _get_bq_load_params ( operator_task_details ) return operator_task_details | Internal helper gets the name of the python parameter for the Airflow operator class . In some cases we do not expose the airflow parameter name in its native form but choose to expose a name that s more standard for Datalab or one that s more friendly . For example Airflow s BigQueryOperator uses bql for the query string but we want %%bq users in Datalab to use query . Hence a few substitutions that are specific to the Airflow operator need to be made . | 324 | 101 |
236,807 | def sample ( self , n ) : total = bq . Query ( 'select count(*) from %s' % self . _get_source ( ) ) . execute ( ) . result ( ) [ 0 ] . values ( ) [ 0 ] if n > total : raise ValueError ( 'sample larger than population' ) sampling = bq . Sampling . random ( percent = n * 100.0 / float ( total ) ) if self . _query is not None : source = self . _query else : source = 'SELECT * FROM `%s`' % self . _table sample = bq . Query ( source ) . execute ( sampling = sampling ) . result ( ) df = sample . to_dataframe ( ) return df | Samples data into a Pandas DataFrame . Note that it calls BigQuery so it will incur cost . | 156 | 22 |
236,808 | def size ( self ) : import tensorflow as tf if self . _size is None : self . _size = 0 options = tf . python_io . TFRecordOptions ( tf . python_io . TFRecordCompressionType . GZIP ) for tfexample_file in self . files : self . _size += sum ( 1 for x in tf . python_io . tf_record_iterator ( tfexample_file , options = options ) ) return self . _size | The number of instances in the data . If the underlying data source changes it may be outdated . | 102 | 19 |
236,809 | def list ( self , pattern = '*' ) : if self . _group_dict is None : self . _group_dict = collections . OrderedDict ( ( group . id , group ) for group in self . _client . list_groups ( ) ) return [ group for group in self . _group_dict . values ( ) if fnmatch . fnmatch ( group . display_name , pattern ) ] | Returns a list of groups that match the filters . | 89 | 10 |
236,810 | def as_dataframe ( self , pattern = '*' , max_rows = None ) : data = [ ] for i , group in enumerate ( self . list ( pattern ) ) : if max_rows is not None and i >= max_rows : break parent = self . _group_dict . get ( group . parent_id ) parent_display_name = '' if parent is None else parent . display_name data . append ( [ group . id , group . display_name , group . parent_id , parent_display_name , group . is_cluster , group . filter ] ) return pandas . DataFrame ( data , columns = self . _DISPLAY_HEADERS ) | Creates a pandas dataframe from the groups that match the filters . | 149 | 15 |
236,811 | def _find_recursive_dependencies ( sql , values , code , resolved_vars , resolving_vars = None ) : # Get the set of $var references in this SQL. dependencies = SqlStatement . _get_dependencies ( sql ) for dependency in dependencies : # Now we check each dependency. If it is in complete - i.e., we have an expansion # for it already - we just continue. if dependency in resolved_vars : continue # Look it up in our resolution namespace dictionary. dep = datalab . utils . get_item ( values , dependency ) # If it is a SQL module, get the main/last query from the module, so users can refer # to $module. Useful especially if final query in module has no DEFINE QUERY <name> part. if isinstance ( dep , types . ModuleType ) : dep = _utils . get_default_query_from_module ( dep ) # If we can't resolve the $name, give up. if dep is None : raise Exception ( "Unsatisfied dependency $%s" % dependency ) # If it is a SqlStatement, it may have its own $ references in turn; check to make # sure we don't have circular references, and if not, recursively expand it and add # it to the set of complete dependencies. if isinstance ( dep , SqlStatement ) : if resolving_vars is None : resolving_vars = [ ] elif dependency in resolving_vars : # Circular dependency raise Exception ( "Circular dependency in $%s" % dependency ) resolving_vars . append ( dependency ) SqlStatement . _find_recursive_dependencies ( dep . _sql , values , code , resolved_vars , resolving_vars ) resolving_vars . pop ( ) resolved_vars [ dependency ] = SqlStatement ( dep . _sql ) else : resolved_vars [ dependency ] = dep | Recursive helper method for expanding variables including transitive dependencies . | 416 | 12 |
236,812 | def format ( sql , args = None ) : resolved_vars = { } code = [ ] SqlStatement . _find_recursive_dependencies ( sql , args , code = code , resolved_vars = resolved_vars ) # Rebuild the SQL string, substituting just '$' for escaped $ occurrences, # variable references substituted with their values, or literal text copied # over as-is. parts = [ ] for ( escape , placeholder , _ , literal ) in SqlStatement . _get_tokens ( sql ) : if escape : parts . append ( '$' ) elif placeholder : variable = placeholder [ 1 : ] try : value = resolved_vars [ variable ] except KeyError as e : raise Exception ( 'Invalid sql. Unable to substitute $%s.' % e . args [ 0 ] ) if isinstance ( value , types . ModuleType ) : value = _utils . get_default_query_from_module ( value ) if isinstance ( value , SqlStatement ) : sql = value . format ( value . _sql , resolved_vars ) value = '(%s)' % sql elif '_repr_sql_' in dir ( value ) : # pylint: disable=protected-access value = value . _repr_sql_ ( ) elif isinstance ( value , basestring ) : value = SqlStatement . _escape_string ( value ) elif isinstance ( value , list ) or isinstance ( value , tuple ) : if isinstance ( value , tuple ) : value = list ( value ) expansion = '(' for v in value : if len ( expansion ) > 1 : expansion += ', ' if isinstance ( v , basestring ) : expansion += SqlStatement . _escape_string ( v ) else : expansion += str ( v ) expansion += ')' value = expansion else : value = str ( value ) parts . append ( value ) elif literal : parts . append ( literal ) expanded = '' . join ( parts ) return expanded | Resolve variable references in a query within an environment . | 432 | 11 |
236,813 | def _get_dependencies ( sql ) : dependencies = [ ] for ( _ , placeholder , dollar , _ ) in SqlStatement . _get_tokens ( sql ) : if placeholder : variable = placeholder [ 1 : ] if variable not in dependencies : dependencies . append ( variable ) elif dollar : raise Exception ( 'Invalid sql; $ with no following $ or identifier: %s.' % sql ) return dependencies | Return the list of variables referenced in this SQL . | 88 | 10 |
236,814 | def pymodule ( line , cell = None ) : parser = _commands . CommandParser . create ( 'pymodule' ) parser . add_argument ( '-n' , '--name' , help = 'the name of the python module to create and import' ) parser . set_defaults ( func = _pymodule_cell ) return _utils . handle_magic_line ( line , cell , parser ) | Creates and subsequently auto - imports a python module . | 89 | 11 |
236,815 | def compare_datetimes ( d1 , d2 ) : if d1 . tzinfo is None or d1 . tzinfo . utcoffset ( d1 ) is None : d1 = d1 . replace ( tzinfo = pytz . UTC ) if d2 . tzinfo is None or d2 . tzinfo . utcoffset ( d2 ) is None : d2 = d2 . replace ( tzinfo = pytz . UTC ) if d1 < d2 : return - 1 elif d1 > d2 : return 1 return 0 | Compares two datetimes safely whether they are timezone - naive or timezone - aware . | 125 | 19 |
236,816 | def pick_unused_port ( ) : s = socket . socket ( socket . AF_INET , socket . SOCK_STREAM ) s . bind ( ( 'localhost' , 0 ) ) addr , port = s . getsockname ( ) s . close ( ) return port | get an unused port on the VM . | 61 | 8 |
236,817 | def is_http_running_on ( port ) : try : conn = httplib . HTTPConnection ( '127.0.0.1:' + str ( port ) ) conn . connect ( ) conn . close ( ) return True except Exception : return False | Check if an http server runs on a given port . | 55 | 11 |
236,818 | def save_project_id ( project_id ) : # Try gcloud first. If gcloud fails (probably because it does not exist), then # write to a config file. try : subprocess . call ( [ 'gcloud' , 'config' , 'set' , 'project' , project_id ] ) except : config_file = os . path . join ( get_config_dir ( ) , 'config.json' ) config = { } if os . path . exists ( config_file ) : with open ( config_file ) as f : config = json . loads ( f . read ( ) ) config [ 'project_id' ] = project_id with open ( config_file , 'w' ) as f : f . write ( json . dumps ( config ) ) | Save project id to config file . | 169 | 7 |
236,819 | def get_default_project_id ( ) : # Try getting default project id from gcloud. If it fails try config.json. try : proc = subprocess . Popen ( [ 'gcloud' , 'config' , 'list' , '--format' , 'value(core.project)' ] , stdout = subprocess . PIPE ) stdout , _ = proc . communicate ( ) value = stdout . strip ( ) if proc . poll ( ) == 0 and value : if isinstance ( value , six . string_types ) : return value else : # Hope it's a utf-8 string encoded in bytes. Otherwise an exception will # be thrown and config.json will be checked. return value . decode ( ) except : pass config_file = os . path . join ( get_config_dir ( ) , 'config.json' ) if os . path . exists ( config_file ) : with open ( config_file ) as f : config = json . loads ( f . read ( ) ) if 'project_id' in config and config [ 'project_id' ] : return str ( config [ 'project_id' ] ) if os . getenv ( 'PROJECT_ID' ) is not None : return os . getenv ( 'PROJECT_ID' ) return None | Get default project id from config or environment var . | 281 | 10 |
236,820 | def _construct_context_for_args ( args ) : global_default_context = google . datalab . Context . default ( ) config = { } for key in global_default_context . config : config [ key ] = global_default_context . config [ key ] billing_tier_arg = args . get ( 'billing' , None ) if billing_tier_arg : config [ 'bigquery_billing_tier' ] = billing_tier_arg return google . datalab . Context ( project_id = global_default_context . project_id , credentials = global_default_context . credentials , config = config ) | Construct a new Context for the parsed arguments . | 139 | 9 |
236,821 | def python_portable_string ( string , encoding = 'utf-8' ) : if isinstance ( string , six . string_types ) : return string if six . PY3 : return string . decode ( encoding ) raise ValueError ( 'Unsupported type %s' % str ( type ( string ) ) ) | Converts bytes into a string type . | 68 | 8 |
236,822 | def _storage_list_buckets ( project , pattern ) : data = [ { 'Bucket' : 'gs://' + bucket . name , 'Created' : bucket . metadata . created_on } for bucket in datalab . storage . Buckets ( project_id = project ) if fnmatch . fnmatch ( bucket . name , pattern ) ] return datalab . utils . commands . render_dictionary ( data , [ 'Bucket' , 'Created' ] ) | List all storage buckets that match a pattern . | 104 | 9 |
236,823 | def _storage_list_keys ( bucket , pattern ) : data = [ { 'Name' : item . metadata . name , 'Type' : item . metadata . content_type , 'Size' : item . metadata . size , 'Updated' : item . metadata . updated_on } for item in _storage_get_keys ( bucket , pattern ) ] return datalab . utils . commands . render_dictionary ( data , [ 'Name' , 'Type' , 'Size' , 'Updated' ] ) | List all storage keys in a specified bucket that match a pattern . | 111 | 13 |
236,824 | def tables_list ( self , dataset_name , max_results = 0 , page_token = None ) : url = Api . _ENDPOINT + ( Api . _TABLES_PATH % ( dataset_name . project_id , dataset_name . dataset_id , '' , '' ) ) args = { } if max_results != 0 : args [ 'maxResults' ] = max_results if page_token is not None : args [ 'pageToken' ] = page_token return google . datalab . utils . Http . request ( url , args = args , credentials = self . credentials ) | Issues a request to retrieve a list of tables . | 136 | 11 |
236,825 | def _bag_of_words ( x ) : def _bow ( x ) : """Comptue BOW weights. As tf layer's sum combiner is used, the weights can be just ones. Tokens are not summed together here. """ return tf . SparseTensor ( indices = x . indices , values = tf . to_float ( tf . ones_like ( x . values ) ) , dense_shape = x . dense_shape ) return _bow ( x ) | Computes bag of words weights | 101 | 6 |
236,826 | def csv_header_and_defaults ( features , schema , stats , keep_target ) : target_name = get_target_name ( features ) if keep_target and not target_name : raise ValueError ( 'Cannot find target transform' ) csv_header = [ ] record_defaults = [ ] for col in schema : if not keep_target and col [ 'name' ] == target_name : continue # Note that numerical key columns do not have a stats entry, hence the use # of get(col['name'], {}) csv_header . append ( col [ 'name' ] ) if col [ 'type' ] . lower ( ) == INTEGER_SCHEMA : dtype = tf . int64 default = int ( stats [ 'column_stats' ] . get ( col [ 'name' ] , { } ) . get ( 'mean' , 0 ) ) elif col [ 'type' ] . lower ( ) == FLOAT_SCHEMA : dtype = tf . float32 default = float ( stats [ 'column_stats' ] . get ( col [ 'name' ] , { } ) . get ( 'mean' , 0.0 ) ) else : dtype = tf . string default = '' record_defaults . append ( tf . constant ( [ default ] , dtype = dtype ) ) return csv_header , record_defaults | Gets csv header and default lists . | 304 | 9 |
236,827 | def build_csv_serving_tensors_for_transform_step ( analysis_path , features , schema , stats , keep_target ) : csv_header , record_defaults = csv_header_and_defaults ( features , schema , stats , keep_target ) placeholder = tf . placeholder ( dtype = tf . string , shape = ( None , ) , name = 'csv_input_placeholder' ) tensors = tf . decode_csv ( placeholder , record_defaults ) raw_features = dict ( zip ( csv_header , tensors ) ) transform_fn = make_preprocessing_fn ( analysis_path , features , keep_target ) transformed_tensors = transform_fn ( raw_features ) transformed_features = { } # Expand the dims of non-sparse tensors for k , v in six . iteritems ( transformed_tensors ) : if isinstance ( v , tf . Tensor ) and v . get_shape ( ) . ndims == 1 : transformed_features [ k ] = tf . expand_dims ( v , - 1 ) else : transformed_features [ k ] = v return input_fn_utils . InputFnOps ( transformed_features , None , { "csv_example" : placeholder } ) | Builds a serving function starting from raw csv . | 279 | 11 |
236,828 | def build_csv_serving_tensors_for_training_step ( analysis_path , features , schema , stats , keep_target ) : transformed_features , _ , placeholder_dict = build_csv_serving_tensors_for_transform_step ( analysis_path = analysis_path , features = features , schema = schema , stats = stats , keep_target = keep_target ) transformed_features = image_feature_engineering ( features = features , feature_tensors_dict = transformed_features ) return input_fn_utils . InputFnOps ( transformed_features , None , placeholder_dict ) | Builds a serving function starting from raw csv used at model export time . | 134 | 16 |
236,829 | def build_csv_transforming_training_input_fn ( schema , features , stats , analysis_output_dir , raw_data_file_pattern , training_batch_size , num_epochs = None , randomize_input = False , min_after_dequeue = 1 , reader_num_threads = 1 , allow_smaller_final_batch = True ) : def raw_training_input_fn ( ) : """Training input function that reads raw data and applies transforms.""" if isinstance ( raw_data_file_pattern , six . string_types ) : filepath_list = [ raw_data_file_pattern ] else : filepath_list = raw_data_file_pattern files = [ ] for path in filepath_list : files . extend ( file_io . get_matching_files ( path ) ) filename_queue = tf . train . string_input_producer ( files , num_epochs = num_epochs , shuffle = randomize_input ) csv_id , csv_lines = tf . TextLineReader ( ) . read_up_to ( filename_queue , training_batch_size ) queue_capacity = ( reader_num_threads + 3 ) * training_batch_size + min_after_dequeue if randomize_input : _ , batch_csv_lines = tf . train . shuffle_batch ( tensors = [ csv_id , csv_lines ] , batch_size = training_batch_size , capacity = queue_capacity , min_after_dequeue = min_after_dequeue , enqueue_many = True , num_threads = reader_num_threads , allow_smaller_final_batch = allow_smaller_final_batch ) else : _ , batch_csv_lines = tf . train . batch ( tensors = [ csv_id , csv_lines ] , batch_size = training_batch_size , capacity = queue_capacity , enqueue_many = True , num_threads = reader_num_threads , allow_smaller_final_batch = allow_smaller_final_batch ) csv_header , record_defaults = csv_header_and_defaults ( features , schema , stats , keep_target = True ) parsed_tensors = tf . decode_csv ( batch_csv_lines , record_defaults , name = 'csv_to_tensors' ) raw_features = dict ( zip ( csv_header , parsed_tensors ) ) transform_fn = make_preprocessing_fn ( analysis_output_dir , features , keep_target = True ) transformed_tensors = transform_fn ( raw_features ) # Expand the dims of non-sparse tensors. This is needed by tf.learn. 
transformed_features = { } for k , v in six . iteritems ( transformed_tensors ) : if isinstance ( v , tf . Tensor ) and v . get_shape ( ) . ndims == 1 : transformed_features [ k ] = tf . expand_dims ( v , - 1 ) else : transformed_features [ k ] = v # image_feature_engineering does not need to be called as images are not # supported in raw csv for training. # Remove the target tensor, and return it directly target_name = get_target_name ( features ) if not target_name or target_name not in transformed_features : raise ValueError ( 'Cannot find target transform in features' ) transformed_target = transformed_features . pop ( target_name ) return transformed_features , transformed_target return raw_training_input_fn | Creates training input_fn that reads raw csv data and applies transforms . | 803 | 16 |
236,830 | def build_tfexample_transfored_training_input_fn ( schema , features , analysis_output_dir , raw_data_file_pattern , training_batch_size , num_epochs = None , randomize_input = False , min_after_dequeue = 1 , reader_num_threads = 1 , allow_smaller_final_batch = True ) : def transformed_training_input_fn ( ) : """Training input function that reads transformed data.""" if isinstance ( raw_data_file_pattern , six . string_types ) : filepath_list = [ raw_data_file_pattern ] else : filepath_list = raw_data_file_pattern files = [ ] for path in filepath_list : files . extend ( file_io . get_matching_files ( path ) ) filename_queue = tf . train . string_input_producer ( files , num_epochs = num_epochs , shuffle = randomize_input ) options = tf . python_io . TFRecordOptions ( compression_type = tf . python_io . TFRecordCompressionType . GZIP ) ex_id , ex_str = tf . TFRecordReader ( options = options ) . read_up_to ( filename_queue , training_batch_size ) queue_capacity = ( reader_num_threads + 3 ) * training_batch_size + min_after_dequeue if randomize_input : _ , batch_ex_str = tf . train . shuffle_batch ( tensors = [ ex_id , ex_str ] , batch_size = training_batch_size , capacity = queue_capacity , min_after_dequeue = min_after_dequeue , enqueue_many = True , num_threads = reader_num_threads , allow_smaller_final_batch = allow_smaller_final_batch ) else : _ , batch_ex_str = tf . train . batch ( tensors = [ ex_id , ex_str ] , batch_size = training_batch_size , capacity = queue_capacity , enqueue_many = True , num_threads = reader_num_threads , allow_smaller_final_batch = allow_smaller_final_batch ) feature_spec = { } feature_info = get_transformed_feature_info ( features , schema ) for name , info in six . iteritems ( feature_info ) : if info [ 'size' ] is None : feature_spec [ name ] = tf . VarLenFeature ( dtype = info [ 'dtype' ] ) else : feature_spec [ name ] = tf . FixedLenFeature ( shape = [ info [ 'size' ] ] , dtype = info [ 'dtype' ] ) parsed_tensors = tf . 
parse_example ( batch_ex_str , feature_spec ) # Expand the dims of non-sparse tensors. This is needed by tf.learn. transformed_features = { } for k , v in six . iteritems ( parsed_tensors ) : if isinstance ( v , tf . Tensor ) and v . get_shape ( ) . ndims == 1 : transformed_features [ k ] = tf . expand_dims ( v , - 1 ) else : # Sparse tensor transformed_features [ k ] = v transformed_features = image_feature_engineering ( features = features , feature_tensors_dict = transformed_features ) # Remove the target tensor, and return it directly target_name = get_target_name ( features ) if not target_name or target_name not in transformed_features : raise ValueError ( 'Cannot find target transform in features' ) transformed_target = transformed_features . pop ( target_name ) return transformed_features , transformed_target return transformed_training_input_fn | Creates training input_fn that reads transformed tf . example files . | 840 | 14 |
236,831 | def image_feature_engineering ( features , feature_tensors_dict ) : engineered_features = { } for name , feature_tensor in six . iteritems ( feature_tensors_dict ) : if name in features and features [ name ] [ 'transform' ] == IMAGE_TRANSFORM : with tf . name_scope ( name , 'Wx_plus_b' ) : hidden = tf . contrib . layers . fully_connected ( feature_tensor , IMAGE_HIDDEN_TENSOR_SIZE ) engineered_features [ name ] = hidden else : engineered_features [ name ] = feature_tensor return engineered_features | Add a hidden layer on image features . | 143 | 8 |
236,832 | def read_vocab_file ( file_path ) : with file_io . FileIO ( file_path , 'r' ) as f : vocab_pd = pd . read_csv ( f , header = None , names = [ 'vocab' , 'count' ] , dtype = str , # Prevent pd from converting numerical categories. na_filter = False ) # Prevent pd from converting 'NA' to a NaN. vocab = vocab_pd [ 'vocab' ] . tolist ( ) ex_count = vocab_pd [ 'count' ] . astype ( int ) . tolist ( ) return vocab , ex_count | Reads a vocab file to memeory . | 146 | 10 |
236,833 | def _to_query_json ( self ) : json = { 'compression' : 'GZIP' if self . _compressed else 'NONE' , 'ignoreUnknownValues' : self . _ignore_unknown_values , 'maxBadRecords' : self . _max_bad_records , 'sourceFormat' : self . _bq_source_format , 'sourceUris' : self . _source , } if self . _source_format == 'csv' and self . _csv_options : json [ 'csvOptions' ] = { } json [ 'csvOptions' ] . update ( self . _csv_options . _to_query_json ( ) ) if self . _schema : json [ 'schema' ] = { 'fields' : self . _schema . _bq_schema } return json | Return the table as a dictionary to be used as JSON in a query job . | 185 | 16 |
236,834 | def load_ipython_extension ( shell ) : # Inject our user agent on all requests by monkey-patching a wrapper around httplib2.Http.request. def _request ( self , uri , method = "GET" , body = None , headers = None , redirections = _httplib2 . DEFAULT_MAX_REDIRECTS , connection_type = None ) : if headers is None : headers = { } headers [ 'user-agent' ] = 'GoogleCloudDataLab/1.0' return _orig_request ( self , uri , method = method , body = body , headers = headers , redirections = redirections , connection_type = connection_type ) _httplib2 . Http . request = _request # Similarly for the requests library. def _init_session ( self ) : _orig_init ( self ) self . headers [ 'User-Agent' ] = 'GoogleCloudDataLab/1.0' _requests . Session . __init__ = _init_session # Be more tolerant with magics. If the user specified a cell magic that doesn't # exist and an empty cell body but a line magic with that name exists, run that # instead. Conversely, if the user specified a line magic that doesn't exist but # a cell magic exists with that name, run the cell magic with an empty body. def _run_line_magic ( self , magic_name , line ) : fn = self . find_line_magic ( magic_name ) if fn is None : cm = self . find_cell_magic ( magic_name ) if cm : return _run_cell_magic ( self , magic_name , line , None ) return _orig_run_line_magic ( self , magic_name , line ) def _run_cell_magic ( self , magic_name , line , cell ) : if cell is None or len ( cell ) == 0 or cell . isspace ( ) : fn = self . find_line_magic ( magic_name ) if fn : return _orig_run_line_magic ( self , magic_name , line ) # IPython will complain if cell is empty string but not if it is None cell = None return _orig_run_cell_magic ( self , magic_name , line , cell ) _shell . InteractiveShell . run_cell_magic = _run_cell_magic _shell . InteractiveShell . run_line_magic = _run_line_magic # Define global 'project_id' and 'set_project_id' functions to manage the default project ID. 
We # do this conditionally in a try/catch # to avoid the call to Context.default() when running tests # which mock IPython.get_ipython(). def _get_project_id ( ) : try : return google . datalab . Context . default ( ) . project_id except Exception : return None def _set_project_id ( project_id ) : context = google . datalab . Context . default ( ) context . set_project_id ( project_id ) try : from datalab . context import Context as _old_context _old_context . default ( ) . set_project_id ( project_id ) except ImportError : # If the old library is not loaded, then we don't have to do anything pass try : if 'datalab_project_id' not in _IPython . get_ipython ( ) . user_ns : _IPython . get_ipython ( ) . user_ns [ 'datalab_project_id' ] = _get_project_id _IPython . get_ipython ( ) . user_ns [ 'set_datalab_project_id' ] = _set_project_id except TypeError : pass | Called when the extension is loaded . | 818 | 8 |
236,835 | def _get_sql_args ( parser , args = None ) : overrides = None if args is None : tokens = [ ] elif isinstance ( args , basestring ) : command_line = ' ' . join ( args . split ( '\n' ) ) tokens = shlex . split ( command_line ) elif isinstance ( args , dict ) : overrides = args tokens = [ ] else : tokens = args args = { } if parser is None else vars ( parser . parse_args ( tokens ) ) if overrides : args . update ( overrides ) # Don't return any args that are None as we don't want to expand to 'None' return { arg : value for arg , value in args . items ( ) if value is not None } | Parse a set of %%sql arguments or get the default value of the arguments . | 166 | 17 |
236,836 | def get_sql_statement_with_environment ( item , args = None ) : if isinstance ( item , basestring ) : item = _sql_statement . SqlStatement ( item ) elif not isinstance ( item , _sql_statement . SqlStatement ) : item = SqlModule . get_default_query_from_module ( item ) if not item : raise Exception ( 'Expected a SQL statement or module but got %s' % str ( item ) ) env = { } if item . module : env . update ( item . module . __dict__ ) parser = env . get ( _utils . _SQL_MODULE_ARGPARSE , None ) if parser : args = SqlModule . _get_sql_args ( parser , args = args ) else : args = None if isinstance ( args , dict ) : env . update ( args ) return item , env | Given a SQLStatement string or module plus command line args or a dictionary return a SqlStatement and final dictionary for variable resolution . | 193 | 26 |
236,837 | def expand ( sql , args = None ) : sql , args = SqlModule . get_sql_statement_with_environment ( sql , args ) return _sql_statement . SqlStatement . format ( sql . _sql , args ) | Expand a SqlStatement query string or SqlModule with a set of arguments . | 51 | 18 |
236,838 | def parse_dataset_name ( name , project_id = None ) : _project_id = _dataset_id = None if isinstance ( name , basestring ) : # Try to parse as absolute name first. m = re . match ( _ABS_DATASET_NAME_PATTERN , name , re . IGNORECASE ) if m is not None : _project_id , _dataset_id = m . groups ( ) else : # Next try to match as a relative name implicitly scoped within current project. m = re . match ( _REL_DATASET_NAME_PATTERN , name ) if m is not None : groups = m . groups ( ) _dataset_id = groups [ 0 ] elif isinstance ( name , dict ) : try : _dataset_id = name [ 'dataset_id' ] _project_id = name [ 'project_id' ] except KeyError : pass else : # Try treat as an array or tuple if len ( name ) == 2 : # Treat as a tuple or array. _project_id , _dataset_id = name elif len ( name ) == 1 : _dataset_id = name [ 0 ] if not _dataset_id : raise Exception ( 'Invalid dataset name: ' + str ( name ) ) if not _project_id : _project_id = project_id return DatasetName ( _project_id , _dataset_id ) | Parses a dataset name into its individual parts . | 331 | 11 |
236,839 | def parse_table_name ( name , project_id = None , dataset_id = None ) : _project_id = _dataset_id = _table_id = _decorator = None if isinstance ( name , basestring ) : # Try to parse as absolute name first. m = re . match ( _ABS_TABLE_NAME_PATTERN , name , re . IGNORECASE ) if m is not None : _project_id , _dataset_id , _table_id , _decorator = m . groups ( ) else : # Next try to match as a relative name implicitly scoped within current project. m = re . match ( _REL_TABLE_NAME_PATTERN , name ) if m is not None : groups = m . groups ( ) _project_id , _dataset_id , _table_id , _decorator = project_id , groups [ 0 ] , groups [ 1 ] , groups [ 2 ] else : # Finally try to match as a table name only. m = re . match ( _TABLE_NAME_PATTERN , name ) if m is not None : groups = m . groups ( ) _project_id , _dataset_id , _table_id , _decorator = project_id , dataset_id , groups [ 0 ] , groups [ 1 ] elif isinstance ( name , dict ) : try : _table_id = name [ 'table_id' ] _dataset_id = name [ 'dataset_id' ] _project_id = name [ 'project_id' ] except KeyError : pass else : # Try treat as an array or tuple if len ( name ) == 4 : _project_id , _dataset_id , _table_id , _decorator = name elif len ( name ) == 3 : _project_id , _dataset_id , _table_id = name elif len ( name ) == 2 : _dataset_id , _table_id = name if not _table_id : raise Exception ( 'Invalid table name: ' + str ( name ) ) if not _project_id : _project_id = project_id if not _dataset_id : _dataset_id = dataset_id if not _decorator : _decorator = '' return TableName ( _project_id , _dataset_id , _table_id , _decorator ) | Parses a table name into its individual parts . | 542 | 11 |
236,840 | def _make_text_predict_fn ( self , labels , instance , column_to_explain ) : def _predict_fn ( perturbed_text ) : predict_input = [ ] for x in perturbed_text : instance_copy = dict ( instance ) instance_copy [ column_to_explain ] = x predict_input . append ( instance_copy ) df = _local_predict . get_prediction_results ( self . _model_dir , predict_input , self . _headers , with_source = False ) probs = _local_predict . get_probs_for_labels ( labels , df ) return np . asarray ( probs ) return _predict_fn | Create a predict_fn that can be used by LIME text explainer . | 158 | 16 |
236,841 | def _make_image_predict_fn ( self , labels , instance , column_to_explain ) : def _predict_fn ( perturbed_image ) : predict_input = [ ] for x in perturbed_image : instance_copy = dict ( instance ) instance_copy [ column_to_explain ] = Image . fromarray ( x ) predict_input . append ( instance_copy ) df = _local_predict . get_prediction_results ( self . _model_dir , predict_input , self . _headers , img_cols = self . _image_columns , with_source = False ) probs = _local_predict . get_probs_for_labels ( labels , df ) return np . asarray ( probs ) return _predict_fn | Create a predict_fn that can be used by LIME image explainer . | 177 | 16 |
236,842 | def _get_unique_categories ( self , df ) : categories = [ ] for col in self . _categorical_columns : categocial = pd . Categorical ( df [ col ] ) col_categories = list ( map ( str , categocial . categories ) ) col_categories . append ( '_UNKNOWN' ) categories . append ( col_categories ) return categories | Get all categories for each categorical columns from training data . | 87 | 12 |
236,843 | def _preprocess_data_for_tabular_explain ( self , df , categories ) : df = df . copy ( ) # Remove non tabular columns (text, image). for col in list ( df . columns ) : if col not in ( self . _categorical_columns + self . _numeric_columns ) : del df [ col ] # Convert categorical values into indices. for col_name , col_categories in zip ( self . _categorical_columns , categories ) : df [ col_name ] = df [ col_name ] . apply ( lambda x : col_categories . index ( str ( x ) ) if str ( x ) in col_categories else len ( col_categories ) - 1 ) # Make sure numeric values are really numeric for numeric_col in self . _numeric_columns : df [ numeric_col ] = df [ numeric_col ] . apply ( lambda x : float ( x ) ) return df . as_matrix ( self . _categorical_columns + self . _numeric_columns ) | Get preprocessed training set in numpy array and categorical names from raw training data . | 238 | 19 |
236,844 | def _make_tabular_predict_fn ( self , labels , instance , categories ) : def _predict_fn ( np_instance ) : df = pd . DataFrame ( np_instance , columns = ( self . _categorical_columns + self . _numeric_columns ) ) # Convert categorical indices back to categories. for col_name , col_categories in zip ( self . _categorical_columns , categories ) : df [ col_name ] = df [ col_name ] . apply ( lambda x : col_categories [ int ( x ) ] ) # Add columns that do not exist in the perturbed data, # such as key, text, and image data. for col_name in self . _headers : if col_name not in ( self . _categorical_columns + self . _numeric_columns ) : df [ col_name ] = instance [ col_name ] r = _local_predict . get_prediction_results ( self . _model_dir , df , self . _headers , with_source = False ) probs = _local_predict . get_probs_for_labels ( labels , r ) probs = np . asarray ( probs ) return probs return _predict_fn | Create a predict_fn that can be used by LIME tabular explainer . | 284 | 17 |
236,845 | def explain_tabular ( self , trainset , labels , instance , num_features = 5 , kernel_width = 3 ) : from lime . lime_tabular import LimeTabularExplainer if isinstance ( instance , six . string_types ) : instance = next ( csv . DictReader ( [ instance ] , fieldnames = self . _headers ) ) categories = self . _get_unique_categories ( trainset ) np_trainset = self . _preprocess_data_for_tabular_explain ( trainset , categories ) predict_fn = self . _make_tabular_predict_fn ( labels , instance , categories ) prediction_df = pd . DataFrame ( [ instance ] ) prediction_instance = self . _preprocess_data_for_tabular_explain ( prediction_df , categories ) explainer = LimeTabularExplainer ( np_trainset , feature_names = ( self . _categorical_columns + self . _numeric_columns ) , class_names = labels , categorical_features = range ( len ( categories ) ) , categorical_names = { i : v for i , v in enumerate ( categories ) } , kernel_width = kernel_width ) exp = explainer . explain_instance ( prediction_instance [ 0 ] , predict_fn , num_features = num_features , labels = range ( len ( labels ) ) ) return exp | Explain categorical and numeric features for a prediction . | 311 | 11 |
236,846 | def explain_text ( self , labels , instance , column_name = None , num_features = 10 , num_samples = 5000 ) : from lime . lime_text import LimeTextExplainer if len ( self . _text_columns ) > 1 and not column_name : raise ValueError ( 'There are multiple text columns in the input of the model. ' + 'Please specify "column_name".' ) elif column_name and column_name not in self . _text_columns : raise ValueError ( 'Specified column_name "%s" not found in the model input.' % column_name ) text_column_name = column_name if column_name else self . _text_columns [ 0 ] if isinstance ( instance , six . string_types ) : instance = next ( csv . DictReader ( [ instance ] , fieldnames = self . _headers ) ) predict_fn = self . _make_text_predict_fn ( labels , instance , text_column_name ) explainer = LimeTextExplainer ( class_names = labels ) exp = explainer . explain_instance ( instance [ text_column_name ] , predict_fn , labels = range ( len ( labels ) ) , num_features = num_features , num_samples = num_samples ) return exp | Explain a text field of a prediction . | 287 | 9 |
236,847 | def explain_image ( self , labels , instance , column_name = None , num_features = 100000 , num_samples = 300 , batch_size = 200 , hide_color = 0 ) : from lime . lime_image import LimeImageExplainer if len ( self . _image_columns ) > 1 and not column_name : raise ValueError ( 'There are multiple image columns in the input of the model. ' + 'Please specify "column_name".' ) elif column_name and column_name not in self . _image_columns : raise ValueError ( 'Specified column_name "%s" not found in the model input.' % column_name ) image_column_name = column_name if column_name else self . _image_columns [ 0 ] if isinstance ( instance , six . string_types ) : instance = next ( csv . DictReader ( [ instance ] , fieldnames = self . _headers ) ) predict_fn = self . _make_image_predict_fn ( labels , instance , image_column_name ) explainer = LimeImageExplainer ( ) with file_io . FileIO ( instance [ image_column_name ] , 'rb' ) as fi : im = Image . open ( fi ) im . thumbnail ( ( 299 , 299 ) , Image . ANTIALIAS ) rgb_im = np . asarray ( im . convert ( 'RGB' ) ) exp = explainer . explain_instance ( rgb_im , predict_fn , labels = range ( len ( labels ) ) , top_labels = None , hide_color = hide_color , num_features = num_features , num_samples = num_samples , batch_size = batch_size ) return exp | Explain an image of a prediction . | 380 | 8 |
236,848 | def probe_image ( self , labels , instance , column_name = None , num_scaled_images = 50 , top_percent = 10 ) : if len ( self . _image_columns ) > 1 and not column_name : raise ValueError ( 'There are multiple image columns in the input of the model. ' + 'Please specify "column_name".' ) elif column_name and column_name not in self . _image_columns : raise ValueError ( 'Specified column_name "%s" not found in the model input.' % column_name ) image_column_name = column_name if column_name else self . _image_columns [ 0 ] if isinstance ( instance , six . string_types ) : instance = next ( csv . DictReader ( [ instance ] , fieldnames = self . _headers ) ) image_path = instance [ image_column_name ] with file_io . FileIO ( image_path , 'rb' ) as fi : im = Image . open ( fi ) resized_image = im . resize ( ( 299 , 299 ) ) # Produce a list of scaled images, create instances (csv lines) from these images. step = 1. / num_scaled_images scales = np . arange ( 0.0 , 1.0 , step ) + step csv_lines = [ ] for s in scales : pixels = ( np . asarray ( resized_image ) * s ) . astype ( 'uint8' ) scaled_image = Image . fromarray ( pixels ) buf = io . BytesIO ( ) scaled_image . save ( buf , "JPEG" ) encoded_image = base64 . urlsafe_b64encode ( buf . getvalue ( ) ) . decode ( 'ascii' ) instance_copy = dict ( instance ) instance_copy [ image_column_name ] = encoded_image buf = six . StringIO ( ) writer = csv . DictWriter ( buf , fieldnames = self . _headers , lineterminator = '' ) writer . writerow ( instance_copy ) csv_lines . append ( buf . getvalue ( ) ) integrated_gradients_images = [ ] for label in labels : # Send to tf model to get gradients. grads = self . _image_gradients ( csv_lines , label , image_column_name ) integrated_grads = resized_image * np . average ( grads , axis = 0 ) # Gray scale the grads by removing color dimension. # abs() is for getting the most impactful pixels regardless positive or negative. grayed = np . 
average ( abs ( integrated_grads ) , axis = 2 ) grayed = np . transpose ( [ grayed , grayed , grayed ] , axes = [ 1 , 2 , 0 ] ) # Only show the most impactful pixels. p = np . percentile ( grayed , 100 - top_percent ) viz_window = np . where ( grayed > p , 1 , 0 ) vis = resized_image * viz_window im_vis = Image . fromarray ( np . uint8 ( vis ) ) integrated_gradients_images . append ( im_vis ) return resized_image , integrated_gradients_images | Get pixel importance of the image . | 706 | 7 |
236,849 | def get_model_details ( self , model_name ) : full_name = model_name if not model_name . startswith ( 'projects/' ) : full_name = ( 'projects/%s/models/%s' % ( self . _project_id , model_name ) ) return self . _api . projects ( ) . models ( ) . get ( name = full_name ) . execute ( ) | Get details of the specified model from CloudML Service . | 93 | 11 |
236,850 | def create ( self , model_name ) : body = { 'name' : model_name } parent = 'projects/' + self . _project_id # Model creation is instant. If anything goes wrong, Exception will be thrown. return self . _api . projects ( ) . models ( ) . create ( body = body , parent = parent ) . execute ( ) | Create a model . | 78 | 4 |
236,851 | def list ( self , count = 10 ) : import IPython data = [ ] # Add range(count) to loop so it will stop either it reaches count, or iteration # on self is exhausted. "self" is iterable (see __iter__() method). for _ , model in zip ( range ( count ) , self . get_iterator ( ) ) : element = { 'name' : model [ 'name' ] } if 'defaultVersion' in model : version_short_name = model [ 'defaultVersion' ] [ 'name' ] . split ( '/' ) [ - 1 ] element [ 'defaultVersion' ] = version_short_name data . append ( element ) IPython . display . display ( datalab . utils . commands . render_dictionary ( data , [ 'name' , 'defaultVersion' ] ) ) | List models under the current project in a table view . | 181 | 11 |
236,852 | def get_version_details ( self , version_name ) : name = ( '%s/versions/%s' % ( self . _full_model_name , version_name ) ) return self . _api . projects ( ) . models ( ) . versions ( ) . get ( name = name ) . execute ( ) | Get details of a version . | 70 | 6 |
236,853 | def deploy ( self , version_name , path , runtime_version = None ) : if not path . startswith ( 'gs://' ) : raise Exception ( 'Invalid path. Only Google Cloud Storage path (gs://...) is accepted.' ) # If there is no "export.meta" or"saved_model.pb" under path but there is # path/model/export.meta or path/model/saved_model.pb, then append /model to the path. if not datalab . storage . Object . from_url ( os . path . join ( path , 'export.meta' ) ) . exists ( ) and not datalab . storage . Object . from_url ( os . path . join ( path , 'saved_model.pb' ) ) . exists ( ) : if datalab . storage . Object . from_url ( os . path . join ( path , 'model' , 'export.meta' ) ) . exists ( ) or datalab . storage . Object . from_url ( os . path . join ( path , 'model' , 'saved_model.pb' ) ) . exists ( ) : path = os . path . join ( path , 'model' ) else : print ( 'Cannot find export.meta or saved_model.pb, but continue with deployment anyway.' ) body = { 'name' : self . _model_name } parent = 'projects/' + self . _project_id try : self . _api . projects ( ) . models ( ) . create ( body = body , parent = parent ) . execute ( ) except : # Trying to create an already existing model gets an error. Ignore it. pass body = { 'name' : version_name , 'deployment_uri' : path , } if runtime_version : body [ 'runtime_version' ] = runtime_version response = self . _api . projects ( ) . models ( ) . versions ( ) . create ( body = body , parent = self . _full_model_name ) . execute ( ) if 'name' not in response : raise Exception ( 'Invalid response from service. "name" is not found.' ) _util . wait_for_long_running_operation ( response [ 'name' ] ) | Deploy a model version to the cloud . | 487 | 8 |
236,854 | def delete ( self , version_name ) : name = ( '%s/versions/%s' % ( self . _full_model_name , version_name ) ) response = self . _api . projects ( ) . models ( ) . versions ( ) . delete ( name = name ) . execute ( ) if 'name' not in response : raise Exception ( 'Invalid response from service. "name" is not found.' ) _util . wait_for_long_running_operation ( response [ 'name' ] ) | Delete a version of model . | 112 | 6 |
236,855 | def predict ( self , version_name , data ) : full_version_name = ( '%s/versions/%s' % ( self . _full_model_name , version_name ) ) request = self . _api . projects ( ) . predict ( body = { 'instances' : data } , name = full_version_name ) request . headers [ 'user-agent' ] = 'GoogleCloudDataLab/1.0' result = request . execute ( ) if 'predictions' not in result : raise Exception ( 'Invalid response from service. Cannot find "predictions" in response.' ) return result [ 'predictions' ] | Get prediction results from feature instances . | 140 | 7 |
236,856 | def list ( self ) : import IPython # "self" is iterable (see __iter__() method). data = [ { 'name' : version [ 'name' ] . split ( ) [ - 1 ] , 'deploymentUri' : version [ 'deploymentUri' ] , 'createTime' : version [ 'createTime' ] } for version in self . get_iterator ( ) ] IPython . display . display ( datalab . utils . commands . render_dictionary ( data , [ 'name' , 'deploymentUri' , 'createTime' ] ) ) | List versions under the current model in a table view . | 133 | 11 |
236,857 | def create_feature_map ( features , feature_indices , output_dir ) : feature_map = [ ] for name , info in feature_indices : transform_name = features [ name ] [ 'transform' ] source_column = features [ name ] [ 'source_column' ] if transform_name in [ IDENTITY_TRANSFORM , SCALE_TRANSFORM ] : feature_map . append ( ( info [ 'index_start' ] , name ) ) elif transform_name in [ ONE_HOT_TRANSFORM , MULTI_HOT_TRANSFORM ] : vocab , _ = read_vocab_file ( os . path . join ( output_dir , VOCAB_ANALYSIS_FILE % source_column ) ) for i , word in enumerate ( vocab ) : if transform_name == ONE_HOT_TRANSFORM : feature_map . append ( ( info [ 'index_start' ] + i , '%s=%s' % ( source_column , word ) ) ) elif transform_name == MULTI_HOT_TRANSFORM : feature_map . append ( ( info [ 'index_start' ] + i , '%s has "%s"' % ( source_column , word ) ) ) elif transform_name == IMAGE_TRANSFORM : for i in range ( info [ 'size' ] ) : feature_map . append ( ( info [ 'index_start' ] + i , '%s image feature %d' % ( source_column , i ) ) ) return feature_map | Returns feature_map about the transformed features . | 349 | 9 |
236,858 | def create ( self , query ) : if isinstance ( query , _query . Query ) : query = query . sql try : response = self . _table . _api . tables_insert ( self . _table . name , query = query ) except Exception as e : raise e if 'selfLink' in response : return self raise Exception ( "View %s could not be created as it already exists" % str ( self ) ) | Creates the view with the specified query . | 91 | 9 |
236,859 | def sample ( self , fields = None , count = 5 , sampling = None , use_cache = True , dialect = None , billing_tier = None ) : return self . _table . sample ( fields = fields , count = count , sampling = sampling , use_cache = use_cache , dialect = dialect , billing_tier = billing_tier ) | Retrieves a sampling of data from the view . | 74 | 11 |
236,860 | def update ( self , friendly_name = None , description = None , query = None ) : self . _table . _load_info ( ) if query is not None : if isinstance ( query , _query . Query ) : query = query . sql self . _table . _info [ 'view' ] = { 'query' : query } self . _table . update ( friendly_name = friendly_name , description = description ) | Selectively updates View information . | 93 | 6 |
236,861 | def results ( self , use_cache = True , dialect = None , billing_tier = None ) : return self . _materialization . results ( use_cache = use_cache , dialect = dialect , billing_tier = billing_tier ) | Materialize the view synchronously . | 51 | 7 |
236,862 | def execute_async ( self , table_name = None , table_mode = 'create' , use_cache = True , priority = 'high' , allow_large_results = False , dialect = None , billing_tier = None ) : return self . _materialization . execute_async ( table_name = table_name , table_mode = table_mode , use_cache = use_cache , priority = priority , allow_large_results = allow_large_results , dialect = dialect , billing_tier = billing_tier ) | Materialize the View asynchronously . | 117 | 8 |
236,863 | def get_notebook_item ( name ) : env = notebook_environment ( ) return google . datalab . utils . get_item ( env , name ) | Get an item from the IPython environment . | 36 | 9 |
236,864 | def _get_data_from_list_of_dicts ( source , fields = '*' , first_row = 0 , count = - 1 , schema = None ) : if schema is None : schema = google . datalab . bigquery . Schema . from_data ( source ) fields = get_field_list ( fields , schema ) gen = source [ first_row : first_row + count ] if count >= 0 else source rows = [ { 'c' : [ { 'v' : row [ c ] } if c in row else { } for c in fields ] } for row in gen ] return { 'cols' : _get_cols ( fields , schema ) , 'rows' : rows } , len ( source ) | Helper function for _get_data that handles lists of dicts . | 162 | 14 |
236,865 | def _get_data_from_list_of_lists ( source , fields = '*' , first_row = 0 , count = - 1 , schema = None ) : if schema is None : schema = google . datalab . bigquery . Schema . from_data ( source ) fields = get_field_list ( fields , schema ) gen = source [ first_row : first_row + count ] if count >= 0 else source cols = [ schema . find ( name ) for name in fields ] rows = [ { 'c' : [ { 'v' : row [ i ] } for i in cols ] } for row in gen ] return { 'cols' : _get_cols ( fields , schema ) , 'rows' : rows } , len ( source ) | Helper function for _get_data that handles lists of lists . | 170 | 13 |
236,866 | def _get_data_from_dataframe ( source , fields = '*' , first_row = 0 , count = - 1 , schema = None ) : if schema is None : schema = google . datalab . bigquery . Schema . from_data ( source ) fields = get_field_list ( fields , schema ) rows = [ ] if count < 0 : count = len ( source . index ) df_slice = source . reset_index ( drop = True ) [ first_row : first_row + count ] for index , data_frame_row in df_slice . iterrows ( ) : row = data_frame_row . to_dict ( ) for key in list ( row . keys ( ) ) : val = row [ key ] if isinstance ( val , pandas . Timestamp ) : row [ key ] = val . to_pydatetime ( ) rows . append ( { 'c' : [ { 'v' : row [ c ] } if c in row else { } for c in fields ] } ) cols = _get_cols ( fields , schema ) return { 'cols' : cols , 'rows' : rows } , len ( source ) | Helper function for _get_data that handles Pandas DataFrames . | 259 | 14 |
236,867 | def parse_config_for_selected_keys ( content , keys ) : config_items = { key : None for key in keys } if not content : return config_items , content stripped = content . strip ( ) if len ( stripped ) == 0 : return { } , None elif stripped [ 0 ] == '{' : config = json . loads ( content ) else : config = yaml . load ( content ) if not isinstance ( config , dict ) : raise ValueError ( 'Invalid config.' ) for key in keys : config_items [ key ] = config . pop ( key , None ) if not config : return config_items , None if stripped [ 0 ] == '{' : content_out = json . dumps ( config , indent = 4 ) else : content_out = yaml . dump ( config , default_flow_style = False ) return config_items , content_out | Parse a config from a magic cell body for selected config keys . | 191 | 14 |
236,868 | def chart_html ( driver_name , chart_type , source , chart_options = None , fields = '*' , refresh_interval = 0 , refresh_data = None , control_defaults = None , control_ids = None , schema = None ) : div_id = _html . Html . next_id ( ) controls_html = '' if control_defaults is None : control_defaults = { } if control_ids is None : control_ids = [ ] if chart_options is not None and 'variables' in chart_options : controls = chart_options [ 'variables' ] del chart_options [ 'variables' ] # Just to make sure GCharts doesn't see them. controls_html , defaults , ids = parse_control_options ( controls ) # We augment what we are passed so that in principle we can have controls that are # shared by charts as well as controls that are specific to a chart. control_defaults . update ( defaults ) control_ids . extend ( ids ) , _HTML_TEMPLATE = """ <div class="bqgc-container"> {controls} <div class="bqgc {extra_class}" id="{id}"> </div> </div> <script src="/static/components/requirejs/require.js"></script> <script> require.config({{ paths: {{ base: '/static/base', d3: '//cdnjs.cloudflare.com/ajax/libs/d3/3.4.13/d3', plotly: 'https://cdn.plot.ly/plotly-1.5.1.min.js?noext', jquery: '//ajax.googleapis.com/ajax/libs/jquery/2.0.0/jquery.min' }}, map: {{ '*': {{ datalab: 'nbextensions/gcpdatalab' }} }}, shim: {{ plotly: {{ deps: ['d3', 'jquery'], exports: 'plotly' }} }} }}); require(['datalab/charting', 'datalab/element!{id}', 'base/js/events', 'datalab/style!/nbextensions/gcpdatalab/charting.css' ], function(charts, dom, events) {{ charts.render( '{driver}', dom, events, '{chart_type}', {control_ids}, {data}, {options}, {refresh_data}, {refresh_interval}, {total_rows}); }} ); </script> """ count = 25 if chart_type == 'paged_table' else - 1 data , total_count = get_data ( source , fields , control_defaults , 0 , count , schema ) if refresh_data is None : if isinstance ( source , basestring ) : source_index = get_data_source_index ( source ) refresh_data = { 'source_index' : 
source_index , 'name' : source_index } else : refresh_data = { 'name' : 'raw data' } refresh_data [ 'fields' ] = fields # TODO(gram): check if we need to augment env with user_ns return _HTML_TEMPLATE . format ( driver = driver_name , controls = controls_html , id = div_id , chart_type = chart_type , extra_class = " bqgc-controlled" if len ( controls_html ) else '' , data = json . dumps ( data , cls = google . datalab . utils . JSONEncoder ) , options = json . dumps ( chart_options , cls = google . datalab . utils . JSONEncoder ) , refresh_data = json . dumps ( refresh_data , cls = google . datalab . utils . JSONEncoder ) , refresh_interval = refresh_interval , control_ids = str ( control_ids ) , total_rows = total_count ) | Return HTML for a chart . | 885 | 6 |
236,869 | def default ( fields = None , count = 5 ) : projection = Sampling . _create_projection ( fields ) return lambda sql : 'SELECT %s FROM (%s) LIMIT %d' % ( projection , sql , count ) | Provides a simple default sampling strategy which limits the result set by a count . | 50 | 16 |
236,870 | def sorted ( field_name , ascending = True , fields = None , count = 5 ) : if field_name is None : raise Exception ( 'Sort field must be specified' ) direction = '' if ascending else ' DESC' projection = Sampling . _create_projection ( fields ) return lambda sql : 'SELECT %s FROM (%s) ORDER BY %s%s LIMIT %d' % ( projection , sql , field_name , direction , count ) | Provides a sampling strategy that picks from an ordered set of rows . | 98 | 14 |
236,871 | def hashed ( field_name , percent , fields = None , count = 0 ) : if field_name is None : raise Exception ( 'Hash field must be specified' ) def _hashed_sampling ( sql ) : projection = Sampling . _create_projection ( fields ) sql = 'SELECT %s FROM (%s) WHERE MOD(ABS(FARM_FINGERPRINT(CAST(%s AS STRING))), 100) < %d' % ( projection , sql , field_name , percent ) if count != 0 : sql = '%s LIMIT %d' % ( sql , count ) return sql return _hashed_sampling | Provides a sampling strategy based on hashing and selecting a percentage of data . | 143 | 15 |
236,872 | def random ( percent , fields = None , count = 0 ) : def _random_sampling ( sql ) : projection = Sampling . _create_projection ( fields ) sql = 'SELECT %s FROM (%s) WHERE rand() < %f' % ( projection , sql , ( float ( percent ) / 100.0 ) ) if count != 0 : sql = '%s LIMIT %d' % ( sql , count ) return sql return _random_sampling | Provides a sampling strategy that picks a semi - random set of rows . | 100 | 15 |
236,873 | def _auto ( method , fields , count , percent , key_field , ascending ) : if method == 'limit' : return Sampling . default ( fields = fields , count = count ) elif method == 'random' : return Sampling . random ( fields = fields , percent = percent , count = count ) elif method == 'hashed' : return Sampling . hashed ( fields = fields , field_name = key_field , percent = percent , count = count ) elif method == 'sorted' : return Sampling . sorted ( fields = fields , field_name = key_field , ascending = ascending , count = count ) else : raise Exception ( 'Unsupported sampling method: %s' % method ) | Construct a sampling function according to the provided sampling technique assuming all its needed fields are passed as arguments | 154 | 19 |
236,874 | def _to_query_json ( self ) : return { 'quote' : self . _quote , 'fieldDelimiter' : self . _delimiter , 'encoding' : self . _encoding . upper ( ) , 'skipLeadingRows' : self . _skip_leading_rows , 'allowQuotedNewlines' : self . _allow_quoted_newlines , 'allowJaggedRows' : self . _allow_jagged_rows } | Return the options as a dictionary to be used as JSON in a query job . | 105 | 16 |
236,875 | def jobs_insert_load ( self , source , table_name , append = False , overwrite = False , create = False , source_format = 'CSV' , field_delimiter = ',' , allow_jagged_rows = False , allow_quoted_newlines = False , encoding = 'UTF-8' , ignore_unknown_values = False , max_bad_records = 0 , quote = '"' , skip_leading_rows = 0 ) : url = Api . _ENDPOINT + ( Api . _JOBS_PATH % ( table_name . project_id , '' ) ) if isinstance ( source , basestring ) : source = [ source ] write_disposition = 'WRITE_EMPTY' if overwrite : write_disposition = 'WRITE_TRUNCATE' if append : write_disposition = 'WRITE_APPEND' data = { 'kind' : 'bigquery#job' , 'configuration' : { 'load' : { 'sourceUris' : source , 'destinationTable' : { 'projectId' : table_name . project_id , 'datasetId' : table_name . dataset_id , 'tableId' : table_name . table_id } , 'createDisposition' : 'CREATE_IF_NEEDED' if create else 'CREATE_NEVER' , 'writeDisposition' : write_disposition , 'sourceFormat' : source_format , 'ignoreUnknownValues' : ignore_unknown_values , 'maxBadRecords' : max_bad_records , } } } if source_format == 'CSV' : load_config = data [ 'configuration' ] [ 'load' ] load_config . update ( { 'fieldDelimiter' : field_delimiter , 'allowJaggedRows' : allow_jagged_rows , 'allowQuotedNewlines' : allow_quoted_newlines , 'quote' : quote , 'encoding' : encoding , 'skipLeadingRows' : skip_leading_rows } ) return datalab . utils . Http . request ( url , data = data , credentials = self . _credentials ) | Issues a request to load data from GCS to a BQ table | 487 | 15 |
236,876 | def jobs_get ( self , job_id , project_id = None ) : if project_id is None : project_id = self . _project_id url = Api . _ENDPOINT + ( Api . _JOBS_PATH % ( project_id , job_id ) ) return datalab . utils . Http . request ( url , credentials = self . _credentials ) | Issues a request to retrieve information about a job . | 90 | 11 |
236,877 | def datasets_insert ( self , dataset_name , friendly_name = None , description = None ) : url = Api . _ENDPOINT + ( Api . _DATASETS_PATH % ( dataset_name . project_id , '' ) ) data = { 'kind' : 'bigquery#dataset' , 'datasetReference' : { 'projectId' : dataset_name . project_id , 'datasetId' : dataset_name . dataset_id } , } if friendly_name : data [ 'friendlyName' ] = friendly_name if description : data [ 'description' ] = description return datalab . utils . Http . request ( url , data = data , credentials = self . _credentials ) | Issues a request to create a dataset . | 166 | 9 |
236,878 | def datasets_delete ( self , dataset_name , delete_contents ) : url = Api . _ENDPOINT + ( Api . _DATASETS_PATH % dataset_name ) args = { } if delete_contents : args [ 'deleteContents' ] = True return datalab . utils . Http . request ( url , method = 'DELETE' , args = args , credentials = self . _credentials , raw_response = True ) | Issues a request to delete a dataset . | 105 | 9 |
def datasets_update(self, dataset_name, dataset_info):
  """Updates the Dataset info.

  Args:
    dataset_name: the name of the dataset to update.
    dataset_info: the dataset resource to PUT.
  Returns:
    The parsed response of the PUT request.
  """
  target_url = Api._ENDPOINT + (Api._DATASETS_PATH % dataset_name)
  return datalab.utils.Http.request(target_url, method='PUT',
                                    data=dataset_info, credentials=self._credentials)
def datasets_get(self, dataset_name):
  """Issues a request to retrieve information about a dataset.

  Args:
    dataset_name: the name of the dataset to look up.
  Returns:
    The parsed response of the datasets.get request.
  """
  resource_url = Api._ENDPOINT + (Api._DATASETS_PATH % dataset_name)
  return datalab.utils.Http.request(resource_url, credentials=self._credentials)
def datasets_list(self, project_id=None, max_results=0, page_token=None):
  """Issues a request to list the datasets in the project.

  Args:
    project_id: the project to list; defaults to this API object's project.
    max_results: optional cap on the number of results (0 means no cap).
    page_token: optional token for fetching the next page of results.
  Returns:
    The parsed response of the datasets.list request.
  """
  effective_project = project_id if project_id is not None else self._project_id
  list_url = Api._ENDPOINT + (Api._DATASETS_PATH % (effective_project, ''))
  query_args = {}
  if max_results != 0:
    query_args['maxResults'] = max_results
  if page_token is not None:
    query_args['pageToken'] = page_token
  return datalab.utils.Http.request(list_url, args=query_args, credentials=self._credentials)
def tables_get(self, table_name):
  """Issues a request to retrieve information about a table.

  Args:
    table_name: the name of the table to look up.
  Returns:
    The parsed response of the tables.get request.
  """
  resource_url = Api._ENDPOINT + (Api._TABLES_PATH % table_name)
  return datalab.utils.Http.request(resource_url, credentials=self._credentials)
def tables_insert(self, table_name, schema=None, query=None, friendly_name=None,
                  description=None):
  """Issues a request to create a table or view in the specified dataset.

  A schema must be provided to create a Table, or a query must be provided to
  create a View.

  Args:
    table_name: the name of the table/view; must expose project_id, dataset_id, table_id.
    schema: optional list of schema fields (creates a Table).
    query: optional SQL query (creates a View).
    friendly_name: optional human-readable name.
    description: optional description.
  Returns:
    The parsed response of the tables.insert request.
  """
  url = Api._ENDPOINT + (Api._TABLES_PATH % (table_name.project_id,
                                             table_name.dataset_id, '', ''))
  resource = {
    'kind': 'bigquery#table',
    'tableReference': {
      'projectId': table_name.project_id,
      'datasetId': table_name.dataset_id,
      'tableId': table_name.table_id
    }
  }
  # Only one of schema/query is expected; both are attached verbatim if given.
  if schema:
    resource['schema'] = {'fields': schema}
  if query:
    resource['view'] = {'query': query}
  if friendly_name:
    resource['friendlyName'] = friendly_name
  if description:
    resource['description'] = description
  return datalab.utils.Http.request(url, data=resource, credentials=self._credentials)
def tabledata_insert_all(self, table_name, rows):
  """Issues a request to insert data into a table.

  Args:
    table_name: the name of the destination table.
    rows: the rows to stream-insert, in insertAll request format.
  Returns:
    The parsed response of the tabledata.insertAll request.
  """
  insert_url = Api._ENDPOINT + (Api._TABLES_PATH % table_name) + "/insertAll"
  payload = {'kind': 'bigquery#tableDataInsertAllRequest', 'rows': rows}
  return datalab.utils.Http.request(insert_url, data=payload, credentials=self._credentials)
def tabledata_list(self, table_name, start_index=None, max_results=None, page_token=None):
  """Retrieves the contents of a table.

  Args:
    table_name: the name of the table to read.
    start_index: optional zero-based row to start from.
    max_results: optional cap on the number of rows returned.
    page_token: optional token for fetching the next page of results.
  Returns:
    The parsed response of the tabledata.list request.
  """
  list_url = Api._ENDPOINT + (Api._TABLEDATA_PATH % table_name)
  query_args = {}
  # NOTE: start_index/max_results use truthiness, so an explicit 0 is treated
  # like None and omitted from the request (0 is the server default anyway).
  if start_index:
    query_args['startIndex'] = start_index
  if max_results:
    query_args['maxResults'] = max_results
  if page_token is not None:
    query_args['pageToken'] = page_token
  return datalab.utils.Http.request(list_url, args=query_args, credentials=self._credentials)
def table_delete(self, table_name):
  """Issues a request to delete a table.

  Args:
    table_name: the name of the table to delete.
  Returns:
    The raw HTTP response of the DELETE request.
  """
  resource_url = Api._ENDPOINT + (Api._TABLES_PATH % table_name)
  return datalab.utils.Http.request(resource_url, method='DELETE',
                                    credentials=self._credentials, raw_response=True)
def table_extract(self, table_name, destination, format='CSV', compress=True,
                  field_delimiter=',', print_header=True):
  """Exports the table to GCS.

  Args:
    table_name: the name of the table to export.
    destination: a GCS URL, or list of GCS URLs, to export to.
    format: the destination format ('CSV' by default).
    compress: whether to GZIP-compress the exported data.
    field_delimiter: the CSV field delimiter.
    print_header: whether to include a header row.
  Returns:
    The parsed response of the jobs.insert request.
  """
  url = Api._ENDPOINT + (Api._JOBS_PATH % (table_name.project_id, ''))
  # Normalize a single destination URL to a one-element list.
  if isinstance(destination, basestring):
    destination = [destination]
  data = {
    # 'projectId': table_name.project_id,  # Code sample shows this but it is not in job
    # reference spec. Filed as b/19235843
    'kind': 'bigquery#job',
    'configuration': {
      'extract': {
        'sourceTable': {
          'projectId': table_name.project_id,
          'datasetId': table_name.dataset_id,
          'tableId': table_name.table_id,
        },
        'compression': 'GZIP' if compress else 'NONE',
        'fieldDelimiter': field_delimiter,
        'printHeader': print_header,
        'destinationUris': destination,
        'destinationFormat': format,
      }
    }
  }
  return datalab.utils.Http.request(url, data=data, credentials=self._credentials)
def table_update(self, table_name, table_info):
  """Updates the Table info.

  Args:
    table_name: the name of the table to update.
    table_info: the table resource to PUT.
  Returns:
    The parsed response of the PUT request.
  """
  target_url = Api._ENDPOINT + (Api._TABLES_PATH % table_name)
  return datalab.utils.Http.request(target_url, method='PUT',
                                    data=table_info, credentials=self._credentials)
def extract_archive(archive_path, dest):
  """Extract a local or GCS archive file to a folder.

  Args:
    archive_path: local or GCS path of a '*.tar.gz' or '*.tar' file.
    dest: local directory to extract into; created if it does not exist.

  Raises:
    ValueError: if archive_path does not reference a file, or the file is not
        a supported archive type.
  """
  # Make the dest folder if it does not exist.
  if not os.path.isdir(dest):
    os.makedirs(dest)
  tmpfolder = None
  try:
    if (not tf.gfile.Exists(archive_path)) or tf.gfile.IsDirectory(archive_path):
      raise ValueError('archive path %s is not a file' % archive_path)
    if archive_path.startswith('gs://'):
      # Copy the file to a local temp folder first; tar cannot read GCS paths.
      tmpfolder = tempfile.mkdtemp()
      cmd_args = ['gsutil', 'cp', archive_path, tmpfolder]
      _shell_process.run_and_monitor(cmd_args, os.getpid())
      # BUG FIX: the original called os.path.name(), which does not exist;
      # os.path.basename() extracts the file-name component of the GCS path.
      archive_path = os.path.join(tmpfolder, os.path.basename(archive_path))
    if archive_path.lower().endswith('.tar.gz'):
      flags = '-xzf'
    elif archive_path.lower().endswith('.tar'):
      flags = '-xf'
    else:
      # Message fixed to match the actual checks above ('.tar', not '.tar.Z').
      raise ValueError('Only tar.gz or tar files are supported.')
    cmd_args = ['tar', flags, archive_path, '-C', dest]
    _shell_process.run_and_monitor(cmd_args, os.getpid())
  finally:
    # Clean up the temp copy of a GCS archive, if one was made.
    if tmpfolder:
      shutil.rmtree(tmpfolder)
def preprocess(train_dataset, output_dir, eval_dataset, checkpoint, pipeline_option):
  """Preprocess data in Cloud with DataFlow.

  Args:
    train_dataset: the training data source.
    output_dir: GCS directory that receives preprocessing output.
    eval_dataset: the evaluation data source (may be None).
    checkpoint: checkpoint GCS URL; a default is substituted when None.
    pipeline_option: optional dict of extra DataFlow pipeline options.
  Returns:
    A DataflowJob wrapping the submitted pipeline run.
  """
  import apache_beam as beam
  import google.datalab.utils
  from . import _preprocess

  if checkpoint is None:
    checkpoint = _util._DEFAULT_CHECKPOINT_GSURL

  job_name = ('preprocess-image-classification-' +
              datetime.datetime.now().strftime('%y%m%d-%H%M%S'))
  staging_package_url = _util.repackage_to_staging(output_dir)
  tmpdir = tempfile.mkdtemp()
  # suppress DataFlow warnings about wheel package as extra package.
  original_level = logging.getLogger().getEffectiveLevel()
  logging.getLogger().setLevel(logging.ERROR)
  try:
    # Workaround for DataFlow 2.0, which doesn't work well with extra packages in GCS.
    # Remove when the issue is fixed and new version of DataFlow is included in Datalab.
    extra_packages = [staging_package_url, _TF_GS_URL, _PROTOBUF_GS_URL]
    local_packages = [os.path.join(tmpdir, os.path.basename(p)) for p in extra_packages]
    for source, dest in zip(extra_packages, local_packages):
      file_io.copy(source, dest, overwrite=True)
    options = {
      'staging_location': os.path.join(output_dir, 'tmp', 'staging'),
      'temp_location': os.path.join(output_dir, 'tmp'),
      'job_name': job_name,
      'project': _util.default_project(),
      'extra_packages': local_packages,
      'teardown_policy': 'TEARDOWN_ALWAYS',
      'no_save_main_session': True
    }
    # User-supplied options override the defaults above.
    if pipeline_option is not None:
      options.update(pipeline_option)
    opts = beam.pipeline.PipelineOptions(flags=[], **options)
    p = beam.Pipeline('DataflowRunner', options=opts)
    _preprocess.configure_pipeline(p, train_dataset, eval_dataset, checkpoint,
                                   output_dir, job_name)
    job_results = p.run()
  finally:
    # Remove the local package copies and restore the logging level even if
    # pipeline submission fails.
    shutil.rmtree(tmpdir)
    logging.getLogger().setLevel(original_level)
  # In a notebook, render a link to the DataFlow console for this job.
  if (_util.is_in_IPython()):
    import IPython
    dataflow_url = ('https://console.developers.google.com/dataflow?project=%s' %
                    _util.default_project())
    html = 'Job "%s" submitted.' % job_name
    html += ('<p>Click <a href="%s" target="_blank">here</a> to track preprocessing job. <br/>'
             % dataflow_url)
    IPython.display.display_html(html, raw=True)
  return google.datalab.utils.DataflowJob(job_results)
def train(input_dir, batch_size, max_steps, output_dir, checkpoint, cloud_train_config):
  """Train model in the cloud with CloudML trainer service.

  Args:
    input_dir: the directory containing preprocessed results.
    batch_size: the training batch size.
    max_steps: the maximum number of training steps.
    output_dir: the GCS directory for training output (job_dir).
    checkpoint: checkpoint GCS URL; a default is substituted when None.
    cloud_train_config: a namedtuple of cloud training settings, merged into the
        job request via _asdict().
  Returns:
    The submitted training Job object.
  """
  import google.datalab.ml as ml

  if checkpoint is None:
    checkpoint = _util._DEFAULT_CHECKPOINT_GSURL
  staging_package_url = _util.repackage_to_staging(output_dir)
  job_args = {
    'input_dir': input_dir,
    'max_steps': max_steps,
    'batch_size': batch_size,
    'checkpoint': checkpoint
  }
  job_request = {
    'package_uris': [staging_package_url, _TF_GS_URL, _PROTOBUF_GS_URL],
    'python_module': 'mltoolbox.image.classification.task',
    'job_dir': output_dir,
    'args': job_args
  }
  # Merge the user-supplied cloud config (e.g. scale tier, region) into the request.
  job_request.update(dict(cloud_train_config._asdict()))
  # Job id is made unique with a timestamp.
  job_id = 'image_classification_train_' + datetime.datetime.now().strftime('%y%m%d_%H%M%S')
  job = ml.Job.submit_training(job_request, job_id)
  # In a notebook, render a link to the cloud log viewer for this job.
  if (_util.is_in_IPython()):
    import IPython
    log_url_query_strings = {
      'project': _util.default_project(),
      'resource': 'ml.googleapis.com/job_id/' + job.info['jobId']
    }
    # NOTE(review): urllib.urlencode is Python 2; Py3 would need urllib.parse.
    log_url = ('https://console.developers.google.com/logs/viewer?' +
               urllib.urlencode(log_url_query_strings))
    html = 'Job "%s" submitted.' % job.info['jobId']
    html += '<p>Click <a href="%s" target="_blank">here</a> to view cloud log. <br/>' % log_url
    IPython.display.display_html(html, raw=True)
  return job
def from_table(table, fields=None):
  """Return a Query for the given Table object.

  Args:
    table: the table to query.
    fields: a comma-separated string or list of field names; all fields ('*')
        when omitted.
  Returns:
    A Query selecting the requested fields from the table.
  """
  if fields is None:
    projection = '*'
  elif isinstance(fields, list):
    projection = ','.join(fields)
  else:
    projection = fields
  return Query('SELECT %s FROM %s' % (projection, table._repr_sql_()))
def _expanded_sql(self, sampling=None):
  """Get the expanded SQL of this object, including all subqueries, UDFs and
  external datasources.

  Args:
    sampling: an optional callable that wraps the final SQL in a sampling clause.
  Returns:
    The expanded SQL string.
  """
  # use lists to preserve the order of subqueries, bigquery will not like listing subqueries
  # out of order if they depend on each other. for example. the following will be rejected:
  # WITH q2 as (SELECT * FROM q1),
  #      q1 as (SELECT * FROM mytable),
  # SELECT * FROM q2
  # so when we're getting the dependencies, use recursion into a list to maintain the order
  udfs = []
  subqueries = []
  expanded_sql = ''

  def _recurse_subqueries(query):
    """Recursively scan subqueries and add their pieces to global scope udfs and subqueries
    """
    if query._subqueries:
      for subquery in query._subqueries:
        _recurse_subqueries(subquery[1])
      # Depth-first order: dependencies land in the list before their dependents.
      subqueries.extend([s for s in query._subqueries if s not in subqueries])
    if query._udfs:
      # query._udfs is a list of (name, UDF) tuples; we just want the UDF.
      udfs.extend([u[1] for u in query._udfs if u[1] not in udfs])

  _recurse_subqueries(self)

  # UDF definitions precede the WITH clause in the expanded SQL.
  if udfs:
    expanded_sql += '\n'.join([udf._expanded_sql() for udf in udfs])
    expanded_sql += '\n'

  def _indent_query(subquery):
    # NOTE(review): indent width reconstructed as two spaces — confirm against upstream.
    return '  ' + subquery._sql.replace('\n', '\n  ')

  if subqueries:
    expanded_sql += 'WITH ' + '\n),\n'.join(['%s AS (\n%s' % (sq[0], _indent_query(sq[1]))
                                             for sq in subqueries])
    expanded_sql += '\n)\n\n'

  expanded_sql += sampling(self._sql) if sampling else self._sql

  return expanded_sql
def run_and_monitor(args, pid_to_wait, std_out_filter_fn=None, cwd=None):
  """Start a process, and have it depend on another specified process.

  The started process is killed by a watchdog subprocess if the process with
  pid_to_wait exits first.

  Args:
    args: the command-line args of the process to start and monitor.
    pid_to_wait: the pid of the process to wait on.
    std_out_filter_fn: optional per-line predicate; a child stdout line is
        echoed only when it returns True (all lines echoed when None).
    cwd: optional working directory for the started process.
  """
  monitor_process = None
  try:
    p = subprocess.Popen(args,
                         cwd=cwd,
                         env=os.environ,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    pids_to_kill = [p.pid]
    # Spawn a small watchdog that kills the child once pid_to_wait exits.
    script = ('import %s;%s._wait_and_kill(%s, %s)' %
              (__name__, __name__, str(pid_to_wait), str(pids_to_kill)))
    monitor_process = subprocess.Popen(['python', '-c', script], env=os.environ)
    # Stream the child's output line by line until it terminates.
    while p.poll() is None:
      line = p.stdout.readline()
      if not six.PY2:
        line = line.decode()
      if std_out_filter_fn is None or std_out_filter_fn(line):
        sys.stdout.write(line)
        # Cannot do sys.stdout.flush(). It appears that too many flush() calls will hang browser.
  finally:
    if monitor_process:
      monitor_process.kill()
def created_on(self):
  """The creation timestamp."""
  return _parser.Parser.parse_timestamp(self._info.get('creationTime'))
def expires_on(self):
  """The timestamp for when the table will expire, or None if unknown."""
  expiration = self._info.get('expirationTime', None)
  if expiration is None:
    return None
  return _parser.Parser.parse_timestamp(expiration)
def modified_on(self):
  """The timestamp for when the table was last modified."""
  return _parser.Parser.parse_timestamp(self._info.get('lastModifiedTime'))
def _load_info(self):
  """Loads metadata about this table (no-op if already loaded).

  Raises:
    Exception if the tables.get request fails.
  """
  # The original wrapped the call in `except Exception as e: raise e`, a no-op
  # that only mangles the traceback in Python 2; errors now propagate as-is.
  if self._info is None:
    self._info = self._api.tables_get(self._name_parts)
def exists(self):
  """Checks if the table exists.

  Returns:
    True if the table exists; False otherwise.
  Raises:
    Exception if there was an error requesting information about the table.
  """
  try:
    info = self._api.tables_get(self._name_parts)
  except google.datalab.utils.RequestException as e:
    if e.status == 404:
      return False
    # Bare `raise` preserves the original traceback (the original used
    # `raise e`, and also had a redundant `except Exception: raise e` clause).
    raise
  # Cache the fetched metadata for later use.
  self._info = info
  return True
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.