docstring (string, length 52–499) | function (string, length 67–35.2k) | __index_level_0__ (int64, 52.6k–1.16M)
|---|---|---|
Create a Ladybug DateTime from a minute of the year.
Args:
moy: An integer for the minute of the year, where 0 <= moy < 525600.
|
def from_moy(cls, moy, leap_year=False):
if not leap_year:
num_of_minutes_until_month = (0, 44640, 84960, 129600, 172800, 217440,
260640, 305280, 349920, 393120, 437760,
480960, 525600)
else:
num_of_minutes_until_month = (0, 44640, 84960 + 1440, 129600 + 1440,
172800 + 1440, 217440 + 1440, 260640 + 1440,
305280 + 1440, 349920 + 1440, 393120 + 1440,
437760 + 1440, 480960 + 1440, 525600 + 1440)
# find month
for monthCount in range(12):
if int(moy) < num_of_minutes_until_month[monthCount + 1]:
month = monthCount + 1
break
try:
day = int((moy - num_of_minutes_until_month[month - 1]) / (60 * 24)) + 1
except UnboundLocalError:
raise ValueError(
"moy must be positive and smaller than 525600. Invalid input %d" % (moy)
)
else:
hour = int((moy / 60) % 24)
minute = int(moy % 60)
return cls(month, day, hour, minute, leap_year)
| 435,525
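A quick sanity check of the minute-of-year arithmetic (a sketch assuming this is ladybug's DateTime class, importable as below):
from ladybug.dt import DateTime
# minute 0 of the year is midnight on January 1
dt = DateTime.from_moy(0)
assert (dt.month, dt.day, dt.hour, dt.minute) == (1, 1, 0, 0)
# minute 525599 is the last minute of a non-leap year
dt = DateTime.from_moy(525599)
assert (dt.month, dt.day, dt.hour, dt.minute) == (12, 31, 23, 59)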
|
Create a new DateTime with the given number of minutes added.
Args:
minute: An integer number of minutes to add.
|
def add_minute(self, minute):
_moy = self.moy + int(minute)
return self.__class__.from_moy(_moy)
| 435,528
|
Converts a sequence of cartesian coordinates into a sequence of
line segments defined by spherical coordinates.
Args:
xyz = 2d numpy array, each row specifies a point in
cartesian coordinates (x,y,z) tracing out a
path in 3D space.
Returns:
r = lengths of each line segment (1D array)
theta = angles of line segments in XY plane (1D array)
phi = angles of line segments down from Z axis (1D array)
|
def sequential_spherical(xyz):
d_xyz = np.diff(xyz,axis=0)
r = np.linalg.norm(d_xyz,axis=1)
theta = np.arctan2(d_xyz[:,1], d_xyz[:,0])
hyp = d_xyz[:,0]**2 + d_xyz[:,1]**2
phi = np.arctan2(np.sqrt(hyp), d_xyz[:,2])
return (r,theta,phi)
| 435,843
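A minimal check of the convention used here (theta in the XY plane, phi measured down from +Z); assumes numpy is imported as np:
import numpy as np
xyz = np.array([[0., 0., 0.],
                [1., 0., 0.],   # unit step along +X
                [1., 0., 1.]])  # unit step along +Z
r, theta, phi = sequential_spherical(xyz)
assert np.allclose(r, [1., 1.])       # both segments have length 1
assert np.isclose(theta[0], 0.)       # +X step: theta = 0
assert np.isclose(phi[0], np.pi / 2)  # +X step lies in the XY plane
assert np.isclose(phi[1], 0.)         # +Z step points straight up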
|
Simple conversion of spherical to cartesian coordinates
Args:
r,theta,phi = scalar spherical coordinates
Returns:
x,y,z = scalar cartesian coordinates
|
def spherical_to_cartesian(r,theta,phi):
x = r * np.sin(phi) * np.cos(theta)
y = r * np.sin(phi) * np.sin(theta)
z = r * np.cos(phi)
return (x,y,z)
| 435,844
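Two spot checks showing this inverts the convention above (a unit vector along +X has phi = pi/2, theta = 0); assumes numpy is imported as np:
import numpy as np
assert np.allclose(spherical_to_cartesian(1.0, 0.0, np.pi / 2), (1.0, 0.0, 0.0))        # +X
assert np.allclose(spherical_to_cartesian(2.0, np.pi / 2, np.pi / 2), (0.0, 2.0, 0.0))  # +Y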
|
Find (x,y,z) ending coordinate of segment path along section
path.
Args:
targ_length = scalar specifying length of segment path, starting
from the beginning of the section path
xyz = coordinates specifying the section path
rcum = cumulative sum of section path length at each node in xyz
theta, phi = angles between each coordinate in xyz
|
def find_coord(targ_length,xyz,rcum,theta,phi):
# [1] Find spherical coordinates for the line segment containing
# the endpoint.
# [2] Find endpoint in spherical coords and convert to cartesian
i = np.nonzero(rcum <= targ_length)[0][-1]
if i == len(theta):
return xyz[-1,:]
else:
r_lcl = targ_length-rcum[i] # remaining length along line segment
(dx,dy,dz) = spherical_to_cartesian(r_lcl,theta[i],phi[i])
return xyz[i,:] + [dx,dy,dz]
| 435,845
|
Interpolates along a jagged path in 3D
Args:
xyz = section path specified in cartesian coordinates
nseg = number of segment paths in section path
Returns:
interp_xyz = interpolated path
|
def interpolate_jagged(xyz,nseg):
# Spherical coordinates specifying the angles of all line
# segments that make up the section path
(r,theta,phi) = sequential_spherical(xyz)
# cumulative length of section path at each coordinate
rcum = np.append(0,np.cumsum(r))
# breakpoints for segment paths along section path
breakpoints = np.linspace(0,rcum[-1],nseg+1)
# note: all nseg+1 breakpoints are needed below, since breakpoints[a]
# and breakpoints[a+1] bound segment path a
# Find segment paths
seg_paths = []
for a in range(nseg):
path = []
# find (x,y,z) starting coordinate of path
if a == 0:
start_coord = xyz[0,:]
else:
start_coord = end_coord # start at end of last path
path.append(start_coord)
# find all coordinates between the start and end points
start_length = breakpoints[a]
end_length = breakpoints[a+1]
mid_boolean = (rcum > start_length) & (rcum < end_length)
mid_indices = np.nonzero(mid_boolean)[0]
for mi in mid_indices:
path.append(xyz[mi,:])
# find (x,y,z) ending coordinate of path
end_coord = find_coord(end_length,xyz,rcum,theta,phi)
path.append(end_coord)
# Append path to list of segment paths
seg_paths.append(np.array(path))
# Return all segment paths
return seg_paths
| 435,846
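A worked example on a right-angle path of total length 2, split into two segment paths of length 1 each (uses the helpers from the previous entries); the first path must end exactly at the corner:
import numpy as np
xyz = np.array([[0., 0., 0.],
                [1., 0., 0.],   # corner of the path
                [1., 1., 0.]])
paths = interpolate_jagged(xyz, 2)
assert len(paths) == 2
assert np.allclose(paths[0][-1], [1., 0., 0.])  # first path ends at the corner
assert np.allclose(paths[1][-1], [1., 1., 0.])  # second path ends at the path's end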
|
Marks one or more locations along a section. Could be used to
mark the location of a recording or electrical stimulation.
Args:
h = hocObject to interface with neuron
section = reference to section
locs = float between 0 and 1, or array of floats
optional arguments specify details of marker
Returns:
line = reference to plotted markers
|
def mark_locations(h,section,locs,markspec='or',**kwargs):
# get list of cartesian coordinates specifying section path
xyz = get_section_path(h,section)
(r,theta,phi) = sequential_spherical(xyz)
rcum = np.append(0,np.cumsum(r))
# convert locs into lengths from the beginning of the path
if isinstance(locs, (float, np.float64)):
locs = np.array([locs])
elif isinstance(locs, list):
locs = np.array(locs)
lengths = locs*rcum[-1]
# find cartesian coordinates for markers
xyz_marks = []
for targ_length in lengths:
xyz_marks.append(find_coord(targ_length,xyz,rcum,theta,phi))
xyz_marks = np.array(xyz_marks)
# plot markers
line, = plt.plot(xyz_marks[:,0], xyz_marks[:,1], \
xyz_marks[:,2], markspec, **kwargs)
return line
| 435,851
|
Implements the datalab cell magic for MLWorkbench operations.
Args:
line: the contents of the ml command line.
Returns:
The results of executing the cell.
|
def ml(line, cell=None):
parser = google.datalab.utils.commands.CommandParser(
prog='%ml',
description=textwrap.dedent(''))  # original multi-line description elided from this snippet
dataset_parser = parser.subcommand(
'dataset',
formatter_class=argparse.RawTextHelpFormatter,
help='Create or explore datasets.')
dataset_sub_commands = dataset_parser.add_subparsers(dest='command')
dataset_create_parser = dataset_sub_commands.add_parser(
'create', help='Create datasets', formatter_class=argparse.RawTextHelpFormatter,
epilog=textwrap.dedent(''))  # elided
dataset_create_parser.add_argument('--name', required=True,
help='the name of the dataset to define. ')
dataset_create_parser.add_argument('--format', required=True,
choices=['csv', 'bigquery', 'transformed'],
help='The format of the data.')
dataset_create_parser.add_argument('--train', required=True,
help='The path of the training file pattern if format ' +
'is csv or transformed, or table name if format ' +
'is bigquery.')
dataset_create_parser.add_argument('--eval', required=True,
help='The path of the eval file pattern if format ' +
'is csv or transformed, or table name if format ' +
'is bigquery.')
dataset_create_parser.add_cell_argument('schema',
help='yaml representation of CSV schema, or path to ' +
'schema file. Only needed if format is csv.')
dataset_create_parser.set_defaults(func=_dataset_create)
dataset_explore_parser = dataset_sub_commands.add_parser(
'explore', help='Explore training data.')
dataset_explore_parser.add_argument('--name', required=True,
help='The name of the dataset to explore.')
dataset_explore_parser.add_argument('--overview', action='store_true', default=False,
help='Plot overview of sampled data. Set "sample_size" ' +
'to change the default sample size.')
dataset_explore_parser.add_argument('--facets', action='store_true', default=False,
help='Plot facets view of sampled data. Set ' +
'"sample_size" to change the default sample size.')
dataset_explore_parser.add_argument('--sample_size', type=int, default=1000,
help='sample size for overview or facets view. Only ' +
'used if either --overview or --facets is set.')
dataset_explore_parser.set_defaults(func=_dataset_explore)
analyze_parser = parser.subcommand(
'analyze',
formatter_class=argparse.RawTextHelpFormatter,
help='Analyze training data and generate stats, such as min/max/mean '
'for numeric values, vocabulary for text columns.',
epilog=textwrap.dedent(''))  # elided
analyze_parser.add_argument('--output', required=True,
help='path of output directory.')
analyze_parser.add_argument('--cloud', action='store_true', default=False,
help='whether to run analysis in cloud or local.')
analyze_parser.add_argument('--package', required=False,
help='A local or GCS tarball path to use as the source. '
'If not set, the default source package will be used.')
analyze_parser.add_cell_argument(
'data',
required=True,
help='')  # elided
analyze_parser.add_cell_argument(
'features',
required=True,
help=textwrap.dedent(''))  # elided
analyze_parser.set_defaults(func=_analyze)
transform_parser = parser.subcommand(
'transform',
formatter_class=argparse.RawTextHelpFormatter,
help='Transform the data into tf.example which is more efficient in training.',
epilog=textwrap.dedent(''))  # elided
transform_parser.add_argument('--analysis', required=True,
help='path of analysis output directory.')
transform_parser.add_argument('--output', required=True,
help='path of output directory.')
transform_parser.add_argument('--cloud', action='store_true', default=False,
help='whether to run transform in cloud or local.')
transform_parser.add_argument('--shuffle', action='store_true', default=False,
help='whether to shuffle the training data in output.')
transform_parser.add_argument('--batch_size', type=int, default=100,
help='number of instances in a batch to process once. '
'Larger batch is more efficient but may consume more memory.')
transform_parser.add_argument('--package', required=False,
help='A local or GCS tarball path to use as the source. '
'If not set, the default source package will be used.')
transform_parser.add_cell_argument(
'data',
required=True,
help='')  # elided
transform_parser.add_cell_argument(
'cloud_config',
help=textwrap.dedent(''))  # elided
transform_parser.set_defaults(func=_transform)
train_parser = parser.subcommand(
'train',
formatter_class=argparse.RawTextHelpFormatter,
help='Train a model.',
epilog=textwrap.dedent(''))  # elided
train_parser.add_argument('--analysis', required=True,
help='path of analysis output directory.')
train_parser.add_argument('--output', required=True,
help='path of trained model directory.')
train_parser.add_argument('--cloud', action='store_true', default=False,
help='whether to run training in cloud or local.')
train_parser.add_argument('--notb', action='store_true', default=False,
help='If set, tensorboard is not automatically started.')
train_parser.add_argument('--package', required=False,
help='A local or GCS tarball path to use as the source. '
'If not set, the default source package will be used.')
train_parser.add_cell_argument(
'data',
required=True,
help='')  # elided
package_model_help = subprocess.Popen(
['python', '-m', 'trainer.task', '--datalab-help'],
cwd=DEFAULT_PACKAGE_PATH,
stdout=subprocess.PIPE).communicate()[0]
package_model_help = ('model_args: a dictionary of model specific args, including:\n\n' +
package_model_help.decode())
train_parser.add_cell_argument('model_args', help=package_model_help)
train_parser.add_cell_argument(
'cloud_config',
help=textwrap.dedent(''.format(  # help template elided
url='https://cloud.google.com/sdk/gcloud/reference/ml-engine/jobs/submit/training')))
train_parser.set_defaults(func=_train)
predict_parser = parser.subcommand(
'predict',
formatter_class=argparse.RawTextHelpFormatter,
help='Predict with local or deployed models. (Good for small datasets).',
epilog=textwrap.dedent(''))  # elided
predict_parser.add_argument('--model', required=True,
help='The model path.')
predict_parser.add_argument('--no_show_image', action='store_true', default=False,
help='If set, do not add a column of images to the output.')
predict_parser.add_cell_argument(
'data',
required=True,
help=textwrap.dedent(''))  # elided
predict_parser.set_defaults(func=_predict)
batch_predict_parser = parser.subcommand(
'batch_predict',
formatter_class=argparse.RawTextHelpFormatter,
help='Batch prediction with local or deployed models. (Good for large datasets)',
epilog=textwrap.dedent(''))  # elided
batch_predict_parser.add_argument('--model', required=True,
help='The model path if not --cloud, or the id in '
'the form of model.version if --cloud.')
batch_predict_parser.add_argument('--output', required=True,
help='The path of output directory with prediction results. '
'If --cloud, it has to be GCS path.')
batch_predict_parser.add_argument('--format',
help='csv or json. For cloud run, '
'the only supported format is json.')
batch_predict_parser.add_argument('--batch_size', type=int, default=100,
help='number of instances in a batch to process once. '
'Larger batch is more efficient but may consume '
'more memory. Only used in local run.')
batch_predict_parser.add_argument('--cloud', action='store_true', default=False,
help='whether to run prediction in cloud or local.')
batch_predict_parser.add_cell_argument(
'data',
required=True,
help='Data to predict with. Only csv is supported.')
batch_predict_parser.add_cell_argument(
'cloud_config',
help=textwrap.dedent(''.format(  # help template elided
url='https://cloud.google.com/sdk/gcloud/reference/ml-engine/jobs/submit/prediction')))  # noqa
batch_predict_parser.set_defaults(func=_batch_predict)
explain_parser = parser.subcommand(
'explain',
formatter_class=argparse.RawTextHelpFormatter,
help='Explain a prediction with LIME tool.')
explain_parser.add_argument('--type', default='all', choices=['text', 'image', 'tabular', 'all'],
help='the type of column to explain.')
explain_parser.add_argument('--algorithm', choices=['lime', 'ig'], default='lime',
help='"lime" is the open sourced project for prediction explainer.' +
'"ig" means integrated gradients and currently only applies ' +
'to image.')
explain_parser.add_argument('--model', required=True,
help='path of the model directory used for prediction.')
explain_parser.add_argument('--labels', required=True,
help='comma separated labels to explain.')
explain_parser.add_argument('--column_name',
help='the name of the column to explain. Optional if text type ' +
'and there is only one text column, or image type and ' +
'there is only one image column.')
explain_parser.add_cell_argument('data', required=True,
help='Prediction Data. Can be a csv line, or a dict.')
explain_parser.add_cell_argument('training_data',
help='A csv or bigquery dataset defined by %%ml dataset. ' +
'Used by tabular explainer only to determine the ' +
'distribution of numeric and categorical values. ' +
'Suggest using original training dataset.')
# options specific for lime
explain_parser.add_argument('--num_features', type=int,
help='number of features to analyze. In text, it is number of ' +
'words. In image, it is number of areas. For lime only.')
explain_parser.add_argument('--num_samples', type=int,
help='size of the neighborhood to learn the linear model. ' +
'For lime only.')
explain_parser.add_argument('--hide_color', type=int, default=0,
help='the color to use for perturbed area. If -1, average of ' +
'each channel is used for each channel. For image only.')
explain_parser.add_argument('--include_negative', action='store_true', default=False,
help='whether to also show negative areas. For lime image only.')
explain_parser.add_argument('--overview', action='store_true', default=False,
help='whether to show an overview instead of the details view. ' +
'For lime text and tabular only.')
explain_parser.add_argument('--batch_size', type=int, default=100,
help='size of batches passed to prediction. For lime only.')
# options specific for integrated gradients
explain_parser.add_argument('--num_gradients', type=int, default=50,
help='the number of scaled images to get gradients from. Larger ' +
'number usually produces better results but slower.')
explain_parser.add_argument('--percent_show', type=int, default=10,
help='the percentage of top impactful pixels to show.')
explain_parser.set_defaults(func=_explain)
tensorboard_parser = parser.subcommand(
'tensorboard',
formatter_class=argparse.RawTextHelpFormatter,
help='Start/stop/list TensorBoard instances.')
tensorboard_sub_commands = tensorboard_parser.add_subparsers(dest='command')
tensorboard_start_parser = tensorboard_sub_commands.add_parser(
'start', help='Start a tensorboard instance.')
tensorboard_start_parser.add_argument('--logdir', required=True,
help='The local or GCS logdir path.')
tensorboard_start_parser.set_defaults(func=_tensorboard_start)
tensorboard_stop_parser = tensorboard_sub_commands.add_parser(
'stop', help='Stop a tensorboard instance.')
tensorboard_stop_parser.add_argument('--pid', required=True, type=int,
help='The pid of the tensorboard instance.')
tensorboard_stop_parser.set_defaults(func=_tensorboard_stop)
tensorboard_list_parser = tensorboard_sub_commands.add_parser(
'list', help='List tensorboard instances.')
tensorboard_list_parser.set_defaults(func=_tensorboard_list)
evaluate_parser = parser.subcommand(
'evaluate',
formatter_class=argparse.RawTextHelpFormatter,
help='Analyze model evaluation results, such as confusion matrix, ROC, RMSE.')
evaluate_sub_commands = evaluate_parser.add_subparsers(dest='command')
def _add_data_params_for_evaluate(parser):
parser.add_argument('--csv', help='csv file path patterns.')
parser.add_argument('--headers',
help='csv file headers. Required if csv is specified and ' +
'predict_results_schema.json does not exist in the same directory.')
parser.add_argument('--bigquery',
help='can be bigquery table, query as a string, or ' +
'a pre-defined query (%%bq query --name).')
evaluate_cm_parser = evaluate_sub_commands.add_parser(
'confusion_matrix', help='Get confusion matrix from evaluation results.')
_add_data_params_for_evaluate(evaluate_cm_parser)
evaluate_cm_parser.add_argument('--plot', action='store_true', default=False,
help='Whether to plot confusion matrix as graph.')
evaluate_cm_parser.add_argument('--size', type=int, default=10,
help='The size of the confusion matrix.')
evaluate_cm_parser.set_defaults(func=_evaluate_cm)
evaluate_accuracy_parser = evaluate_sub_commands.add_parser(
'accuracy', help='Get accuracy results from classification evaluation results.')
_add_data_params_for_evaluate(evaluate_accuracy_parser)
evaluate_accuracy_parser.set_defaults(func=_evaluate_accuracy)
evaluate_pr_parser = evaluate_sub_commands.add_parser(
'precision_recall', help='Get precision recall metrics from evaluation results.')
_add_data_params_for_evaluate(evaluate_pr_parser)
evaluate_pr_parser.add_argument('--plot', action='store_true', default=False,
help='Whether to plot precision recall as graph.')
evaluate_pr_parser.add_argument('--num_thresholds', type=int, default=20,
help='Number of thresholds, which determines how many ' +
'points are in the graph.')
evaluate_pr_parser.add_argument('--target_class', required=True,
help='The target class to determine correctness of ' +
'a prediction.')
evaluate_pr_parser.add_argument('--probability_column',
help='The name of the column holding the probability ' +
'value of the target class. If absent, the value ' +
'of target class is used.')
evaluate_pr_parser.set_defaults(func=_evaluate_pr)
evaluate_roc_parser = evaluate_sub_commands.add_parser(
'roc', help='Get ROC metrics from evaluation results.')
_add_data_params_for_evaluate(evaluate_roc_parser)
evaluate_roc_parser.add_argument('--plot', action='store_true', default=False,
help='Whether to plot ROC as graph.')
evaluate_roc_parser.add_argument('--num_thresholds', type=int, default=20,
help='Number of thresholds, which determines how many ' +
'points are in the graph.')
evaluate_roc_parser.add_argument('--target_class', required=True,
help='The target class to determine correctness of ' +
'a prediction.')
evaluate_roc_parser.add_argument('--probability_column',
help='The name of the column holding the probability ' +
'value of the target class. If absent, the value ' +
'of target class is used.')
evaluate_roc_parser.set_defaults(func=_evaluate_roc)
evaluate_regression_parser = evaluate_sub_commands.add_parser(
'regression', help='Get regression metrics from evaluation results.')
_add_data_params_for_evaluate(evaluate_regression_parser)
evaluate_regression_parser.set_defaults(func=_evaluate_regression)
model_parser = parser.subcommand(
'model',
help='Models and versions management such as deployment, deletion, listing.')
model_sub_commands = model_parser.add_subparsers(dest='command')
model_list_parser = model_sub_commands.add_parser(
'list', help='List models and versions.')
model_list_parser.add_argument('--name',
help='If absent, list all models of specified or current ' +
'project. If provided, list all versions of the ' +
'model.')
model_list_parser.add_argument('--project',
help='The project to list model(s) or version(s). If absent, ' +
'use Datalab\'s default project.')
model_list_parser.set_defaults(func=_model_list)
model_delete_parser = model_sub_commands.add_parser(
'delete', help='Delete models or versions.')
model_delete_parser.add_argument('--name', required=True,
help='If no "." in the name, try deleting the specified ' +
'model. If "model.version" is provided, try deleting ' +
'the specified version.')
model_delete_parser.add_argument('--project',
help='The project to delete model or version. If absent, ' +
'use Datalab\'s default project.')
model_delete_parser.set_defaults(func=_model_delete)
model_deploy_parser = model_sub_commands.add_parser(
'deploy', help='Deploy a model version.')
model_deploy_parser.add_argument('--name', required=True,
help='Must be model.version to indicate the model ' +
'and version name to deploy.')
model_deploy_parser.add_argument('--path', required=True,
help='The GCS path of the model to be deployed.')
model_deploy_parser.add_argument('--runtime_version',
help='The TensorFlow version to use for this model. ' +
'For example, "1.2.1". If absent, the current ' +
'TensorFlow version installed in Datalab will be used.')
model_deploy_parser.add_argument('--project',
help='The project to deploy a model version. If absent, ' +
'use Datalab\'s default project.')
model_deploy_parser.set_defaults(func=_model_deploy)
return google.datalab.utils.commands.handle_magic_line(line, cell, parser)
| 436,032
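Illustrative notebook usage (a sketch with hypothetical bucket paths; the schema fields and type tokens are shown illustratively only). Line arguments go on the magic line, while cell arguments such as schema go in the YAML cell body:
%%ml dataset create --name taxi --format csv --train gs://my-bucket/train-*.csv --eval gs://my-bucket/eval-*.csv
schema:
  - name: fare
    type: FLOAT
  - name: pickup_borough
    type: STRING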
|
Create a Metrics instance from csv file pattern.
Args:
input_csv_pattern: Path to CSV file pattern (with no header). Can be a local or GCS path.
headers: CSV headers.
schema_file: Path to a JSON file containing BigQuery schema. Used if "headers" is None.
Returns:
a Metrics instance.
Raises:
ValueError if both headers and schema_file are None.
|
def from_csv(input_csv_pattern, headers=None, schema_file=None):
if headers is not None:
names = headers
elif schema_file is not None:
with _util.open_local_or_gcs(schema_file, mode='r') as f:
schema = json.load(f)
names = [x['name'] for x in schema]
else:
raise ValueError('Either headers or schema_file is needed')
metrics = Metrics(input_csv_pattern=input_csv_pattern, headers=names)
return metrics
| 436,060
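Typical usage with hypothetical paths; headers must name the prediction output columns:
metrics = from_csv('gs://my-bucket/predict_results-*.csv',
                   headers=['target', 'predicted'])
# or let the names come from a BigQuery-style JSON schema file:
metrics = from_csv('gs://my-bucket/predict_results-*.csv',
                   schema_file='gs://my-bucket/predict_results_schema.json')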
|
Create a Metrics instance from a BigQuery query or table.
Args:
sql: A BigQuery table name or a query.
Returns:
a Metrics instance.
|
def from_bigquery(sql):
if isinstance(sql, bq.Query):
sql = sql._expanded_sql()
parts = sql.split('.')
if len(parts) == 1 or len(parts) > 3 or any(' ' in x for x in parts):
sql = '(' + sql + ')' # query, not a table name
else:
sql = '`' + sql + '`' # table name
metrics = Metrics(bigquery=sql)
return metrics
| 436,061
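The '.'-split heuristic treats anything that is not two or three dot-separated, whitespace-free parts as a query; for example:
from_bigquery('my_project.my_dataset.my_table')
# -> Metrics(bigquery='`my_project.my_dataset.my_table`')   (table name, backquoted)
from_bigquery('SELECT target, predicted FROM eval_results')
# -> Metrics(bigquery='(SELECT target, predicted FROM eval_results)')   (query, parenthesized)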
|
Get the nearest-rank percentile of the absolute prediction error from regression
model evaluation results.
Args:
percentile: a float between 0 and 100.
Returns:
the percentile value as a float.
Raises:
Exception if the CSV headers do not include 'target' or 'predicted', or BigQuery
does not return a 'target' or 'predicted' column, or if target or predicted is not
a number.
|
def percentile_nearest(self, percentile):
if self._input_csv_files:
df = self._get_data_from_csv_files()
if 'target' not in df or 'predicted' not in df:
raise ValueError('Cannot find "target" or "predicted" column')
df = df[['target', 'predicted']].apply(pd.to_numeric)
abs_errors = np.array((df['target'] - df['predicted']).apply(abs))
return np.percentile(abs_errors, percentile, interpolation='nearest')
elif self._bigquery:
# The original inline SQL template was elided from this snippet; it takes two
# substitutions, the percentile fraction and the BigQuery source. _PERCENTILE_SQL
# below is a stand-in name for that elided template.
query = bq.Query(_PERCENTILE_SQL % (float(percentile) / 100, self._bigquery))
df = self._get_data_from_bigquery([query])
if df.empty:
return None
return df['percentile'][0]
| 436,069
|
Initializes a UDF object from its pieces.
Args:
name: the name of the javascript function
code: function body implementing the logic.
return_type: BigQuery data type of the function return. See supported data types in
the BigQuery docs
params: list of parameter tuples: (name, type)
language: see list of supported languages in the BigQuery docs
imports: a list of GCS paths containing further support code.
|
def __init__(self, name, code, return_type, params=None, language='js', imports=None):
if not isinstance(return_type, basestring):
raise TypeError('Argument return_type should be a string. Instead got: ', type(return_type))
if params and not isinstance(params, list):
raise TypeError('Argument params should be a list of parameter names and types')
if imports and not isinstance(imports, list):
raise TypeError('Argument imports should be a list of GCS string paths')
if imports and language != 'js':
raise Exception('Imports are available for Javascript UDFs only')
self._name = name
self._code = code
self._return_type = return_type
self._params = params or []
self._language = language
self._imports = imports or []
self._sql = None
| 436,070
|
Creates the UDF part of a BigQuery query using its pieces
Args:
name: the name of the javascript function
code: function body implementing the logic.
return_type: BigQuery data type of the function return. See supported data types in
the BigQuery docs
params: dictionary of parameter names and types
language: see list of supported languages in the BigQuery docs
imports: a list of GCS paths containing further support code.
|
def _build_udf(name, code, return_type, params, language, imports):
params = ','.join(['%s %s' % named_param for named_param in params])
imports = ','.join(['library="%s"' % i for i in imports])
if language.lower() == 'sql':
udf = 'CREATE TEMPORARY FUNCTION {name} ({params})\n' + \
'RETURNS {return_type}\n' + \
'AS (\n' + \
'{code}\n' + \
');'
else:
# The triple-quoted function body was elided from this snippet; BigQuery's
# JavaScript UDF syntax wraps the code in """...""" before OPTIONS.
udf = 'CREATE TEMPORARY FUNCTION {name} ({params})\n' +\
'RETURNS {return_type}\n' + \
'LANGUAGE {language}\n' + \
'AS """\n' +\
'{code}\n' +\
'"""\n' +\
'OPTIONS (\n' +\
'{imports}\n' +\
');'
return udf.format(name=name, params=params, return_type=return_type,
language=language, code=code, imports=imports)
| 436,072
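Given the reconstructed template above, a sketch of the SQL produced for a JavaScript UDF (names and GCS path are hypothetical):
sql = _build_udf(name='double_len',
                 code='return s.length * 2;',
                 return_type='FLOAT64',
                 params=[('s', 'STRING')],
                 language='js',
                 imports=['gs://my-bucket/lib.js'])
# CREATE TEMPORARY FUNCTION double_len (s STRING)
# RETURNS FLOAT64
# LANGUAGE js
# AS """
# return s.length * 2;
# """
# OPTIONS (
# library="gs://my-bucket/lib.js"
# );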
|
Parse a gs:// URL into the bucket and object names.
Args:
name: a GCS URL of the form gs://bucket or gs://bucket/object
Returns:
The bucket name (with no gs:// prefix), and the object name if present. If the name
could not be parsed returns None for both.
|
def parse_name(name):
bucket = None
obj = None
m = re.match(_STORAGE_NAME, name)
if m:
# We want to return the last two groups as first group is the optional 'gs://'
bucket = m.group(1)
obj = m.group(2)
if obj is not None:
obj = obj[1:] # Strip '/'
else:
m = re.match('(' + _OBJECT_NAME + ')', name)
if m:
obj = m.group(1)
return bucket, obj
| 436,073
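The _STORAGE_NAME and _OBJECT_NAME regexes are module-level constants not shown in this snippet; a sketch of the intended behavior with a hypothetical stand-in pattern:
import re
# hypothetical stand-in for the module's _STORAGE_NAME regex
_STORAGE_NAME = r'gs://([a-z\d][a-z\d_.\-]+)(/.*)?$'
print(parse_name('gs://my-bucket/path/to/blob'))  # ('my-bucket', 'path/to/blob')
print(parse_name('gs://my-bucket'))               # ('my-bucket', None)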
|
Initializes an instance of a Bucket object.
Args:
name: the name of the bucket.
info: the information about the bucket if available.
context: an optional Context object providing project_id and credentials. If a specific
project id or credentials are unspecified, the default ones configured at the global
level are used.
|
def __init__(self, name, info=None, context=None):
if context is None:
context = google.datalab.Context.default()
self._context = context
self._api = _api.Api(context)
self._name = name
self._info = info
| 436,075
|
Retrieves a Storage Object for the specified key in this bucket.
The object need not exist.
Args:
key: the key of the object within the bucket.
Returns:
An Object instance representing the specified key.
|
def object(self, key):
return _object.Object(self._name, key, context=self._context)
| 436,077
|
Initializes an instance of a BucketList.
Args:
context: an optional Context object providing project_id and credentials. If a specific
project id or credentials are unspecified, the default ones configured at the global
level are used.
|
def __init__(self, context=None):
if context is None:
context = google.datalab.Context.default()
self._context = context
self._api = _api.Api(context)
self._project_id = context.project_id if context else self._api.project_id
| 436,080
|
Checks if the specified bucket exists.
Args:
name: the name of the bucket to lookup.
Returns:
True if the bucket exists; False otherwise.
Raises:
Exception if there was an error requesting information about the bucket.
|
def contains(self, name):
try:
self._api.buckets_get(name)
except google.datalab.utils.RequestException as e:
if e.status == 404:
return False
raise e
except Exception as e:
raise e
return True
| 436,081
|
Parse a gs:// URL into the bucket and item names.
Args:
name: a GCS URL of the form gs://bucket or gs://bucket/item
Returns:
The bucket name (with no gs:// prefix), and the item name if present. If the name
could not be parsed returns None for both.
|
def parse_name(name):
bucket = None
item = None
m = re.match(_STORAGE_NAME, name)
if m:
# We want to return the last two groups as first group is the optional 'gs://'
bucket = m.group(1)
item = m.group(2)
if item is not None:
item = item[1:] # Strip '/'
else:
m = re.match('(' + _OBJECT_NAME + ')', name)
if m:
item = m.group(1)
return bucket, item
| 436,083
|
Retrieves an Item object for the specified key in this bucket.
The item need not exist.
Args:
key: the key of the item within the bucket.
Returns:
An Item instance representing the specified key.
|
def item(self, key):
return _item.Item(self._name, key, context=self._context)
| 436,084
|
Creates the bucket.
Args:
project_id: the project in which to create the bucket.
Returns:
The bucket.
Raises:
Exception if there was an error creating the bucket.
|
def create(self, project_id=None):
if not self.exists():
if project_id is None:
project_id = self._api.project_id
try:
self._info = self._api.buckets_insert(self._name, project_id=project_id)
except Exception as e:
raise e
return self
| 436,086
|
Creates a new bucket.
Args:
name: a unique name for the new bucket.
Returns:
The newly created bucket.
Raises:
Exception if there was an error creating the bucket.
|
def create(self, name):
return Bucket(name, context=self._context).create(self._project_id)
| 436,087
|
Initializes an instance of an Airflow object.
Args:
gcs_dag_bucket: Bucket where Airflow expects dag files to be uploaded.
gcs_dag_file_path: File path of the Airflow dag files.
|
def __init__(self, gcs_dag_bucket, gcs_dag_file_path=None):
self._gcs_dag_bucket = gcs_dag_bucket
self._gcs_dag_file_path = gcs_dag_file_path or ''
| 436,093
|
Returns a list of resource descriptors that match the filters.
Args:
pattern: An optional pattern to further filter the descriptors. This can
include Unix shell-style wildcards. E.g. ``"aws*"``, ``"*cluster*"``.
Returns:
A list of ResourceDescriptor objects that match the filters.
|
def list(self, pattern='*'):
if self._descriptors is None:
self._descriptors = self._client.list_resource_descriptors(
filter_string=self._filter_string)
return [resource for resource in self._descriptors
if fnmatch.fnmatch(resource.type, pattern)]
| 436,095
|
Creates a pandas dataframe from the descriptors that match the filters.
Args:
pattern: An optional pattern to further filter the descriptors. This can
include Unix shell-style wildcards. E.g. ``"aws*"``, ``"*cluster*"``.
max_rows: The maximum number of descriptors to return. If None, return
all.
Returns:
A pandas dataframe containing matching resource descriptors.
|
def as_dataframe(self, pattern='*', max_rows=None):
data = []
for i, resource in enumerate(self.list(pattern)):
if max_rows is not None and i >= max_rows:
break
labels = ', '.join([l.key for l in resource.labels])
data.append([resource.type, resource.display_name, labels])
return pandas.DataFrame(data, columns=self._DISPLAY_HEADERS)
| 436,096
|
A helper function to extract user-friendly error messages from service exceptions.
Args:
message: An error message from an exception. If this is from our HTTP client code, it
will actually be a tuple.
Returns:
A modified version of the message that is less cryptic.
|
def _extract_gcs_api_response_error(message):
try:
if len(message) == 3:
# Try treat the last part as JSON
data = json.loads(message[2])
return data['error']['errors'][0]['message']
except Exception:
pass
return message
| 436,098
|
Implements the gcs cell magic for ipython notebooks.
Args:
line: the contents of the gcs line.
Returns:
The results of executing the cell.
|
def gcs(line, cell=None):
parser = google.datalab.utils.commands.CommandParser(prog='%gcs', description='')  # description elided
# TODO(gram): consider adding a move command too. I did try this already using the
# objects.patch API to change the object name but that fails with an error:
#
# Value 'newname' in content does not agree with value 'oldname'. This can happen when a value
# set through a parameter is inconsistent with a value set in the request.
#
# This is despite 'name' being identified as writable in the storage API docs.
# The alternative would be to use a copy/delete.
copy_parser = parser.subcommand('copy', 'Copy one or more Google Cloud Storage objects to a '
'different location.')
copy_parser.add_argument('-s', '--source', help='The name of the object(s) to copy', nargs='+')
copy_parser.add_argument('-d', '--destination', required=True,
help='The copy destination. For multiple source objects this must be a '
'bucket.')
copy_parser.set_defaults(func=_gcs_copy)
create_parser = parser.subcommand('create', 'Create one or more Google Cloud Storage buckets.')
create_parser.add_argument('-p', '--project', help='The project associated with the objects')
create_parser.add_argument('-b', '--bucket', help='The name of the bucket(s) to create',
nargs='+')
create_parser.set_defaults(func=_gcs_create)
delete_parser = parser.subcommand('delete', 'Delete one or more Google Cloud Storage buckets or '
'objects.')
delete_parser.add_argument('-b', '--bucket', nargs='*',
help='The name of the bucket(s) to remove')
delete_parser.add_argument('-o', '--object', nargs='*',
help='The name of the object(s) to remove')
delete_parser.set_defaults(func=_gcs_delete)
list_parser = parser.subcommand('list', 'List buckets in a project, or contents of a bucket.')
list_parser.add_argument('-p', '--project', help='The project associated with the objects')
list_parser.add_argument('-o', '--objects',
help='List objects under the given Google Cloud Storage path',
nargs='?')
list_parser.set_defaults(func=_gcs_list)
read_parser = parser.subcommand('read', 'Read the contents of a Google Cloud Storage object into '
'a Python variable.')
read_parser.add_argument('-o', '--object', help='The name of the object to read',
required=True)
read_parser.add_argument('-v', '--variable', required=True,
help='The name of the Python variable to set')
read_parser.set_defaults(func=_gcs_read)
view_parser = parser.subcommand('view', 'View the contents of a Google Cloud Storage object.')
view_parser.add_argument('-n', '--head', type=int, default=20,
help='The number of initial lines to view')
view_parser.add_argument('-t', '--tail', type=int, default=20,
help='The number of lines from end to view')
view_parser.add_argument('-o', '--object', help='The name of the object to view',
required=True)
view_parser.set_defaults(func=_gcs_view)
write_parser = parser.subcommand('write', 'Write the value of a Python variable to a Google '
'Cloud Storage object.')
write_parser.add_argument('-v', '--variable', help='The name of the source Python variable',
required=True)
write_parser.add_argument('-o', '--object', required=True,
help='The name of the destination Google Cloud Storage object to write')
write_parser.add_argument('-c', '--content_type', help='MIME type', default='text/plain')
write_parser.set_defaults(func=_gcs_write)
return google.datalab.utils.commands.handle_magic_line(line, cell, parser)
| 436,099
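Illustrative notebook usage with hypothetical bucket and object names:
%gcs create --bucket gs://my-new-bucket
%gcs write --variable text --object gs://my-new-bucket/hello.txt
%gcs read --object gs://my-new-bucket/hello.txt --variable text2
%gcs list --objects gs://my-new-bucket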
|
Parse command line arguments.
Args:
argv: list of command line arguments including program name.
Returns:
The parsed arguments as returned by argparse.ArgumentParser.
|
def parse_arguments(argv):
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent(''))  # elided
source_group = parser.add_mutually_exclusive_group(required=True)
source_group.add_argument(
'--csv',
metavar='FILE',
required=False,
action='append',
help='CSV data to transform.')
source_group.add_argument(
'--bigquery',
metavar='PROJECT_ID.DATASET.TABLE_NAME',
type=str,
required=False,
help=('Must be in the form `project.dataset.table_name`. BigQuery '
'data to transform'))
parser.add_argument(
'--analysis',
metavar='ANALYSIS_OUTPUT_DIR',
required=True,
help='The output folder of analyze')
parser.add_argument(
'--prefix',
metavar='OUTPUT_FILENAME_PREFIX',
required=True,
type=str)
parser.add_argument(
'--output',
metavar='DIR',
default=None,
required=True,
help=('Google Cloud Storage or Local directory in which '
'to place outputs.'))
parser.add_argument(
'--shuffle',
action='store_true',
default=False,
help='If used, data source is shuffled. This is recommended for training data.')
parser.add_argument(
'--batch-size',
metavar='N',
type=int,
default=100,
help='Larger values increase performance and peak memory usage.')
cloud_group = parser.add_argument_group(
title='Cloud Parameters',
description='These parameters are only used if --cloud is used.')
cloud_group.add_argument(
'--cloud',
action='store_true',
help='Run preprocessing on the cloud.')
cloud_group.add_argument(
'--job-name',
type=str,
help='Unique dataflow job name.')
cloud_group.add_argument(
'--project-id',
help='The project to which the job will be submitted.')
cloud_group.add_argument(
'--num-workers',
metavar='N',
type=int,
default=0,
help='Set to 0 to use the default size determined by the Dataflow service.')
cloud_group.add_argument(
'--worker-machine-type',
metavar='NAME',
type=str,
help='A machine name from https://cloud.google.com/compute/docs/machine-types. '
' If not given, the service uses the default machine type.')
cloud_group.add_argument(
'--async',
action='store_true',
help='If used, this script returns before the dataflow job is completed.')
args = parser.parse_args(args=argv[1:])
if args.cloud and not args.project_id:
raise ValueError('--project-id is needed for --cloud')
if args.async and not args.cloud:
raise ValueError('--async should only be used with --cloud')
if not args.job_name:
args.job_name = ('dataflow-job-{}'.format(
datetime.datetime.now().strftime('%Y%m%d%H%M%S')))
return args
| 436,114
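An illustrative invocation with hypothetical GCS paths (note the --async flag: this module targets Python 2 / pre-3.7, where async was not yet a reserved keyword):
args = parse_arguments([
    'transform',  # argv[0], the program name, is skipped
    '--csv', 'gs://my-bucket/train-*.csv',
    '--analysis', 'gs://my-bucket/analysis',
    '--prefix', 'features_train',
    '--output', 'gs://my-bucket/transformed',
    '--shuffle'])
assert args.shuffle and args.batch_size == 100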
|
Parse a csv line into a dict.
Args:
csv_string: a csv string. May contain missing values "a,,c"
column_names: list of column names
Returns:
Dict of {column_name, value_from_csv}. If there are missing values,
value_from_csv will be ''.
|
def decode_csv(csv_string, column_names):
import csv
r = next(csv.reader([csv_string]))
if len(r) != len(column_names):
raise ValueError('csv line %s does not have %d columns' % (csv_string, len(column_names)))
return {k: v for k, v in zip(column_names, r)}
| 436,118
|
Builds a csv string.
Args:
data_dict: dict of {column_name: 1 value}
column_names: list of column names
Returns:
A csv string version of data_dict
|
def encode_csv(data_dict, column_names):
import csv
import six
values = [str(data_dict[x]) for x in column_names]
str_buff = six.StringIO()
writer = csv.writer(str_buff, lineterminator='')
writer.writerow(values)
return str_buff.getvalue()
| 436,119
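A round trip through decode_csv (previous entry) and encode_csv, including a missing value:
row = decode_csv('7,,x y', ['a', 'b', 'c'])
assert row == {'a': '7', 'b': '', 'c': 'x y'}
assert encode_csv(row, ['a', 'b', 'c']) == '7,,x y'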
|
Makes a serialized tf.example.
Args:
transformed_json_data: dict of transformed data.
info_dict: output of feature_transforms.get_transformed_feature_info()
Returns:
The serialized tf.example version of transformed_json_data.
|
def serialize_example(transformed_json_data, info_dict):
import six
import tensorflow as tf
def _make_int64_list(x):
return tf.train.Feature(int64_list=tf.train.Int64List(value=x))
def _make_bytes_list(x):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=x))
def _make_float_list(x):
return tf.train.Feature(float_list=tf.train.FloatList(value=x))
if sorted(six.iterkeys(transformed_json_data)) != sorted(six.iterkeys(info_dict)):
raise ValueError('Keys do not match %s, %s' % (list(six.iterkeys(transformed_json_data)),
list(six.iterkeys(info_dict))))
ex_dict = {}
for name, info in six.iteritems(info_dict):
if info['dtype'] == tf.int64:
ex_dict[name] = _make_int64_list(transformed_json_data[name])
elif info['dtype'] == tf.float32:
ex_dict[name] = _make_float_list(transformed_json_data[name])
elif info['dtype'] == tf.string:
ex_dict[name] = _make_bytes_list(transformed_json_data[name])
else:
raise ValueError('Unsupported data type %s' % info['dtype'])
ex = tf.train.Example(features=tf.train.Features(feature=ex_dict))
return ex.SerializeToString()
| 436,120
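A minimal sketch, assuming info_dict has the shape produced by get_transformed_feature_info (a dict of {name: {'dtype': ...}}):
import tensorflow as tf
info_dict = {'id': {'dtype': tf.int64}, 'text': {'dtype': tf.string}}
serialized = serialize_example({'id': [42], 'text': [b'hello']}, info_dict)
example = tf.train.Example.FromString(serialized)
assert list(example.features.feature['id'].int64_list.value) == [42]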
|
Run the transformation graph on batched input data
Args:
element: list of csv strings, representing one batch input to the TF graph.
Returns:
dict containing the transformed data. Results are un-batched. Sparse
tensors are converted to lists.
|
def process(self, element):
import apache_beam as beam
import six
import tensorflow as tf
# This function is invoked by a separate sub-process so setting the logging level
# does not affect Datalab's kernel process.
tf.logging.set_verbosity(tf.logging.ERROR)
try:
clean_element = []
for line in element:
clean_element.append(line.rstrip())
# batch_result is list of numpy arrays with batch_size many rows.
batch_result = self._session.run(
fetches=self._transformed_features,
feed_dict={self._input_placeholder_tensor: clean_element})
# ex batch_result.
# Dense tensor: {'col1': array([[batch_1], [batch_2]])}
# Sparse tensor: {'col1': tf.SparseTensorValue(
# indices=array([[batch_1, 0], [batch_1, 1], ...,
# [batch_2, 0], [batch_2, 1], ...]],
# values=array[value, value, value, ...])}
# Unbatch the results.
for i in range(len(clean_element)):
transformed_features = {}
for name, value in six.iteritems(batch_result):
if isinstance(value, tf.SparseTensorValue):
batch_i_indices = value.indices[:, 0] == i
batch_i_values = value.values[batch_i_indices]
transformed_features[name] = batch_i_values.tolist()
else:
transformed_features[name] = value[i].tolist()
yield transformed_features
except Exception as e: # pylint: disable=broad-except
yield beam.pvalue.TaggedOutput('errors', (str(e), element))
| 436,127
|
Parses a row from query results into an equivalent object.
Args:
schema: the array of fields defining the schema of the data.
data: the JSON row from a query result.
Returns:
The parsed row object.
|
def parse_row(schema, data):
def parse_value(data_type, value):
if value is not None:
if value == 'null':
value = None
elif data_type == 'INTEGER':
value = int(value)
elif data_type == 'FLOAT':
value = float(value)
elif data_type == 'TIMESTAMP':
value = datetime.datetime.utcfromtimestamp(float(value))
elif data_type == 'BOOLEAN':
value = value == 'true'
elif (type(value) != str):
# TODO(gram): Handle nested JSON records
value = str(value)
return value
row = {}
if data is None:
return row
for i, (field, schema_field) in enumerate(zip(data['f'], schema)):
val = field['v']
name = schema_field['name']
data_type = schema_field['type']
repeated = True if 'mode' in schema_field and schema_field['mode'] == 'REPEATED' else False
if repeated and val is None:
row[name] = []
elif data_type == 'RECORD':
sub_schema = schema_field['fields']
if repeated:
row[name] = [Parser.parse_row(sub_schema, v['v']) for v in val]
else:
row[name] = Parser.parse_row(sub_schema, val)
elif repeated:
row[name] = [parse_value(data_type, v['v']) for v in val]
else:
row[name] = parse_value(data_type, val)
return row
| 436,128
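A small worked example with a BigQuery-style JSON row (in the source this is a static method on a Parser class, per the recursive Parser.parse_row calls):
schema = [{'name': 'word', 'type': 'STRING'},
          {'name': 'count', 'type': 'INTEGER'},
          {'name': 'tags', 'type': 'STRING', 'mode': 'REPEATED'}]
data = {'f': [{'v': 'hello'}, {'v': '7'}, {'v': [{'v': 'a'}, {'v': 'b'}]}]}
assert Parser.parse_row(schema, data) == {'word': 'hello', 'count': 7, 'tags': ['a', 'b']}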
|
Prediction with a tf savedmodel.
Args:
model_dir: directory that contains a saved model
input_csvlines: list of csv strings
Returns:
Dict in the form tensor_name:prediction_list. Note that the value is always
a list, even if there was only 1 row in input_csvlines.
|
def _tf_predict(model_dir, input_csvlines):
with tf.Graph().as_default(), tf.Session() as sess:
input_alias_map, output_alias_map = _tf_load_model(sess, model_dir)
csv_tensor_name = list(input_alias_map.values())[0]
results = sess.run(fetches=output_alias_map,
feed_dict={csv_tensor_name: input_csvlines})
# convert any scalar values to a list. This may happen when there is one
# example in input_csvlines and the model uses tf.squeeze on the output
# tensor.
if len(input_csvlines) == 1:
for k, v in six.iteritems(results):
if not isinstance(v, (list, np.ndarray)):
results[k] = [v]
# Convert bytes to string. In python3 the results may be bytes.
for k, v in six.iteritems(results):
if any(isinstance(x, bytes) for x in v):
results[k] = [x.decode('utf-8') for x in v]
return results
| 436,130
|
Get a local model's schema and features config.
Args:
model_dir: local or GCS path of a model.
Returns:
A tuple of schema (list) and features config (dict).
|
def get_model_schema_and_features(model_dir):
schema_file = os.path.join(model_dir, 'assets.extra', 'schema.json')
schema = json.loads(file_io.read_file_to_string(schema_file))
features_file = os.path.join(model_dir, 'assets.extra', 'features.json')
features_config = json.loads(file_io.read_file_to_string(features_file))
return schema, features_config
| 436,134
|
Initializes an instance of a CloudML Job.
Args:
name: the name of the job. It can be an operation full name
("projects/[project_id]/jobs/[operation_name]") or just [operation_name].
context: an optional Context object providing project_id and credentials.
|
def __init__(self, name, context=None):
super(Job, self).__init__(name)
if context is None:
context = datalab.Context.default()
self._context = context
self._api = discovery.build('ml', 'v1', credentials=self._context.credentials)
if not name.startswith('projects/'):
name = 'projects/' + self._context.project_id + '/jobs/' + name
self._name = name
self._refresh_state()
| 436,141
|
Initializes an instance of a CloudML Job list that is iterable ("for job in jobs()").
Args:
filter: filter string for retrieving jobs, such as "state=FAILED". Project id and
credentials are taken from the default Context.
|
def __init__(self, filter=None):
self._filter = filter
self._context = datalab.Context.default()
self._api = discovery.build('ml', 'v1', credentials=self._context.credentials)
self._page_size = 0
| 436,146
|
Parse command line arguments.
Args:
argv: list of command line arguments, including program name.
Returns:
An argparse Namespace object.
Raises:
ValueError: for bad parameters
|
def parse_arguments(argv):
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent(''))  # elided
parser.add_argument('--cloud',
action='store_true',
help='Analysis will use cloud services.')
parser.add_argument('--output',
metavar='DIR',
type=str,
required=True,
help='GCS or local folder')
input_group = parser.add_argument_group(
title='Data Source Parameters',
description='schema is only needed if using --csv')
# CSV input
input_group.add_argument('--csv',
metavar='FILE',
type=str,
required=False,
action='append',
help='Input CSV absolute file paths. May contain a '
'file pattern.')
input_group.add_argument('--schema',
metavar='FILE',
type=str,
required=False,
help='Schema file path. Only required if using csv files')
# Bigquery input
input_group.add_argument('--bigquery',
metavar='PROJECT_ID.DATASET.TABLE_NAME',
type=str,
required=False,
help=('Must be in the form project.dataset.table_name'))
parser.add_argument('--features',
metavar='FILE',
type=str,
required=True,
help='Features file path')
args = parser.parse_args(args=argv[1:])
if args.cloud:
if not args.output.startswith('gs://'):
raise ValueError('--output must point to a location on GCS')
if (args.csv and
not all(x.startswith('gs://') for x in args.csv)):
raise ValueError('--csv must point to a location on GCS')
if args.schema and not args.schema.startswith('gs://'):
raise ValueError('--schema must point to a location on GCS')
if not args.cloud and args.bigquery:
raise ValueError('--bigquery must be used with --cloud')
if not ((args.bigquery and args.csv is None and
args.schema is None) or
(args.bigquery is None and args.csv and
args.schema)):
raise ValueError('either --csv and --schema must both'
' be set or just --bigquery is set')
return args
| 436,149
|
Use BigQuery to analyze input data.
Only one of csv_file_pattern or bigquery_table should be non-None.
Args:
output_dir: output folder
csv_file_pattern: list of csv file paths, may contain wildcards
bigquery_table: project_id.dataset_name.table_name
schema: schema list
features: features config
|
def run_cloud_analysis(output_dir, csv_file_pattern, bigquery_table, schema,
features):
def _execute_sql(sql, table):
import google.datalab.bigquery as bq
if isinstance(table, bq.ExternalDataSource):
query = bq.Query(sql, data_sources={'csv_table': table})
else:
query = bq.Query(sql)
return query.execute().result().to_dataframe()
feature_analysis.expand_defaults(schema, features) # features are updated.
inverted_features = feature_analysis.invert_features(features)
feature_analysis.check_schema_transforms_match(schema, inverted_features)
import google.datalab.bigquery as bq
if bigquery_table:
table_name = '`%s`' % bigquery_table
table = None
else:
table_name = 'csv_table'
table = bq.ExternalDataSource(
source=csv_file_pattern,
schema=bq.Schema(schema))
# Make a copy of inverted_features and update the target transform to be
# identity or one hot depending on the schema.
inverted_features_target = copy.deepcopy(inverted_features)
for name, transforms in six.iteritems(inverted_features_target):
transform_set = {x['transform'] for x in transforms}
if transform_set == set([constant.TARGET_TRANSFORM]):
target_schema = next(col['type'].lower() for col in schema if col['name'] == name)
if target_schema in constant.NUMERIC_SCHEMA:
inverted_features_target[name] = [{'transform': constant.IDENTITY_TRANSFORM}]
else:
inverted_features_target[name] = [{'transform': constant.ONE_HOT_TRANSFORM}]
numerical_vocab_stats = {}
for col_name, transform_set in six.iteritems(inverted_features_target):
sys.stdout.write('Analyzing column %s...\n' % col_name)
sys.stdout.flush()
# All transforms in transform_set require the same analysis. So look
# at the first transform.
transform = next(iter(transform_set))
if (transform['transform'] in constant.CATEGORICAL_TRANSFORMS or
transform['transform'] in constant.TEXT_TRANSFORMS):
if transform['transform'] in constant.TEXT_TRANSFORMS:
# Split strings on space, then extract labels and how many rows each
# token is in. This is done by making two temp tables:
# SplitTable: each text row is made into an array of strings. The
# array may contain repeat tokens
# TokenTable: SplitTable with repeated tokens removed per row.
# Then to flatten the arrays, TokenTable has to be joined with itself.
# See the sections 'Flattening Arrays' and 'Filtering Arrays' at
# https://cloud.google.com/bigquery/docs/reference/standard-sql/arrays
separator = transform.get('separator', ' ')
sql = ('WITH SplitTable AS '
' (SELECT SPLIT({name}, \'{separator}\') as token_array FROM {table}), '
' TokenTable AS '
' (SELECT ARRAY(SELECT DISTINCT x '
' FROM UNNEST(token_array) AS x) AS unique_tokens_per_row '
' FROM SplitTable) '
'SELECT token, COUNT(token) as token_count '
'FROM TokenTable '
'CROSS JOIN UNNEST(TokenTable.unique_tokens_per_row) as token '
'WHERE LENGTH(token) > 0 '
'GROUP BY token '
'ORDER BY token_count DESC, token ASC').format(separator=separator,
name=col_name,
table=table_name)
else:
# Extract label and frequency
sql = ('SELECT {name} as token, count(*) as count '
'FROM {table} '
'WHERE {name} IS NOT NULL '
'GROUP BY {name} '
'ORDER BY count DESC, token ASC').format(name=col_name,
table=table_name)
df = _execute_sql(sql, table)
# Save the vocab
csv_string = df.to_csv(index=False, header=False)
file_io.write_string_to_file(
os.path.join(output_dir, constant.VOCAB_ANALYSIS_FILE % col_name),
csv_string)
numerical_vocab_stats[col_name] = {'vocab_size': len(df)}
# free memory
del csv_string
del df
elif transform['transform'] in constant.NUMERIC_TRANSFORMS:
# get min/max/average
sql = ('SELECT max({name}) as max_value, min({name}) as min_value, '
'avg({name}) as avg_value from {table}').format(name=col_name,
table=table_name)
df = _execute_sql(sql, table)
numerical_vocab_stats[col_name] = {'min': df.iloc[0]['min_value'],
'max': df.iloc[0]['max_value'],
'mean': df.iloc[0]['avg_value']}
sys.stdout.write('column %s analyzed.\n' % col_name)
sys.stdout.flush()
# get num examples
sql = 'SELECT count(*) as num_examples from {table}'.format(table=table_name)
df = _execute_sql(sql, table)
num_examples = df.iloc[0]['num_examples']
# Write the stats file.
stats = {'column_stats': numerical_vocab_stats, 'num_examples': num_examples}
file_io.write_string_to_file(
os.path.join(output_dir, constant.STATS_FILE),
json.dumps(stats, indent=2, separators=(',', ': ')))
feature_analysis.save_schema_features(schema, features, output_dir)
| 436,150
|
Defines the default InceptionV3 arg scope.
Args:
weight_decay: The weight decay to use for regularizing the model.
stddev: The standard deviation of the truncated normal weight initializer.
batch_norm_var_collection: The name of the collection for the batch norm
variables.
Returns:
An `arg_scope` to use for the inception v3 model.
|
def inception_v3_arg_scope(weight_decay=0.00004,
stddev=0.1,
batch_norm_var_collection='moving_vars'):
batch_norm_params = {
# Decay for the moving averages.
'decay': 0.9997,
# epsilon to prevent 0s in variance.
'epsilon': 0.001,
# collection containing update_ops.
'updates_collections': tf.GraphKeys.UPDATE_OPS,
# collection containing the moving mean and moving variance.
'variables_collections': {
'beta': None,
'gamma': None,
'moving_mean': [batch_norm_var_collection],
'moving_variance': [batch_norm_var_collection],
}
}
# Set weight_decay for weights in Conv and FC layers.
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_regularizer=slim.l2_regularizer(weight_decay)):
with slim.arg_scope([slim.conv2d],
weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
activation_fn=tf.nn.relu, normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params) as sc:
return sc
| 436,155
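A sketch of how the scope is consumed (TF 1.x, where slim lives under tf.contrib; the layer here is only a demo):
import tensorflow as tf
slim = tf.contrib.slim
images = tf.placeholder(tf.float32, [None, 299, 299, 3])
with slim.arg_scope(inception_v3_arg_scope()):
    # layers created inside the scope pick up the L2 regularizer, truncated
    # normal initializer, and batch-norm settings defined above
    net = slim.conv2d(images, 32, [3, 3], scope='demo_conv')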
|
Initializes an instance of a Job.
Args:
job_id: a unique ID for the job. If None, a UUID will be generated.
future: the Future associated with the Job, if any.
|
def __init__(self, job_id=None, future=None):
self._job_id = str(uuid.uuid4()) if job_id is None else job_id
self._future = future
self._is_complete = False
self._errors = None
self._fatal_error = None
self._result = None
self._start_time = datetime.datetime.utcnow()
self._end_time = None
| 436,161
|
Wait for the job to complete, or a timeout to happen.
Args:
timeout: how long to wait before giving up (in seconds); default None which means no timeout.
Returns:
The Job
|
def wait(self, timeout=None):
if self._future:
try:
# Future.exception() will return rather than raise any exception so we use it.
self._future.exception(timeout)
except concurrent.futures.TimeoutError:
self._timeout()
self._refresh_state()
else:
# fall back to polling
while not self.is_complete:
if timeout is not None:
if timeout <= 0:
self._timeout()
timeout -= Job._POLL_INTERVAL_SECONDS
time.sleep(Job._POLL_INTERVAL_SECONDS)
return self
| 436,164
|
Return when at least one of the specified jobs has completed or timeout expires.
Args:
jobs: a Job or list of Jobs to wait on.
timeout: a timeout in seconds to wait for. None (the default) means no timeout.
Returns:
A list of the jobs that have now completed or None if there were no jobs.
|
def wait_any(jobs, timeout=None):
return Job._wait(jobs, timeout, concurrent.futures.FIRST_COMPLETED)
| 436,167
|
Return when all of the specified jobs have completed or the timeout expires.
Args:
jobs: a Job or list of Jobs to wait on.
timeout: a timeout in seconds to wait for. None (the default) means no timeout.
Returns:
A list of the jobs that have now completed or None if there were no jobs.
|
def wait_all(jobs, timeout=None):
return Job._wait(jobs, timeout, concurrent.futures.ALL_COMPLETED)
| 436,168
|
Parse command line arguments.
Args:
argv: list of command line arguments, including program name.
Returns:
An argparse Namespace object.
Raises:
ValueError: for bad parameters
|
def parse_arguments(argv):
parser = argparse.ArgumentParser(
description='Runs Preprocessing on structured data.')
parser.add_argument('--output-dir',
type=str,
required=True,
help='Google Cloud Storage which to place outputs.')
parser.add_argument('--schema-file',
type=str,
required=False,
help=('BigQuery json schema file'))
parser.add_argument('--input-file-pattern',
type=str,
required=False,
help='Input CSV file names. May contain a file pattern')
# If using bigquery table
# TODO(brandondutra): maybe also support an sql input, so the table can be
# ad-hoc.
parser.add_argument('--bigquery-table',
type=str,
required=False,
help=('project:dataset.table_name'))
args = parser.parse_args(args=argv[1:])
if not args.output_dir.startswith('gs://'):
raise ValueError('--output-dir must point to a location on GCS')
if args.bigquery_table:
if args.schema_file or args.input_file_pattern:
      raise ValueError('If using --bigquery-table, then --schema-file and '
                       '--input-file-pattern are not needed.')
else:
if not args.schema_file or not args.input_file_pattern:
raise ValueError('If not using --bigquery-table, then --schema-file and '
'--input-file-pattern '
'are required.')
if not args.input_file_pattern.startswith('gs://'):
raise ValueError('--input-file-pattern must point to files on GCS')
return args
| 436,182
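Illustrative invocations of parse_arguments; the bucket, schema, and table names below are placeholders:

# CSV mode: both --schema-file and --input-file-pattern are required.
args = parse_arguments(['preprocess.py',
                        '--output-dir', 'gs://my-bucket/output',
                        '--schema-file', 'schema.json',
                        '--input-file-pattern', 'gs://my-bucket/data/*.csv'])

# BigQuery mode: the table reference replaces the schema and input pattern.
args = parse_arguments(['preprocess.py',
                        '--output-dir', 'gs://my-bucket/output',
                        '--bigquery-table', 'my-project:my_dataset.my_table'])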
|
Given a string a:b.c, returns b.c.
Args:
bigquery_table: full table name in the form project_id:dataset.table_name
Returns:
dataset.table_name
Raises:
ValueError: if the name is not in the form project_id:dataset.table_name.
|
def parse_table_name(bigquery_table):
id_name = bigquery_table.split(':')
if len(id_name) != 2:
raise ValueError('Bigquery table name should be in the form '
'project_id:dataset.table_name. Got %s' % bigquery_table)
return id_name[1]
| 436,183
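For example:

parse_table_name('my-project:my_dataset.my_table')  # -> 'my_dataset.my_table'
parse_table_name('my_dataset.my_table')             # raises ValueError (no project prefix)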
|
Finds min, max, and mean values for the numerical columns and writes a JSON file.
Args:
table: Reference to FederatedTable (if bigquery_table is false) or a
regular Table (otherwise)
schema_list: BigQuery schema JSON object
args: the command line args
|
def run_numerical_analysis(table, schema_list, args):
import google.datalab.bigquery as bq
# Get list of numerical columns.
numerical_columns = []
for col_schema in schema_list:
col_type = col_schema['type'].lower()
if col_type == 'integer' or col_type == 'float':
numerical_columns.append(col_schema['name'])
# Run the numerical analysis
if numerical_columns:
sys.stdout.write('Running numerical analysis...')
max_min = [
('max({name}) as max_{name}, '
'min({name}) as min_{name}, '
'avg({name}) as avg_{name} ').format(name=name)
for name in numerical_columns]
if args.bigquery_table:
sql = 'SELECT %s from `%s`' % (', '.join(max_min), parse_table_name(args.bigquery_table))
numerical_results = bq.Query(sql).execute().result().to_dataframe()
else:
sql = 'SELECT %s from csv_table' % ', '.join(max_min)
query = bq.Query(sql, data_sources={'csv_table': table})
numerical_results = query.execute().result().to_dataframe()
# Convert the numerical results to a json file.
results_dict = {}
for name in numerical_columns:
results_dict[name] = {'max': numerical_results.iloc[0]['max_%s' % name],
'min': numerical_results.iloc[0]['min_%s' % name],
'mean': numerical_results.iloc[0]['avg_%s' % name]}
file_io.write_string_to_file(
os.path.join(args.output_dir, NUMERICAL_ANALYSIS_FILE),
json.dumps(results_dict, indent=2, separators=(',', ': ')))
sys.stdout.write('done.\n')
| 436,184
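The JSON written to NUMERICAL_ANALYSIS_FILE has one entry per numerical column; an illustrative shape for a single integer column named 'age' (values made up):

{
  "age": {"max": 90.0, "min": 18.0, "mean": 42.5}
}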
|
Finds the vocabulary values for the categorical columns and writes one CSV file per column.
The vocab files are in the form
label1
label2
label3
...
Args:
table: Reference to FederatedTable (if bigquery_table is false) or a
regular Table (otherwise)
schema_list: BigQuery schema JSON object
args: the command line args
|
def run_categorical_analysis(table, schema_list, args):
import google.datalab.bigquery as bq
# Get list of categorical columns.
categorical_columns = []
for col_schema in schema_list:
col_type = col_schema['type'].lower()
if col_type == 'string':
categorical_columns.append(col_schema['name'])
if categorical_columns:
sys.stdout.write('Running categorical analysis...')
for name in categorical_columns:
if args.bigquery_table:
table_name = parse_table_name(args.bigquery_table)
else:
table_name = 'table_name'
      # Vocab query (assumed shape): one row per distinct non-null value of
      # the column, ordered for stable output.
      sql = ('SELECT {name} FROM {table} WHERE {name} IS NOT NULL '
             'GROUP BY {name} ORDER BY {name}').format(name=name, table=table_name)
out_file = os.path.join(args.output_dir,
CATEGORICAL_ANALYSIS_FILE % name)
      # extract_async seems to have a bug and sometimes hangs, so get the
      # results directly.
if args.bigquery_table:
df = bq.Query(sql).execute().result().to_dataframe()
else:
query = bq.Query(sql, data_sources={'table_name': table})
df = query.execute().result().to_dataframe()
# Write the results to a file.
string_buff = six.StringIO()
df.to_csv(string_buff, index=False, header=False)
file_io.write_string_to_file(out_file, string_buff.getvalue())
sys.stdout.write('done.\n')
| 436,185
|
Builds an analysis file for training.
Uses BigQuery tables to do the analysis.
Args:
args: command line args
Raises:
ValueError if schema contains unknown types.
|
def run_analysis(args):
import google.datalab.bigquery as bq
if args.bigquery_table:
table = bq.Table(args.bigquery_table)
schema_list = table.schema._bq_schema
else:
schema_list = json.loads(
file_io.read_file_to_string(args.schema_file).decode())
table = bq.ExternalDataSource(
source=args.input_file_pattern,
schema=bq.Schema(schema_list))
# Check the schema is supported.
for col_schema in schema_list:
col_type = col_schema['type'].lower()
if col_type != 'string' and col_type != 'integer' and col_type != 'float':
raise ValueError('Schema contains an unsupported type %s.' % col_type)
run_numerical_analysis(table, schema_list, args)
run_categorical_analysis(table, schema_list, args)
# Save a copy of the schema to the output location.
file_io.write_string_to_file(
os.path.join(args.output_dir, SCHEMA_FILE),
json.dumps(schema_list, indent=2, separators=(',', ': ')))
| 436,186
|
Initializes an instance of a Context object.
Args:
project_id: the current cloud project.
credentials: the credentials to use to authorize requests.
config: key/value configurations for cloud operations
|
def __init__(self, project_id, credentials, config=None):
self._project_id = project_id
self._credentials = credentials
self._config = config if config is not None else Context._get_default_config()
| 436,188
|
Create a ConfusionMatrix from a BigQuery table or query.
Args:
sql: Can be one of:
A SQL query string.
A BigQuery table string.
A Query object defined with '%%bq query --name [query_name]'.
The query results or table must include "target", "predicted" columns.
Returns:
A ConfusionMatrix that can be plotted.
Raises:
ValueError if query results or table does not include 'target' or 'predicted' columns.
|
def from_bigquery(sql):
if isinstance(sql, bq.Query):
sql = sql._expanded_sql()
parts = sql.split('.')
if len(parts) == 1 or len(parts) > 3 or any(' ' in x for x in parts):
sql = '(' + sql + ')' # query, not a table name
else:
sql = '`' + sql + '`' # table name
query = bq.Query(
'SELECT target, predicted, count(*) as count FROM %s group by target, predicted' % sql)
df = query.execute().result().to_dataframe()
labels = sorted(set(df['target']) | set(df['predicted']))
labels_count = len(labels)
df['target'] = [labels.index(x) for x in df['target']]
df['predicted'] = [labels.index(x) for x in df['predicted']]
cm = [[0] * labels_count for i in range(labels_count)]
for index, row in df.iterrows():
cm[row['target']][row['predicted']] = row['count']
return ConfusionMatrix(cm, labels)
| 436,193
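Illustrative calls; the table and column names are placeholders. The source may be a table reference or a full query, as long as it yields 'target' and 'predicted' columns:

cm = ConfusionMatrix.from_bigquery('my_dataset.eval_results')
cm = ConfusionMatrix.from_bigquery(
    'SELECT label AS target, prediction AS predicted FROM my_dataset.eval_results')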
|
Plot the confusion matrix.
Args:
figsize: tuple (x, y) of ints. Sets the size of the figure
rotation: the rotation angle of the labels on the x-axis.
|
def plot(self, figsize=None, rotation=45):
fig, ax = plt.subplots(figsize=figsize)
plt.imshow(self._cm, interpolation='nearest', cmap=plt.cm.Blues, aspect='auto')
plt.title('Confusion matrix')
plt.colorbar()
tick_marks = np.arange(len(self._labels))
plt.xticks(tick_marks, self._labels, rotation=rotation)
plt.yticks(tick_marks, self._labels)
if isinstance(self._cm, list):
# If cm is created from BigQuery then it is a list.
    # Take the global maximum cell value; max(max(cm)) on a list of lists
    # would compare whole rows, not individual cells.
    thresh = max(max(row) for row in self._cm) / 2.
for i, j in itertools.product(range(len(self._labels)), range(len(self._labels))):
plt.text(j, i, self._cm[i][j], horizontalalignment="center",
color="white" if self._cm[i][j] > thresh else "black")
else:
# If cm is created from csv then it is a sklearn's confusion_matrix.
thresh = self._cm.max() / 2.
for i, j in itertools.product(range(len(self._labels)), range(len(self._labels))):
plt.text(j, i, self._cm[i, j], horizontalalignment="center",
color="white" if self._cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
| 436,195
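A typical rendering call, continuing the from_bigquery sketch above; widening the figure and rotating labels helps when class names are long:

cm.plot(figsize=(8, 6), rotation=90)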
|
Issues a request to Composer to get the environment details.
Args:
zone: GCP zone of the composer environment
environment: name of the Composer environment
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
|
def get_environment_details(zone, environment):
default_context = google.datalab.Context.default()
url = (Api._ENDPOINT + (Api._ENVIRONMENTS_PATH_FORMAT % (default_context.project_id, zone,
environment)))
return google.datalab.utils.Http.request(url, credentials=default_context.credentials)
| 436,196
|
Initializes the Storage helper with context information.
Args:
context: a Context object providing project_id and credentials.
|
def __init__(self, context):
self._credentials = context.credentials
self._project_id = context.project_id
| 436,197
|
Issues a request to delete a bucket.
Args:
bucket: the name of the bucket.
Raises:
Exception if there is an error performing the operation.
|
def buckets_delete(self, bucket):
url = Api._ENDPOINT + (Api._BUCKET_PATH % bucket)
google.datalab.utils.Http.request(url, method='DELETE', credentials=self._credentials,
raw_response=True)
| 436,198
|
Issues a request to retrieve information about a bucket.
Args:
bucket: the name of the bucket.
projection: the projection of the bucket information to retrieve.
Returns:
A parsed bucket information dictionary.
Raises:
Exception if there is an error performing the operation.
|
def buckets_get(self, bucket, projection='noAcl'):
args = {'projection': projection}
url = Api._ENDPOINT + (Api._BUCKET_PATH % bucket)
return google.datalab.utils.Http.request(url, credentials=self._credentials, args=args)
| 436,199
|
Issues a request to retrieve the list of buckets.
Args:
projection: the projection of the bucket information to retrieve.
max_results: an optional maximum number of objects to retrieve.
page_token: an optional token to continue the retrieval.
project_id: the project whose buckets should be listed.
Returns:
A parsed list of bucket information dictionaries.
Raises:
Exception if there is an error performing the operation.
|
def buckets_list(self, projection='noAcl', max_results=0, page_token=None, project_id=None):
if max_results == 0:
max_results = Api._MAX_RESULTS
args = {'project': project_id if project_id else self._project_id, 'maxResults': max_results}
if projection is not None:
args['projection'] = projection
if page_token is not None:
args['pageToken'] = page_token
url = Api._ENDPOINT + (Api._BUCKET_PATH % '')
return google.datalab.utils.Http.request(url, args=args, credentials=self._credentials)
| 436,200
|
Reads the contents of an object as text.
Args:
bucket: the name of the bucket containing the object.
key: the key of the object to be read.
start_offset: the start offset of bytes to read.
byte_count: the number of bytes to read. If None, it reads to the end.
Returns:
The text content within the object.
Raises:
Exception if the object could not be read from.
|
def object_download(self, bucket, key, start_offset=0, byte_count=None):
args = {'alt': 'media'}
headers = {}
if start_offset > 0 or byte_count is not None:
    header = 'bytes=%d-' % start_offset
    if byte_count is not None:
      # The end index in an HTTP Range header is inclusive.
      header += '%d' % (start_offset + byte_count - 1)
    headers['Range'] = header
url = Api._DOWNLOAD_ENDPOINT + (Api._OBJECT_PATH % (bucket, Api._escape_key(key)))
return google.datalab.utils.Http.request(url, args=args, headers=headers,
credentials=self._credentials, raw_response=True)
| 436,201
|
Writes text content to the object.
Args:
bucket: the name of the bucket containing the object.
key: the key of the object to be written.
content: the text content to be written.
content_type: the type of text content.
Raises:
Exception if the object could not be written to.
|
def object_upload(self, bucket, key, content, content_type):
args = {'uploadType': 'media', 'name': key}
headers = {'Content-Type': content_type}
url = Api._UPLOAD_ENDPOINT + (Api._OBJECT_PATH % (bucket, ''))
return google.datalab.utils.Http.request(url, args=args, data=content, headers=headers,
credentials=self._credentials, raw_response=True)
| 436,202
|
Updates the metadata associated with an object.
Args:
bucket: the name of the bucket containing the object.
key: the key of the object being updated.
info: the metadata to update.
Returns:
A parsed object information dictionary.
Raises:
Exception if there is an error performing the operation.
|
def objects_patch(self, bucket, key, info):
url = Api._ENDPOINT + (Api._OBJECT_PATH % (bucket, Api._escape_key(key)))
return google.datalab.utils.Http.request(url, method='PATCH', data=info,
credentials=self._credentials)
| 436,204
|
Check if the user has permissions to read from the given path.
Args:
gs_path: the GCS path to check if user is permitted to read.
Raises:
Exception if user has no permissions to read.
|
def verify_permitted_to_read(gs_path):
# TODO(qimingj): Storage APIs need to be modified to allow absence of project
# or credential on Objects. When that happens we can move the function
# to Objects class.
from . import _bucket
bucket, prefix = _bucket.parse_name(gs_path)
credentials = None
if google.datalab.Context._is_signed_in():
credentials = google.datalab.Context.default().credentials
args = {
'maxResults': Api._MAX_RESULTS,
'projection': 'noAcl'
}
if prefix is not None:
args['prefix'] = prefix
url = Api._ENDPOINT + (Api._OBJECT_PATH % (bucket, ''))
try:
google.datalab.utils.Http.request(url, args=args, credentials=credentials)
except google.datalab.utils.RequestException as e:
if e.status == 401:
raise Exception('Not permitted to read from specified path. '
'Please sign in and make sure you have read access.')
raise e
| 436,205
|
Initializes an instance of a Job.
Args:
job_id: the BigQuery job ID corresponding to this job.
context: a Context object providing project_id and credentials.
|
def __init__(self, job_id, context):
super(Job, self).__init__(job_id, context)
| 436,224
|
Implements the monitoring cell magic for ipython notebooks.
Args:
line: the contents of the storage line.
Returns:
The results of executing the cell.
|
def monitoring(line, cell=None):
parser = datalab.utils.commands.CommandParser(prog='monitoring', description=(
'Execute various Monitoring-related operations. Use "%monitoring '
'<command> -h" for help on a specific command.'))
list_parser = parser.subcommand(
'list', 'List the metrics or resource types in a monitored project.')
list_metric_parser = list_parser.subcommand(
'metrics',
'List the metrics that are available through the Monitoring API.')
list_metric_parser.add_argument(
'-t', '--type',
      help='The type of metric(s) to list; can include wildcards.')
list_metric_parser.add_argument(
'-p', '--project', help='The project on which to execute the request.')
list_metric_parser.set_defaults(func=_list_metric_descriptors)
list_resource_parser = list_parser.subcommand(
'resource_types',
('List the monitored resource types that are available through the '
'Monitoring API.'))
list_resource_parser.add_argument(
'-p', '--project', help='The project on which to execute the request.')
list_resource_parser.add_argument(
'-t', '--type',
      help='The resource type(s) to list; can include wildcards.')
list_resource_parser.set_defaults(func=_list_resource_descriptors)
list_group_parser = list_parser.subcommand(
'groups',
('List the Stackdriver groups in this project.'))
list_group_parser.add_argument(
'-p', '--project', help='The project on which to execute the request.')
list_group_parser.add_argument(
'-n', '--name',
      help='The name of the group(s) to list; can include wildcards.')
list_group_parser.set_defaults(func=_list_groups)
return datalab.utils.commands.handle_magic_line(line, cell, parser)
| 436,227
|
Creates an HTML representation of the status of a job (a long-running operation).
Args:
job_name: the full name of the job.
job_type: type of job. Can be 'local' or 'cloud'.
refresh_interval: how often the client should refresh the status.
html_on_running: additional html that the job view needs to include on job running.
html_on_success: additional html that the job view needs to include on job success.
|
def html_job_status(job_name, job_type, refresh_interval, html_on_running, html_on_success):
  # Assumed minimal template; the seven '%s' slots receive (div_id, div_id,
  # job_name, job_type, refresh_interval, html_on_running, html_on_success).
  _HTML_TEMPLATE = """
    <div id="%s"></div>
    <script>
      datalab.renderJobStatus('%s', '%s', '%s', %s, '%s', '%s');
    </script>
  """
div_id = _html.Html.next_id()
return IPython.core.display.HTML(_HTML_TEMPLATE % (div_id, div_id, job_name, job_type,
refresh_interval, html_on_running, html_on_success))
| 436,232
|
Initializes an instance of an Object.
Args:
bucket: the name of the bucket containing the object.
key: the key of the object.
info: the information about the object if available.
context: an optional Context object providing project_id and credentials. If a specific
project id or credentials are unspecified, the default ones configured at the global
level are used.
|
def __init__(self, bucket, key, info=None, context=None):
if context is None:
context = google.datalab.Context.default()
self._context = context
self._api = _api.Api(context)
self._bucket = bucket
self._key = key
self._info = info
| 436,237
|
Deletes this object from its bucket.
Args:
wait_for_deletion: If True, we poll until this object no longer appears in
objects.list operations for this bucket before returning.
Raises:
Exception if there was an error deleting the object.
|
def delete(self, wait_for_deletion=True):
if self.exists():
try:
self._api.objects_delete(self._bucket, self._key)
except Exception as e:
raise e
if wait_for_deletion:
for _ in range(_MAX_POLL_ATTEMPTS):
objects = Objects(self._bucket, prefix=self.key, delimiter='/',
context=self._context)
if any(o.key == self.key for o in objects):
time.sleep(_POLLING_SLEEP)
continue
break
else:
logging.error('Failed to see object deletion after %d attempts.',
_MAX_POLL_ATTEMPTS)
| 436,239
|
Reads the content of this object as text.
Args:
start_offset: the start offset of bytes to read.
byte_count: the number of bytes to read. If None, it reads to the end.
Returns:
The text content within the object.
Raises:
Exception if there was an error requesting the object's content.
|
def read_stream(self, start_offset=0, byte_count=None):
try:
return self._api.object_download(self._bucket, self._key,
start_offset=start_offset, byte_count=byte_count)
except Exception as e:
raise e
| 436,241
|
Reads the content of this object as text, and returns a list of lines up to some max.
Args:
max_lines: max number of lines to return. If None, return all lines.
Returns:
The text content of the object as a list of lines.
Raises:
Exception if there was an error requesting the object's content.
|
def read_lines(self, max_lines=None):
if max_lines is None:
return self.read_stream().split('\n')
  max_to_read = self.metadata.size
  # Initial guess: assume lines average no more than ~100 bytes.
  bytes_to_read = min(100 * max_lines, max_to_read)
  while True:
    content = self.read_stream(byte_count=bytes_to_read)
    lines = content.split('\n')
    if len(lines) > max_lines or bytes_to_read >= max_to_read:
      break
    # Not enough lines yet: read 10x more bytes, capped at the object size.
    bytes_to_read = min(bytes_to_read * 10, max_to_read)
  # The last element is a partial line when only a prefix of the object was
  # read, or an empty string when the content ends with a newline; drop it
  # in either case, but keep a complete final line.
  if bytes_to_read < max_to_read or content.endswith('\n'):
    del lines[-1]
  return lines[0:max_lines]
| 436,242
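A usage sketch, with placeholder bucket and key names:

# Preview the first 20 lines of a CSV object without downloading all of it.
obj = Object('my-bucket', 'data/train.csv')
lines = obj.read_lines(max_lines=20)   # starts at ~2 KB and grows 10x as needed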
|
Initializes an instance of Csv.
Args:
path: path of the Csv file.
delimiter: the separator used to parse a Csv line.
|
def __init__(self, path, delimiter=b','):
self._path = path
self._delimiter = delimiter
| 436,244
|
Initializes an instance of an Iterator.
Args:
retriever: a function that can retrieve the next page of items.
|
def __init__(self, retriever):
self._page_token = None
self._first_page = True
self._retriever = retriever
self._count = 0
| 436,251
|
Issues a request to create a new bucket.
Args:
bucket: the name of the bucket.
project_id: the project to use when inserting the bucket.
Returns:
A parsed bucket information dictionary.
Raises:
Exception if there is an error performing the operation.
|
def buckets_insert(self, bucket, project_id=None):
args = {'project': project_id if project_id else self._project_id}
data = {'name': bucket}
url = Api._ENDPOINT + (Api._BUCKET_PATH % '')
return datalab.utils.Http.request(url, args=args, data=data, credentials=self._credentials)
| 436,258
|
Copies an object from a source bucket/key to a target bucket/key.
Args:
source_bucket: the name of the bucket containing the source object.
source_key: the key of the source object being copied.
target_bucket: the name of the bucket that will contain the copied object.
target_key: the key of the copied object.
Returns:
A parsed object information dictionary.
Raises:
Exception if there is an error performing the operation.
|
def objects_copy(self, source_bucket, source_key, target_bucket, target_key):
url = Api._ENDPOINT + (Api._OBJECT_COPY_PATH % (source_bucket, Api._escape_key(source_key),
target_bucket, Api._escape_key(target_key)))
return datalab.utils.Http.request(url, method='POST', credentials=self._credentials)
| 436,259
|
Deletes the specified object.
Args:
bucket: the name of the bucket.
key: the key of the object within the bucket.
Raises:
Exception if there is an error performing the operation.
|
def objects_delete(self, bucket, key):
url = Api._ENDPOINT + (Api._OBJECT_PATH % (bucket, Api._escape_key(key)))
datalab.utils.Http.request(url, method='DELETE', credentials=self._credentials,
raw_response=True)
| 436,260
|
Issues a request to retrieve information about an object.
Args:
bucket: the name of the bucket.
key: the key of the object within the bucket.
projection: the projection of the object to retrieve.
Returns:
A parsed object information dictionary.
Raises:
Exception if there is an error performing the operation.
|
def objects_get(self, bucket, key, projection='noAcl'):
args = {}
if projection is not None:
args['projection'] = projection
url = Api._ENDPOINT + (Api._OBJECT_PATH % (bucket, Api._escape_key(key)))
return datalab.utils.Http.request(url, args=args, credentials=self._credentials)
| 436,261
|
Check if the user has permissions to read from the given path.
Args:
gs_path: the GCS path to check if user is permitted to read.
Raises:
Exception if user has no permissions to read.
|
def verify_permitted_to_read(gs_path):
# TODO(qimingj): Storage APIs need to be modified to allow absence of project
# or credential on Items. When that happens we can move the function
# to Items class.
from . import _bucket
bucket, prefix = _bucket.parse_name(gs_path)
credentials = None
if datalab.context.Context.is_signed_in():
credentials = datalab.context._utils.get_credentials()
args = {
'maxResults': Api._MAX_RESULTS,
'projection': 'noAcl'
}
if prefix is not None:
args['prefix'] = prefix
url = Api._ENDPOINT + (Api._OBJECT_PATH % (bucket, ''))
try:
datalab.utils.Http.request(url, args=args, credentials=credentials)
except datalab.utils.RequestException as e:
if e.status == 401:
raise Exception('Not permitted to read from specified path. '
'Please sign in and make sure you have read access.')
raise e
| 436,262
|
Initializes a QueryJob object.
Args:
job_id: the ID of the query job.
table_name: the name of the table where the query results will be stored.
sql: the SQL statement that was executed for the query.
context: the Context object providing project_id and credentials that was used
when executing the query.
|
def __init__(self, job_id, table_name, sql, context):
super(QueryJob, self).__init__(job_id, context)
self._sql = sql
self._table = _query_results_table.QueryResultsTable(table_name, context, self,
is_temporary=True)
self._bytes_processed = None
self._cache_hit = None
self._total_rows = None
| 436,263
|
Wait for the job to complete, or a timeout to happen.
This is more efficient than the version in the base Job class, in that we can
use a call that blocks for the poll duration rather than a sleep. That means we
shouldn't block unnecessarily long and can also poll less.
Args:
timeout: how long to wait (in seconds) before giving up; default None which means no timeout.
Returns:
The QueryJob
|
def wait(self, timeout=None):
  poll = 30  # seconds to block per server-side poll request
while not self._is_complete:
try:
query_result = self._api.jobs_query_results(self._job_id,
project_id=self._context.project_id,
page_size=0,
timeout=poll * 1000)
except Exception as e:
raise e
if query_result['jobComplete']:
if 'totalBytesProcessed' in query_result:
self._bytes_processed = int(query_result['totalBytesProcessed'])
self._cache_hit = query_result.get('cacheHit', None)
if 'totalRows' in query_result:
self._total_rows = int(query_result['totalRows'])
break
if timeout is not None:
timeout -= poll
if timeout <= 0:
break
self._refresh_state()
return self
| 436,264
|
Returns a list of metric descriptors that match the filters.
Args:
pattern: An optional pattern to further filter the descriptors. This can
include Unix shell-style wildcards. E.g. ``"compute*"``,
``"*cpu/load_??m"``.
Returns:
A list of MetricDescriptor objects that match the filters.
|
def list(self, pattern='*'):
if self._descriptors is None:
self._descriptors = self._client.list_metric_descriptors(
filter_string=self._filter_string, type_prefix=self._type_prefix)
return [metric for metric in self._descriptors
if fnmatch.fnmatch(metric.type, pattern)]
| 436,266
|
Creates a pandas dataframe from the descriptors that match the filters.
Args:
pattern: An optional pattern to further filter the descriptors. This can
include Unix shell-style wildcards. E.g. ``"compute*"``,
``"*/cpu/load_??m"``.
max_rows: The maximum number of descriptors to return. If None, return
all.
Returns:
A pandas dataframe containing matching metric descriptors.
|
def as_dataframe(self, pattern='*', max_rows=None):
data = []
for i, metric in enumerate(self.list(pattern)):
if max_rows is not None and i >= max_rows:
break
    labels = ', '.join([l.key for l in metric.labels])
data.append([
metric.type, metric.display_name, metric.metric_kind,
metric.value_type, metric.unit, labels])
return pandas.DataFrame(data, columns=self._DISPLAY_HEADERS)
| 436,267
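An illustrative query against a hypothetical descriptor collection 'metrics':

df = metrics.as_dataframe(pattern='compute*', max_rows=10)
print(df.head())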
|
Given a %%sql module, return the default (last) query for the module.
Args:
module: the %%sql module.
Returns:
The default query associated with this module.
|
def get_default_query_from_module(module):
if isinstance(module, types.ModuleType):
return module.__dict__.get(_SQL_MODULE_LAST, None)
return None
| 436,268
|
Initializes an instance of a Job.
Args:
fn: the lambda function to execute asynchronously
job_id: an optional ID for the job. If None, a UUID will be generated.
|
def __init__(self, fn, job_id, *args, **kwargs):
super(LambdaJob, self).__init__(job_id)
self._future = _async.async.executor.submit(fn, *args, **kwargs)
| 436,270
|
Infer a BigQuery table schema from a dictionary. If the dictionary has entries that
are in turn OrderedDicts, these will be turned into RECORD types. Ideally the input
will be an OrderedDict, but that is not required.
Args:
data: The dict to infer a schema from.
Returns:
A list of dictionaries containing field 'name' and 'type' entries, suitable for use in a
BigQuery Tables resource schema.
|
def _from_dict_record(data):
return [Schema._get_field_entry(name, value) for name, value in list(data.items())]
| 436,272
|
Infer a BigQuery table schema from a list of values.
Args:
data: The list of values.
Returns:
A list of dictionaries containing field 'name' and 'type' entries, suitable for use in a
BigQuery Tables resource schema.
|
def _from_list_record(data):
return [Schema._get_field_entry('Column%d' % (i + 1), value) for i, value in enumerate(data)]
| 436,273
|
Infer a BigQuery table schema from a list of fields or a dictionary. The type of the
elements is used. For a list, the field names are simply 'Column1', 'Column2', etc.
Args:
data: The list of fields or dictionary.
Returns:
A list of dictionaries containing field 'name' and 'type' entries, suitable for use in a
BigQuery Tables resource schema.
|
def _from_record(data):
if isinstance(data, dict):
return Schema._from_dict_record(data)
elif isinstance(data, list):
return Schema._from_list_record(data)
else:
raise Exception('Cannot create a schema from record %s' % str(data))
| 436,274
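For example, a dict record yields named fields while a list yields positional names (inferred field types below are elided):

Schema._from_record({'name': 'alice', 'age': 30})
# -> [{'name': 'name', 'type': ...}, {'name': 'age', 'type': ...}]
Schema._from_record(['alice', 30])
# -> [{'name': 'Column1', 'type': ...}, {'name': 'Column2', 'type': ...}]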
|
Initializes a Schema from its raw JSON representation, a Pandas Dataframe, or a list.
Args:
definition: a definition of the schema as a list of dictionaries with 'name' and 'type'
entries and possibly 'mode' and 'description' entries. Only used if no data argument was
provided. 'mode' can be 'NULLABLE', 'REQUIRED' or 'REPEATED'. For the allowed types, see:
https://cloud.google.com/bigquery/preparing-data-for-bigquery#datatypes
|
def __init__(self, definition=None):
super(Schema, self).__init__()
self._map = {}
self._bq_schema = definition
self._populate_fields(definition)
| 436,276
|
Get the index of a field in the flattened list given its (fully-qualified) name.
Args:
name: the fully-qualified name of the field.
Returns:
The index of the field, if found; else -1.
|
def find(self, name):
for i in range(0, len(self)):
if self[i].name == name:
return i
return -1
| 436,279
|
Return a dictionary list formatted as an HTML table.
Args:
data: the dictionary list
headers: the keys in the dictionary to use as table columns, in order.
|
def render_dictionary(data, headers=None):
return IPython.core.display.HTML(_html.HtmlBuilder.render_table(data, headers))
| 436,287
|
Return text formatted as HTML.
Args:
text: the text to render
preformatted: whether the text should be rendered as preformatted
|
def render_text(text, preformatted=False):
return IPython.core.display.HTML(_html.HtmlBuilder.render_text(text, preformatted))
| 436,288
|
If v is a variable reference (for example: '$myvar'), replace it using the supplied
env dictionary.
Args:
v: the variable to replace if needed.
env: user supplied dictionary.
Raises:
Exception if v is a variable reference but it is not found in env.
|
def expand_var(v, env):
if len(v) == 0:
return v
# Using len() and v[0] instead of startswith makes this Unicode-safe.
if v[0] == '$':
v = v[1:]
if len(v) and v[0] != '$':
if v in env:
v = env[v]
else:
raise Exception('Cannot expand variable $%s' % v)
return v
| 436,295
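For example:

expand_var('$table', {'table': 'my_dataset.logs'})  # -> 'my_dataset.logs'
expand_var('plain', {})                             # -> 'plain'
expand_var('$$cost', {})                            # -> '$cost' (escaped literal)
expand_var('$missing', {})                          # raises Exception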
|
Replace variable references in config using the supplied env dictionary.
Args:
config: the config to parse. Can be a tuple, list or dict.
env: user supplied dictionary.
Raises:
Exception if any variable references are not found in env.
|
def replace_vars(config, env):
if isinstance(config, dict):
for k, v in list(config.items()):
if isinstance(v, dict) or isinstance(v, list) or isinstance(v, tuple):
replace_vars(v, env)
elif isinstance(v, basestring):
config[k] = expand_var(v, env)
elif isinstance(config, list):
for i, v in enumerate(config):
if isinstance(v, dict) or isinstance(v, list) or isinstance(v, tuple):
replace_vars(v, env)
elif isinstance(v, basestring):
config[i] = expand_var(v, env)
elif isinstance(config, tuple):
# TODO(gram): figure out how to handle these if the tuple elements are scalar
for v in config:
if isinstance(v, dict) or isinstance(v, list) or isinstance(v, tuple):
replace_vars(v, env)
| 436,296
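Expansion happens in place on nested containers; for example:

config = {'source': '$table', 'options': {'limit': '$limit'}}
replace_vars(config, {'table': 'logs', 'limit': '1000'})
# config is now {'source': 'logs', 'options': {'limit': '1000'}}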
|
Validate a config dictionary to make sure it includes all required keys
and does not include any unexpected keys.
Args:
config: the config to validate.
required_keys: the names of the keys that the config must have.
optional_keys: the names of the keys that the config can have.
Raises:
Exception if the config is not a dict or invalid.
|
def validate_config(config, required_keys, optional_keys=None):
if optional_keys is None:
optional_keys = []
if not isinstance(config, dict):
raise Exception('config is not dict type')
invalid_keys = set(config) - set(required_keys + optional_keys)
if len(invalid_keys) > 0:
raise Exception('Invalid config with unexpected keys "%s"' % ', '.join(e for e in invalid_keys))
missing_keys = set(required_keys) - set(config)
if len(missing_keys) > 0:
raise Exception('Invalid config with missing keys "%s"' % ', '.join(missing_keys))
| 436,298
|
Validate a config dictionary to make sure it has all of the specified keys.
Args:
config: the config to validate.
required_keys: the list of keys that config must include.
Raises:
Exception if the config does not have any of them.
|
def validate_config_must_have(config, required_keys):
missing_keys = set(required_keys) - set(config)
if len(missing_keys) > 0:
raise Exception('Invalid config with missing keys "%s"' % ', '.join(missing_keys))
| 436,299
|
Validate a config dictionary to make sure it has one and only one
key in one_of_keys.
Args:
config: the config to validate.
one_of_keys: the list of possible keys that config can have one and only one.
Raises:
Exception if the config does not have any of them, or multiple of them.
|
def validate_config_has_one_of(config, one_of_keys):
intersection = set(config).intersection(one_of_keys)
if len(intersection) > 1:
    raise Exception('Only one of the keys in "%s" may be specified' % ', '.join(intersection))
if len(intersection) == 0:
    raise Exception('One of the keys in "%s" is required' % ', '.join(one_of_keys))
| 436,300
|
Validate a config value to make sure it is one of the possible values.
Args:
value: the config value to validate.
possible_values: the possible values the value can be
Raises:
Exception if the value is not one of possible values.
|
def validate_config_value(value, possible_values):
if value not in possible_values:
raise Exception('Invalid config value "%s". Possible values are '
'%s' % (value, ', '.join(e for e in possible_values)))
| 436,301
|
Check whether a given path is a valid GCS path.
Args:
path: the GCS path to check.
require_object: if True, the path must be an object path, not just a bucket path.
Raises:
Exception if the path is invalid
|
def validate_gcs_path(path, require_object):
bucket, key = datalab.storage._bucket.parse_name(path)
if bucket is None:
raise Exception('Invalid GCS path "%s"' % path)
if require_object and key is None:
raise Exception('It appears the GCS path "%s" is a bucket path but not an object path' % path)
| 436,303
|
Generate a profile of data in a dataframe.
Args:
df: the Pandas dataframe.
|
def profile_df(df):
# The bootstrap CSS messes up the Datalab display so we tweak it to not have an effect.
# TODO(gram): strip it out rather than this kludge.
return IPython.core.display.HTML(
pandas_profiling.ProfileReport(df).html.replace('bootstrap', 'nonexistent'))
| 436,305
|
Check that files start with gs://.
Args:
files: a file path string, or a list of file paths.
|
def _assert_gcs_files(files):
if sys.version_info.major > 2:
string_type = (str, bytes) # for python 3 compatibility
else:
string_type = basestring # noqa
if isinstance(files, string_type):
files = [files]
for f in files:
if f is not None and not f.startswith('gs://'):
raise ValueError('File %s is not a gcs path' % f)
| 436,307
|
Repackage this package from its locally installed location and copy it to GCS.
Args:
staging_package_url: GCS path.
|
def _package_to_staging(staging_package_url):
import google.datalab.ml as ml
# Find the package root. __file__ is under [package_root]/mltoolbox/_structured_data/this_file
package_root = os.path.abspath(
os.path.join(os.path.dirname(__file__), '../../'))
setup_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), 'master_setup.py'))
tar_gz_path = os.path.join(staging_package_url, 'staging', 'trainer.tar.gz')
print('Building package and uploading to %s' % tar_gz_path)
ml.package_and_copy(package_root, setup_path, tar_gz_path)
return tar_gz_path
| 436,308
|
Helper function.
Wait for a process to finish if it exists, and then try to kill a list of
processes.
Used by local_train.
Args:
pid_to_wait: the process to wait for.
pids_to_kill: a list of processes to kill after the process of pid_to_wait finishes.
|
def _wait_and_kill(pid_to_wait, pids_to_kill):
# cloud workers don't have psutil
import psutil
if psutil.pid_exists(pid_to_wait):
psutil.Process(pid=pid_to_wait).wait()
for pid_to_kill in pids_to_kill:
if psutil.pid_exists(pid_to_kill):
p = psutil.Process(pid=pid_to_kill)
p.kill()
p.wait()
| 436,309
|
Train model using CloudML.
See local_train() for a description of the args.
Args:
config: A CloudTrainingConfig object.
job_name: Training job name. A default will be picked if None.
|
def cloud_train(train_dataset,
eval_dataset,
analysis_dir,
output_dir,
features,
model_type,
max_steps,
num_epochs,
train_batch_size,
eval_batch_size,
min_eval_frequency,
top_n,
layer_sizes,
learning_rate,
epsilon,
job_name,
job_name_prefix,
config):
import google.datalab.ml as ml
if len(train_dataset.input_files) != 1 or len(eval_dataset.input_files) != 1:
raise ValueError('CsvDataSets must be built with a file pattern, not list '
'of files.')
if file_io.file_exists(output_dir):
    raise ValueError('output_dir already exists. Use a new output path.')
if isinstance(features, dict):
# Make a features file.
if not file_io.file_exists(output_dir):
file_io.recursive_create_dir(output_dir)
features_file = os.path.join(output_dir, 'features_file.json')
file_io.write_string_to_file(
features_file,
json.dumps(features))
else:
features_file = features
if not isinstance(config, ml.CloudTrainingConfig):
    raise ValueError('config should be an instance of '
                     'google.datalab.ml.CloudTrainingConfig for cloud training.')
_assert_gcs_files([output_dir, train_dataset.input_files[0], eval_dataset.input_files[0],
features_file, analysis_dir])
args = ['--train-data-paths=%s' % train_dataset.input_files[0],
'--eval-data-paths=%s' % eval_dataset.input_files[0],
'--preprocess-output-dir=%s' % analysis_dir,
'--transforms-file=%s' % features_file,
'--model-type=%s' % model_type,
'--max-steps=%s' % str(max_steps),
'--train-batch-size=%s' % str(train_batch_size),
'--eval-batch-size=%s' % str(eval_batch_size),
'--min-eval-frequency=%s' % str(min_eval_frequency),
'--learning-rate=%s' % str(learning_rate),
'--epsilon=%s' % str(epsilon)]
if num_epochs:
args.append('--num-epochs=%s' % str(num_epochs))
if top_n:
args.append('--top-n=%s' % str(top_n))
if layer_sizes:
for i in range(len(layer_sizes)):
args.append('--layer-size%s=%s' % (i + 1, str(layer_sizes[i])))
job_request = {
'package_uris': [_package_to_staging(output_dir), _TF_GS_URL, _PROTOBUF_GS_URL],
'python_module': 'mltoolbox._structured_data.trainer.task',
'job_dir': output_dir,
'args': args
}
job_request.update(dict(config._asdict()))
if not job_name:
job_name = job_name_prefix or 'structured_data_train'
job_name += '_' + datetime.datetime.now().strftime('%y%m%d_%H%M%S')
job = ml.Job.submit_training(job_request, job_name)
  print('Job request sent. View the status of the job at')
print('https://console.developers.google.com/ml/jobs?project=%s' %
_default_project())
return job
| 436,315
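A hypothetical sketch of a call; the dataset objects, feature file, and CloudTrainingConfig fields are assumptions based on the surrounding package, and all paths are placeholders:

import google.datalab.ml as ml

config = ml.CloudTrainingConfig(region='us-central1', scale_tier='STANDARD_1')
job = cloud_train(
    train_dataset=train_csv,   # assumed CsvDataSet built from one file pattern
    eval_dataset=eval_csv,
    analysis_dir='gs://my-bucket/analysis',
    output_dir='gs://my-bucket/train_output',  # must not already exist
    features='gs://my-bucket/features.json',
    model_type='dnn_classification',
    max_steps=5000, num_epochs=None,
    train_batch_size=100, eval_batch_size=100,
    min_eval_frequency=100, top_n=None,
    layer_sizes=[64, 32], learning_rate=0.01, epsilon=0.0005,
    job_name=None, job_name_prefix='census',
    config=config)
job.wait()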