| id (int32, 0-252k) | repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | original_string (string, 75-19.8k chars) | language (1 class: python) | code (string, 75-19.8k chars) | code_tokens (list) | docstring (string, 3-17.3k chars) | docstring_tokens (list) | sha (string, 40 chars) | url (string, 87-242 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|
4,800
|
googledatalab/pydatalab
|
solutionbox/ml_workbench/tensorflow/transform.py
|
TransformFeaturesDoFn.process
|
def process(self, element):
"""Run the transformation graph on batched input data
Args:
element: list of csv strings, representing one batch input to the TF graph.
Returns:
dict containing the transformed data. Results are un-batched. Sparse
tensors are converted to lists.
"""
import apache_beam as beam
import six
import tensorflow as tf
# This function is invoked by a separate sub-process so setting the logging level
# does not affect Datalab's kernel process.
tf.logging.set_verbosity(tf.logging.ERROR)
try:
clean_element = []
for line in element:
clean_element.append(line.rstrip())
# batch_result is list of numpy arrays with batch_size many rows.
batch_result = self._session.run(
fetches=self._transformed_features,
feed_dict={self._input_placeholder_tensor: clean_element})
# ex batch_result.
# Dense tensor: {'col1': array([[batch_1], [batch_2]])}
# Sparse tensor: {'col1': tf.SparseTensorValue(
# indices=array([[batch_1, 0], [batch_1, 1], ...,
# [batch_2, 0], [batch_2, 1], ...]],
# values=array[value, value, value, ...])}
# Unbatch the results.
for i in range(len(clean_element)):
transformed_features = {}
for name, value in six.iteritems(batch_result):
if isinstance(value, tf.SparseTensorValue):
batch_i_indices = value.indices[:, 0] == i
batch_i_values = value.values[batch_i_indices]
transformed_features[name] = batch_i_values.tolist()
else:
transformed_features[name] = value[i].tolist()
yield transformed_features
except Exception as e: # pylint: disable=broad-except
yield beam.pvalue.TaggedOutput('errors', (str(e), element))
|
python
|
def process(self, element):
"""Run the transformation graph on batched input data
Args:
element: list of csv strings, representing one batch input to the TF graph.
Returns:
dict containing the transformed data. Results are un-batched. Sparse
tensors are converted to lists.
"""
import apache_beam as beam
import six
import tensorflow as tf
# This function is invoked by a separate sub-process so setting the logging level
# does not affect Datalab's kernel process.
tf.logging.set_verbosity(tf.logging.ERROR)
try:
clean_element = []
for line in element:
clean_element.append(line.rstrip())
# batch_result is list of numpy arrays with batch_size many rows.
batch_result = self._session.run(
fetches=self._transformed_features,
feed_dict={self._input_placeholder_tensor: clean_element})
# ex batch_result.
# Dense tensor: {'col1': array([[batch_1], [batch_2]])}
# Sparse tensor: {'col1': tf.SparseTensorValue(
# indices=array([[batch_1, 0], [batch_1, 1], ...,
# [batch_2, 0], [batch_2, 1], ...]],
# values=array[value, value, value, ...])}
# Unbatch the results.
for i in range(len(clean_element)):
transformed_features = {}
for name, value in six.iteritems(batch_result):
if isinstance(value, tf.SparseTensorValue):
batch_i_indices = value.indices[:, 0] == i
batch_i_values = value.values[batch_i_indices]
transformed_features[name] = batch_i_values.tolist()
else:
transformed_features[name] = value[i].tolist()
yield transformed_features
except Exception as e: # pylint: disable=broad-except
yield beam.pvalue.TaggedOutput('errors', (str(e), element))
|
[
"def",
"process",
"(",
"self",
",",
"element",
")",
":",
"import",
"apache_beam",
"as",
"beam",
"import",
"six",
"import",
"tensorflow",
"as",
"tf",
"# This function is invoked by a separate sub-process so setting the logging level",
"# does not affect Datalab's kernel process.",
"tf",
".",
"logging",
".",
"set_verbosity",
"(",
"tf",
".",
"logging",
".",
"ERROR",
")",
"try",
":",
"clean_element",
"=",
"[",
"]",
"for",
"line",
"in",
"element",
":",
"clean_element",
".",
"append",
"(",
"line",
".",
"rstrip",
"(",
")",
")",
"# batch_result is list of numpy arrays with batch_size many rows.",
"batch_result",
"=",
"self",
".",
"_session",
".",
"run",
"(",
"fetches",
"=",
"self",
".",
"_transformed_features",
",",
"feed_dict",
"=",
"{",
"self",
".",
"_input_placeholder_tensor",
":",
"clean_element",
"}",
")",
"# ex batch_result. ",
"# Dense tensor: {'col1': array([[batch_1], [batch_2]])}",
"# Sparse tensor: {'col1': tf.SparseTensorValue(",
"# indices=array([[batch_1, 0], [batch_1, 1], ...,",
"# [batch_2, 0], [batch_2, 1], ...]],",
"# values=array[value, value, value, ...])}",
"# Unbatch the results.",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"clean_element",
")",
")",
":",
"transformed_features",
"=",
"{",
"}",
"for",
"name",
",",
"value",
"in",
"six",
".",
"iteritems",
"(",
"batch_result",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"tf",
".",
"SparseTensorValue",
")",
":",
"batch_i_indices",
"=",
"value",
".",
"indices",
"[",
":",
",",
"0",
"]",
"==",
"i",
"batch_i_values",
"=",
"value",
".",
"values",
"[",
"batch_i_indices",
"]",
"transformed_features",
"[",
"name",
"]",
"=",
"batch_i_values",
".",
"tolist",
"(",
")",
"else",
":",
"transformed_features",
"[",
"name",
"]",
"=",
"value",
"[",
"i",
"]",
".",
"tolist",
"(",
")",
"yield",
"transformed_features",
"except",
"Exception",
"as",
"e",
":",
"# pylint: disable=broad-except",
"yield",
"beam",
".",
"pvalue",
".",
"TaggedOutput",
"(",
"'errors'",
",",
"(",
"str",
"(",
"e",
")",
",",
"element",
")",
")"
] |
Run the transformation graph on batched input data
Args:
element: list of csv strings, representing one batch input to the TF graph.
Returns:
dict containing the transformed data. Results are un-batched. Sparse
tensors are converted to lists.
|
[
"Run",
"the",
"transformation",
"graph",
"on",
"batched",
"input",
"data"
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/ml_workbench/tensorflow/transform.py#L304-L352
|
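A minimal sketch of the un-batching logic in `process` above, outside the dataset record: numpy arrays and a hypothetical `MockSparse` namedtuple stand in for the `tf.SparseTensorValue` objects that `session.run` would return.

```python
import collections
import numpy as np

# Stand-in for tf.SparseTensorValue; only the fields process() touches are mocked.
MockSparse = collections.namedtuple('MockSparse', ['indices', 'values'])

# Pretend batch_result for a batch of 2 rows: one dense column, one sparse column.
batch_result = {
    'dense_col': np.array([[1.0], [2.0]]),
    'sparse_col': MockSparse(
        indices=np.array([[0, 0], [0, 1], [1, 0]]),  # (row_in_batch, position)
        values=np.array([10, 11, 20])),
}

for i in range(2):  # un-batch, row by row
    row = {}
    for name, value in batch_result.items():
        if isinstance(value, MockSparse):
            mask = value.indices[:, 0] == i      # entries belonging to row i
            row[name] = value.values[mask].tolist()
        else:
            row[name] = value[i].tolist()
    print(row)
# -> {'dense_col': [1.0], 'sparse_col': [10, 11]}
# -> {'dense_col': [2.0], 'sparse_col': [20]}
```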
4,801
|
googledatalab/pydatalab
|
google/datalab/bigquery/_parser.py
|
Parser.parse_row
|
def parse_row(schema, data):
"""Parses a row from query results into an equivalent object.
Args:
schema: the array of fields defining the schema of the data.
data: the JSON row from a query result.
Returns:
The parsed row object.
"""
def parse_value(data_type, value):
"""Parses a value returned from a BigQuery response.
Args:
data_type: the type of the value as specified by the schema.
value: the raw value to return (before casting to data_type).
Returns:
The value cast to the data_type.
"""
if value is not None:
if value == 'null':
value = None
elif data_type == 'INTEGER':
value = int(value)
elif data_type == 'FLOAT':
value = float(value)
elif data_type == 'TIMESTAMP':
value = datetime.datetime.utcfromtimestamp(float(value))
elif data_type == 'BOOLEAN':
value = value == 'true'
elif (type(value) != str):
# TODO(gram): Handle nested JSON records
value = str(value)
return value
row = {}
if data is None:
return row
for i, (field, schema_field) in enumerate(zip(data['f'], schema)):
val = field['v']
name = schema_field['name']
data_type = schema_field['type']
repeated = True if 'mode' in schema_field and schema_field['mode'] == 'REPEATED' else False
if repeated and val is None:
row[name] = []
elif data_type == 'RECORD':
sub_schema = schema_field['fields']
if repeated:
row[name] = [Parser.parse_row(sub_schema, v['v']) for v in val]
else:
row[name] = Parser.parse_row(sub_schema, val)
elif repeated:
row[name] = [parse_value(data_type, v['v']) for v in val]
else:
row[name] = parse_value(data_type, val)
return row
|
python
|
def parse_row(schema, data):
"""Parses a row from query results into an equivalent object.
Args:
schema: the array of fields defining the schema of the data.
data: the JSON row from a query result.
Returns:
The parsed row object.
"""
def parse_value(data_type, value):
"""Parses a value returned from a BigQuery response.
Args:
data_type: the type of the value as specified by the schema.
value: the raw value to return (before casting to data_type).
Returns:
The value cast to the data_type.
"""
if value is not None:
if value == 'null':
value = None
elif data_type == 'INTEGER':
value = int(value)
elif data_type == 'FLOAT':
value = float(value)
elif data_type == 'TIMESTAMP':
value = datetime.datetime.utcfromtimestamp(float(value))
elif data_type == 'BOOLEAN':
value = value == 'true'
elif (type(value) != str):
# TODO(gram): Handle nested JSON records
value = str(value)
return value
row = {}
if data is None:
return row
for i, (field, schema_field) in enumerate(zip(data['f'], schema)):
val = field['v']
name = schema_field['name']
data_type = schema_field['type']
repeated = True if 'mode' in schema_field and schema_field['mode'] == 'REPEATED' else False
if repeated and val is None:
row[name] = []
elif data_type == 'RECORD':
sub_schema = schema_field['fields']
if repeated:
row[name] = [Parser.parse_row(sub_schema, v['v']) for v in val]
else:
row[name] = Parser.parse_row(sub_schema, val)
elif repeated:
row[name] = [parse_value(data_type, v['v']) for v in val]
else:
row[name] = parse_value(data_type, val)
return row
|
[
"def",
"parse_row",
"(",
"schema",
",",
"data",
")",
":",
"def",
"parse_value",
"(",
"data_type",
",",
"value",
")",
":",
"\"\"\"Parses a value returned from a BigQuery response.\n\n Args:\n data_type: the type of the value as specified by the schema.\n value: the raw value to return (before casting to data_type).\n\n Returns:\n The value cast to the data_type.\n \"\"\"",
"if",
"value",
"is",
"not",
"None",
":",
"if",
"value",
"==",
"'null'",
":",
"value",
"=",
"None",
"elif",
"data_type",
"==",
"'INTEGER'",
":",
"value",
"=",
"int",
"(",
"value",
")",
"elif",
"data_type",
"==",
"'FLOAT'",
":",
"value",
"=",
"float",
"(",
"value",
")",
"elif",
"data_type",
"==",
"'TIMESTAMP'",
":",
"value",
"=",
"datetime",
".",
"datetime",
".",
"utcfromtimestamp",
"(",
"float",
"(",
"value",
")",
")",
"elif",
"data_type",
"==",
"'BOOLEAN'",
":",
"value",
"=",
"value",
"==",
"'true'",
"elif",
"(",
"type",
"(",
"value",
")",
"!=",
"str",
")",
":",
"# TODO(gram): Handle nested JSON records",
"value",
"=",
"str",
"(",
"value",
")",
"return",
"value",
"row",
"=",
"{",
"}",
"if",
"data",
"is",
"None",
":",
"return",
"row",
"for",
"i",
",",
"(",
"field",
",",
"schema_field",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"data",
"[",
"'f'",
"]",
",",
"schema",
")",
")",
":",
"val",
"=",
"field",
"[",
"'v'",
"]",
"name",
"=",
"schema_field",
"[",
"'name'",
"]",
"data_type",
"=",
"schema_field",
"[",
"'type'",
"]",
"repeated",
"=",
"True",
"if",
"'mode'",
"in",
"schema_field",
"and",
"schema_field",
"[",
"'mode'",
"]",
"==",
"'REPEATED'",
"else",
"False",
"if",
"repeated",
"and",
"val",
"is",
"None",
":",
"row",
"[",
"name",
"]",
"=",
"[",
"]",
"elif",
"data_type",
"==",
"'RECORD'",
":",
"sub_schema",
"=",
"schema_field",
"[",
"'fields'",
"]",
"if",
"repeated",
":",
"row",
"[",
"name",
"]",
"=",
"[",
"Parser",
".",
"parse_row",
"(",
"sub_schema",
",",
"v",
"[",
"'v'",
"]",
")",
"for",
"v",
"in",
"val",
"]",
"else",
":",
"row",
"[",
"name",
"]",
"=",
"Parser",
".",
"parse_row",
"(",
"sub_schema",
",",
"val",
")",
"elif",
"repeated",
":",
"row",
"[",
"name",
"]",
"=",
"[",
"parse_value",
"(",
"data_type",
",",
"v",
"[",
"'v'",
"]",
")",
"for",
"v",
"in",
"val",
"]",
"else",
":",
"row",
"[",
"name",
"]",
"=",
"parse_value",
"(",
"data_type",
",",
"val",
")",
"return",
"row"
] |
Parses a row from query results into an equivalent object.
Args:
schema: the array of fields defining the schema of the data.
data: the JSON row from a query result.
Returns:
The parsed row object.
|
[
"Parses",
"a",
"row",
"from",
"query",
"results",
"into",
"an",
"equivalent",
"object",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_parser.py#L31-L89
|
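To make the row format concrete, here is a small standalone sketch (not part of the record) that restates the casting rules of `parse_value` and applies them to an invented BigQuery-style row; the schema and values are made up.

```python
import datetime

# The REST API returns rows shaped like {'f': [{'v': ...}, ...]}.
schema = [
    {'name': 'word', 'type': 'STRING'},
    {'name': 'count', 'type': 'INTEGER'},
    {'name': 'ts', 'type': 'TIMESTAMP'},
]
data = {'f': [{'v': 'hello'}, {'v': '42'}, {'v': '1.5e9'}]}

def parse_value(data_type, value):
    # Minimal restatement of the casting rules used by Parser.parse_row.
    if value is None or value == 'null':
        return None
    if data_type == 'INTEGER':
        return int(value)
    if data_type == 'FLOAT':
        return float(value)
    if data_type == 'TIMESTAMP':
        return datetime.datetime.utcfromtimestamp(float(value))
    if data_type == 'BOOLEAN':
        return value == 'true'
    return str(value)

row = {f['name']: parse_value(f['type'], v['v'])
       for f, v in zip(schema, data['f'])}
print(row)
# {'word': 'hello', 'count': 42, 'ts': datetime.datetime(2017, 7, 14, 2, 40)}
```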
4,802
|
googledatalab/pydatalab
|
google/datalab/contrib/mlworkbench/_local_predict.py
|
_tf_predict
|
def _tf_predict(model_dir, input_csvlines):
"""Prediction with a tf savedmodel.
Args:
model_dir: directory that contains a saved model
input_csvlines: list of csv strings
Returns:
Dict in the form tensor_name:prediction_list. Note that the value is always
a list, even if there was only 1 row in input_csvlines.
"""
with tf.Graph().as_default(), tf.Session() as sess:
input_alias_map, output_alias_map = _tf_load_model(sess, model_dir)
csv_tensor_name = list(input_alias_map.values())[0]
results = sess.run(fetches=output_alias_map,
feed_dict={csv_tensor_name: input_csvlines})
# convert any scalar values to a list. This may happen when there is one
# example in input_csvlines and the model uses tf.squeeze on the output
# tensor.
if len(input_csvlines) == 1:
for k, v in six.iteritems(results):
if not isinstance(v, (list, np.ndarray)):
results[k] = [v]
# Convert bytes to string. In python3 the results may be bytes.
for k, v in six.iteritems(results):
if any(isinstance(x, bytes) for x in v):
results[k] = [x.decode('utf-8') for x in v]
return results
|
python
|
def _tf_predict(model_dir, input_csvlines):
"""Prediction with a tf savedmodel.
Args:
model_dir: directory that contains a saved model
input_csvlines: list of csv strings
Returns:
Dict in the form tensor_name:prediction_list. Note that the value is always
a list, even if there was only 1 row in input_csvlines.
"""
with tf.Graph().as_default(), tf.Session() as sess:
input_alias_map, output_alias_map = _tf_load_model(sess, model_dir)
csv_tensor_name = list(input_alias_map.values())[0]
results = sess.run(fetches=output_alias_map,
feed_dict={csv_tensor_name: input_csvlines})
# convert any scalar values to a list. This may happen when there is one
# example in input_csvlines and the model uses tf.squeeze on the output
# tensor.
if len(input_csvlines) == 1:
for k, v in six.iteritems(results):
if not isinstance(v, (list, np.ndarray)):
results[k] = [v]
# Convert bytes to string. In python3 the results may be bytes.
for k, v in six.iteritems(results):
if any(isinstance(x, bytes) for x in v):
results[k] = [x.decode('utf-8') for x in v]
return results
|
[
"def",
"_tf_predict",
"(",
"model_dir",
",",
"input_csvlines",
")",
":",
"with",
"tf",
".",
"Graph",
"(",
")",
".",
"as_default",
"(",
")",
",",
"tf",
".",
"Session",
"(",
")",
"as",
"sess",
":",
"input_alias_map",
",",
"output_alias_map",
"=",
"_tf_load_model",
"(",
"sess",
",",
"model_dir",
")",
"csv_tensor_name",
"=",
"list",
"(",
"input_alias_map",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
"results",
"=",
"sess",
".",
"run",
"(",
"fetches",
"=",
"output_alias_map",
",",
"feed_dict",
"=",
"{",
"csv_tensor_name",
":",
"input_csvlines",
"}",
")",
"# convert any scalar values to a list. This may happen when there is one",
"# example in input_csvlines and the model uses tf.squeeze on the output",
"# tensor.",
"if",
"len",
"(",
"input_csvlines",
")",
"==",
"1",
":",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"results",
")",
":",
"if",
"not",
"isinstance",
"(",
"v",
",",
"(",
"list",
",",
"np",
".",
"ndarray",
")",
")",
":",
"results",
"[",
"k",
"]",
"=",
"[",
"v",
"]",
"# Convert bytes to string. In python3 the results may be bytes.",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"results",
")",
":",
"if",
"any",
"(",
"isinstance",
"(",
"x",
",",
"bytes",
")",
"for",
"x",
"in",
"v",
")",
":",
"results",
"[",
"k",
"]",
"=",
"[",
"x",
".",
"decode",
"(",
"'utf-8'",
")",
"for",
"x",
"in",
"v",
"]",
"return",
"results"
] |
Prediction with a tf savedmodel.
Args:
model_dir: directory that contains a saved model
input_csvlines: list of csv strings
Returns:
Dict in the form tensor_name:prediction_list. Note that the value is always
a list, even if there was only 1 row in input_csvlines.
|
[
"Prediction",
"with",
"a",
"tf",
"savedmodel",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/mlworkbench/_local_predict.py#L56-L87
|
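The `sess.run` call needs a real SavedModel, but the post-processing in `_tf_predict` can be illustrated on its own. In the sketch below the `results` dict is a fabricated stand-in for the session output.

```python
import numpy as np

# Fake output of sess.run(); a real call requires a SavedModel on disk.
results = {'score': 0.87, 'label': [b'daisy'], 'probs': np.array([0.87, 0.13])}
input_csvlines = ['5.1,3.5,1.4,0.2']  # a single CSV row

# A single input row may come back as a squeezed scalar; wrap it in a list.
if len(input_csvlines) == 1:
    for k, v in results.items():
        if not isinstance(v, (list, np.ndarray)):
            results[k] = [v]

# Under Python 3 string outputs may be bytes; decode them to str.
for k, v in results.items():
    if any(isinstance(x, bytes) for x in v):
        results[k] = [x.decode('utf-8') for x in v]

print(results)  # {'score': [0.87], 'label': ['daisy'], 'probs': array([0.87, 0.13])}
```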
4,803
|
googledatalab/pydatalab
|
google/datalab/contrib/mlworkbench/_local_predict.py
|
_download_images
|
def _download_images(data, img_cols):
"""Download images given image columns."""
images = collections.defaultdict(list)
for d in data:
for img_col in img_cols:
if d.get(img_col, None):
if isinstance(d[img_col], Image.Image):
# If it is already an Image, just copy and continue.
images[img_col].append(d[img_col])
else:
# Otherwise it is image url. Load the image.
with file_io.FileIO(d[img_col], 'rb') as fi:
im = Image.open(fi)
images[img_col].append(im)
else:
images[img_col].append('')
return images
|
python
|
def _download_images(data, img_cols):
"""Download images given image columns."""
images = collections.defaultdict(list)
for d in data:
for img_col in img_cols:
if d.get(img_col, None):
if isinstance(d[img_col], Image.Image):
# If it is already an Image, just copy and continue.
images[img_col].append(d[img_col])
else:
# Otherwise it is image url. Load the image.
with file_io.FileIO(d[img_col], 'rb') as fi:
im = Image.open(fi)
images[img_col].append(im)
else:
images[img_col].append('')
return images
|
[
"def",
"_download_images",
"(",
"data",
",",
"img_cols",
")",
":",
"images",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"for",
"d",
"in",
"data",
":",
"for",
"img_col",
"in",
"img_cols",
":",
"if",
"d",
".",
"get",
"(",
"img_col",
",",
"None",
")",
":",
"if",
"isinstance",
"(",
"d",
"[",
"img_col",
"]",
",",
"Image",
".",
"Image",
")",
":",
"# If it is already an Image, just copy and continue.",
"images",
"[",
"img_col",
"]",
".",
"append",
"(",
"d",
"[",
"img_col",
"]",
")",
"else",
":",
"# Otherwise it is image url. Load the image.",
"with",
"file_io",
".",
"FileIO",
"(",
"d",
"[",
"img_col",
"]",
",",
"'rb'",
")",
"as",
"fi",
":",
"im",
"=",
"Image",
".",
"open",
"(",
"fi",
")",
"images",
"[",
"img_col",
"]",
".",
"append",
"(",
"im",
")",
"else",
":",
"images",
"[",
"img_col",
"]",
".",
"append",
"(",
"''",
")",
"return",
"images"
] |
Download images given image columns.
|
[
"Download",
"images",
"given",
"image",
"columns",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/mlworkbench/_local_predict.py#L90-L108
|
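A small illustration of the pass-through/placeholder behaviour of `_download_images`, assuming Pillow is installed; the in-memory image replaces the gs:// URLs the real code would open via `file_io`.

```python
import collections
from PIL import Image

data = [{'img': Image.new('RGB', (16, 16), 'red')},   # already a PIL Image
        {'img': ''}]                                   # missing value

images = collections.defaultdict(list)
for d in data:
    for img_col in ['img']:
        if d.get(img_col):
            images[img_col].append(d[img_col])   # Image objects pass straight through
        else:
            images[img_col].append('')           # keep positions aligned with data

print([type(x).__name__ for x in images['img']])  # ['Image', 'str']
```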
4,804
|
googledatalab/pydatalab
|
google/datalab/contrib/mlworkbench/_local_predict.py
|
_get_predicton_csv_lines
|
def _get_predicton_csv_lines(data, headers, images):
"""Create CSV lines from list-of-dict data."""
if images:
data = copy.deepcopy(data)
for img_col in images:
for d, im in zip(data, images[img_col]):
if im == '':
continue
im = im.copy()
im.thumbnail((299, 299), Image.ANTIALIAS)
buf = BytesIO()
im.save(buf, "JPEG")
content = base64.urlsafe_b64encode(buf.getvalue()).decode('ascii')
d[img_col] = content
csv_lines = []
for d in data:
buf = six.StringIO()
writer = csv.DictWriter(buf, fieldnames=headers, lineterminator='')
writer.writerow(d)
csv_lines.append(buf.getvalue())
return csv_lines
|
python
|
def _get_predicton_csv_lines(data, headers, images):
"""Create CSV lines from list-of-dict data."""
if images:
data = copy.deepcopy(data)
for img_col in images:
for d, im in zip(data, images[img_col]):
if im == '':
continue
im = im.copy()
im.thumbnail((299, 299), Image.ANTIALIAS)
buf = BytesIO()
im.save(buf, "JPEG")
content = base64.urlsafe_b64encode(buf.getvalue()).decode('ascii')
d[img_col] = content
csv_lines = []
for d in data:
buf = six.StringIO()
writer = csv.DictWriter(buf, fieldnames=headers, lineterminator='')
writer.writerow(d)
csv_lines.append(buf.getvalue())
return csv_lines
|
[
"def",
"_get_predicton_csv_lines",
"(",
"data",
",",
"headers",
",",
"images",
")",
":",
"if",
"images",
":",
"data",
"=",
"copy",
".",
"deepcopy",
"(",
"data",
")",
"for",
"img_col",
"in",
"images",
":",
"for",
"d",
",",
"im",
"in",
"zip",
"(",
"data",
",",
"images",
"[",
"img_col",
"]",
")",
":",
"if",
"im",
"==",
"''",
":",
"continue",
"im",
"=",
"im",
".",
"copy",
"(",
")",
"im",
".",
"thumbnail",
"(",
"(",
"299",
",",
"299",
")",
",",
"Image",
".",
"ANTIALIAS",
")",
"buf",
"=",
"BytesIO",
"(",
")",
"im",
".",
"save",
"(",
"buf",
",",
"\"JPEG\"",
")",
"content",
"=",
"base64",
".",
"urlsafe_b64encode",
"(",
"buf",
".",
"getvalue",
"(",
")",
")",
".",
"decode",
"(",
"'ascii'",
")",
"d",
"[",
"img_col",
"]",
"=",
"content",
"csv_lines",
"=",
"[",
"]",
"for",
"d",
"in",
"data",
":",
"buf",
"=",
"six",
".",
"StringIO",
"(",
")",
"writer",
"=",
"csv",
".",
"DictWriter",
"(",
"buf",
",",
"fieldnames",
"=",
"headers",
",",
"lineterminator",
"=",
"''",
")",
"writer",
".",
"writerow",
"(",
"d",
")",
"csv_lines",
".",
"append",
"(",
"buf",
".",
"getvalue",
"(",
")",
")",
"return",
"csv_lines"
] |
Create CSV lines from list-of-dict data.
|
[
"Create",
"CSV",
"lines",
"from",
"list",
"-",
"of",
"-",
"dict",
"data",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/mlworkbench/_local_predict.py#L111-L135
|
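The CSV serialization step can be shown without any model. The sketch below uses `io.StringIO` (the original uses `six.StringIO`) and invented column names.

```python
import csv
import io

headers = ['sepal_length', 'sepal_width', 'species']
data = [{'sepal_length': 5.1, 'sepal_width': 3.5, 'species': 'setosa'},
        {'sepal_length': 6.2, 'sepal_width': 2.9, 'species': 'virginica'}]

csv_lines = []
for d in data:
    buf = io.StringIO()
    # lineterminator='' keeps each row as a bare line with no trailing newline.
    writer = csv.DictWriter(buf, fieldnames=headers, lineterminator='')
    writer.writerow(d)
    csv_lines.append(buf.getvalue())

print(csv_lines)  # ['5.1,3.5,setosa', '6.2,2.9,virginica']
```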
4,805
|
googledatalab/pydatalab
|
google/datalab/contrib/mlworkbench/_local_predict.py
|
_get_display_data_with_images
|
def _get_display_data_with_images(data, images):
"""Create display data by converting image urls to base64 strings."""
if not images:
return data
display_data = copy.deepcopy(data)
for img_col in images:
for d, im in zip(display_data, images[img_col]):
if im == '':
d[img_col + '_image'] = ''
else:
im = im.copy()
im.thumbnail((128, 128), Image.ANTIALIAS)
buf = BytesIO()
im.save(buf, "PNG")
content = base64.b64encode(buf.getvalue()).decode('ascii')
d[img_col + '_image'] = content
return display_data
|
python
|
def _get_display_data_with_images(data, images):
"""Create display data by converting image urls to base64 strings."""
if not images:
return data
display_data = copy.deepcopy(data)
for img_col in images:
for d, im in zip(display_data, images[img_col]):
if im == '':
d[img_col + '_image'] = ''
else:
im = im.copy()
im.thumbnail((128, 128), Image.ANTIALIAS)
buf = BytesIO()
im.save(buf, "PNG")
content = base64.b64encode(buf.getvalue()).decode('ascii')
d[img_col + '_image'] = content
return display_data
|
[
"def",
"_get_display_data_with_images",
"(",
"data",
",",
"images",
")",
":",
"if",
"not",
"images",
":",
"return",
"data",
"display_data",
"=",
"copy",
".",
"deepcopy",
"(",
"data",
")",
"for",
"img_col",
"in",
"images",
":",
"for",
"d",
",",
"im",
"in",
"zip",
"(",
"display_data",
",",
"images",
"[",
"img_col",
"]",
")",
":",
"if",
"im",
"==",
"''",
":",
"d",
"[",
"img_col",
"+",
"'_image'",
"]",
"=",
"''",
"else",
":",
"im",
"=",
"im",
".",
"copy",
"(",
")",
"im",
".",
"thumbnail",
"(",
"(",
"128",
",",
"128",
")",
",",
"Image",
".",
"ANTIALIAS",
")",
"buf",
"=",
"BytesIO",
"(",
")",
"im",
".",
"save",
"(",
"buf",
",",
"\"PNG\"",
")",
"content",
"=",
"base64",
".",
"b64encode",
"(",
"buf",
".",
"getvalue",
"(",
")",
")",
".",
"decode",
"(",
"'ascii'",
")",
"d",
"[",
"img_col",
"+",
"'_image'",
"]",
"=",
"content",
"return",
"display_data"
] |
Create display data by converting image urls to base64 strings.
|
[
"Create",
"display",
"data",
"by",
"converting",
"image",
"urls",
"to",
"base64",
"strings",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/mlworkbench/_local_predict.py#L138-L157
|
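A hedged sketch of the thumbnail-and-base64 step, assuming Pillow is installed; `Image.LANCZOS` is the current name for the `Image.ANTIALIAS` filter used in the record, and the input image is generated in memory.

```python
import base64
from io import BytesIO
from PIL import Image

# A made-up image; in the real flow this comes from _download_images.
im = Image.new('RGB', (512, 512), 'blue')

im = im.copy()
im.thumbnail((128, 128), Image.LANCZOS)  # Image.ANTIALIAS in older Pillow
buf = BytesIO()
im.save(buf, 'PNG')
content = base64.b64encode(buf.getvalue()).decode('ascii')

print(im.size, len(content))  # (128, 128) and the length of the base64 payload
```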
4,806
|
googledatalab/pydatalab
|
google/datalab/contrib/mlworkbench/_local_predict.py
|
get_model_schema_and_features
|
def get_model_schema_and_features(model_dir):
"""Get a local model's schema and features config.
Args:
model_dir: local or GCS path of a model.
Returns:
A tuple of schema (list) and features config (dict).
"""
schema_file = os.path.join(model_dir, 'assets.extra', 'schema.json')
schema = json.loads(file_io.read_file_to_string(schema_file))
features_file = os.path.join(model_dir, 'assets.extra', 'features.json')
features_config = json.loads(file_io.read_file_to_string(features_file))
return schema, features_config
|
python
|
def get_model_schema_and_features(model_dir):
"""Get a local model's schema and features config.
Args:
model_dir: local or GCS path of a model.
Returns:
A tuple of schema (list) and features config (dict).
"""
schema_file = os.path.join(model_dir, 'assets.extra', 'schema.json')
schema = json.loads(file_io.read_file_to_string(schema_file))
features_file = os.path.join(model_dir, 'assets.extra', 'features.json')
features_config = json.loads(file_io.read_file_to_string(features_file))
return schema, features_config
|
[
"def",
"get_model_schema_and_features",
"(",
"model_dir",
")",
":",
"schema_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"model_dir",
",",
"'assets.extra'",
",",
"'schema.json'",
")",
"schema",
"=",
"json",
".",
"loads",
"(",
"file_io",
".",
"read_file_to_string",
"(",
"schema_file",
")",
")",
"features_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"model_dir",
",",
"'assets.extra'",
",",
"'features.json'",
")",
"features_config",
"=",
"json",
".",
"loads",
"(",
"file_io",
".",
"read_file_to_string",
"(",
"features_file",
")",
")",
"return",
"schema",
",",
"features_config"
] |
Get a local model's schema and features config.
Args:
model_dir: local or GCS path of a model.
Returns:
A tuple of schema (list) and features config (dict).
|
[
"Get",
"a",
"local",
"model",
"s",
"schema",
"and",
"features",
"config",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/mlworkbench/_local_predict.py#L160-L172
|
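A quick local illustration of the `assets.extra` layout this function expects; plain `open()` replaces `file_io.read_file_to_string` (which also handles gs:// paths), and the JSON contents are invented.

```python
import json
import os
import tempfile

# Build a throwaway assets.extra directory next to a fake model dir.
model_dir = tempfile.mkdtemp()
extra = os.path.join(model_dir, 'assets.extra')
os.makedirs(extra)
with open(os.path.join(extra, 'schema.json'), 'w') as f:
    json.dump([{'name': 'species', 'type': 'STRING'}], f)
with open(os.path.join(extra, 'features.json'), 'w') as f:
    json.dump({'species': {'transform': 'target'}}, f)

with open(os.path.join(extra, 'schema.json')) as f:
    schema = json.loads(f.read())
with open(os.path.join(extra, 'features.json')) as f:
    features_config = json.loads(f.read())
print(schema, features_config)
```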
4,807
|
googledatalab/pydatalab
|
google/datalab/contrib/mlworkbench/_local_predict.py
|
get_prediction_results
|
def get_prediction_results(model_dir_or_id, data, headers, img_cols=None,
cloud=False, with_source=True, show_image=True):
""" Predict with a specified model.
It predicts with the model, join source data with prediction results, and formats
the results so they can be displayed nicely in Datalab.
Args:
model_dir_or_id: The model directory if cloud is False, or model.version if cloud is True.
data: Can be a list of dictionaries, a list of csv lines, or a Pandas DataFrame. If it is not
a list of csv lines, data will be converted to csv lines first, using the orders specified
by headers and then send to model. For images, it can be image gs urls or in-memory PIL
images. Images will be converted to base64 encoded strings before prediction.
headers: the column names of data. It specifies the order of the columns when
serializing to csv lines for prediction.
img_cols: The image url columns. If specified, the img_urls will be converted to
base64 encoded image bytes.
with_source: Whether return a joined prediction source and prediction results, or prediction
results only.
show_image: When displaying prediction source, whether to add a column of image bytes for
each image url column.
Returns:
A dataframe of joined prediction source and prediction results, or prediction results only.
"""
if img_cols is None:
img_cols = []
if isinstance(data, pd.DataFrame):
data = list(data.T.to_dict().values())
elif isinstance(data[0], six.string_types):
data = list(csv.DictReader(data, fieldnames=headers))
images = _download_images(data, img_cols)
predict_data = _get_predicton_csv_lines(data, headers, images)
if cloud:
parts = model_dir_or_id.split('.')
if len(parts) != 2:
raise ValueError('Invalid model name for cloud prediction. Use "model.version".')
predict_results = ml.ModelVersions(parts[0]).predict(parts[1], predict_data)
else:
tf_logging_level = logging.getLogger("tensorflow").level
logging.getLogger("tensorflow").setLevel(logging.WARNING)
try:
predict_results = _tf_predict(model_dir_or_id, predict_data)
finally:
logging.getLogger("tensorflow").setLevel(tf_logging_level)
df_r = pd.DataFrame(predict_results)
if not with_source:
return df_r
display_data = data
if show_image:
display_data = _get_display_data_with_images(data, images)
df_s = pd.DataFrame(display_data)
df = pd.concat([df_r, df_s], axis=1)
# Remove duplicate columns. All 'key' columns are duplicate here.
df = df.loc[:, ~df.columns.duplicated()]
return df
|
python
|
def get_prediction_results(model_dir_or_id, data, headers, img_cols=None,
cloud=False, with_source=True, show_image=True):
""" Predict with a specified model.
It predicts with the model, join source data with prediction results, and formats
the results so they can be displayed nicely in Datalab.
Args:
model_dir_or_id: The model directory if cloud is False, or model.version if cloud is True.
data: Can be a list of dictionaries, a list of csv lines, or a Pandas DataFrame. If it is not
a list of csv lines, data will be converted to csv lines first, using the orders specified
by headers and then send to model. For images, it can be image gs urls or in-memory PIL
images. Images will be converted to base64 encoded strings before prediction.
headers: the column names of data. It specifies the order of the columns when
serializing to csv lines for prediction.
img_cols: The image url columns. If specified, the img_urls will be converted to
base64 encoded image bytes.
with_source: Whether return a joined prediction source and prediction results, or prediction
results only.
show_image: When displaying prediction source, whether to add a column of image bytes for
each image url column.
Returns:
A dataframe of joined prediction source and prediction results, or prediction results only.
"""
if img_cols is None:
img_cols = []
if isinstance(data, pd.DataFrame):
data = list(data.T.to_dict().values())
elif isinstance(data[0], six.string_types):
data = list(csv.DictReader(data, fieldnames=headers))
images = _download_images(data, img_cols)
predict_data = _get_predicton_csv_lines(data, headers, images)
if cloud:
parts = model_dir_or_id.split('.')
if len(parts) != 2:
raise ValueError('Invalid model name for cloud prediction. Use "model.version".')
predict_results = ml.ModelVersions(parts[0]).predict(parts[1], predict_data)
else:
tf_logging_level = logging.getLogger("tensorflow").level
logging.getLogger("tensorflow").setLevel(logging.WARNING)
try:
predict_results = _tf_predict(model_dir_or_id, predict_data)
finally:
logging.getLogger("tensorflow").setLevel(tf_logging_level)
df_r = pd.DataFrame(predict_results)
if not with_source:
return df_r
display_data = data
if show_image:
display_data = _get_display_data_with_images(data, images)
df_s = pd.DataFrame(display_data)
df = pd.concat([df_r, df_s], axis=1)
# Remove duplicate columns. All 'key' columns are duplicate here.
df = df.loc[:, ~df.columns.duplicated()]
return df
|
[
"def",
"get_prediction_results",
"(",
"model_dir_or_id",
",",
"data",
",",
"headers",
",",
"img_cols",
"=",
"None",
",",
"cloud",
"=",
"False",
",",
"with_source",
"=",
"True",
",",
"show_image",
"=",
"True",
")",
":",
"if",
"img_cols",
"is",
"None",
":",
"img_cols",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"data",
",",
"pd",
".",
"DataFrame",
")",
":",
"data",
"=",
"list",
"(",
"data",
".",
"T",
".",
"to_dict",
"(",
")",
".",
"values",
"(",
")",
")",
"elif",
"isinstance",
"(",
"data",
"[",
"0",
"]",
",",
"six",
".",
"string_types",
")",
":",
"data",
"=",
"list",
"(",
"csv",
".",
"DictReader",
"(",
"data",
",",
"fieldnames",
"=",
"headers",
")",
")",
"images",
"=",
"_download_images",
"(",
"data",
",",
"img_cols",
")",
"predict_data",
"=",
"_get_predicton_csv_lines",
"(",
"data",
",",
"headers",
",",
"images",
")",
"if",
"cloud",
":",
"parts",
"=",
"model_dir_or_id",
".",
"split",
"(",
"'.'",
")",
"if",
"len",
"(",
"parts",
")",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"'Invalid model name for cloud prediction. Use \"model.version\".'",
")",
"predict_results",
"=",
"ml",
".",
"ModelVersions",
"(",
"parts",
"[",
"0",
"]",
")",
".",
"predict",
"(",
"parts",
"[",
"1",
"]",
",",
"predict_data",
")",
"else",
":",
"tf_logging_level",
"=",
"logging",
".",
"getLogger",
"(",
"\"tensorflow\"",
")",
".",
"level",
"logging",
".",
"getLogger",
"(",
"\"tensorflow\"",
")",
".",
"setLevel",
"(",
"logging",
".",
"WARNING",
")",
"try",
":",
"predict_results",
"=",
"_tf_predict",
"(",
"model_dir_or_id",
",",
"predict_data",
")",
"finally",
":",
"logging",
".",
"getLogger",
"(",
"\"tensorflow\"",
")",
".",
"setLevel",
"(",
"tf_logging_level",
")",
"df_r",
"=",
"pd",
".",
"DataFrame",
"(",
"predict_results",
")",
"if",
"not",
"with_source",
":",
"return",
"df_r",
"display_data",
"=",
"data",
"if",
"show_image",
":",
"display_data",
"=",
"_get_display_data_with_images",
"(",
"data",
",",
"images",
")",
"df_s",
"=",
"pd",
".",
"DataFrame",
"(",
"display_data",
")",
"df",
"=",
"pd",
".",
"concat",
"(",
"[",
"df_r",
",",
"df_s",
"]",
",",
"axis",
"=",
"1",
")",
"# Remove duplicate columns. All 'key' columns are duplicate here.",
"df",
"=",
"df",
".",
"loc",
"[",
":",
",",
"~",
"df",
".",
"columns",
".",
"duplicated",
"(",
")",
"]",
"return",
"df"
] |
Predict with a specified model.
It predicts with the model, joins source data with prediction results, and formats
the results so they can be displayed nicely in Datalab.
Args:
model_dir_or_id: The model directory if cloud is False, or model.version if cloud is True.
data: Can be a list of dictionaries, a list of csv lines, or a Pandas DataFrame. If it is not
a list of csv lines, data will be converted to csv lines first, using the order specified
by headers, and then sent to the model. For images, it can be image gs urls or in-memory PIL
images. Images will be converted to base64 encoded strings before prediction.
headers: the column names of data. It specifies the order of the columns when
serializing to csv lines for prediction.
img_cols: The image url columns. If specified, the img_urls will be converted to
base64 encoded image bytes.
with_source: Whether to return joined prediction source and prediction results, or prediction
results only.
show_image: When displaying prediction source, whether to add a column of image bytes for
each image url column.
Returns:
A dataframe of joined prediction source and prediction results, or prediction results only.
|
[
"Predict",
"with",
"a",
"specified",
"model",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/mlworkbench/_local_predict.py#L175-L239
|
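The input normalization and the duplicate-column cleanup can be demonstrated with pandas alone; the column names and values below are invented and no model is involved.

```python
import csv
import pandas as pd

headers = ['key', 'sepal_length']

# A DataFrame input is turned into a list of per-row dicts...
df_in = pd.DataFrame([[1, 5.1], [2, 6.2]], columns=headers)
as_dicts = list(df_in.T.to_dict().values())
print(as_dicts)          # two dicts, one per source row

# ...while raw CSV lines go through DictReader with the same headers.
print(list(csv.DictReader(['1,5.1', '2,6.2'], fieldnames=headers)))

# After prediction, results and source are concatenated column-wise and
# duplicated columns (e.g. 'key' appearing on both sides) are dropped.
df_r = pd.DataFrame({'key': [1, 2], 'predicted': ['a', 'b']})
df_s = pd.DataFrame(as_dicts)
df = pd.concat([df_r, df_s], axis=1)
df = df.loc[:, ~df.columns.duplicated()]
print(df.columns.tolist())   # ['key', 'predicted', 'sepal_length']
```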
4,808
|
googledatalab/pydatalab
|
google/datalab/contrib/mlworkbench/_local_predict.py
|
get_probs_for_labels
|
def get_probs_for_labels(labels, prediction_results):
""" Given ML Workbench prediction results, get probs of each label for each instance.
The prediction results are like:
[
{'predicted': 'daisy', 'probability': 0.8, 'predicted_2': 'rose', 'probability_2': 0.1},
{'predicted': 'sunflower', 'probability': 0.9, 'predicted_2': 'daisy', 'probability_2': 0.01},
...
]
Each instance is ordered by prob. But in some cases probs are needed for fixed
order of labels. For example, given labels = ['daisy', 'rose', 'sunflower'], the
results of above is expected to be:
[
[0.8, 0.1, 0.0],
[0.01, 0.0, 0.9],
...
]
Note that the sum of each instance may not be always 1. If model's top_n is set to
none-zero, and is less than number of labels, then prediction results may not contain
probs for all labels.
Args:
labels: a list of labels specifying the order of the labels.
prediction_results: a pandas DataFrame containing prediction results, usually returned
by get_prediction_results() call.
Returns:
A list of list of probs for each class.
"""
probs = []
if 'probability' in prediction_results:
# 'probability' exists so top-n is set to none zero, and results are like
# "predicted, predicted_2,...,probability,probability_2,...
for i, r in prediction_results.iterrows():
probs_one = [0.0] * len(labels)
for k, v in six.iteritems(r):
if v in labels and k.startswith('predicted'):
if k == 'predict':
prob_name = 'probability'
else:
prob_name = 'probability' + k[9:]
probs_one[labels.index(v)] = r[prob_name]
probs.append(probs_one)
return probs
else:
# 'probability' does not exist, so top-n is set to zero. Results are like
# "predicted, class_name1, class_name2,...
for i, r in prediction_results.iterrows():
probs_one = [0.0] * len(labels)
for k, v in six.iteritems(r):
if k in labels:
probs_one[labels.index(k)] = v
probs.append(probs_one)
return probs
|
python
|
def get_probs_for_labels(labels, prediction_results):
""" Given ML Workbench prediction results, get probs of each label for each instance.
The prediction results are like:
[
{'predicted': 'daisy', 'probability': 0.8, 'predicted_2': 'rose', 'probability_2': 0.1},
{'predicted': 'sunflower', 'probability': 0.9, 'predicted_2': 'daisy', 'probability_2': 0.01},
...
]
Each instance is ordered by prob. But in some cases probs are needed for fixed
order of labels. For example, given labels = ['daisy', 'rose', 'sunflower'], the
results of above is expected to be:
[
[0.8, 0.1, 0.0],
[0.01, 0.0, 0.9],
...
]
Note that the sum of each instance may not be always 1. If model's top_n is set to
none-zero, and is less than number of labels, then prediction results may not contain
probs for all labels.
Args:
labels: a list of labels specifying the order of the labels.
prediction_results: a pandas DataFrame containing prediction results, usually returned
by get_prediction_results() call.
Returns:
A list of list of probs for each class.
"""
probs = []
if 'probability' in prediction_results:
# 'probability' exists so top-n is set to none zero, and results are like
# "predicted, predicted_2,...,probability,probability_2,...
for i, r in prediction_results.iterrows():
probs_one = [0.0] * len(labels)
for k, v in six.iteritems(r):
if v in labels and k.startswith('predicted'):
if k == 'predict':
prob_name = 'probability'
else:
prob_name = 'probability' + k[9:]
probs_one[labels.index(v)] = r[prob_name]
probs.append(probs_one)
return probs
else:
# 'probability' does not exist, so top-n is set to zero. Results are like
# "predicted, class_name1, class_name2,...
for i, r in prediction_results.iterrows():
probs_one = [0.0] * len(labels)
for k, v in six.iteritems(r):
if k in labels:
probs_one[labels.index(k)] = v
probs.append(probs_one)
return probs
|
[
"def",
"get_probs_for_labels",
"(",
"labels",
",",
"prediction_results",
")",
":",
"probs",
"=",
"[",
"]",
"if",
"'probability'",
"in",
"prediction_results",
":",
"# 'probability' exists so top-n is set to none zero, and results are like",
"# \"predicted, predicted_2,...,probability,probability_2,...",
"for",
"i",
",",
"r",
"in",
"prediction_results",
".",
"iterrows",
"(",
")",
":",
"probs_one",
"=",
"[",
"0.0",
"]",
"*",
"len",
"(",
"labels",
")",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"r",
")",
":",
"if",
"v",
"in",
"labels",
"and",
"k",
".",
"startswith",
"(",
"'predicted'",
")",
":",
"if",
"k",
"==",
"'predict'",
":",
"prob_name",
"=",
"'probability'",
"else",
":",
"prob_name",
"=",
"'probability'",
"+",
"k",
"[",
"9",
":",
"]",
"probs_one",
"[",
"labels",
".",
"index",
"(",
"v",
")",
"]",
"=",
"r",
"[",
"prob_name",
"]",
"probs",
".",
"append",
"(",
"probs_one",
")",
"return",
"probs",
"else",
":",
"# 'probability' does not exist, so top-n is set to zero. Results are like",
"# \"predicted, class_name1, class_name2,...",
"for",
"i",
",",
"r",
"in",
"prediction_results",
".",
"iterrows",
"(",
")",
":",
"probs_one",
"=",
"[",
"0.0",
"]",
"*",
"len",
"(",
"labels",
")",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"r",
")",
":",
"if",
"k",
"in",
"labels",
":",
"probs_one",
"[",
"labels",
".",
"index",
"(",
"k",
")",
"]",
"=",
"v",
"probs",
".",
"append",
"(",
"probs_one",
")",
"return",
"probs"
] |
Given ML Workbench prediction results, get probs of each label for each instance.
The prediction results are like:
[
{'predicted': 'daisy', 'probability': 0.8, 'predicted_2': 'rose', 'probability_2': 0.1},
{'predicted': 'sunflower', 'probability': 0.9, 'predicted_2': 'daisy', 'probability_2': 0.01},
...
]
Each instance is ordered by prob. But in some cases probs are needed for a fixed
order of labels. For example, given labels = ['daisy', 'rose', 'sunflower'], the
results of the above are expected to be:
[
[0.8, 0.1, 0.0],
[0.01, 0.0, 0.9],
...
]
Note that the sum for each instance may not always be 1. If the model's top_n is set to
non-zero and is less than the number of labels, then the prediction results may not contain
probs for all labels.
Args:
labels: a list of labels specifying the order of the labels.
prediction_results: a pandas DataFrame containing prediction results, usually returned
by get_prediction_results() call.
Returns:
A list of list of probs for each class.
|
[
"Given",
"ML",
"Workbench",
"prediction",
"results",
"get",
"probs",
"of",
"each",
"label",
"for",
"each",
"instance",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/mlworkbench/_local_predict.py#L242-L297
|
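A self-contained re-run of the docstring example, with the reordering loop restated outside the dataset record; `prob_name` is derived from the column name exactly as in the function (`k[9:]` strips the `predicted` prefix), and the numbers are invented.

```python
import pandas as pd

labels = ['daisy', 'rose', 'sunflower']

# Results in the top-n form described above.
prediction_results = pd.DataFrame([
    {'predicted': 'daisy', 'probability': 0.8,
     'predicted_2': 'rose', 'probability_2': 0.1},
    {'predicted': 'sunflower', 'probability': 0.9,
     'predicted_2': 'daisy', 'probability_2': 0.01},
])

probs = []
for _, r in prediction_results.iterrows():
    probs_one = [0.0] * len(labels)
    for k, v in r.items():
        if v in labels and k.startswith('predicted'):
            # 'predicted' -> 'probability', 'predicted_2' -> 'probability_2', ...
            prob_name = 'probability' + k[len('predicted'):]
            probs_one[labels.index(v)] = float(r[prob_name])
    probs.append(probs_one)

print(probs)  # [[0.8, 0.1, 0.0], [0.01, 0.0, 0.9]]
```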
4,809
|
googledatalab/pydatalab
|
google/datalab/contrib/mlworkbench/_local_predict.py
|
local_batch_predict
|
def local_batch_predict(model_dir, csv_file_pattern, output_dir, output_format, batch_size=100):
""" Batch Predict with a specified model.
It does batch prediction, saves results to output files and also creates an output
schema file. The output file names are input file names prepended by 'predict_results_'.
Args:
model_dir: The model directory containing a SavedModel (usually saved_model.pb).
csv_file_pattern: a pattern of csv files as batch prediction source.
output_dir: the path of the output directory.
output_format: csv or json.
batch_size: Larger batch_size improves performance but may
cause more memory usage.
"""
file_io.recursive_create_dir(output_dir)
csv_files = file_io.get_matching_files(csv_file_pattern)
if len(csv_files) == 0:
raise ValueError('No files found given ' + csv_file_pattern)
with tf.Graph().as_default(), tf.Session() as sess:
input_alias_map, output_alias_map = _tf_load_model(sess, model_dir)
csv_tensor_name = list(input_alias_map.values())[0]
output_schema = _get_output_schema(sess, output_alias_map)
for csv_file in csv_files:
output_file = os.path.join(
output_dir,
'predict_results_' +
os.path.splitext(os.path.basename(csv_file))[0] + '.' + output_format)
with file_io.FileIO(output_file, 'w') as f:
prediction_source = _batch_csv_reader(csv_file, batch_size)
for batch in prediction_source:
batch = [l.rstrip() for l in batch if l]
predict_results = sess.run(fetches=output_alias_map, feed_dict={csv_tensor_name: batch})
formatted_results = _format_results(output_format, output_schema, predict_results)
f.write('\n'.join(formatted_results) + '\n')
file_io.write_string_to_file(os.path.join(output_dir, 'predict_results_schema.json'),
json.dumps(output_schema, indent=2))
|
python
|
def local_batch_predict(model_dir, csv_file_pattern, output_dir, output_format, batch_size=100):
""" Batch Predict with a specified model.
It does batch prediction, saves results to output files and also creates an output
schema file. The output file names are input file names prepended by 'predict_results_'.
Args:
model_dir: The model directory containing a SavedModel (usually saved_model.pb).
csv_file_pattern: a pattern of csv files as batch prediction source.
output_dir: the path of the output directory.
output_format: csv or json.
batch_size: Larger batch_size improves performance but may
cause more memory usage.
"""
file_io.recursive_create_dir(output_dir)
csv_files = file_io.get_matching_files(csv_file_pattern)
if len(csv_files) == 0:
raise ValueError('No files found given ' + csv_file_pattern)
with tf.Graph().as_default(), tf.Session() as sess:
input_alias_map, output_alias_map = _tf_load_model(sess, model_dir)
csv_tensor_name = list(input_alias_map.values())[0]
output_schema = _get_output_schema(sess, output_alias_map)
for csv_file in csv_files:
output_file = os.path.join(
output_dir,
'predict_results_' +
os.path.splitext(os.path.basename(csv_file))[0] + '.' + output_format)
with file_io.FileIO(output_file, 'w') as f:
prediction_source = _batch_csv_reader(csv_file, batch_size)
for batch in prediction_source:
batch = [l.rstrip() for l in batch if l]
predict_results = sess.run(fetches=output_alias_map, feed_dict={csv_tensor_name: batch})
formatted_results = _format_results(output_format, output_schema, predict_results)
f.write('\n'.join(formatted_results) + '\n')
file_io.write_string_to_file(os.path.join(output_dir, 'predict_results_schema.json'),
json.dumps(output_schema, indent=2))
|
[
"def",
"local_batch_predict",
"(",
"model_dir",
",",
"csv_file_pattern",
",",
"output_dir",
",",
"output_format",
",",
"batch_size",
"=",
"100",
")",
":",
"file_io",
".",
"recursive_create_dir",
"(",
"output_dir",
")",
"csv_files",
"=",
"file_io",
".",
"get_matching_files",
"(",
"csv_file_pattern",
")",
"if",
"len",
"(",
"csv_files",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"'No files found given '",
"+",
"csv_file_pattern",
")",
"with",
"tf",
".",
"Graph",
"(",
")",
".",
"as_default",
"(",
")",
",",
"tf",
".",
"Session",
"(",
")",
"as",
"sess",
":",
"input_alias_map",
",",
"output_alias_map",
"=",
"_tf_load_model",
"(",
"sess",
",",
"model_dir",
")",
"csv_tensor_name",
"=",
"list",
"(",
"input_alias_map",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
"output_schema",
"=",
"_get_output_schema",
"(",
"sess",
",",
"output_alias_map",
")",
"for",
"csv_file",
"in",
"csv_files",
":",
"output_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"'predict_results_'",
"+",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"csv_file",
")",
")",
"[",
"0",
"]",
"+",
"'.'",
"+",
"output_format",
")",
"with",
"file_io",
".",
"FileIO",
"(",
"output_file",
",",
"'w'",
")",
"as",
"f",
":",
"prediction_source",
"=",
"_batch_csv_reader",
"(",
"csv_file",
",",
"batch_size",
")",
"for",
"batch",
"in",
"prediction_source",
":",
"batch",
"=",
"[",
"l",
".",
"rstrip",
"(",
")",
"for",
"l",
"in",
"batch",
"if",
"l",
"]",
"predict_results",
"=",
"sess",
".",
"run",
"(",
"fetches",
"=",
"output_alias_map",
",",
"feed_dict",
"=",
"{",
"csv_tensor_name",
":",
"batch",
"}",
")",
"formatted_results",
"=",
"_format_results",
"(",
"output_format",
",",
"output_schema",
",",
"predict_results",
")",
"f",
".",
"write",
"(",
"'\\n'",
".",
"join",
"(",
"formatted_results",
")",
"+",
"'\\n'",
")",
"file_io",
".",
"write_string_to_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"'predict_results_schema.json'",
")",
",",
"json",
".",
"dumps",
"(",
"output_schema",
",",
"indent",
"=",
"2",
")",
")"
] |
Batch Predict with a specified model.
It does batch prediction, saves results to output files and also creates an output
schema file. The output file names are input file names prepended by 'predict_results_'.
Args:
model_dir: The model directory containing a SavedModel (usually saved_model.pb).
csv_file_pattern: a pattern of csv files as batch prediction source.
output_dir: the path of the output directory.
output_format: csv or json.
batch_size: Larger batch_size improves performance but may
cause more memory usage.
|
[
"Batch",
"Predict",
"with",
"a",
"specified",
"model",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/mlworkbench/_local_predict.py#L359-L397
|
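Two details worth seeing in isolation: the output file naming and the batching of input lines. `_batch_csv_reader` is not included in this record, so `batch_reader` below is an assumed equivalent; the paths are invented and no model is loaded.

```python
import os

# Output naming: each input file maps to predict_results_<name>.<format>.
csv_file, output_dir, output_format = 'gs://bucket/data/eval.csv', '/tmp/out', 'json'
output_file = os.path.join(
    output_dir,
    'predict_results_' + os.path.splitext(os.path.basename(csv_file))[0] +
    '.' + output_format)
print(output_file)  # /tmp/out/predict_results_eval.json

# Batching: lines are grouped so each sess.run() sees at most batch_size rows.
def batch_reader(lines, batch_size):
    batch = []
    for line in lines:
        batch.append(line)
        if len(batch) == batch_size:
            yield batch
            batch = []
    if batch:
        yield batch

print(list(batch_reader(['a', 'b', 'c', 'd', 'e'], batch_size=2)))
# [['a', 'b'], ['c', 'd'], ['e']]
```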
4,810
|
googledatalab/pydatalab
|
google/datalab/ml/_job.py
|
Job.submit_training
|
def submit_training(job_request, job_id=None):
"""Submit a training job.
Args:
job_request: the arguments of the training job in a dict. For example,
{
'package_uris': 'gs://my-bucket/iris/trainer-0.1.tar.gz',
'python_module': 'trainer.task',
'scale_tier': 'BASIC',
'region': 'us-central1',
'args': {
'train_data_paths': ['gs://mubucket/data/features_train'],
'eval_data_paths': ['gs://mubucket/data/features_eval'],
'metadata_path': 'gs://mubucket/data/metadata.yaml',
'output_path': 'gs://mubucket/data/mymodel/',
}
}
If 'args' is present in job_request and is a dict, it will be expanded to
--key value or --key list_item_0 --key list_item_1, ...
job_id: id for the training job. If None, an id based on timestamp will be generated.
Returns:
A Job object representing the cloud training job.
"""
new_job_request = dict(job_request)
# convert job_args from dict to list as service required.
if 'args' in job_request and isinstance(job_request['args'], dict):
job_args = job_request['args']
args = []
for k, v in six.iteritems(job_args):
if isinstance(v, list):
for item in v:
args.append('--' + str(k))
args.append(str(item))
else:
args.append('--' + str(k))
args.append(str(v))
new_job_request['args'] = args
if job_id is None:
job_id = datetime.datetime.now().strftime('%y%m%d_%H%M%S')
if 'python_module' in new_job_request:
job_id = new_job_request['python_module'].replace('.', '_') + \
'_' + job_id
job = {
'job_id': job_id,
'training_input': new_job_request,
}
context = datalab.Context.default()
cloudml = discovery.build('ml', 'v1', credentials=context.credentials)
request = cloudml.projects().jobs().create(body=job,
parent='projects/' + context.project_id)
request.headers['user-agent'] = 'GoogleCloudDataLab/1.0'
request.execute()
return Job(job_id)
|
python
|
def submit_training(job_request, job_id=None):
"""Submit a training job.
Args:
job_request: the arguments of the training job in a dict. For example,
{
'package_uris': 'gs://my-bucket/iris/trainer-0.1.tar.gz',
'python_module': 'trainer.task',
'scale_tier': 'BASIC',
'region': 'us-central1',
'args': {
'train_data_paths': ['gs://mubucket/data/features_train'],
'eval_data_paths': ['gs://mubucket/data/features_eval'],
'metadata_path': 'gs://mubucket/data/metadata.yaml',
'output_path': 'gs://mubucket/data/mymodel/',
}
}
If 'args' is present in job_request and is a dict, it will be expanded to
--key value or --key list_item_0 --key list_item_1, ...
job_id: id for the training job. If None, an id based on timestamp will be generated.
Returns:
A Job object representing the cloud training job.
"""
new_job_request = dict(job_request)
# convert job_args from dict to list as service required.
if 'args' in job_request and isinstance(job_request['args'], dict):
job_args = job_request['args']
args = []
for k, v in six.iteritems(job_args):
if isinstance(v, list):
for item in v:
args.append('--' + str(k))
args.append(str(item))
else:
args.append('--' + str(k))
args.append(str(v))
new_job_request['args'] = args
if job_id is None:
job_id = datetime.datetime.now().strftime('%y%m%d_%H%M%S')
if 'python_module' in new_job_request:
job_id = new_job_request['python_module'].replace('.', '_') + \
'_' + job_id
job = {
'job_id': job_id,
'training_input': new_job_request,
}
context = datalab.Context.default()
cloudml = discovery.build('ml', 'v1', credentials=context.credentials)
request = cloudml.projects().jobs().create(body=job,
parent='projects/' + context.project_id)
request.headers['user-agent'] = 'GoogleCloudDataLab/1.0'
request.execute()
return Job(job_id)
|
[
"def",
"submit_training",
"(",
"job_request",
",",
"job_id",
"=",
"None",
")",
":",
"new_job_request",
"=",
"dict",
"(",
"job_request",
")",
"# convert job_args from dict to list as service required.",
"if",
"'args'",
"in",
"job_request",
"and",
"isinstance",
"(",
"job_request",
"[",
"'args'",
"]",
",",
"dict",
")",
":",
"job_args",
"=",
"job_request",
"[",
"'args'",
"]",
"args",
"=",
"[",
"]",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"job_args",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"list",
")",
":",
"for",
"item",
"in",
"v",
":",
"args",
".",
"append",
"(",
"'--'",
"+",
"str",
"(",
"k",
")",
")",
"args",
".",
"append",
"(",
"str",
"(",
"item",
")",
")",
"else",
":",
"args",
".",
"append",
"(",
"'--'",
"+",
"str",
"(",
"k",
")",
")",
"args",
".",
"append",
"(",
"str",
"(",
"v",
")",
")",
"new_job_request",
"[",
"'args'",
"]",
"=",
"args",
"if",
"job_id",
"is",
"None",
":",
"job_id",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"'%y%m%d_%H%M%S'",
")",
"if",
"'python_module'",
"in",
"new_job_request",
":",
"job_id",
"=",
"new_job_request",
"[",
"'python_module'",
"]",
".",
"replace",
"(",
"'.'",
",",
"'_'",
")",
"+",
"'_'",
"+",
"job_id",
"job",
"=",
"{",
"'job_id'",
":",
"job_id",
",",
"'training_input'",
":",
"new_job_request",
",",
"}",
"context",
"=",
"datalab",
".",
"Context",
".",
"default",
"(",
")",
"cloudml",
"=",
"discovery",
".",
"build",
"(",
"'ml'",
",",
"'v1'",
",",
"credentials",
"=",
"context",
".",
"credentials",
")",
"request",
"=",
"cloudml",
".",
"projects",
"(",
")",
".",
"jobs",
"(",
")",
".",
"create",
"(",
"body",
"=",
"job",
",",
"parent",
"=",
"'projects/'",
"+",
"context",
".",
"project_id",
")",
"request",
".",
"headers",
"[",
"'user-agent'",
"]",
"=",
"'GoogleCloudDataLab/1.0'",
"request",
".",
"execute",
"(",
")",
"return",
"Job",
"(",
"job_id",
")"
] |
Submit a training job.
Args:
job_request: the arguments of the training job in a dict. For example,
{
'package_uris': 'gs://my-bucket/iris/trainer-0.1.tar.gz',
'python_module': 'trainer.task',
'scale_tier': 'BASIC',
'region': 'us-central1',
'args': {
'train_data_paths': ['gs://mubucket/data/features_train'],
'eval_data_paths': ['gs://mubucket/data/features_eval'],
'metadata_path': 'gs://mubucket/data/metadata.yaml',
'output_path': 'gs://mubucket/data/mymodel/',
}
}
If 'args' is present in job_request and is a dict, it will be expanded to
--key value or --key list_item_0 --key list_item_1, ...
job_id: id for the training job. If None, an id based on timestamp will be generated.
Returns:
A Job object representing the cloud training job.
|
[
"Submit",
"a",
"training",
"job",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_job.py#L62-L116
|
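A sketch of the two pieces of request preparation that are easy to miss: expanding the `args` dict into a flat flag list and deriving the default job id. The gs:// values are invented and nothing is submitted.

```python
import datetime

job_request = {
    'python_module': 'trainer.task',
    'args': {'train_data_paths': ['gs://b/train1', 'gs://b/train2'],
             'output_path': 'gs://b/model/'},
}

# Expand the args dict into the flat --flag value list the service expects.
args = []
for k, v in job_request['args'].items():
    for item in (v if isinstance(v, list) else [v]):
        args.extend(['--' + str(k), str(item)])
print(args)
# ['--train_data_paths', 'gs://b/train1', '--train_data_paths', 'gs://b/train2',
#  '--output_path', 'gs://b/model/']

# Auto-generated job id: <python_module with dots replaced>_<timestamp>.
job_id = datetime.datetime.now().strftime('%y%m%d_%H%M%S')
job_id = job_request['python_module'].replace('.', '_') + '_' + job_id
print(job_id)  # e.g. trainer_task_240101_120000
```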
4,811
|
googledatalab/pydatalab
|
google/datalab/ml/_job.py
|
Job.submit_batch_prediction
|
def submit_batch_prediction(job_request, job_id=None):
"""Submit a batch prediction job.
Args:
job_request: the arguments of the training job in a dict. For example,
{
'version_name': 'projects/my-project/models/my-model/versions/my-version',
'data_format': 'TEXT',
'input_paths': ['gs://my_bucket/my_file.csv'],
'output_path': 'gs://my_bucket/predict_output',
'region': 'us-central1',
'max_worker_count': 1,
}
job_id: id for the training job. If None, an id based on timestamp will be generated.
Returns:
A Job object representing the batch prediction job.
"""
if job_id is None:
job_id = 'prediction_' + datetime.datetime.now().strftime('%y%m%d_%H%M%S')
job = {
'job_id': job_id,
'prediction_input': job_request,
}
context = datalab.Context.default()
cloudml = discovery.build('ml', 'v1', credentials=context.credentials)
request = cloudml.projects().jobs().create(body=job,
parent='projects/' + context.project_id)
request.headers['user-agent'] = 'GoogleCloudDataLab/1.0'
request.execute()
return Job(job_id)
|
python
|
def submit_batch_prediction(job_request, job_id=None):
"""Submit a batch prediction job.
Args:
job_request: the arguments of the training job in a dict. For example,
{
'version_name': 'projects/my-project/models/my-model/versions/my-version',
'data_format': 'TEXT',
'input_paths': ['gs://my_bucket/my_file.csv'],
'output_path': 'gs://my_bucket/predict_output',
'region': 'us-central1',
'max_worker_count': 1,
}
job_id: id for the training job. If None, an id based on timestamp will be generated.
Returns:
A Job object representing the batch prediction job.
"""
if job_id is None:
job_id = 'prediction_' + datetime.datetime.now().strftime('%y%m%d_%H%M%S')
job = {
'job_id': job_id,
'prediction_input': job_request,
}
context = datalab.Context.default()
cloudml = discovery.build('ml', 'v1', credentials=context.credentials)
request = cloudml.projects().jobs().create(body=job,
parent='projects/' + context.project_id)
request.headers['user-agent'] = 'GoogleCloudDataLab/1.0'
request.execute()
return Job(job_id)
|
[
"def",
"submit_batch_prediction",
"(",
"job_request",
",",
"job_id",
"=",
"None",
")",
":",
"if",
"job_id",
"is",
"None",
":",
"job_id",
"=",
"'prediction_'",
"+",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"'%y%m%d_%H%M%S'",
")",
"job",
"=",
"{",
"'job_id'",
":",
"job_id",
",",
"'prediction_input'",
":",
"job_request",
",",
"}",
"context",
"=",
"datalab",
".",
"Context",
".",
"default",
"(",
")",
"cloudml",
"=",
"discovery",
".",
"build",
"(",
"'ml'",
",",
"'v1'",
",",
"credentials",
"=",
"context",
".",
"credentials",
")",
"request",
"=",
"cloudml",
".",
"projects",
"(",
")",
".",
"jobs",
"(",
")",
".",
"create",
"(",
"body",
"=",
"job",
",",
"parent",
"=",
"'projects/'",
"+",
"context",
".",
"project_id",
")",
"request",
".",
"headers",
"[",
"'user-agent'",
"]",
"=",
"'GoogleCloudDataLab/1.0'",
"request",
".",
"execute",
"(",
")",
"return",
"Job",
"(",
"job_id",
")"
] |
Submit a batch prediction job.
Args:
  job_request: the arguments of the batch prediction job in a dict. For example,
{
'version_name': 'projects/my-project/models/my-model/versions/my-version',
'data_format': 'TEXT',
'input_paths': ['gs://my_bucket/my_file.csv'],
'output_path': 'gs://my_bucket/predict_output',
'region': 'us-central1',
'max_worker_count': 1,
}
  job_id: id for the batch prediction job. If None, an id based on timestamp will be generated.
Returns:
A Job object representing the batch prediction job.
|
[
"Submit",
"a",
"batch",
"prediction",
"job",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_job.py#L119-L151
|
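A minimal usage sketch for submit_batch_prediction above (not part of the source record). It assumes the method is reachable as google.datalab.ml.Job.submit_batch_prediction and that a default Context with valid credentials is already configured; bucket and model names are placeholders.

from google.datalab import ml

job_request = {
    'version_name': 'projects/my-project/models/my-model/versions/my-version',
    'data_format': 'TEXT',
    'input_paths': ['gs://my_bucket/my_file.csv'],
    'output_path': 'gs://my_bucket/predict_output',
    'region': 'us-central1',
    'max_worker_count': 1,
}
# If job_id is omitted, an id of the form 'prediction_<yymmdd_HHMMSS>' is generated.
job = ml.Job.submit_batch_prediction(job_request, job_id='my_batch_prediction')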
4,812
|
googledatalab/pydatalab
|
solutionbox/image_classification/mltoolbox/image/classification/_inceptionlib.py
|
_reduced_kernel_size_for_small_input
|
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
"""Define kernel size which is automatically reduced for small input.
If the shape of the input images is unknown at graph construction time this
function assumes that the input images are large enough.
Args:
input_tensor: input tensor of size [batch_size, height, width, channels].
kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]
Returns:
a tensor with the kernel size.
TODO(jrru): Make this function work with unknown shapes. Theoretically, this
can be done with the code below. Problems are two-fold: (1) If the shape was
known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
handle tensors that define the kernel size.
shape = tf.shape(input_tensor)
return = tf.stack([tf.minimum(shape[1], kernel_size[0]),
tf.minimum(shape[2], kernel_size[1])])
"""
shape = input_tensor.get_shape().as_list()
if shape[1] is None or shape[2] is None:
kernel_size_out = kernel_size
else:
kernel_size_out = [min(shape[1], kernel_size[0]),
min(shape[2], kernel_size[1])]
return kernel_size_out
|
python
|
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
"""Define kernel size which is automatically reduced for small input.
If the shape of the input images is unknown at graph construction time this
function assumes that the input images are large enough.
Args:
input_tensor: input tensor of size [batch_size, height, width, channels].
kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]
Returns:
a tensor with the kernel size.
TODO(jrru): Make this function work with unknown shapes. Theoretically, this
can be done with the code below. Problems are two-fold: (1) If the shape was
known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
handle tensors that define the kernel size.
shape = tf.shape(input_tensor)
return = tf.stack([tf.minimum(shape[1], kernel_size[0]),
tf.minimum(shape[2], kernel_size[1])])
"""
shape = input_tensor.get_shape().as_list()
if shape[1] is None or shape[2] is None:
kernel_size_out = kernel_size
else:
kernel_size_out = [min(shape[1], kernel_size[0]),
min(shape[2], kernel_size[1])]
return kernel_size_out
|
[
"def",
"_reduced_kernel_size_for_small_input",
"(",
"input_tensor",
",",
"kernel_size",
")",
":",
"shape",
"=",
"input_tensor",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
"if",
"shape",
"[",
"1",
"]",
"is",
"None",
"or",
"shape",
"[",
"2",
"]",
"is",
"None",
":",
"kernel_size_out",
"=",
"kernel_size",
"else",
":",
"kernel_size_out",
"=",
"[",
"min",
"(",
"shape",
"[",
"1",
"]",
",",
"kernel_size",
"[",
"0",
"]",
")",
",",
"min",
"(",
"shape",
"[",
"2",
"]",
",",
"kernel_size",
"[",
"1",
"]",
")",
"]",
"return",
"kernel_size_out"
] |
Define kernel size which is automatically reduced for small input.
If the shape of the input images is unknown at graph construction time this
function assumes that the input images are large enough.
Args:
input_tensor: input tensor of size [batch_size, height, width, channels].
kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]
Returns:
a tensor with the kernel size.
TODO(jrru): Make this function work with unknown shapes. Theoretically, this
can be done with the code below. Problems are two-fold: (1) If the shape was
known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
handle tensors that define the kernel size.
shape = tf.shape(input_tensor)
return = tf.stack([tf.minimum(shape[1], kernel_size[0]),
tf.minimum(shape[2], kernel_size[1])])
|
[
"Define",
"kernel",
"size",
"which",
"is",
"automatically",
"reduced",
"for",
"small",
"input",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_inceptionlib.py#L556-L584
|
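A small behavioural sketch for _reduced_kernel_size_for_small_input (not from the source), assuming a TF 1.x graph and that the helper is called from within its own module:

import tensorflow as tf

# Spatial dims known and smaller than the requested kernel: the kernel is clipped.
small = tf.placeholder(tf.float32, [None, 6, 6, 32])
print(_reduced_kernel_size_for_small_input(small, [8, 8]))    # -> [6, 6]

# Spatial dims unknown at graph construction time: the requested size is kept.
unknown = tf.placeholder(tf.float32, [None, None, None, 32])
print(_reduced_kernel_size_for_small_input(unknown, [8, 8]))  # -> [8, 8]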
4,813
|
googledatalab/pydatalab
|
solutionbox/image_classification/mltoolbox/image/classification/_inceptionlib.py
|
inception_v3_arg_scope
|
def inception_v3_arg_scope(weight_decay=0.00004,
stddev=0.1,
batch_norm_var_collection='moving_vars'):
"""Defines the default InceptionV3 arg scope.
Args:
weight_decay: The weight decay to use for regularizing the model.
    stddev: The standard deviation of the truncated normal weight initializer.
batch_norm_var_collection: The name of the collection for the batch norm
variables.
Returns:
An `arg_scope` to use for the inception v3 model.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': 0.9997,
# epsilon to prevent 0s in variance.
'epsilon': 0.001,
# collection containing update_ops.
'updates_collections': tf.GraphKeys.UPDATE_OPS,
# collection containing the moving mean and moving variance.
'variables_collections': {
'beta': None,
'gamma': None,
'moving_mean': [batch_norm_var_collection],
'moving_variance': [batch_norm_var_collection],
}
}
# Set weight_decay for weights in Conv and FC layers.
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_regularizer=slim.l2_regularizer(weight_decay)):
with slim.arg_scope([slim.conv2d],
weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
activation_fn=tf.nn.relu, normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params) as sc:
return sc
|
python
|
def inception_v3_arg_scope(weight_decay=0.00004,
stddev=0.1,
batch_norm_var_collection='moving_vars'):
"""Defines the default InceptionV3 arg scope.
Args:
weight_decay: The weight decay to use for regularizing the model.
    stddev: The standard deviation of the truncated normal weight initializer.
batch_norm_var_collection: The name of the collection for the batch norm
variables.
Returns:
An `arg_scope` to use for the inception v3 model.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': 0.9997,
# epsilon to prevent 0s in variance.
'epsilon': 0.001,
# collection containing update_ops.
'updates_collections': tf.GraphKeys.UPDATE_OPS,
# collection containing the moving mean and moving variance.
'variables_collections': {
'beta': None,
'gamma': None,
'moving_mean': [batch_norm_var_collection],
'moving_variance': [batch_norm_var_collection],
}
}
# Set weight_decay for weights in Conv and FC layers.
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_regularizer=slim.l2_regularizer(weight_decay)):
with slim.arg_scope([slim.conv2d],
weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
activation_fn=tf.nn.relu, normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params) as sc:
return sc
|
[
"def",
"inception_v3_arg_scope",
"(",
"weight_decay",
"=",
"0.00004",
",",
"stddev",
"=",
"0.1",
",",
"batch_norm_var_collection",
"=",
"'moving_vars'",
")",
":",
"batch_norm_params",
"=",
"{",
"# Decay for the moving averages.",
"'decay'",
":",
"0.9997",
",",
"# epsilon to prevent 0s in variance.",
"'epsilon'",
":",
"0.001",
",",
"# collection containing update_ops.",
"'updates_collections'",
":",
"tf",
".",
"GraphKeys",
".",
"UPDATE_OPS",
",",
"# collection containing the moving mean and moving variance.",
"'variables_collections'",
":",
"{",
"'beta'",
":",
"None",
",",
"'gamma'",
":",
"None",
",",
"'moving_mean'",
":",
"[",
"batch_norm_var_collection",
"]",
",",
"'moving_variance'",
":",
"[",
"batch_norm_var_collection",
"]",
",",
"}",
"}",
"# Set weight_decay for weights in Conv and FC layers.",
"with",
"slim",
".",
"arg_scope",
"(",
"[",
"slim",
".",
"conv2d",
",",
"slim",
".",
"fully_connected",
"]",
",",
"weights_regularizer",
"=",
"slim",
".",
"l2_regularizer",
"(",
"weight_decay",
")",
")",
":",
"with",
"slim",
".",
"arg_scope",
"(",
"[",
"slim",
".",
"conv2d",
"]",
",",
"weights_initializer",
"=",
"tf",
".",
"truncated_normal_initializer",
"(",
"stddev",
"=",
"stddev",
")",
",",
"activation_fn",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"normalizer_fn",
"=",
"slim",
".",
"batch_norm",
",",
"normalizer_params",
"=",
"batch_norm_params",
")",
"as",
"sc",
":",
"return",
"sc"
] |
Defines the default InceptionV3 arg scope.
Args:
weight_decay: The weight decay to use for regularizing the model.
  stddev: The standard deviation of the truncated normal weight initializer.
batch_norm_var_collection: The name of the collection for the batch norm
variables.
Returns:
An `arg_scope` to use for the inception v3 model.
|
[
"Defines",
"the",
"default",
"InceptionV3",
"arg",
"scope",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_inceptionlib.py#L587-L624
|
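An illustrative sketch of consuming the arg scope above. It assumes slim refers to tf.contrib.slim (TF 1.x), as in the surrounding module; the layer name is a placeholder.

import tensorflow as tf
slim = tf.contrib.slim

images = tf.placeholder(tf.float32, [None, 299, 299, 3])
with slim.arg_scope(inception_v3_arg_scope(weight_decay=0.00004, stddev=0.1)):
  # Conv layers created inside this scope pick up the L2 regularizer, the
  # truncated-normal initializer and the batch-norm settings defined above.
  net = slim.conv2d(images, 32, [3, 3], stride=2, scope='Conv2d_1a_3x3')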
4,814
|
googledatalab/pydatalab
|
solutionbox/image_classification/mltoolbox/image/classification/_local.py
|
Local.preprocess
|
def preprocess(train_dataset, output_dir, eval_dataset, checkpoint):
"""Preprocess data locally."""
import apache_beam as beam
from google.datalab.utils import LambdaJob
from . import _preprocess
if checkpoint is None:
checkpoint = _util._DEFAULT_CHECKPOINT_GSURL
job_id = ('preprocess-image-classification-' +
datetime.datetime.now().strftime('%y%m%d-%H%M%S'))
# Project is needed for bigquery data source, even in local run.
options = {
'project': _util.default_project(),
}
opts = beam.pipeline.PipelineOptions(flags=[], **options)
p = beam.Pipeline('DirectRunner', options=opts)
_preprocess.configure_pipeline(p, train_dataset, eval_dataset, checkpoint, output_dir, job_id)
job = LambdaJob(lambda: p.run().wait_until_finish(), job_id)
return job
|
python
|
def preprocess(train_dataset, output_dir, eval_dataset, checkpoint):
"""Preprocess data locally."""
import apache_beam as beam
from google.datalab.utils import LambdaJob
from . import _preprocess
if checkpoint is None:
checkpoint = _util._DEFAULT_CHECKPOINT_GSURL
job_id = ('preprocess-image-classification-' +
datetime.datetime.now().strftime('%y%m%d-%H%M%S'))
# Project is needed for bigquery data source, even in local run.
options = {
'project': _util.default_project(),
}
opts = beam.pipeline.PipelineOptions(flags=[], **options)
p = beam.Pipeline('DirectRunner', options=opts)
_preprocess.configure_pipeline(p, train_dataset, eval_dataset, checkpoint, output_dir, job_id)
job = LambdaJob(lambda: p.run().wait_until_finish(), job_id)
return job
|
[
"def",
"preprocess",
"(",
"train_dataset",
",",
"output_dir",
",",
"eval_dataset",
",",
"checkpoint",
")",
":",
"import",
"apache_beam",
"as",
"beam",
"from",
"google",
".",
"datalab",
".",
"utils",
"import",
"LambdaJob",
"from",
".",
"import",
"_preprocess",
"if",
"checkpoint",
"is",
"None",
":",
"checkpoint",
"=",
"_util",
".",
"_DEFAULT_CHECKPOINT_GSURL",
"job_id",
"=",
"(",
"'preprocess-image-classification-'",
"+",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"'%y%m%d-%H%M%S'",
")",
")",
"# Project is needed for bigquery data source, even in local run.",
"options",
"=",
"{",
"'project'",
":",
"_util",
".",
"default_project",
"(",
")",
",",
"}",
"opts",
"=",
"beam",
".",
"pipeline",
".",
"PipelineOptions",
"(",
"flags",
"=",
"[",
"]",
",",
"*",
"*",
"options",
")",
"p",
"=",
"beam",
".",
"Pipeline",
"(",
"'DirectRunner'",
",",
"options",
"=",
"opts",
")",
"_preprocess",
".",
"configure_pipeline",
"(",
"p",
",",
"train_dataset",
",",
"eval_dataset",
",",
"checkpoint",
",",
"output_dir",
",",
"job_id",
")",
"job",
"=",
"LambdaJob",
"(",
"lambda",
":",
"p",
".",
"run",
"(",
")",
".",
"wait_until_finish",
"(",
")",
",",
"job_id",
")",
"return",
"job"
] |
Preprocess data locally.
|
[
"Preprocess",
"data",
"locally",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_local.py#L32-L51
|
4,815
|
googledatalab/pydatalab
|
solutionbox/image_classification/mltoolbox/image/classification/_local.py
|
Local.train
|
def train(input_dir, batch_size, max_steps, output_dir, checkpoint):
"""Train model locally."""
from google.datalab.utils import LambdaJob
if checkpoint is None:
checkpoint = _util._DEFAULT_CHECKPOINT_GSURL
labels = _util.get_labels(input_dir)
model = _model.Model(labels, 0.5, checkpoint)
task_data = {'type': 'master', 'index': 0}
task = type('TaskSpec', (object,), task_data)
job = LambdaJob(lambda: _trainer.Trainer(input_dir, batch_size, max_steps, output_dir,
model, None, task).run_training(), 'training')
return job
|
python
|
def train(input_dir, batch_size, max_steps, output_dir, checkpoint):
"""Train model locally."""
from google.datalab.utils import LambdaJob
if checkpoint is None:
checkpoint = _util._DEFAULT_CHECKPOINT_GSURL
labels = _util.get_labels(input_dir)
model = _model.Model(labels, 0.5, checkpoint)
task_data = {'type': 'master', 'index': 0}
task = type('TaskSpec', (object,), task_data)
job = LambdaJob(lambda: _trainer.Trainer(input_dir, batch_size, max_steps, output_dir,
model, None, task).run_training(), 'training')
return job
|
[
"def",
"train",
"(",
"input_dir",
",",
"batch_size",
",",
"max_steps",
",",
"output_dir",
",",
"checkpoint",
")",
":",
"from",
"google",
".",
"datalab",
".",
"utils",
"import",
"LambdaJob",
"if",
"checkpoint",
"is",
"None",
":",
"checkpoint",
"=",
"_util",
".",
"_DEFAULT_CHECKPOINT_GSURL",
"labels",
"=",
"_util",
".",
"get_labels",
"(",
"input_dir",
")",
"model",
"=",
"_model",
".",
"Model",
"(",
"labels",
",",
"0.5",
",",
"checkpoint",
")",
"task_data",
"=",
"{",
"'type'",
":",
"'master'",
",",
"'index'",
":",
"0",
"}",
"task",
"=",
"type",
"(",
"'TaskSpec'",
",",
"(",
"object",
",",
")",
",",
"task_data",
")",
"job",
"=",
"LambdaJob",
"(",
"lambda",
":",
"_trainer",
".",
"Trainer",
"(",
"input_dir",
",",
"batch_size",
",",
"max_steps",
",",
"output_dir",
",",
"model",
",",
"None",
",",
"task",
")",
".",
"run_training",
"(",
")",
",",
"'training'",
")",
"return",
"job"
] |
Train model locally.
|
[
"Train",
"model",
"locally",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_local.py#L54-L67
|
4,816
|
googledatalab/pydatalab
|
solutionbox/image_classification/mltoolbox/image/classification/_local.py
|
Local.predict
|
def predict(model_dir, image_files, resize, show_image):
"""Predict using an model in a local or GCS directory."""
from . import _predictor
images = _util.load_images(image_files, resize=resize)
labels_and_scores = _predictor.predict(model_dir, images)
results = zip(image_files, images, labels_and_scores)
ret = _util.process_prediction_results(results, show_image)
return ret
|
python
|
def predict(model_dir, image_files, resize, show_image):
"""Predict using an model in a local or GCS directory."""
from . import _predictor
images = _util.load_images(image_files, resize=resize)
labels_and_scores = _predictor.predict(model_dir, images)
results = zip(image_files, images, labels_and_scores)
ret = _util.process_prediction_results(results, show_image)
return ret
|
[
"def",
"predict",
"(",
"model_dir",
",",
"image_files",
",",
"resize",
",",
"show_image",
")",
":",
"from",
".",
"import",
"_predictor",
"images",
"=",
"_util",
".",
"load_images",
"(",
"image_files",
",",
"resize",
"=",
"resize",
")",
"labels_and_scores",
"=",
"_predictor",
".",
"predict",
"(",
"model_dir",
",",
"images",
")",
"results",
"=",
"zip",
"(",
"image_files",
",",
"images",
",",
"labels_and_scores",
")",
"ret",
"=",
"_util",
".",
"process_prediction_results",
"(",
"results",
",",
"show_image",
")",
"return",
"ret"
] |
Predict using a model in a local or GCS directory.
|
[
"Predict",
"using",
"an",
"model",
"in",
"a",
"local",
"or",
"GCS",
"directory",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_local.py#L70-L79
|
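A usage sketch for Local.predict above, with hypothetical paths; it assumes predict is exposed as a static method on the Local class.

results = Local.predict(
    model_dir='gs://my_bucket/image_model',          # a local directory also works
    image_files=['gs://my_bucket/images/rose.jpg'],
    resize=True,
    show_image=False)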
4,817
|
googledatalab/pydatalab
|
solutionbox/image_classification/mltoolbox/image/classification/_local.py
|
Local.batch_predict
|
def batch_predict(dataset, model_dir, output_csv, output_bq_table):
"""Batch predict running locally."""
import apache_beam as beam
from google.datalab.utils import LambdaJob
from . import _predictor
if output_csv is None and output_bq_table is None:
raise ValueError('output_csv and output_bq_table cannot both be None.')
job_id = ('batch-predict-image-classification-' +
datetime.datetime.now().strftime('%y%m%d-%H%M%S'))
# Project is needed for bigquery data source, even in local run.
options = {
'project': _util.default_project(),
}
opts = beam.pipeline.PipelineOptions(flags=[], **options)
p = beam.Pipeline('DirectRunner', options=opts)
_predictor.configure_pipeline(p, dataset, model_dir, output_csv, output_bq_table)
job = LambdaJob(lambda: p.run().wait_until_finish(), job_id)
return job
|
python
|
def batch_predict(dataset, model_dir, output_csv, output_bq_table):
"""Batch predict running locally."""
import apache_beam as beam
from google.datalab.utils import LambdaJob
from . import _predictor
if output_csv is None and output_bq_table is None:
raise ValueError('output_csv and output_bq_table cannot both be None.')
job_id = ('batch-predict-image-classification-' +
datetime.datetime.now().strftime('%y%m%d-%H%M%S'))
# Project is needed for bigquery data source, even in local run.
options = {
'project': _util.default_project(),
}
opts = beam.pipeline.PipelineOptions(flags=[], **options)
p = beam.Pipeline('DirectRunner', options=opts)
_predictor.configure_pipeline(p, dataset, model_dir, output_csv, output_bq_table)
job = LambdaJob(lambda: p.run().wait_until_finish(), job_id)
return job
|
[
"def",
"batch_predict",
"(",
"dataset",
",",
"model_dir",
",",
"output_csv",
",",
"output_bq_table",
")",
":",
"import",
"apache_beam",
"as",
"beam",
"from",
"google",
".",
"datalab",
".",
"utils",
"import",
"LambdaJob",
"from",
".",
"import",
"_predictor",
"if",
"output_csv",
"is",
"None",
"and",
"output_bq_table",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'output_csv and output_bq_table cannot both be None.'",
")",
"job_id",
"=",
"(",
"'batch-predict-image-classification-'",
"+",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"'%y%m%d-%H%M%S'",
")",
")",
"# Project is needed for bigquery data source, even in local run.",
"options",
"=",
"{",
"'project'",
":",
"_util",
".",
"default_project",
"(",
")",
",",
"}",
"opts",
"=",
"beam",
".",
"pipeline",
".",
"PipelineOptions",
"(",
"flags",
"=",
"[",
"]",
",",
"*",
"*",
"options",
")",
"p",
"=",
"beam",
".",
"Pipeline",
"(",
"'DirectRunner'",
",",
"options",
"=",
"opts",
")",
"_predictor",
".",
"configure_pipeline",
"(",
"p",
",",
"dataset",
",",
"model_dir",
",",
"output_csv",
",",
"output_bq_table",
")",
"job",
"=",
"LambdaJob",
"(",
"lambda",
":",
"p",
".",
"run",
"(",
")",
".",
"wait_until_finish",
"(",
")",
",",
"job_id",
")",
"return",
"job"
] |
Batch predict running locally.
|
[
"Batch",
"predict",
"running",
"locally",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_local.py#L82-L103
|
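A usage sketch for Local.batch_predict above. Paths are placeholders, exactly one of output_csv / output_bq_table should be set, and the dataset type (google.datalab.ml.CsvDataSet) is an assumption about what configure_pipeline accepts.

from google.datalab import ml

# Assumed CSV layout: image_url,label rows describing the images to score.
dataset = ml.CsvDataSet('gs://my_bucket/eval.csv',
                        schema='image_url:STRING,label:STRING')
job = Local.batch_predict(dataset,
                          model_dir='gs://my_bucket/image_model',
                          output_csv='gs://my_bucket/predict_out/results.csv',
                          output_bq_table=None)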
4,818
|
googledatalab/pydatalab
|
datalab/utils/_job.py
|
Job.result
|
def result(self):
""" Get the result for a job. This will block if the job is incomplete.
Returns:
The result for the Job.
Raises:
An exception if the Job resulted in an exception.
"""
self.wait()
if self._fatal_error:
raise self._fatal_error
return self._result
|
python
|
def result(self):
""" Get the result for a job. This will block if the job is incomplete.
Returns:
The result for the Job.
Raises:
An exception if the Job resulted in an exception.
"""
self.wait()
if self._fatal_error:
raise self._fatal_error
return self._result
|
[
"def",
"result",
"(",
"self",
")",
":",
"self",
".",
"wait",
"(",
")",
"if",
"self",
".",
"_fatal_error",
":",
"raise",
"self",
".",
"_fatal_error",
"return",
"self",
".",
"_result"
] |
Get the result for a job. This will block if the job is incomplete.
Returns:
The result for the Job.
Raises:
An exception if the Job resulted in an exception.
|
[
"Get",
"the",
"result",
"for",
"a",
"job",
".",
"This",
"will",
"block",
"if",
"the",
"job",
"is",
"incomplete",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/utils/_job.py#L119-L132
|
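A behavioural sketch for result(); here 'job' stands for any Job instance produced by the APIs in this file set (for example the LambdaJob returned by Local.batch_predict above).

try:
    value = job.result()        # blocks via wait() until the job completes
except Exception as e:          # the job's fatal error is re-raised as a JobError
    print('job failed: %s' % e)
else:
    print('job produced: %s' % value)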
4,819
|
googledatalab/pydatalab
|
datalab/utils/_job.py
|
Job._refresh_state
|
def _refresh_state(self):
""" Get the state of a job. Must be overridden by derived Job classes
for Jobs that don't use a Future.
"""
if self._is_complete:
return
if not self._future:
raise Exception('Please implement this in the derived class')
if self._future.done():
self._is_complete = True
self._end_time = datetime.datetime.utcnow()
try:
self._result = self._future.result()
except Exception as e:
message = str(e)
self._fatal_error = JobError(location=traceback.format_exc(), message=message,
reason=str(type(e)))
|
python
|
def _refresh_state(self):
""" Get the state of a job. Must be overridden by derived Job classes
for Jobs that don't use a Future.
"""
if self._is_complete:
return
if not self._future:
raise Exception('Please implement this in the derived class')
if self._future.done():
self._is_complete = True
self._end_time = datetime.datetime.utcnow()
try:
self._result = self._future.result()
except Exception as e:
message = str(e)
self._fatal_error = JobError(location=traceback.format_exc(), message=message,
reason=str(type(e)))
|
[
"def",
"_refresh_state",
"(",
"self",
")",
":",
"if",
"self",
".",
"_is_complete",
":",
"return",
"if",
"not",
"self",
".",
"_future",
":",
"raise",
"Exception",
"(",
"'Please implement this in the derived class'",
")",
"if",
"self",
".",
"_future",
".",
"done",
"(",
")",
":",
"self",
".",
"_is_complete",
"=",
"True",
"self",
".",
"_end_time",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"try",
":",
"self",
".",
"_result",
"=",
"self",
".",
"_future",
".",
"result",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"message",
"=",
"str",
"(",
"e",
")",
"self",
".",
"_fatal_error",
"=",
"JobError",
"(",
"location",
"=",
"traceback",
".",
"format_exc",
"(",
")",
",",
"message",
"=",
"message",
",",
"reason",
"=",
"str",
"(",
"type",
"(",
"e",
")",
")",
")"
] |
Get the state of a job. Must be overridden by derived Job classes
for Jobs that don't use a Future.
|
[
"Get",
"the",
"state",
"of",
"a",
"job",
".",
"Must",
"be",
"overridden",
"by",
"derived",
"Job",
"classes",
"for",
"Jobs",
"that",
"don",
"t",
"use",
"a",
"Future",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/utils/_job.py#L151-L169
|
4,820
|
googledatalab/pydatalab
|
datalab/utils/_job.py
|
Job.state
|
def state(self):
""" Describe the state of a Job.
Returns: A string describing the job's state.
"""
state = 'in progress'
if self.is_complete:
if self.failed:
state = 'failed with error: %s' % str(self._fatal_error)
elif self._errors:
state = 'completed with some non-fatal errors'
else:
state = 'completed'
return state
|
python
|
def state(self):
""" Describe the state of a Job.
Returns: A string describing the job's state.
"""
state = 'in progress'
if self.is_complete:
if self.failed:
state = 'failed with error: %s' % str(self._fatal_error)
elif self._errors:
state = 'completed with some non-fatal errors'
else:
state = 'completed'
return state
|
[
"def",
"state",
"(",
"self",
")",
":",
"state",
"=",
"'in progress'",
"if",
"self",
".",
"is_complete",
":",
"if",
"self",
".",
"failed",
":",
"state",
"=",
"'failed with error: %s'",
"%",
"str",
"(",
"self",
".",
"_fatal_error",
")",
"elif",
"self",
".",
"_errors",
":",
"state",
"=",
"'completed with some non-fatal errors'",
"else",
":",
"state",
"=",
"'completed'",
"return",
"state"
] |
Describe the state of a Job.
Returns: A string describing the job's state.
|
[
"Describe",
"the",
"state",
"of",
"a",
"Job",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/utils/_job.py#L202-L215
|
4,821
|
googledatalab/pydatalab
|
datalab/utils/_job.py
|
Job.wait_any
|
def wait_any(jobs, timeout=None):
""" Return when at least one of the specified jobs has completed or timeout expires.
Args:
jobs: a Job or list of Jobs to wait on.
timeout: a timeout in seconds to wait for. None (the default) means no timeout.
Returns:
A list of the jobs that have now completed or None if there were no jobs.
"""
return Job._wait(jobs, timeout, concurrent.futures.FIRST_COMPLETED)
|
python
|
def wait_any(jobs, timeout=None):
""" Return when at least one of the specified jobs has completed or timeout expires.
Args:
jobs: a Job or list of Jobs to wait on.
timeout: a timeout in seconds to wait for. None (the default) means no timeout.
Returns:
A list of the jobs that have now completed or None if there were no jobs.
"""
return Job._wait(jobs, timeout, concurrent.futures.FIRST_COMPLETED)
|
[
"def",
"wait_any",
"(",
"jobs",
",",
"timeout",
"=",
"None",
")",
":",
"return",
"Job",
".",
"_wait",
"(",
"jobs",
",",
"timeout",
",",
"concurrent",
".",
"futures",
".",
"FIRST_COMPLETED",
")"
] |
Return when at least one of the specified jobs has completed or timeout expires.
Args:
jobs: a Job or list of Jobs to wait on.
timeout: a timeout in seconds to wait for. None (the default) means no timeout.
Returns:
A list of the jobs that have now completed or None if there were no jobs.
|
[
"Return",
"when",
"at",
"least",
"one",
"of",
"the",
"specified",
"jobs",
"has",
"completed",
"or",
"timeout",
"expires",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/utils/_job.py#L257-L267
|
4,822
|
googledatalab/pydatalab
|
datalab/utils/_job.py
|
Job.wait_all
|
def wait_all(jobs, timeout=None):
""" Return when at all of the specified jobs have completed or timeout expires.
Args:
jobs: a Job or list of Jobs to wait on.
timeout: a timeout in seconds to wait for. None (the default) means no timeout.
Returns:
A list of the jobs that have now completed or None if there were no jobs.
"""
return Job._wait(jobs, timeout, concurrent.futures.ALL_COMPLETED)
|
python
|
def wait_all(jobs, timeout=None):
""" Return when at all of the specified jobs have completed or timeout expires.
Args:
jobs: a Job or list of Jobs to wait on.
timeout: a timeout in seconds to wait for. None (the default) means no timeout.
Returns:
A list of the jobs that have now completed or None if there were no jobs.
"""
return Job._wait(jobs, timeout, concurrent.futures.ALL_COMPLETED)
|
[
"def",
"wait_all",
"(",
"jobs",
",",
"timeout",
"=",
"None",
")",
":",
"return",
"Job",
".",
"_wait",
"(",
"jobs",
",",
"timeout",
",",
"concurrent",
".",
"futures",
".",
"ALL_COMPLETED",
")"
] |
Return when all of the specified jobs have completed or timeout expires.
Args:
jobs: a Job or list of Jobs to wait on.
timeout: a timeout in seconds to wait for. None (the default) means no timeout.
Returns:
A list of the jobs that have now completed or None if there were no jobs.
|
[
"Return",
"when",
"at",
"all",
"of",
"the",
"specified",
"jobs",
"have",
"completed",
"or",
"timeout",
"expires",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/utils/_job.py#L270-L279
|
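A sketch combining the two waiters above; job_a and job_b stand for Job instances, and both helpers are assumed to be exposed as static methods of Job (they take no self).

finished = Job.wait_any([job_a, job_b], timeout=60)   # returns once either job is done
if finished:
    print('%d job(s) finished within a minute' % len(finished))

all_finished = Job.wait_all([job_a, job_b])           # no timeout: blocks until both are done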
4,823
|
googledatalab/pydatalab
|
solutionbox/image_classification/mltoolbox/image/classification/_trainer.py
|
Evaluator.evaluate
|
def evaluate(self, num_eval_batches=None):
"""Run one round of evaluation, return loss and accuracy."""
num_eval_batches = num_eval_batches or self.num_eval_batches
with tf.Graph().as_default() as graph:
self.tensors = self.model.build_eval_graph(self.eval_data_paths,
self.batch_size)
self.summary = tf.summary.merge_all()
self.saver = tf.train.Saver()
self.summary_writer = tf.summary.FileWriter(self.output_path)
self.sv = tf.train.Supervisor(
graph=graph,
logdir=self.output_path,
summary_op=None,
global_step=None,
saver=self.saver)
last_checkpoint = tf.train.latest_checkpoint(self.checkpoint_path)
with self.sv.managed_session(master='', start_standard_services=False) as session:
self.sv.saver.restore(session, last_checkpoint)
if not self.batch_of_examples:
self.sv.start_queue_runners(session)
for i in range(num_eval_batches):
self.batch_of_examples.append(session.run(self.tensors.examples))
for i in range(num_eval_batches):
session.run(self.tensors.metric_updates,
{self.tensors.examples: self.batch_of_examples[i]})
metric_values = session.run(self.tensors.metric_values)
global_step = tf.train.global_step(session, self.tensors.global_step)
summary = session.run(self.summary)
self.summary_writer.add_summary(summary, global_step)
self.summary_writer.flush()
return metric_values
|
python
|
def evaluate(self, num_eval_batches=None):
"""Run one round of evaluation, return loss and accuracy."""
num_eval_batches = num_eval_batches or self.num_eval_batches
with tf.Graph().as_default() as graph:
self.tensors = self.model.build_eval_graph(self.eval_data_paths,
self.batch_size)
self.summary = tf.summary.merge_all()
self.saver = tf.train.Saver()
self.summary_writer = tf.summary.FileWriter(self.output_path)
self.sv = tf.train.Supervisor(
graph=graph,
logdir=self.output_path,
summary_op=None,
global_step=None,
saver=self.saver)
last_checkpoint = tf.train.latest_checkpoint(self.checkpoint_path)
with self.sv.managed_session(master='', start_standard_services=False) as session:
self.sv.saver.restore(session, last_checkpoint)
if not self.batch_of_examples:
self.sv.start_queue_runners(session)
for i in range(num_eval_batches):
self.batch_of_examples.append(session.run(self.tensors.examples))
for i in range(num_eval_batches):
session.run(self.tensors.metric_updates,
{self.tensors.examples: self.batch_of_examples[i]})
metric_values = session.run(self.tensors.metric_values)
global_step = tf.train.global_step(session, self.tensors.global_step)
summary = session.run(self.summary)
self.summary_writer.add_summary(summary, global_step)
self.summary_writer.flush()
return metric_values
|
[
"def",
"evaluate",
"(",
"self",
",",
"num_eval_batches",
"=",
"None",
")",
":",
"num_eval_batches",
"=",
"num_eval_batches",
"or",
"self",
".",
"num_eval_batches",
"with",
"tf",
".",
"Graph",
"(",
")",
".",
"as_default",
"(",
")",
"as",
"graph",
":",
"self",
".",
"tensors",
"=",
"self",
".",
"model",
".",
"build_eval_graph",
"(",
"self",
".",
"eval_data_paths",
",",
"self",
".",
"batch_size",
")",
"self",
".",
"summary",
"=",
"tf",
".",
"summary",
".",
"merge_all",
"(",
")",
"self",
".",
"saver",
"=",
"tf",
".",
"train",
".",
"Saver",
"(",
")",
"self",
".",
"summary_writer",
"=",
"tf",
".",
"summary",
".",
"FileWriter",
"(",
"self",
".",
"output_path",
")",
"self",
".",
"sv",
"=",
"tf",
".",
"train",
".",
"Supervisor",
"(",
"graph",
"=",
"graph",
",",
"logdir",
"=",
"self",
".",
"output_path",
",",
"summary_op",
"=",
"None",
",",
"global_step",
"=",
"None",
",",
"saver",
"=",
"self",
".",
"saver",
")",
"last_checkpoint",
"=",
"tf",
".",
"train",
".",
"latest_checkpoint",
"(",
"self",
".",
"checkpoint_path",
")",
"with",
"self",
".",
"sv",
".",
"managed_session",
"(",
"master",
"=",
"''",
",",
"start_standard_services",
"=",
"False",
")",
"as",
"session",
":",
"self",
".",
"sv",
".",
"saver",
".",
"restore",
"(",
"session",
",",
"last_checkpoint",
")",
"if",
"not",
"self",
".",
"batch_of_examples",
":",
"self",
".",
"sv",
".",
"start_queue_runners",
"(",
"session",
")",
"for",
"i",
"in",
"range",
"(",
"num_eval_batches",
")",
":",
"self",
".",
"batch_of_examples",
".",
"append",
"(",
"session",
".",
"run",
"(",
"self",
".",
"tensors",
".",
"examples",
")",
")",
"for",
"i",
"in",
"range",
"(",
"num_eval_batches",
")",
":",
"session",
".",
"run",
"(",
"self",
".",
"tensors",
".",
"metric_updates",
",",
"{",
"self",
".",
"tensors",
".",
"examples",
":",
"self",
".",
"batch_of_examples",
"[",
"i",
"]",
"}",
")",
"metric_values",
"=",
"session",
".",
"run",
"(",
"self",
".",
"tensors",
".",
"metric_values",
")",
"global_step",
"=",
"tf",
".",
"train",
".",
"global_step",
"(",
"session",
",",
"self",
".",
"tensors",
".",
"global_step",
")",
"summary",
"=",
"session",
".",
"run",
"(",
"self",
".",
"summary",
")",
"self",
".",
"summary_writer",
".",
"add_summary",
"(",
"summary",
",",
"global_step",
")",
"self",
".",
"summary_writer",
".",
"flush",
"(",
")",
"return",
"metric_values"
] |
Run one round of evaluation, return loss and accuracy.
|
[
"Run",
"one",
"round",
"of",
"evaluation",
"return",
"loss",
"and",
"accuracy",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_trainer.py#L65-L101
|
4,824
|
googledatalab/pydatalab
|
solutionbox/image_classification/mltoolbox/image/classification/_trainer.py
|
Trainer.log
|
def log(self, session):
"""Logs training progress."""
logging.info('Train [%s/%d], step %d (%.3f sec) %.1f '
'global steps/s, %.1f local steps/s', self.task.type,
self.task.index, self.global_step,
(self.now - self.start_time),
(self.global_step - self.last_global_step) /
(self.now - self.last_global_time),
(self.local_step - self.last_local_step) /
(self.now - self.last_local_time))
self.last_log = self.now
self.last_global_step, self.last_global_time = self.global_step, self.now
self.last_local_step, self.last_local_time = self.local_step, self.now
|
python
|
def log(self, session):
"""Logs training progress."""
logging.info('Train [%s/%d], step %d (%.3f sec) %.1f '
'global steps/s, %.1f local steps/s', self.task.type,
self.task.index, self.global_step,
(self.now - self.start_time),
(self.global_step - self.last_global_step) /
(self.now - self.last_global_time),
(self.local_step - self.last_local_step) /
(self.now - self.last_local_time))
self.last_log = self.now
self.last_global_step, self.last_global_time = self.global_step, self.now
self.last_local_step, self.last_local_time = self.local_step, self.now
|
[
"def",
"log",
"(",
"self",
",",
"session",
")",
":",
"logging",
".",
"info",
"(",
"'Train [%s/%d], step %d (%.3f sec) %.1f '",
"'global steps/s, %.1f local steps/s'",
",",
"self",
".",
"task",
".",
"type",
",",
"self",
".",
"task",
".",
"index",
",",
"self",
".",
"global_step",
",",
"(",
"self",
".",
"now",
"-",
"self",
".",
"start_time",
")",
",",
"(",
"self",
".",
"global_step",
"-",
"self",
".",
"last_global_step",
")",
"/",
"(",
"self",
".",
"now",
"-",
"self",
".",
"last_global_time",
")",
",",
"(",
"self",
".",
"local_step",
"-",
"self",
".",
"last_local_step",
")",
"/",
"(",
"self",
".",
"now",
"-",
"self",
".",
"last_local_time",
")",
")",
"self",
".",
"last_log",
"=",
"self",
".",
"now",
"self",
".",
"last_global_step",
",",
"self",
".",
"last_global_time",
"=",
"self",
".",
"global_step",
",",
"self",
".",
"now",
"self",
".",
"last_local_step",
",",
"self",
".",
"last_local_time",
"=",
"self",
".",
"local_step",
",",
"self",
".",
"now"
] |
Logs training progress.
|
[
"Logs",
"training",
"progress",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_trainer.py#L232-L244
|
4,825
|
googledatalab/pydatalab
|
solutionbox/image_classification/mltoolbox/image/classification/_trainer.py
|
Trainer.eval
|
def eval(self, session):
"""Runs evaluation loop."""
eval_start = time.time()
self.saver.save(session, self.sv.save_path, self.tensors.global_step)
logging.info(
'Eval, step %d:\n- on train set %s\n-- on eval set %s',
self.global_step,
self.model.format_metric_values(self.train_evaluator.evaluate()),
self.model.format_metric_values(self.evaluator.evaluate()))
now = time.time()
# Make sure eval doesn't consume too much of total time.
eval_time = now - eval_start
train_eval_rate = self.eval_interval / eval_time
if train_eval_rate < self.min_train_eval_rate and self.last_save > 0:
logging.info('Adjusting eval interval from %.2fs to %.2fs',
self.eval_interval, self.min_train_eval_rate * eval_time)
self.eval_interval = self.min_train_eval_rate * eval_time
self.last_save = now
self.last_log = now
|
python
|
def eval(self, session):
"""Runs evaluation loop."""
eval_start = time.time()
self.saver.save(session, self.sv.save_path, self.tensors.global_step)
logging.info(
'Eval, step %d:\n- on train set %s\n-- on eval set %s',
self.global_step,
self.model.format_metric_values(self.train_evaluator.evaluate()),
self.model.format_metric_values(self.evaluator.evaluate()))
now = time.time()
# Make sure eval doesn't consume too much of total time.
eval_time = now - eval_start
train_eval_rate = self.eval_interval / eval_time
if train_eval_rate < self.min_train_eval_rate and self.last_save > 0:
logging.info('Adjusting eval interval from %.2fs to %.2fs',
self.eval_interval, self.min_train_eval_rate * eval_time)
self.eval_interval = self.min_train_eval_rate * eval_time
self.last_save = now
self.last_log = now
|
[
"def",
"eval",
"(",
"self",
",",
"session",
")",
":",
"eval_start",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"saver",
".",
"save",
"(",
"session",
",",
"self",
".",
"sv",
".",
"save_path",
",",
"self",
".",
"tensors",
".",
"global_step",
")",
"logging",
".",
"info",
"(",
"'Eval, step %d:\\n- on train set %s\\n-- on eval set %s'",
",",
"self",
".",
"global_step",
",",
"self",
".",
"model",
".",
"format_metric_values",
"(",
"self",
".",
"train_evaluator",
".",
"evaluate",
"(",
")",
")",
",",
"self",
".",
"model",
".",
"format_metric_values",
"(",
"self",
".",
"evaluator",
".",
"evaluate",
"(",
")",
")",
")",
"now",
"=",
"time",
".",
"time",
"(",
")",
"# Make sure eval doesn't consume too much of total time.",
"eval_time",
"=",
"now",
"-",
"eval_start",
"train_eval_rate",
"=",
"self",
".",
"eval_interval",
"/",
"eval_time",
"if",
"train_eval_rate",
"<",
"self",
".",
"min_train_eval_rate",
"and",
"self",
".",
"last_save",
">",
"0",
":",
"logging",
".",
"info",
"(",
"'Adjusting eval interval from %.2fs to %.2fs'",
",",
"self",
".",
"eval_interval",
",",
"self",
".",
"min_train_eval_rate",
"*",
"eval_time",
")",
"self",
".",
"eval_interval",
"=",
"self",
".",
"min_train_eval_rate",
"*",
"eval_time",
"self",
".",
"last_save",
"=",
"now",
"self",
".",
"last_log",
"=",
"now"
] |
Runs evaluation loop.
|
[
"Runs",
"evaluation",
"loop",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_trainer.py#L246-L266
|
4,826
|
googledatalab/pydatalab
|
google/datalab/ml/_feature_slice_view.py
|
FeatureSliceView.plot
|
def plot(self, data):
""" Plots a featire slice view on given data.
Args:
data: Can be one of:
        A SQL query string.
A sql query module defined by "%%sql --module module_name".
A pandas DataFrame.
Regardless of data type, it must include the following columns:
"feature": identifies a slice of features. For example: "petal_length:4.0-4.2".
"count": number of instances in that slice of features.
All other columns are viewed as metrics for its feature slice. At least one is required.
"""
import IPython
if ((sys.version_info.major > 2 and isinstance(data, str)) or
(sys.version_info.major <= 2 and isinstance(data, basestring))):
data = bq.Query(data)
if isinstance(data, bq.Query):
df = data.execute().result().to_dataframe()
data = self._get_lantern_format(df)
elif isinstance(data, pd.core.frame.DataFrame):
data = self._get_lantern_format(data)
else:
raise Exception('data needs to be a sql query, or a pandas DataFrame.')
HTML_TEMPLATE = """<link rel="import" href="/nbextensions/gcpdatalab/extern/lantern-browser.html" >
<lantern-browser id="{html_id}"></lantern-browser>
<script>
var browser = document.querySelector('#{html_id}');
browser.metrics = {metrics};
browser.data = {data};
browser.sourceType = 'colab';
browser.weightedExamplesColumn = 'count';
browser.calibrationPlotUriFn = function(s) {{ return '/' + s; }}
</script>"""
# Serialize the data and list of metrics names to JSON string.
metrics_str = str(map(str, data[0]['metricValues'].keys()))
data_str = str([{str(k): json.dumps(v) for k, v in elem.iteritems()} for elem in data])
html_id = 'l' + datalab.utils.commands.Html.next_id()
html = HTML_TEMPLATE.format(html_id=html_id, metrics=metrics_str, data=data_str)
IPython.display.display(IPython.display.HTML(html))
|
python
|
def plot(self, data):
""" Plots a featire slice view on given data.
Args:
data: Can be one of:
        A SQL query string.
A sql query module defined by "%%sql --module module_name".
A pandas DataFrame.
Regardless of data type, it must include the following columns:
"feature": identifies a slice of features. For example: "petal_length:4.0-4.2".
"count": number of instances in that slice of features.
All other columns are viewed as metrics for its feature slice. At least one is required.
"""
import IPython
if ((sys.version_info.major > 2 and isinstance(data, str)) or
(sys.version_info.major <= 2 and isinstance(data, basestring))):
data = bq.Query(data)
if isinstance(data, bq.Query):
df = data.execute().result().to_dataframe()
data = self._get_lantern_format(df)
elif isinstance(data, pd.core.frame.DataFrame):
data = self._get_lantern_format(data)
else:
raise Exception('data needs to be a sql query, or a pandas DataFrame.')
HTML_TEMPLATE = """<link rel="import" href="/nbextensions/gcpdatalab/extern/lantern-browser.html" >
<lantern-browser id="{html_id}"></lantern-browser>
<script>
var browser = document.querySelector('#{html_id}');
browser.metrics = {metrics};
browser.data = {data};
browser.sourceType = 'colab';
browser.weightedExamplesColumn = 'count';
browser.calibrationPlotUriFn = function(s) {{ return '/' + s; }}
</script>"""
# Serialize the data and list of metrics names to JSON string.
metrics_str = str(map(str, data[0]['metricValues'].keys()))
data_str = str([{str(k): json.dumps(v) for k, v in elem.iteritems()} for elem in data])
html_id = 'l' + datalab.utils.commands.Html.next_id()
html = HTML_TEMPLATE.format(html_id=html_id, metrics=metrics_str, data=data_str)
IPython.display.display(IPython.display.HTML(html))
|
[
"def",
"plot",
"(",
"self",
",",
"data",
")",
":",
"import",
"IPython",
"if",
"(",
"(",
"sys",
".",
"version_info",
".",
"major",
">",
"2",
"and",
"isinstance",
"(",
"data",
",",
"str",
")",
")",
"or",
"(",
"sys",
".",
"version_info",
".",
"major",
"<=",
"2",
"and",
"isinstance",
"(",
"data",
",",
"basestring",
")",
")",
")",
":",
"data",
"=",
"bq",
".",
"Query",
"(",
"data",
")",
"if",
"isinstance",
"(",
"data",
",",
"bq",
".",
"Query",
")",
":",
"df",
"=",
"data",
".",
"execute",
"(",
")",
".",
"result",
"(",
")",
".",
"to_dataframe",
"(",
")",
"data",
"=",
"self",
".",
"_get_lantern_format",
"(",
"df",
")",
"elif",
"isinstance",
"(",
"data",
",",
"pd",
".",
"core",
".",
"frame",
".",
"DataFrame",
")",
":",
"data",
"=",
"self",
".",
"_get_lantern_format",
"(",
"data",
")",
"else",
":",
"raise",
"Exception",
"(",
"'data needs to be a sql query, or a pandas DataFrame.'",
")",
"HTML_TEMPLATE",
"=",
"\"\"\"<link rel=\"import\" href=\"/nbextensions/gcpdatalab/extern/lantern-browser.html\" >\n <lantern-browser id=\"{html_id}\"></lantern-browser>\n <script>\n var browser = document.querySelector('#{html_id}');\n browser.metrics = {metrics};\n browser.data = {data};\n browser.sourceType = 'colab';\n browser.weightedExamplesColumn = 'count';\n browser.calibrationPlotUriFn = function(s) {{ return '/' + s; }}\n </script>\"\"\"",
"# Serialize the data and list of metrics names to JSON string.",
"metrics_str",
"=",
"str",
"(",
"map",
"(",
"str",
",",
"data",
"[",
"0",
"]",
"[",
"'metricValues'",
"]",
".",
"keys",
"(",
")",
")",
")",
"data_str",
"=",
"str",
"(",
"[",
"{",
"str",
"(",
"k",
")",
":",
"json",
".",
"dumps",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"elem",
".",
"iteritems",
"(",
")",
"}",
"for",
"elem",
"in",
"data",
"]",
")",
"html_id",
"=",
"'l'",
"+",
"datalab",
".",
"utils",
".",
"commands",
".",
"Html",
".",
"next_id",
"(",
")",
"html",
"=",
"HTML_TEMPLATE",
".",
"format",
"(",
"html_id",
"=",
"html_id",
",",
"metrics",
"=",
"metrics_str",
",",
"data",
"=",
"data_str",
")",
"IPython",
".",
"display",
".",
"display",
"(",
"IPython",
".",
"display",
".",
"HTML",
"(",
"html",
")",
")"
] |
Plots a feature slice view on given data.
Args:
data: Can be one of:
      A SQL query string.
A sql query module defined by "%%sql --module module_name".
A pandas DataFrame.
Regardless of data type, it must include the following columns:
"feature": identifies a slice of features. For example: "petal_length:4.0-4.2".
"count": number of instances in that slice of features.
All other columns are viewed as metrics for its feature slice. At least one is required.
|
[
"Plots",
"a",
"featire",
"slice",
"view",
"on",
"given",
"data",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_feature_slice_view.py#L46-L88
|
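A usage sketch for plot() with a pandas DataFrame. It assumes FeatureSliceView is importable from google.datalab.ml with a no-argument constructor; the columns follow the contract in the docstring ('feature', 'count', plus at least one metric column).

import pandas as pd
from google.datalab.ml import FeatureSliceView

df = pd.DataFrame({
    'feature': ['petal_length:4.0-4.2', 'petal_length:4.2-4.4'],
    'count': [120, 98],
    'accuracy': [0.91, 0.87],
})
FeatureSliceView().plot(df)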
4,827
|
googledatalab/pydatalab
|
solutionbox/structured_data/mltoolbox/_structured_data/preprocess/cloud_preprocess.py
|
run_analysis
|
def run_analysis(args):
"""Builds an analysis file for training.
  Uses BigQuery tables to do the analysis.
Args:
args: command line args
Raises:
ValueError if schema contains unknown types.
"""
import google.datalab.bigquery as bq
if args.bigquery_table:
table = bq.Table(args.bigquery_table)
schema_list = table.schema._bq_schema
else:
schema_list = json.loads(
file_io.read_file_to_string(args.schema_file).decode())
table = bq.ExternalDataSource(
source=args.input_file_pattern,
schema=bq.Schema(schema_list))
# Check the schema is supported.
for col_schema in schema_list:
col_type = col_schema['type'].lower()
if col_type != 'string' and col_type != 'integer' and col_type != 'float':
raise ValueError('Schema contains an unsupported type %s.' % col_type)
run_numerical_analysis(table, schema_list, args)
run_categorical_analysis(table, schema_list, args)
# Save a copy of the schema to the output location.
file_io.write_string_to_file(
os.path.join(args.output_dir, SCHEMA_FILE),
json.dumps(schema_list, indent=2, separators=(',', ': ')))
|
python
|
def run_analysis(args):
"""Builds an analysis file for training.
  Uses BigQuery tables to do the analysis.
Args:
args: command line args
Raises:
ValueError if schema contains unknown types.
"""
import google.datalab.bigquery as bq
if args.bigquery_table:
table = bq.Table(args.bigquery_table)
schema_list = table.schema._bq_schema
else:
schema_list = json.loads(
file_io.read_file_to_string(args.schema_file).decode())
table = bq.ExternalDataSource(
source=args.input_file_pattern,
schema=bq.Schema(schema_list))
# Check the schema is supported.
for col_schema in schema_list:
col_type = col_schema['type'].lower()
if col_type != 'string' and col_type != 'integer' and col_type != 'float':
raise ValueError('Schema contains an unsupported type %s.' % col_type)
run_numerical_analysis(table, schema_list, args)
run_categorical_analysis(table, schema_list, args)
# Save a copy of the schema to the output location.
file_io.write_string_to_file(
os.path.join(args.output_dir, SCHEMA_FILE),
json.dumps(schema_list, indent=2, separators=(',', ': ')))
|
[
"def",
"run_analysis",
"(",
"args",
")",
":",
"import",
"google",
".",
"datalab",
".",
"bigquery",
"as",
"bq",
"if",
"args",
".",
"bigquery_table",
":",
"table",
"=",
"bq",
".",
"Table",
"(",
"args",
".",
"bigquery_table",
")",
"schema_list",
"=",
"table",
".",
"schema",
".",
"_bq_schema",
"else",
":",
"schema_list",
"=",
"json",
".",
"loads",
"(",
"file_io",
".",
"read_file_to_string",
"(",
"args",
".",
"schema_file",
")",
".",
"decode",
"(",
")",
")",
"table",
"=",
"bq",
".",
"ExternalDataSource",
"(",
"source",
"=",
"args",
".",
"input_file_pattern",
",",
"schema",
"=",
"bq",
".",
"Schema",
"(",
"schema_list",
")",
")",
"# Check the schema is supported.",
"for",
"col_schema",
"in",
"schema_list",
":",
"col_type",
"=",
"col_schema",
"[",
"'type'",
"]",
".",
"lower",
"(",
")",
"if",
"col_type",
"!=",
"'string'",
"and",
"col_type",
"!=",
"'integer'",
"and",
"col_type",
"!=",
"'float'",
":",
"raise",
"ValueError",
"(",
"'Schema contains an unsupported type %s.'",
"%",
"col_type",
")",
"run_numerical_analysis",
"(",
"table",
",",
"schema_list",
",",
"args",
")",
"run_categorical_analysis",
"(",
"table",
",",
"schema_list",
",",
"args",
")",
"# Save a copy of the schema to the output location.",
"file_io",
".",
"write_string_to_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"output_dir",
",",
"SCHEMA_FILE",
")",
",",
"json",
".",
"dumps",
"(",
"schema_list",
",",
"indent",
"=",
"2",
",",
"separators",
"=",
"(",
"','",
",",
"': '",
")",
")",
")"
] |
Builds an analysis file for training.
Uses BigQuery tables to do the analysis.
Args:
args: command line args
Raises:
ValueError if schema contains unknown types.
|
[
"Builds",
"an",
"analysis",
"file",
"for",
"training",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/preprocess/cloud_preprocess.py#L222-L256
|
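An invocation sketch for run_analysis. The function is normally driven by the script's argument parser, so argparse.Namespace is used here to mimic the parsed flags; the attribute names are taken from the body above and the values are placeholders.

import argparse

args = argparse.Namespace(
    bigquery_table='my_dataset.my_table',  # or None, with schema_file and input_file_pattern set
    schema_file=None,
    input_file_pattern=None,
    output_dir='gs://my_bucket/analysis',
)
run_analysis(args)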
4,828
|
googledatalab/pydatalab
|
google/datalab/ml/_confusion_matrix.py
|
ConfusionMatrix.from_csv
|
def from_csv(input_csv, headers=None, schema_file=None):
"""Create a ConfusionMatrix from a csv file.
Args:
input_csv: Path to a Csv file (with no header). Can be local or GCS path.
headers: Csv headers. If present, it must include 'target' and 'predicted'.
schema_file: Path to a JSON file containing BigQuery schema. Used if "headers" is None.
If present, it must include 'target' and 'predicted' columns.
Returns:
A ConfusionMatrix that can be plotted.
Raises:
ValueError if both headers and schema_file are None, or it does not include 'target'
or 'predicted' columns.
"""
if headers is not None:
names = headers
elif schema_file is not None:
with _util.open_local_or_gcs(schema_file, mode='r') as f:
schema = json.load(f)
names = [x['name'] for x in schema]
else:
raise ValueError('Either headers or schema_file is needed')
all_files = _util.glob_files(input_csv)
all_df = []
for file_name in all_files:
with _util.open_local_or_gcs(file_name, mode='r') as f:
all_df.append(pd.read_csv(f, names=names))
df = pd.concat(all_df, ignore_index=True)
if 'target' not in df or 'predicted' not in df:
raise ValueError('Cannot find "target" or "predicted" column')
labels = sorted(set(df['target']) | set(df['predicted']))
cm = confusion_matrix(df['target'], df['predicted'], labels=labels)
return ConfusionMatrix(cm, labels)
|
python
|
def from_csv(input_csv, headers=None, schema_file=None):
"""Create a ConfusionMatrix from a csv file.
Args:
input_csv: Path to a Csv file (with no header). Can be local or GCS path.
headers: Csv headers. If present, it must include 'target' and 'predicted'.
schema_file: Path to a JSON file containing BigQuery schema. Used if "headers" is None.
If present, it must include 'target' and 'predicted' columns.
Returns:
A ConfusionMatrix that can be plotted.
Raises:
ValueError if both headers and schema_file are None, or it does not include 'target'
or 'predicted' columns.
"""
if headers is not None:
names = headers
elif schema_file is not None:
with _util.open_local_or_gcs(schema_file, mode='r') as f:
schema = json.load(f)
names = [x['name'] for x in schema]
else:
raise ValueError('Either headers or schema_file is needed')
all_files = _util.glob_files(input_csv)
all_df = []
for file_name in all_files:
with _util.open_local_or_gcs(file_name, mode='r') as f:
all_df.append(pd.read_csv(f, names=names))
df = pd.concat(all_df, ignore_index=True)
if 'target' not in df or 'predicted' not in df:
raise ValueError('Cannot find "target" or "predicted" column')
labels = sorted(set(df['target']) | set(df['predicted']))
cm = confusion_matrix(df['target'], df['predicted'], labels=labels)
return ConfusionMatrix(cm, labels)
|
[
"def",
"from_csv",
"(",
"input_csv",
",",
"headers",
"=",
"None",
",",
"schema_file",
"=",
"None",
")",
":",
"if",
"headers",
"is",
"not",
"None",
":",
"names",
"=",
"headers",
"elif",
"schema_file",
"is",
"not",
"None",
":",
"with",
"_util",
".",
"open_local_or_gcs",
"(",
"schema_file",
",",
"mode",
"=",
"'r'",
")",
"as",
"f",
":",
"schema",
"=",
"json",
".",
"load",
"(",
"f",
")",
"names",
"=",
"[",
"x",
"[",
"'name'",
"]",
"for",
"x",
"in",
"schema",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'Either headers or schema_file is needed'",
")",
"all_files",
"=",
"_util",
".",
"glob_files",
"(",
"input_csv",
")",
"all_df",
"=",
"[",
"]",
"for",
"file_name",
"in",
"all_files",
":",
"with",
"_util",
".",
"open_local_or_gcs",
"(",
"file_name",
",",
"mode",
"=",
"'r'",
")",
"as",
"f",
":",
"all_df",
".",
"append",
"(",
"pd",
".",
"read_csv",
"(",
"f",
",",
"names",
"=",
"names",
")",
")",
"df",
"=",
"pd",
".",
"concat",
"(",
"all_df",
",",
"ignore_index",
"=",
"True",
")",
"if",
"'target'",
"not",
"in",
"df",
"or",
"'predicted'",
"not",
"in",
"df",
":",
"raise",
"ValueError",
"(",
"'Cannot find \"target\" or \"predicted\" column'",
")",
"labels",
"=",
"sorted",
"(",
"set",
"(",
"df",
"[",
"'target'",
"]",
")",
"|",
"set",
"(",
"df",
"[",
"'predicted'",
"]",
")",
")",
"cm",
"=",
"confusion_matrix",
"(",
"df",
"[",
"'target'",
"]",
",",
"df",
"[",
"'predicted'",
"]",
",",
"labels",
"=",
"labels",
")",
"return",
"ConfusionMatrix",
"(",
"cm",
",",
"labels",
")"
] |
Create a ConfusionMatrix from a csv file.
Args:
input_csv: Path to a Csv file (with no header). Can be local or GCS path.
headers: Csv headers. If present, it must include 'target' and 'predicted'.
schema_file: Path to a JSON file containing BigQuery schema. Used if "headers" is None.
If present, it must include 'target' and 'predicted' columns.
Returns:
A ConfusionMatrix that can be plotted.
Raises:
ValueError if both headers and schema_file are None, or it does not include 'target'
or 'predicted' columns.
|
[
"Create",
"a",
"ConfusionMatrix",
"from",
"a",
"csv",
"file",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_confusion_matrix.py#L41-L77
|
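A usage sketch for from_csv. It assumes ConfusionMatrix is importable from google.datalab.ml; the header list is hypothetical (it only has to contain 'target' and 'predicted'), and plot() is inferred from "A ConfusionMatrix that can be plotted".

from google.datalab.ml import ConfusionMatrix

cm = ConfusionMatrix.from_csv(
    'gs://my_bucket/eval_output/predictions*',
    headers=['key', 'target', 'predicted'])
cm.plot()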
4,829
|
googledatalab/pydatalab
|
google/datalab/ml/_confusion_matrix.py
|
ConfusionMatrix.from_bigquery
|
def from_bigquery(sql):
"""Create a ConfusionMatrix from a BigQuery table or query.
Args:
sql: Can be one of:
A SQL query string.
A Bigquery table string.
A Query object defined with '%%bq query --name [query_name]'.
The query results or table must include "target", "predicted" columns.
Returns:
A ConfusionMatrix that can be plotted.
Raises:
ValueError if query results or table does not include 'target' or 'predicted' columns.
"""
if isinstance(sql, bq.Query):
sql = sql._expanded_sql()
parts = sql.split('.')
if len(parts) == 1 or len(parts) > 3 or any(' ' in x for x in parts):
sql = '(' + sql + ')' # query, not a table name
else:
sql = '`' + sql + '`' # table name
query = bq.Query(
'SELECT target, predicted, count(*) as count FROM %s group by target, predicted' % sql)
df = query.execute().result().to_dataframe()
labels = sorted(set(df['target']) | set(df['predicted']))
labels_count = len(labels)
df['target'] = [labels.index(x) for x in df['target']]
df['predicted'] = [labels.index(x) for x in df['predicted']]
cm = [[0] * labels_count for i in range(labels_count)]
for index, row in df.iterrows():
cm[row['target']][row['predicted']] = row['count']
return ConfusionMatrix(cm, labels)
|
python
|
[ code_tokens elided: token-for-token duplicate of the from_bigquery source above ] |
Create a ConfusionMatrix from a BigQuery table or query.
Args:
sql: Can be one of:
A SQL query string.
A BigQuery table string.
A Query object defined with '%%bq query --name [query_name]'.
The query results or table must include "target", "predicted" columns.
Returns:
A ConfusionMatrix that can be plotted.
Raises:
ValueError if query results or table does not include 'target' or 'predicted' columns.
|
[ "Create", "a", "ConfusionMatrix", "from", "a", "BigQuery", "table", "or", "query", "." ] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_confusion_matrix.py#L80-L113
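A hedged sketch of from_bigquery; the table and query names are illustrative. Per the code above, a dotted name without spaces is wrapped in backticks as a table, while anything else is wrapped in parentheses as a subquery:

# Either a table name or a full SQL statement works as input.
cm = ConfusionMatrix.from_bigquery('myproject.mydataset.predictions')
cm2 = ConfusionMatrix.from_bigquery(
    'SELECT target, predicted FROM `myproject.mydataset.eval_results`')
cm.plot(rotation=90)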
|
4,830
|
googledatalab/pydatalab
|
google/datalab/ml/_confusion_matrix.py
|
ConfusionMatrix.to_dataframe
|
def to_dataframe(self):
"""Convert the confusion matrix to a dataframe.
Returns:
A DataFrame with "target", "predicted", "count" columns.
"""
data = []
for target_index, target_row in enumerate(self._cm):
for predicted_index, count in enumerate(target_row):
data.append((self._labels[target_index], self._labels[predicted_index], count))
return pd.DataFrame(data, columns=['target', 'predicted', 'count'])
|
python
|
[ code_tokens elided: token-for-token duplicate of the to_dataframe source above ] |
Convert the confusion matrix to a dataframe.
Returns:
A DataFrame with "target", "predicted", "count" columns.
|
[ "Convert", "the", "confusion", "matrix", "to", "a", "dataframe", "." ] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_confusion_matrix.py#L115-L127
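Continuing the sketch above: to_dataframe returns long-form (target, predicted, count) rows, so a standard pandas pivot recovers the square matrix. This uses plain pandas, not an API of the class:

import pandas as pd

df = cm.to_dataframe()
matrix = df.pivot_table(index='target', columns='predicted',
                        values='count', fill_value=0)
print(matrix)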
|
4,831
|
googledatalab/pydatalab
|
google/datalab/ml/_confusion_matrix.py
|
ConfusionMatrix.plot
|
def plot(self, figsize=None, rotation=45):
"""Plot the confusion matrix.
Args:
figsize: tuple (x, y) of ints. Sets the size of the figure
rotation: the rotation angle of the labels on the x-axis.
"""
fig, ax = plt.subplots(figsize=figsize)
plt.imshow(self._cm, interpolation='nearest', cmap=plt.cm.Blues, aspect='auto')
plt.title('Confusion matrix')
plt.colorbar()
tick_marks = np.arange(len(self._labels))
plt.xticks(tick_marks, self._labels, rotation=rotation)
plt.yticks(tick_marks, self._labels)
if isinstance(self._cm, list):
# If cm is created from BigQuery then it is a list.
thresh = max(max(self._cm)) / 2.
for i, j in itertools.product(range(len(self._labels)), range(len(self._labels))):
plt.text(j, i, self._cm[i][j], horizontalalignment="center",
color="white" if self._cm[i][j] > thresh else "black")
else:
# If cm is created from csv then it is a sklearn's confusion_matrix.
thresh = self._cm.max() / 2.
for i, j in itertools.product(range(len(self._labels)), range(len(self._labels))):
plt.text(j, i, self._cm[i, j], horizontalalignment="center",
color="white" if self._cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
|
python
|
[ code_tokens elided: token-for-token duplicate of the plot source above ] |
Plot the confusion matrix.
Args:
figsize: tuple (x, y) of ints. Sets the size of the figure.
rotation: the rotation angle of the labels on the x-axis.
|
[ "Plot", "the", "confusion", "matrix", "." ] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_confusion_matrix.py#L129-L159
|
4,832
|
googledatalab/pydatalab
|
google/datalab/contrib/pipeline/composer/_api.py
|
Api.get_environment_details
|
def get_environment_details(zone, environment):
""" Issues a request to Composer to get the environment details.
Args:
zone: GCP zone of the composer environment
environment: name of the Composer environment
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
default_context = google.datalab.Context.default()
url = (Api._ENDPOINT + (Api._ENVIRONMENTS_PATH_FORMAT % (default_context.project_id, zone,
environment)))
return google.datalab.utils.Http.request(url, credentials=default_context.credentials)
|
python
|
[ code_tokens elided: token-for-token duplicate of the get_environment_details source above ] |
Issues a request to Composer to get the environment details.
Args:
zone: GCP zone of the Composer environment
environment: name of the Composer environment
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
|
[ "Issues", "a", "request", "to", "Composer", "to", "get", "the", "environment", "details", "." ] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/pipeline/composer/_api.py#L24-L39
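A sketch of calling the Composer helper; the zone and environment names are illustrative, and the request runs under the default Datalab context per the code above. The import path is inferred from the file path in this record:

from google.datalab.contrib.pipeline.composer._api import Api

# Returns the parsed JSON description of the environment.
env = Api.get_environment_details('us-central1', 'my-composer-env')
print(env.get('config', {}))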
|
4,833
|
googledatalab/pydatalab
|
google/datalab/storage/_api.py
|
Api.buckets_delete
|
def buckets_delete(self, bucket):
"""Issues a request to delete a bucket.
Args:
bucket: the name of the bucket.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._BUCKET_PATH % bucket)
google.datalab.utils.Http.request(url, method='DELETE', credentials=self._credentials,
raw_response=True)
|
python
|
[ code_tokens elided: token-for-token duplicate of the buckets_delete source above ] |
Issues a request to delete a bucket.
Args:
bucket: the name of the bucket.
Raises:
Exception if there is an error performing the operation.
|
[ "Issues", "a", "request", "to", "delete", "a", "bucket", "." ] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/storage/_api.py#L72-L82
|
4,834
|
googledatalab/pydatalab
|
google/datalab/storage/_api.py
|
Api.buckets_get
|
def buckets_get(self, bucket, projection='noAcl'):
"""Issues a request to retrieve information about a bucket.
Args:
bucket: the name of the bucket.
projection: the projection of the bucket information to retrieve.
Returns:
A parsed bucket information dictionary.
Raises:
Exception if there is an error performing the operation.
"""
args = {'projection': projection}
url = Api._ENDPOINT + (Api._BUCKET_PATH % bucket)
return google.datalab.utils.Http.request(url, credentials=self._credentials, args=args)
|
python
|
[ code_tokens elided: token-for-token duplicate of the buckets_get source above ] |
Issues a request to retrieve information about a bucket.
Args:
bucket: the name of the bucket.
projection: the projection of the bucket information to retrieve.
Returns:
A parsed bucket information dictionary.
Raises:
Exception if there is an error performing the operation.
|
[ "Issues", "a", "request", "to", "retrieve", "information", "about", "a", "bucket", "." ] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/storage/_api.py#L84-L97
|
4,835
|
googledatalab/pydatalab
|
google/datalab/storage/_api.py
|
Api.buckets_list
|
def buckets_list(self, projection='noAcl', max_results=0, page_token=None, project_id=None):
"""Issues a request to retrieve the list of buckets.
Args:
projection: the projection of the bucket information to retrieve.
max_results: an optional maximum number of objects to retrieve.
page_token: an optional token to continue the retrieval.
project_id: the project whose buckets should be listed.
Returns:
A parsed list of bucket information dictionaries.
Raises:
Exception if there is an error performing the operation.
"""
if max_results == 0:
max_results = Api._MAX_RESULTS
args = {'project': project_id if project_id else self._project_id, 'maxResults': max_results}
if projection is not None:
args['projection'] = projection
if page_token is not None:
args['pageToken'] = page_token
url = Api._ENDPOINT + (Api._BUCKET_PATH % '')
return google.datalab.utils.Http.request(url, args=args, credentials=self._credentials)
|
python
|
[ code_tokens elided: token-for-token duplicate of the buckets_list source above ] |
Issues a request to retrieve the list of buckets.
Args:
projection: the projection of the bucket information to retrieve.
max_results: an optional maximum number of objects to retrieve.
page_token: an optional token to continue the retrieval.
project_id: the project whose buckets should be listed.
Returns:
A parsed list of bucket information dictionaries.
Raises:
Exception if there is an error performing the operation.
|
[ "Issues", "a", "request", "to", "retrieve", "the", "list", "of", "buckets", "." ] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/storage/_api.py#L99-L122
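A paging sketch over buckets_list. The Api constructor is not shown in this section, so `api` is assumed to be an already-constructed instance; 'items' and 'nextPageToken' are the usual GCS JSON API response fields:

# Collect all buckets across pages; `api` is an assumed Api instance.
buckets, token = [], None
while True:
    resp = api.buckets_list(max_results=100, page_token=token)
    buckets.extend(resp.get('items', []))
    token = resp.get('nextPageToken')
    if not token:
        break
print('found %d buckets' % len(buckets))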
|
4,836
|
googledatalab/pydatalab
|
google/datalab/storage/_api.py
|
Api.object_download
|
def object_download(self, bucket, key, start_offset=0, byte_count=None):
"""Reads the contents of an object as text.
Args:
bucket: the name of the bucket containing the object.
key: the key of the object to be read.
start_offset: the start offset of bytes to read.
byte_count: the number of bytes to read. If None, it reads to the end.
Returns:
The text content within the object.
Raises:
Exception if the object could not be read from.
"""
args = {'alt': 'media'}
headers = {}
if start_offset > 0 or byte_count is not None:
header = 'bytes=%d-' % start_offset
if byte_count is not None:
header += '%d' % byte_count
headers['Range'] = header
url = Api._DOWNLOAD_ENDPOINT + (Api._OBJECT_PATH % (bucket, Api._escape_key(key)))
return google.datalab.utils.Http.request(url, args=args, headers=headers,
credentials=self._credentials, raw_response=True)
|
python
|
[ code_tokens elided: token-for-token duplicate of the object_download source above ] |
Reads the contents of an object as text.
Args:
bucket: the name of the bucket containing the object.
key: the key of the object to be read.
start_offset: the start offset of bytes to read.
byte_count: the number of bytes to read. If None, it reads to the end.
Returns:
The text content within the object.
Raises:
Exception if the object could not be read from.
|
[ "Reads", "the", "contents", "of", "an", "object", "as", "text", "." ] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/storage/_api.py#L124-L146
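A ranged download sketch with the same assumed `api` instance as in the buckets_list sketch. One caveat visible in the code above: byte_count is appended directly as the Range end, so with a non-zero start_offset it behaves as an absolute end offset rather than a length:

# First kilobyte of an object (illustrative bucket and key).
head = api.object_download('my-bucket', 'logs/app.log',
                           start_offset=0, byte_count=1024)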
|
4,837
|
googledatalab/pydatalab
|
google/datalab/storage/_api.py
|
Api.object_upload
|
def object_upload(self, bucket, key, content, content_type):
"""Writes text content to the object.
Args:
bucket: the name of the bucket containing the object.
key: the key of the object to be written.
content: the text content to be written.
content_type: the type of text content.
Raises:
Exception if the object could not be written to.
"""
args = {'uploadType': 'media', 'name': key}
headers = {'Content-Type': content_type}
url = Api._UPLOAD_ENDPOINT + (Api._OBJECT_PATH % (bucket, ''))
return google.datalab.utils.Http.request(url, args=args, data=content, headers=headers,
credentials=self._credentials, raw_response=True)
|
python
|
[ code_tokens elided: token-for-token duplicate of the object_upload source above ] |
Writes text content to the object.
Args:
bucket: the name of the bucket containing the object.
key: the key of the object to be written.
content: the text content to be written.
content_type: the type of text content.
Raises:
Exception if the object could not be written to.
|
[ "Writes", "text", "content", "to", "the", "object", "." ] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/storage/_api.py#L148-L164
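The matching upload sketch, again with illustrative names; the media upload carries the payload in the request body:

api.object_upload('my-bucket', 'notes/hello.txt',
                  'hello world', 'text/plain')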
|
4,838
|
googledatalab/pydatalab
|
solutionbox/image_classification/mltoolbox/image/classification/_api.py
|
preprocess_async
|
def preprocess_async(train_dataset, output_dir, eval_dataset=None, checkpoint=None, cloud=None):
"""Preprocess data. Produce output that can be used by training efficiently.
Args:
train_dataset: training data source to preprocess. Can be CsvDataset or BigQueryDataSet.
If eval_dataset is None, the pipeline will randomly split train_dataset into
train/eval set with 7:3 ratio.
output_dir: The output directory to use. Preprocessing will create a sub directory under
it for each run, and also update "latest" file which points to the latest preprocessed
directory. Users are responsible for cleanup. Can be local or GCS path.
eval_dataset: evaluation data source to preprocess. Can be CsvDataset or BigQueryDataSet.
If specified, it will be used for evaluation during training, and train_dataset will be
completely used for training.
checkpoint: the Inception checkpoint to use. If None, a default checkpoint is used.
cloud: a DataFlow pipeline option dictionary such as {'num_workers': 3}. If anything but
not None, it will run in cloud. Otherwise, it runs locally.
Returns:
A google.datalab.utils.Job object that can be used to query state from or wait.
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if cloud is None:
return _local.Local.preprocess(train_dataset, output_dir, eval_dataset, checkpoint)
if not isinstance(cloud, dict):
cloud = {}
return _cloud.Cloud.preprocess(train_dataset, output_dir, eval_dataset, checkpoint, cloud)
|
python
|
[ code_tokens elided: token-for-token duplicate of the preprocess_async source above ] |
Preprocess data. Produce output that can be used efficiently by training.
Args:
train_dataset: training data source to preprocess. Can be CsvDataSet or BigQueryDataSet.
If eval_dataset is None, the pipeline will randomly split train_dataset into
train/eval sets with a 7:3 ratio.
output_dir: The output directory to use. Preprocessing will create a subdirectory under
it for each run, and also update a "latest" file which points to the latest preprocessed
directory. Users are responsible for cleanup. Can be a local or GCS path.
eval_dataset: evaluation data source to preprocess. Can be CsvDataSet or BigQueryDataSet.
If specified, it will be used for evaluation during training, and train_dataset will be
used entirely for training.
checkpoint: the Inception checkpoint to use. If None, a default checkpoint is used.
cloud: a DataFlow pipeline option dictionary such as {'num_workers': 3}. If not None,
the job runs in the cloud; otherwise it runs locally.
Returns:
A google.datalab.utils.Job object that can be used to query state from or wait.
|
[ "Preprocess", "data", ".", "Produce", "output", "that", "can", "be", "used", "by", "training", "efficiently", "." ] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_api.py#L25-L52
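A hedged workflow sketch: the dataset object and paths are illustrative, the import path is inferred from the file path in this record, and the wait() call is an assumption since the docstring only promises a Job "that can be used to query state from or wait":

from mltoolbox.image.classification import _api as classification

# train_set is an assumed CsvDataSet/BigQueryDataSet instance.
job = classification.preprocess_async(
    train_set, 'gs://my-bucket/preproc', cloud={'num_workers': 3})
job.wait()  # assumed method name on google.datalab.utils.Job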
|
4,839
|
googledatalab/pydatalab
|
solutionbox/image_classification/mltoolbox/image/classification/_api.py
|
train_async
|
def train_async(input_dir, batch_size, max_steps, output_dir, checkpoint=None, cloud=None):
"""Train model. The output can be used for batch prediction or online deployment.
Args:
input_dir: A directory path containing preprocessed results. Can be local or GCS path.
batch_size: size of batch used for training.
max_steps: number of steps to train.
output_dir: The output directory to use. Can be local or GCS path.
checkpoint: the Inception checkpoint to use. If None, a default checkpoint is used.
cloud: a google.datalab.ml.CloudTrainingConfig object to let it run in cloud.
If None, it runs locally.
Returns:
A google.datalab.utils.Job object that can be used to query state from or wait.
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if cloud is None:
return _local.Local.train(input_dir, batch_size, max_steps, output_dir, checkpoint)
return _cloud.Cloud.train(input_dir, batch_size, max_steps, output_dir, checkpoint, cloud)
|
python
|
[ code_tokens elided: token-for-token duplicate of the train_async source above ] |
Train model. The output can be used for batch prediction or online deployment.
Args:
input_dir: A directory path containing preprocessed results. Can be local or GCS path.
batch_size: size of batch used for training.
max_steps: number of steps to train.
output_dir: The output directory to use. Can be local or GCS path.
checkpoint: the Inception checkpoint to use. If None, a default checkpoint is used.
cloud: a google.datalab.ml.CloudTrainingConfig object for running the job in the cloud.
If None, it runs locally.
Returns:
A google.datalab.utils.Job object that can be used to query state from or wait.
|
[ "Train", "model", ".", "The", "output", "can", "be", "used", "for", "batch", "prediction", "or", "online", "deployment", "." ] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_api.py#L66-L86
|
4,840
|
googledatalab/pydatalab
|
solutionbox/image_classification/mltoolbox/image/classification/_api.py
|
batch_predict_async
|
def batch_predict_async(dataset, model_dir, output_csv=None, output_bq_table=None, cloud=None):
"""Batch prediction with an offline model.
Args:
dataset: CsvDataSet or BigQueryDataSet for batch prediction input. Can contain either
one column 'image_url', or two columns with another being 'label'.
model_dir: The directory of a trained inception model. Can be local or GCS paths.
output_csv: The output csv file for prediction results. If specified,
it will also output a csv schema file with the name output_csv + '.schema.json'.
output_bq_table: if specified, the output BigQuery table for prediction results.
output_csv and output_bq_table can both be set.
cloud: a DataFlow pipeline option dictionary such as {'num_workers': 3}. If anything but
not None, it will run in cloud. Otherwise, it runs locally.
If specified, it must include 'temp_location' with value being a GCS path, because cloud
run requires a staging GCS directory.
Raises:
ValueError if both output_csv and output_bq_table are None, or if cloud is not None
but it does not include 'temp_location'.
Returns:
A google.datalab.utils.Job object that can be used to query state from or wait.
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if cloud is None:
return _local.Local.batch_predict(dataset, model_dir, output_csv, output_bq_table)
if not isinstance(cloud, dict):
cloud = {}
return _cloud.Cloud.batch_predict(dataset, model_dir, output_csv, output_bq_table, cloud)
|
python
|
[ code_tokens elided: token-for-token duplicate of the batch_predict_async source above ] |
Batch prediction with an offline model.
Args:
dataset: CsvDataSet or BigQueryDataSet for batch prediction input. Can contain either
one column 'image_url', or two columns with another being 'label'.
model_dir: The directory of a trained inception model. Can be local or GCS paths.
output_csv: The output csv file for prediction results. If specified,
it will also output a csv schema file with the name output_csv + '.schema.json'.
output_bq_table: if specified, the output BigQuery table for prediction results.
output_csv and output_bq_table can both be set.
cloud: a DataFlow pipeline option dictionary such as {'num_workers': 3}. If not None,
the job runs in the cloud; otherwise it runs locally.
If specified, it must include 'temp_location' with a GCS path as its value, because a
cloud run requires a staging GCS directory.
Raises:
ValueError if both output_csv and output_bq_table are None, or if cloud is not None
but it does not include 'temp_location'.
Returns:
A google.datalab.utils.Job object that can be used to query state from or wait.
|
[ "Batch", "prediction", "with", "an", "offline", "model", "." ] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_api.py#L126-L156
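Closing the loop on the three async helpers with a hedged end-to-end sketch (all paths and dataset objects illustrative; cloud=None triggers a local run in each case):

# Train on the preprocessed output, then batch-predict to CSV.
train_job = classification.train_async(
    'gs://my-bucket/preproc/run1', batch_size=32,
    max_steps=2000, output_dir='gs://my-bucket/model')
predict_job = classification.batch_predict_async(
    eval_set, 'gs://my-bucket/model',
    output_csv='gs://my-bucket/pred/results.csv')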
|
4,841
|
googledatalab/pydatalab
|
google/datalab/bigquery/_job.py
|
Job._refresh_state
|
def _refresh_state(self):
""" Get the state of a job. If the job is complete this does nothing;
otherwise it gets a refreshed copy of the job resource.
"""
# TODO(gram): should we put a choke on refreshes? E.g. if the last call was less than
# a second ago should we return the cached value?
if self._is_complete:
return
try:
response = self._api.jobs_get(self._job_id)
except Exception as e:
raise e
if 'status' in response:
status = response['status']
if 'state' in status and status['state'] == 'DONE':
self._end_time = datetime.datetime.utcnow()
self._is_complete = True
self._process_job_status(status)
if 'statistics' in response:
statistics = response['statistics']
start_time = statistics.get('creationTime', None)
end_time = statistics.get('endTime', None)
if start_time and end_time and end_time >= start_time:
self._start_time = datetime.datetime.fromtimestamp(float(start_time) / 1000.0)
self._end_time = datetime.datetime.fromtimestamp(float(end_time) / 1000.0)
|
python
|
[ code_tokens elided: token-for-token duplicate of the _refresh_state source above ] |
Get the state of a job. If the job is complete this does nothing;
otherwise it gets a refreshed copy of the job resource.
|
[ "Get", "the", "state", "of", "a", "job", ".", "If", "the", "job", "is", "complete", "this", "does", "nothing", ";", "otherwise", "it", "gets", "a", "refreshed", "copy", "of", "the", "job", "resource", "." ] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_job.py#L43-L70
|
4,842
|
googledatalab/pydatalab
|
datalab/stackdriver/commands/_monitoring.py
|
monitoring
|
def monitoring(line, cell=None):
"""Implements the monitoring cell magic for ipython notebooks.
Args:
line: the contents of the storage line.
Returns:
The results of executing the cell.
"""
parser = datalab.utils.commands.CommandParser(prog='monitoring', description=(
'Execute various Monitoring-related operations. Use "%monitoring '
'<command> -h" for help on a specific command.'))
list_parser = parser.subcommand(
'list', 'List the metrics or resource types in a monitored project.')
list_metric_parser = list_parser.subcommand(
'metrics',
'List the metrics that are available through the Monitoring API.')
list_metric_parser.add_argument(
'-t', '--type',
help='The type of metric(s) to list; can include wildchars.')
list_metric_parser.add_argument(
'-p', '--project', help='The project on which to execute the request.')
list_metric_parser.set_defaults(func=_list_metric_descriptors)
list_resource_parser = list_parser.subcommand(
'resource_types',
('List the monitored resource types that are available through the '
'Monitoring API.'))
list_resource_parser.add_argument(
'-p', '--project', help='The project on which to execute the request.')
list_resource_parser.add_argument(
'-t', '--type',
help='The resource type(s) to list; can include wildchars.')
list_resource_parser.set_defaults(func=_list_resource_descriptors)
list_group_parser = list_parser.subcommand(
'groups',
('List the Stackdriver groups in this project.'))
list_group_parser.add_argument(
'-p', '--project', help='The project on which to execute the request.')
list_group_parser.add_argument(
'-n', '--name',
help='The name of the group(s) to list; can include wildchars.')
list_group_parser.set_defaults(func=_list_groups)
return datalab.utils.commands.handle_magic_line(line, cell, parser)
|
python
|
[ code_tokens elided: token-for-token duplicate of the monitoring source above ] |
Implements the monitoring cell magic for IPython notebooks.
Args:
line: the contents of the monitoring line.
Returns:
The results of executing the cell.
|
[ "Implements", "the", "monitoring", "cell", "magic", "for", "ipython", "notebooks", "." ] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/stackdriver/commands/_monitoring.py#L27-L73
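Given the subcommands registered above, typical notebook invocations look like the following (project name and patterns are illustrative):

%monitoring list metrics --type compute.googleapis.com/*
%monitoring list resource_types -p my-project
%monitoring list groups --name prod-*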
|
4,843
|
googledatalab/pydatalab
|
datalab/stackdriver/commands/_monitoring.py
|
_render_dataframe
|
def _render_dataframe(dataframe):
"""Helper to render a dataframe as an HTML table."""
data = dataframe.to_dict(orient='records')
fields = dataframe.columns.tolist()
return IPython.core.display.HTML(
datalab.utils.commands.HtmlBuilder.render_table(data, fields))
|
python
|
[ code_tokens elided: token-for-token duplicate of the _render_dataframe source above ] |
Helper to render a dataframe as an HTML table.
|
[ "Helper", "to", "render", "a", "dataframe", "as", "an", "HTML", "table", "." ] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/stackdriver/commands/_monitoring.py#L103-L108
|
4,844
|
googledatalab/pydatalab
|
google/datalab/utils/commands/_job.py
|
_get_job_status
|
def _get_job_status(line):
"""magic used as an endpoint for client to get job status.
%_get_job_status <name>
Returns:
A JSON object of the job status.
"""
try:
args = line.strip().split()
job_name = args[0]
job = None
if job_name in _local_jobs:
job = _local_jobs[job_name]
else:
raise Exception('invalid job %s' % job_name)
if job is not None:
error = '' if job.fatal_error is None else str(job.fatal_error)
data = {'exists': True, 'done': job.is_complete, 'error': error}
else:
data = {'exists': False}
except Exception as e:
google.datalab.utils.print_exception_with_last_stack(e)
data = {'done': True, 'error': str(e)}
return IPython.core.display.JSON(data)
|
python
|
[ code_tokens elided: token-for-token duplicate of the _get_job_status source above ] |
Magic used as an endpoint for clients to get job status.
%_get_job_status <name>
Returns:
A JSON object of the job status.
|
[ "magic", "used", "as", "an", "endpoint", "for", "client", "to", "get", "job", "status", "." ] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/commands/_job.py#L61-L89
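The magic is intended as an endpoint for client-side polling, but it can be invoked directly; the job name is illustrative and must be present in _local_jobs:

%_get_job_status my_training_job
# -> JSON like {'exists': True, 'done': False, 'error': ''}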
|
4,845
|
googledatalab/pydatalab
|
google/datalab/storage/_object.py
|
ObjectMetadata.updated_on
|
def updated_on(self):
"""The updated timestamp of the object as a datetime.datetime."""
s = self._info.get('updated', None)
return dateutil.parser.parse(s) if s else None
|
python
|
[ code_tokens elided: token-for-token duplicate of the updated_on source above ] |
The updated timestamp of the object as a datetime.datetime.
|
[ "The", "updated", "timestamp", "of", "the", "object", "as", "a", "datetime", ".", "datetime", "." ] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/storage/_object.py#L69-L72
|
4,846
|
googledatalab/pydatalab
|
google/datalab/storage/_object.py
|
Object.delete
|
def delete(self, wait_for_deletion=True):
"""Deletes this object from its bucket.
Args:
wait_for_deletion: If True, we poll until this object no longer appears in
objects.list operations for this bucket before returning.
Raises:
Exception if there was an error deleting the object.
"""
if self.exists():
try:
self._api.objects_delete(self._bucket, self._key)
except Exception as e:
raise e
if wait_for_deletion:
for _ in range(_MAX_POLL_ATTEMPTS):
objects = Objects(self._bucket, prefix=self.key, delimiter='/',
context=self._context)
if any(o.key == self.key for o in objects):
time.sleep(_POLLING_SLEEP)
continue
break
else:
logging.error('Failed to see object deletion after %d attempts.',
_MAX_POLL_ATTEMPTS)
|
python
|
[ code_tokens elided: token-for-token duplicate of the delete source above ] |
Deletes this object from its bucket.
Args:
wait_for_deletion: If True, we poll until this object no longer appears in
objects.list operations for this bucket before returning.
Raises:
Exception if there was an error deleting the object.
|
[
"Deletes",
"this",
"object",
"from",
"its",
"bucket",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/storage/_object.py#L147-L172
|
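A minimal standalone sketch of the delete-then-poll pattern used by Object.delete above. The constants and the delete/exists callables are illustrative stand-ins, not the pydatalab API; an in-memory dict plays the role of the bucket listing.

import logging
import time

# Illustrative values; the real constants live inside pydatalab and are not shown here.
_MAX_POLL_ATTEMPTS = 5
_POLLING_SLEEP = 1

def delete_and_wait(delete_fn, exists_fn):
  """Call delete_fn, then poll exists_fn until the object disappears or attempts run out."""
  delete_fn()
  for _ in range(_MAX_POLL_ATTEMPTS):
    if exists_fn():
      time.sleep(_POLLING_SLEEP)
      continue
    break
  else:
    logging.error('Failed to see object deletion after %d attempts.', _MAX_POLL_ATTEMPTS)

# In-memory stand-in for a bucket listing:
store = {'my-key': b'data'}
delete_and_wait(lambda: store.pop('my-key', None), lambda: 'my-key' in store)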
4,847
|
googledatalab/pydatalab
|
google/datalab/storage/_object.py
|
Object.metadata
|
def metadata(self):
"""Retrieves metadata about the object.
Returns:
An ObjectMetadata instance with information about this object.
Raises:
Exception if there was an error requesting the object's metadata.
"""
if self._info is None:
try:
self._info = self._api.objects_get(self._bucket, self._key)
except Exception as e:
raise e
return ObjectMetadata(self._info) if self._info else None
|
python
|
def metadata(self):
"""Retrieves metadata about the object.
Returns:
An ObjectMetadata instance with information about this object.
Raises:
Exception if there was an error requesting the object's metadata.
"""
if self._info is None:
try:
self._info = self._api.objects_get(self._bucket, self._key)
except Exception as e:
raise e
return ObjectMetadata(self._info) if self._info else None
|
[
"def",
"metadata",
"(",
"self",
")",
":",
"if",
"self",
".",
"_info",
"is",
"None",
":",
"try",
":",
"self",
".",
"_info",
"=",
"self",
".",
"_api",
".",
"objects_get",
"(",
"self",
".",
"_bucket",
",",
"self",
".",
"_key",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e",
"return",
"ObjectMetadata",
"(",
"self",
".",
"_info",
")",
"if",
"self",
".",
"_info",
"else",
"None"
] |
Retrieves metadata about the object.
Returns:
An ObjectMetadata instance with information about this object.
Raises:
Exception if there was an error requesting the object's metadata.
|
[
"Retrieves",
"metadata",
"about",
"the",
"object",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/storage/_object.py#L175-L188
|
4,848
|
googledatalab/pydatalab
|
google/datalab/storage/_object.py
|
Object.read_stream
|
def read_stream(self, start_offset=0, byte_count=None):
"""Reads the content of this object as text.
Args:
start_offset: the start offset of bytes to read.
byte_count: the number of bytes to read. If None, it reads to the end.
Returns:
The text content within the object.
Raises:
Exception if there was an error requesting the object's content.
"""
try:
return self._api.object_download(self._bucket, self._key,
start_offset=start_offset, byte_count=byte_count)
except Exception as e:
raise e
|
python
|
def read_stream(self, start_offset=0, byte_count=None):
"""Reads the content of this object as text.
Args:
start_offset: the start offset of bytes to read.
byte_count: the number of bytes to read. If None, it reads to the end.
Returns:
The text content within the object.
Raises:
Exception if there was an error requesting the object's content.
"""
try:
return self._api.object_download(self._bucket, self._key,
start_offset=start_offset, byte_count=byte_count)
except Exception as e:
raise e
|
[
"def",
"read_stream",
"(",
"self",
",",
"start_offset",
"=",
"0",
",",
"byte_count",
"=",
"None",
")",
":",
"try",
":",
"return",
"self",
".",
"_api",
".",
"object_download",
"(",
"self",
".",
"_bucket",
",",
"self",
".",
"_key",
",",
"start_offset",
"=",
"start_offset",
",",
"byte_count",
"=",
"byte_count",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e"
] |
Reads the content of this object as text.
Args:
start_offset: the start offset of bytes to read.
byte_count: the number of bytes to read. If None, it reads to the end.
Returns:
The text content within the object.
Raises:
Exception if there was an error requesting the object's content.
|
[
"Reads",
"the",
"content",
"of",
"this",
"object",
"as",
"text",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/storage/_object.py#L190-L205
|
4,849
|
googledatalab/pydatalab
|
google/datalab/storage/_object.py
|
Object.read_lines
|
def read_lines(self, max_lines=None):
"""Reads the content of this object as text, and return a list of lines up to some max.
Args:
max_lines: max number of lines to return. If None, return all lines.
Returns:
The text content of the object as a list of lines.
Raises:
Exception if there was an error requesting the object's content.
"""
if max_lines is None:
return self.read_stream().split('\n')
max_to_read = self.metadata.size
bytes_to_read = min(100 * max_lines, self.metadata.size)
while True:
content = self.read_stream(byte_count=bytes_to_read)
lines = content.split('\n')
if len(lines) > max_lines or bytes_to_read >= max_to_read:
break
# try 10 times more bytes or max
bytes_to_read = min(bytes_to_read * 10, max_to_read)
# remove the partial line at last
del lines[-1]
return lines[0:max_lines]
|
python
|
def read_lines(self, max_lines=None):
"""Reads the content of this object as text, and return a list of lines up to some max.
Args:
max_lines: max number of lines to return. If None, return all lines.
Returns:
The text content of the object as a list of lines.
Raises:
Exception if there was an error requesting the object's content.
"""
if max_lines is None:
return self.read_stream().split('\n')
max_to_read = self.metadata.size
bytes_to_read = min(100 * max_lines, self.metadata.size)
while True:
content = self.read_stream(byte_count=bytes_to_read)
lines = content.split('\n')
if len(lines) > max_lines or bytes_to_read >= max_to_read:
break
# try 10 times more bytes or max
bytes_to_read = min(bytes_to_read * 10, max_to_read)
# remove the partial line at last
del lines[-1]
return lines[0:max_lines]
|
[
"def",
"read_lines",
"(",
"self",
",",
"max_lines",
"=",
"None",
")",
":",
"if",
"max_lines",
"is",
"None",
":",
"return",
"self",
".",
"read_stream",
"(",
")",
".",
"split",
"(",
"'\\n'",
")",
"max_to_read",
"=",
"self",
".",
"metadata",
".",
"size",
"bytes_to_read",
"=",
"min",
"(",
"100",
"*",
"max_lines",
",",
"self",
".",
"metadata",
".",
"size",
")",
"while",
"True",
":",
"content",
"=",
"self",
".",
"read_stream",
"(",
"byte_count",
"=",
"bytes_to_read",
")",
"lines",
"=",
"content",
".",
"split",
"(",
"'\\n'",
")",
"if",
"len",
"(",
"lines",
")",
">",
"max_lines",
"or",
"bytes_to_read",
">=",
"max_to_read",
":",
"break",
"# try 10 times more bytes or max",
"bytes_to_read",
"=",
"min",
"(",
"bytes_to_read",
"*",
"10",
",",
"max_to_read",
")",
"# remove the partial line at last",
"del",
"lines",
"[",
"-",
"1",
"]",
"return",
"lines",
"[",
"0",
":",
"max_lines",
"]"
] |
Reads the content of this object as text, and returns a list of lines up to some max.
Args:
max_lines: max number of lines to return. If None, return all lines.
Returns:
The text content of the object as a list of lines.
Raises:
Exception if there was an error requesting the object's content.
|
[
"Reads",
"the",
"content",
"of",
"this",
"object",
"as",
"text",
"and",
"return",
"a",
"list",
"of",
"lines",
"up",
"to",
"some",
"max",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/storage/_object.py#L217-L243
|
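A self-contained sketch of the grow-the-byte-window strategy read_lines uses, reading a local file so it runs without GCS credentials. The 100-bytes-per-line guess mirrors the code above; the function name and the text-mode read are illustrative simplifications.

import os

def read_first_lines(path, max_lines):
  """Read up to max_lines complete lines, growing the read window 10x until enough lines appear."""
  max_to_read = os.path.getsize(path)
  bytes_to_read = min(100 * max_lines, max_to_read)  # same 100-bytes-per-line guess as above
  with open(path) as f:
    while True:
      f.seek(0)
      content = f.read(bytes_to_read)  # text-mode read; character count stands in for bytes here
      lines = content.split('\n')
      if len(lines) > max_lines or bytes_to_read >= max_to_read:
        break
      bytes_to_read = min(bytes_to_read * 10, max_to_read)
  del lines[-1]  # drop the partial line cut off by the window
  return lines[0:max_lines]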
4,850
|
googledatalab/pydatalab
|
datalab/data/_csv.py
|
Csv.sample_to
|
def sample_to(self, count, skip_header_rows, strategy, target):
"""Sample rows from GCS or local file and save results to target file.
Args:
count: number of rows to sample. If strategy is "BIGQUERY", it is used as approximate number.
skip_header_rows: whether to skip first row when reading from source.
strategy: can be "LOCAL" or "BIGQUERY". If local, the sampling happens in local memory,
and number of resulting rows matches count. If BigQuery, sampling is done
with BigQuery in cloud, and the number of resulting rows will be approximated to
count.
target: The target file path, can be GCS or local path.
Raises:
Exception if strategy is "BIGQUERY" but source is not a GCS path.
"""
# TODO(qimingj) Add unit test
# Read data from source into DataFrame.
if sys.version_info.major > 2:
xrange = range # for python 3 compatibility
if strategy == 'BIGQUERY':
import datalab.bigquery as bq
if not self.path.startswith('gs://'):
raise Exception('Cannot use BIGQUERY if data is not in GCS')
federated_table = self._create_federated_table(skip_header_rows)
row_count = self._get_gcs_csv_row_count(federated_table)
query = bq.Query('SELECT * from data', data_sources={'data': federated_table})
sampling = bq.Sampling.random(count * 100 / float(row_count))
sample = query.sample(sampling=sampling)
df = sample.to_dataframe()
elif strategy == 'LOCAL':
local_file = self.path
if self.path.startswith('gs://'):
local_file = tempfile.mktemp()
datalab.utils.gcs_copy_file(self.path, local_file)
with open(local_file) as f:
row_count = sum(1 for line in f)
start_row = 1 if skip_header_rows is True else 0
skip_count = row_count - count - 1 if skip_header_rows is True else row_count - count
skip = sorted(random.sample(xrange(start_row, row_count), skip_count))
header_row = 0 if skip_header_rows is True else None
df = pd.read_csv(local_file, skiprows=skip, header=header_row, delimiter=self._delimiter)
if self.path.startswith('gs://'):
os.remove(local_file)
else:
raise Exception('strategy must be BIGQUERY or LOCAL')
# Write to target.
if target.startswith('gs://'):
with tempfile.NamedTemporaryFile() as f:
df.to_csv(f, header=False, index=False)
f.flush()
datalab.utils.gcs_copy_file(f.name, target)
else:
with open(target, 'w') as f:
df.to_csv(f, header=False, index=False, sep=str(self._delimiter))
|
python
|
def sample_to(self, count, skip_header_rows, strategy, target):
"""Sample rows from GCS or local file and save results to target file.
Args:
count: number of rows to sample. If strategy is "BIGQUERY", it is used as approximate number.
skip_header_rows: whether to skip first row when reading from source.
strategy: can be "LOCAL" or "BIGQUERY". If local, the sampling happens in local memory,
and number of resulting rows matches count. If BigQuery, sampling is done
with BigQuery in cloud, and the number of resulting rows will be approximated to
count.
target: The target file path, can be GCS or local path.
Raises:
Exception if strategy is "BIGQUERY" but source is not a GCS path.
"""
# TODO(qimingj) Add unit test
# Read data from source into DataFrame.
if sys.version_info.major > 2:
xrange = range # for python 3 compatibility
if strategy == 'BIGQUERY':
import datalab.bigquery as bq
if not self.path.startswith('gs://'):
raise Exception('Cannot use BIGQUERY if data is not in GCS')
federated_table = self._create_federated_table(skip_header_rows)
row_count = self._get_gcs_csv_row_count(federated_table)
query = bq.Query('SELECT * from data', data_sources={'data': federated_table})
sampling = bq.Sampling.random(count * 100 / float(row_count))
sample = query.sample(sampling=sampling)
df = sample.to_dataframe()
elif strategy == 'LOCAL':
local_file = self.path
if self.path.startswith('gs://'):
local_file = tempfile.mktemp()
datalab.utils.gcs_copy_file(self.path, local_file)
with open(local_file) as f:
row_count = sum(1 for line in f)
start_row = 1 if skip_header_rows is True else 0
skip_count = row_count - count - 1 if skip_header_rows is True else row_count - count
skip = sorted(random.sample(xrange(start_row, row_count), skip_count))
header_row = 0 if skip_header_rows is True else None
df = pd.read_csv(local_file, skiprows=skip, header=header_row, delimiter=self._delimiter)
if self.path.startswith('gs://'):
os.remove(local_file)
else:
raise Exception('strategy must be BIGQUERY or LOCAL')
# Write to target.
if target.startswith('gs://'):
with tempfile.NamedTemporaryFile() as f:
df.to_csv(f, header=False, index=False)
f.flush()
datalab.utils.gcs_copy_file(f.name, target)
else:
with open(target, 'w') as f:
df.to_csv(f, header=False, index=False, sep=str(self._delimiter))
|
[
"def",
"sample_to",
"(",
"self",
",",
"count",
",",
"skip_header_rows",
",",
"strategy",
",",
"target",
")",
":",
"# TODO(qimingj) Add unit test",
"# Read data from source into DataFrame.",
"if",
"sys",
".",
"version_info",
".",
"major",
">",
"2",
":",
"xrange",
"=",
"range",
"# for python 3 compatibility",
"if",
"strategy",
"==",
"'BIGQUERY'",
":",
"import",
"datalab",
".",
"bigquery",
"as",
"bq",
"if",
"not",
"self",
".",
"path",
".",
"startswith",
"(",
"'gs://'",
")",
":",
"raise",
"Exception",
"(",
"'Cannot use BIGQUERY if data is not in GCS'",
")",
"federated_table",
"=",
"self",
".",
"_create_federated_table",
"(",
"skip_header_rows",
")",
"row_count",
"=",
"self",
".",
"_get_gcs_csv_row_count",
"(",
"federated_table",
")",
"query",
"=",
"bq",
".",
"Query",
"(",
"'SELECT * from data'",
",",
"data_sources",
"=",
"{",
"'data'",
":",
"federated_table",
"}",
")",
"sampling",
"=",
"bq",
".",
"Sampling",
".",
"random",
"(",
"count",
"*",
"100",
"/",
"float",
"(",
"row_count",
")",
")",
"sample",
"=",
"query",
".",
"sample",
"(",
"sampling",
"=",
"sampling",
")",
"df",
"=",
"sample",
".",
"to_dataframe",
"(",
")",
"elif",
"strategy",
"==",
"'LOCAL'",
":",
"local_file",
"=",
"self",
".",
"path",
"if",
"self",
".",
"path",
".",
"startswith",
"(",
"'gs://'",
")",
":",
"local_file",
"=",
"tempfile",
".",
"mktemp",
"(",
")",
"datalab",
".",
"utils",
".",
"gcs_copy_file",
"(",
"self",
".",
"path",
",",
"local_file",
")",
"with",
"open",
"(",
"local_file",
")",
"as",
"f",
":",
"row_count",
"=",
"sum",
"(",
"1",
"for",
"line",
"in",
"f",
")",
"start_row",
"=",
"1",
"if",
"skip_header_rows",
"is",
"True",
"else",
"0",
"skip_count",
"=",
"row_count",
"-",
"count",
"-",
"1",
"if",
"skip_header_rows",
"is",
"True",
"else",
"row_count",
"-",
"count",
"skip",
"=",
"sorted",
"(",
"random",
".",
"sample",
"(",
"xrange",
"(",
"start_row",
",",
"row_count",
")",
",",
"skip_count",
")",
")",
"header_row",
"=",
"0",
"if",
"skip_header_rows",
"is",
"True",
"else",
"None",
"df",
"=",
"pd",
".",
"read_csv",
"(",
"local_file",
",",
"skiprows",
"=",
"skip",
",",
"header",
"=",
"header_row",
",",
"delimiter",
"=",
"self",
".",
"_delimiter",
")",
"if",
"self",
".",
"path",
".",
"startswith",
"(",
"'gs://'",
")",
":",
"os",
".",
"remove",
"(",
"local_file",
")",
"else",
":",
"raise",
"Exception",
"(",
"'strategy must be BIGQUERY or LOCAL'",
")",
"# Write to target.",
"if",
"target",
".",
"startswith",
"(",
"'gs://'",
")",
":",
"with",
"tempfile",
".",
"NamedTemporaryFile",
"(",
")",
"as",
"f",
":",
"df",
".",
"to_csv",
"(",
"f",
",",
"header",
"=",
"False",
",",
"index",
"=",
"False",
")",
"f",
".",
"flush",
"(",
")",
"datalab",
".",
"utils",
".",
"gcs_copy_file",
"(",
"f",
".",
"name",
",",
"target",
")",
"else",
":",
"with",
"open",
"(",
"target",
",",
"'w'",
")",
"as",
"f",
":",
"df",
".",
"to_csv",
"(",
"f",
",",
"header",
"=",
"False",
",",
"index",
"=",
"False",
",",
"sep",
"=",
"str",
"(",
"self",
".",
"_delimiter",
")",
")"
] |
Sample rows from GCS or local file and save results to target file.
Args:
count: number of rows to sample. If strategy is "BIGQUERY", it is used as approximate number.
skip_header_rows: whether to skip first row when reading from source.
strategy: can be "LOCAL" or "BIGQUERY". If local, the sampling happens in local memory,
and number of resulting rows matches count. If BigQuery, sampling is done
with BigQuery in cloud, and the number of resulting rows will be approximated to
count.
target: The target file path, can be GCS or local path.
Raises:
Exception if strategy is "BIGQUERY" but source is not a GCS path.
|
[
"Sample",
"rows",
"from",
"GCS",
"or",
"local",
"file",
"and",
"save",
"results",
"to",
"target",
"file",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/data/_csv.py#L129-L183
|
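A runnable sketch of just the LOCAL branch of sample_to above: count the rows, pick a random set of row indices to skip, and let pandas read the rest. The function name is made up, and count is assumed to be smaller than the number of data rows.

import random
import pandas as pd

def sample_local_csv(path, count, skip_header_rows=True):
  """Randomly keep `count` data rows by telling pandas which row numbers to skip."""
  with open(path) as f:
    row_count = sum(1 for _ in f)
  start_row = 1 if skip_header_rows else 0
  # rows we do NOT want; assumes count < number of data rows in the file
  skip_count = row_count - count - 1 if skip_header_rows else row_count - count
  skip = sorted(random.sample(range(start_row, row_count), skip_count))
  header_row = 0 if skip_header_rows else None
  return pd.read_csv(path, skiprows=skip, header=header_row)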
4,851
|
googledatalab/pydatalab
|
google/datalab/utils/facets/base_feature_statistics_generator.py
|
BaseFeatureStatisticsGenerator._ParseExample
|
def _ParseExample(self, example_features, example_feature_lists, entries,
index):
"""Parses data from an example, populating a dictionary of feature values.
Args:
example_features: A map of strings to tf.Features from the example.
example_feature_lists: A map of strings to tf.FeatureLists from the
example.
entries: A dictionary of all features parsed thus far and arrays of their
values. This is mutated by the function.
index: The index of the example to parse from a list of examples.
Raises:
TypeError: Raises an exception when a feature has inconsistent types
across
examples.
"""
features_seen = set()
for feature_list, is_feature in zip(
[example_features, example_feature_lists], [True, False]):
sequence_length = None
for feature_name in feature_list:
# If this feature has not been seen in previous examples, then
# initialize its entry into the entries dictionary.
if feature_name not in entries:
entries[feature_name] = {
'vals': [],
'counts': [],
'feat_lens': [],
'missing': index
}
feature_entry = entries[feature_name]
feature = feature_list[feature_name]
value_type = None
value_list = []
if is_feature:
# If parsing a tf.Feature, extract the type and values simply.
if feature.HasField('float_list'):
value_list = feature.float_list.value
value_type = self.fs_proto.FLOAT
elif feature.HasField('bytes_list'):
value_list = feature.bytes_list.value
value_type = self.fs_proto.STRING
elif feature.HasField('int64_list'):
value_list = feature.int64_list.value
value_type = self.fs_proto.INT
else:
# If parsing a tf.FeatureList, get the type and values by iterating
# over all Features in the FeatureList.
sequence_length = len(feature.feature)
if sequence_length != 0 and feature.feature[0].HasField('float_list'):
for feat in feature.feature:
for value in feat.float_list.value:
value_list.append(value)
value_type = self.fs_proto.FLOAT
elif sequence_length != 0 and feature.feature[0].HasField(
'bytes_list'):
for feat in feature.feature:
for value in feat.bytes_list.value:
value_list.append(value)
value_type = self.fs_proto.STRING
elif sequence_length != 0 and feature.feature[0].HasField(
'int64_list'):
for feat in feature.feature:
for value in feat.int64_list.value:
value_list.append(value)
value_type = self.fs_proto.INT
if value_type is not None:
if 'type' not in feature_entry:
feature_entry['type'] = value_type
elif feature_entry['type'] != value_type:
raise TypeError('type mismatch for feature ' + feature_name)
feature_entry['counts'].append(len(value_list))
feature_entry['vals'].extend(value_list)
if sequence_length is not None:
feature_entry['feat_lens'].append(sequence_length)
if value_list:
features_seen.add(feature_name)
# For all previously-seen features not found in this example, update the
# feature's missing value.
for f in entries:
fv = entries[f]
if f not in features_seen:
fv['missing'] += 1
|
python
|
def _ParseExample(self, example_features, example_feature_lists, entries,
index):
"""Parses data from an example, populating a dictionary of feature values.
Args:
example_features: A map of strings to tf.Features from the example.
example_feature_lists: A map of strings to tf.FeatureLists from the
example.
entries: A dictionary of all features parsed thus far and arrays of their
values. This is mutated by the function.
index: The index of the example to parse from a list of examples.
Raises:
TypeError: Raises an exception when a feature has inconsistent types
across
examples.
"""
features_seen = set()
for feature_list, is_feature in zip(
[example_features, example_feature_lists], [True, False]):
sequence_length = None
for feature_name in feature_list:
# If this feature has not been seen in previous examples, then
# initialize its entry into the entries dictionary.
if feature_name not in entries:
entries[feature_name] = {
'vals': [],
'counts': [],
'feat_lens': [],
'missing': index
}
feature_entry = entries[feature_name]
feature = feature_list[feature_name]
value_type = None
value_list = []
if is_feature:
# If parsing a tf.Feature, extract the type and values simply.
if feature.HasField('float_list'):
value_list = feature.float_list.value
value_type = self.fs_proto.FLOAT
elif feature.HasField('bytes_list'):
value_list = feature.bytes_list.value
value_type = self.fs_proto.STRING
elif feature.HasField('int64_list'):
value_list = feature.int64_list.value
value_type = self.fs_proto.INT
else:
# If parsing a tf.FeatureList, get the type and values by iterating
# over all Features in the FeatureList.
sequence_length = len(feature.feature)
if sequence_length != 0 and feature.feature[0].HasField('float_list'):
for feat in feature.feature:
for value in feat.float_list.value:
value_list.append(value)
value_type = self.fs_proto.FLOAT
elif sequence_length != 0 and feature.feature[0].HasField(
'bytes_list'):
for feat in feature.feature:
for value in feat.bytes_list.value:
value_list.append(value)
value_type = self.fs_proto.STRING
elif sequence_length != 0 and feature.feature[0].HasField(
'int64_list'):
for feat in feature.feature:
for value in feat.int64_list.value:
value_list.append(value)
value_type = self.fs_proto.INT
if value_type is not None:
if 'type' not in feature_entry:
feature_entry['type'] = value_type
elif feature_entry['type'] != value_type:
raise TypeError('type mismatch for feature ' + feature_name)
feature_entry['counts'].append(len(value_list))
feature_entry['vals'].extend(value_list)
if sequence_length is not None:
feature_entry['feat_lens'].append(sequence_length)
if value_list:
features_seen.add(feature_name)
# For all previously-seen features not found in this example, update the
# feature's missing value.
for f in entries:
fv = entries[f]
if f not in features_seen:
fv['missing'] += 1
|
[
"def",
"_ParseExample",
"(",
"self",
",",
"example_features",
",",
"example_feature_lists",
",",
"entries",
",",
"index",
")",
":",
"features_seen",
"=",
"set",
"(",
")",
"for",
"feature_list",
",",
"is_feature",
"in",
"zip",
"(",
"[",
"example_features",
",",
"example_feature_lists",
"]",
",",
"[",
"True",
",",
"False",
"]",
")",
":",
"sequence_length",
"=",
"None",
"for",
"feature_name",
"in",
"feature_list",
":",
"# If this feature has not been seen in previous examples, then",
"# initialize its entry into the entries dictionary.",
"if",
"feature_name",
"not",
"in",
"entries",
":",
"entries",
"[",
"feature_name",
"]",
"=",
"{",
"'vals'",
":",
"[",
"]",
",",
"'counts'",
":",
"[",
"]",
",",
"'feat_lens'",
":",
"[",
"]",
",",
"'missing'",
":",
"index",
"}",
"feature_entry",
"=",
"entries",
"[",
"feature_name",
"]",
"feature",
"=",
"feature_list",
"[",
"feature_name",
"]",
"value_type",
"=",
"None",
"value_list",
"=",
"[",
"]",
"if",
"is_feature",
":",
"# If parsing a tf.Feature, extract the type and values simply.",
"if",
"feature",
".",
"HasField",
"(",
"'float_list'",
")",
":",
"value_list",
"=",
"feature",
".",
"float_list",
".",
"value",
"value_type",
"=",
"self",
".",
"fs_proto",
".",
"FLOAT",
"elif",
"feature",
".",
"HasField",
"(",
"'bytes_list'",
")",
":",
"value_list",
"=",
"feature",
".",
"bytes_list",
".",
"value",
"value_type",
"=",
"self",
".",
"fs_proto",
".",
"STRING",
"elif",
"feature",
".",
"HasField",
"(",
"'int64_list'",
")",
":",
"value_list",
"=",
"feature",
".",
"int64_list",
".",
"value",
"value_type",
"=",
"self",
".",
"fs_proto",
".",
"INT",
"else",
":",
"# If parsing a tf.FeatureList, get the type and values by iterating",
"# over all Features in the FeatureList.",
"sequence_length",
"=",
"len",
"(",
"feature",
".",
"feature",
")",
"if",
"sequence_length",
"!=",
"0",
"and",
"feature",
".",
"feature",
"[",
"0",
"]",
".",
"HasField",
"(",
"'float_list'",
")",
":",
"for",
"feat",
"in",
"feature",
".",
"feature",
":",
"for",
"value",
"in",
"feat",
".",
"float_list",
".",
"value",
":",
"value_list",
".",
"append",
"(",
"value",
")",
"value_type",
"=",
"self",
".",
"fs_proto",
".",
"FLOAT",
"elif",
"sequence_length",
"!=",
"0",
"and",
"feature",
".",
"feature",
"[",
"0",
"]",
".",
"HasField",
"(",
"'bytes_list'",
")",
":",
"for",
"feat",
"in",
"feature",
".",
"feature",
":",
"for",
"value",
"in",
"feat",
".",
"bytes_list",
".",
"value",
":",
"value_list",
".",
"append",
"(",
"value",
")",
"value_type",
"=",
"self",
".",
"fs_proto",
".",
"STRING",
"elif",
"sequence_length",
"!=",
"0",
"and",
"feature",
".",
"feature",
"[",
"0",
"]",
".",
"HasField",
"(",
"'int64_list'",
")",
":",
"for",
"feat",
"in",
"feature",
".",
"feature",
":",
"for",
"value",
"in",
"feat",
".",
"int64_list",
".",
"value",
":",
"value_list",
".",
"append",
"(",
"value",
")",
"value_type",
"=",
"self",
".",
"fs_proto",
".",
"INT",
"if",
"value_type",
"is",
"not",
"None",
":",
"if",
"'type'",
"not",
"in",
"feature_entry",
":",
"feature_entry",
"[",
"'type'",
"]",
"=",
"value_type",
"elif",
"feature_entry",
"[",
"'type'",
"]",
"!=",
"value_type",
":",
"raise",
"TypeError",
"(",
"'type mismatch for feature '",
"+",
"feature_name",
")",
"feature_entry",
"[",
"'counts'",
"]",
".",
"append",
"(",
"len",
"(",
"value_list",
")",
")",
"feature_entry",
"[",
"'vals'",
"]",
".",
"extend",
"(",
"value_list",
")",
"if",
"sequence_length",
"is",
"not",
"None",
":",
"feature_entry",
"[",
"'feat_lens'",
"]",
".",
"append",
"(",
"sequence_length",
")",
"if",
"value_list",
":",
"features_seen",
".",
"add",
"(",
"feature_name",
")",
"# For all previously-seen features not found in this example, update the",
"# feature's missing value.",
"for",
"f",
"in",
"entries",
":",
"fv",
"=",
"entries",
"[",
"f",
"]",
"if",
"f",
"not",
"in",
"features_seen",
":",
"fv",
"[",
"'missing'",
"]",
"+=",
"1"
] |
Parses data from an example, populating a dictionary of feature values.
Args:
example_features: A map of strings to tf.Features from the example.
example_feature_lists: A map of strings to tf.FeatureLists from the
example.
entries: A dictionary of all features parsed thus far and arrays of their
values. This is mutated by the function.
index: The index of the example to parse from a list of examples.
Raises:
TypeError: Raises an exception when a feature has inconsistent types
across
examples.
|
[
"Parses",
"data",
"from",
"an",
"example",
"populating",
"a",
"dictionary",
"of",
"feature",
"values",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/facets/base_feature_statistics_generator.py#L74-L160
|
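The subtlest part of _ParseExample above is the 'missing' bookkeeping: a feature first seen at example index i starts with missing=i (all earlier examples lacked it), and every known feature absent from the current example is incremented. A dict-only sketch of just that logic, ignoring types and values:

def update_missing(entries, example_features, index):
  """Track, per feature, how many examples seen so far did not contain it."""
  for name in example_features:
    if name not in entries:
      entries[name] = {'missing': index}  # all `index` earlier examples lacked this feature
  for name, entry in entries.items():
    if name not in example_features:
      entry['missing'] += 1

entries = {}
for i, example in enumerate([{'a': 1}, {'a': 2, 'b': 3}, {'b': 4}]):
  update_missing(entries, example, i)
# entries == {'a': {'missing': 1}, 'b': {'missing': 1}}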
4,852
|
googledatalab/pydatalab
|
google/datalab/utils/facets/base_feature_statistics_generator.py
|
BaseFeatureStatisticsGenerator._GetEntries
|
def _GetEntries(self,
paths,
max_entries,
iterator_from_file,
is_sequence=False):
"""Extracts examples into a dictionary of feature values.
Args:
paths: A list of the paths to the files to parse.
max_entries: The maximum number of examples to load.
iterator_from_file: A method that takes a file path string and returns an
iterator to the examples in that file.
is_sequence: True if the input data from 'iterator_from_file' are
tf.SequenceExamples, False if tf.Examples. Defaults to false.
Returns:
A tuple with two elements:
- A dictionary of all features parsed thus far and arrays of their
values.
- The number of examples parsed.
"""
entries = {}
index = 0
for filepath in paths:
reader = iterator_from_file(filepath)
for record in reader:
if is_sequence:
sequence_example = tf.train.SequenceExample.FromString(record)
self._ParseExample(sequence_example.context.feature,
sequence_example.feature_lists.feature_list,
entries, index)
else:
self._ParseExample(
tf.train.Example.FromString(record).features.feature, [], entries,
index)
index += 1
if index == max_entries:
return entries, index
return entries, index
|
python
|
def _GetEntries(self,
paths,
max_entries,
iterator_from_file,
is_sequence=False):
"""Extracts examples into a dictionary of feature values.
Args:
paths: A list of the paths to the files to parse.
max_entries: The maximum number of examples to load.
iterator_from_file: A method that takes a file path string and returns an
iterator to the examples in that file.
is_sequence: True if the input data from 'iterator_from_file' are
tf.SequenceExamples, False if tf.Examples. Defaults to false.
Returns:
A tuple with two elements:
- A dictionary of all features parsed thus far and arrays of their
values.
- The number of examples parsed.
"""
entries = {}
index = 0
for filepath in paths:
reader = iterator_from_file(filepath)
for record in reader:
if is_sequence:
sequence_example = tf.train.SequenceExample.FromString(record)
self._ParseExample(sequence_example.context.feature,
sequence_example.feature_lists.feature_list,
entries, index)
else:
self._ParseExample(
tf.train.Example.FromString(record).features.feature, [], entries,
index)
index += 1
if index == max_entries:
return entries, index
return entries, index
|
[
"def",
"_GetEntries",
"(",
"self",
",",
"paths",
",",
"max_entries",
",",
"iterator_from_file",
",",
"is_sequence",
"=",
"False",
")",
":",
"entries",
"=",
"{",
"}",
"index",
"=",
"0",
"for",
"filepath",
"in",
"paths",
":",
"reader",
"=",
"iterator_from_file",
"(",
"filepath",
")",
"for",
"record",
"in",
"reader",
":",
"if",
"is_sequence",
":",
"sequence_example",
"=",
"tf",
".",
"train",
".",
"SequenceExample",
".",
"FromString",
"(",
"record",
")",
"self",
".",
"_ParseExample",
"(",
"sequence_example",
".",
"context",
".",
"feature",
",",
"sequence_example",
".",
"feature_lists",
".",
"feature_list",
",",
"entries",
",",
"index",
")",
"else",
":",
"self",
".",
"_ParseExample",
"(",
"tf",
".",
"train",
".",
"Example",
".",
"FromString",
"(",
"record",
")",
".",
"features",
".",
"feature",
",",
"[",
"]",
",",
"entries",
",",
"index",
")",
"index",
"+=",
"1",
"if",
"index",
"==",
"max_entries",
":",
"return",
"entries",
",",
"index",
"return",
"entries",
",",
"index"
] |
Extracts examples into a dictionary of feature values.
Args:
paths: A list of the paths to the files to parse.
max_entries: The maximum number of examples to load.
iterator_from_file: A method that takes a file path string and returns an
iterator to the examples in that file.
is_sequence: True if the input data from 'iterator_from_file' are
tf.SequenceExamples, False if tf.Examples. Defaults to false.
Returns:
A tuple with two elements:
- A dictionary of all features parsed thus far and arrays of their
values.
- The number of examples parsed.
|
[
"Extracts",
"examples",
"into",
"a",
"dictionary",
"of",
"feature",
"values",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/facets/base_feature_statistics_generator.py#L162-L200
|
4,853
|
googledatalab/pydatalab
|
google/datalab/utils/facets/base_feature_statistics_generator.py
|
BaseFeatureStatisticsGenerator._GetTfRecordEntries
|
def _GetTfRecordEntries(self, path, max_entries, is_sequence,
iterator_options):
"""Extracts TFRecord examples into a dictionary of feature values.
Args:
path: The path to the TFRecord file(s).
max_entries: The maximum number of examples to load.
is_sequence: True if the input data from 'path' are tf.SequenceExamples,
False if tf.Examples. Defaults to false.
iterator_options: Options to pass to the iterator that reads the examples.
Defaults to None.
Returns:
A tuple with two elements:
- A dictionary of all features parsed thus far and arrays of their
values.
- The number of examples parsed.
"""
return self._GetEntries([path], max_entries,
partial(
tf.python_io.tf_record_iterator,
options=iterator_options), is_sequence)
|
python
|
def _GetTfRecordEntries(self, path, max_entries, is_sequence,
iterator_options):
"""Extracts TFRecord examples into a dictionary of feature values.
Args:
path: The path to the TFRecord file(s).
max_entries: The maximum number of examples to load.
is_sequence: True if the input data from 'path' are tf.SequenceExamples,
False if tf.Examples. Defaults to false.
iterator_options: Options to pass to the iterator that reads the examples.
Defaults to None.
Returns:
A tuple with two elements:
- A dictionary of all features parsed thus far and arrays of their
values.
- The number of examples parsed.
"""
return self._GetEntries([path], max_entries,
partial(
tf.python_io.tf_record_iterator,
options=iterator_options), is_sequence)
|
[
"def",
"_GetTfRecordEntries",
"(",
"self",
",",
"path",
",",
"max_entries",
",",
"is_sequence",
",",
"iterator_options",
")",
":",
"return",
"self",
".",
"_GetEntries",
"(",
"[",
"path",
"]",
",",
"max_entries",
",",
"partial",
"(",
"tf",
".",
"python_io",
".",
"tf_record_iterator",
",",
"options",
"=",
"iterator_options",
")",
",",
"is_sequence",
")"
] |
Extracts TFRecord examples into a dictionary of feature values.
Args:
path: The path to the TFRecord file(s).
max_entries: The maximum number of examples to load.
is_sequence: True if the input data from 'path' are tf.SequenceExamples,
False if tf.Examples. Defaults to false.
iterator_options: Options to pass to the iterator that reads the examples.
Defaults to None.
Returns:
A tuple with two elements:
- A dictionary of all features parsed thus far and arrays of their
values.
- The number of examples parsed.
|
[
"Extracts",
"TFRecord",
"examples",
"into",
"a",
"dictionary",
"of",
"feature",
"values",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/facets/base_feature_statistics_generator.py#L202-L223
|
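The functools.partial call above pre-binds the options argument so that _GetEntries still receives a one-argument path-to-iterator callable. A tiny sketch with a stand-in reader in place of tf.python_io.tf_record_iterator:

from functools import partial

def record_iterator(path, options=None):
  # stand-in for tf.python_io.tf_record_iterator; yields fake records
  return iter(['record-1-from-' + path, 'record-2-from-' + path])

iterator_from_file = partial(record_iterator, options=None)  # options pre-bound
print(list(iterator_from_file('data.tfrecord')))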
4,854
|
googledatalab/pydatalab
|
datalab/storage/_api.py
|
Api.buckets_insert
|
def buckets_insert(self, bucket, project_id=None):
"""Issues a request to create a new bucket.
Args:
bucket: the name of the bucket.
project_id: the project to use when inserting the bucket.
Returns:
A parsed bucket information dictionary.
Raises:
Exception if there is an error performing the operation.
"""
args = {'project': project_id if project_id else self._project_id}
data = {'name': bucket}
url = Api._ENDPOINT + (Api._BUCKET_PATH % '')
return datalab.utils.Http.request(url, args=args, data=data, credentials=self._credentials)
|
python
|
def buckets_insert(self, bucket, project_id=None):
"""Issues a request to create a new bucket.
Args:
bucket: the name of the bucket.
project_id: the project to use when inserting the bucket.
Returns:
A parsed bucket information dictionary.
Raises:
Exception if there is an error performing the operation.
"""
args = {'project': project_id if project_id else self._project_id}
data = {'name': bucket}
url = Api._ENDPOINT + (Api._BUCKET_PATH % '')
return datalab.utils.Http.request(url, args=args, data=data, credentials=self._credentials)
|
[
"def",
"buckets_insert",
"(",
"self",
",",
"bucket",
",",
"project_id",
"=",
"None",
")",
":",
"args",
"=",
"{",
"'project'",
":",
"project_id",
"if",
"project_id",
"else",
"self",
".",
"_project_id",
"}",
"data",
"=",
"{",
"'name'",
":",
"bucket",
"}",
"url",
"=",
"Api",
".",
"_ENDPOINT",
"+",
"(",
"Api",
".",
"_BUCKET_PATH",
"%",
"''",
")",
"return",
"datalab",
".",
"utils",
".",
"Http",
".",
"request",
"(",
"url",
",",
"args",
"=",
"args",
",",
"data",
"=",
"data",
",",
"credentials",
"=",
"self",
".",
"_credentials",
")"
] |
Issues a request to create a new bucket.
Args:
bucket: the name of the bucket.
project_id: the project to use when inserting the bucket.
Returns:
A parsed bucket information dictionary.
Raises:
Exception if there is an error performing the operation.
|
[
"Issues",
"a",
"request",
"to",
"create",
"a",
"new",
"bucket",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/storage/_api.py#L54-L69
|
4,855
|
googledatalab/pydatalab
|
datalab/storage/_api.py
|
Api.objects_delete
|
def objects_delete(self, bucket, key):
"""Deletes the specified object.
Args:
bucket: the name of the bucket.
key: the key of the object within the bucket.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._OBJECT_PATH % (bucket, Api._escape_key(key)))
datalab.utils.Http.request(url, method='DELETE', credentials=self._credentials,
raw_response=True)
|
python
|
def objects_delete(self, bucket, key):
"""Deletes the specified object.
Args:
bucket: the name of the bucket.
key: the key of the object within the bucket.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._OBJECT_PATH % (bucket, Api._escape_key(key)))
datalab.utils.Http.request(url, method='DELETE', credentials=self._credentials,
raw_response=True)
|
[
"def",
"objects_delete",
"(",
"self",
",",
"bucket",
",",
"key",
")",
":",
"url",
"=",
"Api",
".",
"_ENDPOINT",
"+",
"(",
"Api",
".",
"_OBJECT_PATH",
"%",
"(",
"bucket",
",",
"Api",
".",
"_escape_key",
"(",
"key",
")",
")",
")",
"datalab",
".",
"utils",
".",
"Http",
".",
"request",
"(",
"url",
",",
"method",
"=",
"'DELETE'",
",",
"credentials",
"=",
"self",
".",
"_credentials",
",",
"raw_response",
"=",
"True",
")"
] |
Deletes the specified object.
Args:
bucket: the name of the bucket.
key: the key of the object within the bucket.
Raises:
Exception if there is an error performing the operation.
|
[
"Deletes",
"the",
"specified",
"object",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/storage/_api.py#L182-L193
|
4,856
|
googledatalab/pydatalab
|
google/datalab/stackdriver/monitoring/_metric.py
|
MetricDescriptors.list
|
def list(self, pattern='*'):
"""Returns a list of metric descriptors that match the filters.
Args:
pattern: An optional pattern to further filter the descriptors. This can
include Unix shell-style wildcards. E.g. ``"compute*"``,
``"*cpu/load_??m"``.
Returns:
A list of MetricDescriptor objects that match the filters.
"""
if self._descriptors is None:
self._descriptors = self._client.list_metric_descriptors(
filter_string=self._filter_string, type_prefix=self._type_prefix)
return [metric for metric in self._descriptors
if fnmatch.fnmatch(metric.type, pattern)]
|
python
|
def list(self, pattern='*'):
"""Returns a list of metric descriptors that match the filters.
Args:
pattern: An optional pattern to further filter the descriptors. This can
include Unix shell-style wildcards. E.g. ``"compute*"``,
``"*cpu/load_??m"``.
Returns:
A list of MetricDescriptor objects that match the filters.
"""
if self._descriptors is None:
self._descriptors = self._client.list_metric_descriptors(
filter_string=self._filter_string, type_prefix=self._type_prefix)
return [metric for metric in self._descriptors
if fnmatch.fnmatch(metric.type, pattern)]
|
[
"def",
"list",
"(",
"self",
",",
"pattern",
"=",
"'*'",
")",
":",
"if",
"self",
".",
"_descriptors",
"is",
"None",
":",
"self",
".",
"_descriptors",
"=",
"self",
".",
"_client",
".",
"list_metric_descriptors",
"(",
"filter_string",
"=",
"self",
".",
"_filter_string",
",",
"type_prefix",
"=",
"self",
".",
"_type_prefix",
")",
"return",
"[",
"metric",
"for",
"metric",
"in",
"self",
".",
"_descriptors",
"if",
"fnmatch",
".",
"fnmatch",
"(",
"metric",
".",
"type",
",",
"pattern",
")",
"]"
] |
Returns a list of metric descriptors that match the filters.
Args:
pattern: An optional pattern to further filter the descriptors. This can
include Unix shell-style wildcards. E.g. ``"compute*"``,
``"*cpu/load_??m"``.
Returns:
A list of MetricDescriptor objects that match the filters.
|
[
"Returns",
"a",
"list",
"of",
"metric",
"descriptors",
"that",
"match",
"the",
"filters",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/stackdriver/monitoring/_metric.py#L45-L60
|
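The filtering in MetricDescriptors.list above is plain fnmatch; a small self-contained illustration with made-up metric type strings:

import fnmatch

metric_types = [
    'compute.googleapis.com/instance/cpu/utilization',
    'compute.googleapis.com/instance/disk/read_bytes_count',
    'bigquery.googleapis.com/query/count',
]
print([t for t in metric_types if fnmatch.fnmatch(t, 'compute*')])  # the two compute metrics
print([t for t in metric_types if fnmatch.fnmatch(t, '*cpu/*')])    # only the cpu metric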
4,857
|
googledatalab/pydatalab
|
datalab/bigquery/_schema.py
|
Schema._from_dataframe
|
def _from_dataframe(dataframe, default_type='STRING'):
"""
Infer a BigQuery table schema from a Pandas dataframe. Note that if you don't explicitly set
the types of the columns in the dataframe, they may be of a type that forces coercion to
STRING, so even though the fields in the dataframe themselves may be numeric, the type in the
derived schema may not be. Hence it is prudent to make sure the Pandas dataframe is typed
correctly.
Args:
dataframe: The DataFrame.
default_type : The default big query type in case the type of the column does not exist in
the schema. Defaults to 'STRING'.
Returns:
A list of dictionaries containing field 'name' and 'type' entries, suitable for use in a
BigQuery Tables resource schema.
"""
type_mapping = {
'i': 'INTEGER',
'b': 'BOOLEAN',
'f': 'FLOAT',
'O': 'STRING',
'S': 'STRING',
'U': 'STRING',
'M': 'TIMESTAMP'
}
fields = []
for column_name, dtype in dataframe.dtypes.iteritems():
fields.append({'name': column_name,
'type': type_mapping.get(dtype.kind, default_type)})
return fields
|
python
|
def _from_dataframe(dataframe, default_type='STRING'):
"""
Infer a BigQuery table schema from a Pandas dataframe. Note that if you don't explicitly set
the types of the columns in the dataframe, they may be of a type that forces coercion to
STRING, so even though the fields in the dataframe themselves may be numeric, the type in the
derived schema may not be. Hence it is prudent to make sure the Pandas dataframe is typed
correctly.
Args:
dataframe: The DataFrame.
default_type : The default big query type in case the type of the column does not exist in
the schema. Defaults to 'STRING'.
Returns:
A list of dictionaries containing field 'name' and 'type' entries, suitable for use in a
BigQuery Tables resource schema.
"""
type_mapping = {
'i': 'INTEGER',
'b': 'BOOLEAN',
'f': 'FLOAT',
'O': 'STRING',
'S': 'STRING',
'U': 'STRING',
'M': 'TIMESTAMP'
}
fields = []
for column_name, dtype in dataframe.dtypes.iteritems():
fields.append({'name': column_name,
'type': type_mapping.get(dtype.kind, default_type)})
return fields
|
[
"def",
"_from_dataframe",
"(",
"dataframe",
",",
"default_type",
"=",
"'STRING'",
")",
":",
"type_mapping",
"=",
"{",
"'i'",
":",
"'INTEGER'",
",",
"'b'",
":",
"'BOOLEAN'",
",",
"'f'",
":",
"'FLOAT'",
",",
"'O'",
":",
"'STRING'",
",",
"'S'",
":",
"'STRING'",
",",
"'U'",
":",
"'STRING'",
",",
"'M'",
":",
"'TIMESTAMP'",
"}",
"fields",
"=",
"[",
"]",
"for",
"column_name",
",",
"dtype",
"in",
"dataframe",
".",
"dtypes",
".",
"iteritems",
"(",
")",
":",
"fields",
".",
"append",
"(",
"{",
"'name'",
":",
"column_name",
",",
"'type'",
":",
"type_mapping",
".",
"get",
"(",
"dtype",
".",
"kind",
",",
"default_type",
")",
"}",
")",
"return",
"fields"
] |
Infer a BigQuery table schema from a Pandas dataframe. Note that if you don't explicitly set
the types of the columns in the dataframe, they may be of a type that forces coercion to
STRING, so even though the fields in the dataframe themselves may be numeric, the type in the
derived schema may not be. Hence it is prudent to make sure the Pandas dataframe is typed
correctly.
Args:
dataframe: The DataFrame.
default_type : The default big query type in case the type of the column does not exist in
the schema. Defaults to 'STRING'.
Returns:
A list of dictionaries containing field 'name' and 'type' entries, suitable for use in a
BigQuery Tables resource schema.
|
[
"Infer",
"a",
"BigQuery",
"table",
"schema",
"from",
"a",
"Pandas",
"dataframe",
".",
"Note",
"that",
"if",
"you",
"don",
"t",
"explicitly",
"set",
"the",
"types",
"of",
"the",
"columns",
"in",
"the",
"dataframe",
"they",
"may",
"be",
"of",
"a",
"type",
"that",
"forces",
"coercion",
"to",
"STRING",
"so",
"even",
"though",
"the",
"fields",
"in",
"the",
"dataframe",
"themselves",
"may",
"be",
"numeric",
"the",
"type",
"in",
"the",
"derived",
"schema",
"may",
"not",
"be",
".",
"Hence",
"it",
"is",
"prudent",
"to",
"make",
"sure",
"the",
"Pandas",
"dataframe",
"is",
"typed",
"correctly",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_schema.py#L92-L124
|
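A short runnable illustration of the dtype.kind mapping used by Schema._from_dataframe above. The sample DataFrame is made up; note that dtypes.iteritems() is the older pandas spelling, and newer pandas uses items():

import pandas as pd

type_mapping = {'i': 'INTEGER', 'b': 'BOOLEAN', 'f': 'FLOAT',
                'O': 'STRING', 'S': 'STRING', 'U': 'STRING', 'M': 'TIMESTAMP'}

df = pd.DataFrame({'name': ['a', 'b'],
                   'count': [1, 2],
                   'score': [0.5, 1.5],
                   'when': pd.to_datetime(['2018-01-01', '2018-01-02'])})

fields = [{'name': col, 'type': type_mapping.get(dtype.kind, 'STRING')}
          for col, dtype in df.dtypes.items()]
# [{'name': 'name', 'type': 'STRING'}, {'name': 'count', 'type': 'INTEGER'},
#  {'name': 'score', 'type': 'FLOAT'}, {'name': 'when', 'type': 'TIMESTAMP'}]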
4,858
|
googledatalab/pydatalab
|
datalab/bigquery/_schema.py
|
Schema._from_dict_record
|
def _from_dict_record(data):
"""
Infer a BigQuery table schema from a dictionary. If the dictionary has entries that
are in turn OrderedDicts these will be turned into RECORD types. Ideally this will
be an OrderedDict but it is not required.
Args:
data: The dict to infer a schema from.
Returns:
A list of dictionaries containing field 'name' and 'type' entries, suitable for use in a
BigQuery Tables resource schema.
"""
return [Schema._get_field_entry(name, value) for name, value in list(data.items())]
|
python
|
def _from_dict_record(data):
"""
Infer a BigQuery table schema from a dictionary. If the dictionary has entries that
are in turn OrderedDicts these will be turned into RECORD types. Ideally this will
be an OrderedDict but it is not required.
Args:
data: The dict to infer a schema from.
Returns:
A list of dictionaries containing field 'name' and 'type' entries, suitable for use in a
BigQuery Tables resource schema.
"""
return [Schema._get_field_entry(name, value) for name, value in list(data.items())]
|
[
"def",
"_from_dict_record",
"(",
"data",
")",
":",
"return",
"[",
"Schema",
".",
"_get_field_entry",
"(",
"name",
",",
"value",
")",
"for",
"name",
",",
"value",
"in",
"list",
"(",
"data",
".",
"items",
"(",
")",
")",
"]"
] |
Infer a BigQuery table schema from a dictionary. If the dictionary has entries that
are in turn OrderedDicts these will be turned into RECORD types. Ideally this will
be an OrderedDict but it is not required.
Args:
data: The dict to infer a schema from.
Returns:
A list of dictionaries containing field 'name' and 'type' entries, suitable for use in a
BigQuery Tables resource schema.
|
[
"Infer",
"a",
"BigQuery",
"table",
"schema",
"from",
"a",
"dictionary",
".",
"If",
"the",
"dictionary",
"has",
"entries",
"that",
"are",
"in",
"turn",
"OrderedDicts",
"these",
"will",
"be",
"turned",
"into",
"RECORD",
"types",
".",
"Ideally",
"this",
"will",
"be",
"an",
"OrderedDict",
"but",
"it",
"is",
"not",
"required",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_schema.py#L164-L176
|
4,859
|
googledatalab/pydatalab
|
datalab/bigquery/_schema.py
|
Schema._from_list_record
|
def _from_list_record(data):
"""
Infer a BigQuery table schema from a list of values.
Args:
data: The list of values.
Returns:
A list of dictionaries containing field 'name' and 'type' entries, suitable for use in a
BigQuery Tables resource schema.
"""
return [Schema._get_field_entry('Column%d' % (i + 1), value) for i, value in enumerate(data)]
|
python
|
def _from_list_record(data):
"""
Infer a BigQuery table schema from a list of values.
Args:
data: The list of values.
Returns:
A list of dictionaries containing field 'name' and 'type' entries, suitable for use in a
BigQuery Tables resource schema.
"""
return [Schema._get_field_entry('Column%d' % (i + 1), value) for i, value in enumerate(data)]
|
[
"def",
"_from_list_record",
"(",
"data",
")",
":",
"return",
"[",
"Schema",
".",
"_get_field_entry",
"(",
"'Column%d'",
"%",
"(",
"i",
"+",
"1",
")",
",",
"value",
")",
"for",
"i",
",",
"value",
"in",
"enumerate",
"(",
"data",
")",
"]"
] |
Infer a BigQuery table schema from a list of values.
Args:
data: The list of values.
Returns:
A list of dictionaries containing field 'name' and 'type' entries, suitable for use in a
BigQuery Tables resource schema.
|
[
"Infer",
"a",
"BigQuery",
"table",
"schema",
"from",
"a",
"list",
"of",
"values",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_schema.py#L179-L189
|
4,860
|
googledatalab/pydatalab
|
datalab/bigquery/_schema.py
|
Schema._from_record
|
def _from_record(data):
"""
Infer a BigQuery table schema from a list of fields or a dictionary. The typeof the elements
is used. For a list, the field names are simply 'Column1', 'Column2', etc.
Args:
data: The list of fields or dictionary.
Returns:
A list of dictionaries containing field 'name' and 'type' entries, suitable for use in a
BigQuery Tables resource schema.
"""
if isinstance(data, dict):
return Schema._from_dict_record(data)
elif isinstance(data, list):
return Schema._from_list_record(data)
else:
raise Exception('Cannot create a schema from record %s' % str(data))
|
python
|
def _from_record(data):
"""
Infer a BigQuery table schema from a list of fields or a dictionary. The typeof the elements
is used. For a list, the field names are simply 'Column1', 'Column2', etc.
Args:
data: The list of fields or dictionary.
Returns:
A list of dictionaries containing field 'name' and 'type' entries, suitable for use in a
BigQuery Tables resource schema.
"""
if isinstance(data, dict):
return Schema._from_dict_record(data)
elif isinstance(data, list):
return Schema._from_list_record(data)
else:
raise Exception('Cannot create a schema from record %s' % str(data))
|
[
"def",
"_from_record",
"(",
"data",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"return",
"Schema",
".",
"_from_dict_record",
"(",
"data",
")",
"elif",
"isinstance",
"(",
"data",
",",
"list",
")",
":",
"return",
"Schema",
".",
"_from_list_record",
"(",
"data",
")",
"else",
":",
"raise",
"Exception",
"(",
"'Cannot create a schema from record %s'",
"%",
"str",
"(",
"data",
")",
")"
] |
Infer a BigQuery table schema from a list of fields or a dictionary. The type of the elements
is used. For a list, the field names are simply 'Column1', 'Column2', etc.
Args:
data: The list of fields or dictionary.
Returns:
A list of dictionaries containing field 'name' and 'type' entries, suitable for use in a
BigQuery Tables resource schema.
|
[
"Infer",
"a",
"BigQuery",
"table",
"schema",
"from",
"a",
"list",
"of",
"fields",
"or",
"a",
"dictionary",
".",
"The",
"typeof",
"the",
"elements",
"is",
"used",
".",
"For",
"a",
"list",
"the",
"field",
"names",
"are",
"simply",
"Column1",
"Column2",
"etc",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_schema.py#L192-L208
|
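A simplified standalone sketch of the dispatch in Schema._from_record above. The per-value type inference here is a stand-in for Schema._get_field_entry, which is not shown in this excerpt, so the exact mapping is an assumption:

import datetime

def _field_entry(name, value):
  # stand-in for Schema._get_field_entry: guess a BigQuery type from a sample value
  if isinstance(value, bool):          # bool first, since bool is a subclass of int
    bq_type = 'BOOLEAN'
  elif isinstance(value, int):
    bq_type = 'INTEGER'
  elif isinstance(value, float):
    bq_type = 'FLOAT'
  elif isinstance(value, (datetime.datetime, datetime.date)):
    bq_type = 'TIMESTAMP'
  else:
    bq_type = 'STRING'
  return {'name': name, 'type': bq_type}

def from_record(data):
  if isinstance(data, dict):
    return [_field_entry(name, value) for name, value in data.items()]
  elif isinstance(data, list):
    return [_field_entry('Column%d' % (i + 1), value) for i, value in enumerate(data)]
  else:
    raise Exception('Cannot create a schema from record %s' % str(data))

from_record({'name': 'a', 'count': 3})  # [{'name': 'name', 'type': 'STRING'}, {'name': 'count', 'type': 'INTEGER'}]
from_record(['a', 3, 1.5])              # fields named Column1, Column2, Column3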
4,861
|
googledatalab/pydatalab
|
datalab/utils/commands/_commands.py
|
CommandParser.create_args
|
def create_args(line, namespace):
""" Expand any meta-variable references in the argument list. """
args = []
# Using shlex.split handles quotes args and escape characters.
for arg in shlex.split(line):
if not arg:
continue
if arg[0] == '$':
var_name = arg[1:]
if var_name in namespace:
args.append((namespace[var_name]))
else:
raise Exception('Undefined variable referenced in command line: %s' % arg)
else:
args.append(arg)
return args
|
python
|
def create_args(line, namespace):
""" Expand any meta-variable references in the argument list. """
args = []
# Using shlex.split handles quotes args and escape characters.
for arg in shlex.split(line):
if not arg:
continue
if arg[0] == '$':
var_name = arg[1:]
if var_name in namespace:
args.append((namespace[var_name]))
else:
raise Exception('Undefined variable referenced in command line: %s' % arg)
else:
args.append(arg)
return args
|
[
"def",
"create_args",
"(",
"line",
",",
"namespace",
")",
":",
"args",
"=",
"[",
"]",
"# Using shlex.split handles quotes args and escape characters.",
"for",
"arg",
"in",
"shlex",
".",
"split",
"(",
"line",
")",
":",
"if",
"not",
"arg",
":",
"continue",
"if",
"arg",
"[",
"0",
"]",
"==",
"'$'",
":",
"var_name",
"=",
"arg",
"[",
"1",
":",
"]",
"if",
"var_name",
"in",
"namespace",
":",
"args",
".",
"append",
"(",
"(",
"namespace",
"[",
"var_name",
"]",
")",
")",
"else",
":",
"raise",
"Exception",
"(",
"'Undefined variable referenced in command line: %s'",
"%",
"arg",
")",
"else",
":",
"args",
".",
"append",
"(",
"arg",
")",
"return",
"args"
] |
Expand any meta-variable references in the argument list.
|
[
"Expand",
"any",
"meta",
"-",
"variable",
"references",
"in",
"the",
"argument",
"list",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/utils/commands/_commands.py#L49-L64
|
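A self-contained sketch of the $variable expansion in create_args above, using a plain dict in place of the IPython user namespace:

import shlex

def create_args(line, namespace):
  """Split a magic command line and substitute $name references from the namespace."""
  args = []
  for arg in shlex.split(line):  # handles quoted args and escape characters
    if not arg:
      continue
    if arg[0] == '$':
      var_name = arg[1:]
      if var_name not in namespace:
        raise Exception('Undefined variable referenced in command line: %s' % arg)
      args.append(namespace[var_name])
    else:
      args.append(arg)
  return args

print(create_args('--table $my_table --limit 10', {'my_table': 'project.dataset.table'}))
# ['--table', 'project.dataset.table', '--limit', '10']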
4,862
|
googledatalab/pydatalab
|
datalab/utils/commands/_commands.py
|
CommandParser.parse
|
def parse(self, line, namespace=None):
"""Parses a line into a dictionary of arguments, expanding meta-variables from a namespace. """
try:
if namespace is None:
ipy = IPython.get_ipython()
namespace = ipy.user_ns
args = CommandParser.create_args(line, namespace)
return self.parse_args(args)
except Exception as e:
print(str(e))
return None
|
python
|
def parse(self, line, namespace=None):
"""Parses a line into a dictionary of arguments, expanding meta-variables from a namespace. """
try:
if namespace is None:
ipy = IPython.get_ipython()
namespace = ipy.user_ns
args = CommandParser.create_args(line, namespace)
return self.parse_args(args)
except Exception as e:
print(str(e))
return None
|
[
"def",
"parse",
"(",
"self",
",",
"line",
",",
"namespace",
"=",
"None",
")",
":",
"try",
":",
"if",
"namespace",
"is",
"None",
":",
"ipy",
"=",
"IPython",
".",
"get_ipython",
"(",
")",
"namespace",
"=",
"ipy",
".",
"user_ns",
"args",
"=",
"CommandParser",
".",
"create_args",
"(",
"line",
",",
"namespace",
")",
"return",
"self",
".",
"parse_args",
"(",
"args",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"str",
"(",
"e",
")",
")",
"return",
"None"
] |
Parses a line into a dictionary of arguments, expanding meta-variables from a namespace.
|
[
"Parses",
"a",
"line",
"into",
"a",
"dictionary",
"of",
"arguments",
"expanding",
"meta",
"-",
"variables",
"from",
"a",
"namespace",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/utils/commands/_commands.py#L66-L76
|
4,863
|
googledatalab/pydatalab
|
datalab/utils/commands/_commands.py
|
CommandParser.subcommand
|
def subcommand(self, name, help):
"""Creates a parser for a sub-command. """
if self._subcommands is None:
self._subcommands = self.add_subparsers(help='commands')
return self._subcommands.add_parser(name, description=help, help=help)
|
python
|
def subcommand(self, name, help):
"""Creates a parser for a sub-command. """
if self._subcommands is None:
self._subcommands = self.add_subparsers(help='commands')
return self._subcommands.add_parser(name, description=help, help=help)
|
[
"def",
"subcommand",
"(",
"self",
",",
"name",
",",
"help",
")",
":",
"if",
"self",
".",
"_subcommands",
"is",
"None",
":",
"self",
".",
"_subcommands",
"=",
"self",
".",
"add_subparsers",
"(",
"help",
"=",
"'commands'",
")",
"return",
"self",
".",
"_subcommands",
".",
"add_parser",
"(",
"name",
",",
"description",
"=",
"help",
",",
"help",
"=",
"help",
")"
] |
Creates a parser for a sub-command.
|
[
"Creates",
"a",
"parser",
"for",
"a",
"sub",
"-",
"command",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/utils/commands/_commands.py#L78-L82
|
4,864
|
googledatalab/pydatalab
|
datalab/utils/commands/_utils.py
|
render_text
|
def render_text(text, preformatted=False):
""" Return text formatted as a HTML
Args:
text: the text to render
preformatted: whether the text should be rendered as preformatted
"""
return IPython.core.display.HTML(_html.HtmlBuilder.render_text(text, preformatted))
|
python
|
def render_text(text, preformatted=False):
""" Return text formatted as a HTML
Args:
text: the text to render
preformatted: whether the text should be rendered as preformatted
"""
return IPython.core.display.HTML(_html.HtmlBuilder.render_text(text, preformatted))
|
[
"def",
"render_text",
"(",
"text",
",",
"preformatted",
"=",
"False",
")",
":",
"return",
"IPython",
".",
"core",
".",
"display",
".",
"HTML",
"(",
"_html",
".",
"HtmlBuilder",
".",
"render_text",
"(",
"text",
",",
"preformatted",
")",
")"
] |
Return text formatted as HTML
Args:
text: the text to render
preformatted: whether the text should be rendered as preformatted
|
[
"Return",
"text",
"formatted",
"as",
"a",
"HTML"
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/utils/commands/_utils.py#L73-L80
|
4,865
|
googledatalab/pydatalab
|
datalab/utils/commands/_utils.py
|
_get_cols
|
def _get_cols(fields, schema):
""" Get column metadata for Google Charts based on field list and schema. """
typemap = {
'STRING': 'string',
'INT64': 'number',
'INTEGER': 'number',
'FLOAT': 'number',
'FLOAT64': 'number',
'BOOL': 'boolean',
'BOOLEAN': 'boolean',
'DATE': 'date',
'TIME': 'timeofday',
'DATETIME': 'datetime',
'TIMESTAMP': 'datetime'
}
cols = []
for col in fields:
if schema:
f = schema[col]
t = 'string' if f.mode == 'REPEATED' else typemap.get(f.data_type, 'string')
cols.append({'id': f.name, 'label': f.name, 'type': t})
else:
# This will only happen if we had no rows to infer a schema from, so the type
# is not really important, except that GCharts will choke if we pass such a schema
# to a chart if it is string x string so we default to number.
cols.append({'id': col, 'label': col, 'type': 'number'})
return cols
|
python
|
def _get_cols(fields, schema):
""" Get column metadata for Google Charts based on field list and schema. """
typemap = {
'STRING': 'string',
'INT64': 'number',
'INTEGER': 'number',
'FLOAT': 'number',
'FLOAT64': 'number',
'BOOL': 'boolean',
'BOOLEAN': 'boolean',
'DATE': 'date',
'TIME': 'timeofday',
'DATETIME': 'datetime',
'TIMESTAMP': 'datetime'
}
cols = []
for col in fields:
if schema:
f = schema[col]
t = 'string' if f.mode == 'REPEATED' else typemap.get(f.data_type, 'string')
cols.append({'id': f.name, 'label': f.name, 'type': t})
else:
# This will only happen if we had no rows to infer a schema from, so the type
# is not really important, except that GCharts will choke if we pass such a schema
# to a chart if it is string x string so we default to number.
cols.append({'id': col, 'label': col, 'type': 'number'})
return cols
|
[
"def",
"_get_cols",
"(",
"fields",
",",
"schema",
")",
":",
"typemap",
"=",
"{",
"'STRING'",
":",
"'string'",
",",
"'INT64'",
":",
"'number'",
",",
"'INTEGER'",
":",
"'number'",
",",
"'FLOAT'",
":",
"'number'",
",",
"'FLOAT64'",
":",
"'number'",
",",
"'BOOL'",
":",
"'boolean'",
",",
"'BOOLEAN'",
":",
"'boolean'",
",",
"'DATE'",
":",
"'date'",
",",
"'TIME'",
":",
"'timeofday'",
",",
"'DATETIME'",
":",
"'datetime'",
",",
"'TIMESTAMP'",
":",
"'datetime'",
"}",
"cols",
"=",
"[",
"]",
"for",
"col",
"in",
"fields",
":",
"if",
"schema",
":",
"f",
"=",
"schema",
"[",
"col",
"]",
"t",
"=",
"'string'",
"if",
"f",
".",
"mode",
"==",
"'REPEATED'",
"else",
"typemap",
".",
"get",
"(",
"f",
".",
"data_type",
",",
"'string'",
")",
"cols",
".",
"append",
"(",
"{",
"'id'",
":",
"f",
".",
"name",
",",
"'label'",
":",
"f",
".",
"name",
",",
"'type'",
":",
"t",
"}",
")",
"else",
":",
"# This will only happen if we had no rows to infer a schema from, so the type",
"# is not really important, except that GCharts will choke if we pass such a schema",
"# to a chart if it is string x string so we default to number.",
"cols",
".",
"append",
"(",
"{",
"'id'",
":",
"col",
",",
"'label'",
":",
"col",
",",
"'type'",
":",
"'number'",
"}",
")",
"return",
"cols"
] |
Get column metadata for Google Charts based on field list and schema.
|
[
"Get",
"column",
"metadata",
"for",
"Google",
"Charts",
"based",
"on",
"field",
"list",
"and",
"schema",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/utils/commands/_utils.py#L99-L125
|
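An illustrative sketch of the column metadata _get_cols produces; the schema stand-in below only mimics the attributes the function reads (item lookup by field name plus .name, .data_type, .mode) and is not a real BigQuery schema object.

# Hypothetical sketch with a fake schema object.
class FakeField(object):
  def __init__(self, name, data_type, mode='NULLABLE'):
    self.name, self.data_type, self.mode = name, data_type, mode

schema = {'city': FakeField('city', 'STRING'),
          'visits': FakeField('visits', 'INT64')}

_get_cols(['city', 'visits'], schema)
# -> [{'id': 'city', 'label': 'city', 'type': 'string'},
#     {'id': 'visits', 'label': 'visits', 'type': 'number'}]

_get_cols(['a', 'b'], None)   # no schema: every column falls back to type 'number'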
4,866
|
googledatalab/pydatalab
|
datalab/utils/commands/_utils.py
|
_get_data_from_empty_list
|
def _get_data_from_empty_list(source, fields='*', first_row=0, count=-1, schema=None):
""" Helper function for _get_data that handles empty lists. """
fields = get_field_list(fields, schema)
return {'cols': _get_cols(fields, schema), 'rows': []}, 0
|
python
|
def _get_data_from_empty_list(source, fields='*', first_row=0, count=-1, schema=None):
""" Helper function for _get_data that handles empty lists. """
fields = get_field_list(fields, schema)
return {'cols': _get_cols(fields, schema), 'rows': []}, 0
|
[
"def",
"_get_data_from_empty_list",
"(",
"source",
",",
"fields",
"=",
"'*'",
",",
"first_row",
"=",
"0",
",",
"count",
"=",
"-",
"1",
",",
"schema",
"=",
"None",
")",
":",
"fields",
"=",
"get_field_list",
"(",
"fields",
",",
"schema",
")",
"return",
"{",
"'cols'",
":",
"_get_cols",
"(",
"fields",
",",
"schema",
")",
",",
"'rows'",
":",
"[",
"]",
"}",
",",
"0"
] |
Helper function for _get_data that handles empty lists.
|
[
"Helper",
"function",
"for",
"_get_data",
"that",
"handles",
"empty",
"lists",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/utils/commands/_utils.py#L128-L131
|
4,867
|
googledatalab/pydatalab
|
datalab/utils/commands/_utils.py
|
_get_data_from_table
|
def _get_data_from_table(source, fields='*', first_row=0, count=-1, schema=None):
""" Helper function for _get_data that handles BQ Tables. """
if not source.exists():
return _get_data_from_empty_list(source, fields, first_row, count)
if schema is None:
schema = source.schema
fields = get_field_list(fields, schema)
gen = source.range(first_row, count) if count >= 0 else source
rows = [{'c': [{'v': row[c]} if c in row else {} for c in fields]} for row in gen]
return {'cols': _get_cols(fields, schema), 'rows': rows}, source.length
|
python
|
def _get_data_from_table(source, fields='*', first_row=0, count=-1, schema=None):
""" Helper function for _get_data that handles BQ Tables. """
if not source.exists():
return _get_data_from_empty_list(source, fields, first_row, count)
if schema is None:
schema = source.schema
fields = get_field_list(fields, schema)
gen = source.range(first_row, count) if count >= 0 else source
rows = [{'c': [{'v': row[c]} if c in row else {} for c in fields]} for row in gen]
return {'cols': _get_cols(fields, schema), 'rows': rows}, source.length
|
[
"def",
"_get_data_from_table",
"(",
"source",
",",
"fields",
"=",
"'*'",
",",
"first_row",
"=",
"0",
",",
"count",
"=",
"-",
"1",
",",
"schema",
"=",
"None",
")",
":",
"if",
"not",
"source",
".",
"exists",
"(",
")",
":",
"return",
"_get_data_from_empty_list",
"(",
"source",
",",
"fields",
",",
"first_row",
",",
"count",
")",
"if",
"schema",
"is",
"None",
":",
"schema",
"=",
"source",
".",
"schema",
"fields",
"=",
"get_field_list",
"(",
"fields",
",",
"schema",
")",
"gen",
"=",
"source",
".",
"range",
"(",
"first_row",
",",
"count",
")",
"if",
"count",
">=",
"0",
"else",
"source",
"rows",
"=",
"[",
"{",
"'c'",
":",
"[",
"{",
"'v'",
":",
"row",
"[",
"c",
"]",
"}",
"if",
"c",
"in",
"row",
"else",
"{",
"}",
"for",
"c",
"in",
"fields",
"]",
"}",
"for",
"row",
"in",
"gen",
"]",
"return",
"{",
"'cols'",
":",
"_get_cols",
"(",
"fields",
",",
"schema",
")",
",",
"'rows'",
":",
"rows",
"}",
",",
"source",
".",
"length"
] |
Helper function for _get_data that handles BQ Tables.
|
[
"Helper",
"function",
"for",
"_get_data",
"that",
"handles",
"BQ",
"Tables",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/utils/commands/_utils.py#L176-L185
|
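For orientation, this is the Google Charts data structure the _get_data_* helpers build (values below are invented for illustration): a 'cols' list from _get_cols plus 'rows' where each row is {'c': [{'v': value}, ...]} and a missing cell becomes an empty dict; the second return value is the total row count.

# Illustrative shape only; not produced from a real table.
data = {
    'cols': [{'id': 'city', 'label': 'city', 'type': 'string'},
             {'id': 'visits', 'label': 'visits', 'type': 'number'}],
    'rows': [{'c': [{'v': 'Seattle'}, {'v': 42}]},
             {'c': [{'v': 'Zurich'}, {}]}],  # second cell missing from the source row
}
total_rows = 2   # _get_data_from_table returns (data, source.length)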
4,868
|
googledatalab/pydatalab
|
datalab/utils/commands/_utils.py
|
replace_vars
|
def replace_vars(config, env):
""" Replace variable references in config using the supplied env dictionary.
Args:
config: the config to parse. Can be a tuple, list or dict.
env: user supplied dictionary.
Raises:
Exception if any variable references are not found in env.
"""
if isinstance(config, dict):
for k, v in list(config.items()):
if isinstance(v, dict) or isinstance(v, list) or isinstance(v, tuple):
replace_vars(v, env)
elif isinstance(v, basestring):
config[k] = expand_var(v, env)
elif isinstance(config, list):
for i, v in enumerate(config):
if isinstance(v, dict) or isinstance(v, list) or isinstance(v, tuple):
replace_vars(v, env)
elif isinstance(v, basestring):
config[i] = expand_var(v, env)
elif isinstance(config, tuple):
# TODO(gram): figure out how to handle these if the tuple elements are scalar
for v in config:
if isinstance(v, dict) or isinstance(v, list) or isinstance(v, tuple):
replace_vars(v, env)
|
python
|
def replace_vars(config, env):
""" Replace variable references in config using the supplied env dictionary.
Args:
config: the config to parse. Can be a tuple, list or dict.
env: user supplied dictionary.
Raises:
Exception if any variable references are not found in env.
"""
if isinstance(config, dict):
for k, v in list(config.items()):
if isinstance(v, dict) or isinstance(v, list) or isinstance(v, tuple):
replace_vars(v, env)
elif isinstance(v, basestring):
config[k] = expand_var(v, env)
elif isinstance(config, list):
for i, v in enumerate(config):
if isinstance(v, dict) or isinstance(v, list) or isinstance(v, tuple):
replace_vars(v, env)
elif isinstance(v, basestring):
config[i] = expand_var(v, env)
elif isinstance(config, tuple):
# TODO(gram): figure out how to handle these if the tuple elements are scalar
for v in config:
if isinstance(v, dict) or isinstance(v, list) or isinstance(v, tuple):
replace_vars(v, env)
|
[
"def",
"replace_vars",
"(",
"config",
",",
"env",
")",
":",
"if",
"isinstance",
"(",
"config",
",",
"dict",
")",
":",
"for",
"k",
",",
"v",
"in",
"list",
"(",
"config",
".",
"items",
"(",
")",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"dict",
")",
"or",
"isinstance",
"(",
"v",
",",
"list",
")",
"or",
"isinstance",
"(",
"v",
",",
"tuple",
")",
":",
"replace_vars",
"(",
"v",
",",
"env",
")",
"elif",
"isinstance",
"(",
"v",
",",
"basestring",
")",
":",
"config",
"[",
"k",
"]",
"=",
"expand_var",
"(",
"v",
",",
"env",
")",
"elif",
"isinstance",
"(",
"config",
",",
"list",
")",
":",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"config",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"dict",
")",
"or",
"isinstance",
"(",
"v",
",",
"list",
")",
"or",
"isinstance",
"(",
"v",
",",
"tuple",
")",
":",
"replace_vars",
"(",
"v",
",",
"env",
")",
"elif",
"isinstance",
"(",
"v",
",",
"basestring",
")",
":",
"config",
"[",
"i",
"]",
"=",
"expand_var",
"(",
"v",
",",
"env",
")",
"elif",
"isinstance",
"(",
"config",
",",
"tuple",
")",
":",
"# TODO(gram): figure out how to handle these if the tuple elements are scalar",
"for",
"v",
"in",
"config",
":",
"if",
"isinstance",
"(",
"v",
",",
"dict",
")",
"or",
"isinstance",
"(",
"v",
",",
"list",
")",
"or",
"isinstance",
"(",
"v",
",",
"tuple",
")",
":",
"replace_vars",
"(",
"v",
",",
"env",
")"
] |
Replace variable references in config using the supplied env dictionary.
Args:
config: the config to parse. Can be a tuple, list or dict.
env: user supplied dictionary.
Raises:
Exception if any variable references are not found in env.
|
[
"Replace",
"variable",
"references",
"in",
"config",
"using",
"the",
"supplied",
"env",
"dictionary",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/utils/commands/_utils.py#L284-L310
|
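A small usage sketch for replace_vars (illustrative; the basestring checks indicate Python 2, and expand_var is assumed to substitute '$name' references from env, as the surrounding module implies).

# Hypothetical sketch: replace_vars mutates the config in place.
config = {'table': '$source_table', 'options': ['$mode', {'limit': '$n'}]}
env = {'source_table': 'project.dataset.table', 'mode': 'fast', 'n': '100'}

replace_vars(config, env)
# config is now:
# {'table': 'project.dataset.table', 'options': ['fast', {'limit': '100'}]}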
4,869
|
googledatalab/pydatalab
|
datalab/utils/commands/_utils.py
|
parse_config
|
def parse_config(config, env, as_dict=True):
""" Parse a config from a magic cell body. This could be JSON or YAML. We turn it into
a Python dictionary then recursively replace any variable references using the supplied
env dictionary.
"""
if config is None:
return None
stripped = config.strip()
if len(stripped) == 0:
config = {}
elif stripped[0] == '{':
config = json.loads(config)
else:
config = yaml.load(config)
if as_dict:
config = dict(config)
# Now we need to walk the config dictionary recursively replacing any '$name' vars.
replace_vars(config, env)
return config
|
python
|
def parse_config(config, env, as_dict=True):
""" Parse a config from a magic cell body. This could be JSON or YAML. We turn it into
a Python dictionary then recursively replace any variable references using the supplied
env dictionary.
"""
if config is None:
return None
stripped = config.strip()
if len(stripped) == 0:
config = {}
elif stripped[0] == '{':
config = json.loads(config)
else:
config = yaml.load(config)
if as_dict:
config = dict(config)
# Now we need to walk the config dictionary recursively replacing any '$name' vars.
replace_vars(config, env)
return config
|
[
"def",
"parse_config",
"(",
"config",
",",
"env",
",",
"as_dict",
"=",
"True",
")",
":",
"if",
"config",
"is",
"None",
":",
"return",
"None",
"stripped",
"=",
"config",
".",
"strip",
"(",
")",
"if",
"len",
"(",
"stripped",
")",
"==",
"0",
":",
"config",
"=",
"{",
"}",
"elif",
"stripped",
"[",
"0",
"]",
"==",
"'{'",
":",
"config",
"=",
"json",
".",
"loads",
"(",
"config",
")",
"else",
":",
"config",
"=",
"yaml",
".",
"load",
"(",
"config",
")",
"if",
"as_dict",
":",
"config",
"=",
"dict",
"(",
"config",
")",
"# Now we need to walk the config dictionary recursively replacing any '$name' vars.",
"replace_vars",
"(",
"config",
",",
"env",
")",
"return",
"config"
] |
Parse a config from a magic cell body. This could be JSON or YAML. We turn it into
a Python dictionary then recursively replace any variable references using the supplied
env dictionary.
|
[
"Parse",
"a",
"config",
"from",
"a",
"magic",
"cell",
"body",
".",
"This",
"could",
"be",
"JSON",
"or",
"YAML",
".",
"We",
"turn",
"it",
"into",
"a",
"Python",
"dictionary",
"then",
"recursively",
"replace",
"any",
"variable",
"references",
"using",
"the",
"supplied",
"env",
"dictionary",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/utils/commands/_utils.py#L313-L333
|
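A usage sketch for parse_config (illustrative): a body whose first non-blank character is '{' is parsed as JSON, anything else as YAML, and '$name' references are then expanded via replace_vars.

# Hypothetical sketch of parsing a magic cell body.
cell_body = """
table: $source_table
count: 10
"""
env = {'source_table': 'project.dataset.table'}

parse_config(cell_body, env)
# -> {'table': 'project.dataset.table', 'count': 10}
# (assuming expand_var substitutes '$source_table' from env)

parse_config('{"count": 10}', env)   # JSON branch via json.loads
# -> {'count': 10}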
4,870
|
googledatalab/pydatalab
|
datalab/utils/commands/_utils.py
|
validate_config
|
def validate_config(config, required_keys, optional_keys=None):
""" Validate a config dictionary to make sure it includes all required keys
and does not include any unexpected keys.
Args:
config: the config to validate.
required_keys: the names of the keys that the config must have.
optional_keys: the names of the keys that the config can have.
Raises:
Exception if the config is not a dict or invalid.
"""
if optional_keys is None:
optional_keys = []
if not isinstance(config, dict):
raise Exception('config is not dict type')
invalid_keys = set(config) - set(required_keys + optional_keys)
if len(invalid_keys) > 0:
raise Exception('Invalid config with unexpected keys "%s"' % ', '.join(e for e in invalid_keys))
missing_keys = set(required_keys) - set(config)
if len(missing_keys) > 0:
raise Exception('Invalid config with missing keys "%s"' % ', '.join(missing_keys))
|
python
|
def validate_config(config, required_keys, optional_keys=None):
""" Validate a config dictionary to make sure it includes all required keys
and does not include any unexpected keys.
Args:
config: the config to validate.
required_keys: the names of the keys that the config must have.
optional_keys: the names of the keys that the config can have.
Raises:
Exception if the config is not a dict or invalid.
"""
if optional_keys is None:
optional_keys = []
if not isinstance(config, dict):
raise Exception('config is not dict type')
invalid_keys = set(config) - set(required_keys + optional_keys)
if len(invalid_keys) > 0:
raise Exception('Invalid config with unexpected keys "%s"' % ', '.join(e for e in invalid_keys))
missing_keys = set(required_keys) - set(config)
if len(missing_keys) > 0:
raise Exception('Invalid config with missing keys "%s"' % ', '.join(missing_keys))
|
[
"def",
"validate_config",
"(",
"config",
",",
"required_keys",
",",
"optional_keys",
"=",
"None",
")",
":",
"if",
"optional_keys",
"is",
"None",
":",
"optional_keys",
"=",
"[",
"]",
"if",
"not",
"isinstance",
"(",
"config",
",",
"dict",
")",
":",
"raise",
"Exception",
"(",
"'config is not dict type'",
")",
"invalid_keys",
"=",
"set",
"(",
"config",
")",
"-",
"set",
"(",
"required_keys",
"+",
"optional_keys",
")",
"if",
"len",
"(",
"invalid_keys",
")",
">",
"0",
":",
"raise",
"Exception",
"(",
"'Invalid config with unexpected keys \"%s\"'",
"%",
"', '",
".",
"join",
"(",
"e",
"for",
"e",
"in",
"invalid_keys",
")",
")",
"missing_keys",
"=",
"set",
"(",
"required_keys",
")",
"-",
"set",
"(",
"config",
")",
"if",
"len",
"(",
"missing_keys",
")",
">",
"0",
":",
"raise",
"Exception",
"(",
"'Invalid config with missing keys \"%s\"'",
"%",
"', '",
".",
"join",
"(",
"missing_keys",
")",
")"
] |
Validate a config dictionary to make sure it includes all required keys
and does not include any unexpected keys.
Args:
config: the config to validate.
required_keys: the names of the keys that the config must have.
optional_keys: the names of the keys that the config can have.
Raises:
Exception if the config is not a dict or invalid.
|
[
"Validate",
"a",
"config",
"dictionary",
"to",
"make",
"sure",
"it",
"includes",
"all",
"required",
"keys",
"and",
"does",
"not",
"include",
"any",
"unexpected",
"keys",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/utils/commands/_utils.py#L336-L357
|
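A usage sketch for validate_config (illustrative keys only): it passes silently when the keys are valid and raises otherwise.

# Hypothetical sketch.
config = {'table': 'project.dataset.table', 'count': 10}
validate_config(config, required_keys=['table'], optional_keys=['count'])   # ok

validate_config({'table': 't', 'oops': 1}, required_keys=['table'])
# -> Exception: Invalid config with unexpected keys "oops"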
4,871
|
googledatalab/pydatalab
|
datalab/utils/commands/_utils.py
|
validate_config_must_have
|
def validate_config_must_have(config, required_keys):
""" Validate a config dictionary to make sure it has all of the specified keys
Args:
config: the config to validate.
required_keys: the list of possible keys that config must include.
Raises:
Exception if the config does not have any of them.
"""
missing_keys = set(required_keys) - set(config)
if len(missing_keys) > 0:
raise Exception('Invalid config with missing keys "%s"' % ', '.join(missing_keys))
|
python
|
def validate_config_must_have(config, required_keys):
""" Validate a config dictionary to make sure it has all of the specified keys
Args:
config: the config to validate.
required_keys: the list of possible keys that config must include.
Raises:
Exception if the config does not have any of them.
"""
missing_keys = set(required_keys) - set(config)
if len(missing_keys) > 0:
raise Exception('Invalid config with missing keys "%s"' % ', '.join(missing_keys))
|
[
"def",
"validate_config_must_have",
"(",
"config",
",",
"required_keys",
")",
":",
"missing_keys",
"=",
"set",
"(",
"required_keys",
")",
"-",
"set",
"(",
"config",
")",
"if",
"len",
"(",
"missing_keys",
")",
">",
"0",
":",
"raise",
"Exception",
"(",
"'Invalid config with missing keys \"%s\"'",
"%",
"', '",
".",
"join",
"(",
"missing_keys",
")",
")"
] |
Validate a config dictionary to make sure it has all of the specified keys
Args:
config: the config to validate.
required_keys: the list of possible keys that config must include.
Raises:
Exception if the config does not have any of them.
|
[
"Validate",
"a",
"config",
"dictionary",
"to",
"make",
"sure",
"it",
"has",
"all",
"of",
"the",
"specified",
"keys"
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/utils/commands/_utils.py#L360-L372
|
4,872
|
googledatalab/pydatalab
|
datalab/utils/commands/_utils.py
|
validate_config_has_one_of
|
def validate_config_has_one_of(config, one_of_keys):
""" Validate a config dictionary to make sure it has one and only one
key in one_of_keys.
Args:
config: the config to validate.
one_of_keys: the list of possible keys that config can have one and only one.
Raises:
Exception if the config does not have any of them, or multiple of them.
"""
intersection = set(config).intersection(one_of_keys)
if len(intersection) > 1:
raise Exception('Only one of the values in "%s" is needed' % ', '.join(intersection))
if len(intersection) == 0:
raise Exception('One of the values in "%s" is needed' % ', '.join(one_of_keys))
|
python
|
def validate_config_has_one_of(config, one_of_keys):
""" Validate a config dictionary to make sure it has one and only one
key in one_of_keys.
Args:
config: the config to validate.
one_of_keys: the list of possible keys that config can have one and only one.
Raises:
Exception if the config does not have any of them, or multiple of them.
"""
intersection = set(config).intersection(one_of_keys)
if len(intersection) > 1:
raise Exception('Only one of the values in "%s" is needed' % ', '.join(intersection))
if len(intersection) == 0:
raise Exception('One of the values in "%s" is needed' % ', '.join(one_of_keys))
|
[
"def",
"validate_config_has_one_of",
"(",
"config",
",",
"one_of_keys",
")",
":",
"intersection",
"=",
"set",
"(",
"config",
")",
".",
"intersection",
"(",
"one_of_keys",
")",
"if",
"len",
"(",
"intersection",
")",
">",
"1",
":",
"raise",
"Exception",
"(",
"'Only one of the values in \"%s\" is needed'",
"%",
"', '",
".",
"join",
"(",
"intersection",
")",
")",
"if",
"len",
"(",
"intersection",
")",
"==",
"0",
":",
"raise",
"Exception",
"(",
"'One of the values in \"%s\" is needed'",
"%",
"', '",
".",
"join",
"(",
"one_of_keys",
")",
")"
] |
Validate a config dictionary to make sure it has one and only one
key in one_of_keys.
Args:
config: the config to validate.
one_of_keys: the list of possible keys that config can have one and only one.
Raises:
Exception if the config does not have any of them, or multiple of them.
|
[
"Validate",
"a",
"config",
"dictionary",
"to",
"make",
"sure",
"it",
"has",
"one",
"and",
"only",
"one",
"key",
"in",
"one_of_keys",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/utils/commands/_utils.py#L375-L390
|
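A usage sketch for validate_config_has_one_of (illustrative keys): exactly one of the listed keys may appear.

# Hypothetical sketch.
validate_config_has_one_of({'table': 't', 'count': 10}, ['query', 'table'])   # ok

validate_config_has_one_of({'query': 'q', 'table': 't'}, ['query', 'table'])
# -> Exception: Only one of the values in "query, table" is needed
#    (key order in the message may vary, since the intersection is a set)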
4,873
|
googledatalab/pydatalab
|
datalab/utils/commands/_utils.py
|
validate_config_value
|
def validate_config_value(value, possible_values):
""" Validate a config value to make sure it is one of the possible values.
Args:
value: the config value to validate.
possible_values: the possible values the value can be
Raises:
Exception if the value is not one of possible values.
"""
if value not in possible_values:
raise Exception('Invalid config value "%s". Possible values are '
'%s' % (value, ', '.join(e for e in possible_values)))
|
python
|
def validate_config_value(value, possible_values):
""" Validate a config value to make sure it is one of the possible values.
Args:
value: the config value to validate.
possible_values: the possible values the value can be
Raises:
Exception if the value is not one of possible values.
"""
if value not in possible_values:
raise Exception('Invalid config value "%s". Possible values are '
'%s' % (value, ', '.join(e for e in possible_values)))
|
[
"def",
"validate_config_value",
"(",
"value",
",",
"possible_values",
")",
":",
"if",
"value",
"not",
"in",
"possible_values",
":",
"raise",
"Exception",
"(",
"'Invalid config value \"%s\". Possible values are '",
"'%s'",
"%",
"(",
"value",
",",
"', '",
".",
"join",
"(",
"e",
"for",
"e",
"in",
"possible_values",
")",
")",
")"
] |
Validate a config value to make sure it is one of the possible values.
Args:
value: the config value to validate.
possible_values: the possible values the value can be
Raises:
Exception if the value is not one of possible values.
|
[
"Validate",
"a",
"config",
"value",
"to",
"make",
"sure",
"it",
"is",
"one",
"of",
"the",
"possible",
"values",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/utils/commands/_utils.py#L393-L405
|
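A usage sketch for validate_config_value (illustrative values).

# Hypothetical sketch.
validate_config_value('csv', possible_values=['csv', 'json'])    # ok
validate_config_value('avro', possible_values=['csv', 'json'])
# -> Exception: Invalid config value "avro". Possible values are csv, json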
4,874
|
googledatalab/pydatalab
|
datalab/utils/commands/_utils.py
|
validate_gcs_path
|
def validate_gcs_path(path, require_object):
""" Check whether a given path is a valid GCS path.
Args:
path: the config to check.
require_object: if True, the path has to be an object path but not bucket path.
Raises:
Exception if the path is invalid
"""
bucket, key = datalab.storage._bucket.parse_name(path)
if bucket is None:
raise Exception('Invalid GCS path "%s"' % path)
if require_object and key is None:
raise Exception('It appears the GCS path "%s" is a bucket path but not an object path' % path)
|
python
|
def validate_gcs_path(path, require_object):
""" Check whether a given path is a valid GCS path.
Args:
path: the config to check.
require_object: if True, the path has to be an object path but not bucket path.
Raises:
Exception if the path is invalid
"""
bucket, key = datalab.storage._bucket.parse_name(path)
if bucket is None:
raise Exception('Invalid GCS path "%s"' % path)
if require_object and key is None:
raise Exception('It appears the GCS path "%s" is a bucket path but not an object path' % path)
|
[
"def",
"validate_gcs_path",
"(",
"path",
",",
"require_object",
")",
":",
"bucket",
",",
"key",
"=",
"datalab",
".",
"storage",
".",
"_bucket",
".",
"parse_name",
"(",
"path",
")",
"if",
"bucket",
"is",
"None",
":",
"raise",
"Exception",
"(",
"'Invalid GCS path \"%s\"'",
"%",
"path",
")",
"if",
"require_object",
"and",
"key",
"is",
"None",
":",
"raise",
"Exception",
"(",
"'It appears the GCS path \"%s\" is a bucket path but not an object path'",
"%",
"path",
")"
] |
Check whether a given path is a valid GCS path.
Args:
path: the config to check.
require_object: if True, the path has to be an object path but not bucket path.
Raises:
Exception if the path is invalid
|
[
"Check",
"whether",
"a",
"given",
"path",
"is",
"a",
"valid",
"GCS",
"path",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/utils/commands/_utils.py#L421-L435
|
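A usage sketch for validate_gcs_path (illustrative paths): a bare bucket is accepted only when require_object is False.

# Hypothetical sketch.
validate_gcs_path('gs://my-bucket/data/file.csv', require_object=True)   # ok
validate_gcs_path('gs://my-bucket', require_object=False)                # ok
validate_gcs_path('gs://my-bucket', require_object=True)
# -> Exception: the path is a bucket path but not an object path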
4,875
|
googledatalab/pydatalab
|
datalab/utils/commands/_utils.py
|
profile_df
|
def profile_df(df):
""" Generate a profile of data in a dataframe.
Args:
df: the Pandas dataframe.
"""
# The bootstrap CSS messes up the Datalab display so we tweak it to not have an effect.
# TODO(gram): strip it out rather than this kludge.
return IPython.core.display.HTML(
pandas_profiling.ProfileReport(df).html.replace('bootstrap', 'nonexistent'))
|
python
|
def profile_df(df):
""" Generate a profile of data in a dataframe.
Args:
df: the Pandas dataframe.
"""
# The bootstrap CSS messes up the Datalab display so we tweak it to not have an effect.
# TODO(gram): strip it out rather than this kludge.
return IPython.core.display.HTML(
pandas_profiling.ProfileReport(df).html.replace('bootstrap', 'nonexistent'))
|
[
"def",
"profile_df",
"(",
"df",
")",
":",
"# The bootstrap CSS messes up the Datalab display so we tweak it to not have an effect.",
"# TODO(gram): strip it out rather than this kludge.",
"return",
"IPython",
".",
"core",
".",
"display",
".",
"HTML",
"(",
"pandas_profiling",
".",
"ProfileReport",
"(",
"df",
")",
".",
"html",
".",
"replace",
"(",
"'bootstrap'",
",",
"'nonexistent'",
")",
")"
] |
Generate a profile of data in a dataframe.
Args:
df: the Pandas dataframe.
|
[
"Generate",
"a",
"profile",
"of",
"data",
"in",
"a",
"dataframe",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/utils/commands/_utils.py#L675-L684
|
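A usage sketch for profile_df (illustrative; assumes pandas and pandas_profiling are installed and the call runs in a Datalab notebook cell).

# Hypothetical sketch.
import pandas as pd

df = pd.DataFrame({'age': [23, 35, 41], 'city': ['A', 'B', 'A']})
profile_df(df)   # returns an IPython HTML object; the notebook renders the profile report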
4,876
|
googledatalab/pydatalab
|
solutionbox/structured_data/mltoolbox/_structured_data/_package.py
|
_package_to_staging
|
def _package_to_staging(staging_package_url):
"""Repackage this package from local installed location and copy it to GCS.
Args:
staging_package_url: GCS path.
"""
import google.datalab.ml as ml
# Find the package root. __file__ is under [package_root]/mltoolbox/_structured_data/this_file
package_root = os.path.abspath(
os.path.join(os.path.dirname(__file__), '../../'))
setup_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), 'master_setup.py'))
tar_gz_path = os.path.join(staging_package_url, 'staging', 'trainer.tar.gz')
print('Building package and uploading to %s' % tar_gz_path)
ml.package_and_copy(package_root, setup_path, tar_gz_path)
return tar_gz_path
|
python
|
def _package_to_staging(staging_package_url):
"""Repackage this package from local installed location and copy it to GCS.
Args:
staging_package_url: GCS path.
"""
import google.datalab.ml as ml
# Find the package root. __file__ is under [package_root]/mltoolbox/_structured_data/this_file
package_root = os.path.abspath(
os.path.join(os.path.dirname(__file__), '../../'))
setup_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), 'master_setup.py'))
tar_gz_path = os.path.join(staging_package_url, 'staging', 'trainer.tar.gz')
print('Building package and uploading to %s' % tar_gz_path)
ml.package_and_copy(package_root, setup_path, tar_gz_path)
return tar_gz_path
|
[
"def",
"_package_to_staging",
"(",
"staging_package_url",
")",
":",
"import",
"google",
".",
"datalab",
".",
"ml",
"as",
"ml",
"# Find the package root. __file__ is under [package_root]/mltoolbox/_structured_data/this_file",
"package_root",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"'../../'",
")",
")",
"setup_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"'master_setup.py'",
")",
")",
"tar_gz_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"staging_package_url",
",",
"'staging'",
",",
"'trainer.tar.gz'",
")",
"print",
"(",
"'Building package and uploading to %s'",
"%",
"tar_gz_path",
")",
"ml",
".",
"package_and_copy",
"(",
"package_root",
",",
"setup_path",
",",
"tar_gz_path",
")",
"return",
"tar_gz_path"
] |
Repackage this package from local installed location and copy it to GCS.
Args:
staging_package_url: GCS path.
|
[
"Repackage",
"this",
"package",
"from",
"local",
"installed",
"location",
"and",
"copy",
"it",
"to",
"GCS",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/_package.py#L87-L105
|
4,877
|
googledatalab/pydatalab
|
solutionbox/structured_data/mltoolbox/_structured_data/_package.py
|
analyze
|
def analyze(output_dir, dataset, cloud=False, project_id=None):
"""Blocking version of analyze_async. See documentation of analyze_async."""
job = analyze_async(
output_dir=output_dir,
dataset=dataset,
cloud=cloud,
project_id=project_id)
job.wait()
print('Analyze: ' + str(job.state))
|
python
|
def analyze(output_dir, dataset, cloud=False, project_id=None):
"""Blocking version of analyze_async. See documentation of analyze_async."""
job = analyze_async(
output_dir=output_dir,
dataset=dataset,
cloud=cloud,
project_id=project_id)
job.wait()
print('Analyze: ' + str(job.state))
|
[
"def",
"analyze",
"(",
"output_dir",
",",
"dataset",
",",
"cloud",
"=",
"False",
",",
"project_id",
"=",
"None",
")",
":",
"job",
"=",
"analyze_async",
"(",
"output_dir",
"=",
"output_dir",
",",
"dataset",
"=",
"dataset",
",",
"cloud",
"=",
"cloud",
",",
"project_id",
"=",
"project_id",
")",
"job",
".",
"wait",
"(",
")",
"print",
"(",
"'Analyze: '",
"+",
"str",
"(",
"job",
".",
"state",
")",
")"
] |
Blocking version of analyze_async. See documentation of analyze_async.
|
[
"Blocking",
"version",
"of",
"analyze_async",
".",
"See",
"documentation",
"of",
"analyze_async",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/_package.py#L136-L144
|
4,878
|
googledatalab/pydatalab
|
solutionbox/structured_data/mltoolbox/_structured_data/_package.py
|
analyze_async
|
def analyze_async(output_dir, dataset, cloud=False, project_id=None):
"""Analyze data locally or in the cloud with BigQuery.
Produce analysis used by training. This can take a while, even for small
datasets. For small datasets, it may be faster to use local_analysis.
Args:
output_dir: The output directory to use.
dataset: only CsvDataSet is supported currently.
    cloud: If False, runs analysis locally with Pandas. If True, runs analysis
in the cloud with BigQuery.
project_id: Uses BigQuery with this project id. Default is datalab's
default project id.
Returns:
A google.datalab.utils.Job object that can be used to query state from or wait.
"""
import google.datalab.utils as du
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fn = lambda: _analyze(output_dir, dataset, cloud, project_id) # noqa
return du.LambdaJob(fn, job_id=None)
|
python
|
def analyze_async(output_dir, dataset, cloud=False, project_id=None):
"""Analyze data locally or in the cloud with BigQuery.
Produce analysis used by training. This can take a while, even for small
datasets. For small datasets, it may be faster to use local_analysis.
Args:
output_dir: The output directory to use.
dataset: only CsvDataSet is supported currently.
    cloud: If False, runs analysis locally with Pandas. If True, runs analysis
in the cloud with BigQuery.
project_id: Uses BigQuery with this project id. Default is datalab's
default project id.
Returns:
A google.datalab.utils.Job object that can be used to query state from or wait.
"""
import google.datalab.utils as du
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fn = lambda: _analyze(output_dir, dataset, cloud, project_id) # noqa
return du.LambdaJob(fn, job_id=None)
|
[
"def",
"analyze_async",
"(",
"output_dir",
",",
"dataset",
",",
"cloud",
"=",
"False",
",",
"project_id",
"=",
"None",
")",
":",
"import",
"google",
".",
"datalab",
".",
"utils",
"as",
"du",
"with",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"warnings",
".",
"simplefilter",
"(",
"\"ignore\"",
")",
"fn",
"=",
"lambda",
":",
"_analyze",
"(",
"output_dir",
",",
"dataset",
",",
"cloud",
",",
"project_id",
")",
"# noqa",
"return",
"du",
".",
"LambdaJob",
"(",
"fn",
",",
"job_id",
"=",
"None",
")"
] |
Analyze data locally or in the cloud with BigQuery.
Produce analysis used by training. This can take a while, even for small
datasets. For small datasets, it may be faster to use local_analysis.
Args:
output_dir: The output directory to use.
dataset: only CsvDataSet is supported currently.
cloud: If False, runs analysis locally with Pandas. If True, runs analysis
in the cloud with BigQuery.
project_id: Uses BigQuery with this project id. Default is datalab's
default project id.
Returns:
A google.datalab.utils.Job object that can be used to query state from or wait.
|
[
"Analyze",
"data",
"locally",
"or",
"in",
"the",
"cloud",
"with",
"BigQuery",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/_package.py#L147-L168
|
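A usage sketch for analyze_async (illustrative: the dataset object and the output path are placeholders, not taken from the record).

# Hypothetical sketch: csv_dataset is a CsvDataSet built elsewhere (placeholder).
job = analyze_async(output_dir='./analysis_out',
                    dataset=csv_dataset,
                    cloud=False)          # True would run the analysis in BigQuery
job.wait()                                # the returned LambdaJob exposes wait()/state
print(job.state)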
4,879
|
googledatalab/pydatalab
|
solutionbox/structured_data/mltoolbox/_structured_data/_package.py
|
cloud_train
|
def cloud_train(train_dataset,
eval_dataset,
analysis_dir,
output_dir,
features,
model_type,
max_steps,
num_epochs,
train_batch_size,
eval_batch_size,
min_eval_frequency,
top_n,
layer_sizes,
learning_rate,
epsilon,
job_name,
job_name_prefix,
config):
"""Train model using CloudML.
See local_train() for a description of the args.
Args:
config: A CloudTrainingConfig object.
job_name: Training job name. A default will be picked if None.
"""
import google.datalab.ml as ml
if len(train_dataset.input_files) != 1 or len(eval_dataset.input_files) != 1:
raise ValueError('CsvDataSets must be built with a file pattern, not list '
'of files.')
if file_io.file_exists(output_dir):
raise ValueError('output_dir already exist. Use a new output path.')
if isinstance(features, dict):
# Make a features file.
if not file_io.file_exists(output_dir):
file_io.recursive_create_dir(output_dir)
features_file = os.path.join(output_dir, 'features_file.json')
file_io.write_string_to_file(
features_file,
json.dumps(features))
else:
features_file = features
if not isinstance(config, ml.CloudTrainingConfig):
raise ValueError('cloud should be an instance of '
'google.datalab.ml.CloudTrainingConfig for cloud training.')
_assert_gcs_files([output_dir, train_dataset.input_files[0], eval_dataset.input_files[0],
features_file, analysis_dir])
args = ['--train-data-paths=%s' % train_dataset.input_files[0],
'--eval-data-paths=%s' % eval_dataset.input_files[0],
'--preprocess-output-dir=%s' % analysis_dir,
'--transforms-file=%s' % features_file,
'--model-type=%s' % model_type,
'--max-steps=%s' % str(max_steps),
'--train-batch-size=%s' % str(train_batch_size),
'--eval-batch-size=%s' % str(eval_batch_size),
'--min-eval-frequency=%s' % str(min_eval_frequency),
'--learning-rate=%s' % str(learning_rate),
'--epsilon=%s' % str(epsilon)]
if num_epochs:
args.append('--num-epochs=%s' % str(num_epochs))
if top_n:
args.append('--top-n=%s' % str(top_n))
if layer_sizes:
for i in range(len(layer_sizes)):
args.append('--layer-size%s=%s' % (i + 1, str(layer_sizes[i])))
job_request = {
'package_uris': [_package_to_staging(output_dir), _TF_GS_URL, _PROTOBUF_GS_URL],
'python_module': 'mltoolbox._structured_data.trainer.task',
'job_dir': output_dir,
'args': args
}
job_request.update(dict(config._asdict()))
if not job_name:
job_name = job_name_prefix or 'structured_data_train'
job_name += '_' + datetime.datetime.now().strftime('%y%m%d_%H%M%S')
job = ml.Job.submit_training(job_request, job_name)
print('Job request send. View status of job at')
print('https://console.developers.google.com/ml/jobs?project=%s' %
_default_project())
return job
|
python
|
def cloud_train(train_dataset,
eval_dataset,
analysis_dir,
output_dir,
features,
model_type,
max_steps,
num_epochs,
train_batch_size,
eval_batch_size,
min_eval_frequency,
top_n,
layer_sizes,
learning_rate,
epsilon,
job_name,
job_name_prefix,
config):
"""Train model using CloudML.
See local_train() for a description of the args.
Args:
config: A CloudTrainingConfig object.
job_name: Training job name. A default will be picked if None.
"""
import google.datalab.ml as ml
if len(train_dataset.input_files) != 1 or len(eval_dataset.input_files) != 1:
raise ValueError('CsvDataSets must be built with a file pattern, not list '
'of files.')
if file_io.file_exists(output_dir):
raise ValueError('output_dir already exist. Use a new output path.')
if isinstance(features, dict):
# Make a features file.
if not file_io.file_exists(output_dir):
file_io.recursive_create_dir(output_dir)
features_file = os.path.join(output_dir, 'features_file.json')
file_io.write_string_to_file(
features_file,
json.dumps(features))
else:
features_file = features
if not isinstance(config, ml.CloudTrainingConfig):
raise ValueError('cloud should be an instance of '
'google.datalab.ml.CloudTrainingConfig for cloud training.')
_assert_gcs_files([output_dir, train_dataset.input_files[0], eval_dataset.input_files[0],
features_file, analysis_dir])
args = ['--train-data-paths=%s' % train_dataset.input_files[0],
'--eval-data-paths=%s' % eval_dataset.input_files[0],
'--preprocess-output-dir=%s' % analysis_dir,
'--transforms-file=%s' % features_file,
'--model-type=%s' % model_type,
'--max-steps=%s' % str(max_steps),
'--train-batch-size=%s' % str(train_batch_size),
'--eval-batch-size=%s' % str(eval_batch_size),
'--min-eval-frequency=%s' % str(min_eval_frequency),
'--learning-rate=%s' % str(learning_rate),
'--epsilon=%s' % str(epsilon)]
if num_epochs:
args.append('--num-epochs=%s' % str(num_epochs))
if top_n:
args.append('--top-n=%s' % str(top_n))
if layer_sizes:
for i in range(len(layer_sizes)):
args.append('--layer-size%s=%s' % (i + 1, str(layer_sizes[i])))
job_request = {
'package_uris': [_package_to_staging(output_dir), _TF_GS_URL, _PROTOBUF_GS_URL],
'python_module': 'mltoolbox._structured_data.trainer.task',
'job_dir': output_dir,
'args': args
}
job_request.update(dict(config._asdict()))
if not job_name:
job_name = job_name_prefix or 'structured_data_train'
job_name += '_' + datetime.datetime.now().strftime('%y%m%d_%H%M%S')
job = ml.Job.submit_training(job_request, job_name)
print('Job request send. View status of job at')
print('https://console.developers.google.com/ml/jobs?project=%s' %
_default_project())
return job
|
[
"def",
"cloud_train",
"(",
"train_dataset",
",",
"eval_dataset",
",",
"analysis_dir",
",",
"output_dir",
",",
"features",
",",
"model_type",
",",
"max_steps",
",",
"num_epochs",
",",
"train_batch_size",
",",
"eval_batch_size",
",",
"min_eval_frequency",
",",
"top_n",
",",
"layer_sizes",
",",
"learning_rate",
",",
"epsilon",
",",
"job_name",
",",
"job_name_prefix",
",",
"config",
")",
":",
"import",
"google",
".",
"datalab",
".",
"ml",
"as",
"ml",
"if",
"len",
"(",
"train_dataset",
".",
"input_files",
")",
"!=",
"1",
"or",
"len",
"(",
"eval_dataset",
".",
"input_files",
")",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"'CsvDataSets must be built with a file pattern, not list '",
"'of files.'",
")",
"if",
"file_io",
".",
"file_exists",
"(",
"output_dir",
")",
":",
"raise",
"ValueError",
"(",
"'output_dir already exist. Use a new output path.'",
")",
"if",
"isinstance",
"(",
"features",
",",
"dict",
")",
":",
"# Make a features file.",
"if",
"not",
"file_io",
".",
"file_exists",
"(",
"output_dir",
")",
":",
"file_io",
".",
"recursive_create_dir",
"(",
"output_dir",
")",
"features_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"'features_file.json'",
")",
"file_io",
".",
"write_string_to_file",
"(",
"features_file",
",",
"json",
".",
"dumps",
"(",
"features",
")",
")",
"else",
":",
"features_file",
"=",
"features",
"if",
"not",
"isinstance",
"(",
"config",
",",
"ml",
".",
"CloudTrainingConfig",
")",
":",
"raise",
"ValueError",
"(",
"'cloud should be an instance of '",
"'google.datalab.ml.CloudTrainingConfig for cloud training.'",
")",
"_assert_gcs_files",
"(",
"[",
"output_dir",
",",
"train_dataset",
".",
"input_files",
"[",
"0",
"]",
",",
"eval_dataset",
".",
"input_files",
"[",
"0",
"]",
",",
"features_file",
",",
"analysis_dir",
"]",
")",
"args",
"=",
"[",
"'--train-data-paths=%s'",
"%",
"train_dataset",
".",
"input_files",
"[",
"0",
"]",
",",
"'--eval-data-paths=%s'",
"%",
"eval_dataset",
".",
"input_files",
"[",
"0",
"]",
",",
"'--preprocess-output-dir=%s'",
"%",
"analysis_dir",
",",
"'--transforms-file=%s'",
"%",
"features_file",
",",
"'--model-type=%s'",
"%",
"model_type",
",",
"'--max-steps=%s'",
"%",
"str",
"(",
"max_steps",
")",
",",
"'--train-batch-size=%s'",
"%",
"str",
"(",
"train_batch_size",
")",
",",
"'--eval-batch-size=%s'",
"%",
"str",
"(",
"eval_batch_size",
")",
",",
"'--min-eval-frequency=%s'",
"%",
"str",
"(",
"min_eval_frequency",
")",
",",
"'--learning-rate=%s'",
"%",
"str",
"(",
"learning_rate",
")",
",",
"'--epsilon=%s'",
"%",
"str",
"(",
"epsilon",
")",
"]",
"if",
"num_epochs",
":",
"args",
".",
"append",
"(",
"'--num-epochs=%s'",
"%",
"str",
"(",
"num_epochs",
")",
")",
"if",
"top_n",
":",
"args",
".",
"append",
"(",
"'--top-n=%s'",
"%",
"str",
"(",
"top_n",
")",
")",
"if",
"layer_sizes",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"layer_sizes",
")",
")",
":",
"args",
".",
"append",
"(",
"'--layer-size%s=%s'",
"%",
"(",
"i",
"+",
"1",
",",
"str",
"(",
"layer_sizes",
"[",
"i",
"]",
")",
")",
")",
"job_request",
"=",
"{",
"'package_uris'",
":",
"[",
"_package_to_staging",
"(",
"output_dir",
")",
",",
"_TF_GS_URL",
",",
"_PROTOBUF_GS_URL",
"]",
",",
"'python_module'",
":",
"'mltoolbox._structured_data.trainer.task'",
",",
"'job_dir'",
":",
"output_dir",
",",
"'args'",
":",
"args",
"}",
"job_request",
".",
"update",
"(",
"dict",
"(",
"config",
".",
"_asdict",
"(",
")",
")",
")",
"if",
"not",
"job_name",
":",
"job_name",
"=",
"job_name_prefix",
"or",
"'structured_data_train'",
"job_name",
"+=",
"'_'",
"+",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"'%y%m%d_%H%M%S'",
")",
"job",
"=",
"ml",
".",
"Job",
".",
"submit_training",
"(",
"job_request",
",",
"job_name",
")",
"print",
"(",
"'Job request send. View status of job at'",
")",
"print",
"(",
"'https://console.developers.google.com/ml/jobs?project=%s'",
"%",
"_default_project",
"(",
")",
")",
"return",
"job"
] |
Train model using CloudML.
See local_train() for a description of the args.
Args:
config: A CloudTrainingConfig object.
job_name: Training job name. A default will be picked if None.
|
[
"Train",
"model",
"using",
"CloudML",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/_package.py#L456-L543
|
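A call sketch for cloud_train (illustrative only: every path, the dataset objects, and the CloudTrainingConfig fields are placeholders/assumptions; output_dir must not already exist and all paths must be on GCS).

# Hypothetical sketch; train_csv / eval_csv are CsvDataSets built elsewhere (placeholders).
import google.datalab.ml as ml

config = ml.CloudTrainingConfig(region='us-central1', scale_tier='STANDARD_1')
job = cloud_train(train_dataset=train_csv, eval_dataset=eval_csv,
                  analysis_dir='gs://my-bucket/analysis',
                  output_dir='gs://my-bucket/train_out',
                  features='gs://my-bucket/features.json',
                  model_type='dnn_classification',
                  max_steps=2000, num_epochs=None,
                  train_batch_size=100, eval_batch_size=100,
                  min_eval_frequency=100, top_n=None,
                  layer_sizes=[64, 32], learning_rate=0.01, epsilon=0.0005,
                  job_name=None, job_name_prefix='demo', config=config)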
4,880
|
googledatalab/pydatalab
|
solutionbox/structured_data/mltoolbox/_structured_data/_package.py
|
predict
|
def predict(data, training_dir=None, model_name=None, model_version=None, cloud=False):
"""Runs prediction locally or on the cloud.
Args:
data: List of csv strings or a Pandas DataFrame that match the model schema.
training_dir: local path to the trained output folder.
model_name: deployed model name
    model_version: deployed model version
cloud: bool. If False, does local prediction and data and training_dir
must be set. If True, does cloud prediction and data, model_name,
and model_version must be set.
For cloud prediction, the model must be created. This can be done by running
two gcloud commands::
1) gcloud beta ml models create NAME
2) gcloud beta ml versions create VERSION --model NAME --origin gs://BUCKET/training_dir/model
or these datalab commands:
1) import google.datalab as datalab
model = datalab.ml.ModelVersions(MODEL_NAME)
model.deploy(version_name=VERSION, path='gs://BUCKET/training_dir/model')
Note that the model must be on GCS.
Returns:
Pandas DataFrame.
"""
if cloud:
if not model_version or not model_name:
raise ValueError('model_version or model_name is not set')
if training_dir:
raise ValueError('training_dir not needed when cloud is True')
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return cloud_predict(model_name, model_version, data)
else:
if not training_dir:
raise ValueError('training_dir is not set')
if model_version or model_name:
raise ValueError('model_name and model_version not needed when cloud is '
'False.')
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return local_predict(training_dir, data)
|
python
|
def predict(data, training_dir=None, model_name=None, model_version=None, cloud=False):
"""Runs prediction locally or on the cloud.
Args:
data: List of csv strings or a Pandas DataFrame that match the model schema.
training_dir: local path to the trained output folder.
model_name: deployed model name
    model_version: deployed model version
cloud: bool. If False, does local prediction and data and training_dir
must be set. If True, does cloud prediction and data, model_name,
and model_version must be set.
For cloud prediction, the model must be created. This can be done by running
two gcloud commands::
1) gcloud beta ml models create NAME
2) gcloud beta ml versions create VERSION --model NAME --origin gs://BUCKET/training_dir/model
or these datalab commands:
1) import google.datalab as datalab
model = datalab.ml.ModelVersions(MODEL_NAME)
model.deploy(version_name=VERSION, path='gs://BUCKET/training_dir/model')
Note that the model must be on GCS.
Returns:
Pandas DataFrame.
"""
if cloud:
if not model_version or not model_name:
raise ValueError('model_version or model_name is not set')
if training_dir:
raise ValueError('training_dir not needed when cloud is True')
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return cloud_predict(model_name, model_version, data)
else:
if not training_dir:
raise ValueError('training_dir is not set')
if model_version or model_name:
raise ValueError('model_name and model_version not needed when cloud is '
'False.')
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return local_predict(training_dir, data)
|
[
"def",
"predict",
"(",
"data",
",",
"training_dir",
"=",
"None",
",",
"model_name",
"=",
"None",
",",
"model_version",
"=",
"None",
",",
"cloud",
"=",
"False",
")",
":",
"if",
"cloud",
":",
"if",
"not",
"model_version",
"or",
"not",
"model_name",
":",
"raise",
"ValueError",
"(",
"'model_version or model_name is not set'",
")",
"if",
"training_dir",
":",
"raise",
"ValueError",
"(",
"'training_dir not needed when cloud is True'",
")",
"with",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"warnings",
".",
"simplefilter",
"(",
"\"ignore\"",
")",
"return",
"cloud_predict",
"(",
"model_name",
",",
"model_version",
",",
"data",
")",
"else",
":",
"if",
"not",
"training_dir",
":",
"raise",
"ValueError",
"(",
"'training_dir is not set'",
")",
"if",
"model_version",
"or",
"model_name",
":",
"raise",
"ValueError",
"(",
"'model_name and model_version not needed when cloud is '",
"'False.'",
")",
"with",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"warnings",
".",
"simplefilter",
"(",
"\"ignore\"",
")",
"return",
"local_predict",
"(",
"training_dir",
",",
"data",
")"
] |
Runs prediction locally or on the cloud.
Args:
data: List of csv strings or a Pandas DataFrame that match the model schema.
training_dir: local path to the trained output folder.
model_name: deployed model name
model_version: deployed model version
cloud: bool. If False, does local prediction and data and training_dir
must be set. If True, does cloud prediction and data, model_name,
and model_version must be set.
For cloud prediction, the model must be created. This can be done by running
two gcloud commands::
1) gcloud beta ml models create NAME
2) gcloud beta ml versions create VERSION --model NAME --origin gs://BUCKET/training_dir/model
or these datalab commands:
1) import google.datalab as datalab
model = datalab.ml.ModelVersions(MODEL_NAME)
model.deploy(version_name=VERSION, path='gs://BUCKET/training_dir/model')
Note that the model must be on GCS.
Returns:
Pandas DataFrame.
|
[
"Runs",
"prediction",
"locally",
"or",
"on",
"the",
"cloud",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/_package.py#L550-L592
|
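A usage sketch for predict (illustrative rows, paths, and model names).

# Hypothetical sketch.
rows = ['12,Monday,3.5', '7,Friday,1.2']   # must match the training schema, minus the target

# Local prediction against a finished training output directory:
df_local = predict(data=rows, training_dir='./train_out')

# Cloud prediction against a deployed model version (model files must be on GCS):
df_cloud = predict(data=rows, model_name='my_model', model_version='v1', cloud=True)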
4,881
|
googledatalab/pydatalab
|
solutionbox/structured_data/mltoolbox/_structured_data/_package.py
|
local_predict
|
def local_predict(training_dir, data):
"""Runs local prediction on the prediction graph.
Runs local prediction and returns the result in a Pandas DataFrame. For
running prediction on a large dataset or saving the results, run
local_batch_prediction or batch_prediction. Input data should fully match
the schema that was used at training, except the target column should not
exist.
Args:
training_dir: local path to the trained output folder.
data: List of csv strings or a Pandas DataFrame that match the model schema.
Raises:
ValueError: if training_dir does not contain the folder 'model'.
FileNotFoundError: if the prediction data is not found.
"""
# from . import predict as predict_module
from .prediction import predict as predict_module
# Save the instances to a file, call local batch prediction, and return it
tmp_dir = tempfile.mkdtemp()
_, input_file_path = tempfile.mkstemp(dir=tmp_dir, suffix='.csv',
prefix='input')
try:
if isinstance(data, pd.DataFrame):
data.to_csv(input_file_path, header=False, index=False)
else:
with open(input_file_path, 'w') as f:
for line in data:
f.write(line + '\n')
model_dir = os.path.join(training_dir, 'model')
if not file_io.file_exists(model_dir):
raise ValueError('training_dir should contain the folder model')
cmd = ['predict.py',
'--predict-data=%s' % input_file_path,
'--trained-model-dir=%s' % model_dir,
'--output-dir=%s' % tmp_dir,
'--output-format=csv',
'--batch-size=16',
'--mode=prediction',
'--no-shard-files']
# runner_results = predict_module.predict.main(cmd)
runner_results = predict_module.main(cmd)
runner_results.wait_until_finish()
# Read the header file.
schema_file = os.path.join(tmp_dir, 'csv_schema.json')
with open(schema_file, 'r') as f:
schema = json.loads(f.read())
# Print any errors to the screen.
errors_file = glob.glob(os.path.join(tmp_dir, 'errors*'))
if errors_file and os.path.getsize(errors_file[0]) > 0:
print('Warning: there are errors. See below:')
with open(errors_file[0], 'r') as f:
text = f.read()
print(text)
# Read the predictions data.
prediction_file = glob.glob(os.path.join(tmp_dir, 'predictions*'))
if not prediction_file:
raise FileNotFoundError('Prediction results not found')
predictions = pd.read_csv(prediction_file[0],
header=None,
names=[col['name'] for col in schema])
return predictions
finally:
shutil.rmtree(tmp_dir)
|
python
|
def local_predict(training_dir, data):
"""Runs local prediction on the prediction graph.
Runs local prediction and returns the result in a Pandas DataFrame. For
running prediction on a large dataset or saving the results, run
local_batch_prediction or batch_prediction. Input data should fully match
the schema that was used at training, except the target column should not
exist.
Args:
training_dir: local path to the trained output folder.
data: List of csv strings or a Pandas DataFrame that match the model schema.
Raises:
ValueError: if training_dir does not contain the folder 'model'.
FileNotFoundError: if the prediction data is not found.
"""
# from . import predict as predict_module
from .prediction import predict as predict_module
# Save the instances to a file, call local batch prediction, and return it
tmp_dir = tempfile.mkdtemp()
_, input_file_path = tempfile.mkstemp(dir=tmp_dir, suffix='.csv',
prefix='input')
try:
if isinstance(data, pd.DataFrame):
data.to_csv(input_file_path, header=False, index=False)
else:
with open(input_file_path, 'w') as f:
for line in data:
f.write(line + '\n')
model_dir = os.path.join(training_dir, 'model')
if not file_io.file_exists(model_dir):
raise ValueError('training_dir should contain the folder model')
cmd = ['predict.py',
'--predict-data=%s' % input_file_path,
'--trained-model-dir=%s' % model_dir,
'--output-dir=%s' % tmp_dir,
'--output-format=csv',
'--batch-size=16',
'--mode=prediction',
'--no-shard-files']
# runner_results = predict_module.predict.main(cmd)
runner_results = predict_module.main(cmd)
runner_results.wait_until_finish()
# Read the header file.
schema_file = os.path.join(tmp_dir, 'csv_schema.json')
with open(schema_file, 'r') as f:
schema = json.loads(f.read())
# Print any errors to the screen.
errors_file = glob.glob(os.path.join(tmp_dir, 'errors*'))
if errors_file and os.path.getsize(errors_file[0]) > 0:
print('Warning: there are errors. See below:')
with open(errors_file[0], 'r') as f:
text = f.read()
print(text)
# Read the predictions data.
prediction_file = glob.glob(os.path.join(tmp_dir, 'predictions*'))
if not prediction_file:
raise FileNotFoundError('Prediction results not found')
predictions = pd.read_csv(prediction_file[0],
header=None,
names=[col['name'] for col in schema])
return predictions
finally:
shutil.rmtree(tmp_dir)
|
[
"def",
"local_predict",
"(",
"training_dir",
",",
"data",
")",
":",
"# from . import predict as predict_module",
"from",
".",
"prediction",
"import",
"predict",
"as",
"predict_module",
"# Save the instances to a file, call local batch prediction, and return it",
"tmp_dir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"_",
",",
"input_file_path",
"=",
"tempfile",
".",
"mkstemp",
"(",
"dir",
"=",
"tmp_dir",
",",
"suffix",
"=",
"'.csv'",
",",
"prefix",
"=",
"'input'",
")",
"try",
":",
"if",
"isinstance",
"(",
"data",
",",
"pd",
".",
"DataFrame",
")",
":",
"data",
".",
"to_csv",
"(",
"input_file_path",
",",
"header",
"=",
"False",
",",
"index",
"=",
"False",
")",
"else",
":",
"with",
"open",
"(",
"input_file_path",
",",
"'w'",
")",
"as",
"f",
":",
"for",
"line",
"in",
"data",
":",
"f",
".",
"write",
"(",
"line",
"+",
"'\\n'",
")",
"model_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"training_dir",
",",
"'model'",
")",
"if",
"not",
"file_io",
".",
"file_exists",
"(",
"model_dir",
")",
":",
"raise",
"ValueError",
"(",
"'training_dir should contain the folder model'",
")",
"cmd",
"=",
"[",
"'predict.py'",
",",
"'--predict-data=%s'",
"%",
"input_file_path",
",",
"'--trained-model-dir=%s'",
"%",
"model_dir",
",",
"'--output-dir=%s'",
"%",
"tmp_dir",
",",
"'--output-format=csv'",
",",
"'--batch-size=16'",
",",
"'--mode=prediction'",
",",
"'--no-shard-files'",
"]",
"# runner_results = predict_module.predict.main(cmd)",
"runner_results",
"=",
"predict_module",
".",
"main",
"(",
"cmd",
")",
"runner_results",
".",
"wait_until_finish",
"(",
")",
"# Read the header file.",
"schema_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"'csv_schema.json'",
")",
"with",
"open",
"(",
"schema_file",
",",
"'r'",
")",
"as",
"f",
":",
"schema",
"=",
"json",
".",
"loads",
"(",
"f",
".",
"read",
"(",
")",
")",
"# Print any errors to the screen.",
"errors_file",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"'errors*'",
")",
")",
"if",
"errors_file",
"and",
"os",
".",
"path",
".",
"getsize",
"(",
"errors_file",
"[",
"0",
"]",
")",
">",
"0",
":",
"print",
"(",
"'Warning: there are errors. See below:'",
")",
"with",
"open",
"(",
"errors_file",
"[",
"0",
"]",
",",
"'r'",
")",
"as",
"f",
":",
"text",
"=",
"f",
".",
"read",
"(",
")",
"print",
"(",
"text",
")",
"# Read the predictions data.",
"prediction_file",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"'predictions*'",
")",
")",
"if",
"not",
"prediction_file",
":",
"raise",
"FileNotFoundError",
"(",
"'Prediction results not found'",
")",
"predictions",
"=",
"pd",
".",
"read_csv",
"(",
"prediction_file",
"[",
"0",
"]",
",",
"header",
"=",
"None",
",",
"names",
"=",
"[",
"col",
"[",
"'name'",
"]",
"for",
"col",
"in",
"schema",
"]",
")",
"return",
"predictions",
"finally",
":",
"shutil",
".",
"rmtree",
"(",
"tmp_dir",
")"
] |
Runs local prediction on the prediction graph.
Runs local prediction and returns the result in a Pandas DataFrame. For
running prediction on a large dataset or saving the results, run
local_batch_prediction or batch_prediction. Input data should fully match
the schema that was used at training, except the target column should not
exist.
Args:
training_dir: local path to the trained output folder.
data: List of csv strings or a Pandas DataFrame that match the model schema.
Raises:
ValueError: if training_dir does not contain the folder 'model'.
FileNotFoundError: if the prediction data is not found.
|
[
"Runs",
"local",
"prediction",
"on",
"the",
"prediction",
"graph",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/_package.py#L595-L668
|
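A minimal usage sketch for local_predict as defined above. The training directory and the CSV rows are hypothetical placeholders, and the import path is an assumption derived from this record's path column; the function accepts either a list of CSV strings or a pandas DataFrame whose columns match the training schema minus the target column.
from mltoolbox._structured_data._package import local_predict  # assumed import path
import pandas as pd

rows = ['15,engineer,blue', '38,teacher,green']             # placeholder CSV rows
predictions = local_predict('./my_training_output', rows)   # placeholder path
print(predictions.head())

# A DataFrame works too; it is written to a header-less, index-less CSV internally.
df = pd.DataFrame([[15, 'engineer', 'blue']], columns=['age', 'job', 'color'])
predictions = local_predict('./my_training_output', df)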
4,882
|
googledatalab/pydatalab
|
solutionbox/structured_data/mltoolbox/_structured_data/_package.py
|
cloud_predict
|
def cloud_predict(model_name, model_version, data):
"""Use Online prediction.
Runs online prediction in the cloud and prints the results to the screen. For
running prediction on a large dataset or saving the results, run
local_batch_prediction or batch_prediction.
Args:
model_name: deployed model name
model_version: deployed model version
data: List of csv strings or a Pandas DataFrame that match the model schema.
Before using this, the model must be created. This can be done by running
two gcloud commands:
1) gcloud beta ml models create NAME
2) gcloud beta ml versions create VERSION --model NAME \
--origin gs://BUCKET/training_dir/model
or these datalab commands:
1) import google.datalab as datalab
model = datalab.ml.ModelVersions(MODEL_NAME)
model.deploy(version_name=VERSION,
path='gs://BUCKET/training_dir/model')
Note that the model must be on GCS.
"""
import google.datalab.ml as ml
if isinstance(data, pd.DataFrame):
# write the df to csv.
string_buffer = io.StringIO()
data.to_csv(string_buffer, header=None, index=False)
input_data = string_buffer.getvalue().split('\n')
# remove empty strings
input_data = [line for line in input_data if line]
else:
input_data = data
predictions = ml.ModelVersions(model_name).predict(model_version, input_data)
# Convert predictions into a dataframe
df = pd.DataFrame(columns=sorted(predictions[0].keys()))
for i in range(len(predictions)):
for k, v in predictions[i].iteritems():
df.loc[i, k] = v
return df
|
python
|
def cloud_predict(model_name, model_version, data):
"""Use Online prediction.
Runs online prediction in the cloud and prints the results to the screen. For
running prediction on a large dataset or saving the results, run
local_batch_prediction or batch_prediction.
Args:
model_name: deployed model name
model_version: deployed model version
data: List of csv strings or a Pandas DataFrame that match the model schema.
Before using this, the model must be created. This can be done by running
two gcloud commands:
1) gcloud beta ml models create NAME
2) gcloud beta ml versions create VERSION --model NAME \
--origin gs://BUCKET/training_dir/model
or these datalab commands:
1) import google.datalab as datalab
model = datalab.ml.ModelVersions(MODEL_NAME)
model.deploy(version_name=VERSION,
path='gs://BUCKET/training_dir/model')
Note that the model must be on GCS.
"""
import google.datalab.ml as ml
if isinstance(data, pd.DataFrame):
# write the df to csv.
string_buffer = io.StringIO()
data.to_csv(string_buffer, header=None, index=False)
input_data = string_buffer.getvalue().split('\n')
# remove empty strings
input_data = [line for line in input_data if line]
else:
input_data = data
predictions = ml.ModelVersions(model_name).predict(model_version, input_data)
# Convert predictions into a dataframe
df = pd.DataFrame(columns=sorted(predictions[0].keys()))
for i in range(len(predictions)):
for k, v in predictions[i].iteritems():
df.loc[i, k] = v
return df
|
[
"def",
"cloud_predict",
"(",
"model_name",
",",
"model_version",
",",
"data",
")",
":",
"import",
"google",
".",
"datalab",
".",
"ml",
"as",
"ml",
"if",
"isinstance",
"(",
"data",
",",
"pd",
".",
"DataFrame",
")",
":",
"# write the df to csv.",
"string_buffer",
"=",
"io",
".",
"StringIO",
"(",
")",
"data",
".",
"to_csv",
"(",
"string_buffer",
",",
"header",
"=",
"None",
",",
"index",
"=",
"False",
")",
"input_data",
"=",
"string_buffer",
".",
"getvalue",
"(",
")",
".",
"split",
"(",
"'\\n'",
")",
"# remove empty strings",
"input_data",
"=",
"[",
"line",
"for",
"line",
"in",
"input_data",
"if",
"line",
"]",
"else",
":",
"input_data",
"=",
"data",
"predictions",
"=",
"ml",
".",
"ModelVersions",
"(",
"model_name",
")",
".",
"predict",
"(",
"model_version",
",",
"input_data",
")",
"# Convert predictions into a dataframe",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"columns",
"=",
"sorted",
"(",
"predictions",
"[",
"0",
"]",
".",
"keys",
"(",
")",
")",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"predictions",
")",
")",
":",
"for",
"k",
",",
"v",
"in",
"predictions",
"[",
"i",
"]",
".",
"iteritems",
"(",
")",
":",
"df",
".",
"loc",
"[",
"i",
",",
"k",
"]",
"=",
"v",
"return",
"df"
] |
Use Online prediction.
Runs online prediction in the cloud and prints the results to the screen. For
running prediction on a large dataset or saving the results, run
local_batch_prediction or batch_prediction.
Args:
model_name: deployed model name
model_version: deployed model version
data: List of csv strings or a Pandas DataFrame that match the model schema.
Before using this, the model must be created. This can be done by running
two gcloud commands:
1) gcloud beta ml models create NAME
2) gcloud beta ml versions create VERSION --model NAME \
--origin gs://BUCKET/training_dir/model
or these datalab commands:
1) import google.datalab as datalab
model = datalab.ml.ModelVersions(MODEL_NAME)
model.deploy(version_name=VERSION,
path='gs://BUCKET/training_dir/model')
Note that the model must be on GCS.
|
[
"Use",
"Online",
"prediction",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/_package.py#L671-L715
|
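A hedged usage sketch for cloud_predict: the model name and version are placeholders and must already be deployed as described in the docstring, and the import path is an assumption derived from this record's path column.
from mltoolbox._structured_data._package import cloud_predict  # assumed import path
import pandas as pd

instances = ['15,engineer,blue', '38,teacher,green']   # placeholder rows
df = cloud_predict('my_model', 'v1', instances)        # placeholder model name and version

# A DataFrame is serialized to header-less CSV lines before being sent.
frame = pd.DataFrame([[15, 'engineer', 'blue']], columns=['age', 'job', 'color'])
df = cloud_predict('my_model', 'v1', frame)
print(df)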
4,883
|
googledatalab/pydatalab
|
solutionbox/structured_data/mltoolbox/_structured_data/_package.py
|
batch_predict
|
def batch_predict(training_dir, prediction_input_file, output_dir,
mode, batch_size=16, shard_files=True, output_format='csv',
cloud=False):
"""Blocking versoin of batch_predict.
See documentation of batch_prediction_async.
"""
job = batch_predict_async(
training_dir=training_dir,
prediction_input_file=prediction_input_file,
output_dir=output_dir,
mode=mode,
batch_size=batch_size,
shard_files=shard_files,
output_format=output_format,
cloud=cloud)
job.wait()
print('Batch predict: ' + str(job.state))
|
python
|
def batch_predict(training_dir, prediction_input_file, output_dir,
mode, batch_size=16, shard_files=True, output_format='csv',
cloud=False):
"""Blocking versoin of batch_predict.
See documentation of batch_prediction_async.
"""
job = batch_predict_async(
training_dir=training_dir,
prediction_input_file=prediction_input_file,
output_dir=output_dir,
mode=mode,
batch_size=batch_size,
shard_files=shard_files,
output_format=output_format,
cloud=cloud)
job.wait()
print('Batch predict: ' + str(job.state))
|
[
"def",
"batch_predict",
"(",
"training_dir",
",",
"prediction_input_file",
",",
"output_dir",
",",
"mode",
",",
"batch_size",
"=",
"16",
",",
"shard_files",
"=",
"True",
",",
"output_format",
"=",
"'csv'",
",",
"cloud",
"=",
"False",
")",
":",
"job",
"=",
"batch_predict_async",
"(",
"training_dir",
"=",
"training_dir",
",",
"prediction_input_file",
"=",
"prediction_input_file",
",",
"output_dir",
"=",
"output_dir",
",",
"mode",
"=",
"mode",
",",
"batch_size",
"=",
"batch_size",
",",
"shard_files",
"=",
"shard_files",
",",
"output_format",
"=",
"output_format",
",",
"cloud",
"=",
"cloud",
")",
"job",
".",
"wait",
"(",
")",
"print",
"(",
"'Batch predict: '",
"+",
"str",
"(",
"job",
".",
"state",
")",
")"
] |
Blocking version of batch_predict.
See documentation of batch_predict_async.
|
[
"Blocking",
"versoin",
"of",
"batch_predict",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/_package.py#L722-L739
|
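A usage sketch for the blocking batch_predict wrapper; all paths are placeholders, cloud=False keeps the run local, and the import path is an assumption derived from this record's path column.
from mltoolbox._structured_data._package import batch_predict  # assumed import path

batch_predict(
    training_dir='./my_training_output',            # placeholder paths
    prediction_input_file='./data/predict*.csv',
    output_dir='./batch_output',
    mode='prediction',     # 'evaluation' if the input still contains the target column
    batch_size=16,
    shard_files=False,
    output_format='csv',
    cloud=False)           # blocks until the job finishes, then prints its state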
4,884
|
googledatalab/pydatalab
|
solutionbox/structured_data/mltoolbox/_structured_data/_package.py
|
batch_predict_async
|
def batch_predict_async(training_dir, prediction_input_file, output_dir,
mode, batch_size=16, shard_files=True, output_format='csv', cloud=False):
"""Local and cloud batch prediction.
Args:
training_dir: The output folder of training.
prediction_input_file: csv file pattern to a file. File must be on GCS if
running cloud prediction.
output_dir: output location to save the results. Must be a GCS path if
running cloud prediction.
mode: 'evaluation' or 'prediction'. If 'evaluation', the input data must
contain a target column. If 'prediction', the input data must not
contain a target column.
batch_size: Int. How many instances to run in memory at once. Larger values
mean better performance but more memory consumed.
shard_files: If False, the output files are not sharded.
output_format: csv or json. JSON files are newline-delimited.
cloud: If true, runs cloud batch prediction. If False, runs batch prediction
locally.
Returns:
A google.datalab.utils.Job object that can be used to query state from or wait.
"""
import google.datalab.utils as du
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if cloud:
runner_results = cloud_batch_predict(training_dir, prediction_input_file, output_dir, mode,
batch_size, shard_files, output_format)
job = du.DataflowJob(runner_results)
else:
runner_results = local_batch_predict(training_dir, prediction_input_file, output_dir, mode,
batch_size, shard_files, output_format)
job = du.LambdaJob(lambda: runner_results.wait_until_finish(), job_id=None)
return job
|
python
|
def batch_predict_async(training_dir, prediction_input_file, output_dir,
mode, batch_size=16, shard_files=True, output_format='csv', cloud=False):
"""Local and cloud batch prediction.
Args:
training_dir: The output folder of training.
prediction_input_file: csv file pattern to a file. File must be on GCS if
running cloud prediction.
output_dir: output location to save the results. Must be a GCS path if
running cloud prediction.
mode: 'evaluation' or 'prediction'. If 'evaluation', the input data must
contain a target column. If 'prediction', the input data must not
contain a target column.
batch_size: Int. How many instances to run in memory at once. Larger values
mean better performance but more memory consumed.
shard_files: If False, the output files are not sharded.
output_format: csv or json. JSON files are newline-delimited.
cloud: If true, runs cloud batch prediction. If False, runs batch prediction
locally.
Returns:
A google.datalab.utils.Job object that can be used to query state from or wait.
"""
import google.datalab.utils as du
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if cloud:
runner_results = cloud_batch_predict(training_dir, prediction_input_file, output_dir, mode,
batch_size, shard_files, output_format)
job = du.DataflowJob(runner_results)
else:
runner_results = local_batch_predict(training_dir, prediction_input_file, output_dir, mode,
batch_size, shard_files, output_format)
job = du.LambdaJob(lambda: runner_results.wait_until_finish(), job_id=None)
return job
|
[
"def",
"batch_predict_async",
"(",
"training_dir",
",",
"prediction_input_file",
",",
"output_dir",
",",
"mode",
",",
"batch_size",
"=",
"16",
",",
"shard_files",
"=",
"True",
",",
"output_format",
"=",
"'csv'",
",",
"cloud",
"=",
"False",
")",
":",
"import",
"google",
".",
"datalab",
".",
"utils",
"as",
"du",
"with",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"warnings",
".",
"simplefilter",
"(",
"\"ignore\"",
")",
"if",
"cloud",
":",
"runner_results",
"=",
"cloud_batch_predict",
"(",
"training_dir",
",",
"prediction_input_file",
",",
"output_dir",
",",
"mode",
",",
"batch_size",
",",
"shard_files",
",",
"output_format",
")",
"job",
"=",
"du",
".",
"DataflowJob",
"(",
"runner_results",
")",
"else",
":",
"runner_results",
"=",
"local_batch_predict",
"(",
"training_dir",
",",
"prediction_input_file",
",",
"output_dir",
",",
"mode",
",",
"batch_size",
",",
"shard_files",
",",
"output_format",
")",
"job",
"=",
"du",
".",
"LambdaJob",
"(",
"lambda",
":",
"runner_results",
".",
"wait_until_finish",
"(",
")",
",",
"job_id",
"=",
"None",
")",
"return",
"job"
] |
Local and cloud batch prediction.
Args:
training_dir: The output folder of training.
prediction_input_file: csv file pattern to a file. File must be on GCS if
running cloud prediction.
output_dir: output location to save the results. Must be a GCS path if
running cloud prediction.
mode: 'evaluation' or 'prediction'. If 'evaluation', the input data must
contain a target column. If 'prediction', the input data must not
contain a target column.
batch_size: Int. How many instances to run in memory at once. Larger values
mean better performance but more memory consumed.
shard_files: If False, the output files are not sharded.
output_format: csv or json. JSON files are newline-delimited.
cloud: If true, runs cloud batch prediction. If False, runs batch prediction
locally.
Returns:
A google.datalab.utils.Job object that can be used to query state from or wait.
|
[
"Local",
"and",
"cloud",
"batch",
"prediction",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/_package.py#L742-L777
|
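A sketch of the non-blocking variant: unlike batch_predict above, the caller gets the Job wrapper back immediately and decides when to wait. Paths are placeholders and the import path is an assumption derived from this record's path column.
from mltoolbox._structured_data._package import batch_predict_async  # assumed import path

job = batch_predict_async(
    training_dir='./my_training_output',            # placeholder paths
    prediction_input_file='./data/predict*.csv',
    output_dir='./batch_output',
    mode='evaluation',
    cloud=False)
job.wait()           # block on the local Beam pipeline when convenient
print(job.state)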
4,885
|
googledatalab/pydatalab
|
solutionbox/structured_data/mltoolbox/_structured_data/prediction/predict.py
|
make_prediction_pipeline
|
def make_prediction_pipeline(pipeline, args):
"""Builds the prediction pipeline.
Reads the csv files, prepends a ',' if the target column is missing, runs
prediction, and then prints the formatted results to a file.
Args:
pipeline: the pipeline
args: command line args
"""
# DF bug: DF does not work with unicode strings
predicted_values, errors = (
pipeline |
'Read CSV Files' >>
beam.io.ReadFromText(str(args.predict_data),
strip_trailing_newlines=True) |
'Batch Input' >>
beam.ParDo(EmitAsBatchDoFn(args.batch_size)) |
'Run TF Graph on Batches' >>
beam.ParDo(RunGraphDoFn(args.trained_model_dir)).with_outputs('errors', main='main'))
((predicted_values, errors) |
'Format and Save' >>
FormatAndSave(args))
|
python
|
def make_prediction_pipeline(pipeline, args):
"""Builds the prediction pipeline.
Reads the csv files, prepends a ',' if the target column is missing, runs
prediction, and then prints the formatted results to a file.
Args:
pipeline: the pipeline
args: command line args
"""
# DF bug: DF does not work with unicode strings
predicted_values, errors = (
pipeline |
'Read CSV Files' >>
beam.io.ReadFromText(str(args.predict_data),
strip_trailing_newlines=True) |
'Batch Input' >>
beam.ParDo(EmitAsBatchDoFn(args.batch_size)) |
'Run TF Graph on Batches' >>
beam.ParDo(RunGraphDoFn(args.trained_model_dir)).with_outputs('errors', main='main'))
((predicted_values, errors) |
'Format and Save' >>
FormatAndSave(args))
|
[
"def",
"make_prediction_pipeline",
"(",
"pipeline",
",",
"args",
")",
":",
"# DF bug: DF does not work with unicode strings",
"predicted_values",
",",
"errors",
"=",
"(",
"pipeline",
"|",
"'Read CSV Files'",
">>",
"beam",
".",
"io",
".",
"ReadFromText",
"(",
"str",
"(",
"args",
".",
"predict_data",
")",
",",
"strip_trailing_newlines",
"=",
"True",
")",
"|",
"'Batch Input'",
">>",
"beam",
".",
"ParDo",
"(",
"EmitAsBatchDoFn",
"(",
"args",
".",
"batch_size",
")",
")",
"|",
"'Run TF Graph on Batches'",
">>",
"beam",
".",
"ParDo",
"(",
"RunGraphDoFn",
"(",
"args",
".",
"trained_model_dir",
")",
")",
".",
"with_outputs",
"(",
"'errors'",
",",
"main",
"=",
"'main'",
")",
")",
"(",
"(",
"predicted_values",
",",
"errors",
")",
"|",
"'Format and Save'",
">>",
"FormatAndSave",
"(",
"args",
")",
")"
] |
Builds the prediction pipeline.
Reads the csv files, prepends a ',' if the target column is missing, runs
prediction, and then prints the formatted results to a file.
Args:
pipeline: the pipeline
args: command line args
|
[
"Builds",
"the",
"prediction",
"pipeline",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/prediction/predict.py#L349-L373
|
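A rough sketch of how make_prediction_pipeline could be driven with the DirectRunner. The code above only shows that it reads args.predict_data, args.batch_size and args.trained_model_dir; the remaining attribute names (output_dir, output_format) are assumptions based on the CLI flags used elsewhere in this file, so treat them and all paths as placeholders.
import argparse
import apache_beam as beam

args = argparse.Namespace(
    predict_data='./data/predict*.csv',              # placeholder paths
    trained_model_dir='./my_training_output/model',
    batch_size=16,
    output_dir='./prediction_output',                # assumed attribute names,
    output_format='csv')                             # presumably read by FormatAndSave

p = beam.Pipeline('DirectRunner')
make_prediction_pipeline(p, args)   # Read CSV -> Batch Input -> Run TF Graph -> Format and Save
p.run().wait_until_finish()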
4,886
|
googledatalab/pydatalab
|
solutionbox/structured_data/mltoolbox/_structured_data/prediction/predict.py
|
RunGraphDoFn.process
|
def process(self, element):
"""Run batch prediciton on a TF graph.
Args:
element: list of strings, representing one batch input to the TF graph.
"""
import collections
import apache_beam as beam
num_in_batch = 0
try:
assert self._session is not None
feed_dict = collections.defaultdict(list)
for line in element:
# Remove trailing newline.
if line.endswith('\n'):
line = line[:-1]
feed_dict[self._input_alias_map.values()[0]].append(line)
num_in_batch += 1
# batch_result is list of numpy arrays with batch_size many rows.
batch_result = self._session.run(fetches=self._tensor_names,
feed_dict=feed_dict)
# ex batch_result for batch_size > 1:
# (array([value1, value2, ..., value_batch_size]),
# array([[a1, b1, c1]], ..., [a_batch_size, b_batch_size, c_batch_size]]),
# ...)
# ex batch_result for batch_size == 1:
# (value,
# array([a1, b1, c1]),
# ...)
# Convert the results into a dict and unbatch the results.
if num_in_batch > 1:
for result in zip(*batch_result):
predictions = {}
for name, value in zip(self._aliases, result):
predictions[name] = (value.tolist() if getattr(value, 'tolist', None) else value)
yield predictions
else:
predictions = {}
for i in range(len(self._aliases)):
value = batch_result[i]
value = (value.tolist() if getattr(value, 'tolist', None)
else value)
predictions[self._aliases[i]] = value
yield predictions
except Exception as e: # pylint: disable=broad-except
yield beam.pvalue.TaggedOutput('errors', (str(e), element))
|
python
|
def process(self, element):
"""Run batch prediciton on a TF graph.
Args:
element: list of strings, representing one batch input to the TF graph.
"""
import collections
import apache_beam as beam
num_in_batch = 0
try:
assert self._session is not None
feed_dict = collections.defaultdict(list)
for line in element:
# Remove trailing newline.
if line.endswith('\n'):
line = line[:-1]
feed_dict[self._input_alias_map.values()[0]].append(line)
num_in_batch += 1
# batch_result is list of numpy arrays with batch_size many rows.
batch_result = self._session.run(fetches=self._tensor_names,
feed_dict=feed_dict)
# ex batch_result for batch_size > 1:
# (array([value1, value2, ..., value_batch_size]),
# array([[a1, b1, c1]], ..., [a_batch_size, b_batch_size, c_batch_size]]),
# ...)
# ex batch_result for batch_size == 1:
# (value,
# array([a1, b1, c1]),
# ...)
# Convert the results into a dict and unbatch the results.
if num_in_batch > 1:
for result in zip(*batch_result):
predictions = {}
for name, value in zip(self._aliases, result):
predictions[name] = (value.tolist() if getattr(value, 'tolist', None) else value)
yield predictions
else:
predictions = {}
for i in range(len(self._aliases)):
value = batch_result[i]
value = (value.tolist() if getattr(value, 'tolist', None)
else value)
predictions[self._aliases[i]] = value
yield predictions
except Exception as e: # pylint: disable=broad-except
yield beam.pvalue.TaggedOutput('errors', (str(e), element))
|
[
"def",
"process",
"(",
"self",
",",
"element",
")",
":",
"import",
"collections",
"import",
"apache_beam",
"as",
"beam",
"num_in_batch",
"=",
"0",
"try",
":",
"assert",
"self",
".",
"_session",
"is",
"not",
"None",
"feed_dict",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"for",
"line",
"in",
"element",
":",
"# Remove trailing newline.",
"if",
"line",
".",
"endswith",
"(",
"'\\n'",
")",
":",
"line",
"=",
"line",
"[",
":",
"-",
"1",
"]",
"feed_dict",
"[",
"self",
".",
"_input_alias_map",
".",
"values",
"(",
")",
"[",
"0",
"]",
"]",
".",
"append",
"(",
"line",
")",
"num_in_batch",
"+=",
"1",
"# batch_result is list of numpy arrays with batch_size many rows.",
"batch_result",
"=",
"self",
".",
"_session",
".",
"run",
"(",
"fetches",
"=",
"self",
".",
"_tensor_names",
",",
"feed_dict",
"=",
"feed_dict",
")",
"# ex batch_result for batch_size > 1:",
"# (array([value1, value2, ..., value_batch_size]),",
"# array([[a1, b1, c1]], ..., [a_batch_size, b_batch_size, c_batch_size]]),",
"# ...)",
"# ex batch_result for batch_size == 1:",
"# (value,",
"# array([a1, b1, c1]),",
"# ...)",
"# Convert the results into a dict and unbatch the results.",
"if",
"num_in_batch",
">",
"1",
":",
"for",
"result",
"in",
"zip",
"(",
"*",
"batch_result",
")",
":",
"predictions",
"=",
"{",
"}",
"for",
"name",
",",
"value",
"in",
"zip",
"(",
"self",
".",
"_aliases",
",",
"result",
")",
":",
"predictions",
"[",
"name",
"]",
"=",
"(",
"value",
".",
"tolist",
"(",
")",
"if",
"getattr",
"(",
"value",
",",
"'tolist'",
",",
"None",
")",
"else",
"value",
")",
"yield",
"predictions",
"else",
":",
"predictions",
"=",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"_aliases",
")",
")",
":",
"value",
"=",
"batch_result",
"[",
"i",
"]",
"value",
"=",
"(",
"value",
".",
"tolist",
"(",
")",
"if",
"getattr",
"(",
"value",
",",
"'tolist'",
",",
"None",
")",
"else",
"value",
")",
"predictions",
"[",
"self",
".",
"_aliases",
"[",
"i",
"]",
"]",
"=",
"value",
"yield",
"predictions",
"except",
"Exception",
"as",
"e",
":",
"# pylint: disable=broad-except",
"yield",
"beam",
".",
"pvalue",
".",
"TaggedOutput",
"(",
"'errors'",
",",
"(",
"str",
"(",
"e",
")",
",",
"element",
")",
")"
] |
Run batch prediction on a TF graph.
Args:
element: list of strings, representing one batch input to the TF graph.
|
[
"Run",
"batch",
"prediciton",
"on",
"a",
"TF",
"graph",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/prediction/predict.py#L165-L218
|
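The unbatching branch above relies on the fact that session.run returns one array per fetched tensor, so zip(*batch_result) pairs up per-instance values. This standalone numpy sketch (no Beam or TensorFlow needed) shows the same reshaping on made-up values.
import numpy as np

aliases = ['predicted', 'score']
# What session.run might return for a batch of three instances (illustrative values).
batch_result = (np.array([0, 1, 1]),
                np.array([0.9, 0.7, 0.8]))

predictions = []
for result in zip(*batch_result):          # one tuple per instance
    predictions.append({name: value.tolist() if hasattr(value, 'tolist') else value
                        for name, value in zip(aliases, result)})
print(predictions)
# e.g. [{'predicted': 0, 'score': 0.9}, {'predicted': 1, 'score': 0.7}, {'predicted': 1, 'score': 0.8}]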
4,887
|
googledatalab/pydatalab
|
solutionbox/structured_data/mltoolbox/_structured_data/prediction/predict.py
|
CSVCoder.encode
|
def encode(self, tf_graph_predictions):
"""Encodes the graph json prediction into csv.
Args:
tf_graph_predictions: python dict.
Returns:
csv string.
"""
row = []
for col in self._header:
row.append(str(tf_graph_predictions[col]))
return ','.join(row)
|
python
|
def encode(self, tf_graph_predictions):
"""Encodes the graph json prediction into csv.
Args:
tf_graph_predictions: python dict.
Returns:
csv string.
"""
row = []
for col in self._header:
row.append(str(tf_graph_predictions[col]))
return ','.join(row)
|
[
"def",
"encode",
"(",
"self",
",",
"tf_graph_predictions",
")",
":",
"row",
"=",
"[",
"]",
"for",
"col",
"in",
"self",
".",
"_header",
":",
"row",
".",
"append",
"(",
"str",
"(",
"tf_graph_predictions",
"[",
"col",
"]",
")",
")",
"return",
"','",
".",
"join",
"(",
"row",
")"
] |
Encodes the graph json prediction into csv.
Args:
tf_graph_predictions: python dict.
Returns:
csv string.
|
[
"Encodes",
"the",
"graph",
"json",
"prediction",
"into",
"csv",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/prediction/predict.py#L251-L264
|
4,888
|
googledatalab/pydatalab
|
google/datalab/stackdriver/monitoring/_query_metadata.py
|
QueryMetadata.as_dataframe
|
def as_dataframe(self, max_rows=None):
"""Creates a pandas dataframe from the query metadata.
Args:
max_rows: The maximum number of timeseries metadata to return. If None,
return all.
Returns:
A pandas dataframe containing the resource type, resource labels and
metric labels. Each row in this dataframe corresponds to the metadata
from one time series.
"""
max_rows = len(self._timeseries_list) if max_rows is None else max_rows
headers = [{
'resource': ts.resource._asdict(), 'metric': ts.metric._asdict()}
for ts in self._timeseries_list[:max_rows]]
if not headers:
return pandas.DataFrame()
dataframe = pandas.io.json.json_normalize(headers)
# Add a 2 level column header.
dataframe.columns = pandas.MultiIndex.from_tuples(
[(col, '') if col == 'resource.type' else col.rsplit('.', 1)
for col in dataframe.columns])
# Re-order the columns.
resource_keys = google.cloud.monitoring._dataframe._sorted_resource_labels(
dataframe['resource.labels'].columns)
sorted_columns = [('resource.type', '')]
sorted_columns += [('resource.labels', key) for key in resource_keys]
sorted_columns += sorted(col for col in dataframe.columns
if col[0] == 'metric.labels')
dataframe = dataframe[sorted_columns]
# Sort the data, and clean up index values, and NaNs.
dataframe = dataframe.sort_values(sorted_columns)
dataframe = dataframe.reset_index(drop=True).fillna('')
return dataframe
|
python
|
def as_dataframe(self, max_rows=None):
"""Creates a pandas dataframe from the query metadata.
Args:
max_rows: The maximum number of timeseries metadata to return. If None,
return all.
Returns:
A pandas dataframe containing the resource type, resource labels and
metric labels. Each row in this dataframe corresponds to the metadata
from one time series.
"""
max_rows = len(self._timeseries_list) if max_rows is None else max_rows
headers = [{
'resource': ts.resource._asdict(), 'metric': ts.metric._asdict()}
for ts in self._timeseries_list[:max_rows]]
if not headers:
return pandas.DataFrame()
dataframe = pandas.io.json.json_normalize(headers)
# Add a 2 level column header.
dataframe.columns = pandas.MultiIndex.from_tuples(
[(col, '') if col == 'resource.type' else col.rsplit('.', 1)
for col in dataframe.columns])
# Re-order the columns.
resource_keys = google.cloud.monitoring._dataframe._sorted_resource_labels(
dataframe['resource.labels'].columns)
sorted_columns = [('resource.type', '')]
sorted_columns += [('resource.labels', key) for key in resource_keys]
sorted_columns += sorted(col for col in dataframe.columns
if col[0] == 'metric.labels')
dataframe = dataframe[sorted_columns]
# Sort the data, and clean up index values, and NaNs.
dataframe = dataframe.sort_values(sorted_columns)
dataframe = dataframe.reset_index(drop=True).fillna('')
return dataframe
|
[
"def",
"as_dataframe",
"(",
"self",
",",
"max_rows",
"=",
"None",
")",
":",
"max_rows",
"=",
"len",
"(",
"self",
".",
"_timeseries_list",
")",
"if",
"max_rows",
"is",
"None",
"else",
"max_rows",
"headers",
"=",
"[",
"{",
"'resource'",
":",
"ts",
".",
"resource",
".",
"_asdict",
"(",
")",
",",
"'metric'",
":",
"ts",
".",
"metric",
".",
"_asdict",
"(",
")",
"}",
"for",
"ts",
"in",
"self",
".",
"_timeseries_list",
"[",
":",
"max_rows",
"]",
"]",
"if",
"not",
"headers",
":",
"return",
"pandas",
".",
"DataFrame",
"(",
")",
"dataframe",
"=",
"pandas",
".",
"io",
".",
"json",
".",
"json_normalize",
"(",
"headers",
")",
"# Add a 2 level column header.",
"dataframe",
".",
"columns",
"=",
"pandas",
".",
"MultiIndex",
".",
"from_tuples",
"(",
"[",
"(",
"col",
",",
"''",
")",
"if",
"col",
"==",
"'resource.type'",
"else",
"col",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"for",
"col",
"in",
"dataframe",
".",
"columns",
"]",
")",
"# Re-order the columns.",
"resource_keys",
"=",
"google",
".",
"cloud",
".",
"monitoring",
".",
"_dataframe",
".",
"_sorted_resource_labels",
"(",
"dataframe",
"[",
"'resource.labels'",
"]",
".",
"columns",
")",
"sorted_columns",
"=",
"[",
"(",
"'resource.type'",
",",
"''",
")",
"]",
"sorted_columns",
"+=",
"[",
"(",
"'resource.labels'",
",",
"key",
")",
"for",
"key",
"in",
"resource_keys",
"]",
"sorted_columns",
"+=",
"sorted",
"(",
"col",
"for",
"col",
"in",
"dataframe",
".",
"columns",
"if",
"col",
"[",
"0",
"]",
"==",
"'metric.labels'",
")",
"dataframe",
"=",
"dataframe",
"[",
"sorted_columns",
"]",
"# Sort the data, and clean up index values, and NaNs.",
"dataframe",
"=",
"dataframe",
".",
"sort_values",
"(",
"sorted_columns",
")",
"dataframe",
"=",
"dataframe",
".",
"reset_index",
"(",
"drop",
"=",
"True",
")",
".",
"fillna",
"(",
"''",
")",
"return",
"dataframe"
] |
Creates a pandas dataframe from the query metadata.
Args:
max_rows: The maximum number of timeseries metadata to return. If None,
return all.
Returns:
A pandas dataframe containing the resource type, resource labels and
metric labels. Each row in this dataframe corresponds to the metadata
from one time series.
|
[
"Creates",
"a",
"pandas",
"dataframe",
"from",
"the",
"query",
"metadata",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/stackdriver/monitoring/_query_metadata.py#L53-L92
|
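The core trick in as_dataframe is splitting flattened column names such as 'metric.labels.instance_name' into a two-level MultiIndex. Below is a standalone sketch on made-up metadata, using the modern pandas.json_normalize (the code above uses the older pandas.io.json.json_normalize, which flattens nested dicts the same way); the reordering and sorting steps are omitted.
import pandas as pd

# Made-up time-series metadata in the shape produced by the _asdict() calls above.
headers = [
    {'resource': {'type': 'gce_instance', 'labels': {'zone': 'us-central1-a'}},
     'metric': {'type': 'cpu/utilization', 'labels': {'instance_name': 'vm-1'}}},
]
df = pd.json_normalize(headers)            # columns like 'resource.labels.zone'
df.columns = pd.MultiIndex.from_tuples(
    [(col, '') if col == 'resource.type' else tuple(col.rsplit('.', 1))
     for col in df.columns])
print(df.columns.tolist())
# -> [('resource.type', ''), ('resource.labels', 'zone'), ('metric', 'type'), ('metric.labels', 'instance_name')]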
4,889
|
googledatalab/pydatalab
|
solutionbox/image_classification/mltoolbox/image/classification/_util.py
|
get_train_eval_files
|
def get_train_eval_files(input_dir):
"""Get preprocessed training and eval files."""
data_dir = _get_latest_data_dir(input_dir)
train_pattern = os.path.join(data_dir, 'train*.tfrecord.gz')
eval_pattern = os.path.join(data_dir, 'eval*.tfrecord.gz')
train_files = file_io.get_matching_files(train_pattern)
eval_files = file_io.get_matching_files(eval_pattern)
return train_files, eval_files
|
python
|
def get_train_eval_files(input_dir):
"""Get preprocessed training and eval files."""
data_dir = _get_latest_data_dir(input_dir)
train_pattern = os.path.join(data_dir, 'train*.tfrecord.gz')
eval_pattern = os.path.join(data_dir, 'eval*.tfrecord.gz')
train_files = file_io.get_matching_files(train_pattern)
eval_files = file_io.get_matching_files(eval_pattern)
return train_files, eval_files
|
[
"def",
"get_train_eval_files",
"(",
"input_dir",
")",
":",
"data_dir",
"=",
"_get_latest_data_dir",
"(",
"input_dir",
")",
"train_pattern",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_dir",
",",
"'train*.tfrecord.gz'",
")",
"eval_pattern",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_dir",
",",
"'eval*.tfrecord.gz'",
")",
"train_files",
"=",
"file_io",
".",
"get_matching_files",
"(",
"train_pattern",
")",
"eval_files",
"=",
"file_io",
".",
"get_matching_files",
"(",
"eval_pattern",
")",
"return",
"train_files",
",",
"eval_files"
] |
Get preprocessed training and eval files.
|
[
"Get",
"preprocessed",
"training",
"and",
"eval",
"files",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_util.py#L53-L60
|
4,890
|
googledatalab/pydatalab
|
solutionbox/image_classification/mltoolbox/image/classification/_util.py
|
get_labels
|
def get_labels(input_dir):
"""Get a list of labels from preprocessed output dir."""
data_dir = _get_latest_data_dir(input_dir)
labels_file = os.path.join(data_dir, 'labels')
with file_io.FileIO(labels_file, 'r') as f:
labels = f.read().rstrip().split('\n')
return labels
|
python
|
def get_labels(input_dir):
"""Get a list of labels from preprocessed output dir."""
data_dir = _get_latest_data_dir(input_dir)
labels_file = os.path.join(data_dir, 'labels')
with file_io.FileIO(labels_file, 'r') as f:
labels = f.read().rstrip().split('\n')
return labels
|
[
"def",
"get_labels",
"(",
"input_dir",
")",
":",
"data_dir",
"=",
"_get_latest_data_dir",
"(",
"input_dir",
")",
"labels_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_dir",
",",
"'labels'",
")",
"with",
"file_io",
".",
"FileIO",
"(",
"labels_file",
",",
"'r'",
")",
"as",
"f",
":",
"labels",
"=",
"f",
".",
"read",
"(",
")",
".",
"rstrip",
"(",
")",
".",
"split",
"(",
"'\\n'",
")",
"return",
"labels"
] |
Get a list of labels from preprocessed output dir.
|
[
"Get",
"a",
"list",
"of",
"labels",
"from",
"preprocessed",
"output",
"dir",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_util.py#L63-L69
|
4,891
|
googledatalab/pydatalab
|
solutionbox/image_classification/mltoolbox/image/classification/_util.py
|
override_if_not_in_args
|
def override_if_not_in_args(flag, argument, args):
"""Checks if flags is in args, and if not it adds the flag to args."""
if flag not in args:
args.extend([flag, argument])
|
python
|
def override_if_not_in_args(flag, argument, args):
"""Checks if flags is in args, and if not it adds the flag to args."""
if flag not in args:
args.extend([flag, argument])
|
[
"def",
"override_if_not_in_args",
"(",
"flag",
",",
"argument",
",",
"args",
")",
":",
"if",
"flag",
"not",
"in",
"args",
":",
"args",
".",
"extend",
"(",
"[",
"flag",
",",
"argument",
"]",
")"
] |
Checks if flag is in args, and if not it adds the flag to args.
|
[
"Checks",
"if",
"flags",
"is",
"in",
"args",
"and",
"if",
"not",
"it",
"adds",
"the",
"flag",
"to",
"args",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_util.py#L120-L123
|
4,892
|
googledatalab/pydatalab
|
solutionbox/image_classification/mltoolbox/image/classification/_util.py
|
loss
|
def loss(loss_value):
"""Calculates aggregated mean loss."""
total_loss = tf.Variable(0.0, False)
loss_count = tf.Variable(0, False)
total_loss_update = tf.assign_add(total_loss, loss_value)
loss_count_update = tf.assign_add(loss_count, 1)
loss_op = total_loss / tf.cast(loss_count, tf.float32)
return [total_loss_update, loss_count_update], loss_op
|
python
|
def loss(loss_value):
"""Calculates aggregated mean loss."""
total_loss = tf.Variable(0.0, False)
loss_count = tf.Variable(0, False)
total_loss_update = tf.assign_add(total_loss, loss_value)
loss_count_update = tf.assign_add(loss_count, 1)
loss_op = total_loss / tf.cast(loss_count, tf.float32)
return [total_loss_update, loss_count_update], loss_op
|
[
"def",
"loss",
"(",
"loss_value",
")",
":",
"total_loss",
"=",
"tf",
".",
"Variable",
"(",
"0.0",
",",
"False",
")",
"loss_count",
"=",
"tf",
".",
"Variable",
"(",
"0",
",",
"False",
")",
"total_loss_update",
"=",
"tf",
".",
"assign_add",
"(",
"total_loss",
",",
"loss_value",
")",
"loss_count_update",
"=",
"tf",
".",
"assign_add",
"(",
"loss_count",
",",
"1",
")",
"loss_op",
"=",
"total_loss",
"/",
"tf",
".",
"cast",
"(",
"loss_count",
",",
"tf",
".",
"float32",
")",
"return",
"[",
"total_loss_update",
",",
"loss_count_update",
"]",
",",
"loss_op"
] |
Calculates aggregated mean loss.
|
[
"Calculates",
"aggregated",
"mean",
"loss",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_util.py#L126-L133
|
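A TF 1.x sketch of how the two update ops and the derived ratio are meant to be used across batches, assuming the loss() helper above is in scope; the per-batch loss values are illustrative.
import tensorflow as tf   # TF 1.x style, matching the code above

loss_value = tf.placeholder(tf.float32)
updates, mean_loss = loss(loss_value)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for batch_loss in [0.9, 0.7, 0.5]:                      # illustrative values
        sess.run(updates, feed_dict={loss_value: batch_loss})
    print(sess.run(mean_loss))                              # 0.7, the running mean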
4,893
|
googledatalab/pydatalab
|
solutionbox/image_classification/mltoolbox/image/classification/_util.py
|
accuracy
|
def accuracy(logits, labels):
"""Calculates aggregated accuracy."""
is_correct = tf.nn.in_top_k(logits, labels, 1)
correct = tf.reduce_sum(tf.cast(is_correct, tf.int32))
incorrect = tf.reduce_sum(tf.cast(tf.logical_not(is_correct), tf.int32))
correct_count = tf.Variable(0, False)
incorrect_count = tf.Variable(0, False)
correct_count_update = tf.assign_add(correct_count, correct)
incorrect_count_update = tf.assign_add(incorrect_count, incorrect)
accuracy_op = tf.cast(correct_count, tf.float32) / tf.cast(
correct_count + incorrect_count, tf.float32)
return [correct_count_update, incorrect_count_update], accuracy_op
|
python
|
def accuracy(logits, labels):
"""Calculates aggregated accuracy."""
is_correct = tf.nn.in_top_k(logits, labels, 1)
correct = tf.reduce_sum(tf.cast(is_correct, tf.int32))
incorrect = tf.reduce_sum(tf.cast(tf.logical_not(is_correct), tf.int32))
correct_count = tf.Variable(0, False)
incorrect_count = tf.Variable(0, False)
correct_count_update = tf.assign_add(correct_count, correct)
incorrect_count_update = tf.assign_add(incorrect_count, incorrect)
accuracy_op = tf.cast(correct_count, tf.float32) / tf.cast(
correct_count + incorrect_count, tf.float32)
return [correct_count_update, incorrect_count_update], accuracy_op
|
[
"def",
"accuracy",
"(",
"logits",
",",
"labels",
")",
":",
"is_correct",
"=",
"tf",
".",
"nn",
".",
"in_top_k",
"(",
"logits",
",",
"labels",
",",
"1",
")",
"correct",
"=",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"cast",
"(",
"is_correct",
",",
"tf",
".",
"int32",
")",
")",
"incorrect",
"=",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"cast",
"(",
"tf",
".",
"logical_not",
"(",
"is_correct",
")",
",",
"tf",
".",
"int32",
")",
")",
"correct_count",
"=",
"tf",
".",
"Variable",
"(",
"0",
",",
"False",
")",
"incorrect_count",
"=",
"tf",
".",
"Variable",
"(",
"0",
",",
"False",
")",
"correct_count_update",
"=",
"tf",
".",
"assign_add",
"(",
"correct_count",
",",
"correct",
")",
"incorrect_count_update",
"=",
"tf",
".",
"assign_add",
"(",
"incorrect_count",
",",
"incorrect",
")",
"accuracy_op",
"=",
"tf",
".",
"cast",
"(",
"correct_count",
",",
"tf",
".",
"float32",
")",
"/",
"tf",
".",
"cast",
"(",
"correct_count",
"+",
"incorrect_count",
",",
"tf",
".",
"float32",
")",
"return",
"[",
"correct_count_update",
",",
"incorrect_count_update",
"]",
",",
"accuracy_op"
] |
Calculates aggregated accuracy.
|
[
"Calculates",
"aggregated",
"accuracy",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_util.py#L136-L147
|
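A matching TF 1.x sketch for the streaming accuracy counters, assuming the accuracy() helper above is in scope; the logits and labels are made up so that one of the two instances is classified correctly.
import numpy as np
import tensorflow as tf   # TF 1.x style, matching the code above

logits = tf.placeholder(tf.float32, shape=[None, 3])
labels = tf.placeholder(tf.int32, shape=[None])
updates, accuracy_op = accuracy(logits, labels)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch_logits = np.array([[0.1, 0.8, 0.1], [0.2, 0.1, 0.7]], dtype=np.float32)
    batch_labels = np.array([1, 0], dtype=np.int32)         # second instance is wrong
    sess.run(updates, feed_dict={logits: batch_logits, labels: batch_labels})
    print(sess.run(accuracy_op))                            # 0.5 after this batch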
4,894
|
googledatalab/pydatalab
|
solutionbox/image_classification/mltoolbox/image/classification/_util.py
|
check_dataset
|
def check_dataset(dataset, mode):
"""Validate we have a good dataset."""
names = [x['name'] for x in dataset.schema]
types = [x['type'] for x in dataset.schema]
if mode == 'train':
if (set(['image_url', 'label']) != set(names) or any(t != 'STRING' for t in types)):
raise ValueError('Invalid dataset. Expect only "image_url,label" STRING columns.')
else:
if (set(['image_url']) != set(names) and set(['image_url', 'label']) != set(names)) or \
any(t != 'STRING' for t in types):
raise ValueError('Invalid dataset. Expect only "image_url" or "image_url,label" ' +
'STRING columns.')
|
python
|
def check_dataset(dataset, mode):
"""Validate we have a good dataset."""
names = [x['name'] for x in dataset.schema]
types = [x['type'] for x in dataset.schema]
if mode == 'train':
if (set(['image_url', 'label']) != set(names) or any(t != 'STRING' for t in types)):
raise ValueError('Invalid dataset. Expect only "image_url,label" STRING columns.')
else:
if (set(['image_url']) != set(names) and set(['image_url', 'label']) != set(names)) or \
any(t != 'STRING' for t in types):
raise ValueError('Invalid dataset. Expect only "image_url" or "image_url,label" ' +
'STRING columns.')
|
[
"def",
"check_dataset",
"(",
"dataset",
",",
"mode",
")",
":",
"names",
"=",
"[",
"x",
"[",
"'name'",
"]",
"for",
"x",
"in",
"dataset",
".",
"schema",
"]",
"types",
"=",
"[",
"x",
"[",
"'type'",
"]",
"for",
"x",
"in",
"dataset",
".",
"schema",
"]",
"if",
"mode",
"==",
"'train'",
":",
"if",
"(",
"set",
"(",
"[",
"'image_url'",
",",
"'label'",
"]",
")",
"!=",
"set",
"(",
"names",
")",
"or",
"any",
"(",
"t",
"!=",
"'STRING'",
"for",
"t",
"in",
"types",
")",
")",
":",
"raise",
"ValueError",
"(",
"'Invalid dataset. Expect only \"image_url,label\" STRING columns.'",
")",
"else",
":",
"if",
"(",
"set",
"(",
"[",
"'image_url'",
"]",
")",
"!=",
"set",
"(",
"names",
")",
"and",
"set",
"(",
"[",
"'image_url'",
",",
"'label'",
"]",
")",
"!=",
"set",
"(",
"names",
")",
")",
"or",
"any",
"(",
"t",
"!=",
"'STRING'",
"for",
"t",
"in",
"types",
")",
":",
"raise",
"ValueError",
"(",
"'Invalid dataset. Expect only \"image_url\" or \"image_url,label\" '",
"+",
"'STRING columns.'",
")"
] |
Validate we have a good dataset.
|
[
"Validate",
"we",
"have",
"a",
"good",
"dataset",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_util.py#L150-L162
|
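check_dataset only inspects dataset.schema, so any object exposing a BigQuery-style schema list can illustrate the validation; the stub below is a hypothetical stand-in, not part of the package, and assumes check_dataset above is in scope.
import collections

FakeDataSet = collections.namedtuple('FakeDataSet', ['schema'])   # hypothetical stub

train_ds = FakeDataSet(schema=[{'name': 'image_url', 'type': 'STRING'},
                               {'name': 'label', 'type': 'STRING'}])
check_dataset(train_ds, 'train')                  # passes silently

bad_ds = FakeDataSet(schema=[{'name': 'image_url', 'type': 'STRING'},
                             {'name': 'label', 'type': 'INTEGER'}])
try:
    check_dataset(bad_ds, 'train')
except ValueError as e:
    print(e)   # Invalid dataset. Expect only "image_url,label" STRING columns.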
4,895
|
googledatalab/pydatalab
|
solutionbox/image_classification/mltoolbox/image/classification/_util.py
|
get_sources_from_dataset
|
def get_sources_from_dataset(p, dataset, mode):
"""get pcollection from dataset."""
import apache_beam as beam
import csv
from google.datalab.ml import CsvDataSet, BigQueryDataSet
check_dataset(dataset, mode)
if type(dataset) is CsvDataSet:
source_list = []
for ii, input_path in enumerate(dataset.files):
source_list.append(p | 'Read from Csv %d (%s)' % (ii, mode) >>
beam.io.ReadFromText(input_path, strip_trailing_newlines=True))
return (source_list |
'Flatten Sources (%s)' % mode >>
beam.Flatten() |
'Create Dict from Csv (%s)' % mode >>
beam.Map(lambda line: csv.DictReader([line], fieldnames=['image_url',
'label']).next()))
elif type(dataset) is BigQueryDataSet:
bq_source = (beam.io.BigQuerySource(table=dataset.table) if dataset.table is not None else
beam.io.BigQuerySource(query=dataset.query))
return p | 'Read source from BigQuery (%s)' % mode >> beam.io.Read(bq_source)
else:
raise ValueError('Invalid DataSet. Expect CsvDataSet or BigQueryDataSet')
|
python
|
def get_sources_from_dataset(p, dataset, mode):
"""get pcollection from dataset."""
import apache_beam as beam
import csv
from google.datalab.ml import CsvDataSet, BigQueryDataSet
check_dataset(dataset, mode)
if type(dataset) is CsvDataSet:
source_list = []
for ii, input_path in enumerate(dataset.files):
source_list.append(p | 'Read from Csv %d (%s)' % (ii, mode) >>
beam.io.ReadFromText(input_path, strip_trailing_newlines=True))
return (source_list |
'Flatten Sources (%s)' % mode >>
beam.Flatten() |
'Create Dict from Csv (%s)' % mode >>
beam.Map(lambda line: csv.DictReader([line], fieldnames=['image_url',
'label']).next()))
elif type(dataset) is BigQueryDataSet:
bq_source = (beam.io.BigQuerySource(table=dataset.table) if dataset.table is not None else
beam.io.BigQuerySource(query=dataset.query))
return p | 'Read source from BigQuery (%s)' % mode >> beam.io.Read(bq_source)
else:
raise ValueError('Invalid DataSet. Expect CsvDataSet or BigQueryDataSet')
|
[
"def",
"get_sources_from_dataset",
"(",
"p",
",",
"dataset",
",",
"mode",
")",
":",
"import",
"apache_beam",
"as",
"beam",
"import",
"csv",
"from",
"google",
".",
"datalab",
".",
"ml",
"import",
"CsvDataSet",
",",
"BigQueryDataSet",
"check_dataset",
"(",
"dataset",
",",
"mode",
")",
"if",
"type",
"(",
"dataset",
")",
"is",
"CsvDataSet",
":",
"source_list",
"=",
"[",
"]",
"for",
"ii",
",",
"input_path",
"in",
"enumerate",
"(",
"dataset",
".",
"files",
")",
":",
"source_list",
".",
"append",
"(",
"p",
"|",
"'Read from Csv %d (%s)'",
"%",
"(",
"ii",
",",
"mode",
")",
">>",
"beam",
".",
"io",
".",
"ReadFromText",
"(",
"input_path",
",",
"strip_trailing_newlines",
"=",
"True",
")",
")",
"return",
"(",
"source_list",
"|",
"'Flatten Sources (%s)'",
"%",
"mode",
">>",
"beam",
".",
"Flatten",
"(",
")",
"|",
"'Create Dict from Csv (%s)'",
"%",
"mode",
">>",
"beam",
".",
"Map",
"(",
"lambda",
"line",
":",
"csv",
".",
"DictReader",
"(",
"[",
"line",
"]",
",",
"fieldnames",
"=",
"[",
"'image_url'",
",",
"'label'",
"]",
")",
".",
"next",
"(",
")",
")",
")",
"elif",
"type",
"(",
"dataset",
")",
"is",
"BigQueryDataSet",
":",
"bq_source",
"=",
"(",
"beam",
".",
"io",
".",
"BigQuerySource",
"(",
"table",
"=",
"dataset",
".",
"table",
")",
"if",
"dataset",
".",
"table",
"is",
"not",
"None",
"else",
"beam",
".",
"io",
".",
"BigQuerySource",
"(",
"query",
"=",
"dataset",
".",
"query",
")",
")",
"return",
"p",
"|",
"'Read source from BigQuery (%s)'",
"%",
"mode",
">>",
"beam",
".",
"io",
".",
"Read",
"(",
"bq_source",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Invalid DataSet. Expect CsvDataSet or BigQueryDataSet'",
")"
] |
get pcollection from dataset.
|
[
"get",
"pcollection",
"from",
"dataset",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_util.py#L165-L189
|
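The CSV branch turns every text line into a dict with fixed field names via csv.DictReader. Below is a standalone sketch of that step without Beam, using the builtin next() so it also runs on Python 3 (the pipeline above uses the Python 2 .next() method); the rows are illustrative.
import csv

lines = ['gs://bucket/cat1.jpg,cat',              # illustrative CSV rows
         'gs://bucket/dog1.jpg,dog']

records = [next(csv.DictReader([line], fieldnames=['image_url', 'label']))
           for line in lines]
print(records)
# [{'image_url': 'gs://bucket/cat1.jpg', 'label': 'cat'}, {'image_url': 'gs://bucket/dog1.jpg', 'label': 'dog'}]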
4,896
|
googledatalab/pydatalab
|
solutionbox/image_classification/mltoolbox/image/classification/_util.py
|
decode_and_resize
|
def decode_and_resize(image_str_tensor):
"""Decodes jpeg string, resizes it and returns a uint8 tensor."""
# These constants are set by Inception v3's expectations.
height = 299
width = 299
channels = 3
image = tf.image.decode_jpeg(image_str_tensor, channels=channels)
# Note resize expects a batch_size, but tf_map suppresses that index,
# thus we have to expand then squeeze. Resize returns float32 in the
# range [0, uint8_max]
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image, [height, width], align_corners=False)
image = tf.squeeze(image, squeeze_dims=[0])
image = tf.cast(image, dtype=tf.uint8)
return image
|
python
|
def decode_and_resize(image_str_tensor):
"""Decodes jpeg string, resizes it and returns a uint8 tensor."""
# These constants are set by Inception v3's expectations.
height = 299
width = 299
channels = 3
image = tf.image.decode_jpeg(image_str_tensor, channels=channels)
# Note resize expects a batch_size, but tf_map suppresses that index,
# thus we have to expand then squeeze. Resize returns float32 in the
# range [0, uint8_max]
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image, [height, width], align_corners=False)
image = tf.squeeze(image, squeeze_dims=[0])
image = tf.cast(image, dtype=tf.uint8)
return image
|
[
"def",
"decode_and_resize",
"(",
"image_str_tensor",
")",
":",
"# These constants are set by Inception v3's expectations.",
"height",
"=",
"299",
"width",
"=",
"299",
"channels",
"=",
"3",
"image",
"=",
"tf",
".",
"image",
".",
"decode_jpeg",
"(",
"image_str_tensor",
",",
"channels",
"=",
"channels",
")",
"# Note resize expects a batch_size, but tf_map supresses that index,",
"# thus we have to expand then squeeze. Resize returns float32 in the",
"# range [0, uint8_max]",
"image",
"=",
"tf",
".",
"expand_dims",
"(",
"image",
",",
"0",
")",
"image",
"=",
"tf",
".",
"image",
".",
"resize_bilinear",
"(",
"image",
",",
"[",
"height",
",",
"width",
"]",
",",
"align_corners",
"=",
"False",
")",
"image",
"=",
"tf",
".",
"squeeze",
"(",
"image",
",",
"squeeze_dims",
"=",
"[",
"0",
"]",
")",
"image",
"=",
"tf",
".",
"cast",
"(",
"image",
",",
"dtype",
"=",
"tf",
".",
"uint8",
")",
"return",
"image"
] |
Decodes jpeg string, resizes it and returns a uint8 tensor.
|
[
"Decodes",
"jpeg",
"string",
"resizes",
"it",
"and",
"returns",
"a",
"uint8",
"tensor",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_util.py#L192-L208
|
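A TF 1.x sketch that pushes one JPEG through the preprocessing graph, assuming decode_and_resize above is in scope; the file path is a placeholder.
import tensorflow as tf   # TF 1.x style, matching the code above

jpeg_placeholder = tf.placeholder(tf.string)
image_tensor = decode_and_resize(jpeg_placeholder)     # 299x299x3 uint8

with open('example.jpg', 'rb') as f:                   # placeholder file
    jpeg_bytes = f.read()

with tf.Session() as sess:
    image = sess.run(image_tensor, feed_dict={jpeg_placeholder: jpeg_bytes})
print(image.shape, image.dtype)                        # (299, 299, 3) uint8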
4,897
|
googledatalab/pydatalab
|
solutionbox/image_classification/mltoolbox/image/classification/_util.py
|
resize_image
|
def resize_image(image_str_tensor):
"""Decodes jpeg string, resizes it and re-encode it to jpeg."""
image = decode_and_resize(image_str_tensor)
image = tf.image.encode_jpeg(image, quality=100)
return image
|
python
|
def resize_image(image_str_tensor):
"""Decodes jpeg string, resizes it and re-encode it to jpeg."""
image = decode_and_resize(image_str_tensor)
image = tf.image.encode_jpeg(image, quality=100)
return image
|
[
"def",
"resize_image",
"(",
"image_str_tensor",
")",
":",
"image",
"=",
"decode_and_resize",
"(",
"image_str_tensor",
")",
"image",
"=",
"tf",
".",
"image",
".",
"encode_jpeg",
"(",
"image",
",",
"quality",
"=",
"100",
")",
"return",
"image"
] |
Decodes jpeg string, resizes it and re-encode it to jpeg.
|
[
"Decodes",
"jpeg",
"string",
"resizes",
"it",
"and",
"re",
"-",
"encode",
"it",
"to",
"jpeg",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_util.py#L211-L216
|
4,898
|
googledatalab/pydatalab
|
solutionbox/image_classification/mltoolbox/image/classification/_util.py
|
load_images
|
def load_images(image_files, resize=True):
"""Load images from files and optionally resize it."""
images = []
for image_file in image_files:
with file_io.FileIO(image_file, 'r') as ff:
images.append(ff.read())
if resize is False:
return images
# To resize, run a tf session so we can reuse 'decode_and_resize()'
# which is used in prediction graph. This makes sure we don't lose
# any quality in prediction, while decreasing the size of the images
# submitted to the model over network.
image_str_tensor = tf.placeholder(tf.string, shape=[None])
image = tf.map_fn(resize_image, image_str_tensor, back_prop=False)
feed_dict = collections.defaultdict(list)
feed_dict[image_str_tensor.name] = images
with tf.Session() as sess:
images_resized = sess.run(image, feed_dict=feed_dict)
return images_resized
|
python
|
def load_images(image_files, resize=True):
"""Load images from files and optionally resize it."""
images = []
for image_file in image_files:
with file_io.FileIO(image_file, 'r') as ff:
images.append(ff.read())
if resize is False:
return images
# To resize, run a tf session so we can reuse 'decode_and_resize()'
# which is used in prediction graph. This makes sure we don't lose
# any quality in prediction, while decreasing the size of the images
# submitted to the model over network.
image_str_tensor = tf.placeholder(tf.string, shape=[None])
image = tf.map_fn(resize_image, image_str_tensor, back_prop=False)
feed_dict = collections.defaultdict(list)
feed_dict[image_str_tensor.name] = images
with tf.Session() as sess:
images_resized = sess.run(image, feed_dict=feed_dict)
return images_resized
|
[
"def",
"load_images",
"(",
"image_files",
",",
"resize",
"=",
"True",
")",
":",
"images",
"=",
"[",
"]",
"for",
"image_file",
"in",
"image_files",
":",
"with",
"file_io",
".",
"FileIO",
"(",
"image_file",
",",
"'r'",
")",
"as",
"ff",
":",
"images",
".",
"append",
"(",
"ff",
".",
"read",
"(",
")",
")",
"if",
"resize",
"is",
"False",
":",
"return",
"images",
"# To resize, run a tf session so we can reuse 'decode_and_resize()'",
"# which is used in prediction graph. This makes sure we don't lose",
"# any quality in prediction, while decreasing the size of the images",
"# submitted to the model over network.",
"image_str_tensor",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"string",
",",
"shape",
"=",
"[",
"None",
"]",
")",
"image",
"=",
"tf",
".",
"map_fn",
"(",
"resize_image",
",",
"image_str_tensor",
",",
"back_prop",
"=",
"False",
")",
"feed_dict",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"feed_dict",
"[",
"image_str_tensor",
".",
"name",
"]",
"=",
"images",
"with",
"tf",
".",
"Session",
"(",
")",
"as",
"sess",
":",
"images_resized",
"=",
"sess",
".",
"run",
"(",
"image",
",",
"feed_dict",
"=",
"feed_dict",
")",
"return",
"images_resized"
] |
Load images from files and optionally resize them.
|
[
"Load",
"images",
"from",
"files",
"and",
"optionally",
"resize",
"it",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_util.py#L219-L239
|
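A short usage sketch for load_images, assuming the helper above is in scope and using placeholder paths; resize=False returns the raw bytes, while the default resizes through the same decode_and_resize() graph used at prediction time so no quality is lost relative to the served model.
raw_bytes = load_images(['./images/cat1.jpg', './images/dog1.jpg'], resize=False)   # placeholder paths
resized_bytes = load_images(['./images/cat1.jpg', './images/dog1.jpg'])
print(len(raw_bytes), len(resized_bytes))   # 2 2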
4,899
|
googledatalab/pydatalab
|
solutionbox/image_classification/mltoolbox/image/classification/_util.py
|
process_prediction_results
|
def process_prediction_results(results, show_image):
"""Create DataFrames out of prediction results, and display images in IPython if requested."""
import pandas as pd
if (is_in_IPython() and show_image is True):
import IPython
for image_url, image, label_and_score in results:
IPython.display.display_html('<p style="font-size:28px">%s(%.5f)</p>' % label_and_score,
raw=True)
IPython.display.display(IPython.display.Image(data=image))
result_dict = [{'image_url': url, 'label': r[0], 'score': r[1]} for url, _, r in results]
return pd.DataFrame(result_dict)
|
python
|
def process_prediction_results(results, show_image):
"""Create DataFrames out of prediction results, and display images in IPython if requested."""
import pandas as pd
if (is_in_IPython() and show_image is True):
import IPython
for image_url, image, label_and_score in results:
IPython.display.display_html('<p style="font-size:28px">%s(%.5f)</p>' % label_and_score,
raw=True)
IPython.display.display(IPython.display.Image(data=image))
result_dict = [{'image_url': url, 'label': r[0], 'score': r[1]} for url, _, r in results]
return pd.DataFrame(result_dict)
|
[
"def",
"process_prediction_results",
"(",
"results",
",",
"show_image",
")",
":",
"import",
"pandas",
"as",
"pd",
"if",
"(",
"is_in_IPython",
"(",
")",
"and",
"show_image",
"is",
"True",
")",
":",
"import",
"IPython",
"for",
"image_url",
",",
"image",
",",
"label_and_score",
"in",
"results",
":",
"IPython",
".",
"display",
".",
"display_html",
"(",
"'<p style=\"font-size:28px\">%s(%.5f)</p>'",
"%",
"label_and_score",
",",
"raw",
"=",
"True",
")",
"IPython",
".",
"display",
".",
"display",
"(",
"IPython",
".",
"display",
".",
"Image",
"(",
"data",
"=",
"image",
")",
")",
"result_dict",
"=",
"[",
"{",
"'image_url'",
":",
"url",
",",
"'label'",
":",
"r",
"[",
"0",
"]",
",",
"'score'",
":",
"r",
"[",
"1",
"]",
"}",
"for",
"url",
",",
"_",
",",
"r",
"in",
"results",
"]",
"return",
"pd",
".",
"DataFrame",
"(",
"result_dict",
")"
] |
Create DataFrames out of prediction results, and display images in IPython if requested.
|
[
"Create",
"DataFrames",
"out",
"of",
"prediction",
"results",
"and",
"display",
"images",
"in",
"IPython",
"if",
"requested",
"."
] |
d9031901d5bca22fe0d5925d204e6698df9852e1
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_util.py#L242-L254
|
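The DataFrame construction at the end of process_prediction_results is easy to check on its own with made-up results; this is the same reshaping the function applies when show_image is False.
import pandas as pd

# Made-up (image_url, image_bytes, (label, score)) tuples in the shape the
# prediction pipeline produces.
results = [('gs://bucket/cat1.jpg', b'<jpeg bytes>', ('cat', 0.98)),
           ('gs://bucket/dog1.jpg', b'<jpeg bytes>', ('dog', 0.91))]

df = pd.DataFrame([{'image_url': url, 'label': r[0], 'score': r[1]}
                   for url, _, r in results])
print(df)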