Fields per record: repo | path | func_name | language | code (docstring included) | sha | url | partition | idx

repo: googledatalab/pydatalab
path: google/datalab/contrib/mlworkbench/_prediction_explainer.py
func_name: PredictionExplainer._make_tabular_predict_fn
language: python

```python
def _make_tabular_predict_fn(self, labels, instance, categories):
  """Create a predict_fn that can be used by LIME tabular explainer."""
  def _predict_fn(np_instance):
    df = pd.DataFrame(
        np_instance,
        columns=(self._categorical_columns + self._numeric_columns))
    # Convert categorical indices back to categories.
    for col_name, col_categories in zip(self._categorical_columns, categories):
      df[col_name] = df[col_name].apply(lambda x: col_categories[int(x)])
    # Add columns that do not exist in the perturbed data,
    # such as key, text, and image data.
    for col_name in self._headers:
      if col_name not in (self._categorical_columns + self._numeric_columns):
        df[col_name] = instance[col_name]
    r = _local_predict.get_prediction_results(
        self._model_dir, df, self._headers, with_source=False)
    probs = _local_predict.get_probs_for_labels(labels, r)
    probs = np.asarray(probs)
    return probs

  return _predict_fn
```
"""Create a predict_fn that can be used by LIME tabular explainer. """
def _predict_fn(np_instance):
df = pd.DataFrame(
np_instance,
columns=(self._categorical_columns + self._numeric_columns))
# Convert categorical indices back to categories.
for col_name, col_categories in zip(self._categorical_columns, categories):
df[col_name] = df[col_name].apply(lambda x: col_categories[int(x)])
# Add columns that do not exist in the perturbed data,
# such as key, text, and image data.
for col_name in self._headers:
if col_name not in (self._categorical_columns + self._numeric_columns):
df[col_name] = instance[col_name]
r = _local_predict.get_prediction_results(
self._model_dir, df, self._headers, with_source=False)
probs = _local_predict.get_probs_for_labels(labels, r)
probs = np.asarray(probs)
return probs
return _predict_fn | [
"def",
"_make_tabular_predict_fn",
"(",
"self",
",",
"labels",
",",
"instance",
",",
"categories",
")",
":",
"def",
"_predict_fn",
"(",
"np_instance",
")",
":",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"np_instance",
",",
"columns",
"=",
"(",
"self",
".",
"_categorical_columns",
"+",
"self",
".",
"_numeric_columns",
")",
")",
"# Convert categorical indices back to categories.",
"for",
"col_name",
",",
"col_categories",
"in",
"zip",
"(",
"self",
".",
"_categorical_columns",
",",
"categories",
")",
":",
"df",
"[",
"col_name",
"]",
"=",
"df",
"[",
"col_name",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"col_categories",
"[",
"int",
"(",
"x",
")",
"]",
")",
"# Add columns that do not exist in the perturbed data,",
"# such as key, text, and image data.",
"for",
"col_name",
"in",
"self",
".",
"_headers",
":",
"if",
"col_name",
"not",
"in",
"(",
"self",
".",
"_categorical_columns",
"+",
"self",
".",
"_numeric_columns",
")",
":",
"df",
"[",
"col_name",
"]",
"=",
"instance",
"[",
"col_name",
"]",
"r",
"=",
"_local_predict",
".",
"get_prediction_results",
"(",
"self",
".",
"_model_dir",
",",
"df",
",",
"self",
".",
"_headers",
",",
"with_source",
"=",
"False",
")",
"probs",
"=",
"_local_predict",
".",
"get_probs_for_labels",
"(",
"labels",
",",
"r",
")",
"probs",
"=",
"np",
".",
"asarray",
"(",
"probs",
")",
"return",
"probs",
"return",
"_predict_fn"
] | Create a predict_fn that can be used by LIME tabular explainer. | [
"Create",
"a",
"predict_fn",
"that",
"can",
"be",
"used",
"by",
"LIME",
"tabular",
"explainer",
"."

sha: d9031901d5bca22fe0d5925d204e6698df9852e1
url: https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/mlworkbench/_prediction_explainer.py#L130-L155
partition: train
idx: 237800
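
For context, the contract this helper satisfies is LIME's tabular one: a callable that maps an (n_samples, n_features) numpy array of perturbed rows to an (n_samples, n_classes) array of class probabilities. A minimal self-contained sketch of that contract, with a toy sigmoid model standing in for `_local_predict`:

```python
import numpy as np

def toy_predict_fn(np_instance):
  # np_instance: (n_samples, n_features) array of perturbed rows, as LIME passes it.
  # Toy stand-in model: class-1 probability is a sigmoid of the first column.
  p1 = 1.0 / (1.0 + np.exp(-np_instance[:, 0].astype(float)))
  return np.column_stack([1.0 - p1, p1])  # shape (n_samples, 2)

print(toy_predict_fn(np.array([[0.5, 2.0], [-1.2, 0.0]])))
```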

repo: googledatalab/pydatalab
path: google/datalab/contrib/mlworkbench/_prediction_explainer.py
func_name: PredictionExplainer.explain_tabular
language: python

```python
def explain_tabular(self, trainset, labels, instance, num_features=5, kernel_width=3):
  """Explain categorical and numeric features for a prediction.

  It analyzes the prediction with LIME, and returns a report of the most impactful
  tabular features contributing to certain labels.

  Args:
    trainset: a DataFrame representing the training features that LIME can use to
        decide value distributions.
    labels: a list of labels to explain.
    instance: the prediction instance. It needs to conform to the model's input.
        Can be a csv line string, or a dict.
    num_features: maximum number of features to show.
    kernel_width: passed to LIME LimeTabularExplainer directly.

  Returns:
    A LIME lime.explanation.Explanation.
  """
  from lime.lime_tabular import LimeTabularExplainer

  if isinstance(instance, six.string_types):
    instance = next(csv.DictReader([instance], fieldnames=self._headers))

  categories = self._get_unique_categories(trainset)
  np_trainset = self._preprocess_data_for_tabular_explain(trainset, categories)
  predict_fn = self._make_tabular_predict_fn(labels, instance, categories)
  prediction_df = pd.DataFrame([instance])
  prediction_instance = self._preprocess_data_for_tabular_explain(prediction_df, categories)

  explainer = LimeTabularExplainer(
      np_trainset,
      feature_names=(self._categorical_columns + self._numeric_columns),
      class_names=labels,
      categorical_features=range(len(categories)),
      categorical_names={i: v for i, v in enumerate(categories)},
      kernel_width=kernel_width)

  exp = explainer.explain_instance(
      prediction_instance[0],
      predict_fn,
      num_features=num_features,
      labels=range(len(labels)))
  return exp
```
"""Explain categorical and numeric features for a prediction.
It analyze the prediction by LIME, and returns a report of the most impactful tabular
features contributing to certain labels.
Args:
trainset: a DataFrame representing the training features that LIME can use to decide
value distributions.
labels: a list of labels to explain.
instance: the prediction instance. It needs to conform to model's input. Can be a csv
line string, or a dict.
num_features: maximum number of features to show.
kernel_width: Passed to LIME LimeTabularExplainer directly.
Returns:
A LIME's lime.explanation.Explanation.
"""
from lime.lime_tabular import LimeTabularExplainer
if isinstance(instance, six.string_types):
instance = next(csv.DictReader([instance], fieldnames=self._headers))
categories = self._get_unique_categories(trainset)
np_trainset = self._preprocess_data_for_tabular_explain(trainset, categories)
predict_fn = self._make_tabular_predict_fn(labels, instance, categories)
prediction_df = pd.DataFrame([instance])
prediction_instance = self._preprocess_data_for_tabular_explain(prediction_df, categories)
explainer = LimeTabularExplainer(
np_trainset,
feature_names=(self._categorical_columns + self._numeric_columns),
class_names=labels,
categorical_features=range(len(categories)),
categorical_names={i: v for i, v in enumerate(categories)},
kernel_width=kernel_width)
exp = explainer.explain_instance(
prediction_instance[0],
predict_fn,
num_features=num_features,
labels=range(len(labels)))
return exp | [
"def",
"explain_tabular",
"(",
"self",
",",
"trainset",
",",
"labels",
",",
"instance",
",",
"num_features",
"=",
"5",
",",
"kernel_width",
"=",
"3",
")",
":",
"from",
"lime",
".",
"lime_tabular",
"import",
"LimeTabularExplainer",
"if",
"isinstance",
"(",
"instance",
",",
"six",
".",
"string_types",
")",
":",
"instance",
"=",
"next",
"(",
"csv",
".",
"DictReader",
"(",
"[",
"instance",
"]",
",",
"fieldnames",
"=",
"self",
".",
"_headers",
")",
")",
"categories",
"=",
"self",
".",
"_get_unique_categories",
"(",
"trainset",
")",
"np_trainset",
"=",
"self",
".",
"_preprocess_data_for_tabular_explain",
"(",
"trainset",
",",
"categories",
")",
"predict_fn",
"=",
"self",
".",
"_make_tabular_predict_fn",
"(",
"labels",
",",
"instance",
",",
"categories",
")",
"prediction_df",
"=",
"pd",
".",
"DataFrame",
"(",
"[",
"instance",
"]",
")",
"prediction_instance",
"=",
"self",
".",
"_preprocess_data_for_tabular_explain",
"(",
"prediction_df",
",",
"categories",
")",
"explainer",
"=",
"LimeTabularExplainer",
"(",
"np_trainset",
",",
"feature_names",
"=",
"(",
"self",
".",
"_categorical_columns",
"+",
"self",
".",
"_numeric_columns",
")",
",",
"class_names",
"=",
"labels",
",",
"categorical_features",
"=",
"range",
"(",
"len",
"(",
"categories",
")",
")",
",",
"categorical_names",
"=",
"{",
"i",
":",
"v",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"categories",
")",
"}",
",",
"kernel_width",
"=",
"kernel_width",
")",
"exp",
"=",
"explainer",
".",
"explain_instance",
"(",
"prediction_instance",
"[",
"0",
"]",
",",
"predict_fn",
",",
"num_features",
"=",
"num_features",
",",
"labels",
"=",
"range",
"(",
"len",
"(",
"labels",
")",
")",
")",
"return",
"exp"
] | Explain categorical and numeric features for a prediction.
It analyze the prediction by LIME, and returns a report of the most impactful tabular
features contributing to certain labels.
Args:
trainset: a DataFrame representing the training features that LIME can use to decide
value distributions.
labels: a list of labels to explain.
instance: the prediction instance. It needs to conform to model's input. Can be a csv
line string, or a dict.
num_features: maximum number of features to show.
kernel_width: Passed to LIME LimeTabularExplainer directly.
Returns:
A LIME's lime.explanation.Explanation. | [
"Explain",
"categorical",
"and",
"numeric",
"features",
"for",
"a",
"prediction",
"."

sha: d9031901d5bca22fe0d5925d204e6698df9852e1
url: https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/mlworkbench/_prediction_explainer.py#L157-L199
partition: train
idx: 237801
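
A hedged usage sketch for `explain_tabular`. Only the method signature comes from the record above; the `PredictionExplainer('./my_model')` constructor argument, file paths, labels, and csv line are illustrative assumptions, and `show_in_notebook` is LIME's usual rendering method.

```python
import pandas as pd
from google.datalab.contrib.mlworkbench._prediction_explainer import PredictionExplainer

explainer = PredictionExplainer('./my_model')   # assumed constructor argument
trainset = pd.read_csv('./train_features.csv')  # hypothetical training features
exp = explainer.explain_tabular(
    trainset,
    labels=['daisy', 'rose'],                   # hypothetical labels
    instance='key1,red,3.5',                    # a csv line matching the model input
    num_features=5)
exp.show_in_notebook()
```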

repo: googledatalab/pydatalab
path: google/datalab/contrib/mlworkbench/_prediction_explainer.py
func_name: PredictionExplainer.explain_text
language: python

```python
def explain_text(self, labels, instance, column_name=None, num_features=10, num_samples=5000):
  """Explain a text field of a prediction.

  It analyzes the prediction with LIME, and returns a report of which words are most
  impactful in contributing to certain labels.

  Args:
    labels: a list of labels to explain.
    instance: the prediction instance. It needs to conform to the model's input.
        Can be a csv line string, or a dict.
    column_name: which text column to explain. Can be None if there is only one
        text column in the model input.
    num_features: maximum number of words (features) to analyze. Passed to
        LIME LimeTextExplainer directly.
    num_samples: size of the neighborhood to learn the linear model. Passed to
        LIME LimeTextExplainer directly.

  Returns:
    A LIME lime.explanation.Explanation.

  Throws:
    ValueError if the given text column is not found in the model input, or if
    column_name is None but there are multiple text columns in the model input.
  """
  from lime.lime_text import LimeTextExplainer

  if len(self._text_columns) > 1 and not column_name:
    raise ValueError('There are multiple text columns in the input of the model. ' +
                     'Please specify "column_name".')
  elif column_name and column_name not in self._text_columns:
    raise ValueError('Specified column_name "%s" not found in the model input.'
                     % column_name)

  text_column_name = column_name if column_name else self._text_columns[0]
  if isinstance(instance, six.string_types):
    instance = next(csv.DictReader([instance], fieldnames=self._headers))

  predict_fn = self._make_text_predict_fn(labels, instance, text_column_name)
  explainer = LimeTextExplainer(class_names=labels)
  exp = explainer.explain_instance(
      instance[text_column_name], predict_fn, labels=range(len(labels)),
      num_features=num_features, num_samples=num_samples)
  return exp
```
"""Explain a text field of a prediction.
It analyze the prediction by LIME, and returns a report of which words are most impactful
in contributing to certain labels.
Args:
labels: a list of labels to explain.
instance: the prediction instance. It needs to conform to model's input. Can be a csv
line string, or a dict.
column_name: which text column to explain. Can be None if there is only one text column
in the model input.
num_features: maximum number of words (features) to analyze. Passed to
LIME LimeTextExplainer directly.
num_samples: size of the neighborhood to learn the linear model. Passed to
LIME LimeTextExplainer directly.
Returns:
A LIME's lime.explanation.Explanation.
Throws:
ValueError if the given text column is not found in model input or column_name is None
but there are multiple text columns in model input.
"""
from lime.lime_text import LimeTextExplainer
if len(self._text_columns) > 1 and not column_name:
raise ValueError('There are multiple text columns in the input of the model. ' +
'Please specify "column_name".')
elif column_name and column_name not in self._text_columns:
raise ValueError('Specified column_name "%s" not found in the model input.'
% column_name)
text_column_name = column_name if column_name else self._text_columns[0]
if isinstance(instance, six.string_types):
instance = next(csv.DictReader([instance], fieldnames=self._headers))
predict_fn = self._make_text_predict_fn(labels, instance, text_column_name)
explainer = LimeTextExplainer(class_names=labels)
exp = explainer.explain_instance(
instance[text_column_name], predict_fn, labels=range(len(labels)),
num_features=num_features, num_samples=num_samples)
return exp | [
"def",
"explain_text",
"(",
"self",
",",
"labels",
",",
"instance",
",",
"column_name",
"=",
"None",
",",
"num_features",
"=",
"10",
",",
"num_samples",
"=",
"5000",
")",
":",
"from",
"lime",
".",
"lime_text",
"import",
"LimeTextExplainer",
"if",
"len",
"(",
"self",
".",
"_text_columns",
")",
">",
"1",
"and",
"not",
"column_name",
":",
"raise",
"ValueError",
"(",
"'There are multiple text columns in the input of the model. '",
"+",
"'Please specify \"column_name\".'",
")",
"elif",
"column_name",
"and",
"column_name",
"not",
"in",
"self",
".",
"_text_columns",
":",
"raise",
"ValueError",
"(",
"'Specified column_name \"%s\" not found in the model input.'",
"%",
"column_name",
")",
"text_column_name",
"=",
"column_name",
"if",
"column_name",
"else",
"self",
".",
"_text_columns",
"[",
"0",
"]",
"if",
"isinstance",
"(",
"instance",
",",
"six",
".",
"string_types",
")",
":",
"instance",
"=",
"next",
"(",
"csv",
".",
"DictReader",
"(",
"[",
"instance",
"]",
",",
"fieldnames",
"=",
"self",
".",
"_headers",
")",
")",
"predict_fn",
"=",
"self",
".",
"_make_text_predict_fn",
"(",
"labels",
",",
"instance",
",",
"text_column_name",
")",
"explainer",
"=",
"LimeTextExplainer",
"(",
"class_names",
"=",
"labels",
")",
"exp",
"=",
"explainer",
".",
"explain_instance",
"(",
"instance",
"[",
"text_column_name",
"]",
",",
"predict_fn",
",",
"labels",
"=",
"range",
"(",
"len",
"(",
"labels",
")",
")",
",",
"num_features",
"=",
"num_features",
",",
"num_samples",
"=",
"num_samples",
")",
"return",
"exp"
] | Explain a text field of a prediction.
It analyze the prediction by LIME, and returns a report of which words are most impactful
in contributing to certain labels.
Args:
labels: a list of labels to explain.
instance: the prediction instance. It needs to conform to model's input. Can be a csv
line string, or a dict.
column_name: which text column to explain. Can be None if there is only one text column
in the model input.
num_features: maximum number of words (features) to analyze. Passed to
LIME LimeTextExplainer directly.
num_samples: size of the neighborhood to learn the linear model. Passed to
LIME LimeTextExplainer directly.
Returns:
A LIME's lime.explanation.Explanation.
Throws:
ValueError if the given text column is not found in model input or column_name is None
but there are multiple text columns in model input. | [
"Explain",
"a",
"text",
"field",
"of",
"a",
"prediction",
"."

sha: d9031901d5bca22fe0d5925d204e6698df9852e1
url: https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/mlworkbench/_prediction_explainer.py#L201-L244
partition: train
idx: 237802
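
A hedged usage sketch for `explain_text`; the constructor argument, labels, and instance columns are hypothetical. `show_in_notebook(text=True)` is LIME's standard text rendering.

```python
from google.datalab.contrib.mlworkbench._prediction_explainer import PredictionExplainer

explainer = PredictionExplainer('./my_model')        # assumed constructor argument
exp = explainer.explain_text(
    labels=['negative', 'positive'],                 # hypothetical labels
    instance={'key': '1', 'review': 'great movie'},  # hypothetical columns
    column_name='review',
    num_features=10)
exp.show_in_notebook(text=True)
```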

repo: googledatalab/pydatalab
path: google/datalab/contrib/mlworkbench/_prediction_explainer.py
func_name: PredictionExplainer.explain_image
language: python

```python
def explain_image(self, labels, instance, column_name=None, num_features=100000,
                  num_samples=300, batch_size=200, hide_color=0):
  """Explain an image of a prediction.

  It analyzes the prediction with LIME, and returns a report of which areas of the
  image are most impactful in contributing to certain labels.

  Args:
    labels: a list of labels to explain.
    instance: the prediction instance. It needs to conform to the model's input.
        Can be a csv line string, or a dict.
    column_name: which image column to explain. Can be None if there is only one
        image column in the model input.
    num_features: maximum number of areas (features) to analyze. Passed to
        LIME LimeImageExplainer directly.
    num_samples: size of the neighborhood to learn the linear model. Passed to
        LIME LimeImageExplainer directly.
    batch_size: size of batches passed to predict_fn. Passed to
        LIME LimeImageExplainer directly.
    hide_color: the color used to perturb images. Passed to
        LIME LimeImageExplainer directly.

  Returns:
    A LIME lime.explanation.Explanation.

  Throws:
    ValueError if the given image column is not found in the model input, or if
    column_name is None but there are multiple image columns in the model input.
  """
  from lime.lime_image import LimeImageExplainer

  if len(self._image_columns) > 1 and not column_name:
    raise ValueError('There are multiple image columns in the input of the model. ' +
                     'Please specify "column_name".')
  elif column_name and column_name not in self._image_columns:
    raise ValueError('Specified column_name "%s" not found in the model input.'
                     % column_name)

  image_column_name = column_name if column_name else self._image_columns[0]
  if isinstance(instance, six.string_types):
    instance = next(csv.DictReader([instance], fieldnames=self._headers))

  predict_fn = self._make_image_predict_fn(labels, instance, image_column_name)
  explainer = LimeImageExplainer()
  with file_io.FileIO(instance[image_column_name], 'rb') as fi:
    im = Image.open(fi)
    im.thumbnail((299, 299), Image.ANTIALIAS)
    rgb_im = np.asarray(im.convert('RGB'))
  exp = explainer.explain_instance(
      rgb_im, predict_fn, labels=range(len(labels)), top_labels=None,
      hide_color=hide_color, num_features=num_features,
      num_samples=num_samples, batch_size=batch_size)
  return exp
```

sha: d9031901d5bca22fe0d5925d204e6698df9852e1
url: https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/mlworkbench/_prediction_explainer.py#L246-L299
partition: train
idx: 237803
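
A hedged usage sketch for `explain_image`; the constructor argument and instance values are hypothetical. `get_image_and_mask` is the standard accessor on LIME's image explanations, and label `0` refers to the first entry of the `labels` list since the method passes `labels=range(len(labels))`.

```python
from google.datalab.contrib.mlworkbench._prediction_explainer import PredictionExplainer

explainer = PredictionExplainer('./my_model')                      # assumed constructor argument
exp = explainer.explain_image(
    labels=['daisy', 'rose'],                                      # hypothetical labels
    instance={'key': '1', 'image_url': 'gs://bucket/flower.jpg'},  # hypothetical columns
    num_samples=300)
# Overlay of the regions supporting the first label, per LIME's image API.
image, mask = exp.get_image_and_mask(0, positive_only=True, num_features=5)
```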

repo: googledatalab/pydatalab
path: google/datalab/contrib/mlworkbench/_prediction_explainer.py
func_name: PredictionExplainer.probe_image
language: python

```python
def probe_image(self, labels, instance, column_name=None, num_scaled_images=50,
                top_percent=10):
  """Get pixel importance of the image.

  It performs pixel sensitivity analysis by showing only the most important pixels
  to a certain label in the image. It uses integrated gradients to measure the
  importance of each pixel.

  Args:
    labels: labels to compute gradients from.
    instance: the prediction instance. It needs to conform to the model's input.
        Can be a csv line string, or a dict.
    column_name: the name of the image column to probe. If there is only one image
        column it can be None.
    num_scaled_images: number of scaled images to get gradients from. For example,
        if 10, the image will be scaled by 0.1, 0.2, ..., 0.9, 1.0, producing
        10 images for gradients computation.
    top_percent: the percentile of pixels to show. For example, if 10, only the
        top 10% most impactful pixels will be shown and the rest will be black.

  Returns:
    A tuple. First is the resized original image (299x299x3). Second is a list of
    visualizations of the same size that highlight the most important pixels, one
    per label.
  """
  if len(self._image_columns) > 1 and not column_name:
    raise ValueError('There are multiple image columns in the input of the model. ' +
                     'Please specify "column_name".')
  elif column_name and column_name not in self._image_columns:
    raise ValueError('Specified column_name "%s" not found in the model input.' %
                     column_name)

  image_column_name = column_name if column_name else self._image_columns[0]
  if isinstance(instance, six.string_types):
    instance = next(csv.DictReader([instance], fieldnames=self._headers))

  image_path = instance[image_column_name]
  with file_io.FileIO(image_path, 'rb') as fi:
    im = Image.open(fi)
    resized_image = im.resize((299, 299))

  # Produce a list of scaled images, and create instances (csv lines) from them.
  step = 1. / num_scaled_images
  scales = np.arange(0.0, 1.0, step) + step
  csv_lines = []
  for s in scales:
    pixels = (np.asarray(resized_image) * s).astype('uint8')
    scaled_image = Image.fromarray(pixels)
    buf = io.BytesIO()
    scaled_image.save(buf, "JPEG")
    encoded_image = base64.urlsafe_b64encode(buf.getvalue()).decode('ascii')
    instance_copy = dict(instance)
    instance_copy[image_column_name] = encoded_image
    buf = six.StringIO()
    writer = csv.DictWriter(buf, fieldnames=self._headers, lineterminator='')
    writer.writerow(instance_copy)
    csv_lines.append(buf.getvalue())

  integrated_gradients_images = []
  for label in labels:
    # Send to the TF model to get gradients.
    grads = self._image_gradients(csv_lines, label, image_column_name)
    integrated_grads = resized_image * np.average(grads, axis=0)
    # Gray-scale the gradients by removing the color dimension.
    # abs() picks the most impactful pixels regardless of sign.
    grayed = np.average(abs(integrated_grads), axis=2)
    grayed = np.transpose([grayed, grayed, grayed], axes=[1, 2, 0])
    # Only show the most impactful pixels.
    p = np.percentile(grayed, 100 - top_percent)
    viz_window = np.where(grayed > p, 1, 0)
    vis = resized_image * viz_window
    im_vis = Image.fromarray(np.uint8(vis))
    integrated_gradients_images.append(im_vis)

  return resized_image, integrated_gradients_images
```

sha: d9031901d5bca22fe0d5925d204e6698df9852e1
url: https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/mlworkbench/_prediction_explainer.py#L334-L414
partition: train
idx: 237804
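
A hedged usage sketch for `probe_image`; the constructor argument and instance values are hypothetical. Per the docstring, the return values are PIL images, so they can be saved directly.

```python
from google.datalab.contrib.mlworkbench._prediction_explainer import PredictionExplainer

explainer = PredictionExplainer('./my_model')  # assumed constructor argument
resized, highlights = explainer.probe_image(
    labels=['daisy', 'rose'],                  # hypothetical labels
    instance={'key': '1', 'image_url': 'gs://bucket/flower.jpg'},
    top_percent=10)
resized.save('original_299.jpg')
highlights[0].save('daisy_top10pct.jpg')       # top-10% pixels for 'daisy'
```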

repo: googledatalab/pydatalab
path: google/datalab/ml/_cloud_models.py
func_name: Models.get_model_details
language: python

```python
def get_model_details(self, model_name):
  """Get details of the specified model from CloudML Service.

  Args:
    model_name: the name of the model. It can be a model full name
        ("projects/[project_id]/models/[model_name]") or just [model_name].

  Returns: a dictionary of the model details.
  """
  full_name = model_name
  if not model_name.startswith('projects/'):
    full_name = ('projects/%s/models/%s' % (self._project_id, model_name))
  return self._api.projects().models().get(name=full_name).execute()
```
"""Get details of the specified model from CloudML Service.
Args:
model_name: the name of the model. It can be a model full name
("projects/[project_id]/models/[model_name]") or just [model_name].
Returns: a dictionary of the model details.
"""
full_name = model_name
if not model_name.startswith('projects/'):
full_name = ('projects/%s/models/%s' % (self._project_id, model_name))
return self._api.projects().models().get(name=full_name).execute() | [
"def",
"get_model_details",
"(",
"self",
",",
"model_name",
")",
":",
"full_name",
"=",
"model_name",
"if",
"not",
"model_name",
".",
"startswith",
"(",
"'projects/'",
")",
":",
"full_name",
"=",
"(",
"'projects/%s/models/%s'",
"%",
"(",
"self",
".",
"_project_id",
",",
"model_name",
")",
")",
"return",
"self",
".",
"_api",
".",
"projects",
"(",
")",
".",
"models",
"(",
")",
".",
"get",
"(",
"name",
"=",
"full_name",
")",
".",
"execute",
"(",
")"
] | Get details of the specified model from CloudML Service.
Args:
model_name: the name of the model. It can be a model full name
("projects/[project_id]/models/[model_name]") or just [model_name].
Returns: a dictionary of the model details. | [
"Get",
"details",
"of",
"the",
"specified",
"model",
"from",
"CloudML",
"Service",
"."

sha: d9031901d5bca22fe0d5925d204e6698df9852e1
url: https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_cloud_models.py#L53-L64
partition: train
idx: 237805
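
A hedged usage sketch; the `Models()` constructor (picking up the project id from the environment) is an assumption, while `get_model_details` and its name-expansion behavior come from the record above.

```python
from google.datalab.ml import Models

models = Models()                            # assumed: project id from the environment
details = models.get_model_details('iris')   # short name expands to projects/<id>/models/iris
print(details.get('defaultVersion'))
```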

repo: googledatalab/pydatalab
path: google/datalab/ml/_cloud_models.py
func_name: Models.create
language: python

```python
def create(self, model_name):
  """Create a model.

  Args:
    model_name: the short name of the model, such as "iris".

  Returns:
    If successful, returns information about the model, such as
    {u'regions': [u'us-central1'], u'name': u'projects/myproject/models/mymodel'}

  Raises:
    If the model creation failed.
  """
  body = {'name': model_name}
  parent = 'projects/' + self._project_id
  # Model creation is instant. If anything goes wrong, an Exception will be thrown.
  return self._api.projects().models().create(body=body, parent=parent).execute()
```
"""Create a model.
Args:
model_name: the short name of the model, such as "iris".
Returns:
If successful, returns informaiton of the model, such as
{u'regions': [u'us-central1'], u'name': u'projects/myproject/models/mymodel'}
Raises:
If the model creation failed.
"""
body = {'name': model_name}
parent = 'projects/' + self._project_id
# Model creation is instant. If anything goes wrong, Exception will be thrown.
return self._api.projects().models().create(body=body, parent=parent).execute() | [
"def",
"create",
"(",
"self",
",",
"model_name",
")",
":",
"body",
"=",
"{",
"'name'",
":",
"model_name",
"}",
"parent",
"=",
"'projects/'",
"+",
"self",
".",
"_project_id",
"# Model creation is instant. If anything goes wrong, Exception will be thrown.",
"return",
"self",
".",
"_api",
".",
"projects",
"(",
")",
".",
"models",
"(",
")",
".",
"create",
"(",
"body",
"=",
"body",
",",
"parent",
"=",
"parent",
")",
".",
"execute",
"(",
")"
] | Create a model.
Args:
model_name: the short name of the model, such as "iris".
Returns:
If successful, returns informaiton of the model, such as
{u'regions': [u'us-central1'], u'name': u'projects/myproject/models/mymodel'}
Raises:
If the model creation failed. | [
"Create",
"a",
"model",
"."

sha: d9031901d5bca22fe0d5925d204e6698df9852e1
url: https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_cloud_models.py#L66-L80
partition: train
idx: 237806
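
A hedged usage sketch for `create`, under the same assumed constructor as above.

```python
from google.datalab.ml import Models

models = Models()              # assumed constructor, as above
info = models.create('iris')   # short model name, per the docstring
print(info['name'])            # e.g. projects/<project>/models/iris
```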

repo: googledatalab/pydatalab
path: google/datalab/ml/_cloud_models.py
func_name: Models.list
language: python

```python
def list(self, count=10):
  """List models under the current project in a table view.

  Args:
    count: upper limit of the number of models to list.

  Raises:
    Exception if it is called in a non-IPython environment.
  """
  import IPython

  data = []
  # zip with range(count) so the loop stops either when it reaches count or when
  # iteration on self is exhausted. "self" is iterable (see __iter__() method).
  for _, model in zip(range(count), self.get_iterator()):
    element = {'name': model['name']}
    if 'defaultVersion' in model:
      version_short_name = model['defaultVersion']['name'].split('/')[-1]
      element['defaultVersion'] = version_short_name
    data.append(element)

  IPython.display.display(
      datalab.utils.commands.render_dictionary(data, ['name', 'defaultVersion']))
```
"""List models under the current project in a table view.
Args:
count: upper limit of the number of models to list.
Raises:
Exception if it is called in a non-IPython environment.
"""
import IPython
data = []
# Add range(count) to loop so it will stop either it reaches count, or iteration
# on self is exhausted. "self" is iterable (see __iter__() method).
for _, model in zip(range(count), self.get_iterator()):
element = {'name': model['name']}
if 'defaultVersion' in model:
version_short_name = model['defaultVersion']['name'].split('/')[-1]
element['defaultVersion'] = version_short_name
data.append(element)
IPython.display.display(
datalab.utils.commands.render_dictionary(data, ['name', 'defaultVersion'])) | [
"def",
"list",
"(",
"self",
",",
"count",
"=",
"10",
")",
":",
"import",
"IPython",
"data",
"=",
"[",
"]",
"# Add range(count) to loop so it will stop either it reaches count, or iteration",
"# on self is exhausted. \"self\" is iterable (see __iter__() method).",
"for",
"_",
",",
"model",
"in",
"zip",
"(",
"range",
"(",
"count",
")",
",",
"self",
".",
"get_iterator",
"(",
")",
")",
":",
"element",
"=",
"{",
"'name'",
":",
"model",
"[",
"'name'",
"]",
"}",
"if",
"'defaultVersion'",
"in",
"model",
":",
"version_short_name",
"=",
"model",
"[",
"'defaultVersion'",
"]",
"[",
"'name'",
"]",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
"element",
"[",
"'defaultVersion'",
"]",
"=",
"version_short_name",
"data",
".",
"append",
"(",
"element",
")",
"IPython",
".",
"display",
".",
"display",
"(",
"datalab",
".",
"utils",
".",
"commands",
".",
"render_dictionary",
"(",
"data",
",",
"[",
"'name'",
",",
"'defaultVersion'",
"]",
")",
")"
] | List models under the current project in a table view.
Args:
count: upper limit of the number of models to list.
Raises:
Exception if it is called in a non-IPython environment. | [
"List",
"models",
"under",
"the",
"current",
"project",
"in",
"a",
"table",
"view",
"."

sha: d9031901d5bca22fe0d5925d204e6698df9852e1
url: https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_cloud_models.py#L97-L117
partition: train
idx: 237807
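
A hedged usage sketch for `list`; `get_iterator()` appears here because the method itself iterates with it, and the constructor is assumed as before. `list` renders only inside IPython, so plain scripts should use the iterator instead.

```python
from google.datalab.ml import Models

models = Models()                  # assumed constructor
models.list(count=5)               # renders a table in an IPython environment
for m in models.get_iterator():    # the same iterator list() consumes internally
  print(m['name'])
```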

repo: googledatalab/pydatalab
path: google/datalab/ml/_cloud_models.py
func_name: ModelVersions.get_version_details
language: python

```python
def get_version_details(self, version_name):
  """Get details of a version.

  Args:
    version_name: the name of the version in short form, such as "v1".

  Returns: a dictionary containing the version details.
  """
  name = ('%s/versions/%s' % (self._full_model_name, version_name))
  return self._api.projects().models().versions().get(name=name).execute()
```
"""Get details of a version.
Args:
version: the name of the version in short form, such as "v1".
Returns: a dictionary containing the version details.
"""
name = ('%s/versions/%s' % (self._full_model_name, version_name))
return self._api.projects().models().versions().get(name=name).execute() | [
"def",
"get_version_details",
"(",
"self",
",",
"version_name",
")",
":",
"name",
"=",
"(",
"'%s/versions/%s'",
"%",
"(",
"self",
".",
"_full_model_name",
",",
"version_name",
")",
")",
"return",
"self",
".",
"_api",
".",
"projects",
"(",
")",
".",
"models",
"(",
")",
".",
"versions",
"(",
")",
".",
"get",
"(",
"name",
"=",
"name",
")",
".",
"execute",
"(",
")"
] | Get details of a version.
Args:
version: the name of the version in short form, such as "v1".
Returns: a dictionary containing the version details. | [
"Get",
"details",
"of",
"a",
"version",
"."

sha: d9031901d5bca22fe0d5925d204e6698df9852e1
url: https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_cloud_models.py#L166-L174
partition: train
idx: 237808
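
A hedged usage sketch; `ModelVersions('iris')` taking the short model name is an assumption, and the field access follows the dictionary the record says is returned.

```python
from google.datalab.ml import ModelVersions

versions = ModelVersions('iris')   # assumed: short model name
print(versions.get_version_details('v1')['deploymentUri'])
```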

repo: googledatalab/pydatalab
path: google/datalab/ml/_cloud_models.py
func_name: ModelVersions.deploy
language: python

```python
def deploy(self, version_name, path, runtime_version=None):
  """Deploy a model version to the cloud.

  Args:
    version_name: the name of the version in short form, such as "v1".
    path: the Google Cloud Storage path (gs://...) which contains the model files.
    runtime_version: the ML Engine runtime version as a string, example '1.2'.
        See https://cloud.google.com/ml-engine/docs/concepts/runtime-version-list
        for a list of runtimes. If None, the ML Engine service will pick one.

  Raises:
    Exception if the path is invalid or does not contain expected files.
    Exception if the service returns an invalid response.
  """
  if not path.startswith('gs://'):
    raise Exception('Invalid path. Only Google Cloud Storage path (gs://...) is accepted.')

  # If there is no "export.meta" or "saved_model.pb" under path but there is
  # path/model/export.meta or path/model/saved_model.pb, then append /model to the path.
  if not datalab.storage.Object.from_url(os.path.join(path, 'export.meta')).exists() and not \
     datalab.storage.Object.from_url(os.path.join(path, 'saved_model.pb')).exists():
    if datalab.storage.Object.from_url(os.path.join(path, 'model', 'export.meta')).exists() or \
       datalab.storage.Object.from_url(os.path.join(path, 'model', 'saved_model.pb')).exists():
      path = os.path.join(path, 'model')
    else:
      print('Cannot find export.meta or saved_model.pb, but continue with deployment anyway.')

  body = {'name': self._model_name}
  parent = 'projects/' + self._project_id
  try:
    self._api.projects().models().create(body=body, parent=parent).execute()
  except:
    # Trying to create an already existing model gets an error. Ignore it.
    pass

  body = {
      'name': version_name,
      'deployment_uri': path,
  }
  if runtime_version:
    body['runtime_version'] = runtime_version

  response = self._api.projects().models().versions().create(
      body=body, parent=self._full_model_name).execute()
  if 'name' not in response:
    raise Exception('Invalid response from service. "name" is not found.')
  _util.wait_for_long_running_operation(response['name'])
```
"""Deploy a model version to the cloud.
Args:
version_name: the name of the version in short form, such as "v1".
path: the Google Cloud Storage path (gs://...) which contains the model files.
runtime_version: the ML Engine runtime version as a string, example '1.2'.
See https://cloud.google.com/ml-engine/docs/concepts/runtime-version-list
for a list of runtimes. If None, the ML Engine service will pick one.
Raises: Exception if the path is invalid or does not contain expected files.
Exception if the service returns invalid response.
"""
if not path.startswith('gs://'):
raise Exception('Invalid path. Only Google Cloud Storage path (gs://...) is accepted.')
# If there is no "export.meta" or"saved_model.pb" under path but there is
# path/model/export.meta or path/model/saved_model.pb, then append /model to the path.
if not datalab.storage.Object.from_url(os.path.join(path, 'export.meta')).exists() and not \
datalab.storage.Object.from_url(os.path.join(path, 'saved_model.pb')).exists():
if datalab.storage.Object.from_url(os.path.join(path, 'model', 'export.meta')).exists() or \
datalab.storage.Object.from_url(os.path.join(path, 'model',
'saved_model.pb')).exists():
path = os.path.join(path, 'model')
else:
print('Cannot find export.meta or saved_model.pb, but continue with deployment anyway.')
body = {'name': self._model_name}
parent = 'projects/' + self._project_id
try:
self._api.projects().models().create(body=body, parent=parent).execute()
except:
# Trying to create an already existing model gets an error. Ignore it.
pass
body = {
'name': version_name,
'deployment_uri': path,
}
if runtime_version:
body['runtime_version'] = runtime_version
response = self._api.projects().models().versions().create(
body=body, parent=self._full_model_name).execute()
if 'name' not in response:
raise Exception('Invalid response from service. "name" is not found.')
_util.wait_for_long_running_operation(response['name']) | [
"def",
"deploy",
"(",
"self",
",",
"version_name",
",",
"path",
",",
"runtime_version",
"=",
"None",
")",
":",
"if",
"not",
"path",
".",
"startswith",
"(",
"'gs://'",
")",
":",
"raise",
"Exception",
"(",
"'Invalid path. Only Google Cloud Storage path (gs://...) is accepted.'",
")",
"# If there is no \"export.meta\" or\"saved_model.pb\" under path but there is",
"# path/model/export.meta or path/model/saved_model.pb, then append /model to the path.",
"if",
"not",
"datalab",
".",
"storage",
".",
"Object",
".",
"from_url",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'export.meta'",
")",
")",
".",
"exists",
"(",
")",
"and",
"not",
"datalab",
".",
"storage",
".",
"Object",
".",
"from_url",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'saved_model.pb'",
")",
")",
".",
"exists",
"(",
")",
":",
"if",
"datalab",
".",
"storage",
".",
"Object",
".",
"from_url",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'model'",
",",
"'export.meta'",
")",
")",
".",
"exists",
"(",
")",
"or",
"datalab",
".",
"storage",
".",
"Object",
".",
"from_url",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'model'",
",",
"'saved_model.pb'",
")",
")",
".",
"exists",
"(",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'model'",
")",
"else",
":",
"print",
"(",
"'Cannot find export.meta or saved_model.pb, but continue with deployment anyway.'",
")",
"body",
"=",
"{",
"'name'",
":",
"self",
".",
"_model_name",
"}",
"parent",
"=",
"'projects/'",
"+",
"self",
".",
"_project_id",
"try",
":",
"self",
".",
"_api",
".",
"projects",
"(",
")",
".",
"models",
"(",
")",
".",
"create",
"(",
"body",
"=",
"body",
",",
"parent",
"=",
"parent",
")",
".",
"execute",
"(",
")",
"except",
":",
"# Trying to create an already existing model gets an error. Ignore it.",
"pass",
"body",
"=",
"{",
"'name'",
":",
"version_name",
",",
"'deployment_uri'",
":",
"path",
",",
"}",
"if",
"runtime_version",
":",
"body",
"[",
"'runtime_version'",
"]",
"=",
"runtime_version",
"response",
"=",
"self",
".",
"_api",
".",
"projects",
"(",
")",
".",
"models",
"(",
")",
".",
"versions",
"(",
")",
".",
"create",
"(",
"body",
"=",
"body",
",",
"parent",
"=",
"self",
".",
"_full_model_name",
")",
".",
"execute",
"(",
")",
"if",
"'name'",
"not",
"in",
"response",
":",
"raise",
"Exception",
"(",
"'Invalid response from service. \"name\" is not found.'",
")",
"_util",
".",
"wait_for_long_running_operation",
"(",
"response",
"[",
"'name'",
"]",
")"
] | Deploy a model version to the cloud.
Args:
version_name: the name of the version in short form, such as "v1".
path: the Google Cloud Storage path (gs://...) which contains the model files.
runtime_version: the ML Engine runtime version as a string, example '1.2'.
See https://cloud.google.com/ml-engine/docs/concepts/runtime-version-list
for a list of runtimes. If None, the ML Engine service will pick one.
Raises: Exception if the path is invalid or does not contain expected files.
Exception if the service returns invalid response. | [
"Deploy",
"a",
"model",
"version",
"to",
"the",
"cloud",
"."

sha: d9031901d5bca22fe0d5925d204e6698df9852e1
url: https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_cloud_models.py#L176-L222
partition: train
idx: 237809
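
A hedged usage sketch for `deploy`; the bucket path is hypothetical and the constructor assumed. The call blocks until the service's long-running operation completes, per the final `wait_for_long_running_operation` call.

```python
from google.datalab.ml import ModelVersions

versions = ModelVersions('iris')        # assumed constructor
versions.deploy(
    version_name='v1',
    path='gs://my-bucket/iris/model',   # hypothetical GCS path holding the exported model
    runtime_version='1.2')
```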

repo: googledatalab/pydatalab
path: google/datalab/ml/_cloud_models.py
func_name: ModelVersions.delete
language: python

```python
def delete(self, version_name):
  """Delete a version of the model.

  Args:
    version_name: the name of the version in short form, such as "v1".
  """
  name = ('%s/versions/%s' % (self._full_model_name, version_name))
  response = self._api.projects().models().versions().delete(name=name).execute()
  if 'name' not in response:
    raise Exception('Invalid response from service. "name" is not found.')
  _util.wait_for_long_running_operation(response['name'])
```
"""Delete a version of model.
Args:
version_name: the name of the version in short form, such as "v1".
"""
name = ('%s/versions/%s' % (self._full_model_name, version_name))
response = self._api.projects().models().versions().delete(name=name).execute()
if 'name' not in response:
raise Exception('Invalid response from service. "name" is not found.')
_util.wait_for_long_running_operation(response['name']) | [
"def",
"delete",
"(",
"self",
",",
"version_name",
")",
":",
"name",
"=",
"(",
"'%s/versions/%s'",
"%",
"(",
"self",
".",
"_full_model_name",
",",
"version_name",
")",
")",
"response",
"=",
"self",
".",
"_api",
".",
"projects",
"(",
")",
".",
"models",
"(",
")",
".",
"versions",
"(",
")",
".",
"delete",
"(",
"name",
"=",
"name",
")",
".",
"execute",
"(",
")",
"if",
"'name'",
"not",
"in",
"response",
":",
"raise",
"Exception",
"(",
"'Invalid response from service. \"name\" is not found.'",
")",
"_util",
".",
"wait_for_long_running_operation",
"(",
"response",
"[",
"'name'",
"]",
")"
] | Delete a version of model.
Args:
version_name: the name of the version in short form, such as "v1". | [
"Delete",
"a",
"version",
"of",
"model",
"."

sha: d9031901d5bca22fe0d5925d204e6698df9852e1
url: https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_cloud_models.py#L224-L234
partition: train
idx: 237810
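
A hedged usage sketch for `delete`, with the same assumed constructor; like `deploy`, it waits on the long-running operation before returning.

```python
from google.datalab.ml import ModelVersions

versions = ModelVersions('iris')   # assumed constructor
versions.delete('v1')              # blocks until the delete operation finishes
```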

repo: googledatalab/pydatalab
path: google/datalab/ml/_cloud_models.py
func_name: ModelVersions.predict
language: python

```python
def predict(self, version_name, data):
  """Get prediction results for feature instances.

  Args:
    version_name: the name of the version used for prediction.
    data: typically a list of instances to be submitted for prediction. The format
        of each instance depends on the model. For example, a structured data model
        may require a csv line for each instance.
        Note that online prediction only works on models that take one placeholder
        value, such as a string encoding a csv line.

  Returns:
    A list of prediction results for the given instances. Each element is a
    dictionary representing the output mapping from the graph.
    An example:
      [{"predictions": 1, "score": [0.00078, 0.71406, 0.28515]},
       {"predictions": 1, "score": [0.00244, 0.99634, 0.00121]}]
  """
  full_version_name = ('%s/versions/%s' % (self._full_model_name, version_name))
  request = self._api.projects().predict(body={'instances': data},
                                         name=full_version_name)
  request.headers['user-agent'] = 'GoogleCloudDataLab/1.0'
  result = request.execute()
  if 'predictions' not in result:
    raise Exception('Invalid response from service. Cannot find "predictions" in response.')
  return result['predictions']
```
"""Get prediction results from features instances.
Args:
version_name: the name of the version used for prediction.
data: typically a list of instance to be submitted for prediction. The format of the
instance depends on the model. For example, structured data model may require
a csv line for each instance.
Note that online prediction only works on models that take one placeholder value,
such as a string encoding a csv line.
Returns:
A list of prediction results for given instances. Each element is a dictionary representing
output mapping from the graph.
An example:
[{"predictions": 1, "score": [0.00078, 0.71406, 0.28515]},
{"predictions": 1, "score": [0.00244, 0.99634, 0.00121]}]
"""
full_version_name = ('%s/versions/%s' % (self._full_model_name, version_name))
request = self._api.projects().predict(body={'instances': data},
name=full_version_name)
request.headers['user-agent'] = 'GoogleCloudDataLab/1.0'
result = request.execute()
if 'predictions' not in result:
raise Exception('Invalid response from service. Cannot find "predictions" in response.')
return result['predictions'] | [
"def",
"predict",
"(",
"self",
",",
"version_name",
",",
"data",
")",
":",
"full_version_name",
"=",
"(",
"'%s/versions/%s'",
"%",
"(",
"self",
".",
"_full_model_name",
",",
"version_name",
")",
")",
"request",
"=",
"self",
".",
"_api",
".",
"projects",
"(",
")",
".",
"predict",
"(",
"body",
"=",
"{",
"'instances'",
":",
"data",
"}",
",",
"name",
"=",
"full_version_name",
")",
"request",
".",
"headers",
"[",
"'user-agent'",
"]",
"=",
"'GoogleCloudDataLab/1.0'",
"result",
"=",
"request",
".",
"execute",
"(",
")",
"if",
"'predictions'",
"not",
"in",
"result",
":",
"raise",
"Exception",
"(",
"'Invalid response from service. Cannot find \"predictions\" in response.'",
")",
"return",
"result",
"[",
"'predictions'",
"]"
] | Get prediction results from features instances.
Args:
version_name: the name of the version used for prediction.
data: typically a list of instance to be submitted for prediction. The format of the
instance depends on the model. For example, structured data model may require
a csv line for each instance.
Note that online prediction only works on models that take one placeholder value,
such as a string encoding a csv line.
Returns:
A list of prediction results for given instances. Each element is a dictionary representing
output mapping from the graph.
An example:
[{"predictions": 1, "score": [0.00078, 0.71406, 0.28515]},
{"predictions": 1, "score": [0.00244, 0.99634, 0.00121]}] | [
"Get",
"prediction",
"results",
"from",
"features",
"instances",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_cloud_models.py#L236-L261 | train | 237,811 |
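A hedged usage sketch for the predict() method above; the model name, version name, and csv instances are illustrative assumptions, not values from this record:

```python
# Hypothetical call to ModelVersions.predict(); all names and data are assumptions.
from google.datalab.ml import ModelVersions

versions = ModelVersions('census')            # assumed model deployed in the project
instances = ['39, State-gov, 77516',          # one csv line per instance, since online
             '50, Self-emp-not-inc, 83311']   # prediction takes one placeholder value
results = versions.predict('v1', instances)   # assumed version name
for r in results:
    print(r.get('predictions'), r.get('score'))
```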
googledatalab/pydatalab | google/datalab/ml/_cloud_models.py | ModelVersions.list | def list(self):
"""List versions under the current model in a table view.
Raises:
Exception if it is called in a non-IPython environment.
"""
import IPython
# "self" is iterable (see __iter__() method).
data = [{'name': version['name'].split('/')[-1],
'deploymentUri': version['deploymentUri'], 'createTime': version['createTime']}
for version in self.get_iterator()]
IPython.display.display(
datalab.utils.commands.render_dictionary(data, ['name', 'deploymentUri', 'createTime'])) | python | def list(self):
"""List versions under the current model in a table view.
Raises:
Exception if it is called in a non-IPython environment.
"""
import IPython
# "self" is iterable (see __iter__() method).
data = [{'name': version['name'].split('/')[-1],
'deploymentUri': version['deploymentUri'], 'createTime': version['createTime']}
for version in self.get_iterator()]
IPython.display.display(
datalab.utils.commands.render_dictionary(data, ['name', 'deploymentUri', 'createTime'])) | [
"def",
"list",
"(",
"self",
")",
":",
"import",
"IPython",
"# \"self\" is iterable (see __iter__() method).",
"data",
"=",
"[",
"{",
"'name'",
":",
"version",
"[",
"'name'",
"]",
".",
"split",
"(",
")",
"[",
"-",
"1",
"]",
",",
"'deploymentUri'",
":",
"version",
"[",
"'deploymentUri'",
"]",
",",
"'createTime'",
":",
"version",
"[",
"'createTime'",
"]",
"}",
"for",
"version",
"in",
"self",
".",
"get_iterator",
"(",
")",
"]",
"IPython",
".",
"display",
".",
"display",
"(",
"datalab",
".",
"utils",
".",
"commands",
".",
"render_dictionary",
"(",
"data",
",",
"[",
"'name'",
",",
"'deploymentUri'",
",",
"'createTime'",
"]",
")",
")"
] | List versions under the current model in a table view.
Raises:
Exception if it is called in a non-IPython environment. | [
"List",
"versions",
"under",
"the",
"current",
"model",
"in",
"a",
"table",
"view",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_cloud_models.py#L273-L286 | train | 237,812 |
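A minimal check of the name-shortening step in list(); the sample resource name is an assumption:

```python
# Version resources are named 'projects/<p>/models/<m>/versions/<v>'; splitting
# on '/' and taking the last segment yields the short name shown in the table.
full_name = 'projects/my-project/models/census/versions/v1'  # made-up name
assert full_name.split('/')[-1] == 'v1'
```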
googledatalab/pydatalab | solutionbox/ml_workbench/xgboost/trainer/feature_transforms.py | create_feature_map | def create_feature_map(features, feature_indices, output_dir):
"""Returns feature_map about the transformed features.
feature_map includes information such as:
1, cat1=0
2, cat1=1
3, numeric1
...
Returns:
List in the form
[(index, feature_description)]
"""
feature_map = []
for name, info in feature_indices:
transform_name = features[name]['transform']
source_column = features[name]['source_column']
if transform_name in [IDENTITY_TRANSFORM, SCALE_TRANSFORM]:
feature_map.append((info['index_start'], name))
elif transform_name in [ONE_HOT_TRANSFORM, MULTI_HOT_TRANSFORM]:
vocab, _ = read_vocab_file(
os.path.join(output_dir, VOCAB_ANALYSIS_FILE % source_column))
for i, word in enumerate(vocab):
if transform_name == ONE_HOT_TRANSFORM:
feature_map.append((info['index_start'] + i, '%s=%s' % (source_column, word)))
elif transform_name == MULTI_HOT_TRANSFORM:
feature_map.append((info['index_start'] + i, '%s has "%s"' % (source_column, word)))
elif transform_name == IMAGE_TRANSFORM:
for i in range(info['size']):
feature_map.append((info['index_start'] + i, '%s image feature %d' % (source_column, i)))
return feature_map | python | def create_feature_map(features, feature_indices, output_dir):
"""Returns feature_map about the transformed features.
feature_map includes information such as:
1, cat1=0
2, cat1=1
3, numeric1
...
Returns:
List in the form
[(index, feature_description)]
"""
feature_map = []
for name, info in feature_indices:
transform_name = features[name]['transform']
source_column = features[name]['source_column']
if transform_name in [IDENTITY_TRANSFORM, SCALE_TRANSFORM]:
feature_map.append((info['index_start'], name))
elif transform_name in [ONE_HOT_TRANSFORM, MULTI_HOT_TRANSFORM]:
vocab, _ = read_vocab_file(
os.path.join(output_dir, VOCAB_ANALYSIS_FILE % source_column))
for i, word in enumerate(vocab):
if transform_name == ONE_HOT_TRANSFORM:
feature_map.append((info['index_start'] + i, '%s=%s' % (source_column, word)))
elif transform_name == MULTI_HOT_TRANSFORM:
feature_map.append((info['index_start'] + i, '%s has "%s"' % (source_column, word)))
elif transform_name == IMAGE_TRANSFORM:
for i in range(info['size']):
feature_map.append((info['index_start'] + i, '%s image feature %d' % (source_column, i)))
return feature_map | [
"def",
"create_feature_map",
"(",
"features",
",",
"feature_indices",
",",
"output_dir",
")",
":",
"feature_map",
"=",
"[",
"]",
"for",
"name",
",",
"info",
"in",
"feature_indices",
":",
"transform_name",
"=",
"features",
"[",
"name",
"]",
"[",
"'transform'",
"]",
"source_column",
"=",
"features",
"[",
"name",
"]",
"[",
"'source_column'",
"]",
"if",
"transform_name",
"in",
"[",
"IDENTITY_TRANSFORM",
",",
"SCALE_TRANSFORM",
"]",
":",
"feature_map",
".",
"append",
"(",
"(",
"info",
"[",
"'index_start'",
"]",
",",
"name",
")",
")",
"elif",
"transform_name",
"in",
"[",
"ONE_HOT_TRANSFORM",
",",
"MULTI_HOT_TRANSFORM",
"]",
":",
"vocab",
",",
"_",
"=",
"read_vocab_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"VOCAB_ANALYSIS_FILE",
"%",
"source_column",
")",
")",
"for",
"i",
",",
"word",
"in",
"enumerate",
"(",
"vocab",
")",
":",
"if",
"transform_name",
"==",
"ONE_HOT_TRANSFORM",
":",
"feature_map",
".",
"append",
"(",
"(",
"info",
"[",
"'index_start'",
"]",
"+",
"i",
",",
"'%s=%s'",
"%",
"(",
"source_column",
",",
"word",
")",
")",
")",
"elif",
"transform_name",
"==",
"MULTI_HOT_TRANSFORM",
":",
"feature_map",
".",
"append",
"(",
"(",
"info",
"[",
"'index_start'",
"]",
"+",
"i",
",",
"'%s has \"%s\"'",
"%",
"(",
"source_column",
",",
"word",
")",
")",
")",
"elif",
"transform_name",
"==",
"IMAGE_TRANSFORM",
":",
"for",
"i",
"in",
"range",
"(",
"info",
"[",
"'size'",
"]",
")",
":",
"feature_map",
".",
"append",
"(",
"(",
"info",
"[",
"'index_start'",
"]",
"+",
"i",
",",
"'%s image feature %d'",
"%",
"(",
"source_column",
",",
"i",
")",
")",
")",
"return",
"feature_map"
] | Returns feature_map about the transformed features.
feature_map includes information such as:
1, cat1=0
2, cat1=1
3, numeric1
...
Returns:
List in the form
[(index, feature_description)] | [
"Returns",
"feature_map",
"about",
"the",
"transformed",
"features",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/ml_workbench/xgboost/trainer/feature_transforms.py#L447-L477 | train | 237,813 |
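A standalone sketch of the index layout create_feature_map() produces; the feature config, index ranges, and vocabulary below are made-up stand-ins for the real analysis output:

```python
# Toy reproduction of the feature-map layout: identity/scale features take one
# slot, one-hot features take one slot per vocabulary entry.
features = {'age': {'transform': 'identity', 'source_column': 'age'},
            'city': {'transform': 'one_hot', 'source_column': 'city'}}
feature_indices = [('age', {'index_start': 0, 'size': 1}),
                   ('city', {'index_start': 1, 'size': 3})]
vocab = ['seattle', 'austin', 'boston']  # pretend contents of the vocab file

feature_map = []
for name, info in feature_indices:
    transform = features[name]['transform']
    source = features[name]['source_column']
    if transform == 'identity':
        feature_map.append((info['index_start'], name))
    elif transform == 'one_hot':
        for i, word in enumerate(vocab):
            feature_map.append((info['index_start'] + i, '%s=%s' % (source, word)))

print(feature_map)
# [(0, 'age'), (1, 'city=seattle'), (2, 'city=austin'), (3, 'city=boston')]
```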
googledatalab/pydatalab | datalab/bigquery/_view.py | View.create | def create(self, query):
""" Creates the view with the specified query.
Args:
query: the query to use for the View; either a string containing a SQL query or
a Query object.
Returns:
The View instance.
Raises:
Exception if the view couldn't be created or already exists and overwrite was False.
"""
if isinstance(query, _query.Query):
query = query.sql
try:
response = self._table._api.tables_insert(self._table.name, query=query)
except Exception as e:
raise e
if 'selfLink' in response:
return self
raise Exception("View %s could not be created as it already exists" % str(self)) | python | def create(self, query):
""" Creates the view with the specified query.
Args:
query: the query to use for the View; either a string containing a SQL query or
a Query object.
Returns:
The View instance.
Raises:
Exception if the view couldn't be created or already exists and overwrite was False.
"""
if isinstance(query, _query.Query):
query = query.sql
try:
response = self._table._api.tables_insert(self._table.name, query=query)
except Exception as e:
raise e
if 'selfLink' in response:
return self
raise Exception("View %s could not be created as it already exists" % str(self)) | [
"def",
"create",
"(",
"self",
",",
"query",
")",
":",
"if",
"isinstance",
"(",
"query",
",",
"_query",
".",
"Query",
")",
":",
"query",
"=",
"query",
".",
"sql",
"try",
":",
"response",
"=",
"self",
".",
"_table",
".",
"_api",
".",
"tables_insert",
"(",
"self",
".",
"_table",
".",
"name",
",",
"query",
"=",
"query",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e",
"if",
"'selfLink'",
"in",
"response",
":",
"return",
"self",
"raise",
"Exception",
"(",
"\"View %s could not be created as it already exists\"",
"%",
"str",
"(",
"self",
")",
")"
] | Creates the view with the specified query.
Args:
query: the query to use for the View; either a string containing a SQL query or
a Query object.
Returns:
The View instance.
Raises:
Exception if the view couldn't be created or already exists and overwrite was False. | [
"Creates",
"the",
"view",
"with",
"the",
"specified",
"query",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_view.py#L91-L110 | train | 237,814 |
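A hedged usage sketch for View.create(); the dataset, view, and query names are assumptions:

```python
# Create a view from a SQL string (legacy datalab API; names are made up).
import datalab.bigquery as bq

view = bq.View('mydataset.high_scores')
view.create('SELECT name, score FROM mydataset.scores WHERE score > 90')
```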
googledatalab/pydatalab | datalab/bigquery/_view.py | View.sample | def sample(self, fields=None, count=5, sampling=None, use_cache=True, dialect=None,
billing_tier=None):
"""Retrieves a sampling of data from the view.
Args:
fields: an optional list of field names to retrieve.
count: an optional count of rows to retrieve which is used if a specific
sampling is not specified.
sampling: an optional sampling strategy to apply to the view.
use_cache: whether to use cached results or not.
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A QueryResultsTable object containing the resulting data.
Raises:
Exception if the sample query could not be executed or the query response was malformed.
"""
return self._table.sample(fields=fields, count=count, sampling=sampling, use_cache=use_cache,
dialect=dialect, billing_tier=billing_tier) | python | def sample(self, fields=None, count=5, sampling=None, use_cache=True, dialect=None,
billing_tier=None):
"""Retrieves a sampling of data from the view.
Args:
fields: an optional list of field names to retrieve.
count: an optional count of rows to retrieve which is used if a specific
sampling is not specified.
sampling: an optional sampling strategy to apply to the view.
use_cache: whether to use cached results or not.
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A QueryResultsTable object containing the resulting data.
Raises:
Exception if the sample query could not be executed or the query response was malformed.
"""
return self._table.sample(fields=fields, count=count, sampling=sampling, use_cache=use_cache,
dialect=dialect, billing_tier=billing_tier) | [
"def",
"sample",
"(",
"self",
",",
"fields",
"=",
"None",
",",
"count",
"=",
"5",
",",
"sampling",
"=",
"None",
",",
"use_cache",
"=",
"True",
",",
"dialect",
"=",
"None",
",",
"billing_tier",
"=",
"None",
")",
":",
"return",
"self",
".",
"_table",
".",
"sample",
"(",
"fields",
"=",
"fields",
",",
"count",
"=",
"count",
",",
"sampling",
"=",
"sampling",
",",
"use_cache",
"=",
"use_cache",
",",
"dialect",
"=",
"dialect",
",",
"billing_tier",
"=",
"billing_tier",
")"
] | Retrieves a sampling of data from the view.
Args:
fields: an optional list of field names to retrieve.
count: an optional count of rows to retrieve which is used if a specific
sampling is not specified.
sampling: an optional sampling strategy to apply to the view.
use_cache: whether to use cached results or not.
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A QueryResultsTable object containing the resulting data.
Raises:
Exception if the sample query could not be executed or the query response was malformed. | [
"Retrieves",
"a",
"sampling",
"of",
"data",
"from",
"the",
"view",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_view.py#L112-L136 | train | 237,815 |
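A sketch combining sample() with a sampling strategy; the view, field names, and Sampling call are assumptions:

```python
# Sample roughly 10% of rows (hashed on 'name'), capped at 20, from the view.
import datalab.bigquery as bq

view = bq.View('mydataset.high_scores')
results = view.sample(fields=['name', 'score'],
                      sampling=bq.Sampling.hashed('name', percent=10, count=20))
```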
googledatalab/pydatalab | datalab/bigquery/_view.py | View.update | def update(self, friendly_name=None, description=None, query=None):
""" Selectively updates View information.
Any parameters that are None (the default) are not applied in the update.
Args:
friendly_name: if not None, the new friendly name.
description: if not None, the new description.
query: if not None, a new query string for the View.
"""
self._table._load_info()
if query is not None:
if isinstance(query, _query.Query):
query = query.sql
self._table._info['view'] = {'query': query}
self._table.update(friendly_name=friendly_name, description=description) | python | def update(self, friendly_name=None, description=None, query=None):
""" Selectively updates View information.
Any parameters that are None (the default) are not applied in the update.
Args:
friendly_name: if not None, the new friendly name.
description: if not None, the new description.
query: if not None, a new query string for the View.
"""
self._table._load_info()
if query is not None:
if isinstance(query, _query.Query):
query = query.sql
self._table._info['view'] = {'query': query}
self._table.update(friendly_name=friendly_name, description=description) | [
"def",
"update",
"(",
"self",
",",
"friendly_name",
"=",
"None",
",",
"description",
"=",
"None",
",",
"query",
"=",
"None",
")",
":",
"self",
".",
"_table",
".",
"_load_info",
"(",
")",
"if",
"query",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"query",
",",
"_query",
".",
"Query",
")",
":",
"query",
"=",
"query",
".",
"sql",
"self",
".",
"_table",
".",
"_info",
"[",
"'view'",
"]",
"=",
"{",
"'query'",
":",
"query",
"}",
"self",
".",
"_table",
".",
"update",
"(",
"friendly_name",
"=",
"friendly_name",
",",
"description",
"=",
"description",
")"
] | Selectively updates View information.
Any parameters that are None (the default) are not applied in the update.
Args:
friendly_name: if not None, the new friendly name.
description: if not None, the new description.
query: if not None, a new query string for the View. | [
"Selectively",
"updates",
"View",
"information",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_view.py#L149-L164 | train | 237,816 |
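A sketch of a selective update: only the description changes, because None-valued parameters are skipped. The view name is an assumption:

```python
# Update just the description; friendly_name and query stay untouched.
import datalab.bigquery as bq

bq.View('mydataset.high_scores').update(
    description='Players with scores above 90')
```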
googledatalab/pydatalab | datalab/bigquery/_view.py | View.results | def results(self, use_cache=True, dialect=None, billing_tier=None):
"""Materialize the view synchronously.
If you require more control over the execution, use execute() or execute_async().
Args:
use_cache: whether to use cached results or not.
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A QueryResultsTable containing the result set.
Raises:
Exception if the query could not be executed or query response was malformed.
"""
return self._materialization.results(use_cache=use_cache, dialect=dialect,
billing_tier=billing_tier) | python | def results(self, use_cache=True, dialect=None, billing_tier=None):
"""Materialize the view synchronously.
If you require more control over the execution, use execute() or execute_async().
Args:
use_cache: whether to use cached results or not.
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A QueryResultsTable containing the result set.
Raises:
Exception if the query could not be executed or query response was malformed.
"""
return self._materialization.results(use_cache=use_cache, dialect=dialect,
billing_tier=billing_tier) | [
"def",
"results",
"(",
"self",
",",
"use_cache",
"=",
"True",
",",
"dialect",
"=",
"None",
",",
"billing_tier",
"=",
"None",
")",
":",
"return",
"self",
".",
"_materialization",
".",
"results",
"(",
"use_cache",
"=",
"use_cache",
",",
"dialect",
"=",
"dialect",
",",
"billing_tier",
"=",
"billing_tier",
")"
] | Materialize the view synchronously.
If you require more control over the execution, use execute() or execute_async().
Args:
use_cache: whether to use cached results or not.
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A QueryResultsTable containing the result set.
Raises:
Exception if the query could not be executed or query response was malformed. | [
"Materialize",
"the",
"view",
"synchronously",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_view.py#L166-L187 | train | 237,817 |
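A sketch materializing the view synchronously; the view name and the to_dataframe() call are assumptions:

```python
# Materialize the view and pull the result set into a DataFrame.
import datalab.bigquery as bq

results = bq.View('mydataset.high_scores').results(use_cache=False)
df = results.to_dataframe()  # assuming QueryResultsTable exposes to_dataframe()
```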
googledatalab/pydatalab | datalab/bigquery/_view.py | View.execute_async | def execute_async(self, table_name=None, table_mode='create', use_cache=True, priority='high',
allow_large_results=False, dialect=None, billing_tier=None):
"""Materialize the View asynchronously.
Args:
table_name: the result table name; if None, then a temporary table will be used.
table_mode: one of 'create', 'overwrite' or 'append'. If 'create' (the default), the request
will fail if the table exists.
use_cache: whether to use past query results or ignore cache. Has no effect if destination is
specified (default True).
priority: one of 'low' or 'high' (default). Note that 'high' is more expensive, but is
better suited to exploratory analysis.
allow_large_results: whether to allow large results; i.e. compressed data over 100MB. This is
slower and requires a table_name to be specified (default False).
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A QueryJob for the materialization
Raises:
Exception (KeyError) if View could not be materialized.
"""
return self._materialization.execute_async(table_name=table_name, table_mode=table_mode,
use_cache=use_cache, priority=priority,
allow_large_results=allow_large_results,
dialect=dialect, billing_tier=billing_tier) | python | def execute_async(self, table_name=None, table_mode='create', use_cache=True, priority='high',
allow_large_results=False, dialect=None, billing_tier=None):
"""Materialize the View asynchronously.
Args:
table_name: the result table name; if None, then a temporary table will be used.
table_mode: one of 'create', 'overwrite' or 'append'. If 'create' (the default), the request
will fail if the table exists.
use_cache: whether to use past query results or ignore cache. Has no effect if destination is
specified (default True).
priority: one of 'low' or 'high' (default). Note that 'high' is more expensive, but is
better suited to exploratory analysis.
allow_large_results: whether to allow large results; i.e. compressed data over 100MB. This is
slower and requires a table_name to be specified (default False).
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A QueryJob for the materialization
Raises:
Exception (KeyError) if View could not be materialized.
"""
return self._materialization.execute_async(table_name=table_name, table_mode=table_mode,
use_cache=use_cache, priority=priority,
allow_large_results=allow_large_results,
dialect=dialect, billing_tier=billing_tier) | [
"def",
"execute_async",
"(",
"self",
",",
"table_name",
"=",
"None",
",",
"table_mode",
"=",
"'create'",
",",
"use_cache",
"=",
"True",
",",
"priority",
"=",
"'high'",
",",
"allow_large_results",
"=",
"False",
",",
"dialect",
"=",
"None",
",",
"billing_tier",
"=",
"None",
")",
":",
"return",
"self",
".",
"_materialization",
".",
"execute_async",
"(",
"table_name",
"=",
"table_name",
",",
"table_mode",
"=",
"table_mode",
",",
"use_cache",
"=",
"use_cache",
",",
"priority",
"=",
"priority",
",",
"allow_large_results",
"=",
"allow_large_results",
",",
"dialect",
"=",
"dialect",
",",
"billing_tier",
"=",
"billing_tier",
")"
] | Materialize the View asynchronously.
Args:
table_name: the result table name; if None, then a temporary table will be used.
table_mode: one of 'create', 'overwrite' or 'append'. If 'create' (the default), the request
will fail if the table exists.
use_cache: whether to use past query results or ignore cache. Has no effect if destination is
specified (default True).
priority: one of 'low' or 'high' (default). Note that 'high' is more expensive, but is
better suited to exploratory analysis.
allow_large_results: whether to allow large results; i.e. compressed data over 100MB. This is
slower and requires a table_name to be specified (default False).
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A QueryJob for the materialization
Raises:
Exception (KeyError) if View could not be materialized. | [
"Materialize",
"the",
"View",
"asynchronously",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_view.py#L189-L219 | train | 237,818 |
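An asynchronous materialization sketch; table names are assumptions, and wait() is assumed to block until the job completes:

```python
# Materialize into a named table, overwriting it and allowing large results.
import datalab.bigquery as bq

job = bq.View('mydataset.high_scores').execute_async(
    table_name='mydataset.high_scores_snapshot',
    table_mode='overwrite',
    allow_large_results=True)
job.wait()
```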
googledatalab/pydatalab | google/datalab/utils/commands/_utils.py | get_notebook_item | def get_notebook_item(name):
""" Get an item from the IPython environment. """
env = notebook_environment()
return google.datalab.utils.get_item(env, name) | python | def get_notebook_item(name):
""" Get an item from the IPython environment. """
env = notebook_environment()
return google.datalab.utils.get_item(env, name) | [
"def",
"get_notebook_item",
"(",
"name",
")",
":",
"env",
"=",
"notebook_environment",
"(",
")",
"return",
"google",
".",
"datalab",
".",
"utils",
".",
"get_item",
"(",
"env",
",",
"name",
")"
] | Get an item from the IPython environment. | [
"Get",
"an",
"item",
"from",
"the",
"IPython",
"environment",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/commands/_utils.py#L50-L53 | train | 237,819 |
googledatalab/pydatalab | google/datalab/utils/commands/_utils.py | _get_data_from_list_of_dicts | def _get_data_from_list_of_dicts(source, fields='*', first_row=0, count=-1, schema=None):
""" Helper function for _get_data that handles lists of dicts. """
if schema is None:
schema = google.datalab.bigquery.Schema.from_data(source)
fields = get_field_list(fields, schema)
gen = source[first_row:first_row + count] if count >= 0 else source
rows = [{'c': [{'v': row[c]} if c in row else {} for c in fields]} for row in gen]
return {'cols': _get_cols(fields, schema), 'rows': rows}, len(source) | python | def _get_data_from_list_of_dicts(source, fields='*', first_row=0, count=-1, schema=None):
""" Helper function for _get_data that handles lists of dicts. """
if schema is None:
schema = google.datalab.bigquery.Schema.from_data(source)
fields = get_field_list(fields, schema)
gen = source[first_row:first_row + count] if count >= 0 else source
rows = [{'c': [{'v': row[c]} if c in row else {} for c in fields]} for row in gen]
return {'cols': _get_cols(fields, schema), 'rows': rows}, len(source) | [
"def",
"_get_data_from_list_of_dicts",
"(",
"source",
",",
"fields",
"=",
"'*'",
",",
"first_row",
"=",
"0",
",",
"count",
"=",
"-",
"1",
",",
"schema",
"=",
"None",
")",
":",
"if",
"schema",
"is",
"None",
":",
"schema",
"=",
"google",
".",
"datalab",
".",
"bigquery",
".",
"Schema",
".",
"from_data",
"(",
"source",
")",
"fields",
"=",
"get_field_list",
"(",
"fields",
",",
"schema",
")",
"gen",
"=",
"source",
"[",
"first_row",
":",
"first_row",
"+",
"count",
"]",
"if",
"count",
">=",
"0",
"else",
"source",
"rows",
"=",
"[",
"{",
"'c'",
":",
"[",
"{",
"'v'",
":",
"row",
"[",
"c",
"]",
"}",
"if",
"c",
"in",
"row",
"else",
"{",
"}",
"for",
"c",
"in",
"fields",
"]",
"}",
"for",
"row",
"in",
"gen",
"]",
"return",
"{",
"'cols'",
":",
"_get_cols",
"(",
"fields",
",",
"schema",
")",
",",
"'rows'",
":",
"rows",
"}",
",",
"len",
"(",
"source",
")"
] | Helper function for _get_data that handles lists of dicts. | [
"Helper",
"function",
"for",
"_get_data",
"that",
"handles",
"lists",
"of",
"dicts",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/commands/_utils.py#L144-L151 | train | 237,820 |
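A standalone illustration of the row shape this helper emits (Google Charts DataTable JSON); the inputs are made up:

```python
# One {'c': [...]} entry per row, one {'v': value} cell per requested field,
# and an empty {} where a dict row lacks the field.
source = [{'a': 1, 'b': 'x'}, {'a': 2}]
fields = ['a', 'b']
rows = [{'c': [{'v': row[c]} if c in row else {} for c in fields]}
        for row in source]
print(rows)  # [{'c': [{'v': 1}, {'v': 'x'}]}, {'c': [{'v': 2}, {}]}]
```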
googledatalab/pydatalab | google/datalab/utils/commands/_utils.py | _get_data_from_list_of_lists | def _get_data_from_list_of_lists(source, fields='*', first_row=0, count=-1, schema=None):
""" Helper function for _get_data that handles lists of lists. """
if schema is None:
schema = google.datalab.bigquery.Schema.from_data(source)
fields = get_field_list(fields, schema)
gen = source[first_row:first_row + count] if count >= 0 else source
cols = [schema.find(name) for name in fields]
rows = [{'c': [{'v': row[i]} for i in cols]} for row in gen]
return {'cols': _get_cols(fields, schema), 'rows': rows}, len(source) | python | def _get_data_from_list_of_lists(source, fields='*', first_row=0, count=-1, schema=None):
""" Helper function for _get_data that handles lists of lists. """
if schema is None:
schema = google.datalab.bigquery.Schema.from_data(source)
fields = get_field_list(fields, schema)
gen = source[first_row:first_row + count] if count >= 0 else source
cols = [schema.find(name) for name in fields]
rows = [{'c': [{'v': row[i]} for i in cols]} for row in gen]
return {'cols': _get_cols(fields, schema), 'rows': rows}, len(source) | [
"def",
"_get_data_from_list_of_lists",
"(",
"source",
",",
"fields",
"=",
"'*'",
",",
"first_row",
"=",
"0",
",",
"count",
"=",
"-",
"1",
",",
"schema",
"=",
"None",
")",
":",
"if",
"schema",
"is",
"None",
":",
"schema",
"=",
"google",
".",
"datalab",
".",
"bigquery",
".",
"Schema",
".",
"from_data",
"(",
"source",
")",
"fields",
"=",
"get_field_list",
"(",
"fields",
",",
"schema",
")",
"gen",
"=",
"source",
"[",
"first_row",
":",
"first_row",
"+",
"count",
"]",
"if",
"count",
">=",
"0",
"else",
"source",
"cols",
"=",
"[",
"schema",
".",
"find",
"(",
"name",
")",
"for",
"name",
"in",
"fields",
"]",
"rows",
"=",
"[",
"{",
"'c'",
":",
"[",
"{",
"'v'",
":",
"row",
"[",
"i",
"]",
"}",
"for",
"i",
"in",
"cols",
"]",
"}",
"for",
"row",
"in",
"gen",
"]",
"return",
"{",
"'cols'",
":",
"_get_cols",
"(",
"fields",
",",
"schema",
")",
",",
"'rows'",
":",
"rows",
"}",
",",
"len",
"(",
"source",
")"
] | Helper function for _get_data that handles lists of lists. | [
"Helper",
"function",
"for",
"_get_data",
"that",
"handles",
"lists",
"of",
"lists",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/commands/_utils.py#L154-L162 | train | 237,821 |
googledatalab/pydatalab | google/datalab/utils/commands/_utils.py | _get_data_from_dataframe | def _get_data_from_dataframe(source, fields='*', first_row=0, count=-1, schema=None):
""" Helper function for _get_data that handles Pandas DataFrames. """
if schema is None:
schema = google.datalab.bigquery.Schema.from_data(source)
fields = get_field_list(fields, schema)
rows = []
if count < 0:
count = len(source.index)
df_slice = source.reset_index(drop=True)[first_row:first_row + count]
for index, data_frame_row in df_slice.iterrows():
row = data_frame_row.to_dict()
for key in list(row.keys()):
val = row[key]
if isinstance(val, pandas.Timestamp):
row[key] = val.to_pydatetime()
rows.append({'c': [{'v': row[c]} if c in row else {} for c in fields]})
cols = _get_cols(fields, schema)
return {'cols': cols, 'rows': rows}, len(source) | python | def _get_data_from_dataframe(source, fields='*', first_row=0, count=-1, schema=None):
""" Helper function for _get_data that handles Pandas DataFrames. """
if schema is None:
schema = google.datalab.bigquery.Schema.from_data(source)
fields = get_field_list(fields, schema)
rows = []
if count < 0:
count = len(source.index)
df_slice = source.reset_index(drop=True)[first_row:first_row + count]
for index, data_frame_row in df_slice.iterrows():
row = data_frame_row.to_dict()
for key in list(row.keys()):
val = row[key]
if isinstance(val, pandas.Timestamp):
row[key] = val.to_pydatetime()
rows.append({'c': [{'v': row[c]} if c in row else {} for c in fields]})
cols = _get_cols(fields, schema)
return {'cols': cols, 'rows': rows}, len(source) | [
"def",
"_get_data_from_dataframe",
"(",
"source",
",",
"fields",
"=",
"'*'",
",",
"first_row",
"=",
"0",
",",
"count",
"=",
"-",
"1",
",",
"schema",
"=",
"None",
")",
":",
"if",
"schema",
"is",
"None",
":",
"schema",
"=",
"google",
".",
"datalab",
".",
"bigquery",
".",
"Schema",
".",
"from_data",
"(",
"source",
")",
"fields",
"=",
"get_field_list",
"(",
"fields",
",",
"schema",
")",
"rows",
"=",
"[",
"]",
"if",
"count",
"<",
"0",
":",
"count",
"=",
"len",
"(",
"source",
".",
"index",
")",
"df_slice",
"=",
"source",
".",
"reset_index",
"(",
"drop",
"=",
"True",
")",
"[",
"first_row",
":",
"first_row",
"+",
"count",
"]",
"for",
"index",
",",
"data_frame_row",
"in",
"df_slice",
".",
"iterrows",
"(",
")",
":",
"row",
"=",
"data_frame_row",
".",
"to_dict",
"(",
")",
"for",
"key",
"in",
"list",
"(",
"row",
".",
"keys",
"(",
")",
")",
":",
"val",
"=",
"row",
"[",
"key",
"]",
"if",
"isinstance",
"(",
"val",
",",
"pandas",
".",
"Timestamp",
")",
":",
"row",
"[",
"key",
"]",
"=",
"val",
".",
"to_pydatetime",
"(",
")",
"rows",
".",
"append",
"(",
"{",
"'c'",
":",
"[",
"{",
"'v'",
":",
"row",
"[",
"c",
"]",
"}",
"if",
"c",
"in",
"row",
"else",
"{",
"}",
"for",
"c",
"in",
"fields",
"]",
"}",
")",
"cols",
"=",
"_get_cols",
"(",
"fields",
",",
"schema",
")",
"return",
"{",
"'cols'",
":",
"cols",
",",
"'rows'",
":",
"rows",
"}",
",",
"len",
"(",
"source",
")"
] | Helper function for _get_data that handles Pandas DataFrames. | [
"Helper",
"function",
"for",
"_get_data",
"that",
"handles",
"Pandas",
"DataFrames",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/commands/_utils.py#L165-L183 | train | 237,822 |
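A minimal demo of the Timestamp handling above, runnable with pandas alone:

```python
# pandas.Timestamp cells are converted to plain datetime before being packed
# into chart rows, since Timestamps don't JSON-encode directly.
import pandas

df = pandas.DataFrame({'when': [pandas.Timestamp('2017-01-01')]})
row = df.iloc[0].to_dict()
for key, val in row.items():
    if isinstance(val, pandas.Timestamp):
        row[key] = val.to_pydatetime()
print(type(row['when']).__name__)  # datetime
```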
googledatalab/pydatalab | google/datalab/utils/commands/_utils.py | parse_config_for_selected_keys | def parse_config_for_selected_keys(content, keys):
""" Parse a config from a magic cell body for selected config keys.
For example, if 'content' is:
config_item1: value1
config_item2: value2
config_item3: value3
and 'keys' are: [config_item1, config_item3]
The results will be a tuple of
1. The parsed config items (dict): {config_item1: value1, config_item3: value3}
2. The remaining content (string): config_item2: value2
Args:
content: the input content. A string. It has to be a yaml or JSON string.
keys: a list of keys to retrieve from content. Note that it only checks top level keys
in the dict.
Returns:
A tuple. First is the parsed config including only selected keys. Second is
the remaining content.
Raises:
Exception if the content is not a valid yaml or JSON string.
"""
config_items = {key: None for key in keys}
if not content:
return config_items, content
stripped = content.strip()
if len(stripped) == 0:
return {}, None
elif stripped[0] == '{':
config = json.loads(content)
else:
config = yaml.load(content)
if not isinstance(config, dict):
raise ValueError('Invalid config.')
for key in keys:
config_items[key] = config.pop(key, None)
if not config:
return config_items, None
if stripped[0] == '{':
content_out = json.dumps(config, indent=4)
else:
content_out = yaml.dump(config, default_flow_style=False)
return config_items, content_out | python | def parse_config_for_selected_keys(content, keys):
""" Parse a config from a magic cell body for selected config keys.
For example, if 'content' is:
config_item1: value1
config_item2: value2
config_item3: value3
and 'keys' are: [config_item1, config_item3]
The result will be a tuple of
1. The parsed config items (dict): {config_item1: value1, config_item3: value3}
2. The remaining content (string): config_item2: value2
Args:
content: the input content. A string. It has to be a yaml or JSON string.
keys: a list of keys to retrieve from content. Note that it only checks top level keys
in the dict.
Returns:
A tuple. First is the parsed config including only selected keys. Second is
the remaining content.
Raises:
Exception if the content is not a valid yaml or JSON string.
"""
config_items = {key: None for key in keys}
if not content:
return config_items, content
stripped = content.strip()
if len(stripped) == 0:
return {}, None
elif stripped[0] == '{':
config = json.loads(content)
else:
config = yaml.load(content)
if not isinstance(config, dict):
raise ValueError('Invalid config.')
for key in keys:
config_items[key] = config.pop(key, None)
if not config:
return config_items, None
if stripped[0] == '{':
content_out = json.dumps(config, indent=4)
else:
content_out = yaml.dump(config, default_flow_style=False)
return config_items, content_out | [
"def",
"parse_config_for_selected_keys",
"(",
"content",
",",
"keys",
")",
":",
"config_items",
"=",
"{",
"key",
":",
"None",
"for",
"key",
"in",
"keys",
"}",
"if",
"not",
"content",
":",
"return",
"config_items",
",",
"content",
"stripped",
"=",
"content",
".",
"strip",
"(",
")",
"if",
"len",
"(",
"stripped",
")",
"==",
"0",
":",
"return",
"{",
"}",
",",
"None",
"elif",
"stripped",
"[",
"0",
"]",
"==",
"'{'",
":",
"config",
"=",
"json",
".",
"loads",
"(",
"content",
")",
"else",
":",
"config",
"=",
"yaml",
".",
"load",
"(",
"content",
")",
"if",
"not",
"isinstance",
"(",
"config",
",",
"dict",
")",
":",
"raise",
"ValueError",
"(",
"'Invalid config.'",
")",
"for",
"key",
"in",
"keys",
":",
"config_items",
"[",
"key",
"]",
"=",
"config",
".",
"pop",
"(",
"key",
",",
"None",
")",
"if",
"not",
"config",
":",
"return",
"config_items",
",",
"None",
"if",
"stripped",
"[",
"0",
"]",
"==",
"'{'",
":",
"content_out",
"=",
"json",
".",
"dumps",
"(",
"config",
",",
"indent",
"=",
"4",
")",
"else",
":",
"content_out",
"=",
"yaml",
".",
"dump",
"(",
"config",
",",
"default_flow_style",
"=",
"False",
")",
"return",
"config_items",
",",
"content_out"
] | Parse a config from a magic cell body for selected config keys.
For example, if 'content' is:
config_item1: value1
config_item2: value2
config_item3: value3
and 'keys' are: [config_item1, config_item3]
The results will be a tuple of
1. The parsed config items (dict): {config_item1: value1, config_item3: value3}
2. The remaining content (string): config_item2: value2
Args:
content: the input content. A string. It has to be a yaml or JSON string.
keys: a list of keys to retrieve from content. Note that it only checks top level keys
in the dict.
Returns:
A tuple. First is the parsed config including only selected keys. Second is
the remaining content.
Raises:
Exception if the content is not a valid yaml or JSON string. | [
"Parse",
"a",
"config",
"from",
"a",
"magic",
"cell",
"body",
"for",
"selected",
"config",
"keys",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/commands/_utils.py#L341-L393 | train | 237,823 |
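A runnable check of the behavior described in the docstring; the import path assumes the private module shown in this record:

```python
# Split selected top-level keys out of a YAML config cell body.
from google.datalab.utils.commands._utils import parse_config_for_selected_keys

content = 'config_item1: value1\nconfig_item2: value2\nconfig_item3: value3\n'
selected, remaining = parse_config_for_selected_keys(
    content, ['config_item1', 'config_item3'])
print(selected)   # {'config_item1': 'value1', 'config_item3': 'value3'}
print(remaining)  # config_item2: value2
```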
googledatalab/pydatalab | google/datalab/utils/commands/_utils.py | chart_html | def chart_html(driver_name, chart_type, source, chart_options=None, fields='*', refresh_interval=0,
refresh_data=None, control_defaults=None, control_ids=None, schema=None):
""" Return HTML for a chart.
Args:
driver_name: the name of the chart driver. Currently we support 'plotly' or 'gcharts'.
chart_type: string specifying type of chart.
source: the data source for the chart. Can be actual data (e.g. list) or the name of
a data source (e.g. the name of a query module).
chart_options: a dictionary of options for the chart. Can contain a 'controls' entry
specifying controls. Other entries are passed as JSON to Google Charts.
fields: the fields to chart. Can be '*' for all fields (only sensible if the columns are
ordered; e.g. a Query or list of lists, but not a list of dictionaries); otherwise a
string containing a comma-separated list of field names.
refresh_interval: a time in seconds after which the chart data will be refreshed. 0 if the
chart should not be refreshed (i.e. the data is static).
refresh_data: if the source is a list or other raw data, this is a YAML string containing
metadata needed to support calls to refresh (get_chart_data).
control_defaults: the default variable values for controls that are shared across charts
including this one.
control_ids: the DIV IDs for controls that are shared across charts including this one.
schema: an optional schema for the data; if not supplied one will be inferred.
Returns:
A string containing the HTML for the chart.
"""
div_id = _html.Html.next_id()
controls_html = ''
if control_defaults is None:
control_defaults = {}
if control_ids is None:
control_ids = []
if chart_options is not None and 'variables' in chart_options:
controls = chart_options['variables']
del chart_options['variables'] # Just to make sure GCharts doesn't see them.
controls_html, defaults, ids = parse_control_options(controls)
# We augment what we are passed so that in principle we can have controls that are
# shared by charts as well as controls that are specific to a chart.
control_defaults.update(defaults)
control_ids.extend(ids)
_HTML_TEMPLATE = """
<div class="bqgc-container">
{controls}
<div class="bqgc {extra_class}" id="{id}">
</div>
</div>
<script src="/static/components/requirejs/require.js"></script>
<script>
require.config({{
paths: {{
base: '/static/base',
d3: '//cdnjs.cloudflare.com/ajax/libs/d3/3.4.13/d3',
plotly: 'https://cdn.plot.ly/plotly-1.5.1.min.js?noext',
jquery: '//ajax.googleapis.com/ajax/libs/jquery/2.0.0/jquery.min'
}},
map: {{
'*': {{
datalab: 'nbextensions/gcpdatalab'
}}
}},
shim: {{
plotly: {{
deps: ['d3', 'jquery'],
exports: 'plotly'
}}
}}
}});
require(['datalab/charting',
'datalab/element!{id}',
'base/js/events',
'datalab/style!/nbextensions/gcpdatalab/charting.css'
],
function(charts, dom, events) {{
charts.render(
'{driver}',
dom,
events,
'{chart_type}',
{control_ids},
{data},
{options},
{refresh_data},
{refresh_interval},
{total_rows});
}}
);
</script>
"""
count = 25 if chart_type == 'paged_table' else -1
data, total_count = get_data(source, fields, control_defaults, 0, count, schema)
if refresh_data is None:
if isinstance(source, basestring):
source_index = get_data_source_index(source)
refresh_data = {'source_index': source_index, 'name': source_index}
else:
refresh_data = {'name': 'raw data'}
refresh_data['fields'] = fields
# TODO(gram): check if we need to augment env with user_ns
return _HTML_TEMPLATE \
.format(driver=driver_name,
controls=controls_html,
id=div_id,
chart_type=chart_type,
extra_class=" bqgc-controlled" if len(controls_html) else '',
data=json.dumps(data, cls=google.datalab.utils.JSONEncoder),
options=json.dumps(chart_options, cls=google.datalab.utils.JSONEncoder),
refresh_data=json.dumps(refresh_data, cls=google.datalab.utils.JSONEncoder),
refresh_interval=refresh_interval,
control_ids=str(control_ids),
total_rows=total_count) | python | def chart_html(driver_name, chart_type, source, chart_options=None, fields='*', refresh_interval=0,
refresh_data=None, control_defaults=None, control_ids=None, schema=None):
""" Return HTML for a chart.
Args:
driver_name: the name of the chart driver. Currently we support 'plotly' or 'gcharts'.
chart_type: string specifying type of chart.
source: the data source for the chart. Can be actual data (e.g. list) or the name of
a data source (e.g. the name of a query module).
chart_options: a dictionary of options for the chart. Can contain a 'controls' entry
specifying controls. Other entries are passed as JSON to Google Charts.
fields: the fields to chart. Can be '*' for all fields (only sensible if the columns are
ordered; e.g. a Query or list of lists, but not a list of dictionaries); otherwise a
string containing a comma-separated list of field names.
refresh_interval: a time in seconds after which the chart data will be refreshed. 0 if the
chart should not be refreshed (i.e. the data is static).
refresh_data: if the source is a list or other raw data, this is a YAML string containing
metadata needed to support calls to refresh (get_chart_data).
control_defaults: the default variable values for controls that are shared across charts
including this one.
control_ids: the DIV IDs for controls that are shared across charts including this one.
schema: an optional schema for the data; if not supplied one will be inferred.
Returns:
A string containing the HTML for the chart.
"""
div_id = _html.Html.next_id()
controls_html = ''
if control_defaults is None:
control_defaults = {}
if control_ids is None:
control_ids = []
if chart_options is not None and 'variables' in chart_options:
controls = chart_options['variables']
del chart_options['variables'] # Just to make sure GCharts doesn't see them.
controls_html, defaults, ids = parse_control_options(controls)
# We augment what we are passed so that in principle we can have controls that are
# shared by charts as well as controls that are specific to a chart.
control_defaults.update(defaults)
control_ids.extend(ids)
_HTML_TEMPLATE = """
<div class="bqgc-container">
{controls}
<div class="bqgc {extra_class}" id="{id}">
</div>
</div>
<script src="/static/components/requirejs/require.js"></script>
<script>
require.config({{
paths: {{
base: '/static/base',
d3: '//cdnjs.cloudflare.com/ajax/libs/d3/3.4.13/d3',
plotly: 'https://cdn.plot.ly/plotly-1.5.1.min.js?noext',
jquery: '//ajax.googleapis.com/ajax/libs/jquery/2.0.0/jquery.min'
}},
map: {{
'*': {{
datalab: 'nbextensions/gcpdatalab'
}}
}},
shim: {{
plotly: {{
deps: ['d3', 'jquery'],
exports: 'plotly'
}}
}}
}});
require(['datalab/charting',
'datalab/element!{id}',
'base/js/events',
'datalab/style!/nbextensions/gcpdatalab/charting.css'
],
function(charts, dom, events) {{
charts.render(
'{driver}',
dom,
events,
'{chart_type}',
{control_ids},
{data},
{options},
{refresh_data},
{refresh_interval},
{total_rows});
}}
);
</script>
"""
count = 25 if chart_type == 'paged_table' else -1
data, total_count = get_data(source, fields, control_defaults, 0, count, schema)
if refresh_data is None:
if isinstance(source, basestring):
source_index = get_data_source_index(source)
refresh_data = {'source_index': source_index, 'name': source_index}
else:
refresh_data = {'name': 'raw data'}
refresh_data['fields'] = fields
# TODO(gram): check if we need to augment env with user_ns
return _HTML_TEMPLATE \
.format(driver=driver_name,
controls=controls_html,
id=div_id,
chart_type=chart_type,
extra_class=" bqgc-controlled" if len(controls_html) else '',
data=json.dumps(data, cls=google.datalab.utils.JSONEncoder),
options=json.dumps(chart_options, cls=google.datalab.utils.JSONEncoder),
refresh_data=json.dumps(refresh_data, cls=google.datalab.utils.JSONEncoder),
refresh_interval=refresh_interval,
control_ids=str(control_ids),
total_rows=total_count) | [
"def",
"chart_html",
"(",
"driver_name",
",",
"chart_type",
",",
"source",
",",
"chart_options",
"=",
"None",
",",
"fields",
"=",
"'*'",
",",
"refresh_interval",
"=",
"0",
",",
"refresh_data",
"=",
"None",
",",
"control_defaults",
"=",
"None",
",",
"control_ids",
"=",
"None",
",",
"schema",
"=",
"None",
")",
":",
"div_id",
"=",
"_html",
".",
"Html",
".",
"next_id",
"(",
")",
"controls_html",
"=",
"''",
"if",
"control_defaults",
"is",
"None",
":",
"control_defaults",
"=",
"{",
"}",
"if",
"control_ids",
"is",
"None",
":",
"control_ids",
"=",
"[",
"]",
"if",
"chart_options",
"is",
"not",
"None",
"and",
"'variables'",
"in",
"chart_options",
":",
"controls",
"=",
"chart_options",
"[",
"'variables'",
"]",
"del",
"chart_options",
"[",
"'variables'",
"]",
"# Just to make sure GCharts doesn't see them.",
"controls_html",
",",
"defaults",
",",
"ids",
"=",
"parse_control_options",
"(",
"controls",
")",
"# We augment what we are passed so that in principle we can have controls that are",
"# shared by charts as well as controls that are specific to a chart.",
"control_defaults",
".",
"update",
"(",
"defaults",
")",
"control_ids",
".",
"extend",
"(",
"ids",
")",
",",
"_HTML_TEMPLATE",
"=",
"\"\"\"\n <div class=\"bqgc-container\">\n {controls}\n <div class=\"bqgc {extra_class}\" id=\"{id}\">\n </div>\n </div>\n <script src=\"/static/components/requirejs/require.js\"></script>\n <script>\n require.config({{\n paths: {{\n base: '/static/base',\n d3: '//cdnjs.cloudflare.com/ajax/libs/d3/3.4.13/d3',\n plotly: 'https://cdn.plot.ly/plotly-1.5.1.min.js?noext',\n jquery: '//ajax.googleapis.com/ajax/libs/jquery/2.0.0/jquery.min'\n }},\n map: {{\n '*': {{\n datalab: 'nbextensions/gcpdatalab'\n }}\n }},\n shim: {{\n plotly: {{\n deps: ['d3', 'jquery'],\n exports: 'plotly'\n }}\n }}\n }});\n\n require(['datalab/charting',\n 'datalab/element!{id}',\n 'base/js/events',\n 'datalab/style!/nbextensions/gcpdatalab/charting.css'\n ],\n function(charts, dom, events) {{\n charts.render(\n '{driver}',\n dom,\n events,\n '{chart_type}',\n {control_ids},\n {data},\n {options},\n {refresh_data},\n {refresh_interval},\n {total_rows});\n }}\n );\n </script>\n \"\"\"",
"count",
"=",
"25",
"if",
"chart_type",
"==",
"'paged_table'",
"else",
"-",
"1",
"data",
",",
"total_count",
"=",
"get_data",
"(",
"source",
",",
"fields",
",",
"control_defaults",
",",
"0",
",",
"count",
",",
"schema",
")",
"if",
"refresh_data",
"is",
"None",
":",
"if",
"isinstance",
"(",
"source",
",",
"basestring",
")",
":",
"source_index",
"=",
"get_data_source_index",
"(",
"source",
")",
"refresh_data",
"=",
"{",
"'source_index'",
":",
"source_index",
",",
"'name'",
":",
"source_index",
"}",
"else",
":",
"refresh_data",
"=",
"{",
"'name'",
":",
"'raw data'",
"}",
"refresh_data",
"[",
"'fields'",
"]",
"=",
"fields",
"# TODO(gram): check if we need to augment env with user_ns",
"return",
"_HTML_TEMPLATE",
".",
"format",
"(",
"driver",
"=",
"driver_name",
",",
"controls",
"=",
"controls_html",
",",
"id",
"=",
"div_id",
",",
"chart_type",
"=",
"chart_type",
",",
"extra_class",
"=",
"\" bqgc-controlled\"",
"if",
"len",
"(",
"controls_html",
")",
"else",
"''",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"data",
",",
"cls",
"=",
"google",
".",
"datalab",
".",
"utils",
".",
"JSONEncoder",
")",
",",
"options",
"=",
"json",
".",
"dumps",
"(",
"chart_options",
",",
"cls",
"=",
"google",
".",
"datalab",
".",
"utils",
".",
"JSONEncoder",
")",
",",
"refresh_data",
"=",
"json",
".",
"dumps",
"(",
"refresh_data",
",",
"cls",
"=",
"google",
".",
"datalab",
".",
"utils",
".",
"JSONEncoder",
")",
",",
"refresh_interval",
"=",
"refresh_interval",
",",
"control_ids",
"=",
"str",
"(",
"control_ids",
")",
",",
"total_rows",
"=",
"total_count",
")"
] | Return HTML for a chart.
Args:
driver_name: the name of the chart driver. Currently we support 'plotly' or 'gcharts'.
chart_type: string specifying type of chart.
source: the data source for the chart. Can be actual data (e.g. list) or the name of
a data source (e.g. the name of a query module).
chart_options: a dictionary of options for the chart. Can contain a 'controls' entry
specifying controls. Other entries are passed as JSON to Google Charts.
fields: the fields to chart. Can be '*' for all fields (only sensible if the columns are
ordered; e.g. a Query or list of lists, but not a list of dictionaries); otherwise a
string containing a comma-separated list of field names.
refresh_interval: a time in seconds after which the chart data will be refreshed. 0 if the
chart should not be refreshed (i.e. the data is static).
refresh_data: if the source is a list or other raw data, this is a YAML string containing
metadata needed to support calls to refresh (get_chart_data).
control_defaults: the default variable values for controls that are shared across charts
including this one.
control_ids: the DIV IDs for controls that are shared across charts including this one.
schema: an optional schema for the data; if not supplied one will be inferred.
Returns:
A string containing the HTML for the chart. | [
"Return",
"HTML",
"for",
"a",
"chart",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/commands/_utils.py#L614-L727 | train | 237,824 |
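A tiny sketch of the template-substitution step at the heart of chart_html(); the template and values here are toy stand-ins, not the real require.js template:

```python
# Chart data and options are JSON-encoded and spliced into an HTML/require.js
# template keyed by a unique div id.
import json

template = '<div id="{id}"></div><script>render({data}, {options});</script>'
html = template.format(id='chart_1',
                       data=json.dumps({'cols': [], 'rows': []}),
                       options=json.dumps({'title': 'demo'}))
print(html)
```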
googledatalab/pydatalab | google/datalab/bigquery/_sampling.py | Sampling.default | def default(fields=None, count=5):
"""Provides a simple default sampling strategy which limits the result set by a count.
Args:
fields: an optional list of field names to retrieve.
count: optional number of rows to limit the sampled results to.
Returns:
A sampling function that can be applied to get a random sampling.
"""
projection = Sampling._create_projection(fields)
return lambda sql: 'SELECT %s FROM (%s) LIMIT %d' % (projection, sql, count) | python | def default(fields=None, count=5):
"""Provides a simple default sampling strategy which limits the result set by a count.
Args:
fields: an optional list of field names to retrieve.
count: optional number of rows to limit the sampled results to.
Returns:
A sampling function that can be applied to get a random sampling.
"""
projection = Sampling._create_projection(fields)
return lambda sql: 'SELECT %s FROM (%s) LIMIT %d' % (projection, sql, count) | [
"def",
"default",
"(",
"fields",
"=",
"None",
",",
"count",
"=",
"5",
")",
":",
"projection",
"=",
"Sampling",
".",
"_create_projection",
"(",
"fields",
")",
"return",
"lambda",
"sql",
":",
"'SELECT %s FROM (%s) LIMIT %d'",
"%",
"(",
"projection",
",",
"sql",
",",
"count",
")"
] | Provides a simple default sampling strategy which limits the result set by a count.
Args:
fields: an optional list of field names to retrieve.
count: optional number of rows to limit the sampled results to.
Returns:
A sampling function that can be applied to get a random sampling. | [
"Provides",
"a",
"simple",
"default",
"sampling",
"strategy",
"which",
"limits",
"the",
"result",
"set",
"by",
"a",
"count",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_sampling.py#L44-L54 | train | 237,825 |
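A sketch showing that a sampling is just a SQL-to-SQL function; the table name is an assumption:

```python
# Wrap a query with a projection and LIMIT using the default strategy.
import google.datalab.bigquery as bq

sampling = bq.Sampling.default(fields=['name', 'score'], count=3)
print(sampling('SELECT * FROM mydataset.scores'))
# -> SELECT <projection> FROM (SELECT * FROM mydataset.scores) LIMIT 3
```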
googledatalab/pydatalab | google/datalab/bigquery/_sampling.py | Sampling.sorted | def sorted(field_name, ascending=True, fields=None, count=5):
"""Provides a sampling strategy that picks from an ordered set of rows.
Args:
field_name: the name of the field to sort the rows by.
ascending: whether to sort in ascending direction or not.
fields: an optional list of field names to retrieve.
count: optional number of rows to limit the sampled results to.
Returns:
A sampling function that can be applied to get the initial few rows.
"""
if field_name is None:
raise Exception('Sort field must be specified')
direction = '' if ascending else ' DESC'
projection = Sampling._create_projection(fields)
return lambda sql: 'SELECT %s FROM (%s) ORDER BY %s%s LIMIT %d' % (projection, sql, field_name,
direction, count) | python | def sorted(field_name, ascending=True, fields=None, count=5):
"""Provides a sampling strategy that picks from an ordered set of rows.
Args:
field_name: the name of the field to sort the rows by.
ascending: whether to sort in ascending direction or not.
fields: an optional list of field names to retrieve.
count: optional number of rows to limit the sampled results to.
Returns:
A sampling function that can be applied to get the initial few rows.
"""
if field_name is None:
raise Exception('Sort field must be specified')
direction = '' if ascending else ' DESC'
projection = Sampling._create_projection(fields)
return lambda sql: 'SELECT %s FROM (%s) ORDER BY %s%s LIMIT %d' % (projection, sql, field_name,
direction, count) | [
"def",
"sorted",
"(",
"field_name",
",",
"ascending",
"=",
"True",
",",
"fields",
"=",
"None",
",",
"count",
"=",
"5",
")",
":",
"if",
"field_name",
"is",
"None",
":",
"raise",
"Exception",
"(",
"'Sort field must be specified'",
")",
"direction",
"=",
"''",
"if",
"ascending",
"else",
"' DESC'",
"projection",
"=",
"Sampling",
".",
"_create_projection",
"(",
"fields",
")",
"return",
"lambda",
"sql",
":",
"'SELECT %s FROM (%s) ORDER BY %s%s LIMIT %d'",
"%",
"(",
"projection",
",",
"sql",
",",
"field_name",
",",
"direction",
",",
"count",
")"
] | Provides a sampling strategy that picks from an ordered set of rows.
Args:
field_name: the name of the field to sort the rows by.
ascending: whether to sort in ascending direction or not.
fields: an optional list of field names to retrieve.
count: optional number of rows to limit the sampled results to.
Returns:
A sampling function that can be applied to get the initial few rows. | [
"Provides",
"a",
"sampling",
"strategy",
"that",
"picks",
"from",
"an",
"ordered",
"set",
"of",
"rows",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_sampling.py#L57-L73 | train | 237,826 |
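A top-N sketch using the sorted strategy; the table and field names are assumptions:

```python
# Descending sort keeps the five highest scores.
import google.datalab.bigquery as bq

top5 = bq.Sampling.sorted('score', ascending=False, count=5)
print(top5('SELECT * FROM mydataset.scores'))
# -> SELECT * FROM (SELECT * FROM mydataset.scores) ORDER BY score DESC LIMIT 5
```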
googledatalab/pydatalab | google/datalab/bigquery/_sampling.py | Sampling.hashed | def hashed(field_name, percent, fields=None, count=0):
"""Provides a sampling strategy based on hashing and selecting a percentage of data.
Args:
field_name: the name of the field to hash.
percent: the percentage of the resulting hashes to select.
fields: an optional list of field names to retrieve.
count: optional maximum count of rows to pick.
Returns:
A sampling function that can be applied to get a hash-based sampling.
"""
if field_name is None:
raise Exception('Hash field must be specified')
def _hashed_sampling(sql):
projection = Sampling._create_projection(fields)
sql = 'SELECT %s FROM (%s) WHERE MOD(ABS(FARM_FINGERPRINT(CAST(%s AS STRING))), 100) < %d' % \
(projection, sql, field_name, percent)
if count != 0:
sql = '%s LIMIT %d' % (sql, count)
return sql
return _hashed_sampling | python | def hashed(field_name, percent, fields=None, count=0):
"""Provides a sampling strategy based on hashing and selecting a percentage of data.
Args:
field_name: the name of the field to hash.
percent: the percentage of the resulting hashes to select.
fields: an optional list of field names to retrieve.
count: optional maximum count of rows to pick.
Returns:
A sampling function that can be applied to get a hash-based sampling.
"""
if field_name is None:
raise Exception('Hash field must be specified')
def _hashed_sampling(sql):
projection = Sampling._create_projection(fields)
sql = 'SELECT %s FROM (%s) WHERE MOD(ABS(FARM_FINGERPRINT(CAST(%s AS STRING))), 100) < %d' % \
(projection, sql, field_name, percent)
if count != 0:
sql = '%s LIMIT %d' % (sql, count)
return sql
return _hashed_sampling | [
"def",
"hashed",
"(",
"field_name",
",",
"percent",
",",
"fields",
"=",
"None",
",",
"count",
"=",
"0",
")",
":",
"if",
"field_name",
"is",
"None",
":",
"raise",
"Exception",
"(",
"'Hash field must be specified'",
")",
"def",
"_hashed_sampling",
"(",
"sql",
")",
":",
"projection",
"=",
"Sampling",
".",
"_create_projection",
"(",
"fields",
")",
"sql",
"=",
"'SELECT %s FROM (%s) WHERE MOD(ABS(FARM_FINGERPRINT(CAST(%s AS STRING))), 100) < %d'",
"%",
"(",
"projection",
",",
"sql",
",",
"field_name",
",",
"percent",
")",
"if",
"count",
"!=",
"0",
":",
"sql",
"=",
"'%s LIMIT %d'",
"%",
"(",
"sql",
",",
"count",
")",
"return",
"sql",
"return",
"_hashed_sampling"
] | Provides a sampling strategy based on hashing and selecting a percentage of data.
Args:
field_name: the name of the field to hash.
percent: the percentage of the resulting hashes to select.
fields: an optional list of field names to retrieve.
count: optional maximum count of rows to pick.
Returns:
A sampling function that can be applied to get a hash-based sampling. | [
"Provides",
"a",
"sampling",
"strategy",
"based",
"on",
"hashing",
"and",
"selecting",
"a",
"percentage",
"of",
"data",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_sampling.py#L76-L97 | train | 237,827 |
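The point of the MOD(ABS(FARM_FINGERPRINT(...)), 100) < percent filter is that the sample is deterministic: a row hashes into the same bucket on every run. A hedged pure-Python analogy (crc32 stands in for FARM_FINGERPRINT; both are stable hash functions):

import zlib

def keep(value, percent):
    # Deterministic bucket in [0, 100); keep buckets below `percent`.
    return zlib.crc32(str(value).encode('utf-8')) % 100 < percent

sample = [v for v in range(1000) if keep(v, 10)]
print(len(sample))  # roughly 100 values, and exactly the same set on every run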
googledatalab/pydatalab | google/datalab/bigquery/_sampling.py | Sampling.random | def random(percent, fields=None, count=0):
"""Provides a sampling strategy that picks a semi-random set of rows.
Args:
percent: the percentage of rows to select.
fields: an optional list of field names to retrieve.
count: maximum number of rows to limit the sampled results to (default 0, i.e. no limit).
Returns:
A sampling function that can be applied to get some random rows. In order for this to
provide a good random sample percent should be chosen to be ~count/#rows where #rows
is the number of rows in the object (query, view or table) being sampled.
The rows will be returned in order; i.e. the order itself is not randomized.
"""
def _random_sampling(sql):
projection = Sampling._create_projection(fields)
sql = 'SELECT %s FROM (%s) WHERE rand() < %f' % (projection, sql, (float(percent) / 100.0))
if count != 0:
sql = '%s LIMIT %d' % (sql, count)
return sql
return _random_sampling | python | def random(percent, fields=None, count=0):
"""Provides a sampling strategy that picks a semi-random set of rows.
Args:
percent: the percentage of rows to select.
fields: an optional list of field names to retrieve.
count: maximum number of rows to limit the sampled results to (default 0, i.e. no limit).
Returns:
A sampling function that can be applied to get some random rows. In order for this to
provide a good random sample percent should be chosen to be ~count/#rows where #rows
is the number of rows in the object (query, view or table) being sampled.
The rows will be returned in order; i.e. the order itself is not randomized.
"""
def _random_sampling(sql):
projection = Sampling._create_projection(fields)
sql = 'SELECT %s FROM (%s) WHERE rand() < %f' % (projection, sql, (float(percent) / 100.0))
if count != 0:
sql = '%s LIMIT %d' % (sql, count)
return sql
return _random_sampling | [
"def",
"random",
"(",
"percent",
",",
"fields",
"=",
"None",
",",
"count",
"=",
"0",
")",
":",
"def",
"_random_sampling",
"(",
"sql",
")",
":",
"projection",
"=",
"Sampling",
".",
"_create_projection",
"(",
"fields",
")",
"sql",
"=",
"'SELECT %s FROM (%s) WHERE rand() < %f'",
"%",
"(",
"projection",
",",
"sql",
",",
"(",
"float",
"(",
"percent",
")",
"/",
"100.0",
")",
")",
"if",
"count",
"!=",
"0",
":",
"sql",
"=",
"'%s LIMIT %d'",
"%",
"(",
"sql",
",",
"count",
")",
"return",
"sql",
"return",
"_random_sampling"
] | Provides a sampling strategy that picks a semi-random set of rows.
Args:
percent: the percentage of rows to select.
fields: an optional list of field names to retrieve.
count: maximum number of rows to limit the sampled results to (default 0, i.e. no limit).
Returns:
A sampling function that can be applied to get some random rows. In order for this to
provide a good random sample percent should be chosen to be ~count/#rows where #rows
is the number of rows in the object (query, view or table) being sampled.
The rows will be returned in order; i.e. the order itself is not randomized. | [
"Provides",
"a",
"sampling",
"strategy",
"that",
"picks",
"a",
"semi",
"-",
"random",
"set",
"of",
"rows",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_sampling.py#L100-L119 | train | 237,828 |
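The docstring's guidance on choosing percent translates into simple arithmetic; a quick worked example (the table size is hypothetical):

count, num_rows = 1000, 50 * 1000 * 1000   # want ~1000 rows from a 50M-row table
percent = count / float(num_rows) * 100    # 0.002
threshold = percent / 100.0                # the SQL becomes: WHERE rand() < 0.00002
print(percent, threshold)
# The LIMIT clause (applied when count != 0) then caps any overshoot from the
# random filter.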
googledatalab/pydatalab | google/datalab/bigquery/_sampling.py | Sampling._auto | def _auto(method, fields, count, percent, key_field, ascending):
"""Construct a sampling function according to the provided sampling technique, provided all
its needed fields are passed as arguments
Args:
method: one of the supported sampling methods: {limit,random,hashed,sorted}
fields: an optional list of field names to retrieve.
count: maximum number of rows to limit the sampled results to.
percent: the percentage of the resulting hashes to select if using hashed sampling
key_field: the name of the field to sort the rows by or use for hashing
ascending: whether to sort in ascending direction or not.
Returns:
A sampling function using the provided arguments
Raises:
Exception if an unsupported method name is passed
"""
if method == 'limit':
return Sampling.default(fields=fields, count=count)
elif method == 'random':
return Sampling.random(fields=fields, percent=percent, count=count)
elif method == 'hashed':
return Sampling.hashed(fields=fields, field_name=key_field, percent=percent, count=count)
elif method == 'sorted':
return Sampling.sorted(fields=fields, field_name=key_field, ascending=ascending, count=count)
else:
raise Exception('Unsupported sampling method: %s' % method) | python | def _auto(method, fields, count, percent, key_field, ascending):
"""Construct a sampling function according to the provided sampling technique, provided all
its needed fields are passed as arguments
Args:
method: one of the supported sampling methods: {limit,random,hashed,sorted}
fields: an optional list of field names to retrieve.
count: maximum number of rows to limit the sampled results to.
percent: the percentage of the resulting hashes to select if using hashed sampling
key_field: the name of the field to sort the rows by or use for hashing
ascending: whether to sort in ascending direction or not.
Returns:
A sampling function using the provided arguments
Raises:
Exception if an unsupported method name is passed
"""
if method == 'limit':
return Sampling.default(fields=fields, count=count)
elif method == 'random':
return Sampling.random(fields=fields, percent=percent, count=count)
elif method == 'hashed':
return Sampling.hashed(fields=fields, field_name=key_field, percent=percent, count=count)
elif method == 'sorted':
return Sampling.sorted(fields=fields, field_name=key_field, ascending=ascending, count=count)
else:
raise Exception('Unsupported sampling method: %s' % method) | [
"def",
"_auto",
"(",
"method",
",",
"fields",
",",
"count",
",",
"percent",
",",
"key_field",
",",
"ascending",
")",
":",
"if",
"method",
"==",
"'limit'",
":",
"return",
"Sampling",
".",
"default",
"(",
"fields",
"=",
"fields",
",",
"count",
"=",
"count",
")",
"elif",
"method",
"==",
"'random'",
":",
"return",
"Sampling",
".",
"random",
"(",
"fields",
"=",
"fields",
",",
"percent",
"=",
"percent",
",",
"count",
"=",
"count",
")",
"elif",
"method",
"==",
"'hashed'",
":",
"return",
"Sampling",
".",
"hashed",
"(",
"fields",
"=",
"fields",
",",
"field_name",
"=",
"key_field",
",",
"percent",
"=",
"percent",
",",
"count",
"=",
"count",
")",
"elif",
"method",
"==",
"'sorted'",
":",
"return",
"Sampling",
".",
"sorted",
"(",
"fields",
"=",
"fields",
",",
"field_name",
"=",
"key_field",
",",
"ascending",
"=",
"ascending",
",",
"count",
"=",
"count",
")",
"else",
":",
"raise",
"Exception",
"(",
"'Unsupported sampling method: %s'",
"%",
"method",
")"
] | Construct a sampling function according to the provided sampling technique, provided all
its needed fields are passed as arguments
Args:
method: one of the supported sampling methods: {limit,random,hashed,sorted}
fields: an optional list of field names to retrieve.
count: maximum number of rows to limit the sampled results to.
percent: the percentage of the resulting hashes to select if using hashed sampling
key_field: the name of the field to sort the rows by or use for hashing
ascending: whether to sort in ascending direction or not.
Returns:
A sampling function using the provided arguments
Raises:
Exception if an unsupported method name is passed | [
"Construct",
"a",
"sampling",
"function",
"according",
"to",
"the",
"provided",
"sampling",
"technique",
"provided",
"all",
"its",
"needed",
"fields",
"are",
"passed",
"as",
"arguments"
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_sampling.py#L122-L147 | train | 237,829 |
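A hedged, self-contained sketch of the same dispatch shape using a dict of strategies; the real method forwards to the Sampling static methods shown above, and keywords a given branch does not need are simply ignored:

def auto(method, **kw):
    strategies = {
        'limit': lambda sql: 'SELECT * FROM (%s) LIMIT %d' % (sql, kw['count']),
        'sorted': lambda sql: 'SELECT * FROM (%s) ORDER BY %s LIMIT %d' % (
            sql, kw['key_field'], kw['count']),
    }
    if method not in strategies:
        raise Exception('Unsupported sampling method: %s' % method)
    return strategies[method]

print(auto('sorted', key_field='ts', count=5)('SELECT * FROM logs'))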
googledatalab/pydatalab | google/datalab/bigquery/_csv_options.py | CSVOptions._to_query_json | def _to_query_json(self):
""" Return the options as a dictionary to be used as JSON in a query job. """
return {
'quote': self._quote,
'fieldDelimiter': self._delimiter,
'encoding': self._encoding.upper(),
'skipLeadingRows': self._skip_leading_rows,
'allowQuotedNewlines': self._allow_quoted_newlines,
'allowJaggedRows': self._allow_jagged_rows
} | python | def _to_query_json(self):
""" Return the options as a dictionary to be used as JSON in a query job. """
return {
'quote': self._quote,
'fieldDelimiter': self._delimiter,
'encoding': self._encoding.upper(),
'skipLeadingRows': self._skip_leading_rows,
'allowQuotedNewlines': self._allow_quoted_newlines,
'allowJaggedRows': self._allow_jagged_rows
} | [
"def",
"_to_query_json",
"(",
"self",
")",
":",
"return",
"{",
"'quote'",
":",
"self",
".",
"_quote",
",",
"'fieldDelimiter'",
":",
"self",
".",
"_delimiter",
",",
"'encoding'",
":",
"self",
".",
"_encoding",
".",
"upper",
"(",
")",
",",
"'skipLeadingRows'",
":",
"self",
".",
"_skip_leading_rows",
",",
"'allowQuotedNewlines'",
":",
"self",
".",
"_allow_quoted_newlines",
",",
"'allowJaggedRows'",
":",
"self",
".",
"_allow_jagged_rows",
"}"
] | Return the options as a dictionary to be used as JSON in a query job. | [
"Return",
"the",
"options",
"as",
"a",
"dictionary",
"to",
"be",
"used",
"as",
"JSON",
"in",
"a",
"query",
"job",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_csv_options.py#L75-L84 | train | 237,830 |
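For reference, this is the shape of the dictionary the method returns when it is spliced into a query job's external-data configuration; the values below are plausible CSVOptions defaults, not confirmed by this excerpt:

csv_options_json = {
    'quote': '"',
    'fieldDelimiter': ',',
    'encoding': 'UTF-8',
    'skipLeadingRows': 0,
    'allowQuotedNewlines': False,
    'allowJaggedRows': False,
}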
googledatalab/pydatalab | datalab/bigquery/_api.py | Api.jobs_insert_load | def jobs_insert_load(self, source, table_name, append=False, overwrite=False, create=False,
source_format='CSV', field_delimiter=',', allow_jagged_rows=False,
allow_quoted_newlines=False, encoding='UTF-8', ignore_unknown_values=False,
max_bad_records=0, quote='"', skip_leading_rows=0):
""" Issues a request to load data from GCS to a BQ table
Args:
source: the URL of the source bucket(s). Can include wildcards, and can be a single
string argument or a list.
table_name: a tuple representing the full name of the destination table.
append: if True append onto existing table contents.
overwrite: if True overwrite existing table contents.
create: if True, create the table if it doesn't exist
source_format: the format of the data; default 'CSV'. Other options are DATASTORE_BACKUP
or NEWLINE_DELIMITED_JSON.
field_delimiter: The separator for fields in a CSV file. BigQuery converts the string to
ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data
as raw binary (default ',').
allow_jagged_rows: If True, accept rows in CSV files that are missing trailing optional
columns; the missing values are treated as nulls (default False).
allow_quoted_newlines: If True, allow quoted data sections in CSV files that contain newline
characters (default False).
encoding: The character encoding of the data, either 'UTF-8' (the default) or 'ISO-8859-1'.
ignore_unknown_values: If True, accept rows that contain values that do not match the schema;
the unknown values are ignored (default False).
max_bad_records: The maximum number of bad records that are allowed (and ignored) before
returning an 'invalid' error in the Job result (default 0).
quote: The value used to quote data sections in a CSV file; default '"'. If your data does
not contain quoted sections, set the property value to an empty string. If your data
contains quoted newline characters, you must also enable allow_quoted_newlines.
skip_leading_rows: A number of rows at the top of a CSV file to skip (default 0).
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._JOBS_PATH % (table_name.project_id, ''))
if isinstance(source, basestring):
source = [source]
write_disposition = 'WRITE_EMPTY'
if overwrite:
write_disposition = 'WRITE_TRUNCATE'
if append:
write_disposition = 'WRITE_APPEND'
data = {
'kind': 'bigquery#job',
'configuration': {
'load': {
'sourceUris': source,
'destinationTable': {
'projectId': table_name.project_id,
'datasetId': table_name.dataset_id,
'tableId': table_name.table_id
},
'createDisposition': 'CREATE_IF_NEEDED' if create else 'CREATE_NEVER',
'writeDisposition': write_disposition,
'sourceFormat': source_format,
'ignoreUnknownValues': ignore_unknown_values,
'maxBadRecords': max_bad_records,
}
}
}
if source_format == 'CSV':
load_config = data['configuration']['load']
load_config.update({
'fieldDelimiter': field_delimiter,
'allowJaggedRows': allow_jagged_rows,
'allowQuotedNewlines': allow_quoted_newlines,
'quote': quote,
'encoding': encoding,
'skipLeadingRows': skip_leading_rows
})
return datalab.utils.Http.request(url, data=data, credentials=self._credentials) | python | def jobs_insert_load(self, source, table_name, append=False, overwrite=False, create=False,
source_format='CSV', field_delimiter=',', allow_jagged_rows=False,
allow_quoted_newlines=False, encoding='UTF-8', ignore_unknown_values=False,
max_bad_records=0, quote='"', skip_leading_rows=0):
""" Issues a request to load data from GCS to a BQ table
Args:
source: the URL of the source bucket(s). Can include wildcards, and can be a single
string argument or a list.
table_name: a tuple representing the full name of the destination table.
append: if True append onto existing table contents.
overwrite: if True overwrite existing table contents.
create: if True, create the table if it doesn't exist
source_format: the format of the data; default 'CSV'. Other options are DATASTORE_BACKUP
or NEWLINE_DELIMITED_JSON.
field_delimiter: The separator for fields in a CSV file. BigQuery converts the string to
ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data
as raw binary (default ',').
allow_jagged_rows: If True, accept rows in CSV files that are missing trailing optional
columns; the missing values are treated as nulls (default False).
allow_quoted_newlines: If True, allow quoted data sections in CSV files that contain newline
characters (default False).
encoding: The character encoding of the data, either 'UTF-8' (the default) or 'ISO-8859-1'.
ignore_unknown_values: If True, accept rows that contain values that do not match the schema;
the unknown values are ignored (default False).
max_bad_records: The maximum number of bad records that are allowed (and ignored) before
returning an 'invalid' error in the Job result (default 0).
quote: The value used to quote data sections in a CSV file; default '"'. If your data does
not contain quoted sections, set the property value to an empty string. If your data
contains quoted newline characters, you must also enable allow_quoted_newlines.
skip_leading_rows: A number of rows at the top of a CSV file to skip (default 0).
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._JOBS_PATH % (table_name.project_id, ''))
if isinstance(source, basestring):
source = [source]
write_disposition = 'WRITE_EMPTY'
if overwrite:
write_disposition = 'WRITE_TRUNCATE'
if append:
write_disposition = 'WRITE_APPEND'
data = {
'kind': 'bigquery#job',
'configuration': {
'load': {
'sourceUris': source,
'destinationTable': {
'projectId': table_name.project_id,
'datasetId': table_name.dataset_id,
'tableId': table_name.table_id
},
'createDisposition': 'CREATE_IF_NEEDED' if create else 'CREATE_NEVER',
'writeDisposition': write_disposition,
'sourceFormat': source_format,
'ignoreUnknownValues': ignore_unknown_values,
'maxBadRecords': max_bad_records,
}
}
}
if source_format == 'CSV':
load_config = data['configuration']['load']
load_config.update({
'fieldDelimiter': field_delimiter,
'allowJaggedRows': allow_jagged_rows,
'allowQuotedNewlines': allow_quoted_newlines,
'quote': quote,
'encoding': encoding,
'skipLeadingRows': skip_leading_rows
})
return datalab.utils.Http.request(url, data=data, credentials=self._credentials) | [
"def",
"jobs_insert_load",
"(",
"self",
",",
"source",
",",
"table_name",
",",
"append",
"=",
"False",
",",
"overwrite",
"=",
"False",
",",
"create",
"=",
"False",
",",
"source_format",
"=",
"'CSV'",
",",
"field_delimiter",
"=",
"','",
",",
"allow_jagged_rows",
"=",
"False",
",",
"allow_quoted_newlines",
"=",
"False",
",",
"encoding",
"=",
"'UTF-8'",
",",
"ignore_unknown_values",
"=",
"False",
",",
"max_bad_records",
"=",
"0",
",",
"quote",
"=",
"'\"'",
",",
"skip_leading_rows",
"=",
"0",
")",
":",
"url",
"=",
"Api",
".",
"_ENDPOINT",
"+",
"(",
"Api",
".",
"_JOBS_PATH",
"%",
"(",
"table_name",
".",
"project_id",
",",
"''",
")",
")",
"if",
"isinstance",
"(",
"source",
",",
"basestring",
")",
":",
"source",
"=",
"[",
"source",
"]",
"write_disposition",
"=",
"'WRITE_EMPTY'",
"if",
"overwrite",
":",
"write_disposition",
"=",
"'WRITE_TRUNCATE'",
"if",
"append",
":",
"write_disposition",
"=",
"'WRITE_APPEND'",
"data",
"=",
"{",
"'kind'",
":",
"'bigquery#job'",
",",
"'configuration'",
":",
"{",
"'load'",
":",
"{",
"'sourceUris'",
":",
"source",
",",
"'destinationTable'",
":",
"{",
"'projectId'",
":",
"table_name",
".",
"project_id",
",",
"'datasetId'",
":",
"table_name",
".",
"dataset_id",
",",
"'tableId'",
":",
"table_name",
".",
"table_id",
"}",
",",
"'createDisposition'",
":",
"'CREATE_IF_NEEDED'",
"if",
"create",
"else",
"'CREATE_NEVER'",
",",
"'writeDisposition'",
":",
"write_disposition",
",",
"'sourceFormat'",
":",
"source_format",
",",
"'ignoreUnknownValues'",
":",
"ignore_unknown_values",
",",
"'maxBadRecords'",
":",
"max_bad_records",
",",
"}",
"}",
"}",
"if",
"source_format",
"==",
"'CSV'",
":",
"load_config",
"=",
"data",
"[",
"'configuration'",
"]",
"[",
"'load'",
"]",
"load_config",
".",
"update",
"(",
"{",
"'fieldDelimiter'",
":",
"field_delimiter",
",",
"'allowJaggedRows'",
":",
"allow_jagged_rows",
",",
"'allowQuotedNewlines'",
":",
"allow_quoted_newlines",
",",
"'quote'",
":",
"quote",
",",
"'encoding'",
":",
"encoding",
",",
"'skipLeadingRows'",
":",
"skip_leading_rows",
"}",
")",
"return",
"datalab",
".",
"utils",
".",
"Http",
".",
"request",
"(",
"url",
",",
"data",
"=",
"data",
",",
"credentials",
"=",
"self",
".",
"_credentials",
")"
] | Issues a request to load data from GCS to a BQ table
Args:
source: the URL of the source bucket(s). Can include wildcards, and can be a single
string argument or a list.
table_name: a tuple representing the full name of the destination table.
append: if True append onto existing table contents.
overwrite: if True overwrite existing table contents.
create: if True, create the table if it doesn't exist
source_format: the format of the data; default 'CSV'. Other options are DATASTORE_BACKUP
or NEWLINE_DELIMITED_JSON.
field_delimiter: The separator for fields in a CSV file. BigQuery converts the string to
ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data
as raw binary (default ',').
allow_jagged_rows: If True, accept rows in CSV files that are missing trailing optional
columns; the missing values are treated as nulls (default False).
allow_quoted_newlines: If True, allow quoted data sections in CSV files that contain newline
characters (default False).
encoding: The character encoding of the data, either 'UTF-8' (the default) or 'ISO-8859-1'.
ignore_unknown_values: If True, accept rows that contain values that do not match the schema;
the unknown values are ignored (default False).
max_bad_records: The maximum number of bad records that are allowed (and ignored) before
returning an 'invalid' error in the Job result (default 0).
quote: The value used to quote data sections in a CSV file; default '"'. If your data does
not contain quoted sections, set the property value to an empty string. If your data
contains quoted newline characters, you must also enable allow_quoted_newlines.
skip_leading_rows: A number of rows at the top of a CSV file to skip (default 0).
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation. | [
"Issues",
"a",
"request",
"to",
"load",
"data",
"from",
"GCS",
"to",
"a",
"BQ",
"table"
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_api.py#L50-L123 | train | 237,831 |
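Tracing the code above for a typical CSV append gives the following request body; the bucket, project, and dataset names are hypothetical, and note that append=True overrides overwrite when both are set:

load_job = {
    'kind': 'bigquery#job',
    'configuration': {
        'load': {
            'sourceUris': ['gs://my-bucket/data/*.csv'],
            'destinationTable': {'projectId': 'my-project',
                                 'datasetId': 'my_dataset',
                                 'tableId': 'my_table'},
            'createDisposition': 'CREATE_NEVER',
            'writeDisposition': 'WRITE_APPEND',
            'sourceFormat': 'CSV',
            'ignoreUnknownValues': False,
            'maxBadRecords': 0,
            # Merged in by the source_format == 'CSV' branch:
            'fieldDelimiter': ',',
            'allowJaggedRows': False,
            'allowQuotedNewlines': False,
            'quote': '"',
            'encoding': 'UTF-8',
            'skipLeadingRows': 1,
        }
    }
}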
googledatalab/pydatalab | datalab/bigquery/_api.py | Api.jobs_get | def jobs_get(self, job_id, project_id=None):
"""Issues a request to retrieve information about a job.
Args:
job_id: the id of the job
project_id: the project id to use to fetch the results; use None for the default project.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
if project_id is None:
project_id = self._project_id
url = Api._ENDPOINT + (Api._JOBS_PATH % (project_id, job_id))
return datalab.utils.Http.request(url, credentials=self._credentials) | python | def jobs_get(self, job_id, project_id=None):
"""Issues a request to retrieve information about a job.
Args:
job_id: the id of the job
project_id: the project id to use to fetch the results; use None for the default project.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
if project_id is None:
project_id = self._project_id
url = Api._ENDPOINT + (Api._JOBS_PATH % (project_id, job_id))
return datalab.utils.Http.request(url, credentials=self._credentials) | [
"def",
"jobs_get",
"(",
"self",
",",
"job_id",
",",
"project_id",
"=",
"None",
")",
":",
"if",
"project_id",
"is",
"None",
":",
"project_id",
"=",
"self",
".",
"_project_id",
"url",
"=",
"Api",
".",
"_ENDPOINT",
"+",
"(",
"Api",
".",
"_JOBS_PATH",
"%",
"(",
"project_id",
",",
"job_id",
")",
")",
"return",
"datalab",
".",
"utils",
".",
"Http",
".",
"request",
"(",
"url",
",",
"credentials",
"=",
"self",
".",
"_credentials",
")"
] | Issues a request to retrieve information about a job.
Args:
job_id: the id of the job
project_id: the project id to use to fetch the results; use None for the default project.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation. | [
"Issues",
"a",
"request",
"to",
"retrieve",
"information",
"about",
"a",
"job",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_api.py#L239-L253 | train | 237,832 |
googledatalab/pydatalab | datalab/bigquery/_api.py | Api.datasets_insert | def datasets_insert(self, dataset_name, friendly_name=None, description=None):
"""Issues a request to create a dataset.
Args:
dataset_name: the name of the dataset to create.
friendly_name: (optional) the friendly name for the dataset
description: (optional) a description for the dataset
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._DATASETS_PATH % (dataset_name.project_id, ''))
data = {
'kind': 'bigquery#dataset',
'datasetReference': {
'projectId': dataset_name.project_id,
'datasetId': dataset_name.dataset_id
},
}
if friendly_name:
data['friendlyName'] = friendly_name
if description:
data['description'] = description
return datalab.utils.Http.request(url, data=data, credentials=self._credentials) | python | def datasets_insert(self, dataset_name, friendly_name=None, description=None):
"""Issues a request to create a dataset.
Args:
dataset_name: the name of the dataset to create.
friendly_name: (optional) the friendly name for the dataset
description: (optional) a description for the dataset
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._DATASETS_PATH % (dataset_name.project_id, ''))
data = {
'kind': 'bigquery#dataset',
'datasetReference': {
'projectId': dataset_name.project_id,
'datasetId': dataset_name.dataset_id
},
}
if friendly_name:
data['friendlyName'] = friendly_name
if description:
data['description'] = description
return datalab.utils.Http.request(url, data=data, credentials=self._credentials) | [
"def",
"datasets_insert",
"(",
"self",
",",
"dataset_name",
",",
"friendly_name",
"=",
"None",
",",
"description",
"=",
"None",
")",
":",
"url",
"=",
"Api",
".",
"_ENDPOINT",
"+",
"(",
"Api",
".",
"_DATASETS_PATH",
"%",
"(",
"dataset_name",
".",
"project_id",
",",
"''",
")",
")",
"data",
"=",
"{",
"'kind'",
":",
"'bigquery#dataset'",
",",
"'datasetReference'",
":",
"{",
"'projectId'",
":",
"dataset_name",
".",
"project_id",
",",
"'datasetId'",
":",
"dataset_name",
".",
"dataset_id",
"}",
",",
"}",
"if",
"friendly_name",
":",
"data",
"[",
"'friendlyName'",
"]",
"=",
"friendly_name",
"if",
"description",
":",
"data",
"[",
"'description'",
"]",
"=",
"description",
"return",
"datalab",
".",
"utils",
".",
"Http",
".",
"request",
"(",
"url",
",",
"data",
"=",
"data",
",",
"credentials",
"=",
"self",
".",
"_credentials",
")"
] | Issues a request to create a dataset.
Args:
dataset_name: the name of the dataset to create.
friendly_name: (optional) the friendly name for the dataset
description: (optional) a description for the dataset
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation. | [
"Issues",
"a",
"request",
"to",
"create",
"a",
"dataset",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_api.py#L255-L279 | train | 237,833 |
googledatalab/pydatalab | datalab/bigquery/_api.py | Api.datasets_delete | def datasets_delete(self, dataset_name, delete_contents):
"""Issues a request to delete a dataset.
Args:
dataset_name: the name of the dataset to delete.
delete_contents: if True, any tables in the dataset will be deleted. If False and the
dataset is non-empty an exception will be raised.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._DATASETS_PATH % dataset_name)
args = {}
if delete_contents:
args['deleteContents'] = True
return datalab.utils.Http.request(url, method='DELETE', args=args,
credentials=self._credentials, raw_response=True) | python | def datasets_delete(self, dataset_name, delete_contents):
"""Issues a request to delete a dataset.
Args:
dataset_name: the name of the dataset to delete.
delete_contents: if True, any tables in the dataset will be deleted. If False and the
dataset is non-empty an exception will be raised.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._DATASETS_PATH % dataset_name)
args = {}
if delete_contents:
args['deleteContents'] = True
return datalab.utils.Http.request(url, method='DELETE', args=args,
credentials=self._credentials, raw_response=True) | [
"def",
"datasets_delete",
"(",
"self",
",",
"dataset_name",
",",
"delete_contents",
")",
":",
"url",
"=",
"Api",
".",
"_ENDPOINT",
"+",
"(",
"Api",
".",
"_DATASETS_PATH",
"%",
"dataset_name",
")",
"args",
"=",
"{",
"}",
"if",
"delete_contents",
":",
"args",
"[",
"'deleteContents'",
"]",
"=",
"True",
"return",
"datalab",
".",
"utils",
".",
"Http",
".",
"request",
"(",
"url",
",",
"method",
"=",
"'DELETE'",
",",
"args",
"=",
"args",
",",
"credentials",
"=",
"self",
".",
"_credentials",
",",
"raw_response",
"=",
"True",
")"
] | Issues a request to delete a dataset.
Args:
dataset_name: the name of the dataset to delete.
delete_contents: if True, any tables in the dataset will be deleted. If False and the
dataset is non-empty an exception will be raised.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation. | [
"Issues",
"a",
"request",
"to",
"delete",
"a",
"dataset",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_api.py#L281-L298 | train | 237,834 |
googledatalab/pydatalab | datalab/bigquery/_api.py | Api.datasets_update | def datasets_update(self, dataset_name, dataset_info):
"""Updates the Dataset info.
Args:
dataset_name: the name of the dataset to update as a tuple of components.
dataset_info: the Dataset resource with updated fields.
"""
url = Api._ENDPOINT + (Api._DATASETS_PATH % dataset_name)
return datalab.utils.Http.request(url, method='PUT', data=dataset_info,
credentials=self._credentials) | python | def datasets_update(self, dataset_name, dataset_info):
"""Updates the Dataset info.
Args:
dataset_name: the name of the dataset to update as a tuple of components.
dataset_info: the Dataset resource with updated fields.
"""
url = Api._ENDPOINT + (Api._DATASETS_PATH % dataset_name)
return datalab.utils.Http.request(url, method='PUT', data=dataset_info,
credentials=self._credentials) | [
"def",
"datasets_update",
"(",
"self",
",",
"dataset_name",
",",
"dataset_info",
")",
":",
"url",
"=",
"Api",
".",
"_ENDPOINT",
"+",
"(",
"Api",
".",
"_DATASETS_PATH",
"%",
"dataset_name",
")",
"return",
"datalab",
".",
"utils",
".",
"Http",
".",
"request",
"(",
"url",
",",
"method",
"=",
"'PUT'",
",",
"data",
"=",
"dataset_info",
",",
"credentials",
"=",
"self",
".",
"_credentials",
")"
] | Updates the Dataset info.
Args:
dataset_name: the name of the dataset to update as a tuple of components.
dataset_info: the Dataset resource with updated fields. | [
"Updates",
"the",
"Dataset",
"info",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_api.py#L300-L309 | train | 237,835 |
googledatalab/pydatalab | datalab/bigquery/_api.py | Api.datasets_get | def datasets_get(self, dataset_name):
"""Issues a request to retrieve information about a dataset.
Args:
dataset_name: the name of the dataset
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._DATASETS_PATH % dataset_name)
return datalab.utils.Http.request(url, credentials=self._credentials) | python | def datasets_get(self, dataset_name):
"""Issues a request to retrieve information about a dataset.
Args:
dataset_name: the name of the dataset
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._DATASETS_PATH % dataset_name)
return datalab.utils.Http.request(url, credentials=self._credentials) | [
"def",
"datasets_get",
"(",
"self",
",",
"dataset_name",
")",
":",
"url",
"=",
"Api",
".",
"_ENDPOINT",
"+",
"(",
"Api",
".",
"_DATASETS_PATH",
"%",
"dataset_name",
")",
"return",
"datalab",
".",
"utils",
".",
"Http",
".",
"request",
"(",
"url",
",",
"credentials",
"=",
"self",
".",
"_credentials",
")"
] | Issues a request to retrieve information about a dataset.
Args:
dataset_name: the name of the dataset
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation. | [
"Issues",
"a",
"request",
"to",
"retrieve",
"information",
"about",
"a",
"dataset",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_api.py#L311-L322 | train | 237,836 |
googledatalab/pydatalab | datalab/bigquery/_api.py | Api.datasets_list | def datasets_list(self, project_id=None, max_results=0, page_token=None):
"""Issues a request to list the datasets in the project.
Args:
project_id: the project id to use to fetch the results; use None for the default project.
max_results: an optional maximum number of tables to retrieve.
page_token: an optional token to continue the retrieval.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
if project_id is None:
project_id = self._project_id
url = Api._ENDPOINT + (Api._DATASETS_PATH % (project_id, ''))
args = {}
if max_results != 0:
args['maxResults'] = max_results
if page_token is not None:
args['pageToken'] = page_token
return datalab.utils.Http.request(url, args=args, credentials=self._credentials) | python | def datasets_list(self, project_id=None, max_results=0, page_token=None):
"""Issues a request to list the datasets in the project.
Args:
project_id: the project id to use to fetch the results; use None for the default project.
max_results: an optional maximum number of tables to retrieve.
page_token: an optional token to continue the retrieval.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
if project_id is None:
project_id = self._project_id
url = Api._ENDPOINT + (Api._DATASETS_PATH % (project_id, ''))
args = {}
if max_results != 0:
args['maxResults'] = max_results
if page_token is not None:
args['pageToken'] = page_token
return datalab.utils.Http.request(url, args=args, credentials=self._credentials) | [
"def",
"datasets_list",
"(",
"self",
",",
"project_id",
"=",
"None",
",",
"max_results",
"=",
"0",
",",
"page_token",
"=",
"None",
")",
":",
"if",
"project_id",
"is",
"None",
":",
"project_id",
"=",
"self",
".",
"_project_id",
"url",
"=",
"Api",
".",
"_ENDPOINT",
"+",
"(",
"Api",
".",
"_DATASETS_PATH",
"%",
"(",
"project_id",
",",
"''",
")",
")",
"args",
"=",
"{",
"}",
"if",
"max_results",
"!=",
"0",
":",
"args",
"[",
"'maxResults'",
"]",
"=",
"max_results",
"if",
"page_token",
"is",
"not",
"None",
":",
"args",
"[",
"'pageToken'",
"]",
"=",
"page_token",
"return",
"datalab",
".",
"utils",
".",
"Http",
".",
"request",
"(",
"url",
",",
"args",
"=",
"args",
",",
"credentials",
"=",
"self",
".",
"_credentials",
")"
] | Issues a request to list the datasets in the project.
Args:
project_id: the project id to use to fetch the results; use None for the default project.
max_results: an optional maximum number of tables to retrieve.
page_token: an optional token to continue the retrieval.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation. | [
"Issues",
"a",
"request",
"to",
"list",
"the",
"datasets",
"in",
"the",
"project",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_api.py#L324-L346 | train | 237,837 |
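A hedged paging sketch on top of this method, assuming the response follows the BigQuery v2 list shape with a 'datasets' array and an optional 'nextPageToken' (the response shape is not shown in this excerpt):

def all_datasets(api, project_id=None):
    # Iterate pages until no continuation token is returned.
    token = None
    while True:
        page = api.datasets_list(project_id=project_id, max_results=100,
                                 page_token=token)
        for ds in page.get('datasets', []):
            yield ds
        token = page.get('nextPageToken')
        if not token:
            break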
googledatalab/pydatalab | datalab/bigquery/_api.py | Api.tables_get | def tables_get(self, table_name):
"""Issues a request to retrieve information about a table.
Args:
table_name: a tuple representing the full name of the table.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._TABLES_PATH % table_name)
return datalab.utils.Http.request(url, credentials=self._credentials) | python | def tables_get(self, table_name):
"""Issues a request to retrieve information about a table.
Args:
table_name: a tuple representing the full name of the table.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._TABLES_PATH % table_name)
return datalab.utils.Http.request(url, credentials=self._credentials) | [
"def",
"tables_get",
"(",
"self",
",",
"table_name",
")",
":",
"url",
"=",
"Api",
".",
"_ENDPOINT",
"+",
"(",
"Api",
".",
"_TABLES_PATH",
"%",
"table_name",
")",
"return",
"datalab",
".",
"utils",
".",
"Http",
".",
"request",
"(",
"url",
",",
"credentials",
"=",
"self",
".",
"_credentials",
")"
] | Issues a request to retrieve information about a table.
Args:
table_name: a tuple representing the full name of the table.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation. | [
"Issues",
"a",
"request",
"to",
"retrieve",
"information",
"about",
"a",
"table",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_api.py#L348-L359 | train | 237,838 |
googledatalab/pydatalab | datalab/bigquery/_api.py | Api.tables_insert | def tables_insert(self, table_name, schema=None, query=None, friendly_name=None,
description=None):
"""Issues a request to create a table or view in the specified dataset with the specified id.
A schema must be provided to create a Table, or a query must be provided to create a View.
Args:
table_name: the name of the table as a tuple of components.
schema: the schema, if this is a Table creation.
query: the query, if this is a View creation.
friendly_name: an optional friendly name.
description: an optional description.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + \
(Api._TABLES_PATH % (table_name.project_id, table_name.dataset_id, '', ''))
data = {
'kind': 'bigquery#table',
'tableReference': {
'projectId': table_name.project_id,
'datasetId': table_name.dataset_id,
'tableId': table_name.table_id
}
}
if schema:
data['schema'] = {'fields': schema}
if query:
data['view'] = {'query': query}
if friendly_name:
data['friendlyName'] = friendly_name
if description:
data['description'] = description
return datalab.utils.Http.request(url, data=data, credentials=self._credentials) | python | def tables_insert(self, table_name, schema=None, query=None, friendly_name=None,
description=None):
"""Issues a request to create a table or view in the specified dataset with the specified id.
A schema must be provided to create a Table, or a query must be provided to create a View.
Args:
table_name: the name of the table as a tuple of components.
schema: the schema, if this is a Table creation.
query: the query, if this is a View creation.
friendly_name: an optional friendly name.
description: an optional description.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + \
(Api._TABLES_PATH % (table_name.project_id, table_name.dataset_id, '', ''))
data = {
'kind': 'bigquery#table',
'tableReference': {
'projectId': table_name.project_id,
'datasetId': table_name.dataset_id,
'tableId': table_name.table_id
}
}
if schema:
data['schema'] = {'fields': schema}
if query:
data['view'] = {'query': query}
if friendly_name:
data['friendlyName'] = friendly_name
if description:
data['description'] = description
return datalab.utils.Http.request(url, data=data, credentials=self._credentials) | [
"def",
"tables_insert",
"(",
"self",
",",
"table_name",
",",
"schema",
"=",
"None",
",",
"query",
"=",
"None",
",",
"friendly_name",
"=",
"None",
",",
"description",
"=",
"None",
")",
":",
"url",
"=",
"Api",
".",
"_ENDPOINT",
"+",
"(",
"Api",
".",
"_TABLES_PATH",
"%",
"(",
"table_name",
".",
"project_id",
",",
"table_name",
".",
"dataset_id",
",",
"''",
",",
"''",
")",
")",
"data",
"=",
"{",
"'kind'",
":",
"'bigquery#table'",
",",
"'tableReference'",
":",
"{",
"'projectId'",
":",
"table_name",
".",
"project_id",
",",
"'datasetId'",
":",
"table_name",
".",
"dataset_id",
",",
"'tableId'",
":",
"table_name",
".",
"table_id",
"}",
"}",
"if",
"schema",
":",
"data",
"[",
"'schema'",
"]",
"=",
"{",
"'fields'",
":",
"schema",
"}",
"if",
"query",
":",
"data",
"[",
"'view'",
"]",
"=",
"{",
"'query'",
":",
"query",
"}",
"if",
"friendly_name",
":",
"data",
"[",
"'friendlyName'",
"]",
"=",
"friendly_name",
"if",
"description",
":",
"data",
"[",
"'description'",
"]",
"=",
"description",
"return",
"datalab",
".",
"utils",
".",
"Http",
".",
"request",
"(",
"url",
",",
"data",
"=",
"data",
",",
"credentials",
"=",
"self",
".",
"_credentials",
")"
] | Issues a request to create a table or view in the specified dataset with the specified id.
A schema must be provided to create a Table, or a query must be provided to create a View.
Args:
table_name: the name of the table as a tuple of components.
schema: the schema, if this is a Table creation.
query: the query, if this is a View creation.
friendly_name: an optional friendly name.
description: an optional description.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation. | [
"Issues",
"a",
"request",
"to",
"create",
"a",
"table",
"or",
"view",
"in",
"the",
"specified",
"dataset",
"with",
"the",
"specified",
"id",
".",
"A",
"schema",
"must",
"be",
"provided",
"to",
"create",
"a",
"Table",
"or",
"a",
"query",
"must",
"be",
"provided",
"to",
"create",
"a",
"View",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_api.py#L384-L420 | train | 237,839 |
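The two mutually exclusive payload variants this method assembles, side by side: a schema makes it a table creation, a query makes it a view creation (identifiers below are illustrative):

table_fields = {'schema': {'fields': [{'name': 'id', 'type': 'INTEGER'},
                                      {'name': 'name', 'type': 'STRING'}]}}
view_fields = {'view': {'query': 'SELECT id, name FROM my_dataset.events'}}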
googledatalab/pydatalab | datalab/bigquery/_api.py | Api.tabledata_insert_all | def tabledata_insert_all(self, table_name, rows):
"""Issues a request to insert data into a table.
Args:
table_name: the name of the table as a tuple of components.
rows: the data to populate the table, as a list of dictionaries.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._TABLES_PATH % table_name) + "/insertAll"
data = {
'kind': 'bigquery#tableDataInsertAllRequest',
'rows': rows
}
return datalab.utils.Http.request(url, data=data, credentials=self._credentials) | python | def tabledata_insert_all(self, table_name, rows):
"""Issues a request to insert data into a table.
Args:
table_name: the name of the table as a tuple of components.
rows: the data to populate the table, as a list of dictionaries.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._TABLES_PATH % table_name) + "/insertAll"
data = {
'kind': 'bigquery#tableDataInsertAllRequest',
'rows': rows
}
return datalab.utils.Http.request(url, data=data, credentials=self._credentials) | [
"def",
"tabledata_insert_all",
"(",
"self",
",",
"table_name",
",",
"rows",
")",
":",
"url",
"=",
"Api",
".",
"_ENDPOINT",
"+",
"(",
"Api",
".",
"_TABLES_PATH",
"%",
"table_name",
")",
"+",
"\"/insertAll\"",
"data",
"=",
"{",
"'kind'",
":",
"'bigquery#tableDataInsertAllRequest'",
",",
"'rows'",
":",
"rows",
"}",
"return",
"datalab",
".",
"utils",
".",
"Http",
".",
"request",
"(",
"url",
",",
"data",
"=",
"data",
",",
"credentials",
"=",
"self",
".",
"_credentials",
")"
] | Issues a request to insert data into a table.
Args:
table_name: the name of the table as a tuple of components.
rows: the data to populate the table, as a list of dictionaries.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation. | [
"Issues",
"a",
"request",
"to",
"insert",
"data",
"into",
"a",
"table",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_api.py#L422-L440 | train | 237,840 |
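A hedged example of the `rows` argument: the streaming insertAll API expects each record wrapped in a 'json' field, optionally with an 'insertId' used for best-effort deduplication (this shape comes from the BigQuery API, not from this excerpt):

rows = [
    {'insertId': 'row-1', 'json': {'name': 'alice', 'score': 10}},
    {'insertId': 'row-2', 'json': {'name': 'bob', 'score': 7}},
]
# api.tabledata_insert_all(table_name, rows)  # table_name is the usual tuple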
googledatalab/pydatalab | datalab/bigquery/_api.py | Api.tabledata_list | def tabledata_list(self, table_name, start_index=None, max_results=None, page_token=None):
""" Retrieves the contents of a table.
Args:
table_name: the name of the table as a tuple of components.
start_index: the index of the row at which to start retrieval.
max_results: an optional maximum number of rows to retrieve.
page_token: an optional token to continue the retrieval.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._TABLEDATA_PATH % table_name)
args = {}
if start_index:
args['startIndex'] = start_index
if max_results:
args['maxResults'] = max_results
if page_token is not None:
args['pageToken'] = page_token
return datalab.utils.Http.request(url, args=args, credentials=self._credentials) | python | def tabledata_list(self, table_name, start_index=None, max_results=None, page_token=None):
""" Retrieves the contents of a table.
Args:
table_name: the name of the table as a tuple of components.
start_index: the index of the row at which to start retrieval.
max_results: an optional maximum number of rows to retrieve.
page_token: an optional token to continue the retrieval.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._TABLEDATA_PATH % table_name)
args = {}
if start_index:
args['startIndex'] = start_index
if max_results:
args['maxResults'] = max_results
if page_token is not None:
args['pageToken'] = page_token
return datalab.utils.Http.request(url, args=args, credentials=self._credentials) | [
"def",
"tabledata_list",
"(",
"self",
",",
"table_name",
",",
"start_index",
"=",
"None",
",",
"max_results",
"=",
"None",
",",
"page_token",
"=",
"None",
")",
":",
"url",
"=",
"Api",
".",
"_ENDPOINT",
"+",
"(",
"Api",
".",
"_TABLEDATA_PATH",
"%",
"table_name",
")",
"args",
"=",
"{",
"}",
"if",
"start_index",
":",
"args",
"[",
"'startIndex'",
"]",
"=",
"start_index",
"if",
"max_results",
":",
"args",
"[",
"'maxResults'",
"]",
"=",
"max_results",
"if",
"page_token",
"is",
"not",
"None",
":",
"args",
"[",
"'pageToken'",
"]",
"=",
"page_token",
"return",
"datalab",
".",
"utils",
".",
"Http",
".",
"request",
"(",
"url",
",",
"args",
"=",
"args",
",",
"credentials",
"=",
"self",
".",
"_credentials",
")"
] | Retrieves the contents of a table.
Args:
table_name: the name of the table as a tuple of components.
start_index: the index of the row at which to start retrieval.
max_results: an optional maximum number of rows to retrieve.
page_token: an optional token to continue the retrieval.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation. | [
"Retrieves",
"the",
"contents",
"of",
"a",
"table",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_api.py#L442-L463 | train | 237,841 |
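A hedged row iterator over this method; it assumes the tabledata.list response carries its continuation token under 'pageToken' and the data under 'rows', which is not shown in this excerpt:

def iter_rows(api, table_name, page_size=500):
    token = None
    while True:
        resp = api.tabledata_list(table_name, max_results=page_size,
                                  page_token=token)
        for row in resp.get('rows', []):
            yield row
        token = resp.get('pageToken')
        if not token:
            break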
googledatalab/pydatalab | datalab/bigquery/_api.py | Api.table_delete | def table_delete(self, table_name):
"""Issues a request to delete a table.
Args:
table_name: the name of the table as a tuple of components.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._TABLES_PATH % table_name)
return datalab.utils.Http.request(url, method='DELETE', credentials=self._credentials,
raw_response=True) | python | def table_delete(self, table_name):
"""Issues a request to delete a table.
Args:
table_name: the name of the table as a tuple of components.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._TABLES_PATH % table_name)
return datalab.utils.Http.request(url, method='DELETE', credentials=self._credentials,
raw_response=True) | [
"def",
"table_delete",
"(",
"self",
",",
"table_name",
")",
":",
"url",
"=",
"Api",
".",
"_ENDPOINT",
"+",
"(",
"Api",
".",
"_TABLES_PATH",
"%",
"table_name",
")",
"return",
"datalab",
".",
"utils",
".",
"Http",
".",
"request",
"(",
"url",
",",
"method",
"=",
"'DELETE'",
",",
"credentials",
"=",
"self",
".",
"_credentials",
",",
"raw_response",
"=",
"True",
")"
] | Issues a request to delete a table.
Args:
table_name: the name of the table as a tuple of components.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation. | [
"Issues",
"a",
"request",
"to",
"delete",
"a",
"table",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_api.py#L465-L477 | train | 237,842 |
googledatalab/pydatalab | datalab/bigquery/_api.py | Api.table_extract | def table_extract(self, table_name, destination, format='CSV', compress=True,
field_delimiter=',', print_header=True):
"""Exports the table to GCS.
Args:
table_name: the name of the table as a tuple of components.
destination: the destination URI(s). Can be a single URI or a list.
format: the format to use for the exported data; one of CSV, NEWLINE_DELIMITED_JSON or AVRO.
Defaults to CSV.
compress: whether to compress the data on export. Compression is not supported for
AVRO format. Defaults to True.
field_delimiter: for CSV exports, the field delimiter to use. Defaults to ','
print_header: for CSV exports, whether to include an initial header line. Default true.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._JOBS_PATH % (table_name.project_id, ''))
if isinstance(destination, basestring):
destination = [destination]
data = {
# 'projectId': table_name.project_id, # Code sample shows this but it is not in job
# reference spec. Filed as b/19235843
'kind': 'bigquery#job',
'configuration': {
'extract': {
'sourceTable': {
'projectId': table_name.project_id,
'datasetId': table_name.dataset_id,
'tableId': table_name.table_id,
},
'compression': 'GZIP' if compress else 'NONE',
'fieldDelimiter': field_delimiter,
'printHeader': print_header,
'destinationUris': destination,
'destinationFormat': format,
}
}
}
return datalab.utils.Http.request(url, data=data, credentials=self._credentials) | python | def table_extract(self, table_name, destination, format='CSV', compress=True,
field_delimiter=',', print_header=True):
"""Exports the table to GCS.
Args:
table_name: the name of the table as a tuple of components.
destination: the destination URI(s). Can be a single URI or a list.
format: the format to use for the exported data; one of CSV, NEWLINE_DELIMITED_JSON or AVRO.
Defaults to CSV.
compress: whether to compress the data on export. Compression is not supported for
AVRO format. Defaults to True.
field_delimiter: for CSV exports, the field delimiter to use. Defaults to ','
print_header: for CSV exports, whether to include an initial header line. Default true.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._JOBS_PATH % (table_name.project_id, ''))
if isinstance(destination, basestring):
destination = [destination]
data = {
# 'projectId': table_name.project_id, # Code sample shows this but it is not in job
# reference spec. Filed as b/19235843
'kind': 'bigquery#job',
'configuration': {
'extract': {
'sourceTable': {
'projectId': table_name.project_id,
'datasetId': table_name.dataset_id,
'tableId': table_name.table_id,
},
'compression': 'GZIP' if compress else 'NONE',
'fieldDelimiter': field_delimiter,
'printHeader': print_header,
'destinationUris': destination,
'destinationFormat': format,
}
}
}
return datalab.utils.Http.request(url, data=data, credentials=self._credentials) | [
"def",
"table_extract",
"(",
"self",
",",
"table_name",
",",
"destination",
",",
"format",
"=",
"'CSV'",
",",
"compress",
"=",
"True",
",",
"field_delimiter",
"=",
"','",
",",
"print_header",
"=",
"True",
")",
":",
"url",
"=",
"Api",
".",
"_ENDPOINT",
"+",
"(",
"Api",
".",
"_JOBS_PATH",
"%",
"(",
"table_name",
".",
"project_id",
",",
"''",
")",
")",
"if",
"isinstance",
"(",
"destination",
",",
"basestring",
")",
":",
"destination",
"=",
"[",
"destination",
"]",
"data",
"=",
"{",
"# 'projectId': table_name.project_id, # Code sample shows this but it is not in job",
"# reference spec. Filed as b/19235843",
"'kind'",
":",
"'bigquery#job'",
",",
"'configuration'",
":",
"{",
"'extract'",
":",
"{",
"'sourceTable'",
":",
"{",
"'projectId'",
":",
"table_name",
".",
"project_id",
",",
"'datasetId'",
":",
"table_name",
".",
"dataset_id",
",",
"'tableId'",
":",
"table_name",
".",
"table_id",
",",
"}",
",",
"'compression'",
":",
"'GZIP'",
"if",
"compress",
"else",
"'NONE'",
",",
"'fieldDelimiter'",
":",
"field_delimiter",
",",
"'printHeader'",
":",
"print_header",
",",
"'destinationUris'",
":",
"destination",
",",
"'destinationFormat'",
":",
"format",
",",
"}",
"}",
"}",
"return",
"datalab",
".",
"utils",
".",
"Http",
".",
"request",
"(",
"url",
",",
"data",
"=",
"data",
",",
"credentials",
"=",
"self",
".",
"_credentials",
")"
] | Exports the table to GCS.
Args:
table_name: the name of the table as a tuple of components.
destination: the destination URI(s). Can be a single URI or a list.
format: the format to use for the exported data; one of CSV, NEWLINE_DELIMITED_JSON or AVRO.
Defaults to CSV.
compress: whether to compress the data on export. Compression is not supported for
AVRO format. Defaults to True.
field_delimiter: for CSV exports, the field delimiter to use. Defaults to ','
print_header: for CSV exports, whether to include an initial header line. Default true.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation. | [
"Exports",
"the",
"table",
"to",
"GCS",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_api.py#L479-L519 | train | 237,843 |
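One practical note the code supports but the docstring leaves implicit: a single destination string is normalized into a list, and a '*' wildcard in the URI lets BigQuery shard a large export across many files. A tiny sketch (the bucket name is hypothetical):

destination = 'gs://my-bucket/export/part-*.csv.gz'
if isinstance(destination, str):  # the Py2 code above checks basestring
    destination = [destination]
print(destination)  # ['gs://my-bucket/export/part-*.csv.gz']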
googledatalab/pydatalab | datalab/bigquery/_api.py | Api.table_update | def table_update(self, table_name, table_info):
"""Updates the Table info.
Args:
table_name: the name of the table to update as a tuple of components.
table_info: the Table resource with updated fields.
"""
url = Api._ENDPOINT + (Api._TABLES_PATH % table_name)
return datalab.utils.Http.request(url, method='PUT', data=table_info,
credentials=self._credentials) | python | def table_update(self, table_name, table_info):
"""Updates the Table info.
Args:
table_name: the name of the table to update as a tuple of components.
table_info: the Table resource with updated fields.
"""
url = Api._ENDPOINT + (Api._TABLES_PATH % table_name)
return datalab.utils.Http.request(url, method='PUT', data=table_info,
credentials=self._credentials) | [
"def",
"table_update",
"(",
"self",
",",
"table_name",
",",
"table_info",
")",
":",
"url",
"=",
"Api",
".",
"_ENDPOINT",
"+",
"(",
"Api",
".",
"_TABLES_PATH",
"%",
"table_name",
")",
"return",
"datalab",
".",
"utils",
".",
"Http",
".",
"request",
"(",
"url",
",",
"method",
"=",
"'PUT'",
",",
"data",
"=",
"table_info",
",",
"credentials",
"=",
"self",
".",
"_credentials",
")"
] | Updates the Table info.
Args:
table_name: the name of the table to update as a tuple of components.
table_info: the Table resource with updated fields. | [
"Updates",
"the",
"Table",
"info",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_api.py#L521-L530 | train | 237,844 |
googledatalab/pydatalab | google/datalab/contrib/mlworkbench/_archive.py | extract_archive | def extract_archive(archive_path, dest):
"""Extract a local or GCS archive file to a folder.
Args:
archive_path: local or gcs path to a *.tar.gz or *.tar file
dest: local folder the archive will be extracted to
"""
# Make the dest folder if it does not exist
if not os.path.isdir(dest):
os.makedirs(dest)
try:
tmpfolder = None
if (not tf.gfile.Exists(archive_path)) or tf.gfile.IsDirectory(archive_path):
raise ValueError('archive path %s is not a file' % archive_path)
if archive_path.startswith('gs://'):
# Copy the file to a local temp folder
tmpfolder = tempfile.mkdtemp()
cmd_args = ['gsutil', 'cp', archive_path, tmpfolder]
_shell_process.run_and_monitor(cmd_args, os.getpid())
archive_path = os.path.join(tmpfolder, os.path.basename(archive_path))
if archive_path.lower().endswith('.tar.gz'):
flags = '-xzf'
elif archive_path.lower().endswith('.tar'):
flags = '-xf'
else:
raise ValueError('Only .tar.gz or .tar files are supported.')
cmd_args = ['tar', flags, archive_path, '-C', dest]
_shell_process.run_and_monitor(cmd_args, os.getpid())
finally:
if tmpfolder:
shutil.rmtree(tmpfolder) | python | def extract_archive(archive_path, dest):
"""Extract a local or GCS archive file to a folder.
Args:
archive_path: local or gcs path to a *.tar.gz or *.tar file
dest: local folder the archive will be extracted to
"""
# Make the dest folder if it does not exist
if not os.path.isdir(dest):
os.makedirs(dest)
try:
tmpfolder = None
if (not tf.gfile.Exists(archive_path)) or tf.gfile.IsDirectory(archive_path):
raise ValueError('archive path %s is not a file' % archive_path)
if archive_path.startswith('gs://'):
# Copy the file to a local temp folder
tmpfolder = tempfile.mkdtemp()
cmd_args = ['gsutil', 'cp', archive_path, tmpfolder]
_shell_process.run_and_monitor(cmd_args, os.getpid())
archive_path = os.path.join(tmpfolder, os.path.basename(archive_path))
if archive_path.lower().endswith('.tar.gz'):
flags = '-xzf'
elif archive_path.lower().endswith('.tar'):
flags = '-xf'
else:
raise ValueError('Only .tar.gz or .tar files are supported.')
cmd_args = ['tar', flags, archive_path, '-C', dest]
_shell_process.run_and_monitor(cmd_args, os.getpid())
finally:
if tmpfolder:
shutil.rmtree(tmpfolder) | [
"def",
"extract_archive",
"(",
"archive_path",
",",
"dest",
")",
":",
"# Make the dest folder if it does not exist",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"dest",
")",
":",
"os",
".",
"makedirs",
"(",
"dest",
")",
"try",
":",
"tmpfolder",
"=",
"None",
"if",
"(",
"not",
"tf",
".",
"gfile",
".",
"Exists",
"(",
"archive_path",
")",
")",
"or",
"tf",
".",
"gfile",
".",
"IsDirectory",
"(",
"archive_path",
")",
":",
"raise",
"ValueError",
"(",
"'archive path %s is not a file'",
"%",
"archive_path",
")",
"if",
"archive_path",
".",
"startswith",
"(",
"'gs://'",
")",
":",
"# Copy the file to a local temp folder",
"tmpfolder",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"cmd_args",
"=",
"[",
"'gsutil'",
",",
"'cp'",
",",
"archive_path",
",",
"tmpfolder",
"]",
"_shell_process",
".",
"run_and_monitor",
"(",
"cmd_args",
",",
"os",
".",
"getpid",
"(",
")",
")",
"archive_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmpfolder",
",",
"os",
".",
"path",
".",
"name",
"(",
"archive_path",
")",
")",
"if",
"archive_path",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"'.tar.gz'",
")",
":",
"flags",
"=",
"'-xzf'",
"elif",
"archive_path",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"'.tar'",
")",
":",
"flags",
"=",
"'-xf'",
"else",
":",
"raise",
"ValueError",
"(",
"'Only tar.gz or tar.Z files are supported.'",
")",
"cmd_args",
"=",
"[",
"'tar'",
",",
"flags",
",",
"archive_path",
",",
"'-C'",
",",
"dest",
"]",
"_shell_process",
".",
"run_and_monitor",
"(",
"cmd_args",
",",
"os",
".",
"getpid",
"(",
")",
")",
"finally",
":",
"if",
"tmpfolder",
":",
"shutil",
".",
"rmtree",
"(",
"tmpfolder",
")"
] | Extract a local or GCS archive file to a folder.
Args:
archive_path: local or gcs path to a *.tar.gz or *.tar file
dest: local folder the archive will be extracted to | [
"Extract",
"a",
"local",
"or",
"GCS",
"archive",
"file",
"to",
"a",
"folder",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/mlworkbench/_archive.py#L27-L62 | train | 237,845 |
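A minimal usage sketch for extract_archive, assuming gsutil is on PATH (the helper shells out to it for GCS sources); the paths are placeholders:

from google.datalab.contrib.mlworkbench._archive import extract_archive

# GCS source: copied to a temp folder first, then untarred into dest.
extract_archive('gs://my-bucket/models/model.tar.gz', '/tmp/model')

# Local source: extracted in place, no gsutil copy needed.
extract_archive('/data/archives/images.tar', '/tmp/images')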
googledatalab/pydatalab | solutionbox/image_classification/mltoolbox/image/classification/_cloud.py | Cloud.preprocess | def preprocess(train_dataset, output_dir, eval_dataset, checkpoint, pipeline_option):
"""Preprocess data in Cloud with DataFlow."""
import apache_beam as beam
import google.datalab.utils
from . import _preprocess
if checkpoint is None:
checkpoint = _util._DEFAULT_CHECKPOINT_GSURL
job_name = ('preprocess-image-classification-' +
datetime.datetime.now().strftime('%y%m%d-%H%M%S'))
staging_package_url = _util.repackage_to_staging(output_dir)
tmpdir = tempfile.mkdtemp()
# suppress DataFlow warnings about wheel package as extra package.
original_level = logging.getLogger().getEffectiveLevel()
logging.getLogger().setLevel(logging.ERROR)
try:
# Workaround for DataFlow 2.0, which doesn't work well with extra packages in GCS.
# Remove when the issue is fixed and new version of DataFlow is included in Datalab.
extra_packages = [staging_package_url, _TF_GS_URL, _PROTOBUF_GS_URL]
local_packages = [os.path.join(tmpdir, os.path.basename(p))
for p in extra_packages]
for source, dest in zip(extra_packages, local_packages):
file_io.copy(source, dest, overwrite=True)
options = {
'staging_location': os.path.join(output_dir, 'tmp', 'staging'),
'temp_location': os.path.join(output_dir, 'tmp'),
'job_name': job_name,
'project': _util.default_project(),
'extra_packages': local_packages,
'teardown_policy': 'TEARDOWN_ALWAYS',
'no_save_main_session': True
}
if pipeline_option is not None:
options.update(pipeline_option)
opts = beam.pipeline.PipelineOptions(flags=[], **options)
p = beam.Pipeline('DataflowRunner', options=opts)
_preprocess.configure_pipeline(p, train_dataset, eval_dataset,
checkpoint, output_dir, job_name)
job_results = p.run()
finally:
shutil.rmtree(tmpdir)
logging.getLogger().setLevel(original_level)
if (_util.is_in_IPython()):
import IPython
dataflow_url = 'https://console.developers.google.com/dataflow?project=%s' % \
_util.default_project()
html = 'Job "%s" submitted.' % job_name
html += '<p>Click <a href="%s" target="_blank">here</a> to track preprocessing job. <br/>' \
% dataflow_url
IPython.display.display_html(html, raw=True)
return google.datalab.utils.DataflowJob(job_results) | python | def preprocess(train_dataset, output_dir, eval_dataset, checkpoint, pipeline_option):
"""Preprocess data in Cloud with DataFlow."""
import apache_beam as beam
import google.datalab.utils
from . import _preprocess
if checkpoint is None:
checkpoint = _util._DEFAULT_CHECKPOINT_GSURL
job_name = ('preprocess-image-classification-' +
datetime.datetime.now().strftime('%y%m%d-%H%M%S'))
staging_package_url = _util.repackage_to_staging(output_dir)
tmpdir = tempfile.mkdtemp()
# suppress DataFlow warnings about wheel package as extra package.
original_level = logging.getLogger().getEffectiveLevel()
logging.getLogger().setLevel(logging.ERROR)
try:
# Workaround for DataFlow 2.0, which doesn't work well with extra packages in GCS.
# Remove when the issue is fixed and new version of DataFlow is included in Datalab.
extra_packages = [staging_package_url, _TF_GS_URL, _PROTOBUF_GS_URL]
local_packages = [os.path.join(tmpdir, os.path.basename(p))
for p in extra_packages]
for source, dest in zip(extra_packages, local_packages):
file_io.copy(source, dest, overwrite=True)
options = {
'staging_location': os.path.join(output_dir, 'tmp', 'staging'),
'temp_location': os.path.join(output_dir, 'tmp'),
'job_name': job_name,
'project': _util.default_project(),
'extra_packages': local_packages,
'teardown_policy': 'TEARDOWN_ALWAYS',
'no_save_main_session': True
}
if pipeline_option is not None:
options.update(pipeline_option)
opts = beam.pipeline.PipelineOptions(flags=[], **options)
p = beam.Pipeline('DataflowRunner', options=opts)
_preprocess.configure_pipeline(p, train_dataset, eval_dataset,
checkpoint, output_dir, job_name)
job_results = p.run()
finally:
shutil.rmtree(tmpdir)
logging.getLogger().setLevel(original_level)
if (_util.is_in_IPython()):
import IPython
dataflow_url = 'https://console.developers.google.com/dataflow?project=%s' % \
_util.default_project()
html = 'Job "%s" submitted.' % job_name
html += '<p>Click <a href="%s" target="_blank">here</a> to track preprocessing job. <br/>' \
% dataflow_url
IPython.display.display_html(html, raw=True)
return google.datalab.utils.DataflowJob(job_results) | [
"def",
"preprocess",
"(",
"train_dataset",
",",
"output_dir",
",",
"eval_dataset",
",",
"checkpoint",
",",
"pipeline_option",
")",
":",
"import",
"apache_beam",
"as",
"beam",
"import",
"google",
".",
"datalab",
".",
"utils",
"from",
".",
"import",
"_preprocess",
"if",
"checkpoint",
"is",
"None",
":",
"checkpoint",
"=",
"_util",
".",
"_DEFAULT_CHECKPOINT_GSURL",
"job_name",
"=",
"(",
"'preprocess-image-classification-'",
"+",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"'%y%m%d-%H%M%S'",
")",
")",
"staging_package_url",
"=",
"_util",
".",
"repackage_to_staging",
"(",
"output_dir",
")",
"tmpdir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"# suppress DataFlow warnings about wheel package as extra package.",
"original_level",
"=",
"logging",
".",
"getLogger",
"(",
")",
".",
"getEffectiveLevel",
"(",
")",
"logging",
".",
"getLogger",
"(",
")",
".",
"setLevel",
"(",
"logging",
".",
"ERROR",
")",
"try",
":",
"# Workaround for DataFlow 2.0, which doesn't work well with extra packages in GCS.",
"# Remove when the issue is fixed and new version of DataFlow is included in Datalab.",
"extra_packages",
"=",
"[",
"staging_package_url",
",",
"_TF_GS_URL",
",",
"_PROTOBUF_GS_URL",
"]",
"local_packages",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"tmpdir",
",",
"os",
".",
"path",
".",
"basename",
"(",
"p",
")",
")",
"for",
"p",
"in",
"extra_packages",
"]",
"for",
"source",
",",
"dest",
"in",
"zip",
"(",
"extra_packages",
",",
"local_packages",
")",
":",
"file_io",
".",
"copy",
"(",
"source",
",",
"dest",
",",
"overwrite",
"=",
"True",
")",
"options",
"=",
"{",
"'staging_location'",
":",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"'tmp'",
",",
"'staging'",
")",
",",
"'temp_location'",
":",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"'tmp'",
")",
",",
"'job_name'",
":",
"job_name",
",",
"'project'",
":",
"_util",
".",
"default_project",
"(",
")",
",",
"'extra_packages'",
":",
"local_packages",
",",
"'teardown_policy'",
":",
"'TEARDOWN_ALWAYS'",
",",
"'no_save_main_session'",
":",
"True",
"}",
"if",
"pipeline_option",
"is",
"not",
"None",
":",
"options",
".",
"update",
"(",
"pipeline_option",
")",
"opts",
"=",
"beam",
".",
"pipeline",
".",
"PipelineOptions",
"(",
"flags",
"=",
"[",
"]",
",",
"*",
"*",
"options",
")",
"p",
"=",
"beam",
".",
"Pipeline",
"(",
"'DataflowRunner'",
",",
"options",
"=",
"opts",
")",
"_preprocess",
".",
"configure_pipeline",
"(",
"p",
",",
"train_dataset",
",",
"eval_dataset",
",",
"checkpoint",
",",
"output_dir",
",",
"job_name",
")",
"job_results",
"=",
"p",
".",
"run",
"(",
")",
"finally",
":",
"shutil",
".",
"rmtree",
"(",
"tmpdir",
")",
"logging",
".",
"getLogger",
"(",
")",
".",
"setLevel",
"(",
"original_level",
")",
"if",
"(",
"_util",
".",
"is_in_IPython",
"(",
")",
")",
":",
"import",
"IPython",
"dataflow_url",
"=",
"'https://console.developers.google.com/dataflow?project=%s'",
"%",
"_util",
".",
"default_project",
"(",
")",
"html",
"=",
"'Job \"%s\" submitted.'",
"%",
"job_name",
"html",
"+=",
"'<p>Click <a href=\"%s\" target=\"_blank\">here</a> to track preprocessing job. <br/>'",
"%",
"dataflow_url",
"IPython",
".",
"display",
".",
"display_html",
"(",
"html",
",",
"raw",
"=",
"True",
")",
"return",
"google",
".",
"datalab",
".",
"utils",
".",
"DataflowJob",
"(",
"job_results",
")"
] | Preprocess data in Cloud with DataFlow. | [
"Preprocess",
"data",
"in",
"Cloud",
"with",
"DataFlow",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_cloud.py#L40-L96 | train | 237,846 |
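A hedged invocation sketch for Cloud.preprocess. CsvDataSet and its constructor arguments are assumptions about the companion google.datalab.ml API; the bucket paths are placeholders:

import google.datalab.ml as ml

train_set = ml.CsvDataSet('gs://my-bucket/train.csv',
                          schema='image_url:STRING,label:STRING')
eval_set = ml.CsvDataSet('gs://my-bucket/eval.csv',
                         schema='image_url:STRING,label:STRING')

# checkpoint=None falls back to _DEFAULT_CHECKPOINT_GSURL; extra Dataflow
# options (e.g. worker count) are merged in via pipeline_option.
job = Cloud.preprocess(train_set, 'gs://my-bucket/preprocess_out', eval_set,
                       checkpoint=None, pipeline_option={'num_workers': 5})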
googledatalab/pydatalab | solutionbox/image_classification/mltoolbox/image/classification/_cloud.py | Cloud.train | def train(input_dir, batch_size, max_steps, output_dir, checkpoint, cloud_train_config):
"""Train model in the cloud with CloudML trainer service."""
import google.datalab.ml as ml
if checkpoint is None:
checkpoint = _util._DEFAULT_CHECKPOINT_GSURL
staging_package_url = _util.repackage_to_staging(output_dir)
job_args = {
'input_dir': input_dir,
'max_steps': max_steps,
'batch_size': batch_size,
'checkpoint': checkpoint
}
job_request = {
'package_uris': [staging_package_url, _TF_GS_URL, _PROTOBUF_GS_URL],
'python_module': 'mltoolbox.image.classification.task',
'job_dir': output_dir,
'args': job_args
}
job_request.update(dict(cloud_train_config._asdict()))
job_id = 'image_classification_train_' + datetime.datetime.now().strftime('%y%m%d_%H%M%S')
job = ml.Job.submit_training(job_request, job_id)
if (_util.is_in_IPython()):
import IPython
log_url_query_strings = {
'project': _util.default_project(),
'resource': 'ml.googleapis.com/job_id/' + job.info['jobId']
}
log_url = 'https://console.developers.google.com/logs/viewer?' + \
urllib.urlencode(log_url_query_strings)
html = 'Job "%s" submitted.' % job.info['jobId']
html += '<p>Click <a href="%s" target="_blank">here</a> to view cloud log. <br/>' % log_url
IPython.display.display_html(html, raw=True)
return job | python | def train(input_dir, batch_size, max_steps, output_dir, checkpoint, cloud_train_config):
"""Train model in the cloud with CloudML trainer service."""
import google.datalab.ml as ml
if checkpoint is None:
checkpoint = _util._DEFAULT_CHECKPOINT_GSURL
staging_package_url = _util.repackage_to_staging(output_dir)
job_args = {
'input_dir': input_dir,
'max_steps': max_steps,
'batch_size': batch_size,
'checkpoint': checkpoint
}
job_request = {
'package_uris': [staging_package_url, _TF_GS_URL, _PROTOBUF_GS_URL],
'python_module': 'mltoolbox.image.classification.task',
'job_dir': output_dir,
'args': job_args
}
job_request.update(dict(cloud_train_config._asdict()))
job_id = 'image_classification_train_' + datetime.datetime.now().strftime('%y%m%d_%H%M%S')
job = ml.Job.submit_training(job_request, job_id)
if (_util.is_in_IPython()):
import IPython
log_url_query_strings = {
'project': _util.default_project(),
'resource': 'ml.googleapis.com/job_id/' + job.info['jobId']
}
log_url = 'https://console.developers.google.com/logs/viewer?' + \
urllib.urlencode(log_url_query_strings)
html = 'Job "%s" submitted.' % job.info['jobId']
html += '<p>Click <a href="%s" target="_blank">here</a> to view cloud log. <br/>' % log_url
IPython.display.display_html(html, raw=True)
return job | [
"def",
"train",
"(",
"input_dir",
",",
"batch_size",
",",
"max_steps",
",",
"output_dir",
",",
"checkpoint",
",",
"cloud_train_config",
")",
":",
"import",
"google",
".",
"datalab",
".",
"ml",
"as",
"ml",
"if",
"checkpoint",
"is",
"None",
":",
"checkpoint",
"=",
"_util",
".",
"_DEFAULT_CHECKPOINT_GSURL",
"staging_package_url",
"=",
"_util",
".",
"repackage_to_staging",
"(",
"output_dir",
")",
"job_args",
"=",
"{",
"'input_dir'",
":",
"input_dir",
",",
"'max_steps'",
":",
"max_steps",
",",
"'batch_size'",
":",
"batch_size",
",",
"'checkpoint'",
":",
"checkpoint",
"}",
"job_request",
"=",
"{",
"'package_uris'",
":",
"[",
"staging_package_url",
",",
"_TF_GS_URL",
",",
"_PROTOBUF_GS_URL",
"]",
",",
"'python_module'",
":",
"'mltoolbox.image.classification.task'",
",",
"'job_dir'",
":",
"output_dir",
",",
"'args'",
":",
"job_args",
"}",
"job_request",
".",
"update",
"(",
"dict",
"(",
"cloud_train_config",
".",
"_asdict",
"(",
")",
")",
")",
"job_id",
"=",
"'image_classification_train_'",
"+",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"'%y%m%d_%H%M%S'",
")",
"job",
"=",
"ml",
".",
"Job",
".",
"submit_training",
"(",
"job_request",
",",
"job_id",
")",
"if",
"(",
"_util",
".",
"is_in_IPython",
"(",
")",
")",
":",
"import",
"IPython",
"log_url_query_strings",
"=",
"{",
"'project'",
":",
"_util",
".",
"default_project",
"(",
")",
",",
"'resource'",
":",
"'ml.googleapis.com/job_id/'",
"+",
"job",
".",
"info",
"[",
"'jobId'",
"]",
"}",
"log_url",
"=",
"'https://console.developers.google.com/logs/viewer?'",
"+",
"urllib",
".",
"urlencode",
"(",
"log_url_query_strings",
")",
"html",
"=",
"'Job \"%s\" submitted.'",
"%",
"job",
".",
"info",
"[",
"'jobId'",
"]",
"html",
"+=",
"'<p>Click <a href=\"%s\" target=\"_blank\">here</a> to view cloud log. <br/>'",
"%",
"log_url",
"IPython",
".",
"display",
".",
"display_html",
"(",
"html",
",",
"raw",
"=",
"True",
")",
"return",
"job"
] | Train model in the cloud with CloudML trainer service. | [
"Train",
"model",
"in",
"the",
"cloud",
"with",
"CloudML",
"trainer",
"service",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_cloud.py#L99-L132 | train | 237,847 |
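A hedged sketch for Cloud.train. The CloudTrainingConfig fields shown (region, scale_tier) are assumptions about the namedtuple that the method consumes via _asdict(); paths are placeholders:

import google.datalab.ml as ml

config = ml.CloudTrainingConfig(region='us-central1', scale_tier='BASIC')
job = Cloud.train(input_dir='gs://my-bucket/preprocess_out',
                  batch_size=64,
                  max_steps=2000,
                  output_dir='gs://my-bucket/train_out',
                  checkpoint=None,            # use the default checkpoint
                  cloud_train_config=config)
job.wait()   # assumed: the returned Job exposes wait() like other datalab jobs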
googledatalab/pydatalab | google/datalab/bigquery/_query.py | Query.from_table | def from_table(table, fields=None):
""" Return a Query for the given Table object
Args:
table: the Table object to construct a Query out of
fields: the fields to return. If None, all fields will be returned. This can be a string
which will be injected into the Query after SELECT, or a list of field names.
Returns:
A Query object that will return the specified fields from the records in the Table.
"""
if fields is None:
fields = '*'
elif isinstance(fields, list):
fields = ','.join(fields)
return Query('SELECT %s FROM %s' % (fields, table._repr_sql_())) | python | def from_table(table, fields=None):
""" Return a Query for the given Table object
Args:
table: the Table object to construct a Query out of
fields: the fields to return. If None, all fields will be returned. This can be a string
which will be injected into the Query after SELECT, or a list of field names.
Returns:
A Query object that will return the specified fields from the records in the Table.
"""
if fields is None:
fields = '*'
elif isinstance(fields, list):
fields = ','.join(fields)
return Query('SELECT %s FROM %s' % (fields, table._repr_sql_())) | [
"def",
"from_table",
"(",
"table",
",",
"fields",
"=",
"None",
")",
":",
"if",
"fields",
"is",
"None",
":",
"fields",
"=",
"'*'",
"elif",
"isinstance",
"(",
"fields",
",",
"list",
")",
":",
"fields",
"=",
"','",
".",
"join",
"(",
"fields",
")",
"return",
"Query",
"(",
"'SELECT %s FROM %s'",
"%",
"(",
"fields",
",",
"table",
".",
"_repr_sql_",
"(",
")",
")",
")"
] | Return a Query for the given Table object
Args:
table: the Table object to construct a Query out of
fields: the fields to return. If None, all fields will be returned. This can be a string
which will be injected into the Query after SELECT, or a list of field names.
Returns:
A Query object that will return the specified fields from the records in the Table. | [
"Return",
"a",
"Query",
"for",
"the",
"given",
"Table",
"object"
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_query.py#L105-L120 | train | 237,848 |
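Usage sketch for from_table; the table name is a placeholder:

import google.datalab.bigquery as bq

table = bq.Table('my-project.my_dataset.logs')
q_all = bq.Query.from_table(table)                            # SELECT * FROM ...
q_cols = bq.Query.from_table(table, fields=['ts', 'status'])  # SELECT ts,status
# A plain string is injected verbatim after SELECT, so expressions work too:
q_expr = bq.Query.from_table(table, fields='COUNT(*) AS n')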
googledatalab/pydatalab | google/datalab/bigquery/_query.py | Query._expanded_sql | def _expanded_sql(self, sampling=None):
"""Get the expanded SQL of this object, including all subqueries, UDFs, and external datasources
Returns:
The expanded SQL string of this object
"""
# use lists to preserve the order of subqueries, bigquery will not like listing subqueries
# out of order if they depend on each other. for example. the following will be rejected:
# WITH q2 as (SELECT * FROM q1),
# q1 as (SELECT * FROM mytable),
# SELECT * FROM q2
# so when we're getting the dependencies, use recursion into a list to maintain the order
udfs = []
subqueries = []
expanded_sql = ''
def _recurse_subqueries(query):
"""Recursively scan subqueries and add their pieces to global scope udfs and subqueries
"""
if query._subqueries:
for subquery in query._subqueries:
_recurse_subqueries(subquery[1])
subqueries.extend([s for s in query._subqueries if s not in subqueries])
if query._udfs:
# query._udfs is a list of (name, UDF) tuples; we just want the UDF.
udfs.extend([u[1] for u in query._udfs if u[1] not in udfs])
_recurse_subqueries(self)
if udfs:
expanded_sql += '\n'.join([udf._expanded_sql() for udf in udfs])
expanded_sql += '\n'
def _indent_query(subquery):
return ' ' + subquery._sql.replace('\n', '\n ')
if subqueries:
expanded_sql += 'WITH ' + \
'\n),\n'.join(['%s AS (\n%s' % (sq[0], _indent_query(sq[1]))
for sq in subqueries])
expanded_sql += '\n)\n\n'
expanded_sql += sampling(self._sql) if sampling else self._sql
return expanded_sql | python | def _expanded_sql(self, sampling=None):
"""Get the expanded SQL of this object, including all subqueries, UDFs, and external datasources
Returns:
The expanded SQL string of this object
"""
# use lists to preserve the order of subqueries, bigquery will not like listing subqueries
# out of order if they depend on each other. for example. the following will be rejected:
# WITH q2 as (SELECT * FROM q1),
# q1 as (SELECT * FROM mytable),
# SELECT * FROM q2
# so when we're getting the dependencies, use recursion into a list to maintain the order
udfs = []
subqueries = []
expanded_sql = ''
def _recurse_subqueries(query):
"""Recursively scan subqueries and add their pieces to global scope udfs and subqueries
"""
if query._subqueries:
for subquery in query._subqueries:
_recurse_subqueries(subquery[1])
subqueries.extend([s for s in query._subqueries if s not in subqueries])
if query._udfs:
# query._udfs is a list of (name, UDF) tuples; we just want the UDF.
udfs.extend([u[1] for u in query._udfs if u[1] not in udfs])
_recurse_subqueries(self)
if udfs:
expanded_sql += '\n'.join([udf._expanded_sql() for udf in udfs])
expanded_sql += '\n'
def _indent_query(subquery):
return ' ' + subquery._sql.replace('\n', '\n ')
if subqueries:
expanded_sql += 'WITH ' + \
'\n),\n'.join(['%s AS (\n%s' % (sq[0], _indent_query(sq[1]))
for sq in subqueries])
expanded_sql += '\n)\n\n'
expanded_sql += sampling(self._sql) if sampling else self._sql
return expanded_sql | [
"def",
"_expanded_sql",
"(",
"self",
",",
"sampling",
"=",
"None",
")",
":",
"# use lists to preserve the order of subqueries, bigquery will not like listing subqueries",
"# out of order if they depend on each other. for example. the following will be rejected:",
"# WITH q2 as (SELECT * FROM q1),",
"# q1 as (SELECT * FROM mytable),",
"# SELECT * FROM q2",
"# so when we're getting the dependencies, use recursion into a list to maintain the order",
"udfs",
"=",
"[",
"]",
"subqueries",
"=",
"[",
"]",
"expanded_sql",
"=",
"''",
"def",
"_recurse_subqueries",
"(",
"query",
")",
":",
"\"\"\"Recursively scan subqueries and add their pieces to global scope udfs and subqueries\n \"\"\"",
"if",
"query",
".",
"_subqueries",
":",
"for",
"subquery",
"in",
"query",
".",
"_subqueries",
":",
"_recurse_subqueries",
"(",
"subquery",
"[",
"1",
"]",
")",
"subqueries",
".",
"extend",
"(",
"[",
"s",
"for",
"s",
"in",
"query",
".",
"_subqueries",
"if",
"s",
"not",
"in",
"subqueries",
"]",
")",
"if",
"query",
".",
"_udfs",
":",
"# query._udfs is a list of (name, UDF) tuples; we just want the UDF.",
"udfs",
".",
"extend",
"(",
"[",
"u",
"[",
"1",
"]",
"for",
"u",
"in",
"query",
".",
"_udfs",
"if",
"u",
"[",
"1",
"]",
"not",
"in",
"udfs",
"]",
")",
"_recurse_subqueries",
"(",
"self",
")",
"if",
"udfs",
":",
"expanded_sql",
"+=",
"'\\n'",
".",
"join",
"(",
"[",
"udf",
".",
"_expanded_sql",
"(",
")",
"for",
"udf",
"in",
"udfs",
"]",
")",
"expanded_sql",
"+=",
"'\\n'",
"def",
"_indent_query",
"(",
"subquery",
")",
":",
"return",
"' '",
"+",
"subquery",
".",
"_sql",
".",
"replace",
"(",
"'\\n'",
",",
"'\\n '",
")",
"if",
"subqueries",
":",
"expanded_sql",
"+=",
"'WITH '",
"+",
"'\\n),\\n'",
".",
"join",
"(",
"[",
"'%s AS (\\n%s'",
"%",
"(",
"sq",
"[",
"0",
"]",
",",
"_indent_query",
"(",
"sq",
"[",
"1",
"]",
")",
")",
"for",
"sq",
"in",
"subqueries",
"]",
")",
"expanded_sql",
"+=",
"'\\n)\\n\\n'",
"expanded_sql",
"+=",
"sampling",
"(",
"self",
".",
"_sql",
")",
"if",
"sampling",
"else",
"self",
".",
"_sql",
"return",
"expanded_sql"
] | Get the expanded SQL of this object, including all subqueries, UDFs, and external datasources
Returns:
The expanded SQL string of this object | [
"Get",
"the",
"expanded",
"SQL",
"of",
"this",
"object",
"including",
"all",
"subqueries",
"UDFs",
"and",
"external",
"datasources"
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_query.py#L122-L167 | train | 237,849 |
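A sketch of the dependency ordering described in the comments above. The env/subqueries wiring in the constructor call is an assumption; _expanded_sql itself is the method shown in this row:

import google.datalab.bigquery as bq

q1 = bq.Query('SELECT * FROM `my-project.my_dataset.mytable`')
q2 = bq.Query('SELECT * FROM q1', env={'q1': q1}, subqueries=['q1'])

# Depth-first recursion emits q1 before q2, so BigQuery accepts the WITH:
#
#   WITH q1 AS (
#     SELECT * FROM `my-project.my_dataset.mytable`
#   )
#
#   SELECT * FROM q1
print(q2._expanded_sql())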
googledatalab/pydatalab | google/datalab/contrib/mlworkbench/_shell_process.py | run_and_monitor | def run_and_monitor(args, pid_to_wait, std_out_filter_fn=None, cwd=None):
""" Start a process, and have it depend on another specified process.
Args:
args: the args of the process to start and monitor.
pid_to_wait: the process to wait on. If the process ends, also kill the started process.
std_out_filter_fn: a filter function which takes a string content from the stdout of the
started process, and returns True if the string should be redirected to console stdout.
cwd: the current working directory for the process to start.
"""
monitor_process = None
try:
p = subprocess.Popen(args,
cwd=cwd,
env=os.environ,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
pids_to_kill = [p.pid]
script = ('import %s;%s._wait_and_kill(%s, %s)' %
(__name__, __name__, str(pid_to_wait), str(pids_to_kill)))
monitor_process = subprocess.Popen(['python', '-c', script], env=os.environ)
while p.poll() is None:
line = p.stdout.readline()
if not six.PY2:
line = line.decode()
if std_out_filter_fn is None or std_out_filter_fn(line):
sys.stdout.write(line)
# Cannot do sys.stdout.flush(). It appears that too many flush() calls will hang browser.
finally:
if monitor_process:
monitor_process.kill() | python | def run_and_monitor(args, pid_to_wait, std_out_filter_fn=None, cwd=None):
""" Start a process, and have it depend on another specified process.
Args:
args: the args of the process to start and monitor.
pid_to_wait: the process to wait on. If the process ends, also kill the started process.
std_out_filter_fn: a filter function which takes a string content from the stdout of the
started process, and returns True if the string should be redirected to console stdout.
cwd: the current working directory for the process to start.
"""
monitor_process = None
try:
p = subprocess.Popen(args,
cwd=cwd,
env=os.environ,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
pids_to_kill = [p.pid]
script = ('import %s;%s._wait_and_kill(%s, %s)' %
(__name__, __name__, str(pid_to_wait), str(pids_to_kill)))
monitor_process = subprocess.Popen(['python', '-c', script], env=os.environ)
while p.poll() is None:
line = p.stdout.readline()
if not six.PY2:
line = line.decode()
if std_out_filter_fn is None or std_out_filter_fn(line):
sys.stdout.write(line)
# Cannot do sys.stdout.flush(). It appears that too many flush() calls will hang browser.
finally:
if monitor_process:
monitor_process.kill() | [
"def",
"run_and_monitor",
"(",
"args",
",",
"pid_to_wait",
",",
"std_out_filter_fn",
"=",
"None",
",",
"cwd",
"=",
"None",
")",
":",
"monitor_process",
"=",
"None",
"try",
":",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"args",
",",
"cwd",
"=",
"cwd",
",",
"env",
"=",
"os",
".",
"environ",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
")",
"pids_to_kill",
"=",
"[",
"p",
".",
"pid",
"]",
"script",
"=",
"(",
"'import %s;%s._wait_and_kill(%s, %s)'",
"%",
"(",
"__name__",
",",
"__name__",
",",
"str",
"(",
"pid_to_wait",
")",
",",
"str",
"(",
"pids_to_kill",
")",
")",
")",
"monitor_process",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"'python'",
",",
"'-c'",
",",
"script",
"]",
",",
"env",
"=",
"os",
".",
"environ",
")",
"while",
"p",
".",
"poll",
"(",
")",
"is",
"None",
":",
"line",
"=",
"p",
".",
"stdout",
".",
"readline",
"(",
")",
"if",
"not",
"six",
".",
"PY2",
":",
"line",
"=",
"line",
".",
"decode",
"(",
")",
"if",
"std_out_filter_fn",
"is",
"None",
"or",
"std_out_filter_fn",
"(",
"line",
")",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"line",
")",
"# Cannot do sys.stdout.flush(). It appears that too many flush() calls will hang browser.",
"finally",
":",
"if",
"monitor_process",
":",
"monitor_process",
".",
"kill",
"(",
")"
] | Start a process, and have it depend on another specified process.
Args:
args: the args of the process to start and monitor.
pid_to_wait: the process to wait on. If the process ends, also kill the started process.
std_out_filter_fn: a filter function which takes a string content from the stdout of the
started process, and returns True if the string should be redirected to console stdout.
cwd: the current working directory for the process to start. | [
"Start",
"a",
"process",
"and",
"have",
"it",
"depend",
"on",
"another",
"specified",
"process",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/mlworkbench/_shell_process.py#L43-L77 | train | 237,850 |
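Usage sketch for run_and_monitor. The trainer command and the filter predicate are illustrative; tying pid_to_wait to os.getpid() makes the monitor kill the child if the current process (e.g. a notebook kernel) dies:

import os

def _drop_info_lines(line):
    return 'INFO' not in line   # forward only non-INFO output to the console

run_and_monitor(['python', '-m', 'trainer.task', '--job-dir', '/tmp/job'],
                pid_to_wait=os.getpid(),
                std_out_filter_fn=_drop_info_lines,
                cwd='/tmp/workspace')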
googledatalab/pydatalab | google/datalab/bigquery/_table.py | TableMetadata.created_on | def created_on(self):
"""The creation timestamp."""
timestamp = self._info.get('creationTime')
return _parser.Parser.parse_timestamp(timestamp) | python | def created_on(self):
"""The creation timestamp."""
timestamp = self._info.get('creationTime')
return _parser.Parser.parse_timestamp(timestamp) | [
"def",
"created_on",
"(",
"self",
")",
":",
"timestamp",
"=",
"self",
".",
"_info",
".",
"get",
"(",
"'creationTime'",
")",
"return",
"_parser",
".",
"Parser",
".",
"parse_timestamp",
"(",
"timestamp",
")"
] | The creation timestamp. | [
"The",
"creation",
"timestamp",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_table.py#L56-L59 | train | 237,851 |
googledatalab/pydatalab | google/datalab/bigquery/_table.py | TableMetadata.expires_on | def expires_on(self):
"""The timestamp for when the table will expire, or None if unknown."""
timestamp = self._info.get('expirationTime', None)
if timestamp is None:
return None
return _parser.Parser.parse_timestamp(timestamp) | python | def expires_on(self):
"""The timestamp for when the table will expire, or None if unknown."""
timestamp = self._info.get('expirationTime', None)
if timestamp is None:
return None
return _parser.Parser.parse_timestamp(timestamp) | [
"def",
"expires_on",
"(",
"self",
")",
":",
"timestamp",
"=",
"self",
".",
"_info",
".",
"get",
"(",
"'expirationTime'",
",",
"None",
")",
"if",
"timestamp",
"is",
"None",
":",
"return",
"None",
"return",
"_parser",
".",
"Parser",
".",
"parse_timestamp",
"(",
"timestamp",
")"
] | The timestamp for when the table will expire, or None if unknown. | [
"The",
"timestamp",
"for",
"when",
"the",
"table",
"will",
"expire",
"or",
"None",
"if",
"unknown",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_table.py#L67-L72 | train | 237,852 |
googledatalab/pydatalab | google/datalab/bigquery/_table.py | TableMetadata.modified_on | def modified_on(self):
"""The timestamp for when the table was last modified."""
timestamp = self._info.get('lastModifiedTime')
return _parser.Parser.parse_timestamp(timestamp) | python | def modified_on(self):
"""The timestamp for when the table was last modified."""
timestamp = self._info.get('lastModifiedTime')
return _parser.Parser.parse_timestamp(timestamp) | [
"def",
"modified_on",
"(",
"self",
")",
":",
"timestamp",
"=",
"self",
".",
"_info",
".",
"get",
"(",
"'lastModifiedTime'",
")",
"return",
"_parser",
".",
"Parser",
".",
"parse_timestamp",
"(",
"timestamp",
")"
] | The timestamp for when the table was last modified. | [
"The",
"timestamp",
"for",
"when",
"the",
"table",
"was",
"last",
"modified",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_table.py#L80-L83 | train | 237,853 |
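The three timestamp properties above in one hedged sketch; `metadata` is assumed to be the Table property returning a TableMetadata, and the table name is a placeholder:

import google.datalab.bigquery as bq

meta = bq.Table('my-project.my_dataset.logs').metadata
print(meta.created_on)    # parsed from 'creationTime'
print(meta.modified_on)   # parsed from 'lastModifiedTime'
if meta.expires_on is None:
    print('no expiration set on this table')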
googledatalab/pydatalab | google/datalab/bigquery/_table.py | Table._load_info | def _load_info(self):
"""Loads metadata about this table."""
if self._info is None:
try:
self._info = self._api.tables_get(self._name_parts)
except Exception as e:
raise e | python | def _load_info(self):
"""Loads metadata about this table."""
if self._info is None:
try:
self._info = self._api.tables_get(self._name_parts)
except Exception as e:
raise e | [
"def",
"_load_info",
"(",
"self",
")",
":",
"if",
"self",
".",
"_info",
"is",
"None",
":",
"try",
":",
"self",
".",
"_info",
"=",
"self",
".",
"_api",
".",
"tables_get",
"(",
"self",
".",
"_name_parts",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e"
] | Loads metadata about this table. | [
"Loads",
"metadata",
"about",
"this",
"table",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_table.py#L161-L167 | train | 237,854 |
googledatalab/pydatalab | google/datalab/bigquery/_table.py | Table.exists | def exists(self):
"""Checks if the table exists.
Returns:
True if the table exists; False otherwise.
Raises:
Exception if there was an error requesting information about the table.
"""
try:
info = self._api.tables_get(self._name_parts)
except google.datalab.utils.RequestException as e:
if e.status == 404:
return False
raise e
except Exception as e:
raise e
self._info = info
return True | python | def exists(self):
"""Checks if the table exists.
Returns:
True if the table exists; False otherwise.
Raises:
Exception if there was an error requesting information about the table.
"""
try:
info = self._api.tables_get(self._name_parts)
except google.datalab.utils.RequestException as e:
if e.status == 404:
return False
raise e
except Exception as e:
raise e
self._info = info
return True | [
"def",
"exists",
"(",
"self",
")",
":",
"try",
":",
"info",
"=",
"self",
".",
"_api",
".",
"tables_get",
"(",
"self",
".",
"_name_parts",
")",
"except",
"google",
".",
"datalab",
".",
"utils",
".",
"RequestException",
"as",
"e",
":",
"if",
"e",
".",
"status",
"==",
"404",
":",
"return",
"False",
"raise",
"e",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e",
"self",
".",
"_info",
"=",
"info",
"return",
"True"
] | Checks if the table exists.
Returns:
True if the table exists; False otherwise.
Raises:
Exception if there was an error requesting information about the table. | [
"Checks",
"if",
"the",
"table",
"exists",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_table.py#L181-L198 | train | 237,855 |
googledatalab/pydatalab | google/datalab/bigquery/_table.py | Table.delete | def delete(self):
""" Delete the table.
Returns:
True if the Table no longer exists; False otherwise.
"""
try:
self._api.table_delete(self._name_parts)
except google.datalab.utils.RequestException:
# TODO(gram): May want to check the error reasons here and if it is not
# because the file didn't exist, return an error.
pass
except Exception as e:
raise e
return not self.exists() | python | def delete(self):
""" Delete the table.
Returns:
True if the Table no longer exists; False otherwise.
"""
try:
self._api.table_delete(self._name_parts)
except google.datalab.utils.RequestException:
# TODO(gram): May want to check the error reasons here and if it is not
# because the file didn't exist, return an error.
pass
except Exception as e:
raise e
return not self.exists() | [
"def",
"delete",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"_api",
".",
"table_delete",
"(",
"self",
".",
"_name_parts",
")",
"except",
"google",
".",
"datalab",
".",
"utils",
".",
"RequestException",
":",
"# TODO(gram): May want to check the error reasons here and if it is not",
"# because the file didn't exist, return an error.",
"pass",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e",
"return",
"not",
"self",
".",
"exists",
"(",
")"
] | Delete the table.
Returns:
True if the Table no longer exists; False otherwise. | [
"Delete",
"the",
"table",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_table.py#L209-L223 | train | 237,856 |
googledatalab/pydatalab | google/datalab/bigquery/_table.py | Table.create | def create(self, schema, overwrite=False):
""" Create the table with the specified schema.
Args:
schema: the schema to use to create the table. Should be a list of dictionaries, each
containing at least a pair of entries, 'name' and 'type'.
See https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
overwrite: if True, delete the table first if it exists. If False and the table exists,
creation will fail and raise an Exception.
Returns:
The Table instance.
Raises:
Exception if the table couldn't be created or already exists and truncate was False.
"""
if overwrite and self.exists():
self.delete()
if not isinstance(schema, _schema.Schema):
# Convert to a Schema object
schema = _schema.Schema(schema)
try:
response = self._api.tables_insert(self._name_parts, schema=schema._bq_schema)
except Exception as e:
raise e
if 'selfLink' in response:
self._schema = schema
return self
raise Exception("Table %s could not be created as it already exists" % self._full_name) | python | def create(self, schema, overwrite=False):
""" Create the table with the specified schema.
Args:
schema: the schema to use to create the table. Should be a list of dictionaries, each
containing at least a pair of entries, 'name' and 'type'.
See https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
overwrite: if True, delete the table first if it exists. If False and the table exists,
creation will fail and raise an Exception.
Returns:
The Table instance.
Raises:
Exception if the table couldn't be created or already exists and overwrite was False.
"""
if overwrite and self.exists():
self.delete()
if not isinstance(schema, _schema.Schema):
# Convert to a Schema object
schema = _schema.Schema(schema)
try:
response = self._api.tables_insert(self._name_parts, schema=schema._bq_schema)
except Exception as e:
raise e
if 'selfLink' in response:
self._schema = schema
return self
raise Exception("Table %s could not be created as it already exists" % self._full_name) | [
"def",
"create",
"(",
"self",
",",
"schema",
",",
"overwrite",
"=",
"False",
")",
":",
"if",
"overwrite",
"and",
"self",
".",
"exists",
"(",
")",
":",
"self",
".",
"delete",
"(",
")",
"if",
"not",
"isinstance",
"(",
"schema",
",",
"_schema",
".",
"Schema",
")",
":",
"# Convert to a Schema object",
"schema",
"=",
"_schema",
".",
"Schema",
"(",
"schema",
")",
"try",
":",
"response",
"=",
"self",
".",
"_api",
".",
"tables_insert",
"(",
"self",
".",
"_name_parts",
",",
"schema",
"=",
"schema",
".",
"_bq_schema",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e",
"if",
"'selfLink'",
"in",
"response",
":",
"self",
".",
"_schema",
"=",
"schema",
"return",
"self",
"raise",
"Exception",
"(",
"\"Table %s could not be created as it already exists\"",
"%",
"self",
".",
"_full_name",
")"
] | Create the table with the specified schema.
Args:
schema: the schema to use to create the table. Should be a list of dictionaries, each
containing at least a pair of entries, 'name' and 'type'.
See https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
overwrite: if True, delete the table first if it exists. If False and the table exists,
creation will fail and raise an Exception.
Returns:
The Table instance.
Raises:
Exception if the table couldn't be created or already exists and overwrite was False. | [
"Create",
"the",
"table",
"with",
"the",
"specified",
"schema",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_table.py#L225-L251 | train | 237,857 |
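exists, delete, and create from the rows above combine into a simple lifecycle sketch; the project and schema are placeholders:

import google.datalab.bigquery as bq

schema = [
    {'name': 'timestamp', 'type': 'TIMESTAMP'},
    {'name': 'status', 'type': 'INTEGER'},
]
t = bq.Table('my-project.my_dataset.request_log')
if not t.exists():
    t.create(schema)        # raises if the table appeared in the meantime
# ... populate and query the table ...
t.delete()                  # returns True once the table no longer exists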
googledatalab/pydatalab | google/datalab/bigquery/_table.py | Table._init_job_from_response | def _init_job_from_response(self, response):
""" Helper function to create a Job instance from a response. """
job = None
if response and 'jobReference' in response:
job = _job.Job(job_id=response['jobReference']['jobId'], context=self._context)
return job | python | def _init_job_from_response(self, response):
""" Helper function to create a Job instance from a response. """
job = None
if response and 'jobReference' in response:
job = _job.Job(job_id=response['jobReference']['jobId'], context=self._context)
return job | [
"def",
"_init_job_from_response",
"(",
"self",
",",
"response",
")",
":",
"job",
"=",
"None",
"if",
"response",
"and",
"'jobReference'",
"in",
"response",
":",
"job",
"=",
"_job",
".",
"Job",
"(",
"job_id",
"=",
"response",
"[",
"'jobReference'",
"]",
"[",
"'jobId'",
"]",
",",
"context",
"=",
"self",
".",
"_context",
")",
"return",
"job"
] | Helper function to create a Job instance from a response. | [
"Helper",
"function",
"to",
"create",
"a",
"Job",
"instance",
"from",
"a",
"response",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_table.py#L393-L398 | train | 237,858 |
googledatalab/pydatalab | google/datalab/bigquery/_table.py | Table.extract_async | def extract_async(self, destination, format='csv', csv_delimiter=None, csv_header=True,
compress=False):
"""Starts a job to export the table to GCS.
Args:
destination: the destination URI(s). Can be a single URI or a list.
format: the format to use for the exported data; one of 'csv', 'json', or 'avro'
(default 'csv').
csv_delimiter: for CSV exports, the field delimiter to use. Defaults to ','
csv_header: for CSV exports, whether to include an initial header line. Default true.
compress: whether to compress the data on export. Compression is not supported for
AVRO format. Defaults to False.
Returns:
A Job object for the export Job if it was started successfully; else None.
"""
format = format.upper()
if format == 'JSON':
format = 'NEWLINE_DELIMITED_JSON'
if format == 'CSV' and csv_delimiter is None:
csv_delimiter = ','
try:
response = self._api.table_extract(self._name_parts, destination, format, compress,
csv_delimiter, csv_header)
return self._init_job_from_response(response)
except Exception as e:
raise google.datalab.JobError(location=traceback.format_exc(), message=str(e),
reason=str(type(e))) | python | def extract_async(self, destination, format='csv', csv_delimiter=None, csv_header=True,
compress=False):
"""Starts a job to export the table to GCS.
Args:
destination: the destination URI(s). Can be a single URI or a list.
format: the format to use for the exported data; one of 'csv', 'json', or 'avro'
(default 'csv').
csv_delimiter: for CSV exports, the field delimiter to use. Defaults to ','
csv_header: for CSV exports, whether to include an initial header line. Default true.
compress: whether to compress the data on export. Compression is not supported for
AVRO format. Defaults to False.
Returns:
A Job object for the export Job if it was started successfully; else None.
"""
format = format.upper()
if format == 'JSON':
format = 'NEWLINE_DELIMITED_JSON'
if format == 'CSV' and csv_delimiter is None:
csv_delimiter = ','
try:
response = self._api.table_extract(self._name_parts, destination, format, compress,
csv_delimiter, csv_header)
return self._init_job_from_response(response)
except Exception as e:
raise google.datalab.JobError(location=traceback.format_exc(), message=str(e),
reason=str(type(e))) | [
"def",
"extract_async",
"(",
"self",
",",
"destination",
",",
"format",
"=",
"'csv'",
",",
"csv_delimiter",
"=",
"None",
",",
"csv_header",
"=",
"True",
",",
"compress",
"=",
"False",
")",
":",
"format",
"=",
"format",
".",
"upper",
"(",
")",
"if",
"format",
"==",
"'JSON'",
":",
"format",
"=",
"'NEWLINE_DELIMITED_JSON'",
"if",
"format",
"==",
"'CSV'",
"and",
"csv_delimiter",
"is",
"None",
":",
"csv_delimiter",
"=",
"','",
"try",
":",
"response",
"=",
"self",
".",
"_api",
".",
"table_extract",
"(",
"self",
".",
"_name_parts",
",",
"destination",
",",
"format",
",",
"compress",
",",
"csv_delimiter",
",",
"csv_header",
")",
"return",
"self",
".",
"_init_job_from_response",
"(",
"response",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"google",
".",
"datalab",
".",
"JobError",
"(",
"location",
"=",
"traceback",
".",
"format_exc",
"(",
")",
",",
"message",
"=",
"str",
"(",
"e",
")",
",",
"reason",
"=",
"str",
"(",
"type",
"(",
"e",
")",
")",
")"
] | Starts a job to export the table to GCS.
Args:
destination: the destination URI(s). Can be a single URI or a list.
format: the format to use for the exported data; one of 'csv', 'json', or 'avro'
(default 'csv').
csv_delimiter: for CSV exports, the field delimiter to use. Defaults to ','
csv_header: for CSV exports, whether to include an initial header line. Default true.
compress: whether to compress the data on export. Compression is not supported for
AVRO format. Defaults to False.
Returns:
A Job object for the export Job if it was started successfully; else None. | [
"Starts",
"a",
"job",
"to",
"export",
"the",
"table",
"to",
"GCS",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_table.py#L400-L426 | train | 237,859 |
googledatalab/pydatalab | google/datalab/bigquery/_table.py | Table.extract | def extract(self, destination, format='csv', csv_delimiter=None, csv_header=True, compress=False):
"""Exports the table to GCS; blocks until complete.
Args:
destination: the destination URI(s). Can be a single URI or a list.
format: the format to use for the exported data; one of 'csv', 'json', or 'avro'
(default 'csv').
csv_delimiter: for CSV exports, the field delimiter to use. Defaults to ','
csv_header: for CSV exports, whether to include an initial header line. Default true.
compress: whether to compress the data on export. Compression is not supported for
AVRO format. Defaults to False.
Returns:
A Job object for the completed export Job if it was started successfully; else None.
"""
job = self.extract_async(destination, format=format, csv_delimiter=csv_delimiter,
csv_header=csv_header, compress=compress)
if job is not None:
job.wait()
return job | python | def extract(self, destination, format='csv', csv_delimiter=None, csv_header=True, compress=False):
"""Exports the table to GCS; blocks until complete.
Args:
destination: the destination URI(s). Can be a single URI or a list.
format: the format to use for the exported data; one of 'csv', 'json', or 'avro'
(default 'csv').
csv_delimiter: for CSV exports, the field delimiter to use. Defaults to ','
csv_header: for CSV exports, whether to include an initial header line. Default true.
compress: whether to compress the data on export. Compression is not supported for
AVRO format. Defaults to False.
Returns:
A Job object for the completed export Job if it was started successfully; else None.
"""
job = self.extract_async(destination, format=format, csv_delimiter=csv_delimiter,
csv_header=csv_header, compress=compress)
if job is not None:
job.wait()
return job | [
"def",
"extract",
"(",
"self",
",",
"destination",
",",
"format",
"=",
"'csv'",
",",
"csv_delimiter",
"=",
"None",
",",
"csv_header",
"=",
"True",
",",
"compress",
"=",
"False",
")",
":",
"job",
"=",
"self",
".",
"extract_async",
"(",
"destination",
",",
"format",
"=",
"format",
",",
"csv_delimiter",
"=",
"csv_delimiter",
",",
"csv_header",
"=",
"csv_header",
",",
"compress",
"=",
"compress",
")",
"if",
"job",
"is",
"not",
"None",
":",
"job",
".",
"wait",
"(",
")",
"return",
"job"
] | Exports the table to GCS; blocks until complete.
Args:
destination: the destination URI(s). Can be a single URI or a list.
format: the format to use for the exported data; one of 'csv', 'json', or 'avro'
(default 'csv').
csv_delimiter: for CSV exports, the field delimiter to use. Defaults to ','
csv_header: for CSV exports, whether to include an initial header line. Default true.
compress: whether to compress the data on export. Compression is not supported for
AVRO format. Defaults to False.
Returns:
A Job object for the completed export Job if it was started successfully; else None. | [
"Exports",
"the",
"table",
"to",
"GCS",
";",
"blocks",
"until",
"complete",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_table.py#L428-L446 | train | 237,860 |
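Export sketch for the blocking extract above; the bucket path is a placeholder, and the '*' wildcard lets BigQuery shard the output:

import google.datalab.bigquery as bq

t = bq.Table('my-project.my_dataset.request_log')
# Pipe-delimited, gzipped CSV with a header row; extract() waits for the job.
job = t.extract('gs://my-bucket/exports/request_log-*.csv.gz',
                format='csv', csv_delimiter='|', csv_header=True,
                compress=True)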
googledatalab/pydatalab | google/datalab/bigquery/_table.py | Table.load_async | def load_async(self, source, mode='create', source_format='csv', csv_options=None,
ignore_unknown_values=False, max_bad_records=0):
""" Starts importing a table from GCS and return a Future.
Args:
source: the URL of the source object(s). Can include a wildcard '*' at the end of the item
name. Can be a single source or a list.
mode: one of 'create', 'append', or 'overwrite'. 'append' or 'overwrite' will fail if the
table does not already exist, while 'create' will fail if it does. The default is
'create'. If 'create', the schema will be inferred if necessary.
source_format: the format of the data, 'csv' or 'json'; default 'csv'.
csv_options: if source format is 'csv', additional options as a CSVOptions object.
ignore_unknown_values: If True, accept rows that contain values that do not match the schema;
the unknown values are ignored (default False).
max_bad_records: the maximum number of bad records that are allowed (and ignored) before
returning an 'invalid' error in the Job result (default 0).
Returns:
A Job object for the import if it was started successfully or None if not.
Raises:
Exception if the load job failed to be started or invalid arguments were supplied.
"""
if source_format == 'csv':
source_format = 'CSV'
elif source_format == 'json':
source_format = 'NEWLINE_DELIMITED_JSON'
else:
raise Exception("Invalid source format %s" % source_format)
if not(mode == 'create' or mode == 'append' or mode == 'overwrite'):
raise Exception("Invalid mode %s" % mode)
if csv_options is None:
csv_options = _csv_options.CSVOptions()
try:
response = self._api.jobs_insert_load(source, self._name_parts,
append=(mode == 'append'),
overwrite=(mode == 'overwrite'),
create=(mode == 'create'),
source_format=source_format,
field_delimiter=csv_options.delimiter,
allow_jagged_rows=csv_options.allow_jagged_rows,
allow_quoted_newlines=csv_options.allow_quoted_newlines,
encoding=csv_options.encoding.upper(),
ignore_unknown_values=ignore_unknown_values,
max_bad_records=max_bad_records,
quote=csv_options.quote,
skip_leading_rows=csv_options.skip_leading_rows)
except Exception as e:
raise e
return self._init_job_from_response(response) | python | def load_async(self, source, mode='create', source_format='csv', csv_options=None,
ignore_unknown_values=False, max_bad_records=0):
""" Starts importing a table from GCS and return a Future.
Args:
source: the URL of the source object(s). Can include a wildcard '*' at the end of the item
name. Can be a single source or a list.
mode: one of 'create', 'append', or 'overwrite'. 'append' or 'overwrite' will fail if the
table does not already exist, while 'create' will fail if it does. The default is
'create'. If 'create', the schema will be inferred if necessary.
source_format: the format of the data, 'csv' or 'json'; default 'csv'.
csv_options: if source format is 'csv', additional options as a CSVOptions object.
ignore_unknown_values: If True, accept rows that contain values that do not match the schema;
the unknown values are ignored (default False).
max_bad_records: the maximum number of bad records that are allowed (and ignored) before
returning an 'invalid' error in the Job result (default 0).
Returns:
A Job object for the import if it was started successfully or None if not.
Raises:
Exception if the load job failed to be started or invalid arguments were supplied.
"""
if source_format == 'csv':
source_format = 'CSV'
elif source_format == 'json':
source_format = 'NEWLINE_DELIMITED_JSON'
else:
raise Exception("Invalid source format %s" % source_format)
if not(mode == 'create' or mode == 'append' or mode == 'overwrite'):
raise Exception("Invalid mode %s" % mode)
if csv_options is None:
csv_options = _csv_options.CSVOptions()
try:
response = self._api.jobs_insert_load(source, self._name_parts,
append=(mode == 'append'),
overwrite=(mode == 'overwrite'),
create=(mode == 'create'),
source_format=source_format,
field_delimiter=csv_options.delimiter,
allow_jagged_rows=csv_options.allow_jagged_rows,
allow_quoted_newlines=csv_options.allow_quoted_newlines,
encoding=csv_options.encoding.upper(),
ignore_unknown_values=ignore_unknown_values,
max_bad_records=max_bad_records,
quote=csv_options.quote,
skip_leading_rows=csv_options.skip_leading_rows)
except Exception as e:
raise e
return self._init_job_from_response(response) | [
"def",
"load_async",
"(",
"self",
",",
"source",
",",
"mode",
"=",
"'create'",
",",
"source_format",
"=",
"'csv'",
",",
"csv_options",
"=",
"None",
",",
"ignore_unknown_values",
"=",
"False",
",",
"max_bad_records",
"=",
"0",
")",
":",
"if",
"source_format",
"==",
"'csv'",
":",
"source_format",
"=",
"'CSV'",
"elif",
"source_format",
"==",
"'json'",
":",
"source_format",
"=",
"'NEWLINE_DELIMITED_JSON'",
"else",
":",
"raise",
"Exception",
"(",
"\"Invalid source format %s\"",
"%",
"source_format",
")",
"if",
"not",
"(",
"mode",
"==",
"'create'",
"or",
"mode",
"==",
"'append'",
"or",
"mode",
"==",
"'overwrite'",
")",
":",
"raise",
"Exception",
"(",
"\"Invalid mode %s\"",
"%",
"mode",
")",
"if",
"csv_options",
"is",
"None",
":",
"csv_options",
"=",
"_csv_options",
".",
"CSVOptions",
"(",
")",
"try",
":",
"response",
"=",
"self",
".",
"_api",
".",
"jobs_insert_load",
"(",
"source",
",",
"self",
".",
"_name_parts",
",",
"append",
"=",
"(",
"mode",
"==",
"'append'",
")",
",",
"overwrite",
"=",
"(",
"mode",
"==",
"'overwrite'",
")",
",",
"create",
"=",
"(",
"mode",
"==",
"'create'",
")",
",",
"source_format",
"=",
"source_format",
",",
"field_delimiter",
"=",
"csv_options",
".",
"delimiter",
",",
"allow_jagged_rows",
"=",
"csv_options",
".",
"allow_jagged_rows",
",",
"allow_quoted_newlines",
"=",
"csv_options",
".",
"allow_quoted_newlines",
",",
"encoding",
"=",
"csv_options",
".",
"encoding",
".",
"upper",
"(",
")",
",",
"ignore_unknown_values",
"=",
"ignore_unknown_values",
",",
"max_bad_records",
"=",
"max_bad_records",
",",
"quote",
"=",
"csv_options",
".",
"quote",
",",
"skip_leading_rows",
"=",
"csv_options",
".",
"skip_leading_rows",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e",
"return",
"self",
".",
"_init_job_from_response",
"(",
"response",
")"
] | Starts importing a table from GCS and returns a Job.
Args:
source: the URL of the source object(s). Can include a wildcard '*' at the end of the item
name. Can be a single source or a list.
mode: one of 'create', 'append', or 'overwrite'. 'append' or 'overwrite' will fail if the
table does not already exist, while 'create' will fail if it does. The default is
'create'. If 'create', the schema will be inferred if necessary.
source_format: the format of the data, 'csv' or 'json'; default 'csv'.
csv_options: if source format is 'csv', additional options as a CSVOptions object.
ignore_unknown_values: If True, accept rows that contain values that do not match the schema;
the unknown values are ignored (default False).
max_bad_records: the maximum number of bad records that are allowed (and ignored) before
returning an 'invalid' error in the Job result (default 0).
Returns:
A Job object for the import if it was started successfully or None if not.
Raises:
Exception if the load job failed to be started or invalid arguments were supplied. | [
"Starts",
"importing",
"a",
"table",
"from",
"GCS",
"and",
"return",
"a",
"Future",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_table.py#L448-L499 | train | 237,861 |
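Load sketch for load_async above. CSVOptions is assumed to be the public alias of the _csv_options class used internally; paths are placeholders:

import google.datalab.bigquery as bq

t = bq.Table('my-project.my_dataset.request_log')
opts = bq.CSVOptions(delimiter='|', skip_leading_rows=1)
job = t.load_async('gs://my-bucket/incoming/log-*.csv',
                   mode='append',
                   source_format='csv',
                   csv_options=opts,
                   max_bad_records=10)
job.wait()   # or use the blocking load() wrapper documented next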
googledatalab/pydatalab | google/datalab/bigquery/_table.py | Table.load | def load(self, source, mode='create', source_format='csv', csv_options=None,
ignore_unknown_values=False, max_bad_records=0):
""" Load the table from GCS.
Args:
source: the URL of the source object(s). Can include a wildcard '*' at the end of the item
name. Can be a single source or a list.
mode: one of 'create', 'append', or 'overwrite'. 'append' or 'overwrite' will fail if the
table does not already exist, while 'create' will fail if it does. The default is
'create'. If 'create' the schema will be inferred if necessary.
source_format: the format of the data, 'csv' or 'json'; default 'csv'.
csv_options: if source format is 'csv', additional options as a CSVOptions object.
ignore_unknown_values: if True, accept rows that contain values that do not match the schema;
the unknown values are ignored (default False).
max_bad_records: the maximum number of bad records that are allowed (and ignored) before
returning an 'invalid' error in the Job result (default 0).
Returns:
A Job object for the completed load Job if it was started successfully; else None.
"""
job = self.load_async(source,
mode=mode,
source_format=source_format,
csv_options=csv_options,
ignore_unknown_values=ignore_unknown_values,
max_bad_records=max_bad_records)
if job is not None:
job.wait()
return job | python | def load(self, source, mode='create', source_format='csv', csv_options=None,
ignore_unknown_values=False, max_bad_records=0):
""" Load the table from GCS.
Args:
source: the URL of the source object(s). Can include a wildcard '*' at the end of the item
name. Can be a single source or a list.
mode: one of 'create', 'append', or 'overwrite'. 'append' or 'overwrite' will fail if the
table does not already exist, while 'create' will fail if it does. The default is
'create'. If 'create' the schema will be inferred if necessary.
source_format: the format of the data, 'csv' or 'json'; default 'csv'.
csv_options: if source format is 'csv', additional options as a CSVOptions object.
ignore_unknown_values: if True, accept rows that contain values that do not match the schema;
the unknown values are ignored (default False).
max_bad_records: the maximum number of bad records that are allowed (and ignored) before
returning an 'invalid' error in the Job result (default 0).
Returns:
A Job object for the completed load Job if it was started successfully; else None.
"""
job = self.load_async(source,
mode=mode,
source_format=source_format,
csv_options=csv_options,
ignore_unknown_values=ignore_unknown_values,
max_bad_records=max_bad_records)
if job is not None:
job.wait()
return job | [
"def",
"load",
"(",
"self",
",",
"source",
",",
"mode",
"=",
"'create'",
",",
"source_format",
"=",
"'csv'",
",",
"csv_options",
"=",
"None",
",",
"ignore_unknown_values",
"=",
"False",
",",
"max_bad_records",
"=",
"0",
")",
":",
"job",
"=",
"self",
".",
"load_async",
"(",
"source",
",",
"mode",
"=",
"mode",
",",
"source_format",
"=",
"source_format",
",",
"csv_options",
"=",
"csv_options",
",",
"ignore_unknown_values",
"=",
"ignore_unknown_values",
",",
"max_bad_records",
"=",
"max_bad_records",
")",
"if",
"job",
"is",
"not",
"None",
":",
"job",
".",
"wait",
"(",
")",
"return",
"job"
] | Load the table from GCS.
Args:
source: the URL of the source object(s). Can include a wildcard '*' at the end of the item
name. Can be a single source or a list.
mode: one of 'create', 'append', or 'overwrite'. 'append' or 'overwrite' will fail if the
table does not already exist, while 'create' will fail if it does. The default is
'create'. If 'create' the schema will be inferred if necessary.
source_format: the format of the data, 'csv' or 'json'; default 'csv'.
csv_options: if source format is 'csv', additional options as a CSVOptions object.
ignore_unknown_values: if True, accept rows that contain values that do not match the schema;
the unknown values are ignored (default False).
max_bad_records: the maximum number of bad records that are allowed (and ignored) before
returning an 'invalid' error in the Job result (default 0).
Returns:
A Job object for the completed load Job if it was started successfully; else None. | [
"Load",
"the",
"table",
"from",
"GCS",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_table.py#L501-L529 | train | 237,862 |
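load is the blocking wrapper around load_async above; a sketch under the same hypothetical names (the Job attributes consulted here are assumptions, not confirmed by this record):

    job = tbl.load('gs://my-bucket/data.csv', mode='create')  # waits for completion
    if job is not None and job.failed:    # 'failed'/'fatal_error' assumed on Job
        print(job.fatal_error)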
googledatalab/pydatalab | google/datalab/bigquery/_table.py | Table._get_row_fetcher | def _get_row_fetcher(self, start_row=0, max_rows=None, page_size=_DEFAULT_PAGE_SIZE):
""" Get a function that can retrieve a page of rows.
The function returned is a closure so that it can have a signature suitable for use
by Iterator.
Args:
start_row: the row to start fetching from; default 0.
max_rows: the maximum number of rows to fetch (across all calls, not per-call). Default
is None which means no limit.
page_size: the maximum number of results to fetch per page; default 1024.
Returns:
A function that can be called repeatedly with a page token and running count, and that
will return an array of rows and a next page token; when the returned page token is None
the fetch is complete.
"""
if not start_row:
start_row = 0
elif start_row < 0: # We are measuring from the table end
if self.length >= 0:
start_row += self.length
else:
raise Exception('Cannot use negative indices for table of unknown length')
schema = self.schema._bq_schema
name_parts = self._name_parts
def _retrieve_rows(page_token, count):
page_rows = []
if max_rows and count >= max_rows:
page_token = None
else:
if max_rows and page_size > (max_rows - count):
max_results = max_rows - count
else:
max_results = page_size
try:
if page_token:
response = self._api.tabledata_list(name_parts, page_token=page_token,
max_results=max_results)
else:
response = self._api.tabledata_list(name_parts, start_index=start_row,
max_results=max_results)
except Exception as e:
raise e
page_token = response['pageToken'] if 'pageToken' in response else None
if 'rows' in response:
page_rows = response['rows']
rows = []
for row_dict in page_rows:
rows.append(_parser.Parser.parse_row(schema, row_dict))
return rows, page_token
return _retrieve_rows | python | def _get_row_fetcher(self, start_row=0, max_rows=None, page_size=_DEFAULT_PAGE_SIZE):
""" Get a function that can retrieve a page of rows.
The function returned is a closure so that it can have a signature suitable for use
by Iterator.
Args:
start_row: the row to start fetching from; default 0.
max_rows: the maximum number of rows to fetch (across all calls, not per-call). Default
is None which means no limit.
page_size: the maximum number of results to fetch per page; default 1024.
Returns:
A function that can be called repeatedly with a page token and running count, and that
will return an array of rows and a next page token; when the returned page token is None
the fetch is complete.
"""
if not start_row:
start_row = 0
elif start_row < 0: # We are measuring from the table end
if self.length >= 0:
start_row += self.length
else:
raise Exception('Cannot use negative indices for table of unknown length')
schema = self.schema._bq_schema
name_parts = self._name_parts
def _retrieve_rows(page_token, count):
page_rows = []
if max_rows and count >= max_rows:
page_token = None
else:
if max_rows and page_size > (max_rows - count):
max_results = max_rows - count
else:
max_results = page_size
try:
if page_token:
response = self._api.tabledata_list(name_parts, page_token=page_token,
max_results=max_results)
else:
response = self._api.tabledata_list(name_parts, start_index=start_row,
max_results=max_results)
except Exception as e:
raise e
page_token = response['pageToken'] if 'pageToken' in response else None
if 'rows' in response:
page_rows = response['rows']
rows = []
for row_dict in page_rows:
rows.append(_parser.Parser.parse_row(schema, row_dict))
return rows, page_token
return _retrieve_rows | [
"def",
"_get_row_fetcher",
"(",
"self",
",",
"start_row",
"=",
"0",
",",
"max_rows",
"=",
"None",
",",
"page_size",
"=",
"_DEFAULT_PAGE_SIZE",
")",
":",
"if",
"not",
"start_row",
":",
"start_row",
"=",
"0",
"elif",
"start_row",
"<",
"0",
":",
"# We are measuring from the table end",
"if",
"self",
".",
"length",
">=",
"0",
":",
"start_row",
"+=",
"self",
".",
"length",
"else",
":",
"raise",
"Exception",
"(",
"'Cannot use negative indices for table of unknown length'",
")",
"schema",
"=",
"self",
".",
"schema",
".",
"_bq_schema",
"name_parts",
"=",
"self",
".",
"_name_parts",
"def",
"_retrieve_rows",
"(",
"page_token",
",",
"count",
")",
":",
"page_rows",
"=",
"[",
"]",
"if",
"max_rows",
"and",
"count",
">=",
"max_rows",
":",
"page_token",
"=",
"None",
"else",
":",
"if",
"max_rows",
"and",
"page_size",
">",
"(",
"max_rows",
"-",
"count",
")",
":",
"max_results",
"=",
"max_rows",
"-",
"count",
"else",
":",
"max_results",
"=",
"page_size",
"try",
":",
"if",
"page_token",
":",
"response",
"=",
"self",
".",
"_api",
".",
"tabledata_list",
"(",
"name_parts",
",",
"page_token",
"=",
"page_token",
",",
"max_results",
"=",
"max_results",
")",
"else",
":",
"response",
"=",
"self",
".",
"_api",
".",
"tabledata_list",
"(",
"name_parts",
",",
"start_index",
"=",
"start_row",
",",
"max_results",
"=",
"max_results",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e",
"page_token",
"=",
"response",
"[",
"'pageToken'",
"]",
"if",
"'pageToken'",
"in",
"response",
"else",
"None",
"if",
"'rows'",
"in",
"response",
":",
"page_rows",
"=",
"response",
"[",
"'rows'",
"]",
"rows",
"=",
"[",
"]",
"for",
"row_dict",
"in",
"page_rows",
":",
"rows",
".",
"append",
"(",
"_parser",
".",
"Parser",
".",
"parse_row",
"(",
"schema",
",",
"row_dict",
")",
")",
"return",
"rows",
",",
"page_token",
"return",
"_retrieve_rows"
] | Get a function that can retrieve a page of rows.
The function returned is a closure so that it can have a signature suitable for use
by Iterator.
Args:
start_row: the row to start fetching from; default 0.
max_rows: the maximum number of rows to fetch (across all calls, not per-call). Default
is None which means no limit.
page_size: the maximum number of results to fetch per page; default 1024.
Returns:
A function that can be called repeatedly with a page token and running count, and that
will return an array of rows and a next page token; when the returned page token is None
the fetch is complete. | [
"Get",
"a",
"function",
"that",
"can",
"retrieve",
"a",
"page",
"of",
"rows",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_table.py#L531-L588 | train | 237,863 |
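The returned closure follows a (page_token, count) -> (rows, next_token) contract; a sketch of the paging loop a caller such as an Iterator would run (the Table instance is hypothetical):

    fetch_page = tbl._get_row_fetcher(start_row=0, max_rows=100)
    rows, token, count = [], None, 0
    while True:
        page, token = fetch_page(token, count)  # first call passes token=None
        rows.extend(page)
        count += len(page)
        if token is None:                       # no next page: fetch is complete
            break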
googledatalab/pydatalab | google/datalab/bigquery/_table.py | Table.schema | def schema(self):
"""Retrieves the schema of the table.
Returns:
A Schema object containing a list of schema fields and associated metadata.
Raises:
Exception if the request could not be executed or the response was malformed.
"""
if not self._schema:
try:
self._load_info()
self._schema = _schema.Schema(self._info['schema']['fields'])
except KeyError:
raise Exception('Unexpected table response: missing schema')
return self._schema | python | def schema(self):
"""Retrieves the schema of the table.
Returns:
A Schema object containing a list of schema fields and associated metadata.
Raises:
Exception if the request could not be executed or the response was malformed.
"""
if not self._schema:
try:
self._load_info()
self._schema = _schema.Schema(self._info['schema']['fields'])
except KeyError:
raise Exception('Unexpected table response: missing schema')
return self._schema | [
"def",
"schema",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_schema",
":",
"try",
":",
"self",
".",
"_load_info",
"(",
")",
"self",
".",
"_schema",
"=",
"_schema",
".",
"Schema",
"(",
"self",
".",
"_info",
"[",
"'schema'",
"]",
"[",
"'fields'",
"]",
")",
"except",
"KeyError",
":",
"raise",
"Exception",
"(",
"'Unexpected table response: missing schema'",
")",
"return",
"self",
".",
"_schema"
] | Retrieves the schema of the table.
Returns:
A Schema object containing a list of schema fields and associated metadata.
Raises:
Exception if the request could not be executed or the response was malformed. | [
"Retrieves",
"the",
"schema",
"of",
"the",
"table",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_table.py#L662-L676 | train | 237,864 |
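Because the property caches its result, only the first access triggers a metadata load; a sketch (hypothetical table instance):

    schema = tbl.schema   # first access: loads table info and parses 'fields'
    again = tbl.schema    # cached Schema object; no further API round trip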
googledatalab/pydatalab | google/datalab/bigquery/_table.py | Table.snapshot | def snapshot(self, at):
""" Return a new Table which is a snapshot of this table at the specified time.
Args:
at: the time of the snapshot. This can be a Python datetime (absolute) or timedelta
(relative to current time). The result must be after the table was created and no more
than seven days in the past. Passing None will get a reference to the oldest snapshot.
Note that using a datetime will get a snapshot at an absolute point in time, while
a timedelta will provide a varying snapshot; any queries issued against such a Table
will be done against a snapshot that has an age relative to the execution time of the
query.
Returns:
A new Table object referencing the snapshot.
Raises:
An exception if this Table is already decorated, or if the time specified is invalid.
"""
if self._name_parts.decorator != '':
raise Exception("Cannot use snapshot() on an already decorated table")
value = Table._convert_decorator_time(at)
return Table("%s@%s" % (self._full_name, str(value)), context=self._context) | python | def snapshot(self, at):
""" Return a new Table which is a snapshot of this table at the specified time.
Args:
at: the time of the snapshot. This can be a Python datetime (absolute) or timedelta
(relative to current time). The result must be after the table was created and no more
than seven days in the past. Passing None will get a reference to the oldest snapshot.
Note that using a datetime will get a snapshot at an absolute point in time, while
a timedelta will provide a varying snapshot; any queries issued against such a Table
will be done against a snapshot that has an age relative to the execution time of the
query.
Returns:
A new Table object referencing the snapshot.
Raises:
An exception if this Table is already decorated, or if the time specified is invalid.
"""
if self._name_parts.decorator != '':
raise Exception("Cannot use snapshot() on an already decorated table")
value = Table._convert_decorator_time(at)
return Table("%s@%s" % (self._full_name, str(value)), context=self._context) | [
"def",
"snapshot",
"(",
"self",
",",
"at",
")",
":",
"if",
"self",
".",
"_name_parts",
".",
"decorator",
"!=",
"''",
":",
"raise",
"Exception",
"(",
"\"Cannot use snapshot() on an already decorated table\"",
")",
"value",
"=",
"Table",
".",
"_convert_decorator_time",
"(",
"at",
")",
"return",
"Table",
"(",
"\"%s@%s\"",
"%",
"(",
"self",
".",
"_full_name",
",",
"str",
"(",
"value",
")",
")",
",",
"context",
"=",
"self",
".",
"_context",
")"
] | Return a new Table which is a snapshot of this table at the specified time.
Args:
at: the time of the snapshot. This can be a Python datetime (absolute) or timedelta
(relative to current time). The result must be after the table was created and no more
than seven days in the past. Passing None will get a reference to the oldest snapshot.
Note that using a datetime will get a snapshot at an absolute point in time, while
a timedelta will provide a varying snapshot; any queries issued against such a Table
will be done against a snapshot that has an age relative to the execution time of the
query.
Returns:
A new Table object referencing the snapshot.
Raises:
An exception if this Table is already decorated, or if the time specified is invalid. | [
"Return",
"a",
"new",
"Table",
"which",
"is",
"a",
"snapshot",
"of",
"this",
"table",
"at",
"the",
"specified",
"time",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_table.py#L803-L826 | train | 237,865 |
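A sketch of both decorator forms (hypothetical table; the relative form assumes _convert_decorator_time maps a negative timedelta to a relative '@' decorator value):

    import datetime

    # Absolute: snapshot at a fixed point in time.
    fixed = tbl.snapshot(datetime.datetime.utcnow() - datetime.timedelta(hours=1))
    # Relative: re-evaluated against the execution time of each query.
    rolling = tbl.snapshot(datetime.timedelta(hours=-1))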
googledatalab/pydatalab | google/datalab/bigquery/_table.py | Table.window | def window(self, begin, end=None):
""" Return a new Table limited to the rows added to this Table during the specified time range.
Args:
begin: the start time of the window. This can be a Python datetime (absolute) or timedelta
(relative to current time). The result must be after the table was created and no more
than seven days in the past.
Note that using a relative value will provide a varying snapshot, not a fixed
snapshot; any queries issued against such a Table will be done against a snapshot
that has an age relative to the execution time of the query.
end: the end time of the window; if None, then the current time is used. The types and
interpretation of values are as for begin.
Returns:
A new Table object referencing the window.
Raises:
An exception if this Table is already decorated, or if the time specified is invalid.
"""
if self._name_parts.decorator != '':
raise Exception("Cannot use window() on an already decorated table")
start = Table._convert_decorator_time(begin)
if end is None:
if isinstance(begin, datetime.timedelta):
end = datetime.timedelta(0)
else:
end = datetime.datetime.utcnow()
stop = Table._convert_decorator_time(end)
# Both values must have the same sign
if (start > 0 >= stop) or (stop > 0 >= start):
raise Exception("window: Between arguments must both be absolute or relative: %s, %s" %
(str(begin), str(end)))
# start must be less than stop
if start > stop:
raise Exception("window: Between arguments: begin must be before end: %s, %s" %
(str(begin), str(end)))
return Table("%s@%s-%s" % (self._full_name, str(start), str(stop)), context=self._context) | python | def window(self, begin, end=None):
""" Return a new Table limited to the rows added to this Table during the specified time range.
Args:
begin: the start time of the window. This can be a Python datetime (absolute) or timedelta
(relative to current time). The result must be after the table was created and no more
than seven days in the past.
Note that using a relative value will provide a varying snapshot, not a fixed
snapshot; any queries issued against such a Table will be done against a snapshot
that has an age relative to the execution time of the query.
end: the end time of the window; if None, then the current time is used. The types and
interpretation of values are as for begin.
Returns:
A new Table object referencing the window.
Raises:
An exception if this Table is already decorated, or if the time specified is invalid.
"""
if self._name_parts.decorator != '':
raise Exception("Cannot use window() on an already decorated table")
start = Table._convert_decorator_time(begin)
if end is None:
if isinstance(begin, datetime.timedelta):
end = datetime.timedelta(0)
else:
end = datetime.datetime.utcnow()
stop = Table._convert_decorator_time(end)
# Both values must have the same sign
if (start > 0 >= stop) or (stop > 0 >= start):
raise Exception("window: Between arguments must both be absolute or relative: %s, %s" %
(str(begin), str(end)))
# start must be less than stop
if start > stop:
raise Exception("window: Between arguments: begin must be before end: %s, %s" %
(str(begin), str(end)))
return Table("%s@%s-%s" % (self._full_name, str(start), str(stop)), context=self._context) | [
"def",
"window",
"(",
"self",
",",
"begin",
",",
"end",
"=",
"None",
")",
":",
"if",
"self",
".",
"_name_parts",
".",
"decorator",
"!=",
"''",
":",
"raise",
"Exception",
"(",
"\"Cannot use window() on an already decorated table\"",
")",
"start",
"=",
"Table",
".",
"_convert_decorator_time",
"(",
"begin",
")",
"if",
"end",
"is",
"None",
":",
"if",
"isinstance",
"(",
"begin",
",",
"datetime",
".",
"timedelta",
")",
":",
"end",
"=",
"datetime",
".",
"timedelta",
"(",
"0",
")",
"else",
":",
"end",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"stop",
"=",
"Table",
".",
"_convert_decorator_time",
"(",
"end",
")",
"# Both values must have the same sign",
"if",
"(",
"start",
">",
"0",
">=",
"stop",
")",
"or",
"(",
"stop",
">",
"0",
">=",
"start",
")",
":",
"raise",
"Exception",
"(",
"\"window: Between arguments must both be absolute or relative: %s, %s\"",
"%",
"(",
"str",
"(",
"begin",
")",
",",
"str",
"(",
"end",
")",
")",
")",
"# start must be less than stop",
"if",
"start",
">",
"stop",
":",
"raise",
"Exception",
"(",
"\"window: Between arguments: begin must be before end: %s, %s\"",
"%",
"(",
"str",
"(",
"begin",
")",
",",
"str",
"(",
"end",
")",
")",
")",
"return",
"Table",
"(",
"\"%s@%s-%s\"",
"%",
"(",
"self",
".",
"_full_name",
",",
"str",
"(",
"start",
")",
",",
"str",
"(",
"stop",
")",
")",
",",
"context",
"=",
"self",
".",
"_context",
")"
] | Return a new Table limited to the rows added to this Table during the specified time range.
Args:
begin: the start time of the window. This can be a Python datetime (absolute) or timedelta
(relative to current time). The result must be after the table was created and no more
than seven days in the past.
Note that using a relative value will provide a varying snapshot, not a fixed
snapshot; any queries issued against such a Table will be done against a snapshot
that has an age relative to the execution time of the query.
end: the end time of the window; if None, then the current time is used. The types and
interpretation of values are as for begin.
Returns:
A new Table object referencing the window.
Raises:
An exception if this Table is already decorated, or if the time specified is invalid. | [
"Return",
"a",
"new",
"Table",
"limited",
"to",
"the",
"rows",
"added",
"to",
"this",
"Table",
"during",
"the",
"specified",
"time",
"range",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_table.py#L828-L870 | train | 237,866 |
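A sketch covering both calling styles (hypothetical table; both endpoints must be absolute or both relative, per the sign check in the code above):

    import datetime

    # Relative window: rows added in the last hour; end defaults to timedelta(0).
    recent = tbl.window(datetime.timedelta(hours=-1))
    # Absolute window between two datetimes.
    end = datetime.datetime.utcnow()
    day = tbl.window(end - datetime.timedelta(days=1), end)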
googledatalab/pydatalab | solutionbox/ml_workbench/xgboost/transform.py | serialize_example | def serialize_example(transformed_json_data, features, feature_indices, target_name):
"""Makes an instance of data in libsvm format.
Args:
transformed_json_data: dict of transformed data.
features: features config.
feature_indices: output of feature_transforms.get_transformed_feature_indices()
target_name: name of the target column in transformed_json_data.
Returns:
The text line representation of an instance in libsvm format.
"""
import six
import tensorflow as tf
from trainer import feature_transforms
line = str(transformed_json_data[target_name][0])
for name, info in feature_indices:
if features[name]['transform'] in [feature_transforms.IDENTITY_TRANSFORM,
feature_transforms.SCALE_TRANSFORM]:
line += ' %d:%s' % (info['index_start'], str(transformed_json_data[name][0]))
elif features[name]['transform'] in [feature_transforms.ONE_HOT_TRANSFORM,
feature_transforms.MULTI_HOT_TRANSFORM]:
for i in range(info['size']):
if i in transformed_json_data[name]:
line += ' %d:1' % (info['index_start'] + i)
elif features[name]['transform'] in [feature_transforms.IMAGE_TRANSFORM]:
for i in range(info['size']):
line += ' %d:%s' % (info['index_start'] + i, str(transformed_json_data[name][i]))
return line | python | def serialize_example(transformed_json_data, features, feature_indices, target_name):
"""Makes an instance of data in libsvm format.
Args:
transformed_json_data: dict of transformed data.
features: features config.
feature_indices: output of feature_transforms.get_transformed_feature_indices()
target_name: name of the target column in transformed_json_data.
Returns:
The text line representation of an instance in libsvm format.
"""
import six
import tensorflow as tf
from trainer import feature_transforms
line = str(transformed_json_data[target_name][0])
for name, info in feature_indices:
if features[name]['transform'] in [feature_transforms.IDENTITY_TRANSFORM,
feature_transforms.SCALE_TRANSFORM]:
line += ' %d:%s' % (info['index_start'], str(transformed_json_data[name][0]))
elif features[name]['transform'] in [feature_transforms.ONE_HOT_TRANSFORM,
feature_transforms.MULTI_HOT_TRANSFORM]:
for i in range(info['size']):
if i in transformed_json_data[name]:
line += ' %d:1' % (info['index_start'] + i)
elif features[name]['transform'] in [feature_transforms.IMAGE_TRANSFORM]:
for i in range(info['size']):
line += ' %d:%s' % (info['index_start'] + i, str(transformed_json_data[name][i]))
return line | [
"def",
"serialize_example",
"(",
"transformed_json_data",
",",
"features",
",",
"feature_indices",
",",
"target_name",
")",
":",
"import",
"six",
"import",
"tensorflow",
"as",
"tf",
"from",
"trainer",
"import",
"feature_transforms",
"line",
"=",
"str",
"(",
"transformed_json_data",
"[",
"target_name",
"]",
"[",
"0",
"]",
")",
"for",
"name",
",",
"info",
"in",
"feature_indices",
":",
"if",
"features",
"[",
"name",
"]",
"[",
"'transform'",
"]",
"in",
"[",
"feature_transforms",
".",
"IDENTITY_TRANSFORM",
",",
"feature_transforms",
".",
"SCALE_TRANSFORM",
"]",
":",
"line",
"+=",
"' %d:%s'",
"%",
"(",
"info",
"[",
"'index_start'",
"]",
",",
"str",
"(",
"transformed_json_data",
"[",
"name",
"]",
"[",
"0",
"]",
")",
")",
"elif",
"features",
"[",
"name",
"]",
"[",
"'transform'",
"]",
"in",
"[",
"feature_transforms",
".",
"ONE_HOT_TRANSFORM",
",",
"feature_transforms",
".",
"MULTI_HOT_TRANSFORM",
"]",
":",
"for",
"i",
"in",
"range",
"(",
"info",
"[",
"'size'",
"]",
")",
":",
"if",
"i",
"in",
"transformed_json_data",
"[",
"name",
"]",
":",
"line",
"+=",
"' %d:1'",
"%",
"(",
"info",
"[",
"'index_start'",
"]",
"+",
"i",
")",
"elif",
"features",
"[",
"name",
"]",
"[",
"'transform'",
"]",
"in",
"[",
"feature_transforms",
".",
"IMAGE_TRANSFORM",
"]",
":",
"for",
"i",
"in",
"range",
"(",
"info",
"[",
"'size'",
"]",
")",
":",
"line",
"+=",
"' %d:%s'",
"%",
"(",
"info",
"[",
"'index_start'",
"]",
"+",
"i",
",",
"str",
"(",
"transformed_json_data",
"[",
"name",
"]",
"[",
"i",
"]",
")",
")",
"return",
"line"
] | Makes an instance of data in libsvm format.
Args:
transformed_json_data: dict of transformed data.
features: features config.
feature_indices: output of feature_transforms.get_transformed_feature_indices()
target_name: name of the target column in transformed_json_data.
Returns:
The text line representation of an instance in libsvm format. | [
"Makes",
"an",
"instance",
"of",
"data",
"in",
"libsvm",
"format",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/ml_workbench/xgboost/transform.py#L392-L421 | train | 237,867 |
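A self-contained illustration of the output format in plain Python (it does not use the trainer package; the indices are hypothetical — a scaled numeric column at index 0 and a one-hot column of size 3 starting at index 1, where only the categories present in the instance emit an 'index:1' pair):

    def libsvm_line(target, sparse_pairs):
        # target value first, then sparse 'index:value' pairs -- libsvm format
        return ' '.join([str(target)] + ['%d:%s' % (i, v) for i, v in sparse_pairs])

    print(libsvm_line(1, [(0, 0.5), (3, 1)]))   # -> "1 0:0.5 3:1"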
googledatalab/pydatalab | datalab/bigquery/_dataset.py | Dataset.delete | def delete(self, delete_contents=False):
"""Issues a request to delete the dataset.
Args:
delete_contents: if True, any tables and views in the dataset will be deleted. If False
and the dataset is non-empty an exception will be raised.
Returns:
None on success.
Raises:
Exception if the delete fails (including if table was nonexistent).
"""
if not self.exists():
raise Exception('Cannot delete non-existent dataset %s' % self._full_name)
try:
self._api.datasets_delete(self._name_parts, delete_contents=delete_contents)
except Exception as e:
raise e
self._info = None
return None | python | def delete(self, delete_contents=False):
"""Issues a request to delete the dataset.
Args:
delete_contents: if True, any tables and views in the dataset will be deleted. If False
and the dataset is non-empty an exception will be raised.
Returns:
None on success.
Raises:
Exception if the delete fails (including if the dataset was nonexistent).
"""
if not self.exists():
raise Exception('Cannot delete non-existent dataset %s' % self._full_name)
try:
self._api.datasets_delete(self._name_parts, delete_contents=delete_contents)
except Exception as e:
raise e
self._info = None
return None | [
"def",
"delete",
"(",
"self",
",",
"delete_contents",
"=",
"False",
")",
":",
"if",
"not",
"self",
".",
"exists",
"(",
")",
":",
"raise",
"Exception",
"(",
"'Cannot delete non-existent dataset %s'",
"%",
"self",
".",
"_full_name",
")",
"try",
":",
"self",
".",
"_api",
".",
"datasets_delete",
"(",
"self",
".",
"_name_parts",
",",
"delete_contents",
"=",
"delete_contents",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e",
"self",
".",
"_info",
"=",
"None",
"return",
"None"
] | Issues a request to delete the dataset.
Args:
delete_contents: if True, any tables and views in the dataset will be deleted. If False
and the dataset is non-empty an exception will be raised.
Returns:
None on success.
Raises:
Exception if the delete fails (including if the dataset was nonexistent).
"Issues",
"a",
"request",
"to",
"delete",
"the",
"dataset",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_dataset.py#L101-L119 | train | 237,868 |
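A minimal sketch (hypothetical dataset name; assumes the package imports as datalab.bigquery, matching this module's path):

    import datalab.bigquery as bq

    ds = bq.Dataset('my_dataset')        # hypothetical
    if ds.exists():
        ds.delete(delete_contents=True)  # also drops any contained tables/views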
googledatalab/pydatalab | datalab/bigquery/_dataset.py | Dataset.create | def create(self, friendly_name=None, description=None):
"""Creates the Dataset with the specified friendly name and description.
Args:
friendly_name: (optional) the friendly name for the dataset if it is being created.
description: (optional) a description for the dataset if it is being created.
Returns:
The Dataset.
Raises:
Exception if the Dataset could not be created.
"""
if not self.exists():
try:
response = self._api.datasets_insert(self._name_parts,
friendly_name=friendly_name,
description=description)
except Exception as e:
raise e
if 'selfLink' not in response:
raise Exception("Could not create dataset %s" % self._full_name)
return self | python | def create(self, friendly_name=None, description=None):
"""Creates the Dataset with the specified friendly name and description.
Args:
friendly_name: (optional) the friendly name for the dataset if it is being created.
description: (optional) a description for the dataset if it is being created.
Returns:
The Dataset.
Raises:
Exception if the Dataset could not be created.
"""
if not self.exists():
try:
response = self._api.datasets_insert(self._name_parts,
friendly_name=friendly_name,
description=description)
except Exception as e:
raise e
if 'selfLink' not in response:
raise Exception("Could not create dataset %s" % self._full_name)
return self | [
"def",
"create",
"(",
"self",
",",
"friendly_name",
"=",
"None",
",",
"description",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"exists",
"(",
")",
":",
"try",
":",
"response",
"=",
"self",
".",
"_api",
".",
"datasets_insert",
"(",
"self",
".",
"_name_parts",
",",
"friendly_name",
"=",
"friendly_name",
",",
"description",
"=",
"description",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e",
"if",
"'selfLink'",
"not",
"in",
"response",
":",
"raise",
"Exception",
"(",
"\"Could not create dataset %s\"",
"%",
"self",
".",
"_full_name",
")",
"return",
"self"
] | Creates the Dataset with the specified friendly name and description.
Args:
friendly_name: (optional) the friendly name for the dataset if it is being created.
description: (optional) a description for the dataset if it is being created.
Returns:
The Dataset.
Raises:
Exception if the Dataset could not be created. | [
"Creates",
"the",
"Dataset",
"with",
"the",
"specified",
"friendly",
"name",
"and",
"description",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_dataset.py#L121-L141 | train | 237,869 |
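create only issues the insert when the dataset does not already exist, so it is safe to call repeatedly; a sketch under the same hypothetical name:

    ds = bq.Dataset('my_dataset')
    ds.create(friendly_name='Demo data',
              description='Created only if absent')  # no-op when it exists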
googledatalab/pydatalab | datalab/bigquery/_dataset.py | Dataset.update | def update(self, friendly_name=None, description=None):
""" Selectively updates Dataset information.
Args:
friendly_name: if not None, the new friendly name.
description: if not None, the new description.
Returns:
None.
"""
self._get_info()
if self._info:
if friendly_name:
self._info['friendlyName'] = friendly_name
if description:
self._info['description'] = description
try:
self._api.datasets_update(self._name_parts, self._info)
except Exception as e:
raise e
finally:
self._info = None | python | def update(self, friendly_name=None, description=None):
""" Selectively updates Dataset information.
Args:
friendly_name: if not None, the new friendly name.
description: if not None, the new description.
Returns:
None.
"""
self._get_info()
if self._info:
if friendly_name:
self._info['friendlyName'] = friendly_name
if description:
self._info['description'] = description
try:
self._api.datasets_update(self._name_parts, self._info)
except Exception as e:
raise e
finally:
self._info = None | [
"def",
"update",
"(",
"self",
",",
"friendly_name",
"=",
"None",
",",
"description",
"=",
"None",
")",
":",
"self",
".",
"_get_info",
"(",
")",
"if",
"self",
".",
"_info",
":",
"if",
"friendly_name",
":",
"self",
".",
"_info",
"[",
"'friendlyName'",
"]",
"=",
"friendly_name",
"if",
"description",
":",
"self",
".",
"_info",
"[",
"'description'",
"]",
"=",
"description",
"try",
":",
"self",
".",
"_api",
".",
"datasets_update",
"(",
"self",
".",
"_name_parts",
",",
"self",
".",
"_info",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e",
"finally",
":",
"self",
".",
"_info",
"=",
"None"
] | Selectively updates Dataset information.
Args:
friendly_name: if not None, the new friendly name.
description: if not None, the new description.
Returns:
None. | [
"Selectively",
"updates",
"Dataset",
"information",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_dataset.py#L143-L164 | train | 237,870 |
googledatalab/pydatalab | google/datalab/bigquery/_view.py | View.query | def query(self):
"""The Query that defines the view."""
if not self.exists():
return None
self._table._load_info()
if 'view' in self._table._info and 'query' in self._table._info['view']:
return _query.Query(self._table._info['view']['query'])
return None | python | def query(self):
"""The Query that defines the view."""
if not self.exists():
return None
self._table._load_info()
if 'view' in self._table._info and 'query' in self._table._info['view']:
return _query.Query(self._table._info['view']['query'])
return None | [
"def",
"query",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"exists",
"(",
")",
":",
"return",
"None",
"self",
".",
"_table",
".",
"_load_info",
"(",
")",
"if",
"'view'",
"in",
"self",
".",
"_table",
".",
"_info",
"and",
"'query'",
"in",
"self",
".",
"_table",
".",
"_info",
"[",
"'view'",
"]",
":",
"return",
"_query",
".",
"Query",
"(",
"self",
".",
"_table",
".",
"_info",
"[",
"'view'",
"]",
"[",
"'query'",
"]",
")",
"return",
"None"
] | The Query that defines the view. | [
"The",
"Query",
"that",
"defines",
"the",
"view",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_view.py#L70-L77 | train | 237,871 |
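A sketch of recovering the defining SQL (hypothetical view name; the .sql attribute on Query is an assumption):

    import google.datalab.bigquery as bq

    v = bq.View('mydataset.myview')   # hypothetical
    q = v.query                       # None if the view does not exist
    if q is not None:
        print(q.sql)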
googledatalab/pydatalab | solutionbox/structured_data/mltoolbox/_structured_data/preprocess/local_preprocess.py | run_numerical_categorical_analysis | def run_numerical_categorical_analysis(args, schema_list):
"""Makes the numerical and categorical analysis files.
Args:
args: the command line args
schema_list: python object of the schema json file.
Raises:
ValueError: if schema contains unknown column types.
"""
header = [column['name'] for column in schema_list]
input_files = file_io.get_matching_files(args.input_file_pattern)
# Check the schema is valid
for col_schema in schema_list:
col_type = col_schema['type'].lower()
if col_type != 'string' and col_type != 'integer' and col_type != 'float':
raise ValueError('Schema contains an unsupported type %s.' % col_type)
# initialize the results
def _init_numerical_results():
return {'min': float('inf'),
'max': float('-inf'),
'count': 0,
'sum': 0.0}
numerical_results = collections.defaultdict(_init_numerical_results)
categorical_results = collections.defaultdict(set)
# for each file, update the numerical stats from that file, and update the set
# of unique labels.
for input_file in input_files:
with file_io.FileIO(input_file, 'r') as f:
for line in f:
parsed_line = dict(zip(header, line.strip().split(',')))
for col_schema in schema_list:
col_name = col_schema['name']
col_type = col_schema['type']
if col_type.lower() == 'string':
categorical_results[col_name].update([parsed_line[col_name]])
else:
# numerical column.
# if empty, skip
if not parsed_line[col_name].strip():
continue
numerical_results[col_name]['min'] = (
min(numerical_results[col_name]['min'],
float(parsed_line[col_name])))
numerical_results[col_name]['max'] = (
max(numerical_results[col_name]['max'],
float(parsed_line[col_name])))
numerical_results[col_name]['count'] += 1
numerical_results[col_name]['sum'] += float(parsed_line[col_name])
# Update numerical_results to just have min/max/mean
for col_schema in schema_list:
if col_schema['type'].lower() != 'string':
col_name = col_schema['name']
mean = numerical_results[col_name]['sum'] / numerical_results[col_name]['count']
del numerical_results[col_name]['sum']
del numerical_results[col_name]['count']
numerical_results[col_name]['mean'] = mean
# Write the numerical_results to a json file.
file_io.write_string_to_file(
os.path.join(args.output_dir, NUMERICAL_ANALYSIS_FILE),
json.dumps(numerical_results, indent=2, separators=(',', ': ')))
# Write the vocab files. Each label is on its own line.
for name, unique_labels in six.iteritems(categorical_results):
labels = '\n'.join(list(unique_labels))
file_io.write_string_to_file(
os.path.join(args.output_dir, CATEGORICAL_ANALYSIS_FILE % name),
labels) | python | def run_numerical_categorical_analysis(args, schema_list):
"""Makes the numerical and categorical analysis files.
Args:
args: the command line args
schema_list: python object of the schema json file.
Raises:
ValueError: if schema contains unknown column types.
"""
header = [column['name'] for column in schema_list]
input_files = file_io.get_matching_files(args.input_file_pattern)
# Check the schema is valid
for col_schema in schema_list:
col_type = col_schema['type'].lower()
if col_type != 'string' and col_type != 'integer' and col_type != 'float':
raise ValueError('Schema contains an unsupported type %s.' % col_type)
# initialize the results
def _init_numerical_results():
return {'min': float('inf'),
'max': float('-inf'),
'count': 0,
'sum': 0.0}
numerical_results = collections.defaultdict(_init_numerical_results)
categorical_results = collections.defaultdict(set)
# for each file, update the numerical stats from that file, and update the set
# of unique labels.
for input_file in input_files:
with file_io.FileIO(input_file, 'r') as f:
for line in f:
parsed_line = dict(zip(header, line.strip().split(',')))
for col_schema in schema_list:
col_name = col_schema['name']
col_type = col_schema['type']
if col_type.lower() == 'string':
categorical_results[col_name].update([parsed_line[col_name]])
else:
# numerical column.
# if empty, skip
if not parsed_line[col_name].strip():
continue
numerical_results[col_name]['min'] = (
min(numerical_results[col_name]['min'],
float(parsed_line[col_name])))
numerical_results[col_name]['max'] = (
max(numerical_results[col_name]['max'],
float(parsed_line[col_name])))
numerical_results[col_name]['count'] += 1
numerical_results[col_name]['sum'] += float(parsed_line[col_name])
# Update numerical_results to just have min/max/mean
for col_schema in schema_list:
if col_schema['type'].lower() != 'string':
col_name = col_schema['name']
mean = numerical_results[col_name]['sum'] / numerical_results[col_name]['count']
del numerical_results[col_name]['sum']
del numerical_results[col_name]['count']
numerical_results[col_name]['mean'] = mean
# Write the numerical_results to a json file.
file_io.write_string_to_file(
os.path.join(args.output_dir, NUMERICAL_ANALYSIS_FILE),
json.dumps(numerical_results, indent=2, separators=(',', ': ')))
# Write the vocab files. Each label is on its own line.
for name, unique_labels in six.iteritems(categorical_results):
labels = '\n'.join(list(unique_labels))
file_io.write_string_to_file(
os.path.join(args.output_dir, CATEGORICAL_ANALYSIS_FILE % name),
labels) | [
"def",
"run_numerical_categorical_analysis",
"(",
"args",
",",
"schema_list",
")",
":",
"header",
"=",
"[",
"column",
"[",
"'name'",
"]",
"for",
"column",
"in",
"schema_list",
"]",
"input_files",
"=",
"file_io",
".",
"get_matching_files",
"(",
"args",
".",
"input_file_pattern",
")",
"# Check the schema is valid",
"for",
"col_schema",
"in",
"schema_list",
":",
"col_type",
"=",
"col_schema",
"[",
"'type'",
"]",
".",
"lower",
"(",
")",
"if",
"col_type",
"!=",
"'string'",
"and",
"col_type",
"!=",
"'integer'",
"and",
"col_type",
"!=",
"'float'",
":",
"raise",
"ValueError",
"(",
"'Schema contains an unsupported type %s.'",
"%",
"col_type",
")",
"# initialize the results",
"def",
"_init_numerical_results",
"(",
")",
":",
"return",
"{",
"'min'",
":",
"float",
"(",
"'inf'",
")",
",",
"'max'",
":",
"float",
"(",
"'-inf'",
")",
",",
"'count'",
":",
"0",
",",
"'sum'",
":",
"0.0",
"}",
"numerical_results",
"=",
"collections",
".",
"defaultdict",
"(",
"_init_numerical_results",
")",
"categorical_results",
"=",
"collections",
".",
"defaultdict",
"(",
"set",
")",
"# for each file, update the numerical stats from that file, and update the set",
"# of unique labels.",
"for",
"input_file",
"in",
"input_files",
":",
"with",
"file_io",
".",
"FileIO",
"(",
"input_file",
",",
"'r'",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"parsed_line",
"=",
"dict",
"(",
"zip",
"(",
"header",
",",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"','",
")",
")",
")",
"for",
"col_schema",
"in",
"schema_list",
":",
"col_name",
"=",
"col_schema",
"[",
"'name'",
"]",
"col_type",
"=",
"col_schema",
"[",
"'type'",
"]",
"if",
"col_type",
".",
"lower",
"(",
")",
"==",
"'string'",
":",
"categorical_results",
"[",
"col_name",
"]",
".",
"update",
"(",
"[",
"parsed_line",
"[",
"col_name",
"]",
"]",
")",
"else",
":",
"# numerical column.",
"# if empty, skip",
"if",
"not",
"parsed_line",
"[",
"col_name",
"]",
".",
"strip",
"(",
")",
":",
"continue",
"numerical_results",
"[",
"col_name",
"]",
"[",
"'min'",
"]",
"=",
"(",
"min",
"(",
"numerical_results",
"[",
"col_name",
"]",
"[",
"'min'",
"]",
",",
"float",
"(",
"parsed_line",
"[",
"col_name",
"]",
")",
")",
")",
"numerical_results",
"[",
"col_name",
"]",
"[",
"'max'",
"]",
"=",
"(",
"max",
"(",
"numerical_results",
"[",
"col_name",
"]",
"[",
"'max'",
"]",
",",
"float",
"(",
"parsed_line",
"[",
"col_name",
"]",
")",
")",
")",
"numerical_results",
"[",
"col_name",
"]",
"[",
"'count'",
"]",
"+=",
"1",
"numerical_results",
"[",
"col_name",
"]",
"[",
"'sum'",
"]",
"+=",
"float",
"(",
"parsed_line",
"[",
"col_name",
"]",
")",
"# Update numerical_results to just have min/min/mean",
"for",
"col_schema",
"in",
"schema_list",
":",
"if",
"col_schema",
"[",
"'type'",
"]",
".",
"lower",
"(",
")",
"!=",
"'string'",
":",
"col_name",
"=",
"col_schema",
"[",
"'name'",
"]",
"mean",
"=",
"numerical_results",
"[",
"col_name",
"]",
"[",
"'sum'",
"]",
"/",
"numerical_results",
"[",
"col_name",
"]",
"[",
"'count'",
"]",
"del",
"numerical_results",
"[",
"col_name",
"]",
"[",
"'sum'",
"]",
"del",
"numerical_results",
"[",
"col_name",
"]",
"[",
"'count'",
"]",
"numerical_results",
"[",
"col_name",
"]",
"[",
"'mean'",
"]",
"=",
"mean",
"# Write the numerical_results to a json file.",
"file_io",
".",
"write_string_to_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"output_dir",
",",
"NUMERICAL_ANALYSIS_FILE",
")",
",",
"json",
".",
"dumps",
"(",
"numerical_results",
",",
"indent",
"=",
"2",
",",
"separators",
"=",
"(",
"','",
",",
"': '",
")",
")",
")",
"# Write the vocab files. Each label is on its own line.",
"for",
"name",
",",
"unique_labels",
"in",
"six",
".",
"iteritems",
"(",
"categorical_results",
")",
":",
"labels",
"=",
"'\\n'",
".",
"join",
"(",
"list",
"(",
"unique_labels",
")",
")",
"file_io",
".",
"write_string_to_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"output_dir",
",",
"CATEGORICAL_ANALYSIS_FILE",
"%",
"name",
")",
",",
"labels",
")"
] | Makes the numerical and categorical analysis files.
Args:
args: the command line args
schema_list: python object of the schema json file.
Raises:
ValueError: if schema contains unknown column types. | [
"Makes",
"the",
"numerical",
"and",
"categorical",
"analysis",
"files",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/preprocess/local_preprocess.py#L69-L144 | train | 237,872 |
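The per-column single-pass accumulation it performs reduces to this standalone pattern (illustrative values, not from the dataset):

    import collections

    def _init():
        return {'min': float('inf'), 'max': float('-inf'), 'count': 0, 'sum': 0.0}

    stats = collections.defaultdict(_init)
    for value in [18.0, 90.0, 16.0]:            # one numeric column's values
        s = stats['age']
        s['min'] = min(s['min'], value)
        s['max'] = max(s['max'], value)
        s['count'] += 1
        s['sum'] += value
    mean = stats['age']['sum'] / stats['age']['count']  # finalized as min/max/mean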
googledatalab/pydatalab | solutionbox/structured_data/mltoolbox/_structured_data/preprocess/local_preprocess.py | run_analysis | def run_analysis(args):
"""Builds an analysis files for training."""
# Read the schema and input feature types
schema_list = json.loads(
file_io.read_file_to_string(args.schema_file))
run_numerical_categorical_analysis(args, schema_list)
# Also save a copy of the schema in the output folder.
file_io.copy(args.schema_file,
os.path.join(args.output_dir, SCHEMA_FILE),
overwrite=True) | python | def run_analysis(args):
"""Builds an analysis files for training."""
# Read the schema and input feature types
schema_list = json.loads(
file_io.read_file_to_string(args.schema_file))
run_numerical_categorical_analysis(args, schema_list)
# Also save a copy of the schema in the output folder.
file_io.copy(args.schema_file,
os.path.join(args.output_dir, SCHEMA_FILE),
overwrite=True) | [
"def",
"run_analysis",
"(",
"args",
")",
":",
"# Read the schema and input feature types",
"schema_list",
"=",
"json",
".",
"loads",
"(",
"file_io",
".",
"read_file_to_string",
"(",
"args",
".",
"schema_file",
")",
")",
"run_numerical_categorical_analysis",
"(",
"args",
",",
"schema_list",
")",
"# Also save a copy of the schema in the output folder.",
"file_io",
".",
"copy",
"(",
"args",
".",
"schema_file",
",",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"output_dir",
",",
"SCHEMA_FILE",
")",
",",
"overwrite",
"=",
"True",
")"
] | Builds analysis files for training. | [
"Builds",
"an",
"analysis",
"files",
"for",
"training",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/preprocess/local_preprocess.py#L147-L159 | train | 237,873 |
googledatalab/pydatalab | google/datalab/utils/commands/_html.py | Html._repr_html_ | def _repr_html_(self):
"""Generates the HTML representation.
"""
parts = []
if self._class:
parts.append('<div id="hh_%s" class="%s">%s</div>' % (self._id, self._class, self._markup))
else:
parts.append('<div id="hh_%s">%s</div>' % (self._id, self._markup))
if len(self._script) != 0:
parts.append('<script>')
parts.append('require([')
parts.append(','.join(['"%s"' % d[0] for d in self._dependencies]))
parts.append('], function(')
parts.append(','.join([d[1] for d in self._dependencies]))
parts.append(') {')
parts.append(self._script)
parts.append('});')
parts.append('</script>')
return ''.join(parts) | python | def _repr_html_(self):
"""Generates the HTML representation.
"""
parts = []
if self._class:
parts.append('<div id="hh_%s" class="%s">%s</div>' % (self._id, self._class, self._markup))
else:
parts.append('<div id="hh_%s">%s</div>' % (self._id, self._markup))
if len(self._script) != 0:
parts.append('<script>')
parts.append('require([')
parts.append(','.join(['"%s"' % d[0] for d in self._dependencies]))
parts.append('], function(')
parts.append(','.join([d[1] for d in self._dependencies]))
parts.append(') {')
parts.append(self._script)
parts.append('});')
parts.append('</script>')
return ''.join(parts) | [
"def",
"_repr_html_",
"(",
"self",
")",
":",
"parts",
"=",
"[",
"]",
"if",
"self",
".",
"_class",
":",
"parts",
".",
"append",
"(",
"'<div id=\"hh_%s\" class=\"%s\">%s</div>'",
"%",
"(",
"self",
".",
"_id",
",",
"self",
".",
"_class",
",",
"self",
".",
"_markup",
")",
")",
"else",
":",
"parts",
".",
"append",
"(",
"'<div id=\"hh_%s\">%s</div>'",
"%",
"(",
"self",
".",
"_id",
",",
"self",
".",
"_markup",
")",
")",
"if",
"len",
"(",
"self",
".",
"_script",
")",
"!=",
"0",
":",
"parts",
".",
"append",
"(",
"'<script>'",
")",
"parts",
".",
"append",
"(",
"'require(['",
")",
"parts",
".",
"append",
"(",
"','",
".",
"join",
"(",
"[",
"'\"%s\"'",
"%",
"d",
"[",
"0",
"]",
"for",
"d",
"in",
"self",
".",
"_dependencies",
"]",
")",
")",
"parts",
".",
"append",
"(",
"'], function('",
")",
"parts",
".",
"append",
"(",
"','",
".",
"join",
"(",
"[",
"d",
"[",
"1",
"]",
"for",
"d",
"in",
"self",
".",
"_dependencies",
"]",
")",
")",
"parts",
".",
"append",
"(",
"') {'",
")",
"parts",
".",
"append",
"(",
"self",
".",
"_script",
")",
"parts",
".",
"append",
"(",
"'});'",
")",
"parts",
".",
"append",
"(",
"'</script>'",
")",
"return",
"''",
".",
"join",
"(",
"parts",
")"
] | Generates the HTML representation. | [
"Generates",
"the",
"HTML",
"representation",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/commands/_html.py#L64-L84 | train | 237,874 |
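A worked example of the concatenation above for a hypothetical instance (values chosen for illustration):

    # Given _id='1', _markup='hi', _class=None,
    #       _dependencies=[('viz/d3', 'd3')], _script="d3.select('#hh_1');",
    # _repr_html_() joins the parts into the single string:
    #   <div id="hh_1">hi</div><script>require(["viz/d3"],
    #   function(d3) {d3.select('#hh_1');});</script>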
googledatalab/pydatalab | google/datalab/utils/commands/_html.py | HtmlBuilder._render_objects | def _render_objects(self, items, attributes=None, datatype='object'):
"""Renders an HTML table with the specified list of objects.
Args:
items: the iterable collection of objects to render.
attributes: the optional list of properties or keys to render.
datatype: the type of data; one of 'object' for Python objects, 'dict' for a list
of dictionaries, or 'chartdata' for Google chart data.
"""
if not items:
return
if datatype == 'chartdata':
if not attributes:
attributes = [items['cols'][i]['label'] for i in range(0, len(items['cols']))]
items = items['rows']
indices = {attributes[i]: i for i in range(0, len(attributes))}
num_segments = len(self._segments)
self._segments.append('<table>')
first = True
for o in items:
if first:
first = False
if datatype == 'dict' and not attributes:
attributes = list(o.keys())
if attributes is not None:
self._segments.append('<tr>')
for attr in attributes:
self._segments.append('<th>%s</th>' % attr)
self._segments.append('</tr>')
self._segments.append('<tr>')
if attributes is None:
self._segments.append('<td>%s</td>' % HtmlBuilder._format(o))
else:
for attr in attributes:
if datatype == 'dict':
self._segments.append('<td>%s</td>' % HtmlBuilder._format(o.get(attr, None), nbsp=True))
elif datatype == 'chartdata':
self._segments.append('<td>%s</td>' % HtmlBuilder._format(o['c'][indices[attr]]['v'],
nbsp=True))
else:
self._segments.append('<td>%s</td>' % HtmlBuilder._format(o.__getattribute__(attr),
nbsp=True))
self._segments.append('</tr>')
self._segments.append('</table>')
if first:
# The table was empty; drop it from the segments.
self._segments = self._segments[:num_segments] | python | def _render_objects(self, items, attributes=None, datatype='object'):
"""Renders an HTML table with the specified list of objects.
Args:
items: the iterable collection of objects to render.
attributes: the optional list of properties or keys to render.
datatype: the type of data; one of 'object' for Python objects, 'dict' for a list
of dictionaries, or 'chartdata' for Google chart data.
"""
if not items:
return
if datatype == 'chartdata':
if not attributes:
attributes = [items['cols'][i]['label'] for i in range(0, len(items['cols']))]
items = items['rows']
indices = {attributes[i]: i for i in range(0, len(attributes))}
num_segments = len(self._segments)
self._segments.append('<table>')
first = True
for o in items:
if first:
first = False
if datatype == 'dict' and not attributes:
attributes = list(o.keys())
if attributes is not None:
self._segments.append('<tr>')
for attr in attributes:
self._segments.append('<th>%s</th>' % attr)
self._segments.append('</tr>')
self._segments.append('<tr>')
if attributes is None:
self._segments.append('<td>%s</td>' % HtmlBuilder._format(o))
else:
for attr in attributes:
if datatype == 'dict':
self._segments.append('<td>%s</td>' % HtmlBuilder._format(o.get(attr, None), nbsp=True))
elif datatype == 'chartdata':
self._segments.append('<td>%s</td>' % HtmlBuilder._format(o['c'][indices[attr]]['v'],
nbsp=True))
else:
self._segments.append('<td>%s</td>' % HtmlBuilder._format(o.__getattribute__(attr),
nbsp=True))
self._segments.append('</tr>')
self._segments.append('</table>')
if first:
# The table was empty; drop it from the segments.
self._segments = self._segments[:num_segments] | [
"def",
"_render_objects",
"(",
"self",
",",
"items",
",",
"attributes",
"=",
"None",
",",
"datatype",
"=",
"'object'",
")",
":",
"if",
"not",
"items",
":",
"return",
"if",
"datatype",
"==",
"'chartdata'",
":",
"if",
"not",
"attributes",
":",
"attributes",
"=",
"[",
"items",
"[",
"'cols'",
"]",
"[",
"i",
"]",
"[",
"'label'",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"items",
"[",
"'cols'",
"]",
")",
")",
"]",
"items",
"=",
"items",
"[",
"'rows'",
"]",
"indices",
"=",
"{",
"attributes",
"[",
"i",
"]",
":",
"i",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"attributes",
")",
")",
"}",
"num_segments",
"=",
"len",
"(",
"self",
".",
"_segments",
")",
"self",
".",
"_segments",
".",
"append",
"(",
"'<table>'",
")",
"first",
"=",
"True",
"for",
"o",
"in",
"items",
":",
"if",
"first",
":",
"first",
"=",
"False",
"if",
"datatype",
"==",
"'dict'",
"and",
"not",
"attributes",
":",
"attributes",
"=",
"list",
"(",
"o",
".",
"keys",
"(",
")",
")",
"if",
"attributes",
"is",
"not",
"None",
":",
"self",
".",
"_segments",
".",
"append",
"(",
"'<tr>'",
")",
"for",
"attr",
"in",
"attributes",
":",
"self",
".",
"_segments",
".",
"append",
"(",
"'<th>%s</th>'",
"%",
"attr",
")",
"self",
".",
"_segments",
".",
"append",
"(",
"'</tr>'",
")",
"self",
".",
"_segments",
".",
"append",
"(",
"'<tr>'",
")",
"if",
"attributes",
"is",
"None",
":",
"self",
".",
"_segments",
".",
"append",
"(",
"'<td>%s</td>'",
"%",
"HtmlBuilder",
".",
"_format",
"(",
"o",
")",
")",
"else",
":",
"for",
"attr",
"in",
"attributes",
":",
"if",
"datatype",
"==",
"'dict'",
":",
"self",
".",
"_segments",
".",
"append",
"(",
"'<td>%s</td>'",
"%",
"HtmlBuilder",
".",
"_format",
"(",
"o",
".",
"get",
"(",
"attr",
",",
"None",
")",
",",
"nbsp",
"=",
"True",
")",
")",
"elif",
"datatype",
"==",
"'chartdata'",
":",
"self",
".",
"_segments",
".",
"append",
"(",
"'<td>%s</td>'",
"%",
"HtmlBuilder",
".",
"_format",
"(",
"o",
"[",
"'c'",
"]",
"[",
"indices",
"[",
"attr",
"]",
"]",
"[",
"'v'",
"]",
",",
"nbsp",
"=",
"True",
")",
")",
"else",
":",
"self",
".",
"_segments",
".",
"append",
"(",
"'<td>%s</td>'",
"%",
"HtmlBuilder",
".",
"_format",
"(",
"o",
".",
"__getattribute__",
"(",
"attr",
")",
",",
"nbsp",
"=",
"True",
")",
")",
"self",
".",
"_segments",
".",
"append",
"(",
"'</tr>'",
")",
"self",
".",
"_segments",
".",
"append",
"(",
"'</table>'",
")",
"if",
"first",
":",
"# The table was empty; drop it from the segments.",
"self",
".",
"_segments",
"=",
"self",
".",
"_segments",
"[",
":",
"num_segments",
"]"
] | Renders an HTML table with the specified list of objects.
Args:
items: the iterable collection of objects to render.
attributes: the optional list of properties or keys to render.
datatype: the type of data; one of 'object' for Python objects, 'dict' for a list
of dictionaries, or 'chartdata' for Google chart data. | [
"Renders",
"an",
"HTML",
"table",
"with",
"the",
"specified",
"list",
"of",
"objects",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/commands/_html.py#L96-L149 | train | 237,875 |
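A sketch for the 'dict' datatype (assumes HtmlBuilder takes no constructor arguments):

    b = HtmlBuilder()
    b._render_objects([{'name': 'a', 'n': 1}, {'name': 'b', 'n': 2}],
                      attributes=['name', 'n'], datatype='dict')
    # b._segments now holds '<table>', a header row with 'name'/'n',
    # one '<tr>' of '<td>' cells per dict, and '</table>'.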
googledatalab/pydatalab | google/datalab/utils/commands/_html.py | HtmlBuilder._render_list | def _render_list(self, items, empty='<pre><empty></pre>'):
"""Renders an HTML list with the specified list of strings.
Args:
items: the iterable collection of objects to render.
empty: what to render if the list is None or empty.
"""
if not items or len(items) == 0:
self._segments.append(empty)
return
self._segments.append('<ul>')
for o in items:
self._segments.append('<li>')
self._segments.append(str(o))
self._segments.append('</li>')
self._segments.append('</ul>') | python | def _render_list(self, items, empty='<pre><empty></pre>'):
"""Renders an HTML list with the specified list of strings.
Args:
items: the iterable collection of objects to render.
empty: what to render if the list is None or empty.
"""
if not items or len(items) == 0:
self._segments.append(empty)
return
self._segments.append('<ul>')
for o in items:
self._segments.append('<li>')
self._segments.append(str(o))
self._segments.append('</li>')
self._segments.append('</ul>') | [
"def",
"_render_list",
"(",
"self",
",",
"items",
",",
"empty",
"=",
"'<pre><empty></pre>'",
")",
":",
"if",
"not",
"items",
"or",
"len",
"(",
"items",
")",
"==",
"0",
":",
"self",
".",
"_segments",
".",
"append",
"(",
"empty",
")",
"return",
"self",
".",
"_segments",
".",
"append",
"(",
"'<ul>'",
")",
"for",
"o",
"in",
"items",
":",
"self",
".",
"_segments",
".",
"append",
"(",
"'<li>'",
")",
"self",
".",
"_segments",
".",
"append",
"(",
"str",
"(",
"o",
")",
")",
"self",
".",
"_segments",
".",
"append",
"(",
"'</li>'",
")",
"self",
".",
"_segments",
".",
"append",
"(",
"'</ul>'",
")"
] | Renders an HTML list with the specified list of strings.
Args:
items: the iterable collection of objects to render.
empty: what to render if the list is None or empty. | [
"Renders",
"an",
"HTML",
"list",
"with",
"the",
"specified",
"list",
"of",
"strings",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/commands/_html.py#L161-L176 | train | 237,876 |
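A minimal sketch of the same logic as a free function (a hypothetical helper, not part of the datalab API), since constructing an HtmlBuilder directly isn't shown in these records:

def render_list_segments(items, empty='<pre>&lt;empty&gt;</pre>'):
  # Mirrors _render_list above: empty or missing input renders the placeholder.
  if not items:
    return [empty]
  segments = ['<ul>']
  for o in items:
    segments.extend(['<li>', str(o), '</li>'])
  segments.append('</ul>')
  return segments

print(''.join(render_list_segments(['a', 'b'])))  # <ul><li>a</li><li>b</li></ul>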
googledatalab/pydatalab | datalab/bigquery/_table.py | Table.sample | def sample(self, fields=None, count=5, sampling=None, use_cache=True, dialect=None,
billing_tier=None):
"""Retrieves a sampling of data from the table.
Args:
fields: an optional list of field names to retrieve.
count: an optional count of rows to retrieve which is used if a specific
sampling is not specified.
sampling: an optional sampling strategy to apply to the table.
use_cache: whether to use cached results or not.
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A QueryResultsTable object containing the resulting data.
Raises:
Exception if the sample query could not be executed or query response was malformed.
"""
# Do import here to avoid top-level circular dependencies.
from . import _query
sql = self._repr_sql_()
return _query.Query.sampling_query(sql, context=self._context, count=count, fields=fields,
sampling=sampling).results(use_cache=use_cache,
dialect=dialect,
billing_tier=billing_tier) | python | def sample(self, fields=None, count=5, sampling=None, use_cache=True, dialect=None,
billing_tier=None):
"""Retrieves a sampling of data from the table.
Args:
fields: an optional list of field names to retrieve.
count: an optional count of rows to retrieve which is used if a specific
sampling is not specified.
sampling: an optional sampling strategy to apply to the table.
use_cache: whether to use cached results or not.
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A QueryResultsTable object containing the resulting data.
Raises:
Exception if the sample query could not be executed or query response was malformed.
"""
# Do import here to avoid top-level circular dependencies.
from . import _query
sql = self._repr_sql_()
return _query.Query.sampling_query(sql, context=self._context, count=count, fields=fields,
sampling=sampling).results(use_cache=use_cache,
dialect=dialect,
billing_tier=billing_tier) | [
"def",
"sample",
"(",
"self",
",",
"fields",
"=",
"None",
",",
"count",
"=",
"5",
",",
"sampling",
"=",
"None",
",",
"use_cache",
"=",
"True",
",",
"dialect",
"=",
"None",
",",
"billing_tier",
"=",
"None",
")",
":",
"# Do import here to avoid top-level circular dependencies.",
"from",
".",
"import",
"_query",
"sql",
"=",
"self",
".",
"_repr_sql_",
"(",
")",
"return",
"_query",
".",
"Query",
".",
"sampling_query",
"(",
"sql",
",",
"context",
"=",
"self",
".",
"_context",
",",
"count",
"=",
"count",
",",
"fields",
"=",
"fields",
",",
"sampling",
"=",
"sampling",
")",
".",
"results",
"(",
"use_cache",
"=",
"use_cache",
",",
"dialect",
"=",
"dialect",
",",
"billing_tier",
"=",
"billing_tier",
")"
] | Retrieves a sampling of data from the table.
Args:
fields: an optional list of field names to retrieve.
count: an optional count of rows to retrieve which is used if a specific
sampling is not specified.
sampling: an optional sampling strategy to apply to the table.
use_cache: whether to use cached results or not.
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A QueryResultsTable object containing the resulting data.
Raises:
Exception if the sample query could not be executed or query response was malformed. | [
"Retrieves",
"a",
"sampling",
"of",
"data",
"from",
"the",
"table",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_table.py#L248-L277 | train | 237,877 |
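A hedged usage sketch of sample, assuming a datalab environment with default credentials; the dataset and table names are hypothetical:

import datalab.bigquery as bq

table = bq.Table('my_dataset.my_table')  # hypothetical table
# Five rows of two fields, standard SQL, served from cache when possible.
results = table.sample(fields=['name', 'value'], count=5,
                       use_cache=True, dialect='standard')
for row in results:
  print(row)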
googledatalab/pydatalab | datalab/bigquery/_table.py | Table._encode_dict_as_row | def _encode_dict_as_row(record, column_name_map):
""" Encode a dictionary representing a table row in a form suitable for streaming to BQ.
This includes encoding timestamps as ISO-compatible strings and removing invalid
characters from column names.
Args:
record: a Python dictionary representing the table row.
column_name_map: a dictionary mapping dictionary keys to column names. This is initially
empty and built up by this method when it first encounters each column, then used as a
cache subsequently.
Returns:
The sanitized dictionary.
"""
for k in list(record.keys()):
v = record[k]
# If the column is a date, convert to ISO string.
if isinstance(v, pandas.Timestamp) or isinstance(v, datetime.datetime):
v = record[k] = record[k].isoformat()
# If k has invalid characters clean it up
if k not in column_name_map:
column_name_map[k] = ''.join(c for c in k if c in Table._VALID_COLUMN_NAME_CHARACTERS)
new_k = column_name_map[k]
if k != new_k:
record[new_k] = v
del record[k]
return record | python | def _encode_dict_as_row(record, column_name_map):
""" Encode a dictionary representing a table row in a form suitable for streaming to BQ.
This includes encoding timestamps as ISO-compatible strings and removing invalid
characters from column names.
Args:
record: a Python dictionary representing the table row.
column_name_map: a dictionary mapping dictionary keys to column names. This is initially
empty and built up by this method when it first encounters each column, then used as a
cache subsequently.
Returns:
The sanitized dictionary.
"""
for k in list(record.keys()):
v = record[k]
# If the column is a date, convert to ISO string.
if isinstance(v, pandas.Timestamp) or isinstance(v, datetime.datetime):
v = record[k] = record[k].isoformat()
# If k has invalid characters clean it up
if k not in column_name_map:
column_name_map[k] = ''.join(c for c in k if c in Table._VALID_COLUMN_NAME_CHARACTERS)
new_k = column_name_map[k]
if k != new_k:
record[new_k] = v
del record[k]
return record | [
"def",
"_encode_dict_as_row",
"(",
"record",
",",
"column_name_map",
")",
":",
"for",
"k",
"in",
"list",
"(",
"record",
".",
"keys",
"(",
")",
")",
":",
"v",
"=",
"record",
"[",
"k",
"]",
"# If the column is a date, convert to ISO string.",
"if",
"isinstance",
"(",
"v",
",",
"pandas",
".",
"Timestamp",
")",
"or",
"isinstance",
"(",
"v",
",",
"datetime",
".",
"datetime",
")",
":",
"v",
"=",
"record",
"[",
"k",
"]",
"=",
"record",
"[",
"k",
"]",
".",
"isoformat",
"(",
")",
"# If k has invalid characters clean it up",
"if",
"k",
"not",
"in",
"column_name_map",
":",
"column_name_map",
"[",
"k",
"]",
"=",
"''",
".",
"join",
"(",
"c",
"for",
"c",
"in",
"k",
"if",
"c",
"in",
"Table",
".",
"_VALID_COLUMN_NAME_CHARACTERS",
")",
"new_k",
"=",
"column_name_map",
"[",
"k",
"]",
"if",
"k",
"!=",
"new_k",
":",
"record",
"[",
"new_k",
"]",
"=",
"v",
"del",
"record",
"[",
"k",
"]",
"return",
"record"
] | Encode a dictionary representing a table row in a form suitable for streaming to BQ.
This includes encoding timestamps as ISO-compatible strings and removing invalid
characters from column names.
Args:
record: a Python dictionary representing the table row.
column_name_map: a dictionary mapping dictionary keys to column names. This is initially
empty and built up by this method when it first encounters each column, then used as a
cache subsequently.
Returns:
The sanitized dictionary. | [
"Encode",
"a",
"dictionary",
"representing",
"a",
"table",
"row",
"in",
"a",
"form",
"suitable",
"for",
"streaming",
"to",
"BQ",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_table.py#L280-L307 | train | 237,878 |
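The sanitizer above does two things: it ISO-encodes datetime-like values and strips invalid characters from keys, memoizing the cleaned names in column_name_map. A standalone sketch with a reduced valid-character set (hypothetical, so it runs without the Table class or pandas):

import datetime

VALID = set('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_')

def encode_row(record, column_name_map):
  for k in list(record.keys()):
    v = record[k]
    # Timestamps become ISO strings, as in the method above.
    if isinstance(v, datetime.datetime):
      v = record[k] = v.isoformat()
    # Clean the key once, then reuse the cached mapping.
    if k not in column_name_map:
      column_name_map[k] = ''.join(c for c in k if c in VALID)
    new_k = column_name_map[k]
    if k != new_k:
      record[new_k] = v
      del record[k]
  return record

row = {'event time': datetime.datetime(2017, 1, 1), 'count': 3}
print(encode_row(row, {}))  # {'count': 3, 'eventtime': '2017-01-01T00:00:00'}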
googledatalab/pydatalab | datalab/bigquery/_table.py | Table.insert_data | def insert_data(self, data, include_index=False, index_name=None):
""" Insert the contents of a Pandas DataFrame or a list of dictionaries into the table.
The insertion will be performed using at most 500 rows per POST, and at most 10 POSTs per
second, as BigQuery has some limits on streaming rates.
Args:
data: the DataFrame or list to insert.
include_index: whether to include the DataFrame or list index as a column in the BQ table.
index_name: for a list, if include_index is True, this should be the name for the index.
If not specified, 'Index' will be used.
Returns:
The table.
Raises:
Exception if the table doesn't exist, the table's schema differs from the data's schema,
or the insert failed.
"""
# TODO(gram): we could create the Table here if it doesn't exist using a schema derived
# from the data. IIRC we decided not to, but doing so seems less unwieldy than having to
# create it first and then validate the schema against it.
# There are BigQuery limits on the streaming API:
#
# max_rows_per_post = 500
# max_bytes_per_row = 20000
# max_rows_per_second = 10000
# max_bytes_per_post = 1000000
# max_bytes_per_second = 10000000
#
# It is non-trivial to enforce these here, and the max bytes per row is not something we
# can really control. As an approximation we enforce the 500 row limit
# with a 0.05 sec POST interval (to enforce the 10,000 rows per sec limit).
max_rows_per_post = 500
post_interval = 0.05
# TODO(gram): add different exception types for each failure case.
if not self.exists():
raise Exception('Table %s does not exist.' % self._full_name)
data_schema = _schema.Schema.from_data(data)
if isinstance(data, list):
if include_index:
if not index_name:
index_name = 'Index'
data_schema._add_field(index_name, 'INTEGER')
table_schema = self.schema
# Do some validation of the two schema to make sure they are compatible.
for data_field in data_schema:
name = data_field.name
table_field = table_schema[name]
if table_field is None:
raise Exception('Table does not contain field %s' % name)
data_type = data_field.data_type
table_type = table_field.data_type
if table_type != data_type:
raise Exception('Field %s in data has type %s but in table has type %s' %
(name, data_type, table_type))
total_rows = len(data)
total_pushed = 0
job_id = uuid.uuid4().hex
rows = []
column_name_map = {}
is_dataframe = isinstance(data, pandas.DataFrame)
if is_dataframe:
# reset_index creates a new dataframe so we don't affect the original. reset_index(drop=True)
# drops the original index and uses an integer range.
gen = data.reset_index(drop=not include_index).iterrows()
else:
gen = enumerate(data)
for index, row in gen:
if is_dataframe:
row = row.to_dict()
elif include_index:
row[index_name] = index
rows.append({
'json': self._encode_dict_as_row(row, column_name_map),
'insertId': job_id + str(index)
})
total_pushed += 1
if (total_pushed == total_rows) or (len(rows) == max_rows_per_post):
try:
response = self._api.tabledata_insert_all(self._name_parts, rows)
except Exception as e:
raise e
if 'insertErrors' in response:
raise Exception('insertAll failed: %s' % response['insertErrors'])
time.sleep(post_interval) # Streaming API is rate-limited
rows = []
# Block until data is ready
while True:
self._info = self._api.tables_get(self._name_parts)
if 'streamingBuffer' not in self._info or \
'estimatedRows' not in self._info['streamingBuffer'] or \
int(self._info['streamingBuffer']['estimatedRows']) > 0:
break
time.sleep(2)
return self | python | def insert_data(self, data, include_index=False, index_name=None):
""" Insert the contents of a Pandas DataFrame or a list of dictionaries into the table.
The insertion will be performed using at most 500 rows per POST, and at most 10 POSTs per
second, as BigQuery has some limits on streaming rates.
Args:
data: the DataFrame or list to insert.
include_index: whether to include the DataFrame or list index as a column in the BQ table.
index_name: for a list, if include_index is True, this should be the name for the index.
If not specified, 'Index' will be used.
Returns:
The table.
Raises:
Exception if the table doesn't exist, the table's schema differs from the data's schema,
or the insert failed.
"""
# TODO(gram): we could create the Table here if it doesn't exist using a schema derived
# from the data. IIRC we decided not to, but doing so seems less unwieldy than having to
# create it first and then validate the schema against it.
# There are BigQuery limits on the streaming API:
#
# max_rows_per_post = 500
# max_bytes_per_row = 20000
# max_rows_per_second = 10000
# max_bytes_per_post = 1000000
# max_bytes_per_second = 10000000
#
# It is non-trivial to enforce these here, and the max bytes per row is not something we
# can really control. As an approximation we enforce the 500 row limit
# with a 0.05 sec POST interval (to enforce the 10,000 rows per sec limit).
max_rows_per_post = 500
post_interval = 0.05
# TODO(gram): add different exception types for each failure case.
if not self.exists():
raise Exception('Table %s does not exist.' % self._full_name)
data_schema = _schema.Schema.from_data(data)
if isinstance(data, list):
if include_index:
if not index_name:
index_name = 'Index'
data_schema._add_field(index_name, 'INTEGER')
table_schema = self.schema
# Do some validation of the two schema to make sure they are compatible.
for data_field in data_schema:
name = data_field.name
table_field = table_schema[name]
if table_field is None:
raise Exception('Table does not contain field %s' % name)
data_type = data_field.data_type
table_type = table_field.data_type
if table_type != data_type:
raise Exception('Field %s in data has type %s but in table has type %s' %
(name, data_type, table_type))
total_rows = len(data)
total_pushed = 0
job_id = uuid.uuid4().hex
rows = []
column_name_map = {}
is_dataframe = isinstance(data, pandas.DataFrame)
if is_dataframe:
# reset_index creates a new dataframe so we don't affect the original. reset_index(drop=True)
# drops the original index and uses an integer range.
gen = data.reset_index(drop=not include_index).iterrows()
else:
gen = enumerate(data)
for index, row in gen:
if is_dataframe:
row = row.to_dict()
elif include_index:
row[index_name] = index
rows.append({
'json': self._encode_dict_as_row(row, column_name_map),
'insertId': job_id + str(index)
})
total_pushed += 1
if (total_pushed == total_rows) or (len(rows) == max_rows_per_post):
try:
response = self._api.tabledata_insert_all(self._name_parts, rows)
except Exception as e:
raise e
if 'insertErrors' in response:
raise Exception('insertAll failed: %s' % response['insertErrors'])
time.sleep(post_interval) # Streaming API is rate-limited
rows = []
# Block until data is ready
while True:
self._info = self._api.tables_get(self._name_parts)
if 'streamingBuffer' not in self._info or \
'estimatedRows' not in self._info['streamingBuffer'] or \
int(self._info['streamingBuffer']['estimatedRows']) > 0:
break
time.sleep(2)
return self | [
"def",
"insert_data",
"(",
"self",
",",
"data",
",",
"include_index",
"=",
"False",
",",
"index_name",
"=",
"None",
")",
":",
"# TODO(gram): we could create the Table here is it doesn't exist using a schema derived",
"# from the data. IIRC we decided not to but doing so seems less unwieldy that having to",
"# create it first and then validate the schema against it itself.",
"# There are BigQuery limits on the streaming API:",
"#",
"# max_rows_per_post = 500",
"# max_bytes_per_row = 20000",
"# max_rows_per_second = 10000",
"# max_bytes_per_post = 1000000",
"# max_bytes_per_second = 10000000",
"#",
"# It is non-trivial to enforce these here, and the max bytes per row is not something we",
"# can really control. As an approximation we enforce the 500 row limit",
"# with a 0.05 sec POST interval (to enforce the 10,000 rows per sec limit).",
"max_rows_per_post",
"=",
"500",
"post_interval",
"=",
"0.05",
"# TODO(gram): add different exception types for each failure case.",
"if",
"not",
"self",
".",
"exists",
"(",
")",
":",
"raise",
"Exception",
"(",
"'Table %s does not exist.'",
"%",
"self",
".",
"_full_name",
")",
"data_schema",
"=",
"_schema",
".",
"Schema",
".",
"from_data",
"(",
"data",
")",
"if",
"isinstance",
"(",
"data",
",",
"list",
")",
":",
"if",
"include_index",
":",
"if",
"not",
"index_name",
":",
"index_name",
"=",
"'Index'",
"data_schema",
".",
"_add_field",
"(",
"index_name",
",",
"'INTEGER'",
")",
"table_schema",
"=",
"self",
".",
"schema",
"# Do some validation of the two schema to make sure they are compatible.",
"for",
"data_field",
"in",
"data_schema",
":",
"name",
"=",
"data_field",
".",
"name",
"table_field",
"=",
"table_schema",
"[",
"name",
"]",
"if",
"table_field",
"is",
"None",
":",
"raise",
"Exception",
"(",
"'Table does not contain field %s'",
"%",
"name",
")",
"data_type",
"=",
"data_field",
".",
"data_type",
"table_type",
"=",
"table_field",
".",
"data_type",
"if",
"table_type",
"!=",
"data_type",
":",
"raise",
"Exception",
"(",
"'Field %s in data has type %s but in table has type %s'",
"%",
"(",
"name",
",",
"data_type",
",",
"table_type",
")",
")",
"total_rows",
"=",
"len",
"(",
"data",
")",
"total_pushed",
"=",
"0",
"job_id",
"=",
"uuid",
".",
"uuid4",
"(",
")",
".",
"hex",
"rows",
"=",
"[",
"]",
"column_name_map",
"=",
"{",
"}",
"is_dataframe",
"=",
"isinstance",
"(",
"data",
",",
"pandas",
".",
"DataFrame",
")",
"if",
"is_dataframe",
":",
"# reset_index creates a new dataframe so we don't affect the original. reset_index(drop=True)",
"# drops the original index and uses an integer range.",
"gen",
"=",
"data",
".",
"reset_index",
"(",
"drop",
"=",
"not",
"include_index",
")",
".",
"iterrows",
"(",
")",
"else",
":",
"gen",
"=",
"enumerate",
"(",
"data",
")",
"for",
"index",
",",
"row",
"in",
"gen",
":",
"if",
"is_dataframe",
":",
"row",
"=",
"row",
".",
"to_dict",
"(",
")",
"elif",
"include_index",
":",
"row",
"[",
"index_name",
"]",
"=",
"index",
"rows",
".",
"append",
"(",
"{",
"'json'",
":",
"self",
".",
"_encode_dict_as_row",
"(",
"row",
",",
"column_name_map",
")",
",",
"'insertId'",
":",
"job_id",
"+",
"str",
"(",
"index",
")",
"}",
")",
"total_pushed",
"+=",
"1",
"if",
"(",
"total_pushed",
"==",
"total_rows",
")",
"or",
"(",
"len",
"(",
"rows",
")",
"==",
"max_rows_per_post",
")",
":",
"try",
":",
"response",
"=",
"self",
".",
"_api",
".",
"tabledata_insert_all",
"(",
"self",
".",
"_name_parts",
",",
"rows",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e",
"if",
"'insertErrors'",
"in",
"response",
":",
"raise",
"Exception",
"(",
"'insertAll failed: %s'",
"%",
"response",
"[",
"'insertErrors'",
"]",
")",
"time",
".",
"sleep",
"(",
"post_interval",
")",
"# Streaming API is rate-limited",
"rows",
"=",
"[",
"]",
"# Block until data is ready",
"while",
"True",
":",
"self",
".",
"_info",
"=",
"self",
".",
"_api",
".",
"tables_get",
"(",
"self",
".",
"_name_parts",
")",
"if",
"'streamingBuffer'",
"not",
"in",
"self",
".",
"_info",
"or",
"'estimatedRows'",
"not",
"in",
"self",
".",
"_info",
"[",
"'streamingBuffer'",
"]",
"or",
"int",
"(",
"self",
".",
"_info",
"[",
"'streamingBuffer'",
"]",
"[",
"'estimatedRows'",
"]",
")",
">",
"0",
":",
"break",
"time",
".",
"sleep",
"(",
"2",
")",
"return",
"self"
] | Insert the contents of a Pandas DataFrame or a list of dictionaries into the table.
The insertion will be performed using at most 500 rows per POST, and at most 10 POSTs per
second, as BigQuery has some limits on streaming rates.
Args:
data: the DataFrame or list to insert.
include_index: whether to include the DataFrame or list index as a column in the BQ table.
index_name: for a list, if include_index is True, this should be the name for the index.
If not specified, 'Index' will be used.
Returns:
The table.
Raises:
Exception if the table doesn't exist, the table's schema differs from the data's schema,
or the insert failed. | [
"Insert",
"the",
"contents",
"of",
"a",
"Pandas",
"DataFrame",
"or",
"a",
"list",
"of",
"dictionaries",
"into",
"the",
"table",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_table.py#L309-L417 | train | 237,879 |
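A hedged usage sketch: the target table must already exist with a schema matching the DataFrame, since insert_data raises otherwise. Names are hypothetical:

import datalab.bigquery as bq
import pandas as pd

table = bq.Table('my_dataset.events')  # hypothetical, pre-created table
df = pd.DataFrame({'name': ['a', 'b'], 'value': [1, 2]})
# Streams at most 500 rows per POST with a 0.05s pause, per the limits above,
# then blocks until the streaming buffer reports rows.
table.insert_data(df)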
googledatalab/pydatalab | datalab/bigquery/_table.py | Table.range | def range(self, start_row=0, max_rows=None):
""" Get an iterator to iterate through a set of table rows.
Args:
start_row: the row of the table at which to start the iteration (default 0)
max_rows: an upper limit on the number of rows to iterate through (default None)
Returns:
A row iterator.
"""
fetcher = self._get_row_fetcher(start_row=start_row, max_rows=max_rows)
return iter(datalab.utils.Iterator(fetcher)) | python | def range(self, start_row=0, max_rows=None):
""" Get an iterator to iterate through a set of table rows.
Args:
start_row: the row of the table at which to start the iteration (default 0)
max_rows: an upper limit on the number of rows to iterate through (default None)
Returns:
A row iterator.
"""
fetcher = self._get_row_fetcher(start_row=start_row, max_rows=max_rows)
return iter(datalab.utils.Iterator(fetcher)) | [
"def",
"range",
"(",
"self",
",",
"start_row",
"=",
"0",
",",
"max_rows",
"=",
"None",
")",
":",
"fetcher",
"=",
"self",
".",
"_get_row_fetcher",
"(",
"start_row",
"=",
"start_row",
",",
"max_rows",
"=",
"max_rows",
")",
"return",
"iter",
"(",
"datalab",
".",
"utils",
".",
"Iterator",
"(",
"fetcher",
")",
")"
] | Get an iterator to iterate through a set of table rows.
Args:
start_row: the row of the table at which to start the iteration (default 0)
max_rows: an upper limit on the number of rows to iterate through (default None)
Returns:
A row iterator. | [
"Get",
"an",
"iterator",
"to",
"iterate",
"through",
"a",
"set",
"of",
"table",
"rows",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_table.py#L614-L625 | train | 237,880 |
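A short sketch of paging through rows with range (hypothetical table name):

import datalab.bigquery as bq

table = bq.Table('my_dataset.events')  # hypothetical
# Rows 100..199 only; the fetcher pages lazily rather than loading the table.
for row in table.range(start_row=100, max_rows=100):
  print(row)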
googledatalab/pydatalab | datalab/bigquery/_table.py | Table.to_file_async | def to_file_async(self, destination, format='csv', csv_delimiter=',', csv_header=True):
"""Start saving the results to a local file in CSV format and return a Job for completion.
Args:
destination: path on the local filesystem for the saved results.
format: the format to use for the exported data; currently only 'csv' is supported.
csv_delimiter: for CSV exports, the field delimiter to use. Defaults to ','
csv_header: for CSV exports, whether to include an initial header line. Default true.
Returns:
A Job for the async save operation.
Raises:
An Exception if the operation failed.
"""
self.to_file(destination, format=format, csv_delimiter=csv_delimiter, csv_header=csv_header) | python | def to_file_async(self, destination, format='csv', csv_delimiter=',', csv_header=True):
"""Start saving the results to a local file in CSV format and return a Job for completion.
Args:
destination: path on the local filesystem for the saved results.
format: the format to use for the exported data; currently only 'csv' is supported.
csv_delimiter: for CSV exports, the field delimiter to use. Defaults to ','
csv_header: for CSV exports, whether to include an initial header line. Default true.
Returns:
A Job for the async save operation.
Raises:
An Exception if the operation failed.
"""
self.to_file(destination, format=format, csv_delimiter=csv_delimiter, csv_header=csv_header) | [
"def",
"to_file_async",
"(",
"self",
",",
"destination",
",",
"format",
"=",
"'csv'",
",",
"csv_delimiter",
"=",
"','",
",",
"csv_header",
"=",
"True",
")",
":",
"self",
".",
"to_file",
"(",
"destination",
",",
"format",
"=",
"format",
",",
"csv_delimiter",
"=",
"csv_delimiter",
",",
"csv_header",
"=",
"csv_header",
")"
] | Start saving the results to a local file in CSV format and return a Job for completion.
Args:
destination: path on the local filesystem for the saved results.
format: the format to use for the exported data; currently only 'csv' is supported.
csv_delimiter: for CSV exports, the field delimiter to use. Defaults to ','
csv_header: for CSV exports, whether to include an initial header line. Default true.
Returns:
A Job for the async save operation.
Raises:
An Exception if the operation failed. | [
"Start",
"saving",
"the",
"results",
"to",
"a",
"local",
"file",
"in",
"CSV",
"format",
"and",
"return",
"a",
"Job",
"for",
"completion",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_table.py#L680-L693 | train | 237,881 |
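Note that despite its name and docstring, the body above delegates straight to to_file and returns nothing, so the call blocks and no Job comes back. A usage sketch (hypothetical table and path):

import datalab.bigquery as bq

table = bq.Table('my_dataset.events')  # hypothetical
# Blocks until the CSV is written; see the note above about the return value.
table.to_file_async('/tmp/events.csv', csv_delimiter='|', csv_header=True)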
googledatalab/pydatalab | datalab/bigquery/_table.py | Table.update | def update(self, friendly_name=None, description=None, expiry=None, schema=None):
""" Selectively updates Table information.
Any parameters that are omitted or None are not updated.
Args:
friendly_name: if not None, the new friendly name.
description: if not None, the new description.
expiry: if not None, the new expiry time, either as a DateTime or milliseconds since epoch.
schema: if not None, the new schema: either a list of dictionaries or a Schema.
"""
self._load_info()
if friendly_name is not None:
self._info['friendlyName'] = friendly_name
if description is not None:
self._info['description'] = description
if expiry is not None:
if isinstance(expiry, datetime.datetime):
expiry = calendar.timegm(expiry.utctimetuple()) * 1000
self._info['expirationTime'] = expiry
if schema is not None:
if isinstance(schema, _schema.Schema):
schema = schema._bq_schema
self._info['schema'] = {'fields': schema}
try:
self._api.table_update(self._name_parts, self._info)
except datalab.utils.RequestException:
# The cached metadata is out of sync now; abandon it.
self._info = None
except Exception as e:
raise e | python | def update(self, friendly_name=None, description=None, expiry=None, schema=None):
""" Selectively updates Table information.
Any parameters that are omitted or None are not updated.
Args:
friendly_name: if not None, the new friendly name.
description: if not None, the new description.
expiry: if not None, the new expiry time, either as a DateTime or milliseconds since epoch.
schema: if not None, the new schema: either a list of dictionaries or a Schema.
"""
self._load_info()
if friendly_name is not None:
self._info['friendlyName'] = friendly_name
if description is not None:
self._info['description'] = description
if expiry is not None:
if isinstance(expiry, datetime.datetime):
expiry = calendar.timegm(expiry.utctimetuple()) * 1000
self._info['expirationTime'] = expiry
if schema is not None:
if isinstance(schema, _schema.Schema):
schema = schema._bq_schema
self._info['schema'] = {'fields': schema}
try:
self._api.table_update(self._name_parts, self._info)
except datalab.utils.RequestException:
# The cached metadata is out of sync now; abandon it.
self._info = None
except Exception as e:
raise e | [
"def",
"update",
"(",
"self",
",",
"friendly_name",
"=",
"None",
",",
"description",
"=",
"None",
",",
"expiry",
"=",
"None",
",",
"schema",
"=",
"None",
")",
":",
"self",
".",
"_load_info",
"(",
")",
"if",
"friendly_name",
"is",
"not",
"None",
":",
"self",
".",
"_info",
"[",
"'friendlyName'",
"]",
"=",
"friendly_name",
"if",
"description",
"is",
"not",
"None",
":",
"self",
".",
"_info",
"[",
"'description'",
"]",
"=",
"description",
"if",
"expiry",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"expiry",
",",
"datetime",
".",
"datetime",
")",
":",
"expiry",
"=",
"calendar",
".",
"timegm",
"(",
"expiry",
".",
"utctimetuple",
"(",
")",
")",
"*",
"1000",
"self",
".",
"_info",
"[",
"'expirationTime'",
"]",
"=",
"expiry",
"if",
"schema",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"schema",
",",
"_schema",
".",
"Schema",
")",
":",
"schema",
"=",
"schema",
".",
"_bq_schema",
"self",
".",
"_info",
"[",
"'schema'",
"]",
"=",
"{",
"'fields'",
":",
"schema",
"}",
"try",
":",
"self",
".",
"_api",
".",
"table_update",
"(",
"self",
".",
"_name_parts",
",",
"self",
".",
"_info",
")",
"except",
"datalab",
".",
"utils",
".",
"RequestException",
":",
"# The cached metadata is out of sync now; abandon it.",
"self",
".",
"_info",
"=",
"None",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e"
] | Selectively updates Table information.
Any parameters that are omitted or None are not updated.
Args:
friendly_name: if not None, the new friendly name.
description: if not None, the new description.
expiry: if not None, the new expiry time, either as a DateTime or milliseconds since epoch.
schema: if not None, the new schema: either a list of dictionaries or a Schema. | [
"Selectively",
"updates",
"Table",
"information",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_table.py#L712-L742 | train | 237,882 |
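A sketch of a partial metadata update: omitted arguments are left untouched, and a datetime expiry is converted to epoch milliseconds in the body above (table name hypothetical):

import datetime
import datalab.bigquery as bq

table = bq.Table('my_dataset.events')  # hypothetical
# Only the description and expiry change; friendly_name and schema stay put.
table.update(description='Raw click events',
             expiry=datetime.datetime.utcnow() + datetime.timedelta(days=7))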
googledatalab/pydatalab | datalab/bigquery/_table.py | Table.to_query | def to_query(self, fields=None):
""" Return a Query for this Table.
Args:
fields: the fields to return. If None, all fields will be returned. This can be a string
which will be injected into the Query after SELECT, or a list of field names.
Returns:
A Query object that will return the specified fields from the records in the Table.
"""
# Do import here to avoid top-level circular dependencies.
from . import _query
if fields is None:
fields = '*'
elif isinstance(fields, list):
fields = ','.join(fields)
return _query.Query('SELECT %s FROM %s' % (fields, self._repr_sql_()), context=self._context) | python | def to_query(self, fields=None):
""" Return a Query for this Table.
Args:
fields: the fields to return. If None, all fields will be returned. This can be a string
which will be injected into the Query after SELECT, or a list of field names.
Returns:
A Query object that will return the specified fields from the records in the Table.
"""
# Do import here to avoid top-level circular dependencies.
from . import _query
if fields is None:
fields = '*'
elif isinstance(fields, list):
fields = ','.join(fields)
return _query.Query('SELECT %s FROM %s' % (fields, self._repr_sql_()), context=self._context) | [
"def",
"to_query",
"(",
"self",
",",
"fields",
"=",
"None",
")",
":",
"# Do import here to avoid top-level circular dependencies.",
"from",
".",
"import",
"_query",
"if",
"fields",
"is",
"None",
":",
"fields",
"=",
"'*'",
"elif",
"isinstance",
"(",
"fields",
",",
"list",
")",
":",
"fields",
"=",
"','",
".",
"join",
"(",
"fields",
")",
"return",
"_query",
".",
"Query",
"(",
"'SELECT %s FROM %s'",
"%",
"(",
"fields",
",",
"self",
".",
"_repr_sql_",
"(",
")",
")",
",",
"context",
"=",
"self",
".",
"_context",
")"
] | Return a Query for this Table.
Args:
fields: the fields to return. If None, all fields will be returned. This can be a string
which will be injected into the Query after SELECT, or a list of field names.
Returns:
A Query object that will return the specified fields from the records in the Table. | [
"Return",
"a",
"Query",
"for",
"this",
"Table",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_table.py#L914-L930 | train | 237,883 |
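A small sketch of to_query; passing a list joins the field names with commas, per the body above (table name hypothetical):

import datalab.bigquery as bq

table = bq.Table('my_dataset.events')  # hypothetical
q = table.to_query(fields=['name', 'value'])
# q is a Query selecting only name and value from the table's records.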
googledatalab/pydatalab | datalab/storage/_item.py | Item.copy_to | def copy_to(self, new_key, bucket=None):
"""Copies this item to the specified new key.
Args:
new_key: the new key to copy this item to.
bucket: the bucket of the new item; if None (the default) use the same bucket.
Returns:
An Item corresponding to the new key.
Raises:
Exception if there was an error copying the item.
"""
if bucket is None:
bucket = self._bucket
try:
new_info = self._api.objects_copy(self._bucket, self._key, bucket, new_key)
except Exception as e:
raise e
return Item(bucket, new_key, new_info, context=self._context) | python | def copy_to(self, new_key, bucket=None):
"""Copies this item to the specified new key.
Args:
new_key: the new key to copy this item to.
bucket: the bucket of the new item; if None (the default) use the same bucket.
Returns:
An Item corresponding to the new key.
Raises:
Exception if there was an error copying the item.
"""
if bucket is None:
bucket = self._bucket
try:
new_info = self._api.objects_copy(self._bucket, self._key, bucket, new_key)
except Exception as e:
raise e
return Item(bucket, new_key, new_info, context=self._context) | [
"def",
"copy_to",
"(",
"self",
",",
"new_key",
",",
"bucket",
"=",
"None",
")",
":",
"if",
"bucket",
"is",
"None",
":",
"bucket",
"=",
"self",
".",
"_bucket",
"try",
":",
"new_info",
"=",
"self",
".",
"_api",
".",
"objects_copy",
"(",
"self",
".",
"_bucket",
",",
"self",
".",
"_key",
",",
"bucket",
",",
"new_key",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e",
"return",
"Item",
"(",
"bucket",
",",
"new_key",
",",
"new_info",
",",
"context",
"=",
"self",
".",
"_context",
")"
] | Copies this item to the specified new key.
Args:
new_key: the new key to copy this item to.
bucket: the bucket of the new item; if None (the default) use the same bucket.
Returns:
An Item corresponding to the new key.
Raises:
Exception if there was an error copying the item. | [
"Copies",
"this",
"item",
"to",
"the",
"specified",
"new",
"key",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/storage/_item.py#L111-L128 | train | 237,884 |
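A hedged sketch of copying within and across buckets; the Item constructor arguments mirror the (bucket, key) pair used above, and all names are hypothetical:

import datalab.storage as storage

item = storage.Item('my-bucket', 'data/source.csv')  # hypothetical
backup = item.copy_to('data/backup.csv')  # same bucket by default
mirrored = item.copy_to('data/source.csv', bucket='my-other-bucket')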
googledatalab/pydatalab | datalab/storage/_item.py | Item.exists | def exists(self):
""" Checks if the item exists. """
try:
return self.metadata is not None
except datalab.utils.RequestException:
return False
except Exception as e:
raise e | python | def exists(self):
""" Checks if the item exists. """
try:
return self.metadata is not None
except datalab.utils.RequestException:
return False
except Exception as e:
raise e | [
"def",
"exists",
"(",
"self",
")",
":",
"try",
":",
"return",
"self",
".",
"metadata",
"is",
"not",
"None",
"except",
"datalab",
".",
"utils",
".",
"RequestException",
":",
"return",
"False",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e"
] | Checks if the item exists. | [
"Checks",
"if",
"the",
"item",
"exists",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/storage/_item.py#L130-L137 | train | 237,885 |
googledatalab/pydatalab | datalab/storage/_item.py | Item.delete | def delete(self):
"""Deletes this item from its bucket.
Raises:
Exception if there was an error deleting the item.
"""
if self.exists():
try:
self._api.objects_delete(self._bucket, self._key)
except Exception as e:
raise e | python | def delete(self):
"""Deletes this item from its bucket.
Raises:
Exception if there was an error deleting the item.
"""
if self.exists():
try:
self._api.objects_delete(self._bucket, self._key)
except Exception as e:
raise e | [
"def",
"delete",
"(",
"self",
")",
":",
"if",
"self",
".",
"exists",
"(",
")",
":",
"try",
":",
"self",
".",
"_api",
".",
"objects_delete",
"(",
"self",
".",
"_bucket",
",",
"self",
".",
"_key",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e"
] | Deletes this item from its bucket.
Raises:
Exception if there was an error deleting the item. | [
"Deletes",
"this",
"item",
"from",
"its",
"bucket",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/storage/_item.py#L139-L149 | train | 237,886 |
googledatalab/pydatalab | datalab/storage/_item.py | Item.write_to | def write_to(self, content, content_type):
"""Writes text content to this item.
Args:
content: the text content to be written.
content_type: the type of text content.
Raises:
Exception if there was an error requesting the item's content.
"""
try:
self._api.object_upload(self._bucket, self._key, content, content_type)
except Exception as e:
raise e | python | def write_to(self, content, content_type):
"""Writes text content to this item.
Args:
content: the text content to be written.
content_type: the type of text content.
Raises:
Exception if there was an error requesting the item's content.
"""
try:
self._api.object_upload(self._bucket, self._key, content, content_type)
except Exception as e:
raise e | [
"def",
"write_to",
"(",
"self",
",",
"content",
",",
"content_type",
")",
":",
"try",
":",
"self",
".",
"_api",
".",
"object_upload",
"(",
"self",
".",
"_bucket",
",",
"self",
".",
"_key",
",",
"content",
",",
"content_type",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e"
] | Writes text content to this item.
Args:
content: the text content to be written.
content_type: the type of text content.
Raises:
Exception if there was an error requesting the item's content. | [
"Writes",
"text",
"content",
"to",
"this",
"item",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/storage/_item.py#L212-L224 | train | 237,887 |
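A minimal write sketch (hypothetical bucket and key):

import datalab.storage as storage

item = storage.Item('my-bucket', 'notes/hello.txt')  # hypothetical
item.write_to('hello, world\n', 'text/plain')  # content plus its MIME type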
googledatalab/pydatalab | datalab/storage/_item.py | Items.contains | def contains(self, key):
"""Checks if the specified item exists.
Args:
key: the key of the item to lookup.
Returns:
True if the item exists; False otherwise.
Raises:
Exception if there was an error requesting information about the item.
"""
try:
self._api.objects_get(self._bucket, key)
except datalab.utils.RequestException as e:
if e.status == 404:
return False
raise e
except Exception as e:
raise e
return True | python | def contains(self, key):
"""Checks if the specified item exists.
Args:
key: the key of the item to lookup.
Returns:
True if the item exists; False otherwise.
Raises:
Exception if there was an error requesting information about the item.
"""
try:
self._api.objects_get(self._bucket, key)
except datalab.utils.RequestException as e:
if e.status == 404:
return False
raise e
except Exception as e:
raise e
return True | [
"def",
"contains",
"(",
"self",
",",
"key",
")",
":",
"try",
":",
"self",
".",
"_api",
".",
"objects_get",
"(",
"self",
".",
"_bucket",
",",
"key",
")",
"except",
"datalab",
".",
"utils",
".",
"RequestException",
"as",
"e",
":",
"if",
"e",
".",
"status",
"==",
"404",
":",
"return",
"False",
"raise",
"e",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e",
"return",
"True"
] | Checks if the specified item exists.
Args:
key: the key of the item to lookup.
Returns:
True if the item exists; False otherwise.
Raises:
Exception if there was an error requesting information about the item. | [
"Checks",
"if",
"the",
"specified",
"item",
"exists",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/storage/_item.py#L252-L270 | train | 237,888 |
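A hedged existence check; the items() accessor on Bucket is assumed here as the way to obtain the Items collection above, and may differ in the real module:

import datalab.storage as storage

bucket = storage.Bucket('my-bucket')  # hypothetical
items = bucket.items()  # assumed accessor for the Items collection
if items.contains('data/source.csv'):
  print('object exists')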
googledatalab/pydatalab | google/datalab/utils/_http.py | Http.request | def request(url, args=None, data=None, headers=None, method=None,
credentials=None, raw_response=False, stats=None):
"""Issues HTTP requests.
Args:
url: the URL to request.
args: optional query string arguments.
data: optional data to be sent within the request.
headers: optional headers to include in the request.
method: optional HTTP method to use. If unspecified this is inferred
(GET or POST) based on the existence of request data.
credentials: optional set of credentials to authorize the request.
raw_response: whether the raw response content should be returned as-is.
stats: an optional dictionary that, if provided, will be populated with some
useful info about the request, like 'duration' in seconds and 'data_size' in
bytes. These may be useful for optimizing access to rate-limited APIs.
Returns:
The parsed response object.
Raises:
Exception when the HTTP request fails or the response cannot be processed.
"""
if headers is None:
headers = {}
headers['user-agent'] = 'GoogleCloudDataLab/1.0'
# Add querystring to the URL if there are any arguments.
if args is not None:
qs = urllib.parse.urlencode(args)
url = url + '?' + qs
# Setup method to POST if unspecified, and appropriate request headers
# if there is data to be sent within the request.
if data is not None:
if method is None:
method = 'POST'
if data != '':
# If there is a content type specified, use it (and the data) as-is.
# Otherwise, assume JSON, and serialize the data object.
if 'Content-Type' not in headers:
data = json.dumps(data)
headers['Content-Type'] = 'application/json'
headers['Content-Length'] = str(len(data))
else:
if method == 'POST':
headers['Content-Length'] = '0'
# If the method is still unset, i.e. it was unspecified, and there
# was no data to be POSTed, then default to GET request.
if method is None:
method = 'GET'
http = Http.http
# Authorize with credentials if given
if credentials is not None:
# Make a copy of the shared http instance before we modify it.
http = copy.copy(http)
http = google_auth_httplib2.AuthorizedHttp(credentials)
if stats is not None:
stats['duration'] = datetime.datetime.utcnow()
response = None
try:
log.debug('request: method[%(method)s], url[%(url)s], body[%(data)s]' % locals())
response, content = http.request(url,
method=method,
body=data,
headers=headers)
if 200 <= response.status < 300:
if raw_response:
return content
if type(content) == str:
return json.loads(content)
else:
return json.loads(str(content, encoding='UTF-8'))
else:
raise RequestException(response.status, content)
except ValueError:
raise Exception('Failed to process HTTP response.')
except httplib2.HttpLib2Error:
raise Exception('Failed to send HTTP request.')
finally:
if stats is not None:
stats['data_size'] = len(data)
stats['status'] = response.status
stats['duration'] = (datetime.datetime.utcnow() - stats['duration']).total_seconds() | python | def request(url, args=None, data=None, headers=None, method=None,
credentials=None, raw_response=False, stats=None):
"""Issues HTTP requests.
Args:
url: the URL to request.
args: optional query string arguments.
data: optional data to be sent within the request.
headers: optional headers to include in the request.
method: optional HTTP method to use. If unspecified this is inferred
(GET or POST) based on the existence of request data.
credentials: optional set of credentials to authorize the request.
raw_response: whether the raw response content should be returned as-is.
stats: an optional dictionary that, if provided, will be populated with some
useful info about the request, like 'duration' in seconds and 'data_size' in
bytes. These may be useful for optimizing access to rate-limited APIs.
Returns:
The parsed response object.
Raises:
Exception when the HTTP request fails or the response cannot be processed.
"""
if headers is None:
headers = {}
headers['user-agent'] = 'GoogleCloudDataLab/1.0'
# Add querystring to the URL if there are any arguments.
if args is not None:
qs = urllib.parse.urlencode(args)
url = url + '?' + qs
# Setup method to POST if unspecified, and appropriate request headers
# if there is data to be sent within the request.
if data is not None:
if method is None:
method = 'POST'
if data != '':
# If there is a content type specified, use it (and the data) as-is.
# Otherwise, assume JSON, and serialize the data object.
if 'Content-Type' not in headers:
data = json.dumps(data)
headers['Content-Type'] = 'application/json'
headers['Content-Length'] = str(len(data))
else:
if method == 'POST':
headers['Content-Length'] = '0'
# If the method is still unset, i.e. it was unspecified, and there
# was no data to be POSTed, then default to GET request.
if method is None:
method = 'GET'
http = Http.http
# Authorize with credentials if given
if credentials is not None:
# Make a copy of the shared http instance before we modify it.
http = copy.copy(http)
http = google_auth_httplib2.AuthorizedHttp(credentials)
if stats is not None:
stats['duration'] = datetime.datetime.utcnow()
response = None
try:
log.debug('request: method[%(method)s], url[%(url)s], body[%(data)s]' % locals())
response, content = http.request(url,
method=method,
body=data,
headers=headers)
if 200 <= response.status < 300:
if raw_response:
return content
if type(content) == str:
return json.loads(content)
else:
return json.loads(str(content, encoding='UTF-8'))
else:
raise RequestException(response.status, content)
except ValueError:
raise Exception('Failed to process HTTP response.')
except httplib2.HttpLib2Error:
raise Exception('Failed to send HTTP request.')
finally:
if stats is not None:
stats['data_size'] = len(data)
stats['status'] = response.status
stats['duration'] = (datetime.datetime.utcnow() - stats['duration']).total_seconds() | [
"def",
"request",
"(",
"url",
",",
"args",
"=",
"None",
",",
"data",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"method",
"=",
"None",
",",
"credentials",
"=",
"None",
",",
"raw_response",
"=",
"False",
",",
"stats",
"=",
"None",
")",
":",
"if",
"headers",
"is",
"None",
":",
"headers",
"=",
"{",
"}",
"headers",
"[",
"'user-agent'",
"]",
"=",
"'GoogleCloudDataLab/1.0'",
"# Add querystring to the URL if there are any arguments.",
"if",
"args",
"is",
"not",
"None",
":",
"qs",
"=",
"urllib",
".",
"parse",
".",
"urlencode",
"(",
"args",
")",
"url",
"=",
"url",
"+",
"'?'",
"+",
"qs",
"# Setup method to POST if unspecified, and appropriate request headers",
"# if there is data to be sent within the request.",
"if",
"data",
"is",
"not",
"None",
":",
"if",
"method",
"is",
"None",
":",
"method",
"=",
"'POST'",
"if",
"data",
"!=",
"''",
":",
"# If there is a content type specified, use it (and the data) as-is.",
"# Otherwise, assume JSON, and serialize the data object.",
"if",
"'Content-Type'",
"not",
"in",
"headers",
":",
"data",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
"headers",
"[",
"'Content-Type'",
"]",
"=",
"'application/json'",
"headers",
"[",
"'Content-Length'",
"]",
"=",
"str",
"(",
"len",
"(",
"data",
")",
")",
"else",
":",
"if",
"method",
"==",
"'POST'",
":",
"headers",
"[",
"'Content-Length'",
"]",
"=",
"'0'",
"# If the method is still unset, i.e. it was unspecified, and there",
"# was no data to be POSTed, then default to GET request.",
"if",
"method",
"is",
"None",
":",
"method",
"=",
"'GET'",
"http",
"=",
"Http",
".",
"http",
"# Authorize with credentials if given",
"if",
"credentials",
"is",
"not",
"None",
":",
"# Make a copy of the shared http instance before we modify it.",
"http",
"=",
"copy",
".",
"copy",
"(",
"http",
")",
"http",
"=",
"google_auth_httplib2",
".",
"AuthorizedHttp",
"(",
"credentials",
")",
"if",
"stats",
"is",
"not",
"None",
":",
"stats",
"[",
"'duration'",
"]",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"response",
"=",
"None",
"try",
":",
"log",
".",
"debug",
"(",
"'request: method[%(method)s], url[%(url)s], body[%(data)s]'",
"%",
"locals",
"(",
")",
")",
"response",
",",
"content",
"=",
"http",
".",
"request",
"(",
"url",
",",
"method",
"=",
"method",
",",
"body",
"=",
"data",
",",
"headers",
"=",
"headers",
")",
"if",
"200",
"<=",
"response",
".",
"status",
"<",
"300",
":",
"if",
"raw_response",
":",
"return",
"content",
"if",
"type",
"(",
"content",
")",
"==",
"str",
":",
"return",
"json",
".",
"loads",
"(",
"content",
")",
"else",
":",
"return",
"json",
".",
"loads",
"(",
"str",
"(",
"content",
",",
"encoding",
"=",
"'UTF-8'",
")",
")",
"else",
":",
"raise",
"RequestException",
"(",
"response",
".",
"status",
",",
"content",
")",
"except",
"ValueError",
":",
"raise",
"Exception",
"(",
"'Failed to process HTTP response.'",
")",
"except",
"httplib2",
".",
"HttpLib2Error",
":",
"raise",
"Exception",
"(",
"'Failed to send HTTP request.'",
")",
"finally",
":",
"if",
"stats",
"is",
"not",
"None",
":",
"stats",
"[",
"'data_size'",
"]",
"=",
"len",
"(",
"data",
")",
"stats",
"[",
"'status'",
"]",
"=",
"response",
".",
"status",
"stats",
"[",
"'duration'",
"]",
"=",
"(",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"-",
"stats",
"[",
"'duration'",
"]",
")",
".",
"total_seconds",
"(",
")"
] | Issues HTTP requests.
Args:
url: the URL to request.
args: optional query string arguments.
data: optional data to be sent within the request.
headers: optional headers to include in the request.
method: optional HTTP method to use. If unspecified this is inferred
(GET or POST) based on the existence of request data.
credentials: optional set of credentials to authorize the request.
raw_response: whether the raw response content should be returned as-is.
stats: an optional dictionary that, if provided, will be populated with some
useful info about the request, like 'duration' in seconds and 'data_size' in
bytes. These may be useful for optimizing access to rate-limited APIs.
Returns:
The parsed response object.
Raises:
Exception when the HTTP request fails or the response cannot be processed. | [
"Issues",
"HTTP",
"requests",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/_http.py#L81-L167 | train | 237,889 |
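Two caveats are visible in the body above: the copy.copy(http) result is immediately overwritten by the AuthorizedHttp wrapper (so the defensive copy has no effect unless it is passed into the wrapper), and the finally block calls len(data), which raises when stats is supplied for a request with no body. A minimal sketch that avoids both — an unauthenticated GET with no stats, assuming the Http class above is importable:

apis = Http.request('https://www.googleapis.com/discovery/v1/apis',
                    args={'preferred': 'true'})
print(len(apis.get('items', [])))  # parsed JSON comes back as a dict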
googledatalab/pydatalab | google/datalab/contrib/pipeline/commands/_pipeline.py | _add_command | def _add_command(parser, subparser_fn, handler, cell_required=False,
cell_prohibited=False):
""" Create and initialize a pipeline subcommand handler. """
sub_parser = subparser_fn(parser)
sub_parser.set_defaults(func=lambda args, cell: _dispatch_handler(
args, cell, sub_parser, handler, cell_required=cell_required,
cell_prohibited=cell_prohibited)) | python | def _add_command(parser, subparser_fn, handler, cell_required=False,
cell_prohibited=False):
""" Create and initialize a pipeline subcommand handler. """
sub_parser = subparser_fn(parser)
sub_parser.set_defaults(func=lambda args, cell: _dispatch_handler(
args, cell, sub_parser, handler, cell_required=cell_required,
cell_prohibited=cell_prohibited)) | [
"def",
"_add_command",
"(",
"parser",
",",
"subparser_fn",
",",
"handler",
",",
"cell_required",
"=",
"False",
",",
"cell_prohibited",
"=",
"False",
")",
":",
"sub_parser",
"=",
"subparser_fn",
"(",
"parser",
")",
"sub_parser",
".",
"set_defaults",
"(",
"func",
"=",
"lambda",
"args",
",",
"cell",
":",
"_dispatch_handler",
"(",
"args",
",",
"cell",
",",
"sub_parser",
",",
"handler",
",",
"cell_required",
"=",
"cell_required",
",",
"cell_prohibited",
"=",
"cell_prohibited",
")",
")"
] | Create and initialize a pipeline subcommand handler. | [
"Create",
"and",
"initialize",
"a",
"pipeline",
"subcommand",
"handler",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/pipeline/commands/_pipeline.py#L57-L63 | train | 237,890 |
googledatalab/pydatalab | google/datalab/contrib/pipeline/commands/_pipeline.py | pipeline | def pipeline(line, cell=None):
"""Implements the pipeline cell magic for ipython notebooks.
The supported syntax is:
%%pipeline <command> [<args>]
<cell>
or:
%pipeline <command> [<args>]
Use %pipeline --help for a list of commands, or %pipeline <command> --help for
help on a specific command.
"""
return google.datalab.utils.commands.handle_magic_line(line, cell, _pipeline_parser) | python | def pipeline(line, cell=None):
"""Implements the pipeline cell magic for ipython notebooks.
The supported syntax is:
%%pipeline <command> [<args>]
<cell>
or:
%pipeline <command> [<args>]
Use %pipeline --help for a list of commands, or %pipeline <command> --help for
help on a specific command.
"""
return google.datalab.utils.commands.handle_magic_line(line, cell, _pipeline_parser) | [
"def",
"pipeline",
"(",
"line",
",",
"cell",
"=",
"None",
")",
":",
"return",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"handle_magic_line",
"(",
"line",
",",
"cell",
",",
"_pipeline_parser",
")"
] | Implements the pipeline cell magic for ipython notebooks.
The supported syntax is:
%%pipeline <command> [<args>]
<cell>
or:
%pipeline <command> [<args>]
Use %pipeline --help for a list of commands, or %pipeline <command> --help for
help on a specific command. | [
"Implements",
"the",
"pipeline",
"cell",
"magic",
"for",
"ipython",
"notebooks",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/pipeline/commands/_pipeline.py#L90-L105 | train | 237,891 |
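Because pipeline is a thin wrapper over handle_magic_line, the line form can also be driven programmatically — a sketch, assuming the module defining pipeline above is imported:

pipeline('--help')  # equivalent to typing "%pipeline --help" in a notebook cell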
googledatalab/pydatalab | google/datalab/contrib/pipeline/commands/_pipeline.py | _dispatch_handler | def _dispatch_handler(args, cell, parser, handler, cell_required=False,
cell_prohibited=False):
""" Makes sure cell magics include cell and line magics don't, before
dispatching to handler.
Args:
args: the parsed arguments from the magic line.
cell: the contents of the cell, if any.
parser: the argument parser for <cmd>; used for error message.
handler: the handler to call if the cell present/absent check passes.
cell_required: True for cell magics, False for line magics that can't be
cell magics.
cell_prohibited: True for line magics, False for cell magics that can't be
line magics.
Returns:
The result of calling the handler.
Raises:
Exception if the invocation is not valid.
"""
if cell_prohibited:
if cell and len(cell.strip()):
parser.print_help()
raise Exception(
'Additional data is not supported with the %s command.' % parser.prog)
return handler(args)
if cell_required and not cell:
parser.print_help()
raise Exception('The %s command requires additional data' % parser.prog)
return handler(args, cell) | python | def _dispatch_handler(args, cell, parser, handler, cell_required=False,
cell_prohibited=False):
""" Makes sure cell magics include cell and line magics don't, before
dispatching to handler.
Args:
args: the parsed arguments from the magic line.
cell: the contents of the cell, if any.
parser: the argument parser for <cmd>; used for error message.
handler: the handler to call if the cell present/absent check passes.
cell_required: True for cell magics, False for line magics that can't be
cell magics.
cell_prohibited: True for line magics, False for cell magics that can't be
line magics.
Returns:
The result of calling the handler.
Raises:
Exception if the invocation is not valid.
"""
if cell_prohibited:
if cell and len(cell.strip()):
parser.print_help()
raise Exception(
'Additional data is not supported with the %s command.' % parser.prog)
return handler(args)
if cell_required and not cell:
parser.print_help()
raise Exception('The %s command requires additional data' % parser.prog)
return handler(args, cell) | [
"def",
"_dispatch_handler",
"(",
"args",
",",
"cell",
",",
"parser",
",",
"handler",
",",
"cell_required",
"=",
"False",
",",
"cell_prohibited",
"=",
"False",
")",
":",
"if",
"cell_prohibited",
":",
"if",
"cell",
"and",
"len",
"(",
"cell",
".",
"strip",
"(",
")",
")",
":",
"parser",
".",
"print_help",
"(",
")",
"raise",
"Exception",
"(",
"'Additional data is not supported with the %s command.'",
"%",
"parser",
".",
"prog",
")",
"return",
"handler",
"(",
"args",
")",
"if",
"cell_required",
"and",
"not",
"cell",
":",
"parser",
".",
"print_help",
"(",
")",
"raise",
"Exception",
"(",
"'The %s command requires additional data'",
"%",
"parser",
".",
"prog",
")",
"return",
"handler",
"(",
"args",
",",
"cell",
")"
] | Makes sure cell magics include a cell and line magics don't, before
dispatching to the handler.
Args:
args: the parsed arguments from the magic line.
cell: the contents of the cell, if any.
parser: the argument parser for <cmd>; used for error message.
handler: the handler to call if the cell present/absent check passes.
cell_required: True for cell magics, False for line magics that can't be
cell magics.
cell_prohibited: True for line magics, False for cell magics that can't be
line magics.
Returns:
The result of calling the handler.
Raises:
Exception if the invocation is not valid. | [
"Makes",
"sure",
"cell",
"magics",
"include",
"cell",
"and",
"line",
"magics",
"don",
"t",
"before",
"dispatching",
"to",
"handler",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/pipeline/commands/_pipeline.py#L108-L138 | train | 237,892 |
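A minimal standalone sketch of the dispatch pattern this record documents, using only the standard library; the parser and handler wiring below is illustrative, not pydatalab's actual magic plumbing:

import argparse

def dispatch(args, cell, parser, handler, cell_required=False, cell_prohibited=False):
    # Line magics must not carry a cell body; cell magics must.
    if cell_prohibited:
        if cell and cell.strip():
            parser.print_help()
            raise Exception('Additional data is not supported with the %s command.' % parser.prog)
        return handler(args)
    if cell_required and not cell:
        parser.print_help()
        raise Exception('The %s command requires additional data' % parser.prog)
    return handler(args, cell)

parser = argparse.ArgumentParser(prog='example')
print(dispatch({}, 'SELECT 1', parser, lambda a, c: c, cell_required=True))  # prints: SELECT 1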
googledatalab/pydatalab | solutionbox/ml_workbench/xgboost/trainer/feature_analysis.py | expand_defaults | def expand_defaults(schema, features):
"""Add to features any default transformations.
Not every column in the schema has an explicit feature transformation listed
in the featurs file. For these columns, add a default transformation based on
the schema's type. The features dict is modified by this function call.
After this function call, every column in schema is used in a feature, and
every feature uses a column in the schema.
Args:
schema: schema list
features: features dict
Raises:
ValueError: if transform cannot be applied given schema type.
"""
schema_names = [x['name'] for x in schema]
# Add missing source columns
for name, transform in six.iteritems(features):
if 'source_column' not in transform:
transform['source_column'] = name
# Check source columns are in the schema and collect which are used.
used_schema_columns = []
for name, transform in six.iteritems(features):
if transform['source_column'] not in schema_names:
raise ValueError('source column %s is not in the schema for transform %s'
% (transform['source_column'], name))
used_schema_columns.append(transform['source_column'])
# Update default transformation based on schema.
for col_schema in schema:
schema_name = col_schema['name']
schema_type = col_schema['type'].lower()
if schema_type not in constant.NUMERIC_SCHEMA + [constant.STRING_SCHEMA]:
raise ValueError(('Only the following schema types are supported: %s'
% ' '.join(constant.NUMERIC_SCHEMA + [constant.STRING_SCHEMA])))
if schema_name not in used_schema_columns:
# add the default transform to the features
if schema_type in constant.NUMERIC_SCHEMA:
features[schema_name] = {
'transform': constant.DEFAULT_NUMERIC_TRANSFORM,
'source_column': schema_name}
elif schema_type == constant.STRING_SCHEMA:
features[schema_name] = {
'transform': constant.DEFAULT_CATEGORICAL_TRANSFORM,
'source_column': schema_name}
else:
raise NotImplementedError('Unknown type %s' % schema_type) | python | def expand_defaults(schema, features):
"""Add to features any default transformations.
Not every column in the schema has an explicit feature transformation listed
in the features file. For these columns, add a default transformation based on
the schema's type. The features dict is modified by this function call.
After this function call, every column in schema is used in a feature, and
every feature uses a column in the schema.
Args:
schema: schema list
features: features dict
Raises:
ValueError: if transform cannot be applied given schema type.
"""
schema_names = [x['name'] for x in schema]
# Add missing source columns
for name, transform in six.iteritems(features):
if 'source_column' not in transform:
transform['source_column'] = name
# Check source columns are in the schema and collect which are used.
used_schema_columns = []
for name, transform in six.iteritems(features):
if transform['source_column'] not in schema_names:
raise ValueError('source column %s is not in the schema for transform %s'
% (transform['source_column'], name))
used_schema_columns.append(transform['source_column'])
# Update default transformation based on schema.
for col_schema in schema:
schema_name = col_schema['name']
schema_type = col_schema['type'].lower()
if schema_type not in constant.NUMERIC_SCHEMA + [constant.STRING_SCHEMA]:
raise ValueError(('Only the following schema types are supported: %s'
% ' '.join(constant.NUMERIC_SCHEMA + [constant.STRING_SCHEMA])))
if schema_name not in used_schema_columns:
# add the default transform to the features
if schema_type in constant.NUMERIC_SCHEMA:
features[schema_name] = {
'transform': constant.DEFAULT_NUMERIC_TRANSFORM,
'source_column': schema_name}
elif schema_type == constant.STRING_SCHEMA:
features[schema_name] = {
'transform': constant.DEFAULT_CATEGORICAL_TRANSFORM,
'source_column': schema_name}
else:
raise NotImplementedError('Unknown type %s' % schema_type) | [
"def",
"expand_defaults",
"(",
"schema",
",",
"features",
")",
":",
"schema_names",
"=",
"[",
"x",
"[",
"'name'",
"]",
"for",
"x",
"in",
"schema",
"]",
"# Add missing source columns",
"for",
"name",
",",
"transform",
"in",
"six",
".",
"iteritems",
"(",
"features",
")",
":",
"if",
"'source_column'",
"not",
"in",
"transform",
":",
"transform",
"[",
"'source_column'",
"]",
"=",
"name",
"# Check source columns are in the schema and collect which are used.",
"used_schema_columns",
"=",
"[",
"]",
"for",
"name",
",",
"transform",
"in",
"six",
".",
"iteritems",
"(",
"features",
")",
":",
"if",
"transform",
"[",
"'source_column'",
"]",
"not",
"in",
"schema_names",
":",
"raise",
"ValueError",
"(",
"'source column %s is not in the schema for transform %s'",
"%",
"(",
"transform",
"[",
"'source_column'",
"]",
",",
"name",
")",
")",
"used_schema_columns",
".",
"append",
"(",
"transform",
"[",
"'source_column'",
"]",
")",
"# Update default transformation based on schema.",
"for",
"col_schema",
"in",
"schema",
":",
"schema_name",
"=",
"col_schema",
"[",
"'name'",
"]",
"schema_type",
"=",
"col_schema",
"[",
"'type'",
"]",
".",
"lower",
"(",
")",
"if",
"schema_type",
"not",
"in",
"constant",
".",
"NUMERIC_SCHEMA",
"+",
"[",
"constant",
".",
"STRING_SCHEMA",
"]",
":",
"raise",
"ValueError",
"(",
"(",
"'Only the following schema types are supported: %s'",
"%",
"' '",
".",
"join",
"(",
"constant",
".",
"NUMERIC_SCHEMA",
"+",
"[",
"constant",
".",
"STRING_SCHEMA",
"]",
")",
")",
")",
"if",
"schema_name",
"not",
"in",
"used_schema_columns",
":",
"# add the default transform to the features",
"if",
"schema_type",
"in",
"constant",
".",
"NUMERIC_SCHEMA",
":",
"features",
"[",
"schema_name",
"]",
"=",
"{",
"'transform'",
":",
"constant",
".",
"DEFAULT_NUMERIC_TRANSFORM",
",",
"'source_column'",
":",
"schema_name",
"}",
"elif",
"schema_type",
"==",
"constant",
".",
"STRING_SCHEMA",
":",
"features",
"[",
"schema_name",
"]",
"=",
"{",
"'transform'",
":",
"constant",
".",
"DEFAULT_CATEGORICAL_TRANSFORM",
",",
"'source_column'",
":",
"schema_name",
"}",
"else",
":",
"raise",
"NotImplementedError",
"(",
"'Unknown type %s'",
"%",
"schema_type",
")"
] | Add to features any default transformations.
Not every column in the schema has an explicit feature transformation listed
in the featurs file. For these columns, add a default transformation based on
the schema's type. The features dict is modified by this function call.
After this function call, every column in schema is used in a feature, and
every feature uses a column in the schema.
Args:
schema: schema list
features: features dict
Raises:
ValueError: if transform cannot be applied given schema type. | [
"Add",
"to",
"features",
"any",
"default",
"transformations",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/ml_workbench/xgboost/trainer/feature_analysis.py#L114-L167 | train | 237,893 |
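The default-expansion logic above can be demonstrated standalone. The constant values below are illustrative stand-ins for the trainer's constant module, not its verified contents:

# Assumed stand-ins for the trainer's constant module (illustrative only).
NUMERIC_SCHEMA = ['integer', 'float']
DEFAULT_NUMERIC_TRANSFORM = 'identity'
DEFAULT_CATEGORICAL_TRANSFORM = 'one_hot_encoding'

schema = [{'name': 'age', 'type': 'INTEGER'}, {'name': 'city', 'type': 'STRING'}]
features = {'age_bucket': {'transform': 'bucketize', 'source_column': 'age'}}

# Columns already claimed by an explicit feature keep their transform.
used = {t['source_column'] for t in features.values()}
for col in schema:
    if col['name'] not in used:
        numeric = col['type'].lower() in NUMERIC_SCHEMA
        features[col['name']] = {
            'transform': DEFAULT_NUMERIC_TRANSFORM if numeric else DEFAULT_CATEGORICAL_TRANSFORM,
            'source_column': col['name']}

print(features['city'])  # {'transform': 'one_hot_encoding', 'source_column': 'city'}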
googledatalab/pydatalab | datalab/bigquery/commands/_bigquery.py | _sample_cell | def _sample_cell(args, cell_body):
"""Implements the bigquery sample cell magic for ipython notebooks.
Args:
args: the optional arguments following '%%bigquery sample'.
cell_body: optional contents of the cell interpreted as SQL, YAML or JSON.
Returns:
The results of executing the sampling query, or a profile of the sample data.
"""
env = datalab.utils.commands.notebook_environment()
query = None
table = None
view = None
if args['query']:
query = _get_query_argument(args, cell_body, env)
elif args['table']:
table = _get_table(args['table'])
elif args['view']:
view = datalab.utils.commands.get_notebook_item(args['view'])
if not isinstance(view, datalab.bigquery.View):
raise Exception('%s is not a view' % args['view'])
else:
query = datalab.bigquery.Query(cell_body, values=env)
count = args['count']
method = args['method']
if method == 'random':
sampling = datalab.bigquery.Sampling.random(percent=args['percent'], count=count)
elif method == 'hashed':
sampling = datalab.bigquery.Sampling.hashed(field_name=args['field'], percent=args['percent'],
count=count)
elif method == 'sorted':
ascending = args['order'] == 'ascending'
sampling = datalab.bigquery.Sampling.sorted(args['field'],
ascending=ascending,
count=count)
elif method == 'limit':
sampling = datalab.bigquery.Sampling.default(count=count)
else:
sampling = datalab.bigquery.Sampling.default(count=count)
if query:
results = query.sample(sampling=sampling, dialect=args['dialect'], billing_tier=args['billing'])
elif view:
results = view.sample(sampling=sampling)
else:
results = table.sample(sampling=sampling)
if args['verbose']:
print(results.sql)
if args['profile']:
return datalab.utils.commands.profile_df(results.to_dataframe())
else:
return results | python | def _sample_cell(args, cell_body):
"""Implements the bigquery sample cell magic for ipython notebooks.
Args:
args: the optional arguments following '%%bigquery sample'.
cell_body: optional contents of the cell interpreted as SQL, YAML or JSON.
Returns:
The results of executing the sampling query, or a profile of the sample data.
"""
env = datalab.utils.commands.notebook_environment()
query = None
table = None
view = None
if args['query']:
query = _get_query_argument(args, cell_body, env)
elif args['table']:
table = _get_table(args['table'])
elif args['view']:
view = datalab.utils.commands.get_notebook_item(args['view'])
if not isinstance(view, datalab.bigquery.View):
raise Exception('%s is not a view' % args['view'])
else:
query = datalab.bigquery.Query(cell_body, values=env)
count = args['count']
method = args['method']
if method == 'random':
sampling = datalab.bigquery.Sampling.random(percent=args['percent'], count=count)
elif method == 'hashed':
sampling = datalab.bigquery.Sampling.hashed(field_name=args['field'], percent=args['percent'],
count=count)
elif method == 'sorted':
ascending = args['order'] == 'ascending'
sampling = datalab.bigquery.Sampling.sorted(args['field'],
ascending=ascending,
count=count)
elif method == 'limit':
sampling = datalab.bigquery.Sampling.default(count=count)
else:
sampling = datalab.bigquery.Sampling.default(count=count)
if query:
results = query.sample(sampling=sampling, dialect=args['dialect'], billing_tier=args['billing'])
elif view:
results = view.sample(sampling=sampling)
else:
results = table.sample(sampling=sampling)
if args['verbose']:
print(results.sql)
if args['profile']:
return datalab.utils.commands.profile_df(results.to_dataframe())
else:
return results | [
"def",
"_sample_cell",
"(",
"args",
",",
"cell_body",
")",
":",
"env",
"=",
"datalab",
".",
"utils",
".",
"commands",
".",
"notebook_environment",
"(",
")",
"query",
"=",
"None",
"table",
"=",
"None",
"view",
"=",
"None",
"if",
"args",
"[",
"'query'",
"]",
":",
"query",
"=",
"_get_query_argument",
"(",
"args",
",",
"cell_body",
",",
"env",
")",
"elif",
"args",
"[",
"'table'",
"]",
":",
"table",
"=",
"_get_table",
"(",
"args",
"[",
"'table'",
"]",
")",
"elif",
"args",
"[",
"'view'",
"]",
":",
"view",
"=",
"datalab",
".",
"utils",
".",
"commands",
".",
"get_notebook_item",
"(",
"args",
"[",
"'view'",
"]",
")",
"if",
"not",
"isinstance",
"(",
"view",
",",
"datalab",
".",
"bigquery",
".",
"View",
")",
":",
"raise",
"Exception",
"(",
"'%s is not a view'",
"%",
"args",
"[",
"'view'",
"]",
")",
"else",
":",
"query",
"=",
"datalab",
".",
"bigquery",
".",
"Query",
"(",
"cell_body",
",",
"values",
"=",
"env",
")",
"count",
"=",
"args",
"[",
"'count'",
"]",
"method",
"=",
"args",
"[",
"'method'",
"]",
"if",
"method",
"==",
"'random'",
":",
"sampling",
"=",
"datalab",
".",
"bigquery",
".",
"Sampling",
".",
"random",
"(",
"percent",
"=",
"args",
"[",
"'percent'",
"]",
",",
"count",
"=",
"count",
")",
"elif",
"method",
"==",
"'hashed'",
":",
"sampling",
"=",
"datalab",
".",
"bigquery",
".",
"Sampling",
".",
"hashed",
"(",
"field_name",
"=",
"args",
"[",
"'field'",
"]",
",",
"percent",
"=",
"args",
"[",
"'percent'",
"]",
",",
"count",
"=",
"count",
")",
"elif",
"method",
"==",
"'sorted'",
":",
"ascending",
"=",
"args",
"[",
"'order'",
"]",
"==",
"'ascending'",
"sampling",
"=",
"datalab",
".",
"bigquery",
".",
"Sampling",
".",
"sorted",
"(",
"args",
"[",
"'field'",
"]",
",",
"ascending",
"=",
"ascending",
",",
"count",
"=",
"count",
")",
"elif",
"method",
"==",
"'limit'",
":",
"sampling",
"=",
"datalab",
".",
"bigquery",
".",
"Sampling",
".",
"default",
"(",
"count",
"=",
"count",
")",
"else",
":",
"sampling",
"=",
"datalab",
".",
"bigquery",
".",
"Sampling",
".",
"default",
"(",
"count",
"=",
"count",
")",
"if",
"query",
":",
"results",
"=",
"query",
".",
"sample",
"(",
"sampling",
"=",
"sampling",
",",
"dialect",
"=",
"args",
"[",
"'dialect'",
"]",
",",
"billing_tier",
"=",
"args",
"[",
"'billing'",
"]",
")",
"elif",
"view",
":",
"results",
"=",
"view",
".",
"sample",
"(",
"sampling",
"=",
"sampling",
")",
"else",
":",
"results",
"=",
"table",
".",
"sample",
"(",
"sampling",
"=",
"sampling",
")",
"if",
"args",
"[",
"'verbose'",
"]",
":",
"print",
"(",
"results",
".",
"sql",
")",
"if",
"args",
"[",
"'profile'",
"]",
":",
"return",
"datalab",
".",
"utils",
".",
"commands",
".",
"profile_df",
"(",
"results",
".",
"to_dataframe",
"(",
")",
")",
"else",
":",
"return",
"results"
] | Implements the bigquery sample cell magic for ipython notebooks.
Args:
args: the optional arguments following '%%bigquery sample'.
cell_body: optional contents of the cell interpreted as SQL, YAML or JSON.
Returns:
The results of executing the sampling query, or a profile of the sample data. | [
"Implements",
"the",
"bigquery",
"sample",
"cell",
"magic",
"for",
"ipython",
"notebooks",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/commands/_bigquery.py#L285-L339 | train | 237,894 |
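The method-to-sampling dispatch at the heart of this magic can be sketched without BigQuery; plain dicts stand in for datalab.bigquery.Sampling objects here:

def make_sampling(method, count, percent=None, field=None, order='ascending'):
    # Mirrors the if/elif chain above; 'limit' and unrecognized methods
    # both fall back to a simple row-count default.
    if method == 'random':
        return {'kind': 'random', 'percent': percent, 'count': count}
    if method == 'hashed':
        return {'kind': 'hashed', 'field': field, 'percent': percent, 'count': count}
    if method == 'sorted':
        return {'kind': 'sorted', 'field': field,
                'ascending': order == 'ascending', 'count': count}
    return {'kind': 'default', 'count': count}

print(make_sampling('hashed', 100, percent=10, field='user_id'))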
googledatalab/pydatalab | datalab/bigquery/commands/_bigquery.py | _create_cell | def _create_cell(args, cell_body):
"""Implements the BigQuery cell magic used to create datasets and tables.
The supported syntax is:
%%bigquery create dataset -n|--name <name> [-f|--friendly <friendlyname>]
[<description>]
or:
%%bigquery create table -n|--name <tablename> [--overwrite]
[<YAML or JSON cell_body defining schema to use for tables>]
Args:
args: the argument following '%bigquery create <command>'.
"""
if args['command'] == 'dataset':
try:
datalab.bigquery.Dataset(args['name']).create(friendly_name=args['friendly'],
description=cell_body)
except Exception as e:
print('Failed to create dataset %s: %s' % (args['name'], e))
else:
if cell_body is None:
print('Failed to create %s: no schema specified' % args['name'])
else:
try:
record = datalab.utils.commands.parse_config(cell_body,
datalab.utils.commands.notebook_environment(),
as_dict=False)
schema = datalab.bigquery.Schema(record)
datalab.bigquery.Table(args['name']).create(schema=schema, overwrite=args['overwrite'])
except Exception as e:
print('Failed to create table %s: %s' % (args['name'], e)) | python | def _create_cell(args, cell_body):
"""Implements the BigQuery cell magic used to create datasets and tables.
The supported syntax is:
%%bigquery create dataset -n|--name <name> [-f|--friendly <friendlyname>]
[<description>]
or:
%%bigquery create table -n|--name <tablename> [--overwrite]
[<YAML or JSON cell_body defining schema to use for tables>]
Args:
args: the argument following '%bigquery create <command>'.
"""
if args['command'] == 'dataset':
try:
datalab.bigquery.Dataset(args['name']).create(friendly_name=args['friendly'],
description=cell_body)
except Exception as e:
print('Failed to create dataset %s: %s' % (args['name'], e))
else:
if cell_body is None:
print('Failed to create %s: no schema specified' % args['name'])
else:
try:
record = datalab.utils.commands.parse_config(cell_body,
datalab.utils.commands.notebook_environment(),
as_dict=False)
schema = datalab.bigquery.Schema(record)
datalab.bigquery.Table(args['name']).create(schema=schema, overwrite=args['overwrite'])
except Exception as e:
print('Failed to create table %s: %s' % (args['name'], e)) | [
"def",
"_create_cell",
"(",
"args",
",",
"cell_body",
")",
":",
"if",
"args",
"[",
"'command'",
"]",
"==",
"'dataset'",
":",
"try",
":",
"datalab",
".",
"bigquery",
".",
"Dataset",
"(",
"args",
"[",
"'name'",
"]",
")",
".",
"create",
"(",
"friendly_name",
"=",
"args",
"[",
"'friendly'",
"]",
",",
"description",
"=",
"cell_body",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"'Failed to create dataset %s: %s'",
"%",
"(",
"args",
"[",
"'name'",
"]",
",",
"e",
")",
")",
"else",
":",
"if",
"cell_body",
"is",
"None",
":",
"print",
"(",
"'Failed to create %s: no schema specified'",
"%",
"args",
"[",
"'name'",
"]",
")",
"else",
":",
"try",
":",
"record",
"=",
"datalab",
".",
"utils",
".",
"commands",
".",
"parse_config",
"(",
"cell_body",
",",
"datalab",
".",
"utils",
".",
"commands",
".",
"notebook_environment",
"(",
")",
",",
"as_dict",
"=",
"False",
")",
"schema",
"=",
"datalab",
".",
"bigquery",
".",
"Schema",
"(",
"record",
")",
"datalab",
".",
"bigquery",
".",
"Table",
"(",
"args",
"[",
"'name'",
"]",
")",
".",
"create",
"(",
"schema",
"=",
"schema",
",",
"overwrite",
"=",
"args",
"[",
"'overwrite'",
"]",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"'Failed to create table %s: %s'",
"%",
"(",
"args",
"[",
"'name'",
"]",
",",
"e",
")",
")"
] | Implements the BigQuery cell magic used to create datasets and tables.
The supported syntax is:
%%bigquery create dataset -n|--name <name> [-f|--friendly <friendlyname>]
[<description>]
or:
%%bigquery create table -n|--name <tablename> [--overwrite]
[<YAML or JSON cell_body defining schema to use for tables>]
Args:
args: the argument following '%bigquery create <command>'. | [
"Implements",
"the",
"BigQuery",
"cell",
"magic",
"used",
"to",
"create",
"datasets",
"and",
"tables",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/commands/_bigquery.py#L342-L375 | train | 237,895 |
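For the table branch, the cell body carries the schema as YAML or JSON. In this sketch json.loads is a stand-in for datalab.utils.commands.parse_config, and the field list is hypothetical:

import json

cell_body = '''
[
  {"name": "id",   "type": "INTEGER"},
  {"name": "name", "type": "STRING"}
]
'''
record = json.loads(cell_body)  # parse_config also accepts YAML
print([(f['name'], f['type']) for f in record])  # [('id', 'INTEGER'), ('name', 'STRING')]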
googledatalab/pydatalab | datalab/bigquery/commands/_bigquery.py | _delete_cell | def _delete_cell(args, _):
"""Implements the BigQuery cell magic used to delete datasets and tables.
The supported syntax is:
%%bigquery delete dataset -n|--name <name>
or:
%%bigquery delete table -n|--name <name>
Args:
args: the argument following '%bigquery delete <command>'.
"""
# TODO(gram): add support for wildcards and multiple arguments at some point. The latter is
# easy, the former a bit more tricky if non-default projects are involved.
if args['command'] == 'dataset':
try:
datalab.bigquery.Dataset(args['name']).delete()
except Exception as e:
print('Failed to delete dataset %s: %s' % (args['name'], e))
else:
try:
datalab.bigquery.Table(args['name']).delete()
except Exception as e:
print('Failed to delete table %s: %s' % (args['name'], e)) | python | def _delete_cell(args, _):
"""Implements the BigQuery cell magic used to delete datasets and tables.
The supported syntax is:
%%bigquery delete dataset -n|--name <name>
or:
%%bigquery delete table -n|--name <name>
Args:
args: the argument following '%bigquery delete <command>'.
"""
# TODO(gram): add support for wildcards and multiple arguments at some point. The latter is
# easy, the former a bit more tricky if non-default projects are involved.
if args['command'] == 'dataset':
try:
datalab.bigquery.Dataset(args['name']).delete()
except Exception as e:
print('Failed to delete dataset %s: %s' % (args['name'], e))
else:
try:
datalab.bigquery.Table(args['name']).delete()
except Exception as e:
print('Failed to delete table %s: %s' % (args['name'], e)) | [
"def",
"_delete_cell",
"(",
"args",
",",
"_",
")",
":",
"# TODO(gram): add support for wildchars and multiple arguments at some point. The latter is",
"# easy, the former a bit more tricky if non-default projects are involved.",
"if",
"args",
"[",
"'command'",
"]",
"==",
"'dataset'",
":",
"try",
":",
"datalab",
".",
"bigquery",
".",
"Dataset",
"(",
"args",
"[",
"'name'",
"]",
")",
".",
"delete",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"'Failed to delete dataset %s: %s'",
"%",
"(",
"args",
"[",
"'name'",
"]",
",",
"e",
")",
")",
"else",
":",
"try",
":",
"datalab",
".",
"bigquery",
".",
"Table",
"(",
"args",
"[",
"'name'",
"]",
")",
".",
"delete",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"'Failed to delete table %s: %s'",
"%",
"(",
"args",
"[",
"'name'",
"]",
",",
"e",
")",
")"
] | Implements the BigQuery cell magic used to delete datasets and tables.
The supported syntax is:
%%bigquery delete dataset -n|--name <name>
or:
%%bigquery delete table -n|--name <name>
Args:
args: the argument following '%bigquery delete <command>'. | [
"Implements",
"the",
"BigQuery",
"cell",
"magic",
"used",
"to",
"delete",
"datasets",
"and",
"tables",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/commands/_bigquery.py#L378-L403 | train | 237,896 |
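Concrete invocations matching the syntax in the docstring above; the dataset and table names are hypothetical:

%%bigquery delete dataset --name scratch_dataset
%%bigquery delete table --name scratch_dataset.tmp_results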
googledatalab/pydatalab | datalab/bigquery/commands/_bigquery.py | _udf_cell | def _udf_cell(args, js):
"""Implements the bigquery_udf cell magic for ipython notebooks.
The supported syntax is:
%%bigquery udf --module <var>
<js function>
Args:
args: the optional arguments following '%%bigquery udf'.
js: the UDF declaration (inputs and outputs) and implementation in javascript.
Returns:
The results of executing the UDF converted to a dataframe if no variable
was specified. None otherwise.
"""
variable_name = args['module']
if not variable_name:
raise Exception('Declaration must be of the form %%bigquery udf --module <variable name>')
# Parse out the input and output specification
spec_pattern = r'\{\{([^}]+)\}\}'
spec_part_pattern = r'[a-z_][a-z0-9_]*'
specs = re.findall(spec_pattern, js)
if len(specs) < 2:
raise Exception('The JavaScript must declare the input row and output emitter parameters '
'using valid jsdoc format comments.\n'
'The input row param declaration must be typed as {{field:type, field2:type}} '
'and the output emitter param declaration must be typed as '
'function({{field:type, field2:type}}).')
inputs = []
input_spec_parts = re.findall(spec_part_pattern, specs[0], flags=re.IGNORECASE)
if len(input_spec_parts) % 2 != 0:
raise Exception('Invalid input row param declaration. The jsdoc type expression must '
'define an object with field and type pairs.')
for n, t in zip(input_spec_parts[0::2], input_spec_parts[1::2]):
inputs.append((n, t))
outputs = []
output_spec_parts = re.findall(spec_part_pattern, specs[1], flags=re.IGNORECASE)
if len(output_spec_parts) % 2 != 0:
raise Exception('Invalid output emitter param declaration. The jsdoc type expression must '
'define a function accepting an object with field and type pairs.')
for n, t in zip(output_spec_parts[0::2], output_spec_parts[1::2]):
outputs.append((n, t))
# Look for imports. We use a non-standard @import keyword; we could alternatively use @requires.
# Object names can contain any characters except \r and \n.
import_pattern = r'@import[\s]+(gs://[a-z\d][a-z\d_\.\-]*[a-z\d]/[^\n\r]+)'
imports = re.findall(import_pattern, js)
# Split the cell if necessary. We look for a 'function(' with no name and a header comment
# block with @param and assume this is the primary function, up to a closing '}' at the start
# of the line. The remaining cell content is used as support code.
split_pattern = r'(.*)(/\*.*?@param.*?@param.*?\*/\w*\n\w*function\w*\(.*?^}\n?)(.*)'
parts = re.match(split_pattern, js, re.MULTILINE | re.DOTALL)
support_code = ''
if parts:
support_code = (parts.group(1) + parts.group(3)).strip()
if len(support_code):
js = parts.group(2)
# Finally build the UDF object
udf = datalab.bigquery.UDF(inputs, outputs, variable_name, js, support_code, imports)
datalab.utils.commands.notebook_environment()[variable_name] = udf | python | def _udf_cell(args, js):
"""Implements the bigquery_udf cell magic for ipython notebooks.
The supported syntax is:
%%bigquery udf --module <var>
<js function>
Args:
args: the optional arguments following '%%bigquery udf'.
js: the UDF declaration (inputs and outputs) and implementation in javascript.
Returns:
The results of executing the UDF converted to a dataframe if no variable
was specified. None otherwise.
"""
variable_name = args['module']
if not variable_name:
raise Exception('Declaration must be of the form %%bigquery udf --module <variable name>')
# Parse out the input and output specification
spec_pattern = r'\{\{([^}]+)\}\}'
spec_part_pattern = r'[a-z_][a-z0-9_]*'
specs = re.findall(spec_pattern, js)
if len(specs) < 2:
raise Exception('The JavaScript must declare the input row and output emitter parameters '
'using valid jsdoc format comments.\n'
'The input row param declaration must be typed as {{field:type, field2:type}} '
'and the output emitter param declaration must be typed as '
'function({{field:type, field2:type}}).')
inputs = []
input_spec_parts = re.findall(spec_part_pattern, specs[0], flags=re.IGNORECASE)
if len(input_spec_parts) % 2 != 0:
raise Exception('Invalid input row param declaration. The jsdoc type expression must '
'define an object with field and type pairs.')
for n, t in zip(input_spec_parts[0::2], input_spec_parts[1::2]):
inputs.append((n, t))
outputs = []
output_spec_parts = re.findall(spec_part_pattern, specs[1], flags=re.IGNORECASE)
if len(output_spec_parts) % 2 != 0:
raise Exception('Invalid output emitter param declaration. The jsdoc type expression must '
'define a function accepting an object with field and type pairs.')
for n, t in zip(output_spec_parts[0::2], output_spec_parts[1::2]):
outputs.append((n, t))
# Look for imports. We use a non-standard @import keyword; we could alternatively use @requires.
# Object names can contain any characters except \r and \n.
import_pattern = r'@import[\s]+(gs://[a-z\d][a-z\d_\.\-]*[a-z\d]/[^\n\r]+)'
imports = re.findall(import_pattern, js)
# Split the cell if necessary. We look for a 'function(' with no name and a header comment
# block with @param and assume this is the primary function, up to a closing '}' at the start
# of the line. The remaining cell content is used as support code.
split_pattern = r'(.*)(/\*.*?@param.*?@param.*?\*/\w*\n\w*function\w*\(.*?^}\n?)(.*)'
parts = re.match(split_pattern, js, re.MULTILINE | re.DOTALL)
support_code = ''
if parts:
support_code = (parts.group(1) + parts.group(3)).strip()
if len(support_code):
js = parts.group(2)
# Finally build the UDF object
udf = datalab.bigquery.UDF(inputs, outputs, variable_name, js, support_code, imports)
datalab.utils.commands.notebook_environment()[variable_name] = udf | [
"def",
"_udf_cell",
"(",
"args",
",",
"js",
")",
":",
"variable_name",
"=",
"args",
"[",
"'module'",
"]",
"if",
"not",
"variable_name",
":",
"raise",
"Exception",
"(",
"'Declaration must be of the form %%bigquery udf --module <variable name>'",
")",
"# Parse out the input and output specification",
"spec_pattern",
"=",
"r'\\{\\{([^}]+)\\}\\}'",
"spec_part_pattern",
"=",
"r'[a-z_][a-z0-9_]*'",
"specs",
"=",
"re",
".",
"findall",
"(",
"spec_pattern",
",",
"js",
")",
"if",
"len",
"(",
"specs",
")",
"<",
"2",
":",
"raise",
"Exception",
"(",
"'The JavaScript must declare the input row and output emitter parameters '",
"'using valid jsdoc format comments.\\n'",
"'The input row param declaration must be typed as {{field:type, field2:type}} '",
"'and the output emitter param declaration must be typed as '",
"'function({{field:type, field2:type}}.'",
")",
"inputs",
"=",
"[",
"]",
"input_spec_parts",
"=",
"re",
".",
"findall",
"(",
"spec_part_pattern",
",",
"specs",
"[",
"0",
"]",
",",
"flags",
"=",
"re",
".",
"IGNORECASE",
")",
"if",
"len",
"(",
"input_spec_parts",
")",
"%",
"2",
"!=",
"0",
":",
"raise",
"Exception",
"(",
"'Invalid input row param declaration. The jsdoc type expression must '",
"'define an object with field and type pairs.'",
")",
"for",
"n",
",",
"t",
"in",
"zip",
"(",
"input_spec_parts",
"[",
"0",
":",
":",
"2",
"]",
",",
"input_spec_parts",
"[",
"1",
":",
":",
"2",
"]",
")",
":",
"inputs",
".",
"append",
"(",
"(",
"n",
",",
"t",
")",
")",
"outputs",
"=",
"[",
"]",
"output_spec_parts",
"=",
"re",
".",
"findall",
"(",
"spec_part_pattern",
",",
"specs",
"[",
"1",
"]",
",",
"flags",
"=",
"re",
".",
"IGNORECASE",
")",
"if",
"len",
"(",
"output_spec_parts",
")",
"%",
"2",
"!=",
"0",
":",
"raise",
"Exception",
"(",
"'Invalid output emitter param declaration. The jsdoc type expression must '",
"'define a function accepting an an object with field and type pairs.'",
")",
"for",
"n",
",",
"t",
"in",
"zip",
"(",
"output_spec_parts",
"[",
"0",
":",
":",
"2",
"]",
",",
"output_spec_parts",
"[",
"1",
":",
":",
"2",
"]",
")",
":",
"outputs",
".",
"append",
"(",
"(",
"n",
",",
"t",
")",
")",
"# Look for imports. We use a non-standard @import keyword; we could alternatively use @requires.",
"# Object names can contain any characters except \\r and \\n.",
"import_pattern",
"=",
"r'@import[\\s]+(gs://[a-z\\d][a-z\\d_\\.\\-]*[a-z\\d]/[^\\n\\r]+)'",
"imports",
"=",
"re",
".",
"findall",
"(",
"import_pattern",
",",
"js",
")",
"# Split the cell if necessary. We look for a 'function(' with no name and a header comment",
"# block with @param and assume this is the primary function, up to a closing '}' at the start",
"# of the line. The remaining cell content is used as support code.",
"split_pattern",
"=",
"r'(.*)(/\\*.*?@param.*?@param.*?\\*/\\w*\\n\\w*function\\w*\\(.*?^}\\n?)(.*)'",
"parts",
"=",
"re",
".",
"match",
"(",
"split_pattern",
",",
"js",
",",
"re",
".",
"MULTILINE",
"|",
"re",
".",
"DOTALL",
")",
"support_code",
"=",
"''",
"if",
"parts",
":",
"support_code",
"=",
"(",
"parts",
".",
"group",
"(",
"1",
")",
"+",
"parts",
".",
"group",
"(",
"3",
")",
")",
".",
"strip",
"(",
")",
"if",
"len",
"(",
"support_code",
")",
":",
"js",
"=",
"parts",
".",
"group",
"(",
"2",
")",
"# Finally build the UDF object",
"udf",
"=",
"datalab",
".",
"bigquery",
".",
"UDF",
"(",
"inputs",
",",
"outputs",
",",
"variable_name",
",",
"js",
",",
"support_code",
",",
"imports",
")",
"datalab",
".",
"utils",
".",
"commands",
".",
"notebook_environment",
"(",
")",
"[",
"variable_name",
"]",
"=",
"udf"
] | Implements the bigquery_udf cell magic for ipython notebooks.
The supported syntax is:
%%bigquery udf --module <var>
<js function>
Args:
args: the optional arguments following '%%bigquery udf'.
js: the UDF declaration (inputs and outputs) and implementation in javascript.
Returns:
The results of executing the UDF converted to a dataframe if no variable
was specified. None otherwise. | [
"Implements",
"the",
"bigquery_udf",
"cell",
"magic",
"for",
"ipython",
"notebooks",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/commands/_bigquery.py#L428-L492 | train | 237,897 |
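The jsdoc spec extraction can be exercised standalone with the same regular expressions the record uses; the UDF body below is a toy emitter:

import re

js = '''
/**
 * @param {{word: string, count: integer}} r
 * @param function({{word: string, total: integer}}) emitter
 */
function(r, emitter) { emitter({word: r.word, total: r.count}); }
'''
specs = re.findall(r'\{\{([^}]+)\}\}', js)  # input spec first, emitter spec second
parts = re.findall(r'[a-z_][a-z0-9_]*', specs[0], flags=re.IGNORECASE)
print(list(zip(parts[0::2], parts[1::2])))  # [('word', 'string'), ('count', 'integer')]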
googledatalab/pydatalab | datalab/bigquery/commands/_bigquery.py | _pipeline_cell | def _pipeline_cell(args, cell_body):
"""Implements the BigQuery cell magic used to validate, execute or deploy BQ pipelines.
The supported syntax is:
%%bigquery pipeline [-q|--sql <query identifier>] <other args> <action>
[<YAML or JSON cell_body or inline SQL>]
Args:
args: the arguments following '%bigquery pipeline'.
cell_body: optional contents of the cell interpreted as YAML or JSON.
Returns:
The QueryResultsTable
"""
if args['action'] == 'deploy':
raise Exception('Deploying a pipeline is not yet supported')
env = {}
for key, value in datalab.utils.commands.notebook_environment().items():
if isinstance(value, datalab.bigquery._udf.UDF):
env[key] = value
query = _get_query_argument(args, cell_body, env)
if args['verbose']:
print(query.sql)
if args['action'] == 'dryrun':
print(query.sql)
result = query.execute_dry_run()
return datalab.bigquery._query_stats.QueryStats(total_bytes=result['totalBytesProcessed'],
is_cached=result['cacheHit'])
if args['action'] == 'run':
return query.execute(args['target'], table_mode=args['mode'], use_cache=not args['nocache'],
allow_large_results=args['large'], dialect=args['dialect'],
billing_tier=args['billing']).results | python | def _pipeline_cell(args, cell_body):
"""Implements the BigQuery cell magic used to validate, execute or deploy BQ pipelines.
The supported syntax is:
%%bigquery pipeline [-q|--sql <query identifier>] <other args> <action>
[<YAML or JSON cell_body or inline SQL>]
Args:
args: the arguments following '%bigquery pipeline'.
cell_body: optional contents of the cell interpreted as YAML or JSON.
Returns:
The QueryResultsTable
"""
if args['action'] == 'deploy':
raise Exception('Deploying a pipeline is not yet supported')
env = {}
for key, value in datalab.utils.commands.notebook_environment().items():
if isinstance(value, datalab.bigquery._udf.UDF):
env[key] = value
query = _get_query_argument(args, cell_body, env)
if args['verbose']:
print(query.sql)
if args['action'] == 'dryrun':
print(query.sql)
result = query.execute_dry_run()
return datalab.bigquery._query_stats.QueryStats(total_bytes=result['totalBytesProcessed'],
is_cached=result['cacheHit'])
if args['action'] == 'run':
return query.execute(args['target'], table_mode=args['mode'], use_cache=not args['nocache'],
allow_large_results=args['large'], dialect=args['dialect'],
billing_tier=args['billing']).results | [
"def",
"_pipeline_cell",
"(",
"args",
",",
"cell_body",
")",
":",
"if",
"args",
"[",
"'action'",
"]",
"==",
"'deploy'",
":",
"raise",
"Exception",
"(",
"'Deploying a pipeline is not yet supported'",
")",
"env",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"datalab",
".",
"utils",
".",
"commands",
".",
"notebook_environment",
"(",
")",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"datalab",
".",
"bigquery",
".",
"_udf",
".",
"UDF",
")",
":",
"env",
"[",
"key",
"]",
"=",
"value",
"query",
"=",
"_get_query_argument",
"(",
"args",
",",
"cell_body",
",",
"env",
")",
"if",
"args",
"[",
"'verbose'",
"]",
":",
"print",
"(",
"query",
".",
"sql",
")",
"if",
"args",
"[",
"'action'",
"]",
"==",
"'dryrun'",
":",
"print",
"(",
"query",
".",
"sql",
")",
"result",
"=",
"query",
".",
"execute_dry_run",
"(",
")",
"return",
"datalab",
".",
"bigquery",
".",
"_query_stats",
".",
"QueryStats",
"(",
"total_bytes",
"=",
"result",
"[",
"'totalBytesProcessed'",
"]",
",",
"is_cached",
"=",
"result",
"[",
"'cacheHit'",
"]",
")",
"if",
"args",
"[",
"'action'",
"]",
"==",
"'run'",
":",
"return",
"query",
".",
"execute",
"(",
"args",
"[",
"'target'",
"]",
",",
"table_mode",
"=",
"args",
"[",
"'mode'",
"]",
",",
"use_cache",
"=",
"not",
"args",
"[",
"'nocache'",
"]",
",",
"allow_large_results",
"=",
"args",
"[",
"'large'",
"]",
",",
"dialect",
"=",
"args",
"[",
"'dialect'",
"]",
",",
"billing_tier",
"=",
"args",
"[",
"'billing'",
"]",
")",
".",
"results"
] | Implements the BigQuery cell magic used to validate, execute or deploy BQ pipelines.
The supported syntax is:
%%bigquery pipeline [-q|--sql <query identifier>] <other args> <action>
[<YAML or JSON cell_body or inline SQL>]
Args:
args: the arguments following '%bigquery pipeline'.
cell_body: optional contents of the cell interpreted as YAML or JSON.
Returns:
The QueryResultsTable | [
"Implements",
"the",
"BigQuery",
"cell",
"magic",
"used",
"to",
"validate",
"execute",
"or",
"deploy",
"BQ",
"pipelines",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/commands/_bigquery.py#L516-L548 | train | 237,898 |
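The environment-filtering step keeps only UDF objects visible to the pipeline query; a stand-in class makes the idea runnable:

class UDF(object):
    """Stand-in for datalab.bigquery._udf.UDF (illustrative only)."""

notebook_env = {'my_udf': UDF(), 'x': 42, 'helper': len}
env = {k: v for k, v in notebook_env.items() if isinstance(v, UDF)}
print(list(env))  # ['my_udf'] -- plain values and callables are filtered out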
googledatalab/pydatalab | datalab/bigquery/commands/_bigquery.py | _table_line | def _table_line(args):
"""Implements the BigQuery table magic used to display tables.
The supported syntax is:
%bigquery table -t|--table <name> <other args>
Args:
args: the arguments following '%bigquery table'.
Returns:
The HTML rendering for the table.
"""
# TODO(gram): It would be good to turn _table_viewer into a class that has a registered
# renderer. That would allow this to return a table viewer object which is easier to test.
name = args['table']
table = _get_table(name)
if table and table.exists():
fields = args['cols'].split(',') if args['cols'] else None
html = _table_viewer(table, rows_per_page=args['rows'], fields=fields)
return IPython.core.display.HTML(html)
else:
raise Exception('Table %s does not exist; cannot display' % name) | python | def _table_line(args):
"""Implements the BigQuery table magic used to display tables.
The supported syntax is:
%bigquery table -t|--table <name> <other args>
Args:
args: the arguments following '%bigquery table'.
Returns:
The HTML rendering for the table.
"""
# TODO(gram): It would be good to turn _table_viewer into a class that has a registered
# renderer. That would allow this to return a table viewer object which is easier to test.
name = args['table']
table = _get_table(name)
if table and table.exists():
fields = args['cols'].split(',') if args['cols'] else None
html = _table_viewer(table, rows_per_page=args['rows'], fields=fields)
return IPython.core.display.HTML(html)
else:
raise Exception('Table %s does not exist; cannot display' % name) | [
"def",
"_table_line",
"(",
"args",
")",
":",
"# TODO(gram): It would be good to turn _table_viewer into a class that has a registered",
"# renderer. That would allow this to return a table viewer object which is easier to test.",
"name",
"=",
"args",
"[",
"'table'",
"]",
"table",
"=",
"_get_table",
"(",
"name",
")",
"if",
"table",
"and",
"table",
".",
"exists",
"(",
")",
":",
"fields",
"=",
"args",
"[",
"'cols'",
"]",
".",
"split",
"(",
"','",
")",
"if",
"args",
"[",
"'cols'",
"]",
"else",
"None",
"html",
"=",
"_table_viewer",
"(",
"table",
",",
"rows_per_page",
"=",
"args",
"[",
"'rows'",
"]",
",",
"fields",
"=",
"fields",
")",
"return",
"IPython",
".",
"core",
".",
"display",
".",
"HTML",
"(",
"html",
")",
"else",
":",
"raise",
"Exception",
"(",
"'Table %s does not exist; cannot display'",
"%",
"name",
")"
] | Implements the BigQuery table magic used to display tables.
The supported syntax is:
%bigquery table -t|--table <name> <other args>
Args:
args: the arguments following '%bigquery table'.
Returns:
The HTML rendering for the table. | [
"Implements",
"the",
"BigQuery",
"table",
"magic",
"used",
"to",
"display",
"tables",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/commands/_bigquery.py#L551-L571 | train | 237,899 |
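An example invocation matching the docstring's syntax; the table name and column list are hypothetical. --cols is split on commas and --rows sets the viewer's page size:

%bigquery table --table my_dataset.events --cols id,name --rows 25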