_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
def download_pojo(model, path="", get_jar=True, jar_name=""):
    """
    Download the POJO for this model to the directory specified by path; if path is "", then dump to screen.

    :param model: the model whose scoring POJO should be retrieved.
    :param path: an absolute path to the directory where POJO should be saved.
    :param get_jar: retrieve the h2o-genmodel.jar also (will be saved to the same folder ``path``).
    :param jar_name: Custom name of genmodel jar.
    :returns: location of the downloaded POJO file, or None when dumping to screen.
    """
    assert_is_type(model, ModelBase)
    assert_is_type(path, str)
    assert_is_type(get_jar, bool)
    if not model.have_pojo:
        raise H2OValueError("Export to POJO not supported")

    pojo_endpoint = "GET /3/Models.java/%s" % model.model_id
    if path == "":
        # No destination directory given -- print the generated Java source instead.
        print(api(pojo_endpoint))
        return None

    filename = api(pojo_endpoint, save_to=path)
    if get_jar:
        # The genmodel jar goes into the same directory, under a custom name if one was given.
        jar_target = jar_name if jar_name != "" else "h2o-genmodel.jar"
        api("GET /3/h2o-genmodel.jar", save_to=os.path.join(path, jar_target))
    return filename
def download_csv(data, filename):
    """
    Download an H2O data set to a CSV file on the local disk.

    Warning: Files located on the H2O server may be very large! Make sure you have enough
    hard drive space to accommodate the entire file.

    :param data: an H2OFrame object to be downloaded.
    :param filename: name for the CSV file where the data should be saved to.
    """
    assert_is_type(data, H2OFrame)
    assert_is_type(filename, str)
    url = "%s?frame_id=%s&hex_string=false" % (h2oconn.make_url("DownloadDataset", 3), data.frame_id)
    with open(filename, "wb") as output:
        output.write(urlopen()(url).read())
def download_all_logs(dirname=".", filename=None):
    """
    Download H2O log files to disk.

    :param dirname: a character string indicating the directory that the log file should be saved in.
    :param filename: a string indicating the name that the CSV file should be. Note that the saved format
        is .zip, so the file name must include the .zip extension.
    :returns: path of logs written in a zip file.

    :examples: The following code will save the zip file `'autoh2o_log.zip'` in a directory that is one
        down from where you are currently working into a directory called `your_directory_name`.
        (Please note that `your_directory_name` should be replaced with the name of the directory that
        you've created and that already exists.)

        >>> h2o.download_all_logs(dirname='./your_directory_name/', filename = 'autoh2o_log.zip')
    """
    assert_is_type(dirname, str)
    assert_is_type(filename, str, None)
    url = "%s/3/Logs/download" % h2oconn.base_url
    opener = urlopen()
    response = opener(url)
    if not os.path.exists(dirname):
        os.mkdir(dirname)
    if filename is None:
        # Derive the archive name from the Content-Disposition header of the response.
        if PY3:
            headers = [h[1] for h in response.headers._headers]
        else:
            headers = response.headers.headers
        for h in headers:
            if "filename=" in h:
                filename = h.split("filename=")[1].strip()
                break
    path = os.path.join(dirname, filename)
    print("Writing H2O logs to " + path)
    # Bug fix: the original issued a second `opener(url)` request here, downloading the
    # (potentially large) log archive twice. Reuse the already-open response instead.
    with open(path, "wb") as f:
        f.write(response.read())
    return path
def export_file(frame, path, force=False, parts=1):
    """
    Export a given H2OFrame to a path on the machine this python session is currently connected to.

    :param frame: the Frame to save to disk.
    :param path: the path to the save point on disk.
    :param force: if True, overwrite any preexisting file with the same path
    :param parts: enables export to multiple 'part' files instead of just a single file.
        Convenient for large datasets that take too long to store in a single file.
        Use parts=-1 to instruct H2O to determine the optimal number of part files or
        specify your desired maximum number of part files. Path needs to be a directory
        when exporting to multiple files, also that directory must be empty.
        Default is ``parts = 1``, which is to export to a single file.
    """
    assert_is_type(frame, H2OFrame)
    assert_is_type(path, str)
    assert_is_type(force, bool)
    assert_is_type(parts, int)
    payload = {"path": path, "num_parts": parts, "force": force}
    # Kick off the export job on the backend and block until it finishes.
    job = H2OJob(api("POST /3/Frames/%s/export" % frame.frame_id, data=payload), "Export File")
    job.poll()
def as_list(data, use_pandas=True, header=True):
    """
    Convert an H2O data object into a python-specific object.

    WARNING! This will pull all data local!

    If Pandas is available (and use_pandas is True), then pandas will be used to parse the
    data frame. Otherwise, a list-of-lists populated by character data will be returned (so
    the types of data will all be str).

    :param data: an H2O data object.
    :param use_pandas: If True, try to use pandas for reading in the data.
    :param header: If True, return column names as first element in list
    :returns: List of lists (Rows x Columns).
    """
    assert_is_type(data, H2OFrame)
    assert_is_type(use_pandas, bool)
    assert_is_type(header, bool)
    # Delegate to H2OFrame.as_data_frame; the unbound call mirrors the original dispatch.
    return H2OFrame.as_data_frame(data, use_pandas=use_pandas, header=header)
def demo(funcname, interactive=True, echo=True, test=False):
    """
    H2O built-in demo facility.

    :param funcname: A string that identifies the h2o python function to demonstrate.
    :param interactive: If True, the user will be prompted to continue the demonstration after every segment.
    :param echo: If True, the python commands that are executed will be displayed.
    :param test: If True, `h2o.init()` will not be called (used for pyunit testing).

    :example:
    >>> import h2o
    >>> h2o.demo("gbm")
    """
    import h2o.demos as h2odemo
    assert_is_type(funcname, str)
    assert_is_type(interactive, bool)
    assert_is_type(echo, bool)
    assert_is_type(test, bool)

    demo_function = getattr(h2odemo, funcname, None)
    # `type(demo)` is the plain-function type, so this verifies the attribute really is a function.
    if demo_function is not None and type(demo_function) is type(demo):
        demo_function(interactive, echo, test)
    else:
        print("Demo for %s is not available." % funcname)
def load_dataset(relative_path):
    """Imports a data file within the 'h2o_data' folder."""
    assert_is_type(relative_path, str)
    h2o_dir = os.path.split(__file__)[0]
    # Try the path as given, then inside "h2o_data", then with a ".csv" suffix appended.
    candidates = [
        os.path.join(h2o_dir, relative_path),
        os.path.join(h2o_dir, "h2o_data", relative_path),
        os.path.join(h2o_dir, "h2o_data", relative_path + ".csv"),
    ]
    for candidate in candidates:
        if os.path.exists(candidate):
            return upload_file(candidate)
    # File not found -- raise an error!
    raise H2OValueError("Data file %s cannot be found" % relative_path)
def make_metrics(predicted, actual, domain=None, distribution=None):
    """
    Create Model Metrics from predicted and actual values in H2O.

    :param H2OFrame predicted: an H2OFrame containing predictions.
    :param H2OFrame actual: an H2OFrame containing actual values.
    :param domain: list of response factors for classification.
    :param distribution: distribution for regression.
    :returns: the computed model metrics, as returned by the backend.
    """
    assert_is_type(predicted, H2OFrame)
    assert_is_type(actual, H2OFrame)
    assert_is_type(distribution, str, None)
    # assert predicted.ncol == 1, "`predicted` frame should have exactly 1 column"
    assert actual.ncol == 1, "`actual` frame should have exactly 1 column"
    # (The original also had `assert_satisfies(actual.ncol, actual.ncol == 1)`, which re-checked
    # the same condition and could never fire after the assert above; removed as redundant.)
    if domain is None and any(actual.isfactor()):
        # Derive the classification domain from the factor levels of the actuals.
        domain = actual.levels()[0]
    res = api("POST /3/ModelMetrics/predictions_frame/%s/actuals_frame/%s" % (predicted.frame_id, actual.frame_id),
              data={"domain": domain, "distribution": distribution})
    return res["model_metrics"]
def _put_key(file_path, dest_key=None, overwrite=True):
    """
    Upload given file into DKV and save it under give key as raw object.

    :param dest_key: name of destination key in DKV
    :param file_path: path to file to upload
    :return: key name if object was uploaded successfully
    """
    # An unspecified destination key is sent as an empty string; the server then picks one.
    endpoint = "POST /3/PutKey?destination_key={}&overwrite={}".format(dest_key or '', overwrite)
    ret = api(endpoint, filename=file_path)
    return ret["destination_key"]
def upload_custom_metric(func, func_file="metrics.py", func_name=None, class_name=None, source_provider=None):
    """
    Upload given metrics function into H2O cluster.

    The metrics can have different representation:
      - class: needs to implement map(pred, act, weight, offset, model), reduce(l, r) and metric(l) methods
      - string: the same as in class case, but the class is given as a string

    :param func: metric representation: string, class
    :param func_file: internal name of file to save given metrics representation
    :param func_name: name for h2o key under which the given metric is saved
    :param class_name: name of class wrapping the metrics function (when supplied as string)
    :param source_provider: a function which provides a source code for given function
    :return: reference to uploaded metrics function

    :examples:
        >>> class CustomMaeFunc:
        >>>     def map(self, pred, act, w, o, model):
        >>>         return [abs(act[0] - pred[0]), 1]
        >>>     def reduce(self, l, r):
        >>>         return [l[0] + r[0], l[1] + r[1]]
        >>>     def metric(self, l):
        >>>         return l[0] / l[1]
        >>>
        >>> h2o.upload_custom_metric(CustomMaeFunc, func_name="mae")
    """
    import tempfile
    import inspect

    # Fall back to the module's default source provider when none is supplied.
    if not source_provider:
        source_provider = _default_source_provider

    # Template wrapping the user-supplied metric class so it satisfies the
    # Java-side MetricFunc interface.
    # NOTE(review): internal indentation of this generated code was reconstructed -- confirm
    # against the upstream source.
    _CFUNC_CODE_TEMPLATE = """# Generated code
import water.udf.CMetricFunc as MetricFunc

# User given metric function as a class implementing
# 3 methods defined by interface CMetricFunc
{}

# Generated user metric which satisfies the interface
# of Java MetricFunc
class {}Wrapper({}, MetricFunc, object):
    pass
"""

    assert_satisfies(func, inspect.isclass(func) or isinstance(func, str),
                     "The argument func needs to be string or class !")
    assert_satisfies(func_file, func_file is not None,
                     "The argument func_file is missing!")
    assert_satisfies(func_file, func_file.endswith('.py'),
                     "The argument func_file needs to end with '.py'")

    module_name = func_file[:-3]
    if isinstance(func, str):
        # String form: the caller must name the class contained in the source string.
        assert_satisfies(class_name, class_name is not None,
                         "The argument class_name is missing! " +
                         "It needs to reference the class in given string!")
        code = _CFUNC_CODE_TEMPLATE.format(func, class_name, class_name)
        derived_func_name = "metrics_{}".format(class_name)
        class_name = "{}.{}Wrapper".format(module_name, class_name)
    else:
        # Class form: verify the metric contract and extract the source via the provider.
        assert_satisfies(func, inspect.isclass(func), "The parameter `func` should be str or class")
        for method in ['map', 'reduce', 'metric']:
            assert_satisfies(func, method in func.__dict__,
                             "The class `func` needs to define method `{}`".format(method))
        assert_satisfies(class_name, class_name is None,
                         "If class is specified then class_name parameter needs to be None")
        class_name = "{}.{}Wrapper".format(module_name, func.__name__)
        derived_func_name = "metrics_{}".format(func.__name__)
        code = _CFUNC_CODE_TEMPLATE.format(source_provider(func), func.__name__, func.__name__)

    # If no explicit name was given, use one derived from the definition above.
    if not func_name:
        func_name = derived_func_name

    # Package the generated module into a jar and push it into the cluster's K/V store.
    tmpdir = tempfile.mkdtemp(prefix="h2o-func")
    func_arch_file = _create_zip_file("{}/func.jar".format(tmpdir), (func_file, code))
    dest_key = _put_key(func_arch_file, dest_key=func_name)
    return "python:{}={}".format(dest_key, class_name)
def check_frame_id(frame_id):
    """Check that the provided frame id is valid in Rapids language."""
    if frame_id is None:
        return
    if frame_id.strip() == "":
        raise H2OValueError("Frame id cannot be an empty string: %r" % frame_id)
    for pos, ch in enumerate(frame_id):
        # '$' character has special meaning at the beginning of the string; and prohibited anywhere else
        if ch == "$" and pos == 0:
            continue
        if ch not in _id_allowed_characters:
            raise H2OValueError("Character '%s' is illegal in frame id: %s" % (ch, frame_id))
    # An id may not look like a (possibly negative) number.
    if re.match(r"-?[0-9]", frame_id):
        raise H2OValueError("Frame id cannot start with a number: %s" % frame_id)
def get_human_readable_bytes(size):
    """
    Convert given number of bytes into a human readable representation, i.e. add prefix such as kb, Mb, Gb,
    etc. The `size` argument must be a non-negative integer.

    :param size: integer representing byte size of something
    :return: string representation of the size, in human-readable form
    """
    if size == 0: return "0"
    if size is None: return ""
    assert_is_type(size, int)
    assert size >= 0, "`size` cannot be negative, got %d" % size
    suffixes = "PTGMk"
    maxl = len(suffixes)
    for i in range(maxl + 1):
        shift = (maxl - i) * 10
        # Skip prefixes that are too large for this size.
        if (size >> shift) == 0:
            continue
        # Number of fractional digits to show: 3, 2 or 1 depending on magnitude, 0 if exact.
        ndigits = next((nd for nd in (3, 2, 1) if size >> (shift + 12 - nd * 3) == 0), 0)
        whole = size >> shift
        if ndigits == 0 or size == whole << shift:
            rounded = str(whole)
        else:
            rounded = "%.*f" % (ndigits, size / (1 << shift))
        suffix = suffixes[i] if i < maxl else ""
        return "%s %sb" % (rounded, suffix)
def normalize_slice(s, total):
    """
    Return a "canonical" version of slice ``s``.

    :param slice s: the original slice expression
    :param total int: total number of elements in the collection sliced by ``s``
    :return slice: a slice equivalent to ``s`` but not containing any negative indices or Nones.
    """
    def _clip(bound, default):
        # Resolve None to its default, shift negative indices by `total`, and clamp into [0, total].
        if bound is None:
            return default
        if bound < 0:
            return max(0, bound + total)
        return min(bound, total)

    step = 1 if s.step is None else s.step
    return slice(_clip(s.start, 0), _clip(s.stop, total), step)
def slice_is_normalized(s):
    """Return True if slice ``s`` in "normalized" form."""
    # All three fields must be concrete, and start may not exceed stop.
    has_all_fields = s.start is not None and s.stop is not None and s.step is not None
    return has_all_fields and s.start <= s.stop
def mojo_predict_pandas(dataframe, mojo_zip_path, genmodel_jar_path=None, classpath=None, java_options=None, verbose=False):
    """
    MOJO scoring function to take a Pandas frame and use MOJO model as zip file to score.

    :param dataframe: Pandas frame to score.
    :param mojo_zip_path: Path to MOJO zip downloaded from H2O.
    :param genmodel_jar_path: Optional, path to genmodel jar file. If None (default) then the h2o-genmodel.jar
        in the same folder as the MOJO zip will be used.
    :param classpath: Optional, specifies custom user defined classpath which will be used when scoring. If None
        (default) then the default classpath for this MOJO model will be used.
    :param java_options: Optional, custom user defined options for Java. By default ``-Xmx4g`` is used.
    :param verbose: Optional, if True, then additional debug information will be printed. False by default.
    :return: Pandas frame with predictions
    """
    tmp_dir = tempfile.mkdtemp()
    try:
        if not can_use_pandas():
            # Bug fix: the original raised `RuntimeException`, which is not a Python builtin and
            # would itself fail with NameError; RuntimeError is the intended exception.
            raise RuntimeError('Cannot import pandas')
        import pandas
        assert_is_type(dataframe, pandas.DataFrame)
        input_csv_path = os.path.join(tmp_dir, 'input.csv')
        prediction_csv_path = os.path.join(tmp_dir, 'prediction.csv')
        dataframe.to_csv(input_csv_path)
        mojo_predict_csv(input_csv_path=input_csv_path, mojo_zip_path=mojo_zip_path,
                         output_csv_path=prediction_csv_path, genmodel_jar_path=genmodel_jar_path,
                         classpath=classpath, java_options=java_options, verbose=verbose)
        return pandas.read_csv(prediction_csv_path)
    finally:
        # Always remove the scratch directory, even when scoring fails.
        shutil.rmtree(tmp_dir)
def mojo_predict_csv(input_csv_path, mojo_zip_path, output_csv_path=None, genmodel_jar_path=None, classpath=None, java_options=None, verbose=False):
    """
    MOJO scoring function to take a CSV file and use MOJO model as zip file to score.

    :param input_csv_path: Path to input CSV file.
    :param mojo_zip_path: Path to MOJO zip downloaded from H2O.
    :param output_csv_path: Optional, name of the output CSV file with computed predictions. If None (default),
        then predictions will be saved as prediction.csv in the same folder as the MOJO zip.
    :param genmodel_jar_path: Optional, path to genmodel jar file. If None (default) then the h2o-genmodel.jar
        in the same folder as the MOJO zip will be used.
    :param classpath: Optional, specifies custom user defined classpath which will be used when scoring. If None
        (default) then the default classpath for this MOJO model will be used.
    :param java_options: Optional, custom user defined options for Java.
        By default ``-Xmx4g -XX:ReservedCodeCacheSize=256m`` is used.
    :param verbose: Optional, if True, then additional debug information will be printed. False by default.
    :return: List of computed predictions
    """
    default_java_options = '-Xmx4g -XX:ReservedCodeCacheSize=256m'
    prediction_output_file = 'prediction.csv'

    # Locate and sanity-check the java executable.
    java = H2OLocalServer._find_java()
    H2OLocalServer._check_java(java=java, verbose=verbose)

    # The input CSV must exist.
    if verbose:
        print("input_csv:\t%s" % input_csv_path)
    if not os.path.isfile(input_csv_path):
        raise RuntimeError("Input csv cannot be found at %s" % input_csv_path)

    # The MOJO zip must exist; its folder is the default location for outputs and the jar.
    mojo_zip_path = os.path.abspath(mojo_zip_path)
    if verbose:
        print("mojo_zip:\t%s" % mojo_zip_path)
    if not os.path.isfile(mojo_zip_path):
        raise RuntimeError("MOJO zip cannot be found at %s" % mojo_zip_path)
    parent_dir = os.path.dirname(mojo_zip_path)

    if output_csv_path is None:
        output_csv_path = os.path.join(parent_dir, prediction_output_file)

    # Resolve and validate the genmodel jar.
    if genmodel_jar_path is None:
        genmodel_jar_path = os.path.join(parent_dir, gen_model_file_name)
    if verbose:
        print("genmodel_jar:\t%s" % genmodel_jar_path)
    if not os.path.isfile(genmodel_jar_path):
        raise RuntimeError("Genmodel jar cannot be found at %s" % genmodel_jar_path)
    if verbose and output_csv_path is not None:
        print("output_csv:\t%s" % output_csv_path)

    if classpath is None:
        classpath = genmodel_jar_path
    if verbose:
        print("classpath:\t%s" % classpath)

    if java_options is None:
        java_options = default_java_options
    if verbose:
        print("java_options:\t%s" % java_options)

    # Assemble and run the scoring command.
    cmd = [java]
    cmd.extend(java_options.split(' '))
    cmd.extend(["-cp", classpath, h2o_predictor_class, "--mojo", mojo_zip_path, "--input", input_csv_path,
                '--output', output_csv_path, '--decimal'])
    if verbose:
        print("java cmd:\t%s" % " ".join(cmd))
    subprocess.check_call(cmd, shell=False)

    # Load the predictions back as a list of dicts.
    with open(output_csv_path) as csv_file:
        return list(csv.DictReader(csv_file))
def deprecated(message):
    """The decorator to mark deprecated functions."""
    from traceback import extract_stack
    assert message, "`message` argument in @deprecated is required."

    def deprecated_decorator(fun):
        def decorator_invisible(*args, **kwargs):
            # Report the call site (one frame above this wrapper) before delegating.
            stack = extract_stack()
            assert len(stack) >= 2 and stack[-1][2] == "decorator_invisible", "Got confusing stack... %r" % stack
            caller = stack[-2]
            print("[WARNING] in %s line %d:" % (caller[0], caller[1]))
            print(" >>> %s" % (caller[3] or "????"))
            print(" ^^^^ %s" % message)
            return fun(*args, **kwargs)

        # Make the wrapper look like the original function, with the deprecation note as its doc.
        decorator_invisible.__doc__ = message
        decorator_invisible.__name__ = fun.__name__
        decorator_invisible.__module__ = fun.__module__
        decorator_invisible.__deprecated__ = True
        return decorator_invisible

    return deprecated_decorator
def join(self):
    """Block until the grid search finishes computing."""
    self._future = False        # the grid is no longer running asynchronously
    self._job.poll()            # wait for the backend job to complete
    self._job = None            # drop the finished job handle
def deepfeatures(self, test_data, layer):
    """
    Obtain a hidden layer's details on a dataset.

    :param test_data: Data to create a feature space on.
    :param int layer: Index of the hidden layer.
    :returns: A dictionary of hidden layer details for each model.
    """
    result = {}
    for model in self.models:
        result[model.model_id] = model.deepfeatures(test_data, layer)
    return result
def summary(self, header=True):
    """Print a detailed summary of the explored models."""
    table = []
    for model in self.models:
        model_summary = model._model_json["output"]["model_summary"]
        row = list(model_summary.cell_values[0])
        row[0] = model.model_id      # replace the first cell with the model's id
        table.append(row)
    print()
    if header:
        print('Grid Summary:')
        print()
    # Column headers are taken from the last model's summary table.
    H2ODisplay(table, ['Model Id'] + model_summary.col_header[1:], numalign="left", stralign="left")
def show(self):
    """Print models sorted by metric."""
    if self.models:
        print(self.sorted_metric_table())
    else:
        # No models built yet -- display the planned hyperparameter combinations instead.
        combos = itertools.product(*list(self.hyper_params.values()))
        cells = [[idx + 1, list(combo)] for idx, combo in enumerate(combos)]
        print(H2OTwoDimTable(
            col_header=['Model', 'Hyperparameters: [' + ', '.join(list(self.hyper_params.keys())) + ']'],
            table_header='Grid Search of Model ' + self.model.__class__.__name__,
            cell_values=cells))
def get_hyperparams(self, id, display=True):
    """
    Get the hyperparameters of a model explored by grid search.

    :param str id: The model id of the model with hyperparameters of interest.
    :param bool display: Flag to indicate whether to display the hyperparameter names.
    :returns: A list of the hyperparameters for the specified model.
    """
    idx = id if is_type(id, int) else self.model_ids.index(id)
    model = self[idx]
    # With cross-validation on, parameters such as max_runtime_secs live on the fold models,
    # not on the main model that is returned.
    if model._is_xvalidated:
        model = h2o.get_model(model._xval_keys[0])

    res = []
    for h in self.hyper_params:
        actual = model.params[h]['actual']
        res.append(actual[0] if isinstance(actual, list) else actual)
    if display:
        print('Hyperparameters: [' + ', '.join(list(self.hyper_params.keys())) + ']')
    return res
def get_hyperparams_dict(self, id, display=True):
    """
    Derived and returned the model parameters used to train the particular grid search model.

    :param str id: The model id of the model with hyperparameters of interest.
    :param bool display: Flag to indicate whether to display the hyperparameter names.
    :returns: A dict of model pararmeters derived from the hyper-parameters used to train this particular model.
    """
    idx = id if is_type(id, int) else self.model_ids.index(id)
    model = self[idx]
    # With cross-validation on, parameters such as max_runtime_secs live on the fold models,
    # not on the main model that is returned.
    if model._is_xvalidated:
        model = h2o.get_model(model._xval_keys[0])

    model_params = {}
    for param_name in self.hyper_names:
        actual = model.params[param_name]['actual']
        model_params[param_name] = actual[0] if isinstance(actual, list) else actual
    if display:
        print('Hyperparameters: [' + ', '.join(list(self.hyper_params.keys())) + ']')
    return model_params
def get_grid(self, sort_by=None, decreasing=None):
    """
    Retrieve an H2OGridSearch instance.

    Optionally specify a metric by which to sort models and a sort order.

    Note that if neither cross-validation nor a validation frame is used in the grid search, then the
    training metrics will display in the "get grid" output. If a validation frame is passed to the grid, and
    ``nfolds = 0``, then the validation metrics will display. However, if ``nfolds`` > 1, then cross-validation
    metrics will display even if a validation frame is provided.

    :param str sort_by: A metric by which to sort the models in the grid space. Choices are: ``"logloss"``,
        ``"residual_deviance"``, ``"mse"``, ``"auc"``, ``"r2"``, ``"accuracy"``, ``"precision"``, ``"recall"``,
        ``"f1"``, etc.
    :param bool decreasing: Sort the models in decreasing order of metric if true, otherwise sort in increasing
        order (default).
    :returns: A new H2OGridSearch instance optionally sorted on the specified metric.
    """
    # No sorting requested -- the current grid already is the answer.
    if sort_by is None and decreasing is None: return self
    grid_json = h2o.api("GET /99/Grids/%s" % self._id, data={"sort_by": sort_by, "decreasing": decreasing})
    grid = H2OGridSearch(self.model, self.hyper_params, self._id)
    # The server returns model ids already sorted in the requested order.
    grid.models = [h2o.get_model(key['name']) for key in grid_json['model_ids']]  # reordered
    # Use the first (best) model to determine which metrics class this grid should expose.
    first_model_json = h2o.api("GET /99/Models/%s" % grid_json['model_ids'][0]['name'])['models'][0]
    model_class = H2OGridSearch._metrics_class(first_model_json)
    # Build a throwaway instance of the metrics class whose state is copied onto the grid below.
    m = model_class()
    m._id = self._id
    m._grid_json = grid_json
    # m._metrics_class = metrics_class
    m._parms = grid._parms
    # Mix the metrics class into the grid object, then copy over the helper instance's attributes.
    H2OEstimator.mixin(grid, model_class)
    grid.__dict__.update(m.__dict__.copy())
    return grid
def F1(self, thresholds=None, train=False, valid=False, xval=False):
    """
    Get the F1 values for a set of thresholds for the models explored.

    If all are False (default), then return the training metric value.
    If more than one options is set to True, then return a dictionary of metrics where
    the keys are "train", "valid", and "xval".

    :param List thresholds: If None, then the thresholds in this set of metrics will be used.
    :param bool train: If True, return the F1 value for the training data.
    :param bool valid: If True, return the F1 value for the validation data.
    :param bool xval: If True, return the F1 value for each of the cross-validated splits.
    :returns: Dictionary of model keys to F1 values
    """
    scores = {}
    for model in self.models:
        scores[model.model_id] = model.F1(thresholds, train, valid, xval)
    return scores
def varimp(self, use_pandas=False):
    """
    Return the Importance of components associated with a pca model.

    :param use_pandas: ``bool`` (default: ``False``); if True and pandas is available, return a DataFrame.
    :returns: the importance table (list of rows, or a pandas DataFrame), or None if unavailable.
    """
    output = self._model_json["output"]
    importance = output["importance"] if "importance" in output else None
    if importance:
        vals = importance.cell_values
        header = importance.col_header
        if use_pandas and can_use_pandas():
            import pandas
            return pandas.DataFrame(vals, columns=header)
        return vals
    print("Warning: This model doesn't have importances of components.")
def proj_archetypes(self, test_data, reverse_transform=False):
    """
    Convert archetypes of the model into original feature space.

    :param H2OFrame test_data: The dataset upon which the model was trained.
    :param bool reverse_transform: Whether the transformation of the training data during model-building
        should be reversed on the projected archetypes.
    :returns: model archetypes projected back into the original training data's feature space.
    """
    if test_data is None or test_data.nrow == 0:
        raise ValueError("Must specify test data")
    endpoint = "POST /3/Predictions/models/%s/frames/%s" % (self.model_id, test_data.frame_id)
    j = h2o.api(endpoint, data={"project_archetypes": True, "reverse_transform": reverse_transform})
    return h2o.get_frame(j["model_metrics"][0]["predictions"]["frame_id"]["name"])
def screeplot(self, type="barplot", **kwargs):
    """
    Produce the scree plot.

    Library ``matplotlib`` is required for this function.

    :param str type: either ``"barplot"`` or ``"lines"``.
    """
    # Bug fix: `kwargs.pop("server")` raised KeyError whenever the caller did not pass
    # `server=...`; give it a default of False instead.
    is_server = kwargs.pop("server", False)
    if kwargs:
        raise ValueError("Unknown arguments %s to screeplot()" % ", ".join(kwargs.keys()))
    # check for matplotlib. exit if absent.
    try:
        import matplotlib
        # NOTE(review): the `warn=` keyword of matplotlib.use was removed in matplotlib 3.x --
        # confirm the range of matplotlib versions this must support.
        if is_server: matplotlib.use('Agg', warn=False)
        import matplotlib.pyplot as plt
    except ImportError:
        print("matplotlib is required for this function!")
        return
    # Variance of each component = square of its standard deviation from the importance table.
    variances = [s ** 2 for s in self._model_json['output']['importance'].cell_values[0][1:]]
    xs = list(range(1, len(variances) + 1))
    plt.xlabel('Components')
    plt.ylabel('Variances')
    plt.title('Scree Plot')
    plt.xticks(xs)
    if type == "barplot":
        plt.bar(xs, variances)
    elif type == "lines":
        plt.plot(xs, variances, 'b--')
    if not is_server: plt.show()
def translate_name(name):
    """
    Convert names with underscores into camelcase.

    For example:
        "num_rows" => "numRows"
        "very_long_json_name" => "veryLongJsonName"
        "build_GBM_model" => "buildGbmModel"
        "KEY" => "key"
        "middle___underscores" => "middleUnderscores"
        "_exclude_fields" => "_excludeFields" (retain initial/trailing underscores)
        "__http_status__" => "__httpStatus__"

    :param name: name to be converted
    """
    parts = name.split("_")
    # Leading underscores produce empty parts; turn each back into a literal "_".
    first = 0
    while parts[first] == "":
        parts[first] = "_"
        first += 1
    # First real word is lowercased, every following word is capitalized.
    parts[first] = parts[first].lower()
    for k in range(first + 1, len(parts)):
        parts[k] = parts[k].capitalize()
    # Trailing underscores likewise became empty parts; restore them.
    last = len(parts) - 1
    while parts[last] == "":
        parts[last] = "_"
        last -= 1
    return "".join(parts)
def dedent(ind, text):
    """
    Dedent text to the specific indentation level.

    :param ind: common indentation level for the resulting text (number of spaces to append to every line)
    :param text: text that should be transformed.
    :return: ``text`` with all common indentation removed, and then the specified amount of indentation added.
    """
    stripped = textwrap.dedent(text)
    if ind == 0:
        return stripped
    prefix = " " * ind
    return "\n".join(prefix + line for line in stripped.split("\n"))
def extractRunInto(javaLogText):
    """
    This function will extract the various operation time for GLRM model building iterations.

    :param javaLogText: path to the Java log file written during a GLRM model build.
    :return: None.  The collected per-phase timings (in ms) are printed to stdout.
    """
    # Marker strings (module-level globals) that identify each GLRM phase in the log.
    global g_initialXY
    global g_reguarlize_Y
    global g_regularize_X_objective
    global g_updateX
    global g_updateY
    global g_objective
    global g_stepsize
    global g_history
    if os.path.isfile(javaLogText):
        run_result = dict()
        run_result["total time (ms)"] = []
        run_result["initialXY (ms)"] = []
        run_result["regularize Y (ms)"] = []
        run_result["regularize X and objective (ms)"] = []
        run_result["update X (ms)"] = []
        run_result["update Y (ms)"] = []
        run_result["objective (ms)"] = []
        run_result["step size (ms)"] = []
        run_result["update history (ms)"] = []
        total_run_time = -1    # -1 marks "no log section seen yet"
        val = 0.0
        with open(javaLogText, 'r') as thefile:  # go into tempfile and grab test run info
            for each_line in thefile:
                temp_string = each_line.split()
                # NOTE(review): indentation of this cascade was reconstructed from a flattened
                # dump; the marker checks are assumed to sit inside the non-empty-line guard.
                if len(temp_string) > 0:
                    # Last whitespace-separated token carries the timing value; strip stray backslashes.
                    val = temp_string[-1].replace('\\','')
                    if g_initialXY in each_line:  # start of a new file
                        if total_run_time > 0:  # update total run time
                            run_result["total time (ms)"].append(total_run_time)
                            total_run_time = 0.0
                        else:
                            total_run_time = 0.0
                        run_result["initialXY (ms)"].append(float(val))
                        total_run_time = total_run_time+float(val)
                    if g_reguarlize_Y in each_line:
                        run_result["regularize Y (ms)"].append(float(val))
                        total_run_time = total_run_time+float(val)
                    if g_regularize_X_objective in each_line:
                        run_result["regularize X and objective (ms)"].append(float(val))
                        total_run_time = total_run_time+float(val)
                    if g_updateX in each_line:
                        run_result["update X (ms)"].append(float(val))
                        total_run_time = total_run_time+float(val)
                    if g_updateY in each_line:
                        run_result["update Y (ms)"].append(float(val))
                        total_run_time = total_run_time+float(val)
                    if g_objective in each_line:
                        run_result["objective (ms)"].append(float(val))
                        total_run_time = total_run_time+float(val)
                    if g_stepsize in each_line:
                        run_result["step size (ms)"].append(float(val))
                        total_run_time = total_run_time+float(val)
                    if g_history in each_line:
                        run_result["update history (ms)"].append(float(val))
                        total_run_time = total_run_time+float(val)
        run_result["total time (ms)"].append(total_run_time)  # save the last one
        print("Run result summary: \n {0}".format(run_result))
    else:
        print("Cannot find your java log file. Nothing is done.\n")
def main(argv):
    """
    Main program. Take user input, parse it and call other functions to execute the commands
    and extract run summary and store run result in json file

    @return: none
    """
    global g_test_root_dir
    global g_temp_filename
    if len(argv) < 2:
        # Not enough arguments -- explain the usage and bail out.
        print("invoke this script as python extractGLRMRuntimeJavaLog.py javatextlog.\n")
        sys.exit(1)
    # we may be in business
    java_log = argv[1]   # filename where the java log is stored
    print("your java text is {0}".format(java_log))
    extractRunInto(java_log)
def close(self):
    """
    Close an existing connection; once closed it cannot be used again.

    Closing is best-effort and idempotent: connections are also closed
    automatically by __del__() / __exit__() / atexit() handlers, but calling
    this method explicitly is perfectly reasonable too.
    """
    if self._session_id:
        try:
            # If the server has gone bad, we don't want to wait forever...
            if self._timeout is None:
                self._timeout = 1
            self.request("DELETE /4/sessions/%s" % self._session_id)
            self._print("H2O session %s closed." % self._session_id)
        except Exception:
            # A dead server simply means there is nothing left to close.
            pass
        self._session_id = None
    self._stage = -1
"resource": ""
} |
def session_id(self):
    """
    Return the session id of the current connection.

    The id is obtained lazily, via an API request, the first time it is
    needed: creating a session puts it into the DKV on the server, which
    effectively locks the cluster.  Once issued, the id stays the same until
    the connection is closed.
    """
    if self._session_id is None:
        reply = self.request("POST /4/sessions")
        self._session_id = reply.get("session_key") or reply.get("session_id")
    return CallableString(self._session_id)
"resource": ""
} |
def start_logging(self, dest=None):
    """
    Start logging all API requests to the provided destination.

    :param dest: Where to write the log: either a filename (str), or an open file handle (file). If not given,
        then a new temporary file will be created.
    """
    assert_is_type(dest, None, str, type(sys.stdout))
    if dest is None:
        # No destination given: log into a fresh temporary file.
        dest = os.path.join(tempfile.mkdtemp(), "h2o-connection.log")
    self._print("Now logging all API requests to file %r" % dest)
    self._logging_dest = dest
    self._is_logging = True
"resource": ""
} |
def _prepare_data_payload(data):
    """
    Make a copy of the `data` object, preparing it to be sent to the server.

    The data will be sent via x-www-form-urlencoded or multipart/form-data mechanisms. Both of them work with
    plain lists of key/value pairs, so this method converts the data into such format.
    """
    if not data: return None
    res = {}
    for key, value in viewitems(data):
        if value is None: continue  # don't send args set to None so backend defaults take precedence
        if isinstance(value, list):
            value = stringify_list(value)
        elif isinstance(value, dict):
            # A dict carrying a "...KeyV3" schema is a server-side key reference:
            # collapse it to its name; any other dict is serialized whole.
            if "__meta" in value and value["__meta"]["schema_name"].endswith("KeyV3"):
                value = value["name"]
            else:
                value = stringify_dict(value)
        else:
            # Everything else (numbers, bools, enums, ...) goes over the wire as a string.
            value = str(value)
        res[key] = value
    return res
"resource": ""
} |
q267236 | H2OConnection._prepare_file_payload | test | def _prepare_file_payload(filename):
"""
Prepare `filename` to be sent to the server.
The "preparation" consists of creating a data structure suitable
for passing to requests.request().
"""
if not filename: return None
absfilename = os.path.abspath(filename)
if not os.path.exists(absfilename):
raise H2OValueError("File %s does not exist" % filename, skip_frames=1)
return {os.path.basename(absfilename): open(absfilename, "rb")} | python | {
"resource": ""
} |
def _log_start_transaction(self, endpoint, data, json, files, params):
    """Log the beginning of an API request."""
    # TODO: add information about the caller, i.e. which module + line of code called the .request() method
    # This can be done by fetching current traceback and then traversing it until we find the request function
    # The counter is incremented unconditionally so request numbering stays
    # consistent even across periods when logging is switched off.
    self._requests_counter += 1
    if not self._is_logging: return
    msg = "\n---- %d --------------------------------------------------------\n" % self._requests_counter
    msg += "[%s] %s\n" % (time.strftime("%H:%M:%S"), endpoint)
    if params is not None: msg += " params: {%s}\n" % ", ".join("%s:%s" % item for item in viewitems(params))
    if data is not None: msg += " body: {%s}\n" % ", ".join("%s:%s" % item for item in viewitems(data))
    if json is not None:
        # The parameter `json` shadows the stdlib module, hence the aliased local import.
        import json as j
        msg += " json: %s\n" % j.dumps(json)
    if files is not None: msg += " file: %s\n" % ", ".join(f.name for f in viewvalues(files))
    self._log_message(msg + "\n")
"resource": ""
} |
q267238 | H2OConnection._log_end_transaction | test | def _log_end_transaction(self, start_time, response):
"""Log response from an API request."""
if not self._is_logging: return
elapsed_time = int((time.time() - start_time) * 1000)
msg = "<<< HTTP %d %s (%d ms)\n" % (response.status_code, response.reason, elapsed_time)
if "Content-Type" in response.headers:
msg += " Content-Type: %s\n" % response.headers["Content-Type"]
msg += response.text
self._log_message(msg + "\n\n") | python | {
"resource": ""
} |
def _log_message(self, msg):
    """
    Write `msg` to the destination ``self._logging_dest``.

    A string destination is treated as a file name: the message is appended
    and the file closed right away.  An open file handle is written to
    directly and deliberately left open.
    """
    dest = self._logging_dest
    if not is_type(dest, str):
        dest.write(msg)
        return
    with open(dest, "at", encoding="utf-8") as logfile:
        logfile.write(msg)
"resource": ""
} |
def _process_response(response, save_to):
    """
    Given a response object, prepare it to be handed over to the external caller.

    Preparation steps include:
       * detect if the response has error status, and convert it to an appropriate exception;
       * detect Content-Type, and based on that either parse the response as JSON or return as plain text.
    """
    status_code = response.status_code
    # Download branch: a successful response with a `save_to` target is streamed
    # straight to disk and the local path returned instead of parsed content.
    if status_code == 200 and save_to:
        if save_to.startswith("~"): save_to = os.path.expanduser(save_to)
        if os.path.isdir(save_to) or save_to.endswith(os.path.sep):
            # Target is a directory: derive the file name from the response headers.
            dirname = os.path.abspath(save_to)
            filename = H2OConnection._find_file_name(response)
        else:
            dirname, filename = os.path.split(os.path.abspath(save_to))
        fullname = os.path.join(dirname, filename)
        try:
            if not os.path.exists(dirname):
                os.makedirs(dirname)
            with open(fullname, "wb") as f:
                for chunk in response.iter_content(chunk_size=65536):
                    if chunk:  # Empty chunks may occasionally happen
                        f.write(chunk)
        except OSError as e:
            raise H2OValueError("Cannot write to file %s: %s" % (fullname, e))
        return fullname
    content_type = response.headers.get("Content-Type", "")
    if ";" in content_type:  # Remove a ";charset=..." part
        content_type = content_type[:content_type.index(";")]
    # Auto-detect response type by its content-type. Decode JSON, all other responses pass as-is.
    if content_type == "application/json":
        try:
            data = response.json(object_pairs_hook=H2OResponse)
        except (JSONDecodeError, requests.exceptions.ContentDecodingError) as e:
            raise H2OServerError("Malformed JSON from server (%s):\n%s" % (str(e), response.text))
    else:
        data = response.text
    # Success (200 = "Ok", 201 = "Created", 202 = "Accepted", 204 = "No Content")
    if status_code in {200, 201, 202, 204}:
        return data
    # Client errors (400 = "Bad Request", 404 = "Not Found", 412 = "Precondition Failed")
    if status_code in {400, 404, 412} and isinstance(data, (H2OErrorV3, H2OModelBuilderErrorV3)):
        raise H2OResponseError(data)
    # Server errors (notably 500 = "Server Error")
    # Note that it is possible to receive valid H2OErrorV3 object in this case, however it merely means the server
    # did not provide the correct status code.
    raise H2OServerError("HTTP %d %s:\n%r" % (status_code, response.reason, data))
"resource": ""
} |
def _print(self, msg, flush=False, end="\n"):
    """Print a connection status message via print2(), but only in verbose mode."""
    if not self._verbose:
        return
    print2(msg, end=end, flush=flush)
"resource": ""
} |
def get_automl(project_name):
    """
    Retrieve information about an AutoML instance.

    :param str project_name: A string indicating the project_name of the automl instance to retrieve.

    :returns: A dictionary containing the project_name, leader model, and leaderboard.
    """
    automl_json = h2o.api("GET /99/AutoML/%s" % project_name)
    project_name = automl_json["project_name"]
    leaderboard_list = [key["name"] for key in automl_json['leaderboard']['models']]
    # The leaderboard is already sorted server-side, so the first entry is the leader.
    if leaderboard_list is not None and len(leaderboard_list) > 0:
        leader_id = leaderboard_list[0]
    else:
        leader_id = None
    leader = h2o.get_model(leader_id)
    # Intentionally mask the progress bar here since showing multiple progress bars is confusing to users.
    # If any failure happens, revert back to user's original setting for progress and display the error message.
    is_progress = H2OJob.__PROGRESS_BAR__
    h2o.no_progress()
    try:
        # Parse leaderboard H2OTwoDimTable & return as an H2OFrame
        leaderboard = h2o.H2OFrame(
            automl_json["leaderboard_table"].cell_values,
            column_names=automl_json["leaderboard_table"].col_header)
    except Exception as ex:
        raise ex
    finally:
        if is_progress is True:
            h2o.show_progress()
    # Drop the header row carried in the table's cell values.
    leaderboard = leaderboard[1:]
    automl_dict = {'project_name': project_name, "leader": leader, "leaderboard": leaderboard}
    return automl_dict
"resource": ""
} |
def download_pojo(self, path="", get_genmodel_jar=False, genmodel_name=""):
    """
    Download the POJO for the leader model in AutoML to the directory specified by path.

    If path is an empty string, then dump the output to screen.

    :param path: An absolute path to the directory where POJO should be saved.
    :param get_genmodel_jar: if True, then also download h2o-genmodel.jar and store it in folder ``path``.
    :param genmodel_name: Custom name of genmodel jar.
    :returns: name of the POJO file written.
    """
    leader_model = self.leader
    return h2o.download_pojo(leader_model, path, get_jar=get_genmodel_jar, jar_name=genmodel_name)
"resource": ""
} |
def download_mojo(self, path=".", get_genmodel_jar=False, genmodel_name=""):
    """
    Download the leader model in AutoML in MOJO format.

    :param path: the path where MOJO file should be saved.
    :param get_genmodel_jar: if True, then also download h2o-genmodel.jar and store it in folder ``path``.
    :param genmodel_name: Custom name of genmodel jar.
    :returns: name of the MOJO file written.
    """
    leader_model = self.leader
    return ModelBase.download_mojo(leader_model, path, get_genmodel_jar, genmodel_name)
"resource": ""
} |
def fit(self, X, y=None, **params):
    """
    Fit this object by computing the means and standard deviations used by the transform method.

    :param X: An H2OFrame; may contain NAs and/or categoricals.
    :param y: None (Ignored)
    :param params: Ignored
    :returns: This H2OScaler instance
    """
    # User-supplied centers/scales (given as tuples/lists) take precedence.
    if isinstance(self.parms["center"], (tuple, list)): self._means = self.parms["center"]
    if isinstance(self.parms["scale"], (tuple, list)): self._stds = self.parms["scale"]
    # Only compute (or disable) centering/scaling when nothing is known yet.
    # The previous code put the "else: False" on the wrong condition, which
    # unconditionally wiped user-supplied values back to False.
    if self.means is None:
        if self.parms["center"]:
            self._means = X.mean(return_frame=True).getrow()
        else:
            self._means = False
    if self.stds is None:
        if self.parms["scale"]:
            self._stds = X.sd()
        else:
            self._stds = False
    return self
"resource": ""
} |
def transform(self, X, y=None, **params):
    """
    Scale an H2OFrame with the means and standard deviations computed by fit().

    :param X: An H2OFrame; may contain NAs and/or categoricals.
    :param y: None (Ignored)
    :param params: (Ignored)
    :returns: A scaled H2OFrame.
    """
    center, spread = self.means, self.stds
    return X.scale(center, spread)
"resource": ""
} |
def inverse_transform(self, X, y=None, **params):
    """
    Undo the scale transformation, column by column (in place).

    :param X: An H2OFrame; may contain NAs and/or categoricals.
    :param y: None (Ignored)
    :param params: (Ignored)
    :returns: An H2OFrame
    """
    # Reverse x' = (x - mean) / std  ->  x = mean + std * x'
    for col in range(X.ncol):
        X[col] = self.means[col] + self.stds[col] * X[col]
    return X
"resource": ""
} |
def extract_true_string(string_content):
    """
    Strip the junk that precedes the actual payload of a Jenkins console line.

    The Jenkins console output is utf-8 encoded but the redirect helper can
    only handle ASCII, which leaves a '[0m' escape residue at the start of
    some lines; everything up to and including that marker is discarded.

    Parameters
    ----------
    string_content : str
        contains a line read in from jenkins console

    :return: str: the content after the '[0m' marker, or the original line
        when no marker is present
    """
    before, marker, after = string_content.partition('[0m')
    return after if marker else string_content
"resource": ""
} |
def find_node_name(each_line, temp_func_list):
    """
    Find the slave machine where a Jenkins job was executed on.  The node name
    is saved into g_failed_test_info_dict, and once found this function removes
    itself from temp_func_list so the work is not repeated.

    Parameters
    ----------
    each_line : str
        contains a line read in from jenkins console
    temp_func_list : list of Python function handles
        contains a list of functions that we want to invoke to extract information from
        the Jenkins console text.

    :return: bool to determine if text mining should continue on the jenkins console text
    """
    global g_node_name
    global g_failed_test_info_dict
    if g_node_name in each_line:
        # (removed a dead `each_line.split()` whose result was never used)
        [start, found, endstr] = each_line.partition(g_node_name)
        if found:
            temp_strings = endstr.split()
            g_failed_test_info_dict["6.node_name"] = extract_true_string(temp_strings[1])
            temp_func_list.remove(find_node_name)
    return True
"resource": ""
} |
def find_git_hash_branch(each_line, temp_func_list):
    """
    Find the git hash and branch info that a Jenkins job was taken from.  Both
    are saved into g_failed_test_info_dict, and once found this function
    removes itself from temp_func_list so the work is not repeated.

    Parameters
    ----------
    each_line : str
        contains a line read in from jenkins console
    temp_func_list : list of Python function handles
        contains a list of functions that we want to invoke to extract information from
        the Jenkins console text.

    :return: bool to determine if text mining should continue on the jenkins console text
    """
    global g_git_hash_branch
    global g_failed_test_info_dict
    if g_git_hash_branch in each_line:
        _, _, remainder = each_line.partition(g_git_hash_branch)
        tokens = remainder.strip().split()
        # Expect "<hash> <branch>" after the marker.
        if len(tokens) > 1:
            g_failed_test_info_dict["4.git_hash"] = tokens[0]
            g_failed_test_info_dict["5.git_branch"] = tokens[1]
            temp_func_list.remove(find_git_hash_branch)
    return True
"resource": ""
} |
def find_build_timeout(each_line, temp_func_list):
    """
    Detect whether the Jenkins job took too long and was killed.  When the
    timeout marker is seen, it is recorded in g_failed_test_info_dict and
    mining stops.

    Parameters
    ----------
    each_line : str
        contains a line read in from jenkins console
    temp_func_list : list of Python function handles
        contains a list of functions that we want to invoke to extract information from
        the Jenkins console text.

    :return: bool to determine if text mining should continue on the jenkins console text
    """
    global g_build_timeout
    global g_failed_test_info_dict
    global g_failure_occurred
    if g_build_timeout not in each_line:
        return True
    g_failed_test_info_dict["8.build_timeout"] = 'Yes'
    g_failure_occurred = True
    # build timeout was found, no need to continue mining the console text
    return False
"resource": ""
} |
def find_build_failure(each_line,temp_func_list):
    """
    Find if a Jenkins job has failed to build. It will save this
    information in g_failed_test_info_dict. In addition, it will delete this particular
    function handle off the temp_func_list as we do not need to perform this action again.

    Parameters
    ----------
    each_line : str
        contains a line read in from jenkins console
    temp_func_list : list of Python function handles
        contains a list of functions that we want to invoke to extract information from
        the Jenkins console text.

    :return: bool to determine if text mining should continue on the jenkins console text
    """
    global g_build_success
    global g_build_success_tests
    global g_failed_test_info_dict
    global g_failure_occurred
    global g_build_failed_message
    # Scan the line (lower-cased) for any of the known build-failure markers.
    for ind in range(0,len(g_build_failed_message)):
        if g_build_failed_message[ind] in each_line.lower():
            # The first marker is ambiguous: when individual jobs already failed its
            # presence merely echoes those failures, so it is skipped in that case.
            if ((ind == 0) and (len(g_failed_jobs) > 0)):
                continue
            else:
                g_failure_occurred = True
                g_failed_test_info_dict["7.build_failure"] = 'Yes'
                temp_func_list.remove(find_build_failure)
                return False
    return True
"resource": ""
} |
def find_build_id(each_line,temp_func_list):
    """
    Find the build id of a jenkins job. It will save this
    information in g_failed_test_info_dict. In addition, it will delete this particular
    function handle off the temp_func_list as we do not need to perform this action again.

    Parameters
    ----------
    each_line : str
        contains a line read in from jenkins console
    temp_func_list : list of Python function handles
        contains a list of functions that we want to invoke to extract information from
        the Jenkins console text.

    :return: bool to determine if text mining should continue on the jenkins console text
    """
    global g_before_java_file
    global g_java_filenames
    global g_build_id_text
    global g_jenkins_url
    global g_output_filename
    global g_output_pickle_filename
    if g_build_id_text in each_line:
        [startStr,found,endStr] = each_line.partition(g_build_id_text)
        g_failed_test_info_dict["2.build_id"] = endStr.strip()
        temp_func_list.remove(find_build_id)
        # Once the build id is known, assemble the artifact URL out of host/view/job/build-id.
        # NOTE(review): os.path.join on URL pieces works on POSIX slaves only -- confirm
        # this script never runs where os.path.sep is a backslash.
        g_jenkins_url = os.path.join('http://',g_jenkins_url,'view',g_view_name,'job',g_failed_test_info_dict["1.jobName"],g_failed_test_info_dict["2.build_id"],'artifact')
    return True
"resource": ""
} |
def extract_job_build_url(url_string):
    """
    From user input, grab the jenkins job name and saved it in g_failed_test_info_dict.
    In addition, it will grab the jenkins url and the view name into g_jenkins_url, and
    g_view_name.

    Parameters
    ----------
    url_string : str
        contains information on the jenkins job whose console output we are interested in,
        e.g. http://<host>/view/<view>/job/<job>/...

    :return: none
    """
    global g_failed_test_info_dict
    global g_jenkins_url
    global g_view_name
    tempString = url_string.strip('/').split('/')
    # Indexes 2 (host), 4 (view) and 6 (job name) are read below, so at least 7
    # components are required; the old `< 6` check let a 6-element split through
    # and crashed with an IndexError.
    if len(tempString) < 7:
        print("Illegal URL resource address.\n")
        sys.exit(1)
    g_failed_test_info_dict["1.jobName"] = tempString[6]
    g_jenkins_url = tempString[2]
    g_view_name = tempString[4]
"resource": ""
} |
def grab_java_message():
    """scan through the java output text and extract the bad java messages that may or may not happened when
    unit tests are run.  It will not record any bad java messages that are stored in g_ok_java_messages.

    :return: none
    """
    global g_temp_filename
    global g_current_testname
    global g_java_start_text
    global g_ok_java_messages
    global g_java_general_bad_messages  # store bad java messages not associated with running a unit test
    global g_java_general_bad_message_types
    global g_failure_occurred
    global g_java_message_type
    global g_all_java_message_type
    global g_toContinue
    java_messages = []  # store all bad java messages associated with running a unit test
    java_message_types = []  # store all bad java message types associated with running a unit test
    if os.path.isfile(g_temp_filename):  # open temp file containing content of some java_*_0.out.txt
        java_file = open(g_temp_filename,'r')
        g_toContinue = False  # denote if a multi-line message starts
        tempMessage = ""
        messageType = ""
        for each_line in java_file:
            # A line containing the start-of-test marker closes out the previous test
            # (flushing its collected messages) and begins collecting for a new one.
            if (g_java_start_text in each_line):
                startStr,found,endStr = each_line.partition(g_java_start_text)
                if len(found) > 0:
                    if len(g_current_testname) > 0:  # a new unit test is being started. Save old info and move on
                        associate_test_with_java(g_current_testname,java_messages,java_message_types)
                    g_current_testname = endStr.strip()  # record the test name
                    java_messages = []
                    java_message_types = []
            temp_strings = each_line.strip().split()
            # Token 5 of a standard log line carries the message type (e.g. INFO/WARN/ERRR).
            if (len(temp_strings) >= 6) and (temp_strings[5] in g_all_java_message_type):
                if g_toContinue == True:  # at the end of last message fragment
                    addJavaMessages(tempMessage,messageType,java_messages,java_message_types)
                    tempMessage = ""
                    messageType = ""
                # start of new message fragment
                g_toContinue = False
            else:  # non standard output. Continuation of last java message, add it to bad java message list
                if g_toContinue:
                    tempMessage += each_line  # add more java message here
                # if len(g_current_testname) == 0:
                #     addJavaMessages(each_line.strip(),"",java_messages,java_message_types)
                # else:
                #     addJavaMessages(each_line.strip(),"",java_messages,java_message_types)
            if ((len(temp_strings) > 5) and (temp_strings[5] in g_java_message_type)):  # find a bad java message
                startStr,found,endStr = each_line.partition(temp_strings[5])  # can be WARN,ERRR,FATAL,TRACE
                if found and (len(endStr.strip()) > 0):
                    tempMessage += endStr
                    messageType = temp_strings[5]
                    # if (tempMessage not in g_ok_java_messages["general"]): # found new bad messages that cannot be ignored
                    g_toContinue = True
                    # add tempMessage to bad java message list
                    # addJavaMessages(tempMessage,temp_strings[5],java_messages,java_message_types)
        java_file.close()
"resource": ""
} |
def save_dict():
    """
    Save the log scraping results into logs denoted by g_output_filename_failed_tests and
    g_output_filename_passed_tests.

    :return: none
    """
    global g_test_root_dir
    global g_output_filename_failed_tests
    global g_output_filename_passed_tests
    global g_output_pickle_filename
    global g_failed_test_info_dict
    # some build can fail really early that no buid id info is stored in the console text.
    if "2.build_id" not in g_failed_test_info_dict.keys():
        g_failed_test_info_dict["2.build_id"] = "unknown"
    build_id = g_failed_test_info_dict["2.build_id"]
    # Derive per-build output names so successive builds never overwrite each other.
    g_output_filename_failed_tests = g_output_filename_failed_tests+'_build_'+build_id+'_failed_tests.log'
    g_output_filename_passed_tests = g_output_filename_passed_tests+'_build_'+build_id+'_passed_tests.log'
    g_output_pickle_filename = g_output_pickle_filename+'_build_'+build_id+'.pickle'
    allKeys = sorted(g_failed_test_info_dict.keys())
    # write out the jenkins job info into log files.
    with open(g_output_pickle_filename,'wb') as test_file:
        pickle.dump(g_failed_test_info_dict,test_file)
    # write out the failure report as text into a text file
    text_file_failed_tests = open(g_output_filename_failed_tests,'w')
    text_file_passed_tests = None
    allKeys = sorted(g_failed_test_info_dict.keys())  # NOTE(review): duplicate of the sort above; harmless
    write_passed_tests = False
    if ("passed_tests_info *********" in allKeys):
        text_file_passed_tests = open(g_output_filename_passed_tests,'w')
        write_passed_tests = True
    for keyName in allKeys:
        val = g_failed_test_info_dict[keyName]
        if isinstance(val,list):  # writing one of the job lists
            if (len(val) == 3):  # it is a message for a test
                if keyName == "failed_tests_info *********":
                    write_test_java_message(keyName,val,text_file_failed_tests)
                if keyName == "passed_tests_info *********":
                    write_test_java_message(keyName,val,text_file_passed_tests)
            elif (len(val) == 2):  # it is a general bad java message
                write_java_message(keyName,val,text_file_failed_tests)
                if write_passed_tests:
                    write_java_message(keyName,val,text_file_passed_tests)
        else:
            # Scalar entries (job name, build id, node name, ...) go into both reports.
            write_general_build_message(keyName,val,text_file_failed_tests)
            if write_passed_tests:
                write_general_build_message(keyName,val,text_file_passed_tests)
    text_file_failed_tests.close()
    if write_passed_tests:
        text_file_passed_tests.close()
"resource": ""
} |
def update_summary_file():
    """
    Append the per-build failed/passed logs to the overall summary text file
    that is sent to users at the end of a daily log scraping.

    :return: none
    """
    global g_summary_text_filename
    global g_output_filename_failed_tests
    global g_output_filename_passed_tests
    with open(g_summary_text_filename, 'a') as summary:
        for logname in (g_output_filename_failed_tests, g_output_filename_passed_tests):
            write_file_content(summary, logname)
"resource": ""
} |
def write_file_content(fhandle, file2read):
    """
    Copy one log file into the summary text file, preceded by a header line.
    Missing files are silently skipped.

    Parameters
    ----------
    fhandle : Python file handle
        file handle to the summary text file
    file2read : str (path)
        log file whose content should be appended to the summary text file

    :return: none
    """
    if not os.path.isfile(file2read):
        return
    with open(file2read, 'r') as src:
        fhandle.write('============ Content of '+ file2read)
        fhandle.write('\n')
        fhandle.write(src.read())
        fhandle.write('\n\n')
"resource": ""
} |
def write_java_message(key,val,text_file):
    """
    Loop through all java messages that are not associated with a unit test and
    write them into a log file.

    Parameters
    ----------
    key : str
        9.general_bad_java_messages
    val : list of list of str
        contains the bad java messages and the message types.

    :return: none
    """
    text_file.write(key)
    text_file.write('\n')
    # val[1] holds the message types and val[2] the message bodies, indexed in
    # parallel with val[0]; only write anything when all three slots are present.
    if (len(val[0]) > 0) and (len(val) >= 3):
        for index in range(len(val[0])):
            text_file.write("Java Message Type: ")
            text_file.write(val[1][index])
            text_file.write('\n')
            text_file.write("Java Message: ")
            for jmess in val[2][index]:
                text_file.write(jmess)
            text_file.write('\n')
        text_file.write('\n \n')
"resource": ""
} |
def load_java_messages_to_ignore():
    """
    Load the pickle file holding the dict of bad java messages that should be
    ignored (per unit test or globally); the result lands in g_ok_java_messages.
    When no pickle exists, initialize the "general" ignore list to empty.

    :return: none
    """
    global g_ok_java_messages
    global g_java_message_pickle_filename
    if os.path.isfile(g_java_message_pickle_filename):
        with open(g_java_message_pickle_filename, 'rb') as fin:
            g_ok_java_messages = pickle.load(fin)
    else:
        g_ok_java_messages["general"] = []
"resource": ""
} |
def normalize_enum_constant(s):
    """Return enum constant `s` converted to a canonical snake-case."""
    if s.islower():
        return s
    if s.isupper():
        return s.lower()
    # Mixed case: prefix every non-lowercase character with "_" and lowercase it.
    pieces = []
    for ch in s:
        if not ch.islower():
            pieces.append("_")
        pieces.append(ch.lower())
    return "".join(pieces).strip("_")
"resource": ""
} |
def find_synonyms(self, word, count=20):
    """
    Find synonyms using a word2vec model.

    :param str word: A single word to find synonyms for.
    :param int count: The first "count" synonyms will be returned.

    :returns: the approximate reconstruction of the training data.
    """
    payload = {'model': self.model_id, 'word': word, 'count': count}
    reply = h2o.api("GET /3/Word2VecSynonyms", data=payload)
    pairs = zip(reply['synonyms'], reply['scores'])
    # Best-scoring synonyms first.
    return OrderedDict(sorted(pairs, key=lambda t: t[1], reverse=True))
"resource": ""
} |
def poll(self, verbose_model_scoring_history = False):
    """
    Wait until the job finishes.

    This method will continuously query the server about the status of the job, until the job reaches a
    completion. During this time we will display (in stdout) a progress bar with % completion status.

    :param verbose_model_scoring_history: when True, also print verbose scoring info while polling.
    :returns: self, once the job has reached a terminal state.
    :raises H2OJobCancelled: if the user cancelled the job during polling.
    :raises EnvironmentError: if the job finished with status FAILED.
    """
    try:
        hidden = not H2OJob.__PROGRESS_BAR__
        pb = ProgressBar(title=self._job_type + " progress", hidden=hidden)
        if verbose_model_scoring_history:
            # Emit the verbose info roughly at every 5% progress tick only.
            pb.execute(self._refresh_job_status, print_verbose_info=lambda x: self._print_verbose_info() if int(x * 10) % 5 == 0 else " ")
        else:
            pb.execute(self._refresh_job_status)
    except StopIteration as e:
        # The progress bar signals a user interrupt via StopIteration("cancelled").
        if str(e) == "cancelled":
            h2o.api("POST /3/Jobs/%s/cancel" % self.job_key)
            self.status = "CANCELLED"
        # Potentially we may want to re-raise the exception here
    assert self.status in {"DONE", "CANCELLED", "FAILED"} or self._poll_count <= 0, \
        "Polling finished while the job has status %s" % self.status
    if self.warnings:
        for w in self.warnings:
            warnings.warn(w)
    # check if failed... and politely print relevant message
    if self.status == "CANCELLED":
        raise H2OJobCancelled("Job<%s> was cancelled by the user." % self.job_key)
    if self.status == "FAILED":
        if (isinstance(self.job, dict)) and ("stacktrace" in list(self.job)):
            raise EnvironmentError("Job with key {} failed with an exception: {}\nstacktrace: "
                                   "\n{}".format(self.job_key, self.exception, self.job["stacktrace"]))
        else:
            raise EnvironmentError("Job with key %s failed with an exception: %s" % (self.job_key, self.exception))
    return self
"resource": ""
} |
def to_pojo(self, pojo_name="", path="", get_jar=True):
    """
    Convert the munging operations performed on H2OFrame into a POJO.

    :param pojo_name: (str) Name of POJO
    :param path: (str) path of POJO; when empty, the POJO source is printed instead of saved.
    :param get_jar: (bool) Whether to also download the h2o-genmodel.jar file needed to compile the POJO
    :return: None
    """
    assert_is_type(pojo_name, str)
    assert_is_type(path, str)
    assert_is_type(get_jar, bool)
    if pojo_name == "":
        # Generate a unique class name when none was supplied.
        pojo_name = "AssemblyPOJO_" + str(uuid.uuid4())
    java = h2o.api("GET /99/Assembly.java/%s/%s" % (self.id, pojo_name))
    if path == "":
        print(java)
    else:
        target = path + "/" + pojo_name + ".java"
        with open(target, 'w', encoding="utf-8") as out:
            out.write(java)  # this had better be utf-8 ?
    if get_jar and path != "":
        h2o.api("GET /3/h2o-genmodel.jar", save_to=os.path.join(path, "h2o-genmodel.jar"))
"resource": ""
} |
def fit(self, fr):
    """
    Apply the munging steps of this assembly to the frame `fr`.

    :param fr: H2OFrame where munging operations are to be performed on.
    :return: H2OFrame after munging operations are completed.
    """
    assert_is_type(fr, H2OFrame)
    # Serialize each step in REST form, with double quotes swapped for singles.
    encoded = ",".join(quoted(step[1].to_rest(step[0]).replace('"', "'")) for step in self.steps)
    payload = {"steps": "[%s]" % encoded, "frame": fr.frame_id}
    reply = h2o.api("POST /99/Assembly", data=payload)
    self.id = reply["assembly"]["name"]
    return H2OFrame.get_frame(reply["result"]["name"])
"resource": ""
} |
def percentileOnSortedList(N, percent, key=lambda x:x, interpolate='mean'):
    """
    Find the percentile of a list of values.

    @parameter N - is a list of values. Note N MUST BE already sorted.
    @parameter percent - a float value from 0.0 to 1.0.
    @parameter key - optional key function to compute value from each element of N.
    @parameter interpolate - how to resolve a fractional index; one of
        'floor', 'ceil', 'funky', 'linear', 'mean'.
    @return - the percentile of the values (None when N is None).
    """
    # 5 ways of resolving fractional indexes: floor, ceil, funky, linear, mean
    interpolateChoices = ['floor', 'ceil', 'funky', 'linear', 'mean']
    if interpolate not in interpolateChoices:
        print("Bad choice for interpolate: %s" % interpolate)
        print("Supported choices: %s" % interpolateChoices)
        # Previously execution fell through here and later crashed with an
        # UnboundLocalError on 'd'; fail fast with a clear error instead.
        raise ValueError("Unsupported interpolate value: %s" % interpolate)
    if N is None:
        return None
    k = (len(N)-1) * percent
    f = int(math.floor(k))
    c = int(math.ceil(k))
    if f == c:
        d = key(N[f])
        msg = "aligned:"
    elif interpolate=='floor':
        d = key(N[f])
        msg = "fractional with floor:"
    elif interpolate=='ceil':
        d = key(N[c])
        msg = "fractional with ceil:"
    elif interpolate=='funky':
        d0 = key(N[f]) * (c-k)
        d1 = key(N[c]) * (k-f)
        d = d0+d1
        msg = "fractional with Tung(floor and ceil) :"
    elif interpolate=='linear':
        assert (c-f)==1
        assert (k>=f) and (k<=c)
        pctDiff = k-f
        dDiff = pctDiff * (key(N[c]) - key(N[f]))
        # dDiff is computed in key-space, so add it to key(N[f]) rather than to
        # the raw element (the old `key(N[f] + dDiff)` was wrong for non-identity keys).
        d = key(N[f]) + dDiff
        msg = "fractional %s with linear(floor and ceil):" % pctDiff
    else:  # 'mean' (already validated above)
        d = (key(N[c]) + key(N[f])) / 2.0
        msg = "fractional with mean(floor and ceil):"
    # print 3 around the floored k, for eyeballing when we're close
    flooredK = int(f)
    # print the 3 around the median
    if flooredK > 0:
        print("prior->", key(N[flooredK-1]), " ")
    else:
        print("prior->", "<bof>")
    print("floor->", key(N[flooredK]), " ", msg, 'result:', d, "f:", f, "len(N):", len(N))
    if flooredK+1 < len(N):
        print(" ceil->", key(N[flooredK+1]), "c:", c)
    else:
        print(" ceil-> <eof>", "c:", c)
    return d
"resource": ""
} |
def default_params(self):
    """Dictionary of the default parameters of the model."""
    # One entry per model parameter, keyed by the parameter name.
    return {name: spec["default_value"] for name, spec in self.parms.items()}
"resource": ""
} |
def actual_params(self):
    """Dictionary of actual parameters of the model."""
    # For these parameters the REST payload nests the interesting value
    # under a sub-key; every other parameter is used verbatim.
    nested_key = {"model_id": "name",
                  "response_column": "column_name",
                  "training_frame": "name",
                  "validation_frame": "name"}
    result = {}
    for name, spec in self.parms.items():
        value = spec["actual_value"]
        if name in nested_key:
            result[name] = value.get(nested_key[name], None)
        else:
            result[name] = value
    return result
"resource": ""
} |
def deepfeatures(self, test_data, layer):
    """
    Return hidden layer details.

    :param test_data: Data to create a feature space on.
    :param layer: either a 0-based hidden layer index (numeric) or a hidden layer name.
    """
    if test_data is None:
        raise ValueError("Must specify test data")
    # A numeric argument selects the layer by index, anything else by name.
    if str(layer).isdigit():
        payload = {"deep_features_hidden_layer": layer}
    else:
        payload = {"deep_features_hidden_layer_name": layer}
    endpoint = "POST /4/Predictions/models/%s/frames/%s" % (self._id, test_data.frame_id)
    job = H2OJob(h2o.api(endpoint, data=payload), "deepfeatures")
    job.poll()
    return h2o.get_frame(job.dest_key)
"resource": ""
} |
def scoring_history(self):
    """
    Retrieve Model Score History.

    :returns: the score history as an H2OTwoDimTable (or a pandas DataFrame
        when pandas is available); prints a message and returns None when
        the model carries no score history.
    """
    history = self._model_json["output"].get("scoring_history")
    if history is not None:
        return history.as_data_frame()
    print("No score history for this model")
"resource": ""
} |
def show(self):
    """Print innards of model, without regards to type."""
    # Model still being trained asynchronously: poll the job once and bail.
    if self._future:
        self._job.poll_once()
        return
    if self._model_json is None:
        print("No model trained yet")
        return
    if self.model_id is None:
        print("This H2OEstimator has been removed.")
        return
    model = self._model_json["output"]
    print("Model Details")
    print("=============")
    print(self.__class__.__name__, ": ", self._model_json["algo_full_name"])
    print("Model Key: ", self._id)
    self.summary()
    print()
    # Show whichever metric sections the model actually carries.
    # training metrics
    tm = model["training_metrics"]
    if tm: tm.show()
    # validation metrics
    vm = model["validation_metrics"]
    if vm: vm.show()
    # cross-validation metrics
    xm = model["cross_validation_metrics"]
    if xm: xm.show()
    # cross-validation metrics summary
    xms = model["cross_validation_metrics_summary"]
    if xms: xms.show()
    # Optional sections -- only present for some algorithms.
    if "scoring_history" in model and model["scoring_history"]:
        model["scoring_history"].show()
    if "variable_importances" in model and model["variable_importances"]:
        model["variable_importances"].show()
"resource": ""
} |
def varimp(self, use_pandas=False):
    """
    Pretty print the variable importances, or return them in a list.

    :param use_pandas: If True, then the variable importances will be returned as a pandas data frame.
    :returns: a list of tuples (or a pandas DataFrame when requested), one per
        variable: (variable, relative_importance, scaled_importance, percentage).
        Prints a warning and returns None when the model has no importances.
    """
    model = self._model_json["output"]
    # NB: operator precedence makes this read as
    #   (algo is glm) OR (importances key present AND table non-empty)
    if self.algo == 'glm' or "variable_importances" in list(model.keys()) and model["variable_importances"]:
        if self.algo == 'glm':
            # GLM has no native importances; derive them from the
            # standardized coefficient magnitudes.
            tempvals = model["standardized_coefficient_magnitudes"].cell_values
            magnitudes = [row[1] for row in tempvals]
            # FIX: the running total used to be accumulated in a local named
            # `sum`, shadowing the builtin; use the builtins directly instead.
            total = sum(magnitudes)
            # The original running max was seeded with 0, so preserve that.
            max_val = max([0] + magnitudes)
            vals = [(row[0], row[1], row[1] / max_val, row[1] / total) for row in tempvals]
            header = ["variable", "relative_importance", "scaled_importance", "percentage"]
        else:
            vals = model["variable_importances"].cell_values
            header = model["variable_importances"].col_header
        if use_pandas and can_use_pandas():
            import pandas
            return pandas.DataFrame(vals, columns=header)
        else:
            return vals
    else:
        print("Warning: This model doesn't have variable importances")
"resource": ""
} |
def residual_degrees_of_freedom(self, train=False, valid=False, xval=False):
    """
    Retrieve the residual degrees of freedom if this model has the attribute, or None otherwise.

    :param bool train: Get the residual dof for the training set. If both train and valid are False,
        then train is selected by default.
    :param bool valid: Get the residual dof for the validation set. If both train and valid are True,
        then train is selected by default.
    :param bool xval: Unsupported -- cross-validation metrics are not available; raises H2OValueError.
    :returns: Return the residual dof, or None if it is not present.
    """
    if xval:
        raise H2OValueError("Cross-validation metrics are not available.")
    # Train wins whenever it is requested, or when nothing is requested at all.
    metrics_key = "training_metrics" if (train or not valid) else "validation_metrics"
    return self._model_json["output"][metrics_key].residual_degrees_of_freedom()
"resource": ""
} |
def coef(self):
    """
    Return the coefficients which can be applied to the non-standardized data.

    Note: standardize = True by default; if set to False then coef() returns
    the coefficients which are fit directly.
    """
    table = self._model_json["output"]["coefficients_table"]
    if table is None:
        return None
    # Pair each coefficient name with its value.
    return dict(zip(table["names"], table["coefficients"]))
"resource": ""
} |
def download_pojo(self, path="", get_genmodel_jar=False, genmodel_name=""):
    """
    Download the POJO for this model to the directory specified by path.

    If path is an empty string, then dump the output to screen.

    :param path: An absolute path to the directory where POJO should be saved.
    :param get_genmodel_jar: if True, then also download h2o-genmodel.jar and store it in folder ``path``.
    :param genmodel_name: Custom name of genmodel jar.
    :returns: name of the POJO file written.
    """
    assert_is_type(path, str)
    assert_is_type(get_genmodel_jar, bool)
    # Strip any trailing slash, then delegate to the module-level helper.
    return h2o.download_pojo(self, path.rstrip("/"), get_jar=get_genmodel_jar, jar_name=genmodel_name)
"resource": ""
} |
def download_mojo(self, path=".", get_genmodel_jar=False, genmodel_name=""):
    """
    Download the model in MOJO format.

    :param path: the path where MOJO file should be saved.
    :param get_genmodel_jar: if True, then also download h2o-genmodel.jar and store it in folder ``path``.
    :param genmodel_name: Custom name of genmodel jar.
    :returns: name of the MOJO file written.
    """
    assert_is_type(path, str)
    assert_is_type(get_genmodel_jar, bool)
    if not self.have_mojo:
        raise H2OValueError("Export to MOJO not supported")
    if get_genmodel_jar:
        # Default the jar file name when the caller did not supply one.
        jar_file = genmodel_name if genmodel_name != "" else "h2o-genmodel.jar"
        h2o.api("GET /3/h2o-genmodel.jar", save_to=os.path.join(path, jar_file))
    return h2o.api("GET /3/Models/%s/mojo" % self.model_id, save_to=path)
"resource": ""
} |
def save_model_details(self, path="", force=False):
    """
    Save Model Details of an H2O Model in JSON Format to disk.

    :param path: a path to save the model details at (hdfs, s3, local).
    :param force: if True overwrite destination directory in case it exists, or throw exception if set to False.
    :returns str: the path of the saved model details.
    """
    assert_is_type(path, str)
    assert_is_type(force, bool)
    # An empty path means "save into the current working directory".
    directory = os.getcwd() if path == "" else path
    target = os.path.join(directory, self.model_id + ".json")
    return h2o.api("GET /99/Models/%s/json" % self.model_id, data={"dir": target, "force": force})["dir"]
"resource": ""
} |
q267278 | ModelBase._check_targets | test | def _check_targets(y_actual, y_predicted):
"""Check that y_actual and y_predicted have the same length.
:param H2OFrame y_actual:
:param H2OFrame y_predicted:
:returns: None
"""
if len(y_actual) != len(y_predicted):
raise ValueError("Row mismatch: [{},{}]".format(len(y_actual), len(y_predicted))) | python | {
"resource": ""
} |
def cross_validation_models(self):
    """
    Obtain a list of cross-validation models.

    :returns: list of H2OModel objects, or None when the model was not cross-validated.
    """
    cv_info = self._model_json["output"]["cross_validation_models"]
    if cv_info is None:
        return None
    # Resolve each fold-model key into an actual model object.
    return [h2o.get_model(entry["name"]) for entry in cv_info]
"resource": ""
} |
def gbm(interactive=True, echo=True, testing=False):
    """GBM model demo.

    :param interactive: if True, pause and wait for a keypress before each step.
    :param echo: if True, echo each command before executing it.
    :param testing: if True, run in non-interactive test mode.
    """
    def demo_body(go):
        """
        Demo of H2O's Gradient Boosting estimator.

        This demo uploads a dataset to h2o, parses it, and shows a description.
        Then it divides the dataset into training and test sets, builds a GBM
        from the training set, and makes predictions for the test set.
        Finally, default performance metrics are displayed.
        """
        go()
        # Connect to H2O
        h2o.init()
        go()
        # Upload the prostate dataset that comes included in the h2o python package
        prostate = h2o.load_dataset("prostate")
        go()
        # Print a description of the prostate data
        prostate.describe()
        go()
        # Randomly split the dataset into ~70/30, training/test sets
        train, test = prostate.split_frame(ratios=[0.70])
        go()
        # Convert the response columns to factors (for binary classification problems)
        train["CAPSULE"] = train["CAPSULE"].asfactor()
        test["CAPSULE"] = test["CAPSULE"].asfactor()
        go()
        # Build a (classification) GBM
        from h2o.estimators import H2OGradientBoostingEstimator
        prostate_gbm = H2OGradientBoostingEstimator(distribution="bernoulli", ntrees=10, max_depth=8,
                                                    min_rows=10, learn_rate=0.2)
        prostate_gbm.train(x=["AGE", "RACE", "PSA", "VOL", "GLEASON"],
                           y="CAPSULE", training_frame=train)
        go()
        # Show the model
        prostate_gbm.show()
        go()
        # Predict on the test set and show the first ten predictions
        predictions = prostate_gbm.predict(test)
        predictions.show()
        go()
        # Fetch a tree, print number of tree nodes, show root node description
        from h2o.tree import H2OTree, H2ONode
        tree = H2OTree(prostate_gbm, 0, "0")
        len(tree)
        tree.left_children
        tree.right_children
        tree.root_node.show()
        go()
        # Show default performance metrics
        performance = prostate_gbm.model_performance(test)
        performance.show()

    # Execute:
    _run_demo(demo_body, interactive, echo, testing)
"resource": ""
} |
def deeplearning(interactive=True, echo=True, testing=False):
    """Deep Learning model demo.

    :param interactive: if True, pause and wait for a keypress before each step.
    :param echo: if True, echo each command before executing it.
    :param testing: if True, run in non-interactive test mode.
    """
    def demo_body(go):
        """
        Demo of H2O's Deep Learning model.

        This demo uploads a dataset to h2o, parses it, and shows a description.
        Then it divides the dataset into training and test sets, builds a Deep
        Learning model from the training set, and makes predictions for the
        test set.  Finally, default performance metrics are displayed.
        """
        go()
        # Connect to H2O
        h2o.init()
        go()
        # Upload the prostate dataset that comes included in the h2o python package
        prostate = h2o.load_dataset("prostate")
        go()
        # Print a description of the prostate data
        prostate.describe()
        go()
        # Randomly split the dataset into ~70/30, training/test sets
        train, test = prostate.split_frame(ratios=[0.70])
        go()
        # Convert the response columns to factors (for binary classification problems)
        train["CAPSULE"] = train["CAPSULE"].asfactor()
        test["CAPSULE"] = test["CAPSULE"].asfactor()
        go()
        # Build a (classification) Deep Learning model
        from h2o.estimators import H2ODeepLearningEstimator
        prostate_dl = H2ODeepLearningEstimator(activation="Tanh", hidden=[10, 10, 10], epochs=10000)
        prostate_dl.train(x=list(set(prostate.col_names) - {"ID", "CAPSULE"}),
                          y="CAPSULE", training_frame=train)
        go()
        # Show the model
        prostate_dl.show()
        go()
        # Predict on the test set and show the first ten predictions
        predictions = prostate_dl.predict(test)
        predictions.show()
        go()
        # Show default performance metrics
        performance = prostate_dl.model_performance(test)
        performance.show()

    # Execute:
    _run_demo(demo_body, interactive, echo, testing)
"resource": ""
} |
def glm(interactive=True, echo=True, testing=False):
    """GLM model demo.

    :param interactive: if True, pause and wait for a keypress before each step.
    :param echo: if True, echo each command before executing it.
    :param testing: if True, run in non-interactive test mode.
    """
    def demo_body(go):
        """
        Demo of H2O's Generalized Linear Estimator.

        This demo uploads a dataset to h2o, parses it, and shows a description.
        Then it divides the dataset into training and test sets, builds a GLM
        from the training set, and makes predictions for the test set.
        Finally, default performance metrics are displayed.
        """
        go()
        # Connect to H2O
        h2o.init()
        go()
        # Upload the prostate dataset that comes included in the h2o python package
        prostate = h2o.load_dataset("prostate")
        go()
        # Print a description of the prostate data
        prostate.describe()
        go()
        # Randomly split the dataset into ~70/30, training/test sets
        train, test = prostate.split_frame(ratios=[0.70])
        go()
        # Convert the response columns to factors (for binary classification problems)
        train["CAPSULE"] = train["CAPSULE"].asfactor()
        test["CAPSULE"] = test["CAPSULE"].asfactor()
        go()
        # Build a (classification) GLM
        from h2o.estimators import H2OGeneralizedLinearEstimator
        prostate_glm = H2OGeneralizedLinearEstimator(family="binomial", alpha=[0.5])
        prostate_glm.train(x=["AGE", "RACE", "PSA", "VOL", "GLEASON"],
                           y="CAPSULE", training_frame=train)
        go()
        # Show the model
        prostate_glm.show()
        go()
        # Predict on the test set and show the first ten predictions
        predictions = prostate_glm.predict(test)
        predictions.show()
        go()
        # Show default performance metrics
        performance = prostate_glm.model_performance(test)
        performance.show()

    # Execute:
    _run_demo(demo_body, interactive, echo, testing)
"resource": ""
} |
def _wait_for_keypress():
    """
    Wait for a key press on the console and return it.

    Borrowed from http://stackoverflow.com/questions/983354/how-do-i-make-python-to-wait-for-a-pressed-key
    """
    result = None
    if os.name == "nt":
        # Windows: msvcrt reads a single keypress without waiting for Enter.
        # noinspection PyUnresolvedReferences
        import msvcrt
        result = msvcrt.getch()
    else:
        # POSIX: temporarily disable line buffering (ICANON) and echo so a
        # single character can be read directly from stdin.
        import termios
        fd = sys.stdin.fileno()
        oldterm = termios.tcgetattr(fd)
        newattr = termios.tcgetattr(fd)
        newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
        termios.tcsetattr(fd, termios.TCSANOW, newattr)
        try:
            result = sys.stdin.read(1)
        except IOError:
            pass
        finally:
            # Always restore the original terminal attributes, even on error.
            termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)
    return result
"resource": ""
} |
def as_data_frame(self):
    """Convert to a python 'data frame' (a pandas DataFrame when pandas is
    installed; otherwise the table itself is returned unchanged).

    Side effect: widens ``pandas.options.display.max_colwidth`` to 70.
    """
    if not can_use_pandas():
        return self
    import pandas
    pandas.options.display.max_colwidth = 70
    return pandas.DataFrame(self._cell_values, columns=self._col_header)
"resource": ""
} |
def show(self, header=True):
    """Print the contents of this table.

    :param header: if True, also print the table title and description.
    """
    if header and self._table_header:
        print(self._table_header + ":", end=' ')
        if self._table_description: print(self._table_description)
    print()
    # Work on a deep copy so truncation below never mutates the real data.
    table = copy.deepcopy(self._cell_values)
    nr = 0
    if _is_list_of_lists(table): nr = len(
        table)  # only set if we truly have multiple rows... not just one long row :)
    if nr > 20:  # create a truncated view of the table, first/last 5 rows
        trunc_table = []
        trunc_table += [v for v in table[:5]]
        trunc_table.append(["---"] * len(table[0]))  # visual separator row
        trunc_table += [v for v in table[(nr - 5):]]
        table = trunc_table
    H2ODisplay(table, self._col_header, numalign="left", stralign="left")
    if nr > 20 and can_use_pandas(): print('\nSee the whole table with table.as_data_frame()')
"resource": ""
} |
def start(jar_path=None, nthreads=-1, enable_assertions=True, max_mem_size=None, min_mem_size=None,
          ice_root=None, log_dir=None, log_level=None, port="54321+", name=None, extra_classpath=None,
          verbose=True, jvm_custom_args=None, bind_to_localhost=True):
    """
    Start new H2O server on the local machine.

    :param jar_path: Path to the h2o.jar executable. If not given, then we will search for h2o.jar in the
        locations returned by `._jar_paths()`.
    :param nthreads: Number of threads in the thread pool. This should be related to the number of CPUs used.
        -1 means use all CPUs on the host. A positive integer specifies the number of CPUs directly.
    :param enable_assertions: If True, pass `-ea` option to the JVM.
    :param max_mem_size: Maximum heap size (jvm option Xmx), in bytes.
    :param min_mem_size: Minimum heap size (jvm option Xms), in bytes.
    :param log_dir: Directory for H2O logs to be stored if a new instance is started. Default directory is
        determined by H2O internally.
    :param log_level: The logger level for H2O if a new instance is started.
    :param ice_root: A directory where H2O stores its temporary files. Default location is determined by
        tempfile.mkdtemp().
    :param port: Port where to start the new server. This could be either an integer, or a string of the form
        "DDDDD+", indicating that the server should start looking for an open port starting from DDDDD and up.
    :param name: name of the h2o cluster to be started.
    :param extra_classpath: List of paths to libraries that should be included on the Java classpath.
    :param verbose: If True, then connection info will be printed to the stdout.
    :param jvm_custom_args: Custom, user-defined arguments for the JVM H2O is instantiated in.
    :param bind_to_localhost: A flag indicating whether access to the H2O instance should be restricted to the
        local machine (default) or if it can be reached from other computers on the network.
        Only applicable when H2O is started from the Python client.
    :returns: a new H2OLocalServer instance
    """
    # --- validate all arguments up front, before doing any work ------------
    assert_is_type(jar_path, None, str)
    assert_is_type(port, None, int, str)
    assert_is_type(name, None, str)
    assert_is_type(nthreads, -1, BoundInt(1, 4096))
    assert_is_type(enable_assertions, bool)
    assert_is_type(min_mem_size, None, int)
    assert_is_type(max_mem_size, None, BoundInt(1 << 25))  # require at least 32MB of heap
    assert_is_type(log_dir, str, None)
    assert_is_type(log_level, str, None)
    assert_satisfies(log_level, log_level in [None, "TRACE", "DEBUG", "INFO", "WARN", "ERRR", "FATA"])
    assert_is_type(ice_root, None, I(str, os.path.isdir))
    assert_is_type(extra_classpath, None, [str])
    assert_is_type(jvm_custom_args, list, None)
    assert_is_type(bind_to_localhost, bool)
    if jar_path:
        assert_satisfies(jar_path, jar_path.endswith("h2o.jar"))
    if min_mem_size is not None and max_mem_size is not None and min_mem_size > max_mem_size:
        raise H2OValueError("`min_mem_size`=%d is larger than the `max_mem_size`=%d" % (min_mem_size, max_mem_size))
    if port is None: port = "54321+"
    baseport = None
    # TODO: get rid of this port gimmick and have 2 separate parameters.
    if is_type(port, str):
        if port.isdigit():
            port = int(port)
        else:
            # "DDDDD+" form: scan for a free port starting at DDDDD.
            if not(port[-1] == "+" and port[:-1].isdigit()):
                raise H2OValueError("`port` should be of the form 'DDDD+', where D is a digit. Got: %s" % port)
            baseport = int(port[:-1])
            port = 0
    hs = H2OLocalServer()
    hs._verbose = bool(verbose)
    hs._jar_path = hs._find_jar(jar_path)
    hs._extra_classpath = extra_classpath
    hs._ice_root = ice_root
    hs._name = name
    if not ice_root:
        # No explicit ice_root given: use a throwaway temp directory.
        hs._ice_root = tempfile.mkdtemp()
        hs._tempdir = hs._ice_root
    if verbose: print("Attempting to start a local H2O server...")
    hs._launch_server(port=port, baseport=baseport, nthreads=int(nthreads), ea=enable_assertions,
                      mmax=max_mem_size, mmin=min_mem_size, jvm_custom_args=jvm_custom_args,
                      bind_to_localhost=bind_to_localhost, log_dir=log_dir, log_level=log_level)
    if verbose: print("  Server is running at %s://%s:%d" % (hs.scheme, hs.ip, hs.port))
    # Ensure the server subprocess is shut down when the Python process exits.
    atexit.register(lambda: hs.shutdown())
    return hs
"resource": ""
} |
q267287 | H2OLocalServer._find_jar | test | def _find_jar(self, path0=None):
"""
Return the location of an h2o.jar executable.
:param path0: Explicitly given h2o.jar path. If provided, then we will simply check whether the file is there,
otherwise we will search for an executable in locations returned by ._jar_paths().
:raises H2OStartupError: if no h2o.jar executable can be found.
"""
jar_paths = [path0] if path0 else self._jar_paths()
searched_paths = []
for jp in jar_paths:
searched_paths.append(jp)
if os.path.exists(jp):
return jp
raise H2OStartupError("Cannot start local server: h2o.jar not found. Paths searched:\n" +
"".join(" %s\n" % s for s in searched_paths)) | python | {
"resource": ""
} |
q267288 | H2OLocalServer._jar_paths | test | def _jar_paths():
"""Produce potential paths for an h2o.jar executable."""
# PUBDEV-3534 hook to use arbitrary h2o.jar
own_jar = os.getenv("H2O_JAR_PATH", "")
if own_jar != "":
if not os.path.isfile(own_jar):
raise H2OStartupError("Environment variable H2O_JAR_PATH is set to '%d' but file does not exists, unset environment variable or provide valid path to h2o.jar file." % own_jar)
yield own_jar
# Check if running from an h2o-3 src folder (or any subfolder), in which case use the freshly-built h2o.jar
cwd_chunks = os.path.abspath(".").split(os.path.sep)
for i in range(len(cwd_chunks), 0, -1):
if cwd_chunks[i - 1] == "h2o-3":
yield os.path.sep.join(cwd_chunks[:i] + ["build", "h2o.jar"])
# Then check the backend/bin folder:
# (the following works assuming this code is located in h2o/backend/server.py file)
backend_dir = os.path.split(os.path.realpath(__file__))[0]
yield os.path.join(backend_dir, "bin", "h2o.jar")
# Then try several old locations where h2o.jar might have been installed
prefix1 = prefix2 = sys.prefix
# On Unix-like systems Python typically gets installed into /Library/... or /System/Library/... If one of
# those paths is sys.prefix, then we also build its counterpart.
if prefix1.startswith(os.path.sep + "Library"):
prefix2 = os.path.join("", "System", prefix1)
elif prefix1.startswith(os.path.sep + "System"):
prefix2 = prefix1[len(os.path.join("", "System")):]
yield os.path.join(prefix1, "h2o_jar", "h2o.jar")
yield os.path.join(os.path.abspath(os.sep), "usr", "local", "h2o_jar", "h2o.jar")
yield os.path.join(prefix1, "local", "h2o_jar", "h2o.jar")
yield os.path.join(get_config_var("userbase"), "h2o_jar", "h2o.jar")
yield os.path.join(prefix2, "h2o_jar", "h2o.jar") | python | {
"resource": ""
} |
def hit_ratio_table(self, train=False, valid=False, xval=False):
    """
    Retrieve the Hit Ratios.

    If all are False (default), then return the training metric value.
    If more than one options is set to True, then return a dictionary of metrics where the keys are
    "train", "valid", and "xval".

    :param train: If train is True, then return the hit ratio value for the training data.
    :param valid: If valid is True, then return the hit ratio value for the validation data.
    :param xval: If xval is True, then return the hit ratio value for the cross validation data.
    :return: The hit ratio table(s) for this model.
    """
    metrics = ModelBase._get_metrics(self, train, valid, xval)
    tables = {split: (None if metric is None else metric.hit_ratio_table())
              for split, metric in metrics.items()}
    # A single requested split is returned bare, multiple splits as a dict.
    if len(tables) == 1:
        return list(tables.values())[0]
    return tables
"resource": ""
} |
def csv_dict_writer(f, fieldnames, **kwargs):
    """Equivalent of csv.DictWriter, but allows `delimiter` to be a unicode string on Py2."""
    import csv
    # Py2's csv module requires a byte-string delimiter; coerce when present.
    try:
        kwargs["delimiter"] = str(kwargs["delimiter"])
    except KeyError:
        pass
    return csv.DictWriter(f, fieldnames, **kwargs)
"resource": ""
} |
q267291 | ApiDocWriter._uri2path | test | def _uri2path(self, uri):
''' Convert uri to absolute filepath
Parameters
----------
uri : string
URI of python module to return path for
Returns
-------
path : None or string
Returns None if there is no valid path for this URI
Otherwise returns absolute file system path for URI
Examples
--------
>>> docwriter = ApiDocWriter('sphinx')
>>> import sphinx
>>> modpath = sphinx.__path__[0]
>>> res = docwriter._uri2path('sphinx.builder')
>>> res == os.path.join(modpath, 'builder.py')
True
>>> res = docwriter._uri2path('sphinx')
>>> res == os.path.join(modpath, '__init__.py')
True
>>> docwriter._uri2path('sphinx.does_not_exist')
'''
if uri == self.package_name:
return os.path.join(self.root_path, '__init__.py')
path = uri.replace('.', os.path.sep)
path = path.replace(self.package_name + os.path.sep, '')
path = os.path.join(self.root_path, path)
# XXX maybe check for extensions as well?
if os.path.exists(path + '.py'): # file
path += '.py'
elif os.path.exists(os.path.join(path, '__init__.py')):
path = os.path.join(path, '__init__.py')
else:
return None
return path | python | {
"resource": ""
} |
q267292 | ApiDocWriter._path2uri | test | def _path2uri(self, dirpath):
''' Convert directory path to uri '''
relpath = dirpath.replace(self.root_path, self.package_name)
if relpath.startswith(os.path.sep):
relpath = relpath[1:]
return relpath.replace(os.path.sep, '.') | python | {
"resource": ""
} |
q267293 | ApiDocWriter._parse_lines | test | def _parse_lines(self, linesource):
''' Parse lines of text for functions and classes '''
functions = []
classes = []
for line in linesource:
if line.startswith('def ') and line.count('('):
# exclude private stuff
name = self._get_object_name(line)
if not name.startswith('_'):
functions.append(name)
elif line.startswith('class '):
# exclude private stuff
name = self._get_object_name(line)
if not name.startswith('_'):
classes.append(name)
else:
pass
functions.sort()
classes.sort()
return functions, classes | python | {
"resource": ""
} |
def generate_api_doc(self, uri):
    '''Make autodoc documentation template string for a module

    Parameters
    ----------
    uri : string
        python location of module - e.g 'sphinx.builder'

    Returns
    -------
    S : string
        Contents of API doc (empty string when the module defines no
        public functions or classes)
    '''
    # get the names of all classes and functions
    functions, classes = self._parse_module(uri)
    if not len(functions) and not len(classes):
        print 'WARNING: Empty -',uri # dbg
        return ''
    # Make a shorter version of the uri that omits the package name for
    # titles
    uri_short = re.sub(r'^%s\.' % self.package_name,'',uri)
    ad = '.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n'
    # Chapter title, underlined with the level-1 section character.
    chap_title = uri_short
    ad += (chap_title+'\n'+ self.rst_section_levels[1] * len(chap_title)
           + '\n\n')
    # Set the chapter title to read 'module' for all modules except for the
    # main packages
    if '.' in uri:
        title = 'Module: :mod:`' + uri_short + '`'
    else:
        title = ':mod:`' + uri_short + '`'
    ad += title + '\n' + self.rst_section_levels[2] * len(title)
    if len(classes):
        # Only emit an inheritance diagram when there is at least one class.
        ad += '\nInheritance diagram for ``%s``:\n\n' % uri
        ad += '.. inheritance-diagram:: %s \n' % uri
        ad += ' :parts: 3\n'
    ad += '\n.. automodule:: ' + uri + '\n'
    ad += '\n.. currentmodule:: ' + uri + '\n'
    # Pluralize the "Classes"/"Functions" section headers appropriately.
    multi_class = len(classes) > 1
    multi_fx = len(functions) > 1
    if multi_class:
        ad += '\n' + 'Classes' + '\n' + \
              self.rst_section_levels[2] * 7 + '\n'
    elif len(classes) and multi_fx:
        ad += '\n' + 'Class' + '\n' + \
              self.rst_section_levels[2] * 5 + '\n'
    for c in classes:
        ad += '\n:class:`' + c + '`\n' \
              + self.rst_section_levels[multi_class + 2 ] * \
              (len(c)+9) + '\n\n'
        ad += '\n.. autoclass:: ' + c + '\n'
        # must NOT exclude from index to keep cross-refs working
        ad += ' :members:\n' \
              ' :undoc-members:\n' \
              ' :show-inheritance:\n' \
              ' :inherited-members:\n' \
              '\n' \
              ' .. automethod:: __init__\n'
    if multi_fx:
        ad += '\n' + 'Functions' + '\n' + \
              self.rst_section_levels[2] * 9 + '\n\n'
    elif len(functions) and multi_class:
        ad += '\n' + 'Function' + '\n' + \
              self.rst_section_levels[2] * 8 + '\n\n'
    for f in functions:
        # must NOT exclude from index to keep cross-refs working
        ad += '\n.. autofunction:: ' + uri + '.' + f + '\n\n'
    return ad
"resource": ""
} |
def discover_modules(self):
    ''' Return module sequence discovered from ``self.package_name``

    Parameters
    ----------
    None

    Returns
    -------
    mods : sequence
        Sequence of module names within ``self.package_name``

    Examples
    --------
    >>> dw = ApiDocWriter('sphinx')
    >>> mods = dw.discover_modules()
    >>> 'sphinx.util' in mods
    True
    >>> dw.package_skip_patterns.append('\.util$')
    >>> 'sphinx.util' in dw.discover_modules()
    False
    >>>
    '''
    modules = [self.package_name]
    # raw directory parsing
    for dirpath, dirnames, filenames in os.walk(self.root_path):
        # Check directory names for packages
        root_uri = self._path2uri(os.path.join(self.root_path,
                                               dirpath))
        for dirname in dirnames[:]:  # copy list - we modify inplace
            package_uri = '.'.join((root_uri, dirname))
            if (self._uri2path(package_uri) and
                    self._survives_exclude(package_uri, 'package')):
                modules.append(package_uri)
            else:
                # Removing from `dirnames` prunes os.walk so excluded
                # packages are never descended into.
                dirnames.remove(dirname)
        # Check filenames for modules
        for filename in filenames:
            # NOTE(review): strips the last 3 chars assuming a ".py" suffix;
            # other file types yield bogus URIs that _uri2path() rejects.
            module_name = filename[:-3]
            module_uri = '.'.join((root_uri, module_name))
            if (self._uri2path(module_uri) and
                    self._survives_exclude(module_uri, 'module')):
                modules.append(module_uri)
    return sorted(modules)
"resource": ""
} |
def write_api_docs(self, outdir):
    """Generate API reST files.

    Parameters
    ----------
    outdir : string
        Directory name in which to store files.  One file is created per
        discovered module, with an automatically generated filename.

    Returns
    -------
    None

    Notes
    -----
    Sets self.written_modules to list of written modules
    """
    if not os.path.exists(outdir):
        os.mkdir(outdir)
    # Discover every module under the package, then emit one doc per module.
    self.write_modules_api(self.discover_modules(), outdir)
"resource": ""
} |
def write_index(self, outdir, froot='gen', relative_to=None):
    """Make a reST API index file from written files

    Parameters
    ----------
    outdir : string
        Directory to which to write generated index file
    froot : string, optional
        root (filename without extension) of filename to write to
        Defaults to 'gen'. We add ``self.rst_extension``.
    relative_to : string
        path to which written filenames are relative. This
        component of the written file path will be removed from
        outdir, in the generated index. Default is None, meaning,
        leave path as it is.

    Raises
    ------
    ValueError
        If no modules have been written yet (write_api_docs not called).
    """
    if self.written_modules is None:
        raise ValueError('No modules written')
    # Get full filename path
    path = os.path.join(outdir, froot + self.rst_extension)
    # Path written into index is relative to rootpath
    if relative_to is not None:
        relpath = outdir.replace(relative_to + os.path.sep, '')
    else:
        relpath = outdir
    # FIX: use a context manager so the file handle is closed even when a
    # write fails; previously the handle could leak on error.
    with open(path, 'wt') as idx:
        idx.write('.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n')
        idx.write('.. toctree::\n\n')
        for f in self.written_modules:
            idx.write('   %s\n' % os.path.join(relpath, f))
"resource": ""
} |
def to_list(self):
    """Convert this confusion matrix into a 2x2 plain list of values."""
    # Column 0 holds the row label; the counts live in columns 1 and 2.
    rows = self.table.cell_values
    return [[int(rows[r][c]) for c in (1, 2)] for r in (0, 1)]
"resource": ""
} |
def load_dict():
    """
    Load java messages that can be ignored from a pickle file into the dict g_ok_java_messages.

    :return: none
    """
    global g_load_java_message_filename
    global g_ok_java_messages
    # NOTE(review): pickle.load can execute arbitrary code; the file is
    # assumed to be locally generated and trusted.
    if not os.path.isfile(g_load_java_message_filename):
        # No previous java messages to be excluded were found.
        g_ok_java_messages["general"] = []
        return
    # Only load the dict from file if it exists.
    with open(g_load_java_message_filename, 'rb') as ofile:
        g_ok_java_messages = pickle.load(ofile)
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.