_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
q267100 | assert_satisfies | test | def assert_satisfies(v, cond, message=None):
"""
Assert that variable satisfies the provided condition.
:param v: variable to check. Its value is only used for error reporting.
:param bool cond: condition that must be satisfied. Should be somehow related to the variable ``v``.
:param message: message string to use instead of the default.
"""
if not cond:
vname, vexpr = _retrieve_assert_arguments()
if not message:
message = "Argument `{var}` (= {val!r}) does not satisfy the condition {expr}" \
.format(var=vname, val=v, expr=vexpr)
raise H2OValueError(message=message, var_name=vname, skip_frames=1) | python | {
"resource": ""
} |
q267101 | _retrieve_assert_arguments | test | def _retrieve_assert_arguments():
"""
Magic variable name retrieval.
This function is designed as a helper for assert_is_type() function. Typically such assertion is used like this::
assert_is_type(num_threads, int)
If the variable `num_threads` turns out to be non-integer, we would like to raise an exception such as
H2OTypeError("`num_threads` is expected to be integer, but got <str>")
and in order to compose an error message like that, we need to know that the variables that was passed to
assert_is_type() carries a name "num_threads". Naturally, the variable itself knows nothing about that.
This is where this function comes in: we walk up the stack trace until the first frame outside of this
file, find the original line that called the assert_is_type() function, and extract the variable name from
that line. This is slightly fragile: in particular, we assume that there is at most one assert_is_type statement per line,
or that this statement does not spill over multiple lines, etc.
"""
try:
raise RuntimeError("Catch me!")
except RuntimeError:
# Walk up the stacktrace until we are outside of this file
tb = sys.exc_info()[2]
assert tb.tb_frame.f_code.co_name == "_retrieve_assert_arguments"
this_filename = tb.tb_frame.f_code.co_filename
fr = tb.tb_frame
while fr is not None and fr.f_code.co_filename == this_filename:
fr = fr.f_back
# Read the source file and tokenize it, extracting the expressions.
try:
with io.open(fr.f_code.co_filename, "r", encoding="utf-8") as f:
# Skip initial lines that are irrelevant
for i in range(fr.f_lineno - 1): next(f)
# Create tokenizer
g = tokenize.generate_tokens(f.readline)
step = 0
args_tokens = []
level = 0
for ttt in g:
if step == 0:
if ttt[0] != tokenize.NAME: continue
if not ttt[1].startswith("assert_"): continue
step = 1
elif step == 1:
assert ttt[0] == tokenize.OP and ttt[1] == "("
args_tokens.append([])
step = 2
elif step == 2:
if level == 0 and ttt[0] == tokenize.OP and ttt[1] == ",":
args_tokens.append([])
elif level == 0 and ttt[0] == tokenize.OP and ttt[1] == ")":
break
else:
if ttt[0] == tokenize.OP and ttt[1] in "([{": level += 1
if ttt[0] == tokenize.OP and ttt[1] in ")]}": level -= 1
assert level >= 0, "Parse error: parentheses level became negative"
args_tokens[-1].append(ttt)
args = [tokenize.untokenize(at).strip().replace("\n", " ") for at in args_tokens]
return args
except IOError:
return "arg", | python | {
"resource": ""
} |
q267102 | _check_type | test | def _check_type(var, vtype):
"""
Return True if the variable is of the specified type, and False otherwise.
:param var: variable to check
:param vtype: expected variable's type
"""
if vtype is None:
return var is None
if isinstance(vtype, _primitive_type):
return var == vtype
if vtype is str:
return isinstance(var, _str_type)
if vtype is int:
return isinstance(var, _int_type)
if vtype is numeric:
return isinstance(var, _num_type)
if isinstance(vtype, MagicType):
return vtype.check(var)
if isinstance(vtype, type):
# ``vtype`` is a name of the class, or a built-in type such as "list", "tuple", etc
return isinstance(var, vtype)
if isinstance(vtype, list):
# ``vtype`` is a list literal
elem_type = U(*vtype)
return isinstance(var, list) and all(_check_type(item, elem_type) for item in var)
if isinstance(vtype, set):
# ``vtype`` is a set literal
elem_type = U(*vtype)
return isinstance(var, set) and all(_check_type(item, elem_type) for item in var)
if isinstance(vtype, tuple):
# ``vtype`` is a tuple literal
return (isinstance(var, tuple) and len(vtype) == len(var) and
all(_check_type(var[i], vtype[i]) for i in range(len(vtype))))
if isinstance(vtype, dict):
# ``vtype`` is a dict literal
ttkv = U(*viewitems(vtype))
return isinstance(var, dict) and all(_check_type(kv, ttkv) for kv in viewitems(var))
if isinstance(vtype, (FunctionType, BuiltinFunctionType)):
return vtype(var)
raise RuntimeError("Invalid type %r in _check_type()" % vtype) | python | {
"resource": ""
} |
q267103 | _get_type_name | test | def _get_type_name(vtype, dump=None):
"""
Return the name of the provided type.
_get_type_name(int) == "integer"
_get_type_name(str) == "string"
_get_type_name(tuple) == "tuple"
_get_type_name(Exception) == "Exception"
_get_type_name(U(int, float, bool)) == "integer|float|bool"
_get_type_name(U(H2OFrame, None)) == "?H2OFrame"
"""
if vtype is None:
return "None"
if vtype is str:
return "string"
if vtype is int:
return "integer"
if vtype is numeric:
return "numeric"
if is_type(vtype, str):
return '"%s"' % repr(vtype)[1:-1]
if is_type(vtype, int):
return str(vtype)
if isinstance(vtype, MagicType):
return vtype.name(dump)
if isinstance(vtype, type):
return vtype.__name__
if isinstance(vtype, list):
return "list(%s)" % _get_type_name(U(*vtype), dump)
if isinstance(vtype, set):
return "set(%s)" % _get_type_name(U(*vtype), dump)
if isinstance(vtype, tuple):
return "(%s)" % ", ".join(_get_type_name(item, dump) for item in vtype)
if isinstance(vtype, dict):
return "dict(%s)" % ", ".join("%s: %s" % (_get_type_name(tk, dump), _get_type_name(tv, dump))
for tk, tv in viewitems(vtype))
if isinstance(vtype, (FunctionType, BuiltinFunctionType)):
if vtype.__name__ == "<lambda>":
return _get_lambda_source_code(vtype, dump)
else:
return vtype.__name__
raise RuntimeError("Unexpected `vtype`: %r" % vtype) | python | {
"resource": ""
} |
q267104 | _get_lambda_source_code | test | def _get_lambda_source_code(lambda_fn, src):
"""Attempt to find the source code of the ``lambda_fn`` within the string ``src``."""
def gen_lambdas():
def gen():
yield src + "\n"
g = gen()
step = 0
tokens = []
for tok in tokenize.generate_tokens(getattr(g, "next", getattr(g, "__next__", None))):
if step == 0:
if tok[0] == tokenize.NAME and tok[1] == "lambda":
step = 1
tokens = [tok]
level = 0
elif step == 1:
if tok[0] == tokenize.NAME:
tokens.append(tok)
step = 2
else:
step = 0
elif step == 2:
if tok[0] == tokenize.OP and tok[1] == ":":
tokens.append(tok)
step = 3
else:
step = 0
elif step == 3:
if level == 0 and (tok[0] == tokenize.OP and tok[1] in ",)" or tok[0] == tokenize.ENDMARKER):
yield tokenize.untokenize(tokens).strip()
step = 0
else:
tokens.append(tok)
if tok[0] == tokenize.OP:
if tok[1] in "[({": level += 1
if tok[1] in "])}": level -= 1
assert not tokens
actual_code = lambda_fn.__code__.co_code
for lambda_src in gen_lambdas():
try:
fn = eval(lambda_src, globals(), locals())
if fn.__code__.co_code == actual_code:
return lambda_src.split(":", 1)[1].strip()
except Exception:
pass
return "<lambda>" | python | {
"resource": ""
} |
q267105 | NOT.check | test | def check(self, var):
"""Return True if the variable does not match any of the types, and False otherwise."""
return not any(_check_type(var, tt) for tt in self._types) | python | {
"resource": ""
} |
q267106 | Enum.check | test | def check(self, var):
"""Check whether the provided value is a valid enum constant."""
if not isinstance(var, _str_type): return False
return _enum_mangle(var) in self._consts | python | {
"resource": ""
} |
q267107 | H2OConfigReader.get_config | test | def get_config():
"""Retrieve the config as a dictionary of key-value pairs."""
self = H2OConfigReader._get_instance()
if not self._config_loaded:
self._read_config()
return self._config | python | {
"resource": ""
} |
q267108 | H2OConfigReader._read_config | test | def _read_config(self):
"""Find and parse config file, storing all variables in ``self._config``."""
self._config_loaded = True
conf = []
for f in self._candidate_log_files():
if os.path.isfile(f):
self._logger.info("Reading config file %s" % f)
section_rx = re.compile(r"^\[(\w+)\]$")
keyvalue_rx = re.compile(r"^(\w+:)?([\w.]+)\s*=(.*)$")
with io.open(f, "rt", encoding="utf-8") as config_file:
section_name = None
for lineno, line in enumerate(config_file):
line = line.strip()
if line == "" or line.startswith("#"): continue
m1 = section_rx.match(line)
if m1:
section_name = m1.group(1)
continue
m2 = keyvalue_rx.match(line)
if m2:
lng = m2.group(1)
key = m2.group(2)
val = m2.group(3).strip()
if lng and lng.lower() != "py:": continue
if section_name:
key = section_name + "." + key
if key in H2OConfigReader._allowed_config_keys:
conf.append((key, val))
else:
self._logger.error("Key %s is not a valid config key" % key)
continue
self._logger.error("Syntax error in config file line %d: %s" % (lineno, line))
self._config = dict(conf)
return | python | {
"resource": ""
} |
q267109 | H2OConfigReader._candidate_log_files | test | def _candidate_log_files():
"""Return possible locations for the .h2oconfig file, one at a time."""
# Search for .h2oconfig in the current directory and all parent directories
relpath = ".h2oconfig"
prevpath = None
while True:
abspath = os.path.abspath(relpath)
if abspath == prevpath: break
prevpath = abspath
relpath = "../" + relpath
yield abspath
# Also check if .h2oconfig exists in the user's directory
yield os.path.expanduser("~/.h2oconfig") | python | {
"resource": ""
} |
q267110 | ProgressBar.execute | test | def execute(self, progress_fn, print_verbose_info=None):
"""
Start the progress bar, and return only when the progress reaches 100%.
:param progress_fn: the executor function (or a generator). This function should take no arguments
and return either a single number -- the current progress level, or a tuple (progress level, delay),
where delay is the time interval for when the progress should be checked again. This function may at
any point raise the ``StopIteration(message)`` exception, which will interrupt the progress bar,
display the ``message`` in red font, and then re-raise the exception.
:raises StopIteration: if the job is interrupted. The reason for interruption is provided in the exception's
message. The message will say "cancelled" if the job was interrupted by the user by pressing Ctrl+C.
"""
assert_is_type(progress_fn, FunctionType, GeneratorType, MethodType)
if isinstance(progress_fn, GeneratorType):
# Convert generator to a regular function
progress_fn = (lambda g: lambda: next(g))(progress_fn)
# Initialize the execution context
self._next_poll_time = 0
self._t0 = time.time()
self._x0 = 0
self._v0 = 0.01 # corresponds to 100s completion time
self._ve = 0.01
progress = 0
status = None # Status message in case the job gets interrupted.
try:
while True:
# We attempt to synchronize all helper functions, ensuring that each of them has the same idea
# for what the current time moment is. Otherwise we could have some corner cases when one method
# says that something must happen right now, while the other already sees that moment in the past.
now = time.time()
# Query the progress level, but only if it's time already
if self._next_poll_time <= now:
res = progress_fn() # may raise StopIteration
assert_is_type(res, (numeric, numeric), numeric)
if not isinstance(res, tuple):
res = (res, -1)
# Progress querying could have taken some time, so update the current time moment
now = time.time()
self._store_model_progress(res, now)
self._recalculate_model_parameters(now)
# Render the widget regardless of whether it's too early or not
progress = min(self._compute_progress_at_time(now)[0], 1)
if progress == 1 and self._get_real_progress() >= 1:
# Do not exit until both the model and the actual progress reach 100% mark.
break
result = self._widget.render(progress)
assert_is_type(result, RenderResult)
time0 = result.next_time
time1 = self._get_time_at_progress(result.next_progress)
next_render_time = min(time0, time1)
self._draw(result.rendered)
# Wait until the next rendering/querying cycle
wait_time = min(next_render_time, self._next_poll_time) - now
if wait_time > 0:
time.sleep(wait_time)
if print_verbose_info is not None:
print_verbose_info(progress)
except KeyboardInterrupt:
# If the user presses Ctrl+C, we interrupt the progress bar.
status = "cancelled"
except StopIteration as e:
# If the generator raises StopIteration before reaching 100%, then the progress display will
# remain incomplete.
status = str(e)
# Do one final rendering before we exit
result = self._widget.render(progress=progress, status=status)
self._draw(result.rendered, final=True)
if status == "cancelled":
# Re-raise the exception, to inform the upstream caller that something unexpected happened.
raise StopIteration(status) | python | {
"resource": ""
} |
q267111 | ProgressBar._store_model_progress | test | def _store_model_progress(self, res, now):
"""
Save the current model progress into ``self._progress_data``, and update ``self._next_poll_time``.
:param res: tuple (progress level, poll delay).
:param now: current timestamp.
"""
raw_progress, delay = res
raw_progress = clamp(raw_progress, 0, self._maxval)
self._progress_data.append((now, raw_progress))
if delay < 0:
# calculation of ``_guess_next_poll_interval()`` should be done only *after* we pushed the fresh data to
# ``self._progress_data``.
delay = self._guess_next_poll_interval()
self._next_poll_time = now + clamp(delay, self.MIN_PROGRESS_CHECK_INTERVAL, self.MAX_PROGRESS_CHECK_INTERVAL) | python | {
"resource": ""
} |
q267112 | ProgressBar._recalculate_model_parameters | test | def _recalculate_model_parameters(self, now):
"""Compute t0, x0, v0, ve."""
time_until_end = self._estimate_progress_completion_time(now) - now
assert time_until_end >= 0, "Estimated progress completion cannot be in the past."
x_real = self._get_real_progress()
if x_real == 1:
t0, x0, v0, ve = now, 1, 0, 0
else:
x0, v0 = self._compute_progress_at_time(now)
t0 = now
if x0 >= 1:
# On rare occasion, the model's progress may have reached 100% by ``now``. This can happen if
# (1) the progress is close to 100% initially and has high speed, (2) on the previous call we
# estimated that the process completion time will be right after the next poll time, and (3)
# the polling itself took so much time that the process effectively "overshoot".
# If this happens, then we adjust x0, v0 to the previous valid data checkpoint.
t0, x0, v0 = self._t0, self._x0, self._v0
time_until_end += now - t0
z = self.BETA * time_until_end
max_speed = (1 - x_real**2) / self.FINISH_DELAY
ve = v0 + (self.BETA * (1 - x0) - v0 * z) / (z - 1 + math.exp(-z))
if ve < 0:
# Current speed is too high -- reduce v0 (violate non-smoothness of speed)
v0 = self.BETA * (1 - x0) / (1 - math.exp(-z))
ve = 0
if ve > max_speed:
# Current speed is too low: finish later, but do not allow ``ve`` to be higher than ``max_speed``
ve = max_speed
self._t0, self._x0, self._v0, self._ve = t0, x0, v0, ve | python | {
"resource": ""
} |
q267113 | ProgressBar._estimate_progress_completion_time | test | def _estimate_progress_completion_time(self, now):
"""
Estimate the moment when the underlying process is expected to reach completion.
This function should only return future times. Also this function is not allowed to return time moments less
than self._next_poll_time if the actual progress is below 100% (this is because we won't know that the
process has finished until we poll the external progress function).
"""
assert self._next_poll_time >= now
tlast, wlast = self._progress_data[-1]
# If reached 100%, make sure that we finish as soon as possible, but maybe not immediately
if wlast == self._maxval:
current_completion_time = (1 - self._x0) / self._v0 + self._t0
return clamp(current_completion_time, now, now + self.FINISH_DELAY)
# Calculate the approximate speed of the raw progress based on recent data
tacc, wacc = 0, 0
factor = self.GAMMA
for t, x in self._progress_data[-2::-1]:
tacc += factor * (tlast - t)
wacc += factor * (wlast - x)
factor *= self.GAMMA
if factor < 1e-2: break
# If there was no progress at all, then just assume it's 5 minutes from now
if wacc == 0: return now + 300
# Estimate the completion time assuming linear progress
t_estimate = tlast + tacc * (self._maxval - wlast) / wacc
# Adjust the estimate if it looks like it may happen too soon
if t_estimate <= self._next_poll_time:
t_estimate = self._next_poll_time + self.FINISH_DELAY
return t_estimate | python | {
"resource": ""
} |
q267114 | ProgressBar._guess_next_poll_interval | test | def _guess_next_poll_interval(self):
"""
Determine when to query the progress status next.
This function is used if the external progress function did not return time interval for when it should be
queried next.
"""
time_elapsed = self._progress_data[-1][0] - self._progress_data[0][0]
real_progress = self._get_real_progress()
return min(0.2 * time_elapsed, 0.5 + (1 - real_progress)**0.5) | python | {
"resource": ""
} |
q267115 | ProgressBar._compute_progress_at_time | test | def _compute_progress_at_time(self, t):
"""
Calculate the modelled progress state for the given time moment.
:returns: tuple (x, v) of the progress level and progress speed.
"""
t0, x0, v0, ve = self._t0, self._x0, self._v0, self._ve
z = (v0 - ve) * math.exp(-self.BETA * (t - t0))
vt = ve + z
xt = clamp(x0 + ve * (t - t0) + (v0 - ve - z) / self.BETA, 0, 1)
return xt, vt | python | {
"resource": ""
} |
q267116 | ProgressBar._get_time_at_progress | test | def _get_time_at_progress(self, x_target):
"""
Return the projected time when progress level `x_target` will be reached.
Since the underlying progress model is nonlinear, we need to do use Newton method to find a numerical solution
to the equation x(t) = x_target.
"""
t, x, v = self._t0, self._x0, self._v0
# The convergence should be achieved in just a few iterations; however, in the unlikely situation that it doesn't,
# we don't want to loop forever...
for _ in range(20):
if v == 0: return 1e20
# make time prediction assuming the progress will continue at a linear speed ``v``
t += (x_target - x) / v
# calculate the actual progress at that time
x, v = self._compute_progress_at_time(t)
# iterate until convergence
if abs(x - x_target) < 1e-3: return t
return time.time() + 100 | python | {
"resource": ""
} |
q267117 | ProgressBar._draw | test | def _draw(self, txt, final=False):
"""Print the rendered string to the stdout."""
if not self._file_mode:
# If the user presses Ctrl+C this ensures we still start writing from the beginning of the line
sys.stdout.write("\r")
sys.stdout.write(txt)
if final and not isinstance(self._widget, _HiddenWidget):
sys.stdout.write("\n")
else:
if not self._file_mode:
sys.stdout.write("\r")
sys.stdout.flush() | python | {
"resource": ""
} |
q267118 | _ProgressBarCompoundWidget._compute_widget_sizes | test | def _compute_widget_sizes(self):
"""Initial rendering stage, done in order to compute widths of all widgets."""
wl = [0] * len(self._widgets)
flex_count = 0
# First render all non-flexible widgets
for i, widget in enumerate(self._widgets):
if isinstance(widget, ProgressBarFlexibleWidget):
flex_count += 1
else:
wl[i] = widget.render(1).length
remaining_width = self._width - sum(wl)
remaining_width -= len(self._widgets) - 1 # account for 1-space interval between widgets
if remaining_width < 10 * flex_count:
if self._file_mode:
remaining_width = 10 * flex_count
else:
# The window is too small to accommodate the widget: try to split it into several lines, otherwise
# switch to "file mode". If we don't do this, then rendering the widget will cause it to wrap, and
# then when we use \r to go to the beginning of the line, only part of the widget will be overwritten,
# which means we'll have many (possibly hundreds) of progress bar lines in the end.
widget0 = self._widgets[0]
if isinstance(widget0, PBWString) and remaining_width + widget0.render(0).length >= 10 * flex_count:
remaining_width += widget0.render(0).length + 1
self._to_render = widget0.render(0).rendered + "\n"
self._widgets = self._widgets[1:]
if remaining_width < 10 * flex_count:
self._file_mode = True
remaining_width = 10 * flex_count
remaining_width = max(remaining_width, 10 * flex_count) # Ensure at least 10 chars per flexible widget
for i, widget in enumerate(self._widgets):
if isinstance(widget, ProgressBarFlexibleWidget):
target_length = int(remaining_width / flex_count)
result = widget.render(1, target_length)
wl[i] = result.length
remaining_width -= result.length
flex_count -= 1
return wl | python | {
"resource": ""
} |
q267119 | _ProgressBarCompoundWidget._get_terminal_size | test | def _get_terminal_size():
"""Find current STDOUT's width, in characters."""
# If output is not a terminal but a regular file, assume 80 chars width
if not sys.stdout.isatty():
return 80
# Otherwise, first try getting the dimensions from shell command `stty`:
try:
import subprocess
ret = subprocess.check_output(["stty", "size"]).strip().split(" ")
if len(ret) == 2:
return int(ret[1])
except:
pass
# Otherwise try using ioctl
try:
from termios import TIOCGWINSZ
from fcntl import ioctl
from struct import unpack
res = unpack("hh", ioctl(sys.stdout, TIOCGWINSZ, b"1234"))
return int(res[1])
except:
pass
# Finally check the COLUMNS environment variable
return int(os.environ.get("COLUMNS", 80)) | python | {
"resource": ""
} |
q267120 | PBWBar.set_encoding | test | def set_encoding(self, encoding):
"""Inform the widget about the encoding of the underlying character stream."""
self._bar_ends = "[]"
self._bar_symbols = "#"
if not encoding: return
s1 = "\u258F\u258E\u258D\u258C\u258B\u258A\u2589\u2588"
s2 = "\u258C\u2588"
s3 = "\u2588"
if self._file_mode:
s1 = s2 = None
assert len(s3) == 1
for s in (s1, s2, s3):
if s is None: continue
try:
s.encode(encoding)
self._bar_ends = "||"
self._bar_symbols = s
return
except UnicodeEncodeError:
pass
except LookupError:
print("Warning: unknown encoding %s" % encoding) | python | {
"resource": ""
} |
q267121 | TargetEncoder.fit | test | def fit(self, frame = None):
"""
Returns encoding map as an object that maps 'column_name' -> 'frame_with_encoding_map_for_this_column_name'
:param frame frame: An H2OFrame object with which to create the target encoding map
"""
self._teColumns = list(map(lambda i: frame.names[i], self._teColumns)) if all(isinstance(n, int) for n in self._teColumns) else self._teColumns
self._responseColumnName = frame.names[self._responseColumnName] if isinstance(self._responseColumnName, int) else self._responseColumnName
self._foldColumnName = frame.names[self._foldColumnName] if isinstance(self._foldColumnName, int) else self._foldColumnName
self._encodingMap = ExprNode("target.encoder.fit", frame, self._teColumns, self._responseColumnName,
self._foldColumnName)._eager_map_frame()
return self._encodingMap | python | {
"resource": ""
} |
q267122 | H2OFrame.get_frame | test | def get_frame(frame_id, rows=10, rows_offset=0, cols=-1, full_cols=-1, cols_offset=0, light=False):
"""
Retrieve an existing H2OFrame from the H2O cluster using the frame's id.
:param str frame_id: id of the frame to retrieve
:param int rows: number of rows to fetch for preview (10 by default)
:param int rows_offset: offset to fetch rows from (0 by default)
:param int cols: number of columns to fetch (all by default)
:param full_cols: number of columns to fetch together with backed data
:param int cols_offset: offset to fetch rows from (0 by default)
:param bool light: whether to use the light frame endpoint or not
:returns: an existing H2OFrame with the id provided; or None if such frame doesn't exist.
"""
fr = H2OFrame()
fr._ex._cache._id = frame_id
try:
fr._ex._cache.fill(rows=rows, rows_offset=rows_offset, cols=cols, full_cols=full_cols, cols_offset=cols_offset, light=light)
except EnvironmentError:
return None
return fr | python | {
"resource": ""
} |
q267123 | H2OFrame.refresh | test | def refresh(self):
"""Reload frame information from the backend H2O server."""
self._ex._cache.flush()
self._frame(fill_cache=True) | python | {
"resource": ""
} |
q267124 | H2OFrame.type | test | def type(self, col):
"""
The type for the given column.
:param col: either a name, or an index of the column to look up
:returns: type of the column, one of: ``str``, ``int``, ``real``, ``enum``, ``time``, ``bool``.
:raises H2OValueError: if such column does not exist in the frame.
"""
assert_is_type(col, int, str)
if not self._ex._cache.types_valid() or not self._ex._cache.names_valid():
self._ex._cache.flush()
self._frame(fill_cache=True)
types = self._ex._cache.types
if is_type(col, str):
if col in types:
return types[col]
else:
names = self._ex._cache.names
if -len(names) <= col < len(names):
return types[names[col]]
raise H2OValueError("Column '%r' does not exist in the frame" % col) | python | {
"resource": ""
} |
q267125 | H2OFrame.columns_by_type | test | def columns_by_type(self, coltype="numeric"):
"""
Extract columns of the specified type from the frame.
:param str coltype: A character string indicating which column type to filter by. This must be
one of the following:
- ``"numeric"`` - Numeric, but not categorical or time
- ``"categorical"`` - Integer, with a categorical/factor String mapping
- ``"string"`` - String column
- ``"time"`` - Long msec since the Unix Epoch - with a variety of display/parse options
- ``"uuid"`` - UUID
- ``"bad"`` - No non-NA rows (triple negative! all NAs or zero rows)
:returns: list of indices of columns that have the requested type
"""
assert_is_type(coltype, "numeric", "categorical", "string", "time", "uuid", "bad")
assert_is_type(self, H2OFrame)
return ExprNode("columnsByType", self, coltype)._eager_scalar() | python | {
"resource": ""
} |
q267126 | H2OFrame.summary | test | def summary(self, return_data=False):
"""
Display summary information about the frame.
Summary includes min/mean/max/sigma and other rollup data.
:param bool return_data: Return a dictionary of the summary output
"""
if not self._has_content():
print("This H2OFrame is empty and not initialized.")
return self._ex._cache._data;
if not self._ex._cache.is_valid(): self._frame()._ex._cache.fill()
if not return_data:
if self.nrows == 0:
print("This H2OFrame is empty.")
elif H2ODisplay._in_ipy():
import IPython.display
IPython.display.display_html(self._ex._cache._tabulate("html", True), raw=True)
else:
print(self._ex._cache._tabulate("simple", True))
else:
return self._ex._cache._data | python | {
"resource": ""
} |
q267127 | H2OFrame.describe | test | def describe(self, chunk_summary=False):
"""
Generate an in-depth description of this H2OFrame.
This will print to the console the dimensions of the frame; names/types/summary statistics for each column;
and finally first ten rows of the frame.
:param bool chunk_summary: Retrieve the chunk summary along with the distribution summary
"""
if self._has_content():
res = h2o.api("GET /3/Frames/%s" % self.frame_id, data={"row_count": 10})["frames"][0]
self._ex._cache._fill_data(res)
print("Rows:{}".format(self.nrow))
print("Cols:{}".format(self.ncol))
#The chunk & distribution summaries are not cached, so must be pulled if chunk_summary=True.
if chunk_summary:
res["chunk_summary"].show()
res["distribution_summary"].show()
print("\n")
self.summary() | python | {
"resource": ""
} |
q267128 | H2OFrame.head | test | def head(self, rows=10, cols=200):
"""
Return the first ``rows`` and ``cols`` of the frame as a new H2OFrame.
:param int rows: maximum number of rows to return
:param int cols: maximum number of columns to return
:returns: a new H2OFrame cut from the top left corner of the current frame, and having dimensions at
most ``rows`` x ``cols``.
"""
assert_is_type(rows, int)
assert_is_type(cols, int)
nrows = min(self.nrows, rows)
ncols = min(self.ncols, cols)
newdt = self[:nrows, :ncols]
return newdt._frame(rows=nrows, cols=cols, fill_cache=True) | python | {
"resource": ""
} |
q267129 | H2OFrame.mult | test | def mult(self, matrix):
"""
Multiply this frame, viewed as a matrix, by another matrix.
:param matrix: another frame that you want to multiply the current frame by; must be compatible with the
current frame (i.e. its number of rows must be the same as number of columns in the current frame).
:returns: new H2OFrame, which is the result of multiplying the current frame by ``matrix``.
"""
if self.ncols != matrix.nrows:
raise H2OValueError("Matrix is not compatible for multiplication with the current frame")
return H2OFrame._expr(expr=ExprNode("x", self, matrix)) | python | {
"resource": ""
} |
q267130 | H2OFrame.levels | test | def levels(self):
"""
Get the factor levels.
:returns: A list of lists, one list per column, of levels.
"""
lol = H2OFrame._expr(expr=ExprNode("levels", self)).as_data_frame(False)
lol.pop(0) # Remove column headers
lol = list(zip(*lol))
return [[ll for ll in l if ll != ''] for l in lol] | python | {
"resource": ""
} |
q267131 | H2OFrame.nlevels | test | def nlevels(self):
"""
Get the number of factor levels for each categorical column.
:returns: A list of the number of levels per column.
"""
levels = self.levels()
return [len(l) for l in levels] if levels else 0 | python | {
"resource": ""
} |
q267132 | H2OFrame.set_level | test | def set_level(self, level):
"""
A method to set all column values to one of the levels.
:param str level: The level at which the column will be set (a string)
:returns: H2OFrame with entries set to the desired level.
"""
return H2OFrame._expr(expr=ExprNode("setLevel", self, level), cache=self._ex._cache) | python | {
"resource": ""
} |
q267133 | H2OFrame.set_levels | test | def set_levels(self, levels):
"""
Replace the levels of a categorical column.
New levels must be aligned with the old domain. This call has copy-on-write semantics.
:param List[str] levels: A list of strings specifying the new levels. The number of new
levels must match the number of old levels.
:returns: A single-column H2OFrame with the desired levels.
"""
assert_is_type(levels, [str])
return H2OFrame._expr(expr=ExprNode("setDomain", self, False, levels), cache=self._ex._cache) | python | {
"resource": ""
} |
q267134 | H2OFrame.rename | test | def rename(self, columns=None):
"""
Change names of columns in the frame.
Dict key is an index or name of the column whose name is to be set.
Dict value is the new name of the column.
:param columns: dict-like transformations to apply to the column names
"""
assert_is_type(columns, None, dict)
new_names = self.names
ncols = self.ncols
for col, name in columns.items():
col_index = None
if is_type(col, int) and (-ncols <= col < ncols):
col_index = (col + ncols) % ncols # handle negative indices
elif is_type(col, str) and col in self.names:
col_index = self.names.index(col) # lookup the name
if col_index is not None:
new_names[col_index] = name
return self.set_names(new_names) | python | {
"resource": ""
} |
q267135 | H2OFrame.set_names | test | def set_names(self, names):
"""
Change names of all columns in the frame.
:param List[str] names: The list of new names for every column in the frame.
"""
assert_is_type(names, [str])
assert_satisfies(names, len(names) == self.ncol)
self._ex = ExprNode("colnames=", self, range(self.ncol), names) # Update-in-place, but still lazy
return self | python | {
"resource": ""
} |
def set_name(self, col=None, name=None):
    """
    Set a new name for a column.

    :param col: index or name of the column whose name is to be set; may be skipped for 1-column frames
    :param name: the new name of the column
    """
    assert_is_type(col, None, int, str)
    assert_is_type(name, str)
    ncols = self.ncols

    if is_type(col, int):
        # numeric index, possibly negative
        if not(-ncols <= col < ncols):
            raise H2OValueError("Index %d is out of bounds for a frame with %d columns" % (col, ncols))
        idx = (col + ncols) % ncols  # handle negative indices
    elif is_type(col, str):
        # column referenced by its current name
        if col not in self.names:
            raise H2OValueError("Column %s doesn't exist in the frame." % col)
        idx = self.names.index(col)
    else:
        assert col is None
        if ncols != 1:
            raise H2OValueError("The frame has %d columns; please specify which one to rename" % ncols)
        idx = 0

    if name != self.names[idx] and name in self.types:
        raise H2OValueError("Column '%s' already exists in the frame" % name)

    previous_name = self.names[idx]
    saved_cache = self._ex._cache
    self._ex = ExprNode("colnames=", self, idx, name)  # Update-in-place, but still lazy
    self._ex._cache.fill_from(saved_cache)
    if self.names is None:
        self._frame()._ex._cache.fill()
    else:
        self._ex._cache._names = self.names[:idx] + [name] + self.names[idx + 1:]
        self._ex._cache._types[name] = self._ex._cache._types.pop(previous_name)
    return
def isin(self, item):
    """
    Test whether elements of an H2OFrame are contained in the ``item``.

    :param item: An item or a list of items to compare the H2OFrame against.
    :returns: An H2OFrame of 0s and 1s showing whether each element in the original H2OFrame is contained in item.
    """
    if not is_type(item, list, tuple, set):
        return self == item
    # single string/categorical column: delegate to the efficient match primitive
    if self.ncols == 1 and self.type(0) in ("str", "enum"):
        return self.match(item)
    # otherwise OR together the element-wise equality frames
    return functools.reduce(H2OFrame.__or__, (self == i for i in item))
def modulo_kfold_column(self, n_folds=3):
    """
    Build a fold assignments column for cross-validation.

    Each row is assigned to the fold equal to its row number modulo ``n_folds``.

    :param int n_folds: An integer specifying the number of validation sets to split the training data into.
    :returns: A single-column H2OFrame with the fold assignments.
    """
    expr = ExprNode("modulo_kfold_column", self, n_folds)
    return H2OFrame._expr(expr=expr)._frame()
def stratified_kfold_column(self, n_folds=3, seed=-1):
    """
    Build a fold assignment column with the constraint that each fold has the same class
    distribution as the fold column.

    :param int n_folds: The number of folds to build.
    :param int seed: A seed for the random number generator.
    :returns: A single column H2OFrame with the fold assignments.
    """
    expr = ExprNode("stratified_kfold_column", self, n_folds, seed)
    return H2OFrame._expr(expr=expr)._frame()
def structure(self):
    """Compactly display the internal structure of an H2OFrame."""
    rows = self.as_data_frame(use_pandas=False)
    colnames = rows.pop(0)
    nrows = self.nrow
    ncols = self.ncol
    name_width = max([len(name) for name in colnames])
    factor_flags = self.isfactor()
    level_counts = self.nlevels()
    level_values = self.levels()
    print("H2OFrame: '{}' \nDimensions: {} obs. of {} variables".format(self.frame_id, nrows, ncols))
    for j in range(ncols):
        padding = ' ' * (name_width - max(0, len(colnames[j])))
        print("$ {} {}: ".format(colnames[j], padding), end=' ')
        if factor_flags[j]:
            quoted_levels = '"' + '","'.join(level_values[j]) + '"'
            print("Factor w/ {} level(s) {} ".format(level_counts[j], quoted_levels), end='\n')
        else:
            # show a preview of the first 10 values (skipping the header row)
            sample = h2o.as_list(self[:10, j], False)[1:]
            print("num {}".format(" ".join(it[0] if it else "nan" for it in sample)))
def as_data_frame(self, use_pandas=True, header=True):
    """
    Obtain the dataset as a python-local object.

    :param bool use_pandas: If True (default) then return the H2OFrame as a pandas DataFrame (requires that the
        ``pandas`` library was installed). If False, then return the contents of the H2OFrame as plain nested
        list, in a row-wise order.
    :param bool header: If True (default), then column names will be appended as the first row in list
    :returns: a pandas DataFrame when ``use_pandas`` is True (and pandas is available), otherwise a list of
        lists of strings, one inner list per row.
    """
    if use_pandas and can_use_pandas():
        import pandas
        return pandas.read_csv(StringIO(self.get_frame_data()), low_memory=False, skip_blank_lines=False)
    from h2o.utils.csv.readers import reader
    rows = list(reader(StringIO(self.get_frame_data())))
    if not header:
        rows.pop(0)
    return rows
def pop(self, i):
    """
    Pop a column from the H2OFrame at index i.

    :param i: The index (int) or name (str) of the column to pop.
    :returns: an H2OFrame containing the column dropped from the current frame; the current frame is modified
        in-place and loses the column.
    """
    if is_type(i, str):
        i = self.names.index(i)
    popped = H2OFrame._expr(expr=ExprNode("cols", self, i))
    prior_cache = self._ex._cache
    # negative index selects the complement: drop the i-th column from this frame
    self._ex = ExprNode("cols", self, -(i + 1))
    self._ex._cache.ncols -= 1
    self._ex._cache.names = prior_cache.names[:i] + prior_cache.names[i + 1:]
    self._ex._cache.types = {n: prior_cache.types[n] for n in self._ex._cache.names}
    self._ex._cache._data = None
    popped._ex._cache.ncols = 1
    popped._ex._cache.names = [prior_cache.names[i]]
    return popped
def quantile(self, prob=None, combine_method="interpolate", weights_column=None):
    """
    Compute quantiles.

    :param List[float] prob: list of probabilities for which quantiles should be computed.
    :param str combine_method: for even samples this setting determines how to combine quantiles. This can be
        one of ``"interpolate"``, ``"average"``, ``"low"``, ``"high"``.
    :param weights_column: optional weights for each row. If not given, all rows are assumed to have equal
        importance. Either the name of a column containing the observation weights in this frame, or a
        single-column separate H2OFrame of observation weights.
    :returns: a new H2OFrame containing the quantiles and probabilities.
    """
    if len(self) == 0:
        return self
    if prob is None:
        prob = [0.01, 0.1, 0.25, 0.333, 0.5, 0.667, 0.75, 0.9, 0.99]
    if weights_column is None:
        weights_column = "_"  # sentinel: no weights
    else:
        assert_is_type(weights_column, str, I(H2OFrame, lambda wc: wc.ncol == 1 and wc.nrow == self.nrow))
        if isinstance(weights_column, H2OFrame):
            # bind the weights frame onto this one and refer to it by its (last) column name
            merged = self.cbind(weights_column)
            weights_column = merged.names[-1]
            return H2OFrame._expr(expr=ExprNode("quantile", merged, prob, combine_method, weights_column))
    return H2OFrame._expr(expr=ExprNode("quantile", self, prob, combine_method, weights_column))
def concat(self, frames, axis=1):
    """
    Append multiple H2OFrames to this frame, column-wise or row-wise.

    :param List[H2OFrame] frames: list of frames that should be appended to the current frame.
    :param int axis: if 1 then append column-wise (default), if 0 then append row-wise.
    :returns: an H2OFrame of the combined datasets.
    :raises ValueError: if ``frames`` is empty.
    """
    # fix: previously any axis value other than 1 silently fell through to row-binding;
    # validate it the same way fillna() does.
    assert_is_type(axis, 0, 1)
    if len(frames) == 0:
        raise ValueError("Input list of frames is empty! Nothing to concat.")
    return self.cbind(frames) if axis == 1 else self.rbind(frames)
def cbind(self, data):
    """
    Append data to this frame column-wise.

    :param H2OFrame data: append columns of frame ``data`` to the current frame. You can also cbind a number,
        in which case it will get converted into a constant column.
    :returns: new H2OFrame with all frames in ``data`` appended column-wise.
    """
    assert_is_type(data, H2OFrame, numeric, [H2OFrame, numeric])
    others = data if isinstance(data, list) else [data]
    combined_names = list(self.columns)
    combined_types = dict(self.types)
    for other in others:
        if isinstance(other, H2OFrame):
            if other.nrow != self.nrow:
                raise H2OValueError("Cannot bind a dataframe with %d rows to a data frame with %d rows: "
                                    "the number of rows should match" % (other.nrow, self.nrow))
            combined_names += other.columns
            combined_types.update(other.types)
        else:
            combined_names += [None]  # constant column: the server will pick its name
    fr = H2OFrame._expr(expr=ExprNode("cbind", self, *others), cache=self._ex._cache)
    fr._ex._cache.ncols = len(combined_names)
    distinct = set(combined_names)
    if len(combined_names) == len(distinct) and None not in distinct:
        fr._ex._cache.names = combined_names
        fr._ex._cache.types = combined_types
    else:
        # Invalidate names and types since they contain duplicate / unknown names, and the server will choose those.
        fr._ex._cache.names = None
        fr._ex._cache.types = None
    return fr
def rbind(self, data):
    """
    Append data to this frame row-wise.

    :param data: an H2OFrame or a list of H2OFrame's to be combined with current frame row-wise.
    :returns: this H2OFrame with all frames in data appended row-wise.
    """
    assert_is_type(data, H2OFrame, [H2OFrame])
    others = data if isinstance(data, list) else [data]
    for other in others:
        if other.ncol != self.ncol:
            raise H2OValueError("Cannot row-bind a dataframe with %d columns to a data frame with %d columns: "
                                "the columns must match" % (other.ncol, self.ncol))
        if other.columns != self.columns or other.types != self.types:
            raise H2OValueError("Column names and types must match for rbind() to work")
    fr = H2OFrame._expr(expr=ExprNode("rbind", self, *others), cache=self._ex._cache)
    fr._ex._cache.nrows = self.nrow + sum(other.nrow for other in others)
    return fr
def split_frame(self, ratios=None, destination_frames=None, seed=None):
    """
    Split a frame into distinct subsets of size determined by the given ratios.

    The number of subsets is always 1 more than the number of ratios given. Note that
    this does not give an exact split. H2O is designed to be efficient on big data
    using a probabilistic splitting method rather than an exact split. For example
    when specifying a split of 0.75/0.25, H2O will produce a test/train split with
    an expected value of 0.75/0.25 rather than exactly 0.75/0.25. On small datasets,
    the sizes of the resulting splits will deviate from the expected value more than
    on big data, where they will be very close to exact.

    :param List[float] ratios: The fractions of rows for each split.
    :param List[str] destination_frames: The names of the split frames.
    :param int seed: seed for the random number generator
    :returns: A list of H2OFrames
    :raises ValueError: if ratios are empty, non-positive, sum to >= 1, or the number of
        destination frames does not match.
    """
    assert_is_type(ratios, [numeric], None)
    assert_is_type(destination_frames, [str], None)
    assert_is_type(seed, int, None)

    if ratios is None:
        ratios = [0.75]
    if not ratios:
        raise ValueError("Ratios array may not be empty")
    if destination_frames is not None:
        if len(ratios) + 1 != len(destination_frames):
            raise ValueError("The number of provided destination_frames must be one more "
                             "than the number of provided ratios")

    num_slices = len(ratios) + 1
    boundaries = []
    last_boundary = 0
    for ratio in ratios:
        # fix: the check used to be `ratio < 0`, letting a zero ratio through despite the
        # error message claiming it must be strictly positive
        if ratio <= 0:
            raise ValueError("Ratio must be greater than 0")
        boundary = last_boundary + ratio
        if boundary >= 1.0:
            raise ValueError("Ratios must add up to less than 1.0")
        boundaries.append(boundary)
        last_boundary = boundary

    splits = []
    tmp_runif = self.runif(seed)  # uniform [0,1) column used to assign rows to slices
    tmp_runif.frame_id = "%s_splitter" % _py_tmp_key(h2o.connection().session_id)
    for i in range(num_slices):
        if i == 0:
            # first slice: lower boundary is 0.0
            tmp_slice = self[(tmp_runif <= boundaries[i]), :]
        elif i == num_slices - 1:
            # last slice: upper boundary is 1.0
            tmp_slice = self[(tmp_runif > boundaries[i - 1]), :]
        else:
            tmp_slice = self[((tmp_runif > boundaries[i - 1]) & (tmp_runif <= boundaries[i])), :]
        if destination_frames is not None:
            tmp_slice.frame_id = destination_frames[i]
        splits.append(tmp_slice)
    del tmp_runif
    return splits
def group_by(self, by):
    """
    Return a new ``GroupBy`` object using this frame and the desired grouping columns.

    The returned groups are sorted by the natural group-by column sort.

    :param by: The columns to group on: a single column name, a list of column names, or
        a list of column indices.
    :returns: a new ``GroupBy`` object bound to this frame.
    """
    assert_is_type(by, str, int, [str, int])
    grouping = GroupBy(self, by)
    return grouping
def fillna(self, method="forward", axis=0, maxlen=1):
    """
    Return a new frame with NAs filled along a given axis and direction, up to a maximum run length.

    :param method: ``"forward"`` or ``"backward"``
    :param axis: 0 for columnar-wise or 1 for row-wise fill
    :param maxlen: Max number of consecutive NA's to fill
    :returns: a new H2OFrame with missing values filled in.
    """
    assert_is_type(axis, 0, 1)
    assert_is_type(method, str)
    assert_is_type(maxlen, int)
    return H2OFrame._expr(expr=ExprNode("h2o.fillna", self, method, axis, maxlen))
def impute(self, column=-1, method="mean", combine_method="interpolate", by=None, group_by_frame=None, values=None):
    """
    Impute missing values into the frame, modifying it in-place.

    :param int column: Index of the column to impute, or -1 to impute the entire frame.
    :param str method: The method of imputation: ``"mean"``, ``"median"``, or ``"mode"``.
    :param str combine_method: When the method is ``"median"``, this setting dictates how to combine quantiles
        for even samples. One of ``"interpolate"``, ``"average"``, ``"low"``, ``"high"``.
    :param by: The list of columns to group on.
    :param H2OFrame group_by_frame: Impute the values with this pre-computed grouped frame.
    :param List values: The list of impute values, one per column. None indicates to skip the column.
    :returns: A list of values used in the imputation or the group-by result used in imputation.
    """
    if is_type(column, str): column = self.names.index(column)
    if is_type(by, str): by = self.names.index(by)

    if values is None:
        values = "_"  # sentinel meaning "no explicit impute values"
    else:
        assert len(values) == len(self.columns), "Length of values does not match length of columns"
        # convert string values to categorical num values
        values2 = []
        for i in range(0, len(values)):
            if self.type(i) == "enum":
                try:
                    values2.append(self.levels()[i].index(values[i]))
                except ValueError:  # fix: was a bare `except`, which also masked unrelated errors
                    raise H2OValueError("Impute value of: " + values[i] + " not found in existing levels of"
                                        " column: " + self.col_names[i])
            else:
                values2.append(values[i])
        values = values2
    if group_by_frame is None: group_by_frame = "_"

    # This code below is needed to ensure the frame (self) exists on the server. Without it, self._ex._cache.fill()
    # fails with an assertion that ._id is None.
    # This code should be removed / reworked once we have a more consistent strategy of dealing with frames.
    self._ex._eager_frame()

    # fix: was `group_by_frame is not "_"` -- identity comparison with a string literal is
    # implementation-dependent (and a SyntaxWarning on modern Python); use equality instead.
    if by is not None or group_by_frame != "_":
        res = H2OFrame._expr(
            expr=ExprNode("h2o.impute", self, column, method, combine_method, by, group_by_frame, values))._frame()
    else:
        res = ExprNode("h2o.impute", self, column, method, combine_method, by, group_by_frame,
                       values)._eager_scalar()

    self._ex._cache.flush()
    self._ex._cache.fill(10)
    return res
def merge(self, other, all_x=False, all_y=False, by_x=None, by_y=None, method="auto"):
    """
    Merge two datasets based on common column names. We do not support all_x=True and all_y=True.
    Only one can be True or none is True. The default merge method is auto and it will default to the
    radix method. The radix method will return the correct merge result regardless of duplicated rows
    in the right frame. In addition, the radix method can perform merge even if you have string columns
    in your frames. If there are duplicated rows in your right frame, they will not be included if you use
    the hash method. The hash method cannot perform merge if you have string columns in your left frame.
    Hence, we consider the radix method superior to the hash method and is the default method to use.

    :param H2OFrame other: The frame to merge to the current one. By default, must have at least one column in
        common with this frame, and all columns in common are used as the merge key. If you want to use only a
        subset of the columns in common, rename the other columns so the columns are unique in the merged result.
    :param bool all_x: If True, include all rows from the left/self frame
    :param bool all_y: If True, include all rows from the right/other frame
    :param by_x: list of columns in the current frame to use as a merge key.
    :param by_y: list of columns in the ``other`` frame to use as a merge key. Should have the same number of
        columns as in the ``by_x`` list.
    :param method: string representing the merge method, one of auto(default), radix or hash.
    :returns: New H2OFrame with the result of merging the current frame with the ``other`` frame.
    """
    if by_x is None and by_y is None:
        # NOTE(review): if exactly one of by_x/by_y is given, `common_names` is undefined below
        # (same latent behavior as before this rewrite).
        common_names = list(set(self.names) & set(other.names))
        if not common_names:
            raise H2OValueError("No columns in common to merge on!")
    by_x = [self.names.index(c) for c in common_names] if by_x is None else _getValidCols(by_x, self)
    by_y = [other.names.index(c) for c in common_names] if by_y is None else _getValidCols(by_y, other)
    return H2OFrame._expr(expr=ExprNode("merge", self, other, all_x, all_y, by_x, by_y, method))
def relevel(self, y):
    """
    Reorder the levels of an H2O factor for one single column of an H2O frame.

    The levels of a factor are reordered such that the reference level is at level 0, and all
    remaining levels are moved down as needed.

    :param str y: The reference level
    :returns: New reordered factor column
    """
    expr = ExprNode("relevel", self, quote(y))
    return H2OFrame._expr(expr=expr)
def insert_missing_values(self, fraction=0.1, seed=None):
    """
    Insert missing values into the current frame, modifying it in-place.

    Randomly replaces a user-specified fraction of entries in a H2O dataset with missing
    values.

    :param float fraction: A number between 0 and 1 indicating the fraction of entries to replace with missing.
    :param int seed: The seed for the random number generator used to determine which values to make missing.
    :returns: the original H2OFrame with missing values inserted.
    """
    params = {"dataset": self.frame_id,  # Eager; forces eval now for following REST call
              "fraction": fraction}
    if seed is not None:
        params["seed"] = seed
    job = {"job": h2o.api("POST /3/MissingInserter", data=params)}
    H2OJob(job, job_type=("Insert Missing Values")).poll()
    self._ex._cache.flush()
    return self
def var(self, y=None, na_rm=False, use=None):
    """
    Compute the variance-covariance matrix of one or two H2OFrames.

    :param H2OFrame y: If this parameter is given, then a covariance matrix between the columns of the target
        frame and the columns of ``y`` is computed. If this parameter is not provided then the covariance matrix
        of the target frame is returned. If target frame has just a single column, then return the scalar variance
        instead of the matrix. Single rows are treated as single columns.
    :param str use: A string indicating how to handle missing values. This could be one of the following:

        - ``"everything"``: outputs NaNs whenever one of its contributing observations is missing
        - ``"all.obs"``: presence of missing observations will throw an error
        - ``"complete.obs"``: discards missing values along with all observations in their rows so that only
          complete observations are used
    :param bool na_rm: an alternative to ``use``: when this is True then default value for ``use`` is
        ``"everything"``; and if False then default ``use`` is ``"complete.obs"``. This parameter has no effect
        if ``use`` is given explicitly.
    :returns: An H2OFrame of the covariance matrix of the columns of this frame (if ``y`` is not given),
        or with the columns of ``y`` (if ``y`` is given). However when this frame and ``y`` are both single rows
        or single columns, then the variance is returned as a scalar.
    """
    symmetric = y is None  # no second frame: covariance of self with itself
    if symmetric:
        y = self
    if use is None:
        use = "complete.obs" if na_rm else "everything"
    if self.nrow == 1 or (self.ncol == 1 and y.ncol == 1):
        return ExprNode("var", self, y, use, symmetric)._eager_scalar()
    return H2OFrame._expr(expr=ExprNode("var", self, y, use, symmetric))._frame()
def cor(self, y=None, na_rm=False, use=None):
    """
    Compute the correlation matrix of one or two H2OFrames.

    :param H2OFrame y: If this parameter is provided, then compute correlation between the columns of ``y``
        and the columns of the current frame. If this parameter is not given, then just compute the correlation
        matrix for the columns of the current frame.
    :param str use: A string indicating how to handle missing values. This could be one of the following:

        - ``"everything"``: outputs NaNs whenever one of its contributing observations is missing
        - ``"all.obs"``: presence of missing observations will throw an error
        - ``"complete.obs"``: discards missing values along with all observations in their rows so that only
          complete observations are used
    :param bool na_rm: an alternative to ``use``: when this is True then default value for ``use`` is
        ``"everything"``; and if False then default ``use`` is ``"complete.obs"``. This parameter has no effect
        if ``use`` is given explicitly.
    :returns: An H2OFrame of the correlation matrix of the columns of this frame (if ``y`` is not given),
        or with the columns of ``y`` (if ``y`` is given). However when this frame and ``y`` are both single rows
        or single columns, then the correlation is returned as a scalar.
    """
    assert_is_type(y, H2OFrame, None)
    assert_is_type(na_rm, bool)
    assert_is_type(use, None, "everything", "all.obs", "complete.obs")
    if y is None:
        y = self
    if use is None:
        use = "complete.obs" if na_rm else "everything"
    if self.nrow == 1 or (self.ncol == 1 and y.ncol == 1):
        return ExprNode("cor", self, y, use)._eager_scalar()
    return H2OFrame._expr(expr=ExprNode("cor", self, y, use))._frame()
def distance(self, y, measure=None):
    """
    Compute a pairwise distance measure between all rows of two numeric H2OFrames.

    :param H2OFrame y: Frame containing queries (small)
    :param str measure: A string indicating what distance measure to use (defaults to ``"l2"``).
        Must be one of:

        - ``"l1"``: Absolute distance (L1-norm, >=0)
        - ``"l2"``: Euclidean distance (L2-norm, >=0)
        - ``"cosine"``: Cosine similarity (-1...1)
        - ``"cosine_sq"``: Squared Cosine similarity (0...1)

    :examples:

        >>> iris_h2o = h2o.import_file(path=pyunit_utils.locate("smalldata/iris/iris.csv"))
        >>> references = iris_h2o[10:150, 0:4]
        >>> queries = iris_h2o[0:10, 0:4]
        >>> A = references.distance(queries, "l1")
        >>> B = references.distance(queries, "l2")
        >>> C = references.distance(queries, "cosine")
        >>> D = references.distance(queries, "cosine_sq")
        >>> E = queries.distance(references, "l1")
        >>> (E.transpose() == A).all()

    :returns: An H2OFrame of the matrix containing pairwise distance / similarity between the
        rows of this frame (N x p) and ``y`` (M x p), with dimensions (N x M).
    """
    # fix: docstring documented the parameter as `use` (it is `measure`) and had an
    # unterminated slice bracket in the example.
    assert_is_type(y, H2OFrame)
    if measure is None:
        measure = "l2"
    return H2OFrame._expr(expr=ExprNode("distance", self, y, measure))._frame()
def asfactor(self):
    """
    Convert columns in the current frame to categoricals.

    :returns: new H2OFrame with columns of the "enum" type.
    """
    for colname in self.names:
        ctype = self.types[colname]
        if ctype not in {"bool", "int", "string", "enum"}:
            raise H2OValueError("Only 'int' or 'string' are allowed for "
                                "asfactor(), got %s:%s " % (colname, ctype))
    fr = H2OFrame._expr(expr=ExprNode("as.factor", self), cache=self._ex._cache)
    if not fr._ex._cache.types_valid():
        raise H2OTypeError("Types are not available in result")
    fr._ex._cache.types = {name: "enum" for name in self.types}
    return fr
def strsplit(self, pattern):
    """
    Split the strings in the target column on the given regular expression pattern.

    :param str pattern: The split pattern.
    :returns: H2OFrame containing columns of the split strings.
    """
    result = H2OFrame._expr(expr=ExprNode("strsplit", self, pattern))
    result._ex._cache.nrows = self.nrow  # row count is preserved by the split
    return result
def countmatches(self, pattern):
    """
    For each string in the frame, count the occurrences of the provided pattern.

    If countmatches is applied to a frame, all columns of the frame must be type string, otherwise,
    the returned frame will contain errors. The pattern here is a plain string, not a regular
    expression: we search for the occurrences of the pattern as a substring in each element of the
    frame. This function is applicable to frames containing only string or categorical columns.

    :param str pattern: The pattern to count matches on in each string. This can also be a list of strings,
        in which case all of them will be searched for.
    :returns: numeric H2OFrame with the same shape as the original, containing counts of matches of the
        pattern for each cell in the original frame.
    """
    assert_is_type(pattern, str, [str])
    result = H2OFrame._expr(expr=ExprNode("countmatches", self, pattern))
    # the result has the same shape as this frame
    result._ex._cache.nrows = self.nrow
    result._ex._cache.ncols = self.ncol
    return result
def substring(self, start_index, end_index=None):
    """
    For each string, return a new string that is a substring of the original string.

    If end_index is not specified, then the substring extends to the end of the original string. If the start_index
    is longer than the length of the string, or is greater than or equal to the end_index, an empty string is
    returned. Negative start_index is coerced to 0.

    :param int start_index: The index of the original string at which to start the substring, inclusive.
    :param int end_index: The index of the original string at which to end the substring, exclusive.
    :returns: An H2OFrame containing the specified substrings.
    """
    fr = H2OFrame._expr(expr=ExprNode("substring", self, start_index, end_index))
    fr._ex._cache.nrows = self.nrow
    # fix: was `fr._ex._cache.ncol = ...`, which only created a stray attribute; the cache
    # field read/written elsewhere (countmatches, cbind, pop) is `ncols`.
    fr._ex._cache.ncols = self.ncol
    return fr
def lstrip(self, set=" "):
    """
    Return a copy of the column with leading characters removed.

    The set argument is a string specifying the set of characters to be removed.
    If omitted, the set argument defaults to removing whitespace.

    :param character set: The set of characters to lstrip from strings in column.
    :returns: a new H2OFrame with the same shape as the original frame and having all its values
        trimmed from the left (equivalent of Python's ``str.lstrip()``).
    """
    # work w/ None; parity with python lstrip
    if set is None: set = " "

    fr = H2OFrame._expr(expr=ExprNode("lstrip", self, set))
    fr._ex._cache.nrows = self.nrow
    # fix: was `fr._ex._cache.ncol = ...` (stray attribute); the cache field used elsewhere is `ncols`.
    fr._ex._cache.ncols = self.ncol
    return fr
def entropy(self):
    """
    For each string compute its Shannon entropy, if the string is empty the entropy is 0.

    :returns: an H2OFrame of Shannon entropies, same shape as this frame.
    """
    fr = H2OFrame._expr(expr=ExprNode("entropy", self))
    fr._ex._cache.nrows = self.nrow
    # fix: was `fr._ex._cache.ncol = ...` (stray attribute); the cache field used elsewhere is `ncols`.
    fr._ex._cache.ncols = self.ncol
    return fr
def num_valid_substrings(self, path_to_words):
    """
    For each string, find the count of all possible substrings with 2 characters or more that are contained in
    the line-separated text file whose path is given.

    :param str path_to_words: Path to file that contains a line-separated list of strings considered valid.
    :returns: An H2OFrame with the number of substrings that are contained in the given word list.
    """
    assert_is_type(path_to_words, str)
    fr = H2OFrame._expr(expr=ExprNode("num_valid_substrings", self, path_to_words))
    fr._ex._cache.nrows = self.nrow
    # fix: was `fr._ex._cache.ncol = ...` (stray attribute); the cache field used elsewhere is `ncols`.
    fr._ex._cache.ncols = self.ncol
    return fr
def table(self, data2=None, dense=True):
    """
    Compute the counts of values appearing in a column, or co-occurence counts between two columns.

    :param H2OFrame data2: An optional single column to aggregate counts by.
    :param bool dense: If True (default) then use dense representation, which lists only non-zero counts,
        1 combination per row. Set to False to expand counts across all combinations.
    :returns: H2OFrame of the counts at each combination of factor levels
    """
    if data2 is not None:
        return H2OFrame._expr(expr=ExprNode("table", self, data2, dense))
    return H2OFrame._expr(expr=ExprNode("table", self, dense))
def hist(self, breaks="sturges", plot=True, **kwargs):
    """
    Compute a histogram over a numeric column.

    :param breaks: Can be one of ``"sturges"``, ``"rice"``, ``"sqrt"``, ``"doane"``, ``"fd"``, ``"scott"``;
        or a single number for the number of breaks; or a list containing the split points, e.g:
        ``[-50, 213.2123, 9324834]``. If breaks is "fd", the MAD is used over the IQR in computing bin width.
    :param bool plot: If True (default), then a plot will be generated using ``matplotlib``.
    :returns: If ``plot`` is False, return H2OFrame with these columns: breaks, counts, mids_true,
        mids, and density; otherwise this method draws a plot and returns nothing.
    """
    server = kwargs.pop("server") if "server" in kwargs else False
    assert_is_type(breaks, int, [numeric], Enum("sturges", "rice", "sqrt", "doane", "fd", "scott"))
    assert_is_type(plot, bool)
    assert_is_type(server, bool)
    if kwargs:
        raise H2OValueError("Unknown parameters to hist(): %r" % kwargs)
    hist = H2OFrame._expr(expr=ExprNode("hist", self, breaks))._frame()

    if not plot:
        # no plot requested: attach the density column and hand back the frame
        hist["density"] = hist["counts"] / (hist["breaks"].difflag1() * hist["counts"].sum())
        return hist

    try:
        import matplotlib
        if server:
            matplotlib.use("Agg", warn=False)
        import matplotlib.pyplot as plt
    except ImportError:
        print("ERROR: matplotlib is required to make the histogram plot. "
              "Set `plot` to False, if a plot is not desired.")
        return

    hist["widths"] = hist["breaks"].difflag1()
    # [2:] because we're removing the title and the first row (which consists of NaNs)
    lefts = [float(c[0]) for c in h2o.as_list(hist["breaks"], use_pandas=False)[2:]]
    widths = [float(c[0]) for c in h2o.as_list(hist["widths"], use_pandas=False)[2:]]
    counts = [float(c[0]) for c in h2o.as_list(hist["counts"], use_pandas=False)[2:]]
    plt.xlabel(self.names[0])
    plt.ylabel("Frequency")
    plt.title("Histogram of %s" % self.names[0])
    plt.bar(left=lefts, width=widths, height=counts, bottom=0)
    if not server:
        plt.show()
def isax(self, num_words, max_cardinality, optimize_card=False, **kwargs):
    """
    Compute the iSAX index for DataFrame which is assumed to be numeric time series data.

    References:

        - http://www.cs.ucr.edu/~eamonn/SAX.pdf
        - http://www.cs.ucr.edu/~eamonn/iSAX_2.0.pdf

    :param int num_words: Number of iSAX words for the timeseries, i.e. granularity along the time series
    :param int max_cardinality: Maximum cardinality of the iSAX word. Each word can have less than the max
    :param bool optimize_card: An optimization flag that will find the max cardinality regardless of what is
        passed in for ``max_cardinality``.

    :returns: An H2OFrame with the name of time series, string representation of iSAX word, followed by
        binary representation.
    """
    # Both sizes must be strictly positive; fail fast before going to the backend.
    if num_words <= 0: raise H2OValueError("num_words must be greater than 0")
    if max_cardinality <= 0: raise H2OValueError("max_cardinality must be greater than 0")
    return H2OFrame._expr(expr=ExprNode("isax", self, num_words, max_cardinality, optimize_card))
"resource": ""
} |
def sub(self, pattern, replacement, ignore_case=False):
    """
    Substitute the first occurrence of pattern in a string with replacement.

    :param str pattern: A regular expression.
    :param str replacement: A replacement string.
    :param bool ignore_case: If True then pattern will match case-insensitively.

    :returns: an H2OFrame with all values matching ``pattern`` replaced with ``replacement``.
    """
    replace_expr = ExprNode("replacefirst", self, pattern, replacement, ignore_case)
    return H2OFrame._expr(expr=replace_expr)
"resource": ""
} |
def toupper(self):
    """
    Translate characters from lower to upper case for a particular column.

    :returns: new H2OFrame with all strings in the current frame converted to the uppercase.
    """
    upper_expr = ExprNode("toupper", self)
    return H2OFrame._expr(expr=upper_expr, cache=self._ex._cache)
"resource": ""
} |
def grep(self, pattern, ignore_case=False, invert=False, output_logical=False):
    """
    Search for matches to argument `pattern` within each element of a string column.

    By default the indices of the matching elements are returned. Set `output_logical`
    to get a 0/1 indicator column instead (1 means the element matches, 0 means it does not).

    :param str pattern: A character string containing a regular expression.
    :param bool ignore_case: If True, then case is ignored during matching.
    :param bool invert: If True, then identify elements that do not match the pattern.
    :param bool output_logical: If True, then return logical vector of indicators instead of list of matching positions

    :return: H2OFrame holding the matching positions or a logical list if `output_logical` is enabled.
    """
    grep_expr = ExprNode("grep", self, pattern, ignore_case, invert, output_logical)
    return H2OFrame._expr(expr=grep_expr)
"resource": ""
} |
def na_omit(self):
    """
    Remove rows with NAs from the H2OFrame.

    :returns: new H2OFrame with all rows from the original frame containing any NAs removed.
    """
    cleaned = H2OFrame._expr(expr=ExprNode("na.omit", self), cache=self._ex._cache)
    # Invalidate the cached row count: it is unknown until the expression is evaluated.
    cleaned._ex._cache.nrows = -1
    return cleaned
"resource": ""
} |
def difflag1(self):
    """
    Conduct a diff-1 transform on a numeric frame column.

    :returns: an H2OFrame where each element is equal to the corresponding element in the source
        frame minus the previous-row element in the same frame.
    """
    if self.ncols > 1:
        raise H2OValueError("Only single-column frames supported")
    col_type = self.types[self.columns[0]]
    if col_type not in ("real", "int", "bool"):
        raise H2OValueError("Numeric column expected")
    return H2OFrame._expr(expr=ExprNode("difflag1", self), cache=self._ex._cache)
"resource": ""
} |
def isna(self):
    """
    For each element in an H2OFrame, determine if it is NA or not.

    :returns: an H2OFrame of 1s and 0s, where 1s mean the values were NAs.
    """
    result = H2OFrame._expr(expr=ExprNode("is.na", self))
    source_cache = self._ex._cache
    result._ex._cache.nrows = source_cache.nrows
    result._ex._cache.ncols = source_cache.ncols
    if source_cache.names:
        # Columns are renamed "isNA(<col>)" and are always integer-typed (0/1).
        new_names = ["isNA(%s)" % n for n in source_cache.names]
        result._ex._cache.names = new_names
        result._ex._cache.types = {name: "int" for name in new_names}
    return result
"resource": ""
} |
def minute(self):
    """
    Extract the "minute" part from a date column.

    :returns: a single-column H2OFrame containing the "minute" part from the source frame.
    """
    result = H2OFrame._expr(expr=ExprNode("minute", self), cache=self._ex._cache)
    if result._ex._cache.types_valid():
        # Extracted minutes are integers regardless of the source column type.
        result._ex._cache.types = {col: "int" for col in self._ex._cache.types.keys()}
    return result
"resource": ""
} |
def runif(self, seed=None):
    """
    Generate a column of random numbers drawn from a uniform distribution [0,1) and
    having the same data layout as the source frame.

    :param int seed: seed for the random number generator.

    :returns: Single-column H2OFrame filled with doubles sampled uniformly from [0,1).
    """
    # -1 asks the backend to pick a seed itself.
    rng_seed = -1 if seed is None else seed
    result = H2OFrame._expr(expr=ExprNode("h2o.runif", self, rng_seed))
    result._ex._cache.ncols = 1
    result._ex._cache.nrows = self.nrow
    return result
"resource": ""
} |
def stratified_split(self, test_frac=0.2, seed=-1):
    """
    Construct a column that can be used to perform a random stratified split.

    :param float test_frac: The fraction of rows that will belong to the "test".
    :param int seed: The seed for the random number generator.

    :returns: an H2OFrame having single categorical column with two levels: ``"train"`` and ``"test"``.

    :examples:
        >>> stratsplit = df["y"].stratified_split(test_frac=0.3, seed=12349453)
        >>> train = df[stratsplit=="train"]
        >>> test = df[stratsplit=="test"]
        >>>
        >>> # check that the distributions among the initial frame, and the
        >>> # train/test frames match
        >>> df["y"].table()["Count"] / df["y"].table()["Count"].sum()
        >>> train["y"].table()["Count"] / train["y"].table()["Count"].sum()
        >>> test["y"].table()["Count"] / test["y"].table()["Count"].sum()
    """
    split_expr = ExprNode("h2o.random_stratified_split", self, test_frac, seed)
    return H2OFrame._expr(expr=split_expr)
"resource": ""
} |
def cut(self, breaks, labels=None, include_lowest=False, right=True, dig_lab=3):
    """
    Cut a numeric vector into categorical "buckets".

    This method is only applicable to a single-column numeric frame.

    :param List[float] breaks: The cut points in the numeric vector.
    :param List[str] labels: Labels for categorical levels produced. Defaults to set notation of
        intervals defined by the breaks.
    :param bool include_lowest: By default, cuts are defined as intervals ``(lo, hi]``. If this parameter
        is True, then the interval becomes ``[lo, hi]``.
    :param bool right: Include the high value: ``(lo, hi]``. If False, get ``(lo, hi)``.
    :param int dig_lab: Number of digits following the decimal point to consider.

    :returns: Single-column H2OFrame of categorical data.
    """
    assert_is_type(breaks, [numeric])
    if self.ncols != 1:
        raise H2OValueError("Single-column frame is expected")
    if self.types[self.names[0]] not in ("int", "real"):
        raise H2OValueError("A numeric column is expected")
    bucketed = H2OFrame._expr(expr=ExprNode("cut", self, breaks, labels, include_lowest, right, dig_lab),
                              cache=self._ex._cache)
    # The result is categorical regardless of the input numeric type.
    bucketed._ex._cache.types = {name: "enum" for name in self.names}
    return bucketed
"resource": ""
} |
def idxmax(self, skipna=True, axis=0):
    """
    Get the index of the max value in a column or row.

    :param bool skipna: If True (default), then NAs are ignored during the search. Otherwise presence
        of NAs renders the entire result NA.
    :param int axis: Direction of finding the max index. If 0 (default), then the max index is searched columnwise, and the
        result is a frame with 1 row and number of columns as in the original frame. If 1, then the max index is searched
        rowwise and the result is a frame with 1 column, and number of rows equal to the number of rows in the original frame.

    :returns: either a list of max index values per-column or an H2OFrame containing max index values
        per-row from the original frame.
    """
    whichmax_expr = ExprNode("which.max", self, skipna, axis)
    return H2OFrame._expr(expr=whichmax_expr)
"resource": ""
} |
def apply(self, fun=None, axis=0):
    """
    Apply a lambda expression to an H2OFrame.

    :param fun: a lambda expression to be applied per row or per column.
    :param axis: 0 = apply to each column; 1 = apply to each row

    :returns: a new H2OFrame with the results of applying ``fun`` to the current frame.
    """
    from .astfun import lambda_to_expr
    assert_is_type(axis, 0, 1)
    assert_is_type(fun, FunctionType)
    assert_satisfies(fun, fun.__name__ == "<lambda>")
    # The Rapids "apply" op uses margin 2 for columns and 1 for rows.
    margin = 2 if axis == 0 else 1
    lambda_ast = lambda_to_expr(fun)
    return H2OFrame._expr(expr=ExprNode("apply", self, margin, *lambda_ast))
"resource": ""
} |
def parse_text(text):
    """Parse code from a string of text."""
    assert isinstance(text, _str_type), "`text` parameter should be a string, got %r" % type(text)
    line_iter = iter(text.splitlines(True))  # True = keep newlines
    # Python 2 generators expose .next(); Python 3 uses .__next__()
    readline = line_iter.next if hasattr(line_iter, "next") else line_iter.__next__
    return Code(_tokenize(readline))
"resource": ""
} |
def parse_file(filename):
    """Parse the provided file, and return Code object."""
    assert isinstance(filename, _str_type), "`filename` parameter should be a string, got %r" % type(filename)
    with open(filename, "rt", encoding="utf-8") as source:
        return Code(_tokenize(source.readline))
"resource": ""
} |
def move(self, drow, dcol=0):
    """Shift the token's span by `drow` rows and `dcol` columns."""
    self._start_row += drow
    self._end_row += drow
    self._start_col += dcol
    self._end_col += dcol
"resource": ""
} |
def unparse(self):
    """Convert the parsed representation back into the source code."""
    untok = Untokenizer(start_row=self._tokens[0].start_row)
    self._unparse(untok)
    return untok.result()
"resource": ""
} |
def size(self, train=False, valid=False, xval=False):
    """
    Get the sizes of each cluster.

    If all are False (default), then return the training metric value.
    If more than one options is set to True, then return a dictionary of metrics where
    the keys are "train", "valid", and "xval".

    :param bool train: If True, return the cluster sizes for the training data.
    :param bool valid: If True, return the cluster sizes for the validation data.
    :param bool xval: If True, return the cluster sizes for each of the cross-validated splits.

    :returns: The cluster sizes for the specified key(s).
    """
    metrics = ModelBase._get_metrics(self, train, valid, xval)
    sizes = {}
    for key, metric in metrics.items():
        if metric is None:
            sizes[key] = None
        else:
            # Column 2 of centroid_stats holds the per-cluster size.
            sizes[key] = [row[2] for row in metric._metric_json["centroid_stats"].cell_values]
    if len(sizes) == 1:
        return list(sizes.values())[0]
    return sizes
"resource": ""
} |
def centers(self):
    """The centers for the KMeans model."""
    # First cell of each row is the centroid label; the rest are coordinates.
    cell_values = self._model_json["output"]["centers"].cell_values
    return [list(row[1:]) for row in cell_values]
"resource": ""
} |
def centers_std(self):
    """The standardized centers for the kmeans model."""
    # First cell of each row is the centroid label; the rest are coordinates.
    cell_values = self._model_json["output"]["centers_std"].cell_values
    per_centroid = [list(row[1:]) for row in cell_values]
    # Transpose: one list per coordinate rather than one per centroid.
    return [list(coord) for coord in zip(*per_centroid)]
"resource": ""
} |
def connect(server=None, url=None, ip=None, port=None, https=None, verify_ssl_certificates=None, auth=None,
            proxy=None, cookies=None, verbose=True, config=None):
    """
    Connect to an existing H2O server, remote or local.

    There are two ways to connect to a server: either pass a `server` parameter containing an instance of
    an H2OLocalServer, or specify `ip` and `port` of the server that you want to connect to.

    :param server: An H2OLocalServer instance to connect to (optional).
    :param url: Full URL of the server to connect to (can be used instead of `ip` + `port` + `https`).
    :param ip: The ip address (or host name) of the server where H2O is running.
    :param port: Port number that H2O service is listening to.
    :param https: Set to True to connect via https:// instead of http://.
    :param verify_ssl_certificates: When using https, setting this to False will disable SSL certificates verification.
    :param auth: Either a (username, password) pair for basic authentication, an instance of h2o.auth.SpnegoAuth
        or one of the requests.auth authenticator objects.
    :param proxy: Proxy server address.
    :param cookies: Cookie (or list of) to add to request
    :param verbose: Set to False to disable printing connection status messages.
    :param config: Connection configuration object encapsulating connection parameters
        (may wrap them in a "connect_params" sub-dict).

    :returns: the new :class:`H2OConnection` object.
    """
    # The established connection is stored in the module-level `h2oconn`, which
    # all subsequent h2o.api() calls use implicitly.
    global h2oconn
    if config:
        if "connect_params" in config:
            h2oconn = _connect_with_conf(config["connect_params"])
        else:
            h2oconn = _connect_with_conf(config)
    else:
        h2oconn = H2OConnection.open(server=server, url=url, ip=ip, port=port, https=https,
                                     auth=auth, verify_ssl_certificates=verify_ssl_certificates,
                                     proxy=proxy, cookies=cookies,
                                     verbose=verbose)
    if verbose:
        h2oconn.cluster.show_status()
    return h2oconn
"resource": ""
} |
def api(endpoint, data=None, json=None, filename=None, save_to=None):
    """
    Perform a REST API request to a previously connected server.

    This function is mostly for internal purposes, but may occasionally be useful for direct access to
    the backend H2O server. It has same parameters as :meth:`H2OConnection.request <h2o.backend.H2OConnection.request>`.
    """
    # type checks are performed in H2OConnection class
    _check_connection()  # raises if h2o.connect() / h2o.init() was never called
    return h2oconn.request(endpoint, data=data, json=json, filename=filename, save_to=save_to)
"resource": ""
} |
def version_check():
    """Used to verify that h2o-python module and the H2O server are compatible with each other."""
    from .__init__ import __version__ as ver_pkg
    ci = h2oconn.cluster
    if not ci:
        raise H2OConnectionError("Connection not initialized. Did you run h2o.connect()?")
    ver_h2o = ci.version
    # "SUBST_PROJECT_VERSION" is the placeholder left in un-substituted dev builds.
    if ver_pkg == "SUBST_PROJECT_VERSION": ver_pkg = "UNKNOWN"
    if str(ver_h2o) != str(ver_pkg):
        branch_name_h2o = ci.branch_name
        build_number_h2o = ci.build_number
        # Tailor the error message to how much we know about the server build.
        if build_number_h2o is None or build_number_h2o == "unknown":
            raise H2OConnectionError(
                "Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
                "Upgrade H2O and h2o-Python to latest stable version - "
                "http://h2o-release.s3.amazonaws.com/h2o/latest_stable.html"
                "".format(ver_h2o, ver_pkg))
        elif build_number_h2o == "99999":
            # 99999 marks a developer build of the server.
            raise H2OConnectionError(
                "Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
                "This is a developer build, please contact your developer."
                "".format(ver_h2o, ver_pkg))
        else:
            raise H2OConnectionError(
                "Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
                "Install the matching h2o-Python version from - "
                "http://h2o-release.s3.amazonaws.com/h2o/{2}/{3}/index.html."
                "".format(ver_h2o, ver_pkg, branch_name_h2o, build_number_h2o))
    # Check age of the install
    if ci.build_too_old:
        print("Warning: Your H2O cluster version is too old ({})! Please download and install the latest "
              "version from http://h2o.ai/download/".format(ci.build_age))
"resource": ""
} |
def lazy_import(path, pattern=None):
    """
    Import a single file or collection of files.

    :param path: A path to a data file (remote or local).
    :param pattern: Character string containing a regular expression to match file(s) in the folder.

    :returns: either a :class:`H2OFrame` with the content of the provided file, or a list of such frames if
        importing multiple files.
    """
    assert_is_type(path, str, [str])
    assert_is_type(pattern, str, None)
    # Normalize to a list of paths before delegating.
    if is_type(path, str):
        paths = [path]
    else:
        paths = path
    return _import_multi(paths, pattern)
"resource": ""
} |
def upload_file(path, destination_frame=None, header=0, sep=None, col_names=None, col_types=None,
                na_strings=None, skipped_columns=None):
    """
    Upload a dataset from the provided local path to the H2O cluster.

    Does a single-threaded push to H2O. Also see :meth:`import_file`.

    :param path: A path specifying the location of the data to upload.
    :param destination_frame: The unique hex key assigned to the imported file. If none is given, a key will
        be automatically generated.
    :param header: -1 means the first line is data, 0 means guess, 1 means first line is header.
    :param sep: The field separator character. Values on each line of the file are separated by
        this character. If not provided, the parser will automatically detect the separator.
    :param col_names: A list of column names for the file.
    :param col_types: A list of types or a dictionary of column names to types to specify whether columns
        should be forced to a certain type upon import parsing. If a list, the types for elements that are
        one will be guessed. The possible types a column may have are:

        - "unknown" - this will force the column to be parsed as all NA
        - "uuid" - the values in the column must be true UUID or will be parsed as NA
        - "string" - force the column to be parsed as a string
        - "numeric" - force the column to be parsed as numeric. H2O will handle the compression of the numeric
          data in the optimal manner.
        - "enum" - force the column to be parsed as a categorical column.
        - "time" - force the column to be parsed as a time column. H2O will attempt to parse the following
          list of date time formats: (date) "yyyy-MM-dd", "yyyy MM dd", "dd-MMM-yy", "dd MMM yy", (time)
          "HH:mm:ss", "HH:mm:ss:SSS", "HH:mm:ss:SSSnnnnnn", "HH.mm.ss" "HH.mm.ss.SSS", "HH.mm.ss.SSSnnnnnn".
          Times can also contain "AM" or "PM".
    :param na_strings: A list of strings, or a list of lists of strings (one list per column), or a dictionary
        of column names to strings which are to be interpreted as missing values.
    :param skipped_columns: an integer lists of column indices to skip and not parsed into the final frame from the import file.

    :returns: a new :class:`H2OFrame` instance.

    :examples:
        >>> frame = h2o.upload_file("/path/to/local/data")
    """
    coltype = U(None, "unknown", "uuid", "string", "float", "real", "double", "int", "numeric",
                "categorical", "factor", "enum", "time")
    natype = U(str, [str])
    assert_is_type(path, str)
    assert_is_type(destination_frame, str, None)
    assert_is_type(header, -1, 0, 1)
    assert_is_type(sep, None, I(str, lambda s: len(s) == 1))
    assert_is_type(col_names, [str], None)
    assert_is_type(col_types, [coltype], {str: coltype}, None)
    assert_is_type(na_strings, [natype], {str: natype}, None)
    # Use identity comparison with None (PEP 8) and report the correct expectation:
    # skipped_columns holds column *indices*, not names.
    assert (skipped_columns is None) or isinstance(skipped_columns, list), \
        "The skipped_columns should be a list of column indices!"
    check_frame_id(destination_frame)
    # Expand "~" locally, since the file is read from the client machine.
    if path.startswith("~"):
        path = os.path.expanduser(path)
    return H2OFrame()._upload_parse(path, destination_frame, header, sep, col_names, col_types, na_strings, skipped_columns)
"resource": ""
} |
def import_file(path=None, destination_frame=None, parse=True, header=0, sep=None, col_names=None, col_types=None,
                na_strings=None, pattern=None, skipped_columns=None, custom_non_data_line_markers=None):
    """
    Import a dataset that is already on the cluster.

    The path to the data must be a valid path for each node in the H2O cluster. If some node in the H2O cluster
    cannot see the file, then an exception will be thrown by the H2O cluster. Does a parallel/distributed
    multi-threaded pull of the data. The main difference between this method and :func:`upload_file` is that
    the latter works with local files, whereas this method imports remote files (i.e. files local to the server).
    If you running H2O server on your own maching, then both methods behave the same.

    :param path: path(s) specifying the location of the data to import or a path to a directory of files to import
    :param destination_frame: The unique hex key assigned to the imported file. If none is given, a key will be
        automatically generated.
    :param parse: If True, the file should be parsed after import. If False, then a list is returned containing the file path.
    :param header: -1 means the first line is data, 0 means guess, 1 means first line is header.
    :param sep: The field separator character. Values on each line of the file are separated by
        this character. If not provided, the parser will automatically detect the separator.
    :param col_names: A list of column names for the file.
    :param col_types: A list of types or a dictionary of column names to types to specify whether columns
        should be forced to a certain type upon import parsing. If a list, the types for elements that are
        one will be guessed. The possible types a column may have are:

        - "unknown" - this will force the column to be parsed as all NA
        - "uuid" - the values in the column must be true UUID or will be parsed as NA
        - "string" - force the column to be parsed as a string
        - "numeric" - force the column to be parsed as numeric. H2O will handle the compression of the numeric
          data in the optimal manner.
        - "enum" - force the column to be parsed as a categorical column.
        - "time" - force the column to be parsed as a time column. H2O will attempt to parse the following
          list of date time formats: (date) "yyyy-MM-dd", "yyyy MM dd", "dd-MMM-yy", "dd MMM yy", (time)
          "HH:mm:ss", "HH:mm:ss:SSS", "HH:mm:ss:SSSnnnnnn", "HH.mm.ss" "HH.mm.ss.SSS", "HH.mm.ss.SSSnnnnnn".
          Times can also contain "AM" or "PM".
    :param na_strings: A list of strings, or a list of lists of strings (one list per column), or a dictionary
        of column names to strings which are to be interpreted as missing values.
    :param pattern: Character string containing a regular expression to match file(s) in the folder if `path` is a
        directory.
    :param skipped_columns: an integer list of column indices to skip and not parsed into the final frame from the import file.
    :param custom_non_data_line_markers: If a line in imported file starts with any character in given string it will NOT be imported. Empty string means all lines are imported, None means that default behaviour for given format will be used

    :returns: a new :class:`H2OFrame` instance.

    :examples:
        >>> # Single file import
        >>> iris = import_file("h2o-3/smalldata/iris.csv")
        >>> # Return all files in the folder iris/ matching the regex r"iris_.*\.csv"
        >>> iris_pattern = h2o.import_file(path = "h2o-3/smalldata/iris",
        ...                                pattern = "iris_.*\.csv")
    """
    coltype = U(None, "unknown", "uuid", "string", "float", "real", "double", "int", "numeric",
                "categorical", "factor", "enum", "time")
    natype = U(str, [str])
    assert_is_type(path, str, [str])
    assert_is_type(pattern, str, None)
    assert_is_type(destination_frame, str, None)
    assert_is_type(parse, bool)
    assert_is_type(header, -1, 0, 1)
    assert_is_type(sep, None, I(str, lambda s: len(s) == 1))
    assert_is_type(col_names, [str], None)
    assert_is_type(col_types, [coltype], {str: coltype}, None)
    assert_is_type(na_strings, [natype], {str: natype}, None)
    assert isinstance(skipped_columns, (type(None), list)), "The skipped_columns should be an list of column names!"
    check_frame_id(destination_frame)
    # Reject "~"-relative paths: they would be expanded on the server, not the client.
    patharr = path if isinstance(path, list) else [path]
    if any(os.path.split(p)[0] == "~" for p in patharr):
        raise H2OValueError("Paths relative to a current user (~) are not valid in the server environment. "
                            "Please use absolute paths if possible.")
    if not parse:
        return lazy_import(path, pattern)
    else:
        return H2OFrame()._import_parse(path, pattern, destination_frame, header, sep, col_names, col_types, na_strings,
                                        skipped_columns, custom_non_data_line_markers)
"resource": ""
} |
def import_hive_table(database=None, table=None, partitions=None, allow_multi_format=False):
    """
    Import Hive table to H2OFrame in memory.

    Make sure to start H2O with Hive on classpath. Uses hive-site.xml on classpath to connect to Hive.

    :param database: Name of Hive database (default database will be used by default)
    :param table: name of Hive table to import
    :param partitions: a list of lists of strings - partition key column values of partitions you want to import.
    :param allow_multi_format: enable import of partitioned tables with different storage formats used. WARNING:
        this may fail on out-of-memory for tables with a large number of small partitions.

    :returns: an :class:`H2OFrame` containing data of the specified Hive table.

    :examples:
        >>> my_citibike_data = h2o.import_hive_table("default", "table", [["2017", "01"], ["2017", "02"]])
    """
    assert_is_type(database, str, None)
    assert_is_type(table, str)
    assert_is_type(partitions, [[str]], None)
    params = {"database": database,
              "table": table,
              "partitions": partitions,
              "allow_multi_format": allow_multi_format}
    job = H2OJob(api("POST /3/ImportHiveTable", data=params), "Import Hive Table").poll()
    return get_frame(job.dest_key)
"resource": ""
} |
def import_sql_table(connection_url, table, username, password, columns=None, optimize=True, fetch_mode=None):
    """
    Import SQL table to H2OFrame in memory.

    Assumes that the SQL table is not being updated and is stable.
    Runs multiple SELECT SQL queries concurrently for parallel ingestion.
    Be sure to start the h2o.jar in the terminal with your downloaded JDBC driver in the classpath::

        java -cp <path_to_h2o_jar>:<path_to_jdbc_driver_jar> water.H2OApp

    Also see :func:`import_sql_select`.
    Currently supported SQL databases are MySQL, PostgreSQL, MariaDB, Hive, Oracle and Microsoft SQL.

    :param connection_url: URL of the SQL database connection as specified by the Java Database Connectivity (JDBC)
        Driver. For example, "jdbc:mysql://localhost:3306/menagerie?&useSSL=false"
    :param table: name of SQL table
    :param columns: a list of column names to import from SQL table. Default is to import all columns.
    :param username: username for SQL server
    :param password: password for SQL server
    :param optimize: DEPRECATED. Ignored - use fetch_mode instead. Optimize import of SQL table for faster imports.
    :param fetch_mode: Set to DISTRIBUTED to enable distributed import. Set to SINGLE to force a sequential read by a single node
        from the database.

    :returns: an :class:`H2OFrame` containing data of the specified SQL table.

    :examples:
        >>> conn_url = "jdbc:mysql://172.16.2.178:3306/ingestSQL?&useSSL=false"
        >>> table = "citibike20k"
        >>> username = "root"
        >>> password = "abc123"
        >>> my_citibike_data = h2o.import_sql_table(conn_url, table, username, password)
    """
    assert_is_type(connection_url, str)
    assert_is_type(table, str)
    assert_is_type(username, str)
    assert_is_type(password, str)
    assert_is_type(columns, [str], None)
    assert_is_type(optimize, bool)
    assert_is_type(fetch_mode, str, None)
    p = {"connection_url": connection_url, "table": table, "username": username, "password": password,
         "fetch_mode": fetch_mode}
    # The backend expects a single comma-separated string, not a list.
    if columns:
        p["columns"] = ", ".join(columns)
    j = H2OJob(api("POST /99/ImportSQLTable", data=p), "Import SQL Table").poll()
    return get_frame(j.dest_key)
"resource": ""
} |
def import_sql_select(connection_url, select_query, username, password, optimize=True,
                      use_temp_table=None, temp_table_name=None, fetch_mode=None):
    """
    Import the SQL table that is the result of the specified SQL query to H2OFrame in memory.

    Creates a temporary SQL table from the specified sql_query.
    Runs multiple SELECT SQL queries on the temporary table concurrently for parallel ingestion, then drops the table.
    Be sure to start the h2o.jar in the terminal with your downloaded JDBC driver in the classpath::

        java -cp <path_to_h2o_jar>:<path_to_jdbc_driver_jar> water.H2OApp

    Also see h2o.import_sql_table. Currently supported SQL databases are MySQL, PostgreSQL, MariaDB, Hive, Oracle
    and Microsoft SQL Server.

    :param connection_url: URL of the SQL database connection as specified by the Java Database Connectivity (JDBC)
        Driver. For example, "jdbc:mysql://localhost:3306/menagerie?&useSSL=false"
    :param select_query: SQL query starting with `SELECT` that returns rows from one or more database tables.
    :param username: username for SQL server
    :param password: password for SQL server
    :param optimize: DEPRECATED. Ignored - use fetch_mode instead. Optimize import of SQL table for faster imports.
    :param use_temp_table: whether a temporary table should be created from select_query
    :param temp_table_name: name of temporary table to be created from select_query
    :param fetch_mode: Set to DISTRIBUTED to enable distributed import. Set to SINGLE to force a sequential read by a single node
        from the database.

    :returns: an :class:`H2OFrame` containing data of the specified SQL query.

    :examples:
        >>> conn_url = "jdbc:mysql://172.16.2.178:3306/ingestSQL?&useSSL=false"
        >>> select_query = "SELECT bikeid from citibike20k"
        >>> username = "root"
        >>> password = "abc123"
        >>> my_citibike_data = h2o.import_sql_select(conn_url, select_query,
        ...                                          username, password, fetch_mode)
    """
    assert_is_type(connection_url, str)
    assert_is_type(select_query, str)
    assert_is_type(username, str)
    assert_is_type(password, str)
    assert_is_type(optimize, bool)
    assert_is_type(use_temp_table, bool, None)
    assert_is_type(temp_table_name, str, None)
    assert_is_type(fetch_mode, str, None)
    p = {"connection_url": connection_url, "select_query": select_query, "username": username, "password": password,
         "use_temp_table": use_temp_table, "temp_table_name": temp_table_name, "fetch_mode": fetch_mode}
    j = H2OJob(api("POST /99/ImportSQLTable", data=p), "Import SQL Table").poll()
    return get_frame(j.dest_key)
"resource": ""
} |
def parse_raw(setup, id=None, first_line_is_header=0):
    """
    Parse dataset using the parse setup structure.

    :param setup: Result of ``h2o.parse_setup()``
    :param id: an id for the frame.
    :param first_line_is_header: -1, 0, 1 if the first line is to be used as the header

    :returns: an :class:`H2OFrame` object.
    """
    assert_is_type(setup, dict)
    assert_is_type(id, str, None)
    assert_is_type(first_line_is_header, -1, 0, 1)
    check_frame_id(id)
    if id:
        setup["destination_frame"] = id
    # The previous guard `if first_line_is_header != (-1, 0, 1)` compared an int against a
    # tuple and was therefore always true; validate membership directly instead.
    if first_line_is_header not in (-1, 0, 1):
        raise ValueError("first_line_is_header should be -1, 0, or 1")
    setup["check_header"] = first_line_is_header
    fr = H2OFrame()
    fr._parse_raw(setup)
    return fr
"resource": ""
} |
def deep_copy(data, xid):
    """
    Create a deep clone of the frame ``data``.

    :param data: an H2OFrame to be cloned
    :param xid: (internal) id to be assigned to the new frame.

    :returns: new :class:`H2OFrame` which is the clone of the passed frame.
    """
    assert_is_type(data, H2OFrame)
    assert_is_type(xid, str)
    assert_satisfies(xid, xid != data.frame_id)
    check_frame_id(xid)
    # Build an identity-mapped copy of the frame, then pin it on the backend
    # under the requested id by evaluating an explicit "assign" expression.
    clone = data.apply(lambda col: col)
    clone._ex = ExprNode("assign", xid, clone)._eval_driver(False)
    clone._ex._cache._id = xid
    clone._ex._children = None
    return clone
"resource": ""
} |
def get_model(model_id):
    """
    Load a model from the server.

    :param model_id: The model identification in H2O

    :returns: Model object, a subclass of H2OEstimator
    """
    assert_is_type(model_id, str)
    model_json = api("GET /3/Models/%s" % model_id)["models"][0]
    algo = model_json["algo"]
    # Dispatch table mapping each server-side algo name to its estimator class.
    estimator_by_algo = {
        "svd": H2OSVD,
        "pca": H2OPrincipalComponentAnalysisEstimator,
        "drf": H2ORandomForestEstimator,
        "naivebayes": H2ONaiveBayesEstimator,
        "kmeans": H2OKMeansEstimator,
        "glrm": H2OGeneralizedLowRankEstimator,
        "glm": H2OGeneralizedLinearEstimator,
        "gbm": H2OGradientBoostingEstimator,
        "deepwater": H2ODeepWaterEstimator,
        "xgboost": H2OXGBoostEstimator,
        "word2vec": H2OWord2vecEstimator,
        "generic": H2OGenericEstimator,
        "stackedensemble": H2OStackedEnsembleEstimator,
        "isolationforest": H2OIsolationForestEstimator,
    }
    if algo == "deeplearning":
        # "deeplearning" covers two distinct estimators, selected by model category.
        if model_json["output"]["model_category"] == "AutoEncoder":
            m = H2OAutoEncoderEstimator()
        else:
            m = H2ODeepLearningEstimator()
    elif algo in estimator_by_algo:
        m = estimator_by_algo[algo]()
    else:
        raise ValueError("Unknown algo type: " + algo)
    m._resolve_model(model_id, model_json)
    return m
"resource": ""
} |
def get_grid(grid_id):
    """
    Return the specified grid.

    :param grid_id: The grid identification in h2o

    :returns: an :class:`H2OGridSearch` instance.
    """
    assert_is_type(grid_id, str)
    grid_json = api("GET /99/Grids/%s" % grid_id)
    models = [get_model(key["name"]) for key in grid_json["model_ids"]]
    # get first model returned in list of models from grid search to get model class (binomial, multinomial, etc)
    first_model_json = api("GET /3/Models/%s" % grid_json["model_ids"][0]["name"])["models"][0]
    gs = H2OGridSearch(None, {}, grid_id)
    gs._resolve_grid(grid_id, grid_json, first_model_json)
    gs.models = models
    # Collect the distinct values each hyper-parameter actually took across the grid's models.
    hyper_params = {param: set() for param in gs.hyper_names}
    for param in gs.hyper_names:
        for model in models:
            actual_value = model.full_parameters[param]["actual_value"]
            # List-valued actual values carry the chosen setting as their first element.
            hyper_params[param].add(actual_value[0] if isinstance(actual_value, list) else actual_value)
    gs.hyper_params = {str(param): list(vals) for param, vals in hyper_params.items()}
    # Previously this read the inner loop variable `model` leaked from the for loop above,
    # which raised NameError when `gs.hyper_names` was empty. A grid searches a single
    # algorithm, so every model shares one estimator class — use the first model (which the
    # code above already assumes exists via `model_ids[0]`).
    gs.model = models[0].__class__()
    return gs
"resource": ""
} |
def get_frame(frame_id, **kwargs):
    """
    Obtain a handle to the frame in H2O with the frame_id key.

    :param str frame_id: id of the frame to retrieve.

    :returns: an :class:`H2OFrame` object
    """
    assert_is_type(frame_id, str)
    # Delegate to the H2OFrame class-level constructor that fetches the frame metadata.
    frame = H2OFrame.get_frame(frame_id, **kwargs)
    return frame
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.