_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q267000
Conversation.handle_request
test
def handle_request(self, request: dict) -> dict:
    """Dispatches an incoming Alexa request to the matching handler.

    Args:
        request: Alexa request.

    Returns:
        response: Response conforming Alexa response specification.
    """
    req_type = request['request']['type']
    req_id = request['request']['requestId']
    log.debug(f'Received request. Type: {req_type}, id: {req_id}')

    if req_type in self.handled_requests:
        response: dict = self.handled_requests[req_type](request)
    else:
        # fall back to the catch-all handler for unknown request types
        response: dict = self.handled_requests['_unsupported'](request)
        log.warning(f'Unsupported request type: {req_type}, request id: {req_id}')

    # any activity keeps this conversation alive
    self._rearm_self_destruct()

    return response
python
{ "resource": "" }
q267001
Conversation._act
test
def _act(self, utterance: str) -> list: """Infers DeepPavlov agent with raw user input extracted from Alexa request. Args: utterance: Raw user input extracted from Alexa request. Returns: response: DeepPavlov agent response. """ if self.stateful: utterance = [[utterance], [self.key]] else: utterance = [[utterance]] agent_response: list = self.agent(*utterance) return agent_response
python
{ "resource": "" }
q267002
Conversation._generate_response
test
def _generate_response(self, response: dict, request: dict) -> dict: """Populates generated response with additional data conforming Alexa response specification. Args: response: Raw user input extracted from Alexa request. request: Alexa request. Returns: response: Response conforming Alexa response specification. """ response_template = deepcopy(self.response_template) response_template['sessionAttributes']['sessionId'] = request['session']['sessionId'] for key, value in response_template.items(): if key not in response.keys(): response[key] = value return response
python
{ "resource": "" }
q267003
Conversation._handle_intent
test
def _handle_intent(self, request: dict) -> dict:
    """Handles IntentRequest Alexa request.

    Args:
        request: Alexa request.

    Returns:
        response: "response" part of response dict conforming Alexa specification.
    """
    intent_name = self.config['intent_name']
    slot_name = self.config['slot_name']

    request_id = request['request']['requestId']
    request_intent: dict = request['request']['intent']

    # only the single configured intent is supported
    if intent_name != request_intent['name']:
        log.error(f"Wrong intent name received: {request_intent['name']} in request {request_id}")
        return {'error': 'wrong intent name'}

    if slot_name not in request_intent['slots'].keys():
        log.error(f'No slot named {slot_name} found in request {request_id}')
        return {'error': 'no slot found'}

    utterance = request_intent['slots'][slot_name]['value']
    agent_response = self._act(utterance)

    if not agent_response:
        log.error(f'Some error during response generation for request {request_id}')
        return {'error': 'error during response generation'}

    # first batch element is the prediction for this single-utterance batch
    prediction: RichMessage = agent_response[0]
    # convert the rich message into Alexa-compatible payload parts
    prediction: list = prediction.alexa()

    if not prediction:
        log.error(f'Some error during response generation for request {request_id}')
        return {'error': 'error during response generation'}

    # only the first payload part is returned to Alexa
    response = self._generate_response(prediction[0], request)

    return response
python
{ "resource": "" }
q267004
Conversation._handle_launch
test
def _handle_launch(self, request: dict) -> dict: """Handles LaunchRequest Alexa request. Args: request: Alexa request. Returns: response: "response" part of response dict conforming Alexa specification. """ response = { 'response': { 'shouldEndSession': False, 'outputSpeech': { 'type': 'PlainText', 'text': self.config['start_message'] }, 'card': { 'type': 'Simple', 'content': self.config['start_message'] } } } response = self._generate_response(response, request) return response
python
{ "resource": "" }
q267005
Conversation._handle_unsupported
test
def _handle_unsupported(self, request: dict) -> dict: """Handles all unsupported types of Alexa requests. Returns standard message. Args: request: Alexa request. Returns: response: "response" part of response dict conforming Alexa specification. """ response = { 'response': { 'shouldEndSession': False, 'outputSpeech': { 'type': 'PlainText', 'text': self.config['unsupported_message'] }, 'card': { 'type': 'Simple', 'content': self.config['unsupported_message'] } } } response = self._generate_response(response, request) return response
python
{ "resource": "" }
q267006
Struct._repr_pretty_
test
def _repr_pretty_(self, p, cycle):
    """method that defines ``Struct``'s pretty printing rules for iPython

    Args:
        p (IPython.lib.pretty.RepresentationPrinter): pretty printer object
        cycle (bool): is ``True`` if pretty detected a cycle
    """
    if cycle:
        # short-circuit to avoid infinite recursion on self-referencing structures
        p.text('Struct(...)')
    else:
        # group indent 7 == len('Struct(') keeps wrapped lines aligned with the open paren
        with p.group(7, 'Struct(', ')'):
            p.pretty(self._asdict())
python
{ "resource": "" }
q267007
elmo_loss2ppl
test
def elmo_loss2ppl(losses: List[np.ndarray]) -> float:
    """Converts accumulated model losses to a perplexity value.

    Args:
        losses: list of numpy arrays of model losses

    Returns:
        perplexity : float
    """
    # perplexity is exp of the mean cross-entropy loss
    return float(np.exp(np.mean(losses)))
python
{ "resource": "" }
q267008
build_model
test
def build_model(config: Union[str, Path, dict], mode: str = 'infer', load_trained: bool = False,
                download: bool = False, serialized: Optional[bytes] = None) -> Chainer:
    """Build and return the model described in corresponding configuration file."""
    config = parse_config(config)

    if serialized:
        # NOTE: pickle on externally supplied bytes is unsafe for untrusted input
        serialized: list = pickle.loads(serialized)

    if download:
        deep_download(config)

    import_packages(config.get('metadata', {}).get('imports', []))

    model_config = config['chainer']

    model = Chainer(model_config['in'], model_config['out'], model_config.get('in_y'))

    for component_config in model_config['pipe']:
        # when loading a trained pipeline, point trainable components at their saved weights
        if load_trained and ('fit_on' in component_config or 'in_y' in component_config):
            try:
                component_config['load_path'] = component_config['save_path']
            except KeyError:
                log.warning('No "save_path" parameter for the {} component, so "load_path" will not be renewed'
                            .format(component_config.get('class_name', component_config.get('ref', 'UNKNOWN'))))

        # serialized states are consumed in pipe order, one per in-pipeline component
        if serialized and 'in' in component_config:
            component_serialized = serialized.pop(0)
        else:
            component_serialized = None

        component = from_params(component_config, mode=mode, serialized=component_serialized)

        # only components with declared inputs participate in the inference graph
        if 'in' in component_config:
            c_in = component_config['in']
            c_out = component_config['out']
            in_y = component_config.get('in_y', None)
            main = component_config.get('main', False)
            model.append(component, c_in, c_out, in_y, main)

    return model
python
{ "resource": "" }
q267009
interact_model
test
def interact_model(config: Union[str, Path, dict]) -> None:
    """Start interaction with the model described in corresponding configuration file."""
    model = build_model(config)
    # REPL loop: prompt for each declared model input, run inference, print prediction
    while True:
        args = []
        for in_x in model.in_x:
            args.append((input('{}::'.format(in_x)),))
            # check for exit command
            if args[-1][0] in {'exit', 'stop', 'quit', 'q'}:
                return

        pred = model(*args)
        if len(model.out_params) > 1:
            # regroup multiple outputs per sample for printing
            pred = zip(*pred)
        print('>>', *pred)
python
{ "resource": "" }
q267010
predict_on_stream
test
def predict_on_stream(config: Union[str, Path, dict], batch_size: int = 1, file_path: Optional[str] = None) -> None:
    """Make a prediction with the component described in corresponding configuration file."""
    # '-' or no path means read from stdin (but refuse an interactive terminal)
    if file_path is None or file_path == '-':
        if sys.stdin.isatty():
            raise RuntimeError('To process data from terminal please use interact mode')
        f = sys.stdin
    else:
        f = open(file_path, encoding='utf8')

    model: Chainer = build_model(config)

    args_count = len(model.in_x)
    while True:
        # each sample occupies `args_count` consecutive lines
        batch = list((l.strip() for l in islice(f, batch_size * args_count)))

        if not batch:
            break

        # de-interleave lines into one column per model input
        args = []
        for i in range(args_count):
            args.append(batch[i::args_count])

        res = model(*args)
        if len(model.out_params) == 1:
            res = [res]
        for res in zip(*res):
            res = json.dumps(res, ensure_ascii=False)
            print(res, flush=True)

    if f is not sys.stdin:
        f.close()
python
{ "resource": "" }
q267011
read_infile
test
def read_infile(infile: Union[Path, str], from_words=False,
                word_column: int = WORD_COLUMN, pos_column: int = POS_COLUMN,
                tag_column: int = TAG_COLUMN, max_sents: int = -1,
                read_only_words: bool = False) -> List[Tuple[List, Union[List, None]]]:
    """Reads input file in CONLL-U format

    Args:
        infile: a path to a file
        from_words: if ``True``, input is read as one word per line
            (word_column is forced to 0 and tags are not read)
        word_column: column containing words (default=1)
        pos_column: column containing part-of-speech labels (default=3)
        tag_column: column containing fine-grained tags (default=5)
        max_sents: maximal number of sents to read
        read_only_words: whether to read only words

    Returns:
        a list of sentences. Each item contains a word sequence and a tag sequence, which is ``None``
        in case ``read_only_words = True``
    """
    answer, curr_word_sent, curr_tag_sent = [], [], []
    if from_words:
        word_column, read_only_words = 0, True
    with open(infile, "r", encoding="utf8") as fin:
        for line in fin:
            line = line.strip()
            # skip CoNLL-U comment lines
            if line.startswith("#"):
                continue
            # blank line terminates the current sentence
            if line == "":
                if len(curr_word_sent) > 0:
                    if read_only_words:
                        curr_tag_sent = None
                    answer.append((curr_word_sent, curr_tag_sent))
                curr_tag_sent, curr_word_sent = [], []
                # max_sents == -1 never matches, i.e. no limit
                if len(answer) == max_sents:
                    break
                continue
            splitted = line.split("\t")
            index = splitted[0]
            # skip multiword tokens / empty nodes whose index is not a plain integer
            if not from_words and not index.isdigit():
                continue
            curr_word_sent.append(splitted[word_column])
            if not read_only_words:
                pos, tag = splitted[pos_column], splitted[tag_column]
                # "_" means no fine-grained tag: fall back to POS alone
                tag = pos if tag == "_" else "{},{}".format(pos, tag)
                curr_tag_sent.append(tag)
    # flush a trailing sentence not followed by a blank line
    if len(curr_word_sent) > 0:
        if read_only_words:
            curr_tag_sent = None
        answer.append((curr_word_sent, curr_tag_sent))
    return answer
python
{ "resource": "" }
q267012
fn_from_str
test
def fn_from_str(name: str) -> Callable[..., Any]:
    """Returns a function object with the name given in string."""
    parts = name.split(':')
    if len(parts) != 2:
        raise ConfigError('Expected function description in a `module.submodules:function_name` form, but got `{}`'
                          .format(name))
    module_name, fn_name = parts
    return getattr(importlib.import_module(module_name), fn_name)
python
{ "resource": "" }
q267013
register_metric
test
def register_metric(metric_name: str) -> Callable[..., Any]:
    """Decorator for metric registration."""
    def decorate(fn):
        # metrics are stored by their importable path, not by object reference
        fn_name = fn.__module__ + ':' + fn.__name__
        if metric_name in _REGISTRY and _REGISTRY[metric_name] != fn_name:
            # re-registering a name silently replaces the previous function
            log.warning('"{}" is already registered as a metric name, the old function will be ignored'
                        .format(metric_name))
        _REGISTRY[metric_name] = fn_name
        return fn

    return decorate
python
{ "resource": "" }
q267014
get_metric_by_name
test
def get_metric_by_name(name: str) -> Callable[..., Any]:
    """Returns a metric callable with a corresponding name."""
    if name in _REGISTRY:
        return fn_from_str(_REGISTRY[name])
    raise ConfigError(f'"{name}" is not registered as a metric')
python
{ "resource": "" }
q267015
DecayType.from_str
test
def from_str(cls, label: str) -> int:
    """
    Convert given string label of decay type to special index

    Args:
        label: name of decay type. Set of values: `"linear"`, `"cosine"`, `"exponential"`,
            `"onecycle"`, `"trapezoid"`, `["polynomial", K]`, where K is a polynomial power

    Returns:
        index of decay type
    """
    # map digit '1' to 'one' so e.g. "1cycle" resolves to the ONECYCLE member name
    label_norm = label.replace('1', 'one').upper()
    if label_norm in cls.__members__:
        # NOTE(review): this returns the enum member itself rather than a plain int,
        # despite the `-> int` annotation — presumably DecayType is an int-valued enum; confirm
        return DecayType[label_norm]
    else:
        raise NotImplementedError
python
{ "resource": "" }
q267016
LRScheduledModel._get_best
test
def _get_best(values: List[float], losses: List[float],
              max_loss_div: float = 0.9, min_val_div: float = 10.0) -> float:
    """
    Find the best value according to given losses

    Args:
        values: list of considered values
        losses: list of obtained loss values corresponding to `values`
        max_loss_div: maximal divergence of loss to be considered significant
        min_val_div: minimum divergence of loss to be considered significant

    Returns:
        best value divided by `min_val_div`
    """
    assert len(values) == len(losses), "lengths of values and losses should be equal"
    min_ind = np.argmin(losses)
    # walk left from the loss minimum until either loss or value diverges significantly,
    # then return the value one step back towards the minimum.
    # NOTE(review): the range stops at index 1, so index 0 is never examined — confirm intended
    for i in range(min_ind - 1, 0, -1):
        if (losses[i] * max_loss_div > losses[min_ind]) or\
           (values[i] * min_val_div < values[min_ind]):
            return values[i + 1]
    # no divergence found: fall back to a conservative fraction of the best value
    return values[min_ind] / min_val_div
python
{ "resource": "" }
q267017
Embedder._encode
test
def _encode(self, tokens: List[str], mean: Union[bool, None]) -> Union[List[np.ndarray], np.ndarray]:
    """
    Embed one text sample

    Args:
        tokens: tokenized text sample
        mean: whether to return mean embedding of tokens per sample;
            ``None`` falls back to the instance-level ``self.mean`` setting

    Returns:
        list of embedded tokens or array of mean values
    """
    embedded_tokens = []
    for t in tokens:
        # per-instance cache first, then the underlying embedding model,
        # then a zero vector for out-of-vocabulary tokens
        try:
            emb = self.tok2emb[t]
        except KeyError:
            try:
                emb = self._get_word_vector(t)
            except KeyError:
                emb = np.zeros(self.dim, dtype=np.float32)
            self.tok2emb[t] = emb
        embedded_tokens.append(emb)

    if mean is None:
        mean = self.mean

    if mean:
        # average only non-zero (in-vocabulary) embeddings
        filtered = [et for et in embedded_tokens if np.any(et)]
        if filtered:
            return np.mean(filtered, axis=0)
        return np.zeros(self.dim, dtype=np.float32)

    return embedded_tokens
python
{ "resource": "" }
q267018
read_requirements
test
def read_requirements():
    """parses requirements from requirements.txt"""
    path = os.path.join(__location__, 'requirements.txt')
    with open(path, encoding='utf8') as fin:
        # keep every non-comment line, stripped of surrounding whitespace
        entries = [raw.strip() for raw in fin if not raw.strip().startswith('#')]

    # URL-style requirements go to dependency_links, the rest to install_requires
    links = [entry for entry in entries if '://' in entry]
    names = [entry for entry in entries if '://' not in entry]
    return {'install_requires': names, 'dependency_links': links}
python
{ "resource": "" }
q267019
sk_log_loss
test
def sk_log_loss(y_true: Union[List[List[float]], List[List[int]], np.ndarray],
                y_predicted: Union[List[List[float]], List[List[int]], np.ndarray]) -> float:
    """
    Calculates log loss.

    Args:
        y_true: list or array of true values
        y_predicted: list or array of predicted values

    Returns:
        Log loss
    """
    # thin wrapper around sklearn.metrics.log_loss
    return log_loss(y_true, y_predicted)
python
{ "resource": "" }
q267020
export2hub
test
def export2hub(weight_file, hub_dir, options):
    """Exports a TF-Hub module.

    Args:
        weight_file: path to the model weights (converted to ``str`` for TF).
        hub_dir: ``Path`` to the export destination; an existing export there
            is removed first.
        options: model options passed to ``make_module_spec``.
    """
    spec = make_module_spec(options, str(weight_file))

    # NOTE: the original wrapped this in `try: ... finally: pass`, which is a
    # no-op and has been removed.
    with tf.Graph().as_default():
        module = hub.Module(spec)

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            # overwrite any previous export at the same location
            if hub_dir.exists():
                shutil.rmtree(hub_dir)
            module.export(str(hub_dir), sess)
python
{ "resource": "" }
q267021
show_details
test
def show_details(item_data: Dict[Any, Any]) -> str:
    """Format catalog item output

    Parameters:
        item_data: item's attributes values

    Returns:
        [rich_message]: list of formatted rich message
    """
    # one "**key**: value" markdown line per attribute
    return "".join(f"**{key}**: {value} \n" for key, value in item_data.items())
python
{ "resource": "" }
q267022
make_agent
test
def make_agent() -> EcommerceAgent:
    """Make an agent

    Returns:
        agent: created Ecommerce agent
    """
    skill = build_model(find_config('tfidf_retrieve'))
    return EcommerceAgent(skills=[skill])
python
{ "resource": "" }
q267023
main
test
def main():
    """Parse parameters and run ms bot framework"""
    args = parser.parse_args()
    # blocks serving the MS Bot Framework endpoint; stateful=True keeps
    # per-conversation agent state between turns
    run_ms_bot_framework_server(agent_generator=make_agent,
                                app_id=args.ms_id,
                                app_secret=args.ms_secret,
                                stateful=True)
python
{ "resource": "" }
q267024
download
test
def download(dest_file_path: Union[str, Path, List[Union[str, Path]]], source_url: str, force_download=True):
    """Download a file from URL to one or several target locations

    Args:
        dest_file_path: path or list of paths to the file destination files (including file name)
        source_url: the source URL
        force_download: download file if it already exists, or not
    """
    if isinstance(dest_file_path, list):
        dest_file_paths = [Path(path) for path in dest_file_path]
    else:
        dest_file_paths = [Path(dest_file_path).absolute()]

    if not force_download:
        # drop destinations that already exist
        to_check = list(dest_file_paths)
        dest_file_paths = []
        for p in to_check:
            if p.exists():
                log.info(f'File already exists in {p}')
            else:
                dest_file_paths.append(p)

    if dest_file_paths:
        cache_dir = os.getenv('DP_CACHE_DIR')
        cached_exists = False
        if cache_dir:
            # cache key is a truncated md5 of the source URL
            first_dest_path = Path(cache_dir) / md5(source_url.encode('utf8')).hexdigest()[:15]
            cached_exists = first_dest_path.exists()
        else:
            # no cache: download straight into one of the destinations
            first_dest_path = dest_file_paths.pop()

        if not cached_exists:
            first_dest_path.parent.mkdir(parents=True, exist_ok=True)

            simple_download(source_url, first_dest_path)
        else:
            log.info(f'Found cached {source_url} in {first_dest_path}')

        # fan the downloaded (or cached) file out to all remaining destinations
        for dest_path in dest_file_paths:
            dest_path.parent.mkdir(parents=True, exist_ok=True)

            shutil.copy(str(first_dest_path), str(dest_path))
python
{ "resource": "" }
q267025
untar
test
def untar(file_path, extract_folder=None):
    """Simple tar archive extractor

    Args:
        file_path: path to the tar file to be extracted
        extract_folder: folder to which the files will be extracted; defaults
            to the directory containing the archive
    """
    file_path = Path(file_path)
    if extract_folder is None:
        extract_folder = file_path.parent
    extract_folder = Path(extract_folder)

    # context manager guarantees the archive handle is closed even if
    # extraction raises (the original leaked the handle on error)
    with tarfile.open(file_path) as tar:
        tar.extractall(extract_folder)
python
{ "resource": "" }
q267026
download_decompress
test
def download_decompress(url: str, download_path: Union[Path, str], extract_paths=None):
    """Download and extract .tar.gz or .gz file to one or several target locations.
    The archive is deleted if extraction was successful.

    Args:
        url: URL for file downloading
        download_path: path to the directory where downloaded file will be stored
        until the end of extraction
        extract_paths: path or list of paths where contents of archive will be extracted
    """
    file_name = Path(urlparse(url).path).name
    download_path = Path(download_path)

    if extract_paths is None:
        extract_paths = [download_path]
    elif isinstance(extract_paths, list):
        extract_paths = [Path(path) for path in extract_paths]
    else:
        extract_paths = [Path(extract_paths)]

    cache_dir = os.getenv('DP_CACHE_DIR')
    extracted = False

    if cache_dir:
        # cache both the archive and its extracted contents under md5(url)-based keys
        cache_dir = Path(cache_dir)
        url_hash = md5(url.encode('utf8')).hexdigest()[:15]
        arch_file_path = cache_dir / url_hash
        extracted_path = cache_dir / (url_hash + '_extracted')
        extracted = extracted_path.exists()
        if not extracted and not arch_file_path.exists():
            simple_download(url, arch_file_path)
    else:
        # no cache: download next to the final destination and extract in place
        arch_file_path = download_path / file_name
        simple_download(url, arch_file_path)
        extracted_path = extract_paths.pop()

    if not extracted:
        log.info('Extracting {} archive into {}'.format(arch_file_path, extracted_path))
        extracted_path.mkdir(parents=True, exist_ok=True)

        # dispatch on archive suffix
        if file_name.endswith('.tar.gz'):
            untar(arch_file_path, extracted_path)
        elif file_name.endswith('.gz'):
            ungzip(arch_file_path, extracted_path / Path(file_name).with_suffix('').name)
        elif file_name.endswith('.zip'):
            with zipfile.ZipFile(arch_file_path, 'r') as zip_ref:
                zip_ref.extractall(extracted_path)
        else:
            raise RuntimeError(f'Trying to extract an unknown type of archive {file_name}')

        # without a cache dir the archive is temporary — delete it after extraction
        if not cache_dir:
            arch_file_path.unlink()

    # copy extracted contents to every remaining requested destination
    for extract_path in extract_paths:
        for src in extracted_path.iterdir():
            dest = extract_path / src.name
            if src.is_dir():
                copytree(src, dest)
            else:
                extract_path.mkdir(parents=True, exist_ok=True)
                shutil.copy(str(src), str(dest))
python
{ "resource": "" }
q267027
update_dict_recursive
test
def update_dict_recursive(editable_dict: dict, editing_dict: dict) -> None:
    """Updates dict recursively

    You need to use this function to update dictionary if depth of editing_dict
    is more than 1

    Args:
        editable_dict: dictionary, that will be edited
        editing_dict: dictionary, that contains edits

    Returns:
        None
    """
    # Fixed: `collections.Mapping` was removed in Python 3.10; the ABC lives in
    # `collections.abc`.
    from collections.abc import Mapping

    for k, v in editing_dict.items():
        if isinstance(v, Mapping):
            # NOTE(review): when `k` is absent from editable_dict, the recursion
            # edits a fresh dict that is never stored back, so those edits are
            # dropped — behavior kept as-is; confirm whether that is intended
            update_dict_recursive(editable_dict.get(k, {}), v)
        else:
            editable_dict[k] = v
python
{ "resource": "" }
q267028
path_set_md5
test
def path_set_md5(url):
    """Given a file URL, return a md5 query of the file

    Args:
        url: a given URL

    Returns:
        URL of the md5 file
    """
    parts = urlsplit(url)
    # append '.md5' to the path component only; query and fragment are preserved
    return urlunsplit(parts._replace(path=parts.path + '.md5'))
python
{ "resource": "" }
q267029
set_query_parameter
test
def set_query_parameter(url, param_name, param_value):
    """Given a URL, set or replace a query parameter and return the modified URL.

    Args:
        url: a given URL
        param_name: the parameter name to add
        param_value: the parameter value

    Returns:
        URL with the added parameter
    """
    parts = urlsplit(url)
    # parse existing query, overwrite (or add) the requested parameter, re-encode
    params = parse_qs(parts.query)
    params[param_name] = [param_value]
    return urlunsplit(parts._replace(query=urlencode(params, doseq=True)))
python
{ "resource": "" }
q267030
PlainText.alexa
test
def alexa(self) -> dict:
    """Returns Amazon Alexa compatible state of the PlainText instance.

    Creates an Amazon Alexa response blank with populated "outputSpeech" and
    "card" sections.

    Returns:
        response: Amazon Alexa representation of PlainText state.
    """
    text = self.content
    return {
        'response': {
            'shouldEndSession': False,
            'outputSpeech': {
                'type': 'PlainText',
                'text': text},
            'card': {
                'type': 'Simple',
                'content': text
            }
        }
    }
python
{ "resource": "" }
q267031
Button.json
test
def json(self) -> dict:
    """Returns json compatible state of the Button instance.

    Returns:
        control_json: Json representation of Button state.
    """
    # fill the shared control skeleton with this button's payload
    self.control_json['content'] = {'name': self.name, 'callback': self.callback}
    return self.control_json
python
{ "resource": "" }
q267032
Button.ms_bot_framework
test
def ms_bot_framework(self) -> dict:
    """Returns MS Bot Framework compatible state of the Button instance.

    Creates MS Bot Framework CardAction (button) with postBack value return.

    Returns:
        control_json: MS Bot Framework representation of Button state.
    """
    card_action = {}
    card_action['type'] = 'postBack'
    card_action['title'] = self.name
    # Fixed: the original read `card_action['value'] = self.callback = self.callback`,
    # a redundant chained self-assignment; only the dict assignment is needed.
    card_action['value'] = self.callback
    return card_action
python
{ "resource": "" }
q267033
ButtonsFrame.json
test
def json(self) -> dict:
    """Returns json compatible state of the ButtonsFrame instance.

    Returns json compatible state of the ButtonsFrame instance including all
    nested buttons.

    Returns:
        control_json: Json representation of ButtonsFrame state.
    """
    # optional frame caption comes first, then the serialized child controls
    content = {'text': self.text} if self.text else {}
    content['controls'] = [control.json() for control in self.content]
    self.control_json['content'] = content
    return self.control_json
python
{ "resource": "" }
q267034
ButtonsFrame.ms_bot_framework
test
def ms_bot_framework(self) -> dict:
    """Returns MS Bot Framework compatible state of the ButtonsFrame instance.

    Creates an MS Bot Framework activity blank with a RichCard in "attachments".
    The RichCard is populated with CardActions corresponding to the buttons
    embedded in the ButtonsFrame.

    Returns:
        control_json: MS Bot Framework representation of ButtonsFrame state.
    """
    rich_card = {'buttons': [button.ms_bot_framework() for button in self.content]}
    if self.text:
        rich_card['title'] = self.text

    return {
        'type': 'message',
        'attachments': [
            {
                "contentType": "application/vnd.microsoft.card.thumbnail",
                "content": rich_card
            }
        ]
    }
python
{ "resource": "" }
q267035
squad_v2_f1
test
def squad_v2_f1(y_true: List[List[str]], y_predicted: List[str]) -> float:
    """ Calculates F-1 score between y_true and y_predicted
        F-1 score uses the best matching y_true answer

        The same as in SQuAD-v2.0

    Args:
        y_true: list of correct answers (correct answers are represented by list of strings)
        y_predicted: list of predicted answers

    Returns:
        F-1 score : float
    """
    f1_total = 0.0
    for ground_truth, prediction in zip(y_true, y_predicted):
        prediction_tokens = normalize_answer(prediction).split()
        f1s = []
        for gt in ground_truth:
            gt_tokens = normalize_answer(gt).split()
            # SQuAD-v2.0 convention: for no-answer cases F1 is 1.0 only when
            # both prediction and ground truth are empty
            if len(gt_tokens) == 0 or len(prediction_tokens) == 0:
                f1s.append(float(gt_tokens == prediction_tokens))
                continue
            # multiset intersection of tokens counts each shared occurrence once
            common = Counter(prediction_tokens) & Counter(gt_tokens)
            num_same = sum(common.values())
            if num_same == 0:
                f1s.append(0.0)
                continue
            precision = 1.0 * num_same / len(prediction_tokens)
            recall = 1.0 * num_same / len(gt_tokens)
            f1 = (2 * precision * recall) / (precision + recall)
            f1s.append(f1)
        # best matching ground-truth answer per sample
        f1_total += max(f1s)
    # scaled to percents; empty input yields 0
    return 100 * f1_total / len(y_true) if len(y_true) > 0 else 0
python
{ "resource": "" }
q267036
recall_at_k
test
def recall_at_k(y_true: List[int], y_pred: List[List[np.ndarray]], k: int):
    """
    Calculates recall at k ranking metric.

    Args:
        y_true: Labels. Not used in the calculation of the metric.
        y_pred: Predictions.
            Each prediction contains ranking score of all ranking candidates for the particular data sample.
            It is supposed that the ranking score for the true candidate goes first in the prediction.

    Returns:
        Recall at k
    """
    total = float(len(y_pred))
    # rank candidates by descending score and keep the top k indices per sample
    top_k = np.flip(np.argsort(np.array(y_pred), -1), -1)[:, :k]
    # the true candidate is at index 0 by convention
    hits = sum(1 for row in top_k if 0 in row)
    return float(hits) / total
python
{ "resource": "" }
q267037
check_gpu_existence
test
def check_gpu_existence():
    r"""Return True if at least one GPU is available"""
    global _gpu_available
    # probe once and memoize the result in the module-level flag
    if _gpu_available is None:
        sess_config = tf.ConfigProto()
        # do not grab all GPU memory just for this probe
        sess_config.gpu_options.allow_growth = True
        try:
            with tf.Session(config=sess_config):
                device_list = device_lib.list_local_devices()
                _gpu_available = any(device.device_type == 'GPU' for device in device_list)
        except AttributeError as e:
            # tf may be a mock during documentation builds — assume no GPU
            log.warning(f'Got an AttributeError `{e}`, assuming documentation building')
            _gpu_available = False
    return _gpu_available
python
{ "resource": "" }
q267038
_parse_config_property
test
def _parse_config_property(item: _T, variables: Dict[str, Union[str, Path, float, bool, None]]) -> _T:
    """Recursively apply config's variables values to its property"""
    # strings are interpolated; containers are rebuilt with interpolated members;
    # anything else passes through untouched
    if isinstance(item, str):
        return item.format(**variables)
    if isinstance(item, list):
        return [_parse_config_property(element, variables) for element in item]
    if isinstance(item, dict):
        return {key: _parse_config_property(value, variables) for key, value in item.items()}
    return item
python
{ "resource": "" }
q267039
parse_config
test
def parse_config(config: Union[str, Path, dict]) -> dict:
    """Read config's variables and apply their values to all its properties"""
    # accept either a ready dict or a path/name to resolve and load
    if isinstance(config, (str, Path)):
        config = read_json(find_config(config))

    variables = {
        'DEEPPAVLOV_PATH': os.getenv(f'DP_DEEPPAVLOV_PATH', Path(__file__).parent.parent.parent)
    }
    for name, value in config.get('metadata', {}).get('variables', {}).items():
        # a DP_-prefixed environment variable overrides the config value
        env_name = f'DP_{name}'
        if env_name in os.environ:
            value = os.getenv(env_name)
        # later variables may reference earlier ones via format placeholders
        variables[name] = value.format(**variables)

    return _parse_config_property(config, variables)
python
{ "resource": "" }
q267040
expand_path
test
def expand_path(path: Union[str, Path]) -> Path:
    """Convert relative paths to absolute with resolving user directory."""
    expanded = Path(path).expanduser()
    return expanded.resolve()
python
{ "resource": "" }
q267041
from_params
test
def from_params(params: Dict, mode: str = 'infer', serialized: Any = None, **kwargs) -> Component: """Builds and returns the Component from corresponding dictionary of parameters.""" # what is passed in json: config_params = {k: _resolve(v) for k, v in params.items()} # get component by reference (if any) if 'ref' in config_params: try: component = _refs[config_params['ref']] if serialized is not None: component.deserialize(serialized) return component except KeyError: e = ConfigError('Component with id "{id}" was referenced but not initialized' .format(id=config_params['ref'])) log.exception(e) raise e elif 'config_path' in config_params: from deeppavlov.core.commands.infer import build_model refs = _refs.copy() _refs.clear() config = parse_config(expand_path(config_params['config_path'])) model = build_model(config, serialized=serialized) _refs.clear() _refs.update(refs) try: _refs[config_params['id']] = model except KeyError: pass return model cls_name = config_params.pop('class_name', None) if not cls_name: e = ConfigError('Component config has no `class_name` nor `ref` fields') log.exception(e) raise e cls = get_model(cls_name) # find the submodels params recursively config_params = {k: _init_param(v, mode) for k, v in config_params.items()} try: spec = inspect.getfullargspec(cls) if 'mode' in spec.args+spec.kwonlyargs or spec.varkw is not None: kwargs['mode'] = mode component = cls(**dict(config_params, **kwargs)) try: _refs[config_params['id']] = component except KeyError: pass except Exception: log.exception("Exception in {}".format(cls)) raise if serialized is not None: component.deserialize(serialized) return component
python
{ "resource": "" }
q267042
Bot.run
test
def run(self) -> None:
    """Thread run method implementation."""
    # serve requests forever: block on the input queue, process, publish result;
    # the thread is expected to be terminated with its owner process
    while True:
        request = self.input_queue.get()
        response = self._handle_request(request)
        self.output_queue.put(response)
python
{ "resource": "" }
q267043
Bot._del_conversation
test
def _del_conversation(self, conversation_key: str) -> None:
    """Deletes Conversation instance.

    Args:
        conversation_key: Conversation key.
    """
    # unknown keys are silently ignored
    if conversation_key not in self.conversations:
        return
    del self.conversations[conversation_key]
    log.info(f'Deleted conversation, key: {conversation_key}')
python
{ "resource": "" }
q267044
Bot._refresh_valid_certs
test
def _refresh_valid_certs(self) -> None:
    """Conducts cleanup of periodical certificates with expired validation."""
    # re-arm the periodic timer so the sweep keeps running
    self.timer = Timer(REFRESH_VALID_CERTS_PERIOD_SECS, self._refresh_valid_certs)
    self.timer.start()

    expired_certificates = []

    for valid_cert_url, valid_cert in self.valid_certificates.items():
        valid_cert: ValidatedCert = valid_cert
        cert_expiration_time: datetime = valid_cert.expiration_timestamp
        if datetime.utcnow() > cert_expiration_time:
            expired_certificates.append(valid_cert_url)

    # delete after the scan to avoid mutating the dict while iterating it
    for expired_cert_url in expired_certificates:
        del self.valid_certificates[expired_cert_url]
        log.info(f'Validation period of {expired_cert_url} certificate expired')
python
{ "resource": "" }
q267045
Bot._verify_request
test
def _verify_request(self, signature_chain_url: str, signature: str, request_body: bytes) -> bool:
    """Conducts series of Alexa request verifications against Amazon Alexa requirements.

    Args:
        signature_chain_url: Signature certificate URL from SignatureCertChainUrl HTTP header.
        signature: Base64 decoded Alexa request signature from Signature HTTP header.
        request_body: full HTTPS request body

    Returns:
        result: True if verification was successful, False if not.
    """
    # validate and cache the Amazon certificate on first sight of this URL
    if signature_chain_url not in self.valid_certificates.keys():
        amazon_cert: X509 = verify_cert(signature_chain_url)
        if amazon_cert:
            amazon_cert_lifetime: timedelta = self.config['amazon_cert_lifetime']
            expiration_timestamp = datetime.utcnow() + amazon_cert_lifetime
            validated_cert = ValidatedCert(cert=amazon_cert, expiration_timestamp=expiration_timestamp)
            self.valid_certificates[signature_chain_url] = validated_cert
            log.info(f'Certificate {signature_chain_url} validated')
        else:
            log.error(f'Certificate {signature_chain_url} validation failed')
            return False
    else:
        # reuse the previously validated certificate
        validated_cert: ValidatedCert = self.valid_certificates[signature_chain_url]
        amazon_cert: X509 = validated_cert.cert

    if verify_signature(amazon_cert, signature, request_body):
        result = True
    else:
        log.error(f'Failed signature verification for request: {request_body.decode("utf-8", "replace")}')
        result = False

    return result
python
{ "resource": "" }
q267046
Bot._handle_request
test
def _handle_request(self, request: dict) -> dict:
    """Processes Alexa requests from skill server and returns responses to Alexa.

    Verifies the request certificate/signature and the request timestamp
    freshness, then dispatches to the per-user Conversation instance
    (creating one on first contact).

    Args:
        request: Dict with Alexa request payload and metadata.

    Returns:
        result: Alexa formatted or error response.
    """
    request_body: bytes = request['request_body']
    signature_chain_url: str = request['signature_chain_url']
    signature: str = request['signature']
    alexa_request: dict = request['alexa_request']

    if not self._verify_request(signature_chain_url, signature, request_body):
        return {'error': 'failed certificate/signature check'}

    timestamp_str = alexa_request['request']['timestamp']
    timestamp_datetime = datetime.strptime(timestamp_str, '%Y-%m-%dT%H:%M:%SZ')
    now = datetime.utcnow()

    # Bug fix: the original used timedelta.seconds, which ignores the .days
    # component, so a request older than a day could pass the tolerance
    # check. total_seconds() covers the full (signed) magnitude.
    delta = now - timestamp_datetime

    if abs(delta.total_seconds()) > REQUEST_TIMESTAMP_TOLERANCE_SECS:
        log.error(f'Failed timestamp check for request: {request_body.decode("utf-8", "replace")}')
        return {'error': 'failed request timestamp check'}

    conversation_key = alexa_request['session']['user']['userId']

    if conversation_key not in self.conversations.keys():
        if self.config['multi_instance']:
            conv_agent = self._init_agent()
            log.info('New conversation instance level agent initiated')
        else:
            conv_agent = self.agent

        self.conversations[conversation_key] = \
            Conversation(config=self.config,
                         agent=conv_agent,
                         conversation_key=conversation_key,
                         self_destruct_callback=lambda: self._del_conversation(conversation_key))

        log.info(f'Created new conversation, key: {conversation_key}')

    conversation = self.conversations[conversation_key]
    response = conversation.handle_request(alexa_request)

    return response
python
{ "resource": "" }
q267047
cls_from_str
test
def cls_from_str(name: str) -> type:
    """Resolve a ``module.submodules:ClassName`` string to the named object."""
    parts = name.split(':')
    if len(parts) != 2:
        # Anything other than exactly one colon separator is malformed.
        raise ConfigError('Expected class description in a `module.submodules:ClassName` form, but got `{}`'
                          .format(name))
    module_name, cls_name = parts
    module = importlib.import_module(module_name)
    return getattr(module, cls_name)
python
{ "resource": "" }
q267048
register
test
def register(name: str = None) -> type:
    """
    Register classes that could be initialized from JSON configuration file.

    If name is not passed, the class name is converted to snake-case.
    Returns a decorator that records the class's ``module:ClassName`` path
    in the global registry under the chosen name.
    """
    def decorate(model_cls: type, reg_name: str = None) -> type:
        # Fall back to the snake-cased class name when no explicit name is given.
        model_name = reg_name or short_name(model_cls)
        global _REGISTRY
        cls_name = model_cls.__module__ + ':' + model_cls.__name__
        # Warn when a different class is about to take over an existing name.
        if model_name in _REGISTRY and _REGISTRY[model_name] != cls_name:
            logger.warning('Registry name "{}" has been already registered and will be overwritten.'.format(model_name))
        _REGISTRY[model_name] = cls_name
        return model_cls

    return lambda model_cls_name: decorate(model_cls_name, name)
python
{ "resource": "" }
q267049
get_model
test
def get_model(name: str) -> type:
    """Returns a registered class object with the name given in the string.

    Raises:
        ConfigError: if the name is neither registered nor a fully
            qualified ``module:ClassName`` path.
    """
    if name not in _REGISTRY:
        # Allow fully qualified `module:ClassName` strings to bypass the registry.
        if ':' not in name:
            raise ConfigError("Model {} is not registered.".format(name))
        return cls_from_str(name)
    return cls_from_str(_REGISTRY[name])
python
{ "resource": "" }
q267050
H2OGeneralizedLinearEstimator.getGLMRegularizationPath
test
def getGLMRegularizationPath(model):
    """
    Extract full regularization path explored during lambda search from glm model.

    :param model: source lambda search model
    :returns: dict with keys ``lambdas``, ``explained_deviance_train``,
        ``explained_deviance_valid``, ``coefficients`` (one name->value dict
        per lambda) and, when the backend provides it, ``coefficients_std``.
    """
    x = h2o.api("GET /3/GetGLMRegPath", data={"model": model._model_json["model_id"]["name"]})
    ns = x.pop("coefficient_names")
    res = {
        "lambdas": x["lambdas"],
        "explained_deviance_train": x["explained_deviance_train"],
        "explained_deviance_valid": x["explained_deviance_valid"],
        # Pair each coefficient vector with its column names for readability.
        "coefficients": [dict(zip(ns, y)) for y in x["coefficients"]],
    }
    if "coefficients_std" in x:
        res["coefficients_std"] = [dict(zip(ns, y)) for y in x["coefficients_std"]]
    return res
python
{ "resource": "" }
q267051
H2OGeneralizedLinearEstimator.makeGLMModel
test
def makeGLMModel(model, coefs, threshold=.5):
    """
    Create a custom GLM model using the given coefficients.

    Needs to be passed source model trained on the dataset to extract the
    dataset information from.

    :param model: source model, used for extracting dataset information
    :param coefs: dictionary containing model coefficients (name -> value)
    :param threshold: (optional, only for binomial) decision threshold used for classification
    :returns: a new :class:`H2OGeneralizedLinearEstimator` resolved from the
        backend model created with the supplied coefficients.
    """
    model_json = h2o.api(
        "POST /3/MakeGLMModel",
        data={"model": model._model_json["model_id"]["name"],
              "names": list(coefs.keys()),
              "beta": list(coefs.values()),
              "threshold": threshold}
    )
    m = H2OGeneralizedLinearEstimator()
    m._resolve_model(model_json["model_id"]["name"], model_json)
    return m
python
{ "resource": "" }
q267052
H2OCluster.from_kvs
test
def from_kvs(keyvals):
    """
    Create H2OCluster object from a list of key-value pairs.

    Metadata keys (``__meta``, ``_exclude_fields``, ``__schema``) are
    skipped; any key not listed in ``_cloud_v3_valid_keys`` raises
    AttributeError.

    TODO: This method should be moved into the base H2OResponse class.
    """
    obj = H2OCluster()
    # Remember when the data was fetched so staleness can be detected later.
    obj._retrieved_at = time.time()
    for k, v in keyvals:
        if k in {"__meta", "_exclude_fields", "__schema"}:
            continue
        if k in _cloud_v3_valid_keys:
            obj._props[k] = v
        else:
            raise AttributeError("Attribute %s cannot be set on H2OCluster (= %r)" % (k, v))
    return obj
python
{ "resource": "" }
q267053
H2OCluster.shutdown
test
def shutdown(self, prompt=False):
    """
    Shut down the server.

    This method checks if the H2O cluster is still running, and if it does
    shuts it down (via a REST API call). No-op when the cluster is already
    down.

    :param prompt: A logical value indicating whether to prompt the user before shutting down the H2O server.
    """
    if not self.is_running():
        return
    assert_is_type(prompt, bool)
    if prompt:
        question = "Are you sure you want to shutdown the H2O instance running at %s (Y/N)? " \
                   % h2o.connection().base_url
        response = input(question)  # works in Py2 & Py3 because redefined in h2o.utils.compatibility module
    else:
        response = "Y"
    if response.lower() in {"y", "yes"}:
        h2o.api("POST /3/Shutdown")
        h2o.connection().close()
python
{ "resource": "" }
q267054
H2OCluster.is_running
test
def is_running(self):
    """
    Determine if the H2O cluster is running or not.

    :returns: True if the cluster is up; False otherwise
    """
    try:
        # A locally launched server that has died cannot be "up", so skip
        # the network round-trip in that case.
        if h2o.connection().local_server and not h2o.connection().local_server.is_running():
            return False
        h2o.api("GET /")
        return True
    except (H2OConnectionError, H2OServerError):
        return False
python
{ "resource": "" }
q267055
H2OCluster.show_status
test
def show_status(self, detailed=False):
    """
    Print current cluster status information.

    Refreshes the cached cloud info from the backend when it is older than
    ``REFRESH_INTERVAL`` seconds, then renders a summary table (and, when
    requested, one column of stats per node).

    :param detailed: if True, then also print detailed information about each node.
    """
    if self._retrieved_at + self.REFRESH_INTERVAL < time.time():
        # Info is stale, need to refresh
        new_info = h2o.api("GET /3/Cloud")
        self._fill_from_h2ocluster(new_info)
    ncpus = sum(node["num_cpus"] for node in self.nodes)
    allowed_cpus = sum(node["cpus_allowed"] for node in self.nodes)
    free_mem = sum(node["free_mem"] for node in self.nodes)
    unhealthy_nodes = sum(not node["healthy"] for node in self.nodes)
    status = "locked" if self.locked else "accepting new members"
    if unhealthy_nodes == 0:
        status += ", healthy"
    else:
        status += ", %d nodes are not healthy" % unhealthy_nodes
    api_extensions = self.list_api_extensions()
    H2ODisplay([
        ["H2O cluster uptime:", get_human_readable_time(self.cloud_uptime_millis)],
        ["H2O cluster timezone:", self.cloud_internal_timezone],
        ["H2O data parsing timezone:", self.datafile_parser_timezone],
        ["H2O cluster version:", self.version],
        ["H2O cluster version age:", "{} {}".format(self.build_age, ("!!!" if self.build_too_old else ""))],
        ["H2O cluster name:", self.cloud_name],
        ["H2O cluster total nodes:", self.cloud_size],
        ["H2O cluster free memory:", get_human_readable_bytes(free_mem)],
        ["H2O cluster total cores:", str(ncpus)],
        ["H2O cluster allowed cores:", str(allowed_cpus)],
        ["H2O cluster status:", status],
        ["H2O connection url:", h2o.connection().base_url],
        ["H2O connection proxy:", h2o.connection().proxy],
        ["H2O internal security:", self.internal_security_enabled],
        ["H2O API Extensions:", ', '.join(api_extensions)],
        ["Python version:", "%d.%d.%d %s" % tuple(sys.version_info[:4])],
    ])
    if detailed:
        keys = ["h2o", "healthy", "last_ping", "num_cpus", "sys_load",
                "mem_value_size", "free_mem", "pojo_mem", "swap_mem",
                "free_disk", "max_disk", "pid", "num_keys", "tcps_active",
                "open_fds", "rpcs_active"]
        header = ["Nodes info:"] + ["Node %d" % (i + 1) for i in range(len(self.nodes))]
        # Build the table transposed: one row per stat, one column per node.
        table = [[k] for k in keys]
        for node in self.nodes:
            for i, k in enumerate(keys):
                table[i].append(node[k])
        H2ODisplay(table=table, header=header)
python
{ "resource": "" }
q267056
H2OCluster.list_jobs
test
def list_jobs(self):
    """List all jobs performed by the cluster.

    :returns: a transposed table (list of columns) with rows
        type / dest / description / status, one entry per job.
    """
    res = h2o.api("GET /3/Jobs")
    table = [["type"], ["dest"], ["description"], ["status"]]
    for job in res["jobs"]:
        job_dest = job["dest"]
        table[0].append(self._translate_job_type(job_dest["type"]))
        table[1].append(job_dest["name"])
        table[2].append(job["description"])
        table[3].append(job["status"])
    return table
python
{ "resource": "" }
q267057
H2OCluster.list_timezones
test
def list_timezones(self):
    """Return the list of all known timezones (as an H2OFrame computed on the backend)."""
    # Local import avoids a circular dependency at module load time.
    from h2o.expr import ExprNode
    return h2o.H2OFrame._expr(expr=ExprNode("listTimeZones"))._frame()
python
{ "resource": "" }
q267058
H2OCluster._fill_from_h2ocluster
test
def _fill_from_h2ocluster(self, other): """ Update information in this object from another H2OCluster instance. :param H2OCluster other: source of the new information for this object. """ self._props = other._props self._retrieved_at = other._retrieved_at other._props = {} other._retrieved_at = None
python
{ "resource": "" }
q267059
H2OStackedEnsembleEstimator.metalearner_params
test
def metalearner_params(self):
    """
    Parameters for metalearner algorithm

    Type: ``dict``  (default: ``None``).

    Example: metalearner_gbm_params = {'max_depth': 2, 'col_sample_rate': 0.3}
    """
    # Fetch once instead of reading self._parms twice; compare with
    # `is not None` (PEP 8) rather than `!= None`.
    params = self._parms.get("metalearner_params")
    if params is not None:
        # Stored as a string representation; parse it back into a dict.
        metalearner_params_dict = ast.literal_eval(params)
        for k in metalearner_params_dict:
            if len(metalearner_params_dict[k]) == 1:  # single parameter
                metalearner_params_dict[k] = metalearner_params_dict[k][0]
        return metalearner_params_dict
    return params
python
{ "resource": "" }
q267060
H2O.stabilize
test
def stabilize(self, test_func, error, timeoutSecs=10, retryDelaySecs=0.5):
    '''Repeatedly test a function waiting for it to return True.

    Arguments:
    test_func      -- A function that will be run repeatedly
    error          -- A function that will be run to produce an error message
                      it will be called with (node, timeTakenSecs, numberOfRetries)
                OR   -- A string that will be interpolated with a dictionary of
                      { 'timeTakenSecs', 'numberOfRetries' }
    timeoutSecs    -- How long in seconds to keep trying before declaring a failure
    retryDelaySecs -- How long to wait between retry attempts

    Raises Exception when the timeout elapses without test_func succeeding.
    '''
    start = time.time()
    numberOfRetries = 0
    # NOTE: the while/else below relies on `break` skipping the else-clause;
    # the else (failure path) only runs when the loop condition times out.
    while h2o_args.no_timeout or (time.time() - start < timeoutSecs):
        if test_func(self, tries=numberOfRetries, timeoutSecs=timeoutSecs):
            break
        time.sleep(retryDelaySecs)
        numberOfRetries += 1
        # hey, check the sandbox if we've been waiting a long time...rather than wait for timeout
        # to find the badness?. can check_sandbox_for_errors at any time
        if ((numberOfRetries % 50) == 0):
            check_sandbox_for_errors(python_test_name=h2o_args.python_test_name)
    else:
        timeTakenSecs = time.time() - start
        if isinstance(error, type('')):
            raise Exception('%s failed after %.2f seconds having retried %d times' % (
                error, timeTakenSecs, numberOfRetries))
        else:
            msg = error(self, timeTakenSecs, numberOfRetries)
            raise Exception(msg)
python
{ "resource": "" }
q267061
summary
test
def summary(self, key, column="C1", timeoutSecs=10, **kwargs):
    '''
    Return the summary for a single column for a single Frame in the h2o cluster.

    :param key: frame key on the h2o cluster.
    :param column: column name to summarize (default "C1").
    :param timeoutSecs: request timeout in seconds.
    '''
    params_dict = {
        # 'offset': 0,
        # 'len': 100
    }
    h2o_methods.check_params_update_kwargs(params_dict, kwargs, 'summary', True)
    result = self.do_json_request('3/Frames.json/%s/columns/%s/summary' % (key, column),
                                  timeout=timeoutSecs, params=params_dict)
    h2o_sandbox.check_sandbox_for_errors()
    return result
python
{ "resource": "" }
q267062
delete_frame
test
def delete_frame(self, key, ignoreMissingKey=True, timeoutSecs=60, **kwargs):
    '''
    Delete a frame on the h2o cluster, given its key.

    :param key: frame key to delete (must not be None).
    :param ignoreMissingKey: when False, raise ValueError if the key is missing.
    '''
    assert key is not None, '"key" parameter is null'

    result = self.do_json_request('/3/Frames.json/' + key, cmd='delete', timeout=timeoutSecs)

    # TODO: look for what?
    if not ignoreMissingKey and 'f00b4r' in result:
        raise ValueError('Frame key not found: ' + key)
    return result
python
{ "resource": "" }
q267063
model_builders
test
def model_builders(self, algo=None, timeoutSecs=10, **kwargs):
    '''
    Return a model builder or all of the model builders known to the
    h2o cluster.  The model builders are contained in a dictionary
    called "model_builders" at the top level of the result.  The
    dictionary maps algorithm names to parameters lists.  Each of the
    parameters contains all the metdata required by a client to
    present a model building interface to the user.

    if parameters = True, return the parameters?
    '''
    params_dict = {}
    h2o_methods.check_params_update_kwargs(params_dict, kwargs, 'model_builders', False)
    request = '3/ModelBuilders.json'
    # Narrow the request to a single algorithm when one is given.
    if algo:
        request += "/" + algo
    result = self.do_json_request(request, timeout=timeoutSecs, params=params_dict)
    # verboseprint(request, "result:", dump_json(result))
    h2o_sandbox.check_sandbox_for_errors()
    return result
python
{ "resource": "" }
q267064
validate_model_parameters
test
def validate_model_parameters(self, algo, training_frame, parameters, timeoutSecs=60, **kwargs):
    '''
    Check a dictionary of model builder parameters on the h2o cluster
    using the given algorithm and model parameters.

    :param algo: algorithm name as known to /ModelBuilders (must not be None).
    :param training_frame: optional frame key; when given, it is verified
        to exist and injected into parameters['training_frame'].
    :param parameters: dict of model parameters to validate server-side.
    '''
    assert algo is not None, '"algo" parameter is null'
    # Allow this now: assert training_frame is not None, '"training_frame" parameter is null'
    assert parameters is not None, '"parameters" parameter is null'

    model_builders = self.model_builders(timeoutSecs=timeoutSecs)
    assert model_builders is not None, "/ModelBuilders REST call failed"
    assert algo in model_builders['model_builders']
    builder = model_builders['model_builders'][algo]

    # TODO: test this assert, I don't think this is working. . .
    if training_frame is not None:
        frames = self.frames(key=training_frame)
        assert frames is not None, "/Frames/{0} REST call failed".format(training_frame)

        key_name = frames['frames'][0]['key']['name']
        assert key_name==training_frame, \
            "/Frames/{0} returned Frame {1} rather than Frame {2}".format(training_frame, key_name, training_frame)
        parameters['training_frame'] = training_frame

    # TODO: add parameter existence checks
    # TODO: add parameter value validation

    # FIX! why ignoreH2oError here?
    result = self.do_json_request('/3/ModelBuilders.json/' + algo + "/parameters", cmd='post',
                                  timeout=timeoutSecs, postData=parameters, ignoreH2oError=True, noExtraErrorCheck=True)

    verboseprint("model parameters validation: " + repr(result))
    return result
python
{ "resource": "" }
q267065
compute_model_metrics
test
def compute_model_metrics(self, model, frame, timeoutSecs=60, **kwargs):
    '''
    Score a model on the h2o cluster on the given Frame and return
    only the model metrics.

    :param model: model key on the h2o cluster (must not be None).
    :param frame: frame key to score against (must not be None).
    :returns: the first entry of the 'model_metrics' list from the response.
    '''
    assert model is not None, '"model" parameter is null'
    assert frame is not None, '"frame" parameter is null'

    models = self.models(key=model, timeoutSecs=timeoutSecs)
    assert models is not None, "/Models REST call failed"
    assert models['models'][0]['model_id']['name'] == model, "/Models/{0} returned Model {1} rather than Model {2}".format(model, models['models'][0]['key']['name'], model)

    # TODO: test this assert, I don't think this is working. . .
    frames = self.frames(key=frame)
    assert frames is not None, "/Frames/{0} REST call failed".format(frame)

    # Fixed: was a Python-2 `print` statement, which is a SyntaxError on
    # Python 3; the concatenated form prints the same text on both.
    print("frames: " + dump_json(frames))

    # is the name not there?
    # assert frames['frames'][0]['model_id']['name'] == frame, "/Frames/{0} returned Frame {1} rather than Frame {2}".format(frame, models['models'][0]['key']['name'], frame)

    result = self.do_json_request('/3/ModelMetrics.json/models/' + model + '/frames/' + frame, cmd='post', timeout=timeoutSecs)

    mm = result['model_metrics'][0]
    verboseprint("model metrics: " + repr(mm))
    h2o_sandbox.check_sandbox_for_errors()
    return mm
python
{ "resource": "" }
q267066
model_metrics
test
def model_metrics(self, timeoutSecs=60, **kwargs):
    '''
    ModelMetrics list.

    Fetches all model metrics known to the cluster via GET /3/ModelMetrics.
    '''
    result = self.do_json_request('/3/ModelMetrics.json', cmd='get', timeout=timeoutSecs)
    h2o_sandbox.check_sandbox_for_errors()
    return result
python
{ "resource": "" }
q267067
delete_model
test
def delete_model(self, key, ignoreMissingKey=True, timeoutSecs=60, **kwargs):
    '''
    Delete a model on the h2o cluster, given its key.

    :param key: model key to delete (must not be None).
    :param ignoreMissingKey: when False, raise ValueError if the key is missing.
    '''
    assert key is not None, '"key" parameter is null'

    result = self.do_json_request('/3/Models.json/' + key, cmd='delete', timeout=timeoutSecs)

    # TODO: look for what?
    if not ignoreMissingKey and 'f00b4r' in result:
        raise ValueError('Model key not found: ' + key)

    verboseprint("delete_model result:", dump_json(result))

    return result
python
{ "resource": "" }
q267068
H2OCache._tabulate
test
def _tabulate(self, tablefmt="simple", rollups=False, rows=10):
    """Pretty tabulated string of all the cached data, and column names.

    :param tablefmt: table format name passed through to ``tabulate``.
    :param rollups: when True, prepend per-column summary stats
        (type/mins/mean/maxs/sigma/zeros/missing) above the data rows.
    :param rows: number of rows to fetch when the cache must be (re)filled.
    """
    if not self.is_valid():
        self.fill(rows=rows)
    # Pretty print cached data
    d = collections.OrderedDict()
    # If also printing the rollup stats, build a full row-header
    if rollups:
        col = next(iter(viewvalues(self._data)))  # Get a sample column
        lrows = len(col['data'])  # Cached rows being displayed
        d[""] = ["type", "mins", "mean", "maxs", "sigma", "zeros", "missing"] + list(map(str, range(lrows)))
    # For all columns...
    for k, v in viewitems(self._data):
        x = v['data']  # Data to display
        t = v["type"]  # Column type
        if t == "enum":
            domain = v['domain']  # Map to cat strings as needed
            x = ["" if math.isnan(idx) else domain[int(idx)] for idx in x]
        elif t == "time":
            # Millisecond epoch values -> human-readable UTC timestamps.
            x = ["" if math.isnan(z) else time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(z / 1000)) for z in x]
        if rollups:  # Rollups, if requested
            mins = v['mins'][0] if v['mins'] and v["type"] != "enum" else None
            maxs = v['maxs'][0] if v['maxs'] and v["type"] != "enum" else None
            # Cross check type with mean and sigma. Set to None if of type enum.
            if v['type'] == "enum":
                v['mean'] = v['sigma'] = v['zero_count'] = None
            x = [v['type'], mins, v['mean'], maxs, v['sigma'], v['zero_count'], v['missing_count']] + x
        d[k] = x  # Insert into ordered-dict
    return tabulate.tabulate(d, headers="keys", tablefmt=tablefmt)
python
{ "resource": "" }
q267069
run_instances
test
def run_instances(count, ec2_config, region, waitForSSH=True, tags=None):
    '''Create a new reservation for count instances.

    Launches the instances, waits for them to leave the 'pending' state,
    optionally waits for SSH to come up and tags them. On any failure the
    whole reservation is terminated and the exception re-raised.
    '''
    ec2params = inheritparams(ec2_config, EC2_API_RUN_INSTANCE)
    ec2params.setdefault('min_count', count)
    ec2params.setdefault('max_count', count)

    reservation = None
    conn = ec2_connect(region)
    try:
        reservation = conn.run_instances(**ec2params)
        log('Reservation: {0}'.format(reservation.id))
        log('Waiting for {0} EC2 instances {1} to come up, this can take 1-2 minutes.'.format(len(reservation.instances), reservation.instances))
        start = time.time()
        time.sleep(1)
        for instance in reservation.instances:
            while instance.update() == 'pending':
                time.sleep(1)
                h2o_cmd.dot()

            if not instance.state == 'running':
                raise Exception('\033[91m[ec2] Error waiting for running state. Instance is in state {0}.\033[0m'.format(instance.state))
        log('Instances started in {0} seconds'.format(time.time() - start))
        log('Instances: ')
        for inst in reservation.instances:
            log(" {0} ({1}) : public ip: {2}, private ip: {3}".format(inst.public_dns_name, inst.id, inst.ip_address, inst.private_ip_address))

        if waitForSSH:
            # kbn: changing to private address, so it should fail if not in right domain
            # used to have the public ip address
            wait_for_ssh([ i.private_ip_address for i in reservation.instances ])

        # Tag instances
        try:
            if tags:
                conn.create_tags([i.id for i in reservation.instances], tags)
        # NOTE(review): bare except kept to preserve best-effort tagging
        # behavior, but it swallows every error including KeyboardInterrupt.
        except:
            warn('Something wrong during tagging instances. Exceptions IGNORED!')
            # Fixed: was a Python-2 `print` statement (SyntaxError on Py3).
            print(sys.exc_info())

        return reservation
    except:
        # Fixed: was a Python-2 `print` statement; single-string form prints
        # the same text on both Python 2 and 3.
        print("\033[91mUnexpected error\033[0m : " + str(sys.exc_info()))
        if reservation:
            terminate_reservation(reservation, region)
        raise
python
{ "resource": "" }
q267070
terminate_instances
test
def terminate_instances(instances, region):
    '''Terminate all of the EC2 instances given by their ids; no-op on an empty list.'''
    if not instances:
        return
    connection = ec2_connect(region)
    log("Terminating instances {0}.".format(instances))
    connection.terminate_instances(instances)
    log("Done")
python
{ "resource": "" }
q267071
stop_instances
test
def stop_instances(instances, region):
    '''Stop all of the EC2 instances given by their ids; no-op on an empty list.'''
    if not instances:
        return
    connection = ec2_connect(region)
    log("Stopping instances {0}.".format(instances))
    connection.stop_instances(instances)
    log("Done")
python
{ "resource": "" }
q267072
start_instances
test
def start_instances(instances, region):
    '''Start all of the EC2 instances given by their ids; no-op on an empty list.'''
    if not instances:
        return
    connection = ec2_connect(region)
    log("Starting instances {0}.".format(instances))
    connection.start_instances(instances)
    log("Done")
python
{ "resource": "" }
q267073
reboot_instances
test
def reboot_instances(instances, region):
    '''Reboot all of the EC2 instances given by their ids; no-op on an empty list.'''
    if not instances:
        return
    connection = ec2_connect(region)
    log("Rebooting instances {0}.".format(instances))
    connection.reboot_instances(instances)
    log("Done")
python
{ "resource": "" }
q267074
wait_for_ssh
test
def wait_for_ssh(ips, port=22, skipAlive=True, requiredsuccess=3):
    ''' Wait for ssh service to appear on given hosts.

    :param ips: iterable of host addresses to poll.
    :param port: SSH port to probe.
    :param skipAlive: when True, hosts that already answer are not re-polled.
    :param requiredsuccess: number of consecutive successful probes required
        before a host is considered up (a single failure resets the count).
    '''
    log('Waiting for SSH on following hosts: {0}'.format(ips))
    for ip in ips:
        if not skipAlive or not ssh_live(ip, port):
            log('Waiting for SSH on instance {0}...'.format(ip))
            count = 0
            while count < requiredsuccess:
                if ssh_live(ip, port):
                    count += 1
                else:
                    # Reset on failure: we want consecutive successes.
                    count = 0
                time.sleep(1)
                h2o_cmd.dot()
python
{ "resource": "" }
q267075
_get_method_full_name
test
def _get_method_full_name(func): """ Return fully qualified function name. This method will attempt to find "full name" of the given function object. This full name is either of the form "<class name>.<method name>" if the function is a class method, or "<module name>.<func name>" if it's a regular function. Thus, this is an attempt to back-port func.__qualname__ to Python 2. :param func: a function object. :returns: string with the function's full name as explained above. """ # Python 3.3 already has this information available... if hasattr(func, "__qualname__"): return func.__qualname__ module = inspect.getmodule(func) if module is None: return "?.%s" % getattr(func, "__name__", "?") for cls_name in dir(module): cls = getattr(module, cls_name) if not inspect.isclass(cls): continue for method_name in dir(cls): cls_method = getattr(cls, method_name) if cls_method == func: return "%s.%s" % (cls_name, method_name) if hasattr(func, "__name__"): return "%s.%s" % (module.__name__, func.__name__) return "<unknown>"
python
{ "resource": "" }
q267076
_find_function_from_code
test
def _find_function_from_code(frame, code):
    """
    Given a frame and a compiled function code, find the corresponding function object within the
    frame.

    This function addresses the following problem: when handling a stacktrace, we receive information about
    which piece of code was being executed in the form of a CodeType object. That objects contains function name,
    file name, line number, and the compiled bytecode. What it *doesn't* contain is the function object itself.
    So this utility function aims at locating this function object, and it does so by searching through objects
    in the preceding local frame (i.e. the frame where the function was called from). We expect that the function
    should usually exist there -- either by itself, or as a method on one of the objects.

    :param types.FrameType frame: local frame where the function ought to be found somewhere.
    :param types.CodeType code: the compiled code of the function to look for.

    :returns: the function object, or None if not found.
    """
    def find_code(iterable, depth=0):
        if depth > 3: return  # Avoid potential infinite loops, or generally objects that are too deep.
        for item in iterable:
            if item is None: continue
            found = None
            if hasattr(item, "__code__") and item.__code__ == code:
                found = item
            elif isinstance(item, type) or isinstance(item, ModuleType):  # class / module
                try:
                    # Recurse into the container's attributes (bounded by `depth`).
                    found = find_code((getattr(item, n, None) for n in dir(item)), depth + 1)
                except Exception:
                    # Sometimes merely getting module's attributes may cause an exception. For example :mod:`six.moves`
                    # is such an offender...
                    continue
            elif isinstance(item, (list, tuple, set)):
                found = find_code(item, depth + 1)
            elif isinstance(item, dict):
                found = find_code(item.values(), depth + 1)
            if found: return found

    # Search locals first, then globals of the caller's frame.
    return find_code(frame.f_locals.values()) or find_code(frame.f_globals.values())
python
{ "resource": "" }
q267077
_get_args_str
test
def _get_args_str(func, highlight=None): """ Return function's declared arguments as a string. For example for this function it returns "func, highlight=None"; for the ``_wrap`` function it returns "text, wrap_at=120, indent=4". This should usually coincide with the function's declaration (the part which is inside the parentheses). """ if not func: return "" s = str(inspect.signature(func))[1:-1] if highlight: s = re.sub(r"\b%s\b" % highlight, Style.BRIGHT + Fore.WHITE + highlight + Fore.LIGHTBLACK_EX + Style.NORMAL, s) return s
python
{ "resource": "" }
q267078
_wrap
test
def _wrap(text, wrap_at=120, indent=4): """ Return piece of text, wrapped around if needed. :param text: text that may be too long and then needs to be wrapped. :param wrap_at: the maximum line length. :param indent: number of spaces to prepend to all subsequent lines after the first. """ out = "" curr_line_length = indent space_needed = False for word in text.split(): if curr_line_length + len(word) > wrap_at: out += "\n" + " " * indent curr_line_length = indent space_needed = False if space_needed: out += " " curr_line_length += 1 out += word curr_line_length += len(word) space_needed = True return out
python
{ "resource": "" }
q267079
H2OEstimator.join
test
def join(self):
    """Wait until job's completion.

    Blocks until the underlying H2O job finishes, then fetches the trained
    model from the backend and resolves it into this estimator.
    """
    self._future = False
    self._job.poll()  # blocks until the job reaches a terminal state
    model_key = self._job.dest_key
    self._job = None
    model_json = h2o.api("GET /%d/Models/%s" % (self._rest_version, model_key))["models"][0]
    self._resolve_model(model_key, model_json)
python
{ "resource": "" }
q267080
H2OEstimator.train
test
def train(self, x=None, y=None, training_frame=None, offset_column=None, fold_column=None,
          weights_column=None, validation_frame=None, max_runtime_secs=None, ignored_columns=None,
          model_id=None, verbose=False):
    """
    Train the H2O model.

    :param x: A list of column names or indices indicating the predictor columns.
    :param y: An index or a column name indicating the response column.
    :param H2OFrame training_frame: The H2OFrame having the columns indicated by x and y (as well as any
        additional columns specified by fold, offset, and weights).
    :param offset_column: The name or index of the column in training_frame that holds the offsets.
    :param fold_column: The name or index of the column in training_frame that holds the per-row fold
        assignments.
    :param weights_column: The name or index of the column in training_frame that holds the per-row weights.
    :param validation_frame: H2OFrame with validation data to be scored on while training.
    :param float max_runtime_secs: Maximum allowed runtime in seconds for model training. Use 0 to disable.
    :param ignored_columns: A list of column names or indices to be excluded from training.
    :param model_id: An identifier to assign to the resulting model.
    :param bool verbose: Print scoring history to stdout. Defaults to False.
    """
    # Thin public wrapper: all work is delegated to the internal _train.
    self._train(x=x, y=y, training_frame=training_frame, offset_column=offset_column, fold_column=fold_column,
                weights_column=weights_column, validation_frame=validation_frame,
                max_runtime_secs=max_runtime_secs, ignored_columns=ignored_columns,
                model_id=model_id, verbose=verbose)
python
{ "resource": "" }
q267081
H2OEstimator.fit
test
def fit(self, X, y=None, **params):
    """
    Fit an H2O model as part of a scikit-learn pipeline or grid search.

    A warning will be issued if a caller other than sklearn attempts to use this method.

    :param H2OFrame X: An H2OFrame consisting of the predictor variables.
    :param H2OFrame y: An H2OFrame consisting of the response variable.
    :param params: Extra arguments.
    :returns: The current instance of H2OEstimator for method chaining.
    """
    # Walk the caller stack: if any frame belongs to an sklearn module the
    # call is considered legitimate and no warning is emitted.
    stk = inspect.stack()[1:]
    warn = True
    for s in stk:
        mod = inspect.getmodule(s[0])
        if mod:
            warn = "sklearn" not in mod.__name__
            if not warn: break
    if warn:
        warnings.warn("\n\n\t`fit` is not recommended outside of the sklearn framework. Use `train` instead.",
                      UserWarning, stacklevel=2)
    training_frame = X.cbind(y) if y is not None else X
    x = X.names
    y = y.names[0] if y is not None else None
    self.train(x, y, training_frame, **params)
    return self
python
{ "resource": "" }
q267082
H2OEstimator.get_params
test
def get_params(self, deep=True):
    """
    Obtain parameters for this estimator.

    Used primarily for sklearn Pipelines and sklearn grid search.

    :param deep: If True, return parameters of all sub-objects that are estimators.

    :returns: A dict of parameters
    """
    out = dict()
    for key, value in self.parms.items():
        if deep and isinstance(value, H2OEstimator):
            # Flatten nested estimator params using sklearn's "outer__inner"
            # key convention.
            deep_items = list(value.get_params().items())
            out.update((key + "__" + k, val) for k, val in deep_items)
        out[key] = value
    return out
python
{ "resource": "" }
q267083
signal_handler
test
def signal_handler(signum, stackframe):
    """Helper function to handle caught signals.

    Tears down all running clouds via the global runner. A re-entrancy
    guard ensures nested signals during teardown are ignored.
    """
    global g_runner
    global g_handling_signal

    if g_handling_signal:
        # Don't do this recursively.
        return
    g_handling_signal = True

    print("")
    print("----------------------------------------------------------------------")
    print("")
    print("SIGNAL CAUGHT (" + str(signum) + ").  TEARING DOWN CLOUDS.")
    print("")
    print("----------------------------------------------------------------------")
    g_runner.terminate()
python
{ "resource": "" }
q267084
wipe_output_dir
test
def wipe_output_dir():
    """Clear the output directory.

    Removes the global output directory (if it exists) and exits the
    process with status 1 when removal fails.
    """
    print("Wiping output directory.")
    try:
        if os.path.exists(g_output_dir):
            shutil.rmtree(str(g_output_dir))
    except OSError as e:
        print("ERROR: Removing output directory %s failed: " % g_output_dir)
        print("       (errno {0}): {1}".format(e.errno, e.strerror))
        print("")
        sys.exit(1)
python
{ "resource": "" }
q267085
remove_sandbox
test
def remove_sandbox(parent_dir, dir_name):
    """
    This function is written to remove sandbox directories if they exist under the parent_dir.

    :param parent_dir: string denoting full parent directory path
    :param dir_name: string denoting directory path which could be a sandbox

    :return: None
    """
    # Guard clause: only names containing "Rsandbox" are ever removed.
    if "Rsandbox" not in dir_name:
        return
    rsandbox_dir = os.path.join(parent_dir, dir_name)
    try:
        if sys.platform == "win32":
            # Cygwin rm copes with long/locked Windows paths better than rmtree.
            os.system(r'C:/cygwin64/bin/rm.exe -r -f "{0}"'.format(rsandbox_dir))
        else:
            shutil.rmtree(rsandbox_dir)
    except OSError as e:
        print("")
        print("ERROR: Removing RSandbox directory failed: " + rsandbox_dir)
        print(" (errno {0}): {1}".format(e.errno, e.strerror))
        print("")
        sys.exit(1)
python
{ "resource": "" }
q267086
H2OCloudNode.scrape_port_from_stdout
test
def scrape_port_from_stdout(self):
    """
    Look at the stdout log and figure out which port the JVM chose.

    If successful, port number is stored in self.port; otherwise the
    program is terminated. This call is blocking, and will wait for
    up to 30s for the server to start up.
    """
    regex = re.compile(r"Open H2O Flow in your web browser: https?://([^:]+):(\d+)")
    retries_left = 30
    while retries_left and not self.terminated:
        # Re-read the whole output file on each attempt; the banner line
        # may appear at any point during startup.
        with open(self.output_file_name, "r") as f:
            for line in f:
                mm = re.search(regex, line)
                if mm is not None:
                    self.port = mm.group(2)
                    print("H2O cloud %d node %d listening on port %s\n    with output file %s"
                          % (self.cloud_num, self.node_num, self.port, self.output_file_name))
                    return
        if self.terminated: break
        retries_left -= 1
        time.sleep(1)

    if self.terminated: return
    print("\nERROR: Too many retries starting cloud %d.\nCheck the output log %s.\n"
          % (self.cloud_num, self.output_file_name))
    sys.exit(1)
python
{ "resource": "" }
q267087
H2OCloudNode.scrape_cloudsize_from_stdout
test
def scrape_cloudsize_from_stdout(self, nodes_per_cloud):
    """
    Look at the stdout log and wait until the cluster of proper size is formed.

    Polls the node's output file once per second, for up to 60 attempts.
    Returns quietly (without error) if the node has been terminated; exits the
    whole program if the cloud never forms. This call is blocking.

    :param nodes_per_cloud: expected number of nodes in the formed cloud.
    :return: None
    """
    retries = 60
    while retries > 0:
        if self.terminated:
            return
        # 'with' guarantees the log file handle is closed on every exit path;
        # the previous implementation leaked the handle when the node was
        # terminated in the middle of scanning the file.
        with open(self.output_file_name, "r") as f:
            for line in f:
                if self.terminated:
                    return
                match_groups = re.search(r"Cloud of size (\d+) formed", line)
                if match_groups is not None:
                    size = match_groups.group(1)
                    if size is not None and int(size) == nodes_per_cloud:
                        return
        retries -= 1
        if self.terminated:
            return
        time.sleep(1)
    print("")
    print("ERROR: Too many retries starting cloud.")
    print("")
    sys.exit(1)
python
{ "resource": "" }
q267088
H2OCloudNode.stop
test
def stop(self):
    """
    Normal node shutdown; failures are ignored for now.

    :return: None
    """
    if self.pid <= 0:
        return
    print("Killing JVM with PID {}".format(self.pid))
    try:
        self.child.terminate()
        self.child.wait()
    except OSError:
        pass
    # Mark the node as stopped so a second stop() call is a no-op.
    self.pid = -1
python
{ "resource": "" }
q267089
H2OCloud.stop
test
def stop(self):
    """
    Normal cluster shutdown: stop all worker nodes, then all client nodes.

    :return: None
    """
    for member in self.nodes + self.client_nodes:
        member.stop()
python
{ "resource": "" }
q267090
H2OCloud.get_ip
test
def get_ip(self):
    """Return an IP address to use to talk to this cluster (client node preferred)."""
    chosen = self.client_nodes[0] if len(self.client_nodes) > 0 else self.nodes[0]
    return chosen.get_ip()
python
{ "resource": "" }
q267091
H2OCloud.get_port
test
def get_port(self):
    """Return a port to use to talk to this cluster (client node preferred)."""
    chosen = self.client_nodes[0] if len(self.client_nodes) > 0 else self.nodes[0]
    return chosen.get_port()
python
{ "resource": "" }
q267092
H2OBinomialModel.roc
test
def roc(self, train=False, valid=False, xval=False):
    """
    Return the coordinates of the ROC curve for a given set of data.

    The coordinates are two-tuples containing the false positive rates as a list
    and true positive rates as a list. If all are False (default), then return is
    the training data. If more than one ROC curve is requested, the data is
    returned as a dictionary of two-tuples.

    :param bool train: If True, return the ROC value for the training data.
    :param bool valid: If True, return the ROC value for the validation data.
    :param bool xval: If True, return the ROC value for each of the cross-validated splits.

    :returns: The ROC values for the specified key(s).
    """
    metrics = ModelBase._get_metrics(self, train, valid, xval)
    curves = {k: (v.fprs, v.tprs) for k, v in viewitems(metrics) if v is not None}
    # A single requested curve is unwrapped from the dictionary.
    return list(curves.values())[0] if len(curves) == 1 else curves
python
{ "resource": "" }
q267093
H2OWord2vecEstimator._determine_vec_size
test
def _determine_vec_size(self): """ Determines vec_size for a pre-trained model after basic model verification. """ first_column = self.pre_trained.types[self.pre_trained.columns[0]] if first_column != 'string': raise H2OValueError("First column of given pre_trained model %s is required to be a String", self.pre_trained.frame_id) if list(self.pre_trained.types.values()).count('string') > 1: raise H2OValueError("There are multiple columns in given pre_trained model %s with a String type.", self.pre_trained.frame_id) self.vec_size = self.pre_trained.dim[1] - 1;
python
{ "resource": "" }
q267094
h2o_mean_absolute_error
test
def h2o_mean_absolute_error(y_actual, y_predicted, weights=None):
    """
    Mean absolute error regression loss.

    :param y_actual: H2OFrame of actual response.
    :param y_predicted: H2OFrame of predicted response.
    :param weights: (Optional) sample weights; currently not applied.
    :returns: mean absolute error loss (best is 0.0).
    """
    ModelBase._check_targets(y_actual, y_predicted)
    residuals = y_predicted - y_actual
    return _colmean(residuals.abs())
python
{ "resource": "" }
q267095
h2o_mean_squared_error
test
def h2o_mean_squared_error(y_actual, y_predicted, weights=None):
    """
    Mean squared error regression loss.

    :param y_actual: H2OFrame of actual response.
    :param y_predicted: H2OFrame of predicted response.
    :param weights: (Optional) sample weights; currently not applied.
    :returns: mean squared error loss (best is 0.0).
    """
    ModelBase._check_targets(y_actual, y_predicted)
    squared_residuals = (y_predicted - y_actual) ** 2
    return _colmean(squared_residuals)
python
{ "resource": "" }
q267096
h2o_median_absolute_error
test
def h2o_median_absolute_error(y_actual, y_predicted):
    """
    Median absolute error regression loss.

    :param y_actual: H2OFrame of actual response.
    :param y_predicted: H2OFrame of predicted response.
    :returns: median absolute error loss (best is 0.0)
    """
    ModelBase._check_targets(y_actual, y_predicted)
    abs_residuals = (y_predicted - y_actual).abs()
    return abs_residuals.median()
python
{ "resource": "" }
q267097
h2o_explained_variance_score
test
def h2o_explained_variance_score(y_actual, y_predicted, weights=None):
    """
    Explained variance regression score function.

    :param y_actual: H2OFrame of actual response.
    :param y_predicted: H2OFrame of predicted response.
    :param weights: (Optional) sample weights
    :returns: the explained variance score.
    """
    ModelBase._check_targets(y_actual, y_predicted)
    _, residual_var = _mean_var(y_actual - y_predicted, weights)
    _, actual_var = _mean_var(y_actual, weights)
    if actual_var == 0.0:
        # Degenerate constant-response case: 0/0 => 1, otherwise 0.
        return 1. if residual_var == 0 else 0.
    return 1 - residual_var / actual_var
python
{ "resource": "" }
q267098
assert_is_type
test
def assert_is_type(var, *types, **kwargs):
    """
    Assert that the argument has the specified type.

    This function is used to check that the type of the argument is correct, otherwise it raises an H2OTypeError.
    See more details in the module's help.

    :param var: variable to check
    :param types: the expected types
    :param kwargs:
        message: override the error message
        skip_frames: how many local frames to skip when printing out the error.

    :raises H2OTypeError: if the argument is not of the desired type.
    """
    assert types, "The list of expected types was not provided"
    # Multiple expected types are combined into a single union matcher.
    expected_type = types[0] if len(types) == 1 else U(*types)
    if _check_type(var, expected_type): return
    # Type check failed => Create a nice error message
    assert set(kwargs).issubset({"message", "skip_frames"}), "Unexpected keyword arguments: %r" % kwargs
    message = kwargs.get("message", None)
    skip_frames = kwargs.get("skip_frames", 1)
    # Inspect the caller's source to recover the argument expressions, so the
    # error can name the offending variable (args[0]) and the type expressions.
    args = _retrieve_assert_arguments()
    vname = args[0]
    etn = _get_type_name(expected_type, dump=", ".join(args[1:]))
    vtn = _get_type_name(type(var))
    raise H2OTypeError(var_name=vname, var_value=var, var_type_name=vtn, exp_type_name=etn, message=message,
                       skip_frames=skip_frames)
python
{ "resource": "" }
q267099
assert_matches
test
def assert_matches(v, regex):
    """
    Assert that string variable matches the provided regular expression.

    :param v: variable to check.
    :param regex: regular expression to check against (can be either a string, or compiled regexp).

    :returns: the match object on success, so callers can use its groups.
    :raises H2OValueError: if the string does not match.
    """
    m = re.match(regex, v)
    if m is None:
        # Inspect the caller's source to recover the first argument's
        # expression, so the error message can name the variable.
        vn = _retrieve_assert_arguments()[0]
        message = "Argument `{var}` (= {val!r}) did not match /{regex}/".format(var=vn, regex=regex, val=v)
        raise H2OValueError(message, var_name=vn, skip_frames=1)
    return m
python
{ "resource": "" }