code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def train(self, docs, retrain=False):
    """Train Doc2Vec on a series of docs, either from scratch or as an update.

    Args:
        docs: list of (assetid, body_text) tuples or a {assetid: body_text}
            dictionary.
        retrain: boolean; if True, always retrain from scratch instead of
            updating the existing model.

    Returns:
        0 if successful.
    """
    # Normalize dict input into an iterable of (assetid, body_text) pairs.
    if isinstance(docs, dict):
        docs = docs.items()
    train_sentences = [self._gen_sentence(item) for item in docs]
    if self.is_trained and not retrain:
        # Incremental update of the already-trained model.
        self.update_model(train_sentences, update_labels_bool=True)
    else:
        self.model = Doc2Vec(train_sentences, size=self.size, window=self.window,
                             min_count=self.min_count, workers=self.workers)
        self.is_trained = True
    return 0
Train Doc2Vec on a series of docs. Train from scratch or update. Args: docs: list of tuples (assetid, body_text) or dictionary {assetid : body_text} retrain: boolean, retrain from scratch or update model saves model in class to self.model Returns: 0 if successful
juraj-google-style
def _create_flow(self, request_handler):
    """Create the Flow object lazily.

    The Flow is calculated lazily since we don't know where this app is
    running until it receives a request, at which point redirect_uri can
    be calculated and then the Flow object can be constructed.

    Args:
        request_handler: webapp.RequestHandler, the request handler.
    """
    if (self.flow is None):
        # Derive the OAuth2 redirect URI from the incoming request.
        redirect_uri = request_handler.request.relative_url(self._callback_path)
        self.flow = client.OAuth2WebServerFlow(self._client_id, self._client_secret, self._scope, redirect_uri=redirect_uri, user_agent=self._user_agent, auth_uri=self._auth_uri, token_uri=self._token_uri, revoke_uri=self._revoke_uri, **self._kwargs)
Create the Flow object. The Flow is calculated lazily since we don't know where this app is running until it receives a request, at which point redirect_uri can be calculated and then the Flow object can be constructed. Args: request_handler: webapp.RequestHandler, the request handler.
codesearchnet
def get_title(page):
    """Extract the contents of the <title> tag from a page.

    Args:
        page: a string containing the page markup.

    Returns:
        The title as a unicode string.
    """
    open_tag, close_tag = "<title>", "</title>"
    begin = page.find(open_tag)
    finish = page.find(close_tag)
    assert begin != -1
    assert finish != -1
    begin += len(open_tag)
    return text_encoder.to_unicode_utf8(page[begin:finish])
Extract the title from a page. Args: page: a string Returns: a string
juraj-google-style
def tokenize_sentence(input_dict):
    """Tokenize the text of a {'text': ..., 'id': ...} record.

    Args:
        input_dict: a dictionary with the text and id of the sentence.

    Returns:
        A tuple of (text, id), and a dictionary of squeezed token tensors.
    """
    text = input_dict['text']
    uid = input_dict['id']
    encoded = Tokenizer([text], padding=True, truncation=True, return_tensors='pt')
    # Drop the batch dimension added by the tokenizer.
    squeezed = {name: torch.squeeze(tensor) for name, tensor in encoded.items()}
    return ((text, uid), squeezed)
Takes a dictionary with a text and an id, tokenizes the text, and returns a tuple of the text and id and the tokenized text Args: input_dict: a dictionary with the text and id of the sentence Returns: A tuple of the text and id, and a dictionary of the tokens.
github-repos
def __init__(self, tag_name, **kwargs):
    """Initializer.

    Derives the tag kind and entry type from the subclass name, which is
    expected to end in 'TagProcessor'.

    Args:
        tag_name: unicode string name of tag to match. Usually u'compound'
            or u'member'.
    """
    class_name = type(self).__name__
    # Everything before the 'TagProcessor' suffix is the tag kind.
    tag_kind = str(class_name[:class_name.rfind('TagProcessor')])
    entry_type = tag_kind.capitalize()
    super(TagProcessorWithAutoEntryTypeAndFindByNamePlusAutoKind, self).__init__(
        entry_type, tag_name, tag_kind, **kwargs)
Initializer. Args: tag_name: unicode string name of tag to match. Usually u'compound' or u'member'.
juraj-google-style
def __getitem__(self, anchor_id):
    """Get an Anchor by ID.

    Args:
        anchor_id: The ID of the anchor to retrieve.

    Returns:
        An anchor instance.

    Raises:
        KeyError: The anchor can not be found.
    """
    file_path = self._anchor_path(anchor_id)
    try:
        with file_path.open(mode='rt') as handle:
            return load_anchor(handle, self.root)
    except OSError as exc:
        # Chain the original I/O error so the root cause is preserved.
        raise KeyError('No anchor with id {}'.format(anchor_id)) from exc
Get an Anchor by ID. Args: anchor_id: The ID of the anchor to retrieve. Returns: An anchor instance. Raises: KeyError: The anchor can not be found.
juraj-google-style
def emit_flow_start(self, name: str, timestamp: int, pid: int, tid: int, flow_id: int) -> None:
    """Adds a flow start event to the trace.

    When matched with a flow end event sharing the same 'flow_id', this
    causes the trace viewer to draw an arrow between the start and end
    events.

    Args:
        name: The event name as a string.
        timestamp: The timestamp of this event as a long integer.
        pid: Identifier of the process generating this event as an integer.
        tid: Identifier of the thread generating this event as an integer.
        flow_id: Identifier of the flow as an integer.
    """
    flow_event = self._create_event('s', 'DataFlow', name, pid, tid, timestamp)
    flow_event['id'] = flow_id
    self._events.append(flow_event)
Adds a flow start event to the trace. When matched with a flow end event (with the same 'flow_id') this will cause the trace viewer to draw an arrow between the start and end events. Args: name: The event name as a string. timestamp: The timestamp of this event as a long integer. pid: Identifier of the process generating this event as an integer. tid: Identifier of the thread generating this event as an integer. flow_id: Identifier of the flow as an integer.
github-repos
def find_dependency_wheels(tile):
    """Return a list of all python wheel paths created by dependencies of this tile.

    Args:
        tile (IOTile): Tile that we should scan for dependencies.

    Returns:
        list: A list of paths to dependency wheels.
    """
    wheels = []
    for dep in _iter_dependencies(tile):
        if dep.has_wheel:
            wheels.append(os.path.join(dep.folder, 'python', dep.support_wheel))
    return wheels
Return a list of all python wheel objects created by dependencies of this tile Args: tile (IOTile): Tile that we should scan for dependencies Returns: list: A list of paths to dependency wheels
juraj-google-style
def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
    """Write the data encoding the GetAttributes response payload to a stream.

    Args:
        output_buffer (stream): A data stream in which to encode object
            data, supporting a write method; usually a BytearrayStream
            object.
        kmip_version (KMIPVersion): An enumeration defining the KMIP
            version with which the object will be encoded. Optional,
            defaults to KMIP 1.0.

    Raises:
        InvalidField: If the unique identifier or the attributes list
            is not set.
    """
    # Encode into a scratch buffer first so the total payload length is
    # known before the superclass writes the header.
    local_buffer = utils.BytearrayStream()

    if self._unique_identifier:
        self._unique_identifier.write(
            local_buffer,
            kmip_version=kmip_version
        )
    else:
        raise exceptions.InvalidField(
            "The GetAttributes response payload is missing the unique "
            "identifier field."
        )

    if kmip_version < enums.KMIPVersion.KMIP_2_0:
        # Pre-2.0 encodings serialize each attribute individually.
        for attribute in self._attributes:
            attribute.write(local_buffer, kmip_version=kmip_version)
    else:
        # KMIP 2.0+ wraps attributes in an Attributes structure, built by
        # converting from the legacy TemplateAttribute representation.
        if self._attributes:
            template_attribute = objects.TemplateAttribute(
                attributes=self.attributes
            )
            attributes = objects.convert_template_attribute_to_attributes(
                template_attribute
            )
            attributes.write(local_buffer, kmip_version=kmip_version)
        else:
            raise exceptions.InvalidField(
                "The GetAttributes response payload is missing the "
                "attributes list."
            )

    self.length = local_buffer.length()
    super(GetAttributesResponsePayload, self).write(
        output_buffer,
        kmip_version=kmip_version
    )
    output_buffer.write(local_buffer.buffer)
Write the data encoding the GetAttributes response payload to a stream. Args: output_buffer (stream): A data stream in which to encode object data, supporting a write method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0.
juraj-google-style
def _check_format_string(self, node, format_arg):
    """Checks that format string tokens match the supplied arguments.

    Args:
        node (astroid.node_classes.NodeNG): AST node to be checked.
        format_arg (int): Index of the format string in the node arguments.
    """
    # Arguments after the format string are the values to interpolate.
    num_args = _count_supplied_tokens(node.args[format_arg + 1 :])
    if not num_args:
        # Nothing was supplied to interpolate, so nothing can mismatch.
        return
    format_string = node.args[format_arg].value
    if not isinstance(format_string, str):
        # Not a constant string; we cannot count its placeholders.
        required_num_args = 0
    else:
        try:
            if self._format_style == "old":
                # %-style format string.
                keyword_args, required_num_args, _, _ = utils.parse_format_string(
                    format_string
                )
                if keyword_args:
                    # Keyword placeholders are out of scope for this check.
                    return
            elif self._format_style == "new":
                # str.format-style format string.
                keyword_arguments, implicit_pos_args, explicit_pos_args = utils.parse_format_method_string(
                    format_string
                )
                # Count only named (non-integer) keyword fields.
                keyword_args_cnt = len(
                    set(k for k, l in keyword_arguments if not isinstance(k, int))
                )
                required_num_args = (
                    keyword_args_cnt + implicit_pos_args + explicit_pos_args
                )
            # NOTE(review): if _format_style is neither "old" nor "new",
            # required_num_args would be unbound below — presumably
            # prevented by upstream validation; confirm.
        except utils.UnsupportedFormatCharacter as ex:
            char = format_string[ex.index]
            self.add_message(
                "logging-unsupported-format",
                node=node,
                args=(char, ord(char), ex.index),
            )
            return
        except utils.IncompleteFormatString:
            self.add_message("logging-format-truncated", node=node)
            return
    if num_args > required_num_args:
        self.add_message("logging-too-many-args", node=node)
    elif num_args < required_num_args:
        self.add_message("logging-too-few-args", node=node)
Checks that format string tokens match the supplied arguments. Args: node (astroid.node_classes.NodeNG): AST node to be checked. format_arg (int): Index of the format string in the node arguments.
juraj-google-style
def CallHwclock(logger):
    """Sync the system clock from the hardware clock using hwclock.

    Args:
        logger: logger object, used to write to SysLog and serial port.
    """
    command = ['/sbin/hwclock', '--hctosys']
    try:
        subprocess.check_call(command)
    except (subprocess.CalledProcessError, OSError):
        # OSError covers a missing /sbin/hwclock binary, which previously
        # escaped as an uncaught exception and crashed the caller.
        logger.warning('Failed to sync system time with hardware clock.')
    else:
        logger.info('Synced system time with hardware clock.')
Sync clock using hwclock. Args: logger: logger object, used to write to SysLog and serial port.
juraj-google-style
def decode_predictions(preds, top=5):
    """Decodes the prediction of an ImageNet model.

    Args:
        preds: NumPy array encoding a batch of predictions.
        top: Integer, how many top-guesses to return. Defaults to `5`.

    Returns:
        A list of lists of top class prediction tuples
        `(class_name, class_description, score)`. One list of tuples per
        sample in batch input.

    Raises:
        ValueError: In case of invalid shape of the `pred` array
            (must be 2D).
    """
    global CLASS_INDEX
    if len(preds.shape) != 2 or preds.shape[1] != 1000:
        raise ValueError(f'`decode_predictions` expects a batch of predictions (i.e. a 2D array of shape (samples, 1000)). Received array with shape: {preds.shape}')
    if CLASS_INDEX is None:
        # Lazily download and cache the class-index mapping on first use.
        fpath = file_utils.get_file('imagenet_class_index.json', CLASS_INDEX_PATH, cache_subdir='models', file_hash='c2c37ea517e94d9795004a39431a14cb')
        with open(fpath) as f:
            CLASS_INDEX = json.load(f)
    results = []
    preds = ops.convert_to_numpy(preds)
    for pred in preds:
        # Indices of the `top` largest scores, highest score first.
        top_indices = pred.argsort()[-top:][::-1]
        result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]
        result.sort(key=lambda x: x[2], reverse=True)
        results.append(result)
    return results
Decodes the prediction of an ImageNet model. Args: preds: NumPy array encoding a batch of predictions. top: Integer, how many top-guesses to return. Defaults to `5`. Returns: A list of lists of top class prediction tuples `(class_name, class_description, score)`. One list of tuples per sample in batch input. Raises: ValueError: In case of invalid shape of the `pred` array (must be 2D).
github-repos
def __init__(self, option_strings, dest, help, metavar, flag_instance):
    """Initializes _FlagAction.

    Args:
        option_strings: See argparse.Action.
        dest: Ignored. The flag is always defined with dest=argparse.SUPPRESS.
        help: See argparse.Action.
        metavar: See argparse.Action.
        flag_instance: absl.flags.Flag, the absl flag instance.
    """
    self._flag_instance = flag_instance
    del dest  # Unused: the value lives on the Flag instance, not the namespace.
    super(_FlagAction, self).__init__(
        option_strings=option_strings,
        dest=argparse.SUPPRESS,
        help=help,
        metavar=metavar)
Initializes _FlagAction. Args: option_strings: See argparse.Action. dest: Ignored. The flag is always defined with dest=argparse.SUPPRESS. help: See argparse.Action. metavar: See argparse.Action. flag_instance: absl.flags.Flag, the absl flag instance.
juraj-google-style
def compute_mask_offsets(shard_id2num_examples):
    """Return the list of offsets associated with each shard.

    Args:
        shard_id2num_examples: `list[int]`, mapping shard_id=>num_examples.

    Returns:
        mask_offsets: `list[int]`, offset to skip for each of the shards.
    """
    mask_offsets = []
    # Running count of examples contained in the preceding shards.
    # (The original pre-computed sum(...) here was dead code: the value
    # was immediately overwritten with 0.)
    total_num_examples = 0
    for num_examples_in_shard in shard_id2num_examples:
        # Offsets cycle modulo 100 (the mask period).
        mask_offsets.append(total_num_examples % 100)
        total_num_examples += num_examples_in_shard
    return mask_offsets
Return the list of offsets associated with each shards. Args: shard_id2num_examples: `list[int]`, mapping shard_id=>num_examples Returns: mask_offsets: `list[int]`, offset to skip for each of the shard
codesearchnet
def dns_rr(self, ips):
    """Get the DNS A resource records related to the given names.

    Args:
        ips: an enumerable of strings to query.

    Returns:
        An enumerable of resource records and features.
    """
    return self._multi_get('opendns-dns_rr', u'dnsdb/name/a/{0}.json', ips)
Get the DNS A resource records related to the input names. Args: ips: an enumerable of strings to query Returns: An enumerable of resource records and features
juraj-google-style
def disable_lower_using_switch_merge(graph_def):
    """Set '_lower_using_switch_merge' attributes to False.

    Sets the attribute to False in the NodeDefs in the main graph and the
    NodeDefs in each function's graph.

    Args:
        graph_def: GraphDef proto.

    Returns:
        GraphDef with lowering disabled on all control-flow nodes.
    """
    result = graph_pb2.GraphDef()
    result.CopyFrom(graph_def)

    def _disable(node_def):
        # Only control-flow ops carry the lowering attribute.
        if node_def.op in _CONTROL_FLOW_OPS:
            node_def.attr['_lower_using_switch_merge'].b = False

    for node_def in result.node:
        _disable(node_def)
    if result.library:
        for function_def in result.library.function:
            for node_def in function_def.node_def:
                _disable(node_def)
    return result
Set '_lower_using_switch_merge' attributes to False. Sets the attribute to False in the NodeDefs in the main graph and the NodeDefs in each function's graph. Args: graph_def: GraphDef proto. Returns: GraphDef
github-repos
def merge_pot1_files(self, delete_source=True):
    """Merge first-order DFPT potential (POT1) files into a single DVDB file.

    This method is called when all the q-points have been computed.
    It runs `mrgdvdb` in sequential on the local machine to produce the
    final DVDB file in the outdir of the `Work`.

    Args:
        delete_source: True if POT1 files should be removed after
            (successful) merge.

    Returns:
        Path to the output DVDB file. None if no DFPT POT file is found.
    """
    natom = len(self[0].input.structure)
    # 3 * natom atomic perturbations (three directions per atom).
    max_pertcase = (3 * natom)

    # Collect the POT1 files produced by the DFPT tasks of this work.
    pot1_files = []
    for task in self:
        if (not isinstance(task, DfptTask)): continue
        paths = task.outdir.list_filepaths(wildcard='*_POT*')
        for path in paths:
            # Parse the perturbation index from the "..._POT<pertcase>.nc" name.
            i = path.rindex('_POT')
            pertcase = int(path[(i + 4):].replace('.nc', ''))
            # Only atomic perturbations go into the DVDB.
            if (pertcase <= max_pertcase):
                pot1_files.append(path)

    if (not pot1_files): return None

    self.history.info(('Will call mrgdvdb to merge %s files:' % len(pot1_files)))
    out_dvdb = self.outdir.path_in('out_DVDB')

    if (len(pot1_files) == 1):
        # A single file needs no merging; copy it to the destination.
        shutil.copy(pot1_files[0], out_dvdb)
    else:
        mrgdvdb = wrappers.Mrgdvdb(manager=self[0].manager, verbose=0)
        mrgdvdb.merge(self.outdir.path, pot1_files, out_dvdb, delete_source=delete_source)

    return out_dvdb
This method is called when all the q-points have been computed. It runs `mrgdvdb` sequentially on the local machine to produce the final DVDB file in the outdir of the `Work`. Args: delete_source: True if POT1 files should be removed after (successful) merge. Returns: path to the output DVDB file. None if no DFPT POT file is found.
codesearchnet
def wavfile_to_examples(wav_file):
    """Convenience wrapper around waveform_to_examples() for a common WAV format.

    Args:
        wav_file: String path to a file, or a file-like object. The file is
            assumed to contain WAV audio data with signed 16-bit PCM samples.

    Returns:
        See waveform_to_examples.
    """
    from scipy.io import wavfile
    sample_rate, pcm_data = wavfile.read(wav_file)
    assert pcm_data.dtype == np.int16, 'Bad sample type: %r' % pcm_data.dtype
    # Scale int16 PCM into floats in [-1.0, 1.0).
    normalized = pcm_data / 32768.0
    return waveform_to_examples(normalized, sample_rate)
Convenience wrapper around waveform_to_examples() for a common WAV format. Args: wav_file: String path to a file, or a file-like object. The file is assumed to contain WAV audio data with signed 16-bit PCM samples. Returns: See waveform_to_examples.
juraj-google-style
def execute_no_wait(self, cmd, walltime, envs=None):
    """Asynchronously start a commandline string on the shell.

    Args:
        cmd (string): Commandline string to execute.
        walltime (int): Walltime in seconds; this is not really used now.
        envs (dict): Extra environment variables overlaid on the channel's
            base environment. Defaults to no extra variables (previously a
            shared mutable default ``{}``).

    Returns:
        (pid, proc): Process id and the subprocess.Popen handle.

    Raises:
        Exception: Re-raises whatever subprocess.Popen raised if the
            process could not be started. (The original logged the error
            and then hit a NameError returning undefined locals.)
    """
    current_env = copy.deepcopy(self._envs)
    current_env.update(envs or {})
    try:
        proc = subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            cwd=self.userhome,
            env=current_env,
            shell=True,
            preexec_fn=os.setpgrp)
    except Exception as e:
        # logger.warn is deprecated and the original passed a single tuple
        # to a two-placeholder format string; log properly and propagate.
        logger.warning('Execution of command [%s] failed due to %s', cmd, e)
        raise
    return (proc.pid, proc)
Synchronously execute a commandline string on the shell. Args: - cmd (string) : Commandline string to execute - walltime (int) : walltime in seconds, this is not really used now. Returns: - retcode : Return code from the execution, -1 on fail - stdout : stdout string - stderr : stderr string Raises: None.
codesearchnet
def get_name(self, tag):
    """Extract and return a representative "name" from a tag.

    If include_parent_scopes was set when the TagProcessor was initialized,
    a class, struct, or namespace parent scope is prefixed to the name
    with a '::' separator.

    Args:
        tag: A BeautifulSoup Tag that satisfies match_criterion.

    Returns:
        A string appropriate for use as an entry name in a Zeal database.
    """
    name = tag.findChild('name').contents[0]
    if not self.include_parent_scopes:
        return name
    parent = tag.findParent()
    if parent.get('kind') in ('class', 'struct', 'namespace'):
        name = parent.findChild('name').contents[0] + '::' + name
    return name
Extract and return a representative "name" from a tag. Override as necessary. get_name's output can be controlled through keyword arguments that are provided when initializing a TagProcessor. For instance, a member of a class or namespace can have its parent scope included in the name by passing include_parent_scopes=True to __init__(). Args: tag: A BeautifulSoup Tag that satisfies match_criterion. Returns: A string that would be appropriate to use as an entry name in a Zeal database.
codesearchnet
def __call__(self, input: EventSet) -> Dict[str, EventSet]:
    """Applies the corresponding arithmetic operation between an EventSet
    and a scalar.

    Args:
        input: Event set to perform the operation to.

    Returns:
        Result of the operation, under the 'output' key.
    """
    assert isinstance(self.operator, BaseScalarOperator)
    output_schema = self.output_schema('output')
    dst_evset = EventSet(data={}, schema=output_schema)
    # Apply the scalar operation feature-by-feature within each index value,
    # keeping the original timestamps.
    for index_key, index_data in input.data.items():
        dst_evset.set_index_value(index_key, IndexData([self._do_operation(feature, self.operator.value, input.schema.features[feature_idx].dtype) for feature_idx, feature in enumerate(index_data.features)], index_data.timestamps, schema=output_schema), normalize=False)
    return {'output': dst_evset}
Applies the corresponding arithmetic operation between an EventSet and a scalar. Args: input: Event set to perform the operation to. Returns: Result of the operation.
github-repos
async def validate(state, holdout_glob):
    """Validate the trained model against holdout games.

    Args:
        state: the RL loop State instance.
        holdout_glob: a glob that matches holdout games.
    """
    if not glob.glob(holdout_glob):
        # Nothing matched: validation would have no data to run on.
        print('Glob "{}" didn\'t match any files, skipping validation'.format(holdout_glob))
        return
    await run(
        'python3', 'validate.py', holdout_glob,
        '--flagfile={}'.format(os.path.join(FLAGS.flags_dir, 'validate.flags')),
        '--work_dir={}'.format(fsdb.working_dir()))
Validate the trained model against holdout games. Args: state: the RL loop State instance. holdout_glob: a glob that matches holdout games.
codesearchnet
def word_ngrams(s, n=3, token_fn=tokens.on_whitespace):
    """Word-level n-grams in a string.

    By default, whitespace is assumed to be a word boundary. If the token
    sequence has length <= n, a single n-gram covering the whole sequence
    is produced.

    Args:
        s: a string.
        n: size of each n-gram.
        token_fn: callable that splits a string into tokens.

    Returns:
        list: tuples of word-level n-grams.
    """
    words = token_fn(s)
    window = min(len(words), n)
    return __ngrams(words, n=window)
Word-level n-grams in a string. By default, whitespace is assumed to be a word boundary. >>> ng.word_ngrams('This is not a test!') [('This', 'is', 'not'), ('is', 'not', 'a'), ('not', 'a', 'test!')] If the sequence's length is less than or equal to n, the n-grams are simply the sequence itself. >>> ng.word_ngrams('Test!') [('Test!',)] Args: s: a string Returns: list: tuples of word-level n-grams
codesearchnet
def supported_tasks(self, lang=None):
    """Tasks that are covered, optionally restricted to one language.

    Args:
        lang (string): Language code name; when omitted, all covered
            tasks are listed.
    """
    if not lang:
        # No language given: list every task-prefixed collection.
        return [x.name.split()[0] for x in self.collections()
                if Downloader.TASK_PREFIX in x.id]
    collection = self.get_collection(lang=lang)
    return [package.id.split('.')[0] for package in collection.packages]
Tasks that are covered for a specific language, or all covered tasks when no language is given. Args: lang (string): Language code name.
juraj-google-style
def compute_gradients(self, *args, **kwargs):
    """Compute gradients of "loss" for the variables in "var_list".

    This simply delegates to the wrapped optimizer's compute_gradients().
    The gradients will be aggregated later in apply_gradients() so the
    user can modify them (e.g. clipping with per-replica global norm).

    Args:
        *args: Arguments for compute_gradients().
        **kwargs: Keyword arguments for compute_gradients().

    Returns:
        A list of (gradient, variable) pairs.
    """
    delegate = self._opt
    return delegate.compute_gradients(*args, **kwargs)
Compute gradients of "loss" for the variables in "var_list". This simply wraps the compute_gradients() from the real optimizer. The gradients will be aggregated in the apply_gradients() so that user can modify the gradients like clipping with per replica global norm if needed. The global norm with aggregated gradients can be bad as one replica's huge gradients can hurt the gradients from other replicas. Args: *args: Arguments for compute_gradients(). **kwargs: Keyword arguments for compute_gradients(). Returns: A list of (gradient, variable) pairs.
github-repos
def register_extension(self, group, name, extension):
    """Register an extension.

    Args:
        group (str): The type of the extension.
        name (str): A name for the extension.
        extension (str or class): If this is a string, it is interpreted
            as a path to import and load; otherwise it is treated as the
            extension object itself.
    """
    if isinstance(extension, str):
        # A string is a dotted path: loading it yields (name, extension).
        name, extension = self.load_extension(extension)[0]
    self._registered_extensions.setdefault(group, []).append((name, extension))
Register an extension. Args: group (str): The type of the extension name (str): A name for the extension extension (str or class): If this is a string, then it will be interpreted as a path to import and load. Otherwise it will be treated as the extension object itself.
codesearchnet
def get_repo_config(self, repo='default'):
    """Retrieve configuration for a given repository.

    Args:
        repo (str): a repository "realm" (alias) or its URL.

    Returns:
        RepositoryConfig: if there is configuration for that repository.
        None: otherwise.
    """
    for candidate in self.repositories:
        if candidate.name == repo:
            return candidate
        if candidate.url in RepositoryURL(repo):
            return candidate
    return None
Retrieve configuration for a given repository. Args: repo (str): a repository "realm" (alias) or its URL Returns: RepositoryConfig: if there is configuration for that repository None: otherwise
codesearchnet
def _resolve_task_logging(job_metadata, job_resources, task_descriptors):
    """Resolve the logging path from job and task properties.

    The logging path may have substitution parameters such as job-id,
    task-id, user-id, and job-name; each task gets its own resolved path,
    stored on its task_resources.

    Args:
        job_metadata: Job metadata, such as job-id, job-name, and user-id.
        job_resources: Resources specified such as ram, cpu, and logging path.
        task_descriptors: Task metadata, parameters, and resources.
    """
    if (not job_resources.logging):
        # No logging requested for this job; nothing to resolve.
        return
    for task_descriptor in task_descriptors:
        logging_uri = provider_base.format_logging_uri(job_resources.logging.uri, job_metadata, task_descriptor.task_metadata)
        logging_path = job_model.LoggingParam(logging_uri, job_resources.logging.file_provider)
        if task_descriptor.task_resources:
            # Resources is a namedtuple; _replace returns an updated copy.
            task_descriptor.task_resources = task_descriptor.task_resources._replace(logging_path=logging_path)
        else:
            task_descriptor.task_resources = job_model.Resources(logging_path=logging_path)
Resolve the logging path from job and task properties. Args: job_metadata: Job metadata, such as job-id, job-name, and user-id. job_resources: Resources specified such as ram, cpu, and logging path. task_descriptors: Task metadata, parameters, and resources. Resolve the logging path, which may have substitution parameters such as job-id, task-id, user-id, and job-name.
codesearchnet
def poisson(data):
    """Create a segment cost function for a time series with a Poisson
    distribution with changing mean.

    Args:
        data (:obj:`list` of float): 1D time series data.

    Returns:
        function: Function with signature (int, int) -> float where the
            first arg is the starting index and the second the ending
            index; returns the cost of that segment.
    """
    # Prepend a zero so the cumulative sum aligns with segment indices.
    padded = np.hstack(([0.0], np.array(data)))
    cumm = np.cumsum(padded)

    def cost(s, t):
        """Cost of the segment [s, t) under a changing-mean Poisson model."""
        diff = cumm[t] - cumm[s]
        if diff == 0:
            return -2 * diff * (-np.log(t - s) - 1)
        return -2 * diff * (np.log(diff) - np.log(t - s) - 1)

    return cost
Creates a segment cost function for a time series with a poisson distribution with changing mean Args: data (:obj:`list` of float): 1D time series data Returns: function: Function with signature (int, int) -> float where the first arg is the starting index, and the second is the last arg. Returns the cost of that segment
codesearchnet
def running_instances(self, context, process_name):
    """Get a list of running instances.

    Args:
        context (`ResolvedContext`): Context the process is running in.
        process_name (str): Name of the process.

    Returns:
        List of (`subprocess.Popen`, start-time) 2-tuples, where
        start_time is the epoch time the process was added.
    """
    handle = (id(context), process_name)
    # dict.itervalues() is Python 2 only and raised AttributeError under
    # Python 3; use values() instead.
    entries = self.processes.get(handle, {}).values()
    # poll() returning None means the process is still running.
    return [entry for entry in entries if entry[0].poll() is None]
Get a list of running instances. Args: context (`ResolvedContext`): Context the process is running in. process_name (str): Name of the process. Returns: List of (`subprocess.Popen`, start-time) 2-tuples, where start_time is the epoch time the process was added.
codesearchnet
def structure_from_ncdata(ncdata, site_properties=None, cls=Structure):
    """Reads and returns a pymatgen structure from a NetCDF file
    containing crystallographic data in the ETSF-IO format.

    Args:
        ncdata: filename or NetcdfReader instance.
        site_properties: Dictionary with site properties.
        cls: The Structure class to instantiate.
    """
    ncdata, closeit = as_ncreader(ncdata)
    # Lattice vectors are stored in Bohr; convert to Angstrom.
    lattice = ArrayWithUnit(ncdata.read_value('primitive_vectors'), 'bohr').to('ang')
    red_coords = ncdata.read_value('reduced_atom_positions')
    natom = len(red_coords)
    znucl_type = ncdata.read_value('atomic_numbers')
    type_atom = ncdata.read_value('atom_species')
    species = natom * [None]
    for atom in range(natom):
        # atom_species holds Fortran-style 1-based species indices.
        type_idx = type_atom[atom] - 1
        species[atom] = int(znucl_type[type_idx])
    d = {}
    if site_properties is not None:
        for prop in site_properties:
            # BUG FIX: the original wrote d[property] (the builtin), so all
            # properties collapsed onto a single bogus key.
            d[prop] = ncdata.read_value(prop)
    structure = cls(lattice, species, red_coords, site_properties=d)
    # Optionally upgrade to the abipy Structure subclass when available.
    try:
        from abipy.core.structure import Structure as AbipyStructure
        structure.__class__ = AbipyStructure
    except ImportError:
        pass
    if closeit:
        ncdata.close()
    return structure
Reads and returns a pymatgen structure from a NetCDF file containing crystallographic data in the ETSF-IO format. Args: ncdata: filename or NetcdfReader instance. site_properties: Dictionary with site properties. cls: The Structure class to instantiate.
codesearchnet
def _TransposeTridiagonalMatrix(diags):
    """Transposes a tridiagonal matrix.

    Args:
        diags: the diagonals of the input matrix in the compact form
            (see linalg_ops.tridiagonal_solve).

    Returns:
        Diagonals of the transposed matrix in the compact form.
    """
    # The main diagonal is unchanged by transposition.
    diag = diags[..., 1, :]
    if diags.shape.is_fully_defined():
        # Static shapes: build the zero column explicitly and concatenate.
        zeros = array_ops.zeros(list(diags.shape[:-2]) + [1], dtype=diags.dtype)
        # Transposition swaps sub- and superdiagonals, each shifted by one.
        superdiag = array_ops.concat((diags[..., 2, 1:], zeros), axis=-1)
        subdiag = array_ops.concat((zeros, diags[..., 0, :-1]), axis=-1)
    else:
        # Dynamic shapes: express the shift as padding instead.
        rank = array_ops.rank(diags)
        zeros = array_ops.zeros((rank - 2, 2), dtype=dtypes.int32)
        superdiag_pad = array_ops.concat((zeros, array_ops.constant([[0, 1]])), axis=0)
        superdiag = array_ops.pad(diags[..., 2, 1:], superdiag_pad)
        subdiag_pad = array_ops.concat((zeros, array_ops.constant([[1, 0]])), axis=0)
        subdiag = array_ops.pad(diags[..., 0, :-1], subdiag_pad)
    return array_ops_stack.stack([superdiag, diag, subdiag], axis=-2)
Transposes a tridiagonal matrix. Args: diags: the diagonals of the input matrix in the compact form (see linalg_ops.tridiagonal_solve). Returns: Diagonals of the transposed matrix in the compact form.
github-repos
def post_url(self, url, token='', json=None, data=None, headers=None):
    """Returns a post request object for a url, token, and optional payload.

    Args:
        url (str): The url to make post to.
        token (str): The authentication token; defaults to the stored
            user token when empty.
        json (dict): json info to send.
        data: form payload to send.
        headers (dict): extra headers merged with the auth header.

    Returns:
        obj: Post request object.
    """
    if token == '':
        token = self._user_token
    auth_header = {'Authorization': 'Token {}'.format(token)}
    if headers:
        headers.update(auth_header)
    else:
        headers = auth_header
    # NOTE(review): verify=False disables TLS certificate validation on
    # every request — confirm this is intentional for this deployment.
    if json:
        return requests.post(url, headers=headers, json=json, verify=False)
    if data:
        return requests.post(url, headers=headers, data=data, verify=False)
    return requests.post(url, headers=headers, verify=False)
Returns a post resquest object taking in a url, user token, and possible json information. Arguments: url (str): The url to make post to token (str): The authentication token json (dict): json info to send Returns: obj: Post request object
codesearchnet
def write_tree_newick(self, filename, hide_rooted_prefix=False):
    """Write this ``Tree`` to a Newick file.

    Args:
        ``filename`` (``str``): Path to desired output file (plain-text,
            or gzipped when the name ends in .gz).
        ``hide_rooted_prefix`` (``bool``): Strip a leading '[&R]'
            rooted-tree marker from the output.
    """
    if not isinstance(filename, str):
        raise TypeError('filename must be a str')
    treestr = self.newick()
    if hide_rooted_prefix:
        if treestr.startswith('[&R]'):
            treestr = treestr[4:].strip()
        else:
            warn('Specified hide_rooted_prefix, but tree was not rooted')
    # Use context managers so handles are closed even if a write fails
    # (the original leaked the handle on error).
    if filename.lower().endswith('.gz'):
        with gopen(expanduser(filename), 'wb', 9) as f:
            f.write(treestr.encode())
    else:
        with open(expanduser(filename), 'w') as f:
            f.write(treestr)
Write this ``Tree`` to a Newick file Args: ``filename`` (``str``): Path to desired output file (plain-text or gzipped)
codesearchnet
def copy_workspace(self, uri, new_name):
    """Copy the current workspace.

    Args:
        - uri (dict): the uri of the workspace being copied. Needs to
            have 'did' and 'wvm' keys.
        - new_name (str): the new name of the copied workspace.

    Returns:
        - requests.Response: Onshape response data.
    """
    endpoint = '/api/documents/' + uri['did'] + '/workspaces/' + uri['wvm'] + '/copy'
    body = {'isPublic': True, 'newName': new_name}
    return self._api.request('post', endpoint, body=body)
Copy the current workspace. Args: - uri (dict): the uri of the workspace being copied. Needs to have a did and wid key. - new_name (str): the new name of the copied workspace. Returns: - requests.Response: Onshape response data
juraj-google-style
def sub(self, other, axis="columns", level=None, fill_value=None):
    """Subtract a DataFrame/Series/scalar from this DataFrame.

    Args:
        other: The object to use to apply the subtraction to this.
        axis: The axis to apply the subtraction over.
        level: Multilevel index level to subtract over.
        fill_value: The value to fill NaNs with.

    Returns:
        A new DataFrame with the subtraction applied.
    """
    options = dict(axis=axis, level=level, fill_value=fill_value)
    return self._binary_op("sub", other, **options)
Subtract a DataFrame/Series/scalar from this DataFrame. Args: other: The object to use to apply the subtraction to this. axis: The axis to apply the subtraction over. level: Multilevel index level to subtract over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the subtraction applied.
juraj-google-style
def create_primes(threshold):
    """Generate prime values using the sieve of Eratosthenes method.

    Args:
        threshold (int): The upper bound for the size of the prime values.

    Returns (List[int]):
        All primes from 2 and up to ``threshold``.
    """
    if threshold < 2:
        return []
    if threshold == 2:
        return [2]
    # Sieve over odd candidates only; index i holds the value 2*i + 3.
    candidates = list(range(3, threshold + 1, 2))
    limit = threshold ** 0.5
    half = int((threshold + 1) / 2 - 1)
    idx = 0
    value = 3
    while value <= limit:
        if candidates[idx]:
            # Cross off odd multiples, starting at value squared.
            marker = int((value * value - 3) / 2)
            candidates[marker] = 0
            while marker < half:
                candidates[marker] = 0
                marker += value
        idx += 1
        value = 2 * idx + 3
    return [2] + [c for c in candidates if c]
Generate prime values using sieve of Eratosthenes method. Args: threshold (int): The upper bound for the size of the prime values. Returns (List[int]): All primes from 2 and up to ``threshold``.
juraj-google-style
def static_nrows(self):
    """The number of rows in this partition, if statically known.

    ```python
    self.row_lengths().shape == [self.static_nrows]
    self.row_starts().shape == [self.static_nrows]
    self.row_limits().shape == [self.static_nrows]
    self.row_splits().shape == [self.static_nrows + 1]
    ```

    Returns:
        The number of rows in this partition as an `int` (if statically
        known); or `None` (otherwise).
    """
    if self._row_splits is not None:
        # row_splits has one more entry than there are rows.
        nrows_plus_one = tensor_shape.dimension_value(self._row_splits.shape[0])
        if nrows_plus_one is not None:
            return nrows_plus_one - 1
    if self._row_lengths is not None:
        # One length per row.
        nrows = tensor_shape.dimension_value(self._row_lengths.shape[0])
        if nrows is not None:
            return nrows
    if self._nrows is not None:
        # Fall back to an explicitly stored nrows tensor, if constant.
        return tensor_util.constant_value(self._nrows)
    return None
The number of rows in this partition, if statically known. ```python self.row_lengths().shape == [self.static_nrows] self.row_starts().shape == [self.static_nrows] self.row_limits().shape == [self.static_nrows] self.row_splits().shape == [self.static_nrows + 1] ``` Returns: The number of rows in this partition as an `int` (if statically known); or `None` (otherwise).
github-repos
def guess_task_type(name, task_defn):
    """Guess the task type of the task.

    Args:
        name (str): the name of the task.
        task_defn: the task definition, used to distinguish action from
            decision tasks.

    Returns:
        str: the task_type.

    Raises:
        CoTError: on invalid task_type.
    """
    task_type = name.split(':')[-1]
    if task_type == 'parent':
        # 'parent' is ambiguous: resolve via the task definition.
        task_type = 'action' if is_action(task_defn) else 'decision'
    if task_type not in get_valid_task_types():
        raise CoTError(
            "Invalid task type for {}!".format(name)
        )
    return task_type
Guess the task type of the task. Args: name (str): the name of the task. Returns: str: the task_type. Raises: CoTError: on invalid task_type.
juraj-google-style
def predict_step(self, data):
    """The logic for one inference step.

    Runs the forward pass with training=False. Override this method to
    support custom inference logic; it is called by
    `Model.make_predict_function`.

    Args:
        data: A nested structure of `Tensor`s.

    Returns:
        The result of one inference step, typically the output of calling
        the `Model` on data.
    """
    expanded = data_adapter.expand_1d(data)
    x, _, _ = data_adapter.unpack_x_y_sample_weight(expanded)
    return self(x, training=False)
The logic for one inference step. This method can be overridden to support custom inference logic. This method is called by `Model.make_predict_function`. This method should contain the mathematical logic for one step of inference. This typically includes the forward pass. Configuration details for *how* this logic is run (e.g. `tf.function` and `tf.distribute.Strategy` settings), should be left to `Model.make_predict_function`, which can also be overridden. Args: data: A nested structure of `Tensor`s. Returns: The result of one inference step, typically the output of calling the `Model` on data.
github-repos
def patch_toText(self, patches):
    """Take a list of patches and return a textual representation.

    Args:
        patches: Array of Patch objects.

    Returns:
        Text representation of patches.
    """
    return "".join(str(patch) for patch in patches)
Take a list of patches and return a textual representation. Args: patches: Array of Patch objects. Returns: Text representation of patches.
juraj-google-style
def _operation_status_message(self):
    """Returns the most relevant status string and failed action.

    This string is meant for display only.

    Returns:
        A printable status string and name of failed action (if any).
    """
    msg = None
    action = None
    if (not google_v2_operations.is_done(self._op)):
        # Operation still running: report the most recent event, if any.
        last_event = google_v2_operations.get_last_event(self._op)
        if last_event:
            msg = last_event['description']
            action_id = last_event.get('details', {}).get('actionId')
            if action_id:
                action = google_v2_operations.get_action_by_id(self._op, action_id)
        else:
            msg = 'Pending'
    else:
        # Operation finished: prefer stderr from the last failed event.
        failed_events = google_v2_operations.get_failed_events(self._op)
        if failed_events:
            failed_event = failed_events[(- 1)]
            msg = failed_event.get('details', {}).get('stderr')
            action_id = failed_event.get('details', {}).get('actionId')
            if action_id:
                action = google_v2_operations.get_action_by_id(self._op, action_id)
        if (not msg):
            # Fall back to the operation-level error, then to success.
            error = google_v2_operations.get_error(self._op)
            if error:
                msg = error['message']
            else:
                msg = 'Success'
    return (msg, action)
Returns the most relevant status string and failed action. This string is meant for display only. Returns: A printable status string and name of failed action (if any).
codesearchnet
def on_success(self, inv_plugin, emit_set_slot):
    """Called when the click was successful and should be applied to the
    inventory.

    Args:
        inv_plugin (InventoryPlugin): inventory plugin instance.
        emit_set_slot (func): function to signal a slot change, should be
            InventoryPlugin().emit_set_slot.
    """
    # apply() fills self.dirty with every slot it touched.
    self.dirty = set()
    self.apply(inv_plugin)
    for slot in self.dirty:
        emit_set_slot(slot)
Called when the click was successful and should be applied to the inventory. Args: inv_plugin (InventoryPlugin): inventory plugin instance emit_set_slot (func): function to signal a slot change, should be InventoryPlugin().emit_set_slot
juraj-google-style
def compute(self, t, yerr=1.123e-12, check_sorted=True, A=None, U=None, V=None):
    """Compute the extended form of the covariance matrix and factorize.

    Args:
        t (array[n]): The independent coordinates of the data points.
            This array must be _sorted_ in ascending order.
        yerr (Optional[float or array[n]]): The measurement uncertainties
            for the data points at coordinates ``t``. These values will be
            added in quadrature to the diagonal of the covariance matrix.
            (default: ``1.123e-12``)
        check_sorted (bool): If ``True``, ``t`` will be checked to make
            sure that it is properly sorted. If ``False``, the coordinates
            will be assumed to be in the correct order.
        A: Optional extra component for the solver's low-rank update.
        U: Optional low-rank update matrix.
        V: Optional low-rank update matrix.

    Raises:
        ValueError: For un-sorted data or mismatched dimensions.
        solver.LinAlgError: For non-positive definite matrices.
    """
    t = np.atleast_1d(t)
    if (check_sorted and np.any((np.diff(t) < 0.0))):
        raise ValueError('the input coordinates must be sorted')
    if (check_sorted and (len(t.shape) > 1)):
        raise ValueError('dimension mismatch')
    self._t = t
    # Broadcast a scalar yerr across all data points.
    self._yerr = np.empty_like(self._t)
    self._yerr[:] = yerr
    (alpha_real, beta_real, alpha_complex_real, alpha_complex_imag, beta_complex_real, beta_complex_imag) = self.kernel.coefficients
    # Empty arrays signal "no low-rank update" to the solver.
    self._A = (np.empty(0) if (A is None) else A)
    self._U = (np.empty((0, 0)) if (U is None) else U)
    self._V = (np.empty((0, 0)) if (V is None) else V)
    self.solver.compute(self.kernel.jitter, alpha_real, beta_real, alpha_complex_real, alpha_complex_imag, beta_complex_real, beta_complex_imag, self._A, self._U, self._V, t, (self._yerr ** 2))
    # Factorization is up to date; predictions may now be made.
    self.dirty = False
Compute the extended form of the covariance matrix and factorize Args: t (array[n]): The independent coordinates of the data points. This array must be _sorted_ in ascending order. yerr (Optional[float or array[n]]): The measurement uncertainties for the data points at coordinates ``t``. These values will be added in quadrature to the diagonal of the covariance matrix. (default: ``1.123e-12``) check_sorted (bool): If ``True``, ``t`` will be checked to make sure that it is properly sorted. If ``False``, the coordinates will be assumed to be in the correct order. Raises: ValueError: For un-sorted data or mismatched dimensions. solver.LinAlgError: For non-positive definite matrices.
codesearchnet
def _process_image(filename, coder):
    """Process a single image file.

    Args:
        filename: string, path to an image file e.g., '/path/to/example.JPG'.
        coder: instance of ImageCoder to provide TensorFlow image coding utils.

    Returns:
        image_buffer: string, JPEG encoding of RGB image.
        height: integer, image height in pixels.
        width: integer, image width in pixels.
    """
    # Read in binary mode ('rb'): image bytes must not go through text
    # decoding, which corrupts them under Python 3. Use a context manager
    # so the handle is always closed.
    with tf.gfile.FastGFile(filename, 'rb') as f:
        image_data = f.read()
    # Normalize non-JPEG inputs to JPEG/RGB before decoding.
    if _is_png(filename):
        print('Converting PNG to JPEG for %s' % filename)
        image_data = coder.png_to_jpeg(image_data)
    elif _is_cmyk(filename):
        print('Converting CMYK to RGB for %s' % filename)
        image_data = coder.cmyk_to_rgb(image_data)
    image = coder.decode_jpeg(image_data)
    assert len(image.shape) == 3
    height = image.shape[0]
    width = image.shape[1]
    assert image.shape[2] == 3
    return image_data, height, width
Process a single image file. Args: filename: string, path to an image file e.g., '/path/to/example.JPG'. coder: instance of ImageCoder to provide TensorFlow image coding utils. Returns: image_buffer: string, JPEG encoding of RGB image. height: integer, image height in pixels. width: integer, image width in pixels.
juraj-google-style
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
    """Build model inputs from a sequence or a pair of sequences for
    sequence classification tasks by concatenating and adding special
    tokens. An FNet sequence has the following format:

    - single sequence: `[CLS] X [SEP]`
    - pair of sequences: `[CLS] A [SEP] B [SEP]`

    Args:
        token_ids_0 (`List[int]`):
            List of IDs to which the special tokens will be added.
        token_ids_1 (`List[int]`, *optional*):
            Optional second list of IDs for sequence pairs.

    Returns:
        `List[int]`: list of input IDs with the appropriate special tokens.
    """
    result = [self.cls_token_id] + list(token_ids_0) + [self.sep_token_id]
    if token_ids_1 is not None:
        result += list(token_ids_1) + [self.sep_token_id]
    return result
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An FNet sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
github-repos
def ReportStatus(self, request, global_params=None):
    """Reports the status of dataflow WorkItems leased by a worker.

    Args:
        request: (DataflowProjectsJobsWorkItemsReportStatusRequest) input
            message.
        global_params: (StandardQueryParameters, default: None) global
            arguments.

    Returns:
        (ReportWorkItemStatusResponse) The response message.
    """
    method_config = self.GetMethodConfig('ReportStatus')
    return self._RunMethod(method_config, request, global_params=global_params)
Reports the status of dataflow WorkItems leased by a worker. Args: request: (DataflowProjectsJobsWorkItemsReportStatusRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (ReportWorkItemStatusResponse) The response message.
github-repos
def get_data_for_name(cls, service_name):
    """Get the data relating to a named music service.

    Args:
        service_name (str): The name of the music service for which data
            is required.

    Returns:
        dict: Data relating to the music service.

    Raises:
        MusicServiceException: if the music service cannot be found.
    """
    match = next(
        (svc for svc in cls._get_music_services_data().values()
         if svc["Name"] == service_name),
        None,
    )
    if match is None:
        raise MusicServiceException(
            "Unknown music service: '%s'" % service_name)
    return match
Get the data relating to a named music service. Args: service_name (str): The name of the music service for which data is required. Returns: dict: Data relating to the music service. Raises: `MusicServiceException`: if the music service cannot be found.
juraj-google-style
def _get_scripts(self, host_metadata):
    """Temporary method to retrieve the host deploy scripts.

    TODO: remove once the "ovirt-scripts" option gets deprecated.

    Args:
        host_metadata (dict): host metadata to retrieve the scripts for.

    Returns:
        list: deploy scripts for the host, empty if none found.
    """
    deploy_scripts = host_metadata.get('deploy-scripts', [])
    if deploy_scripts:
        return deploy_scripts
    ovirt_scripts = host_metadata.get('ovirt-scripts', [])
    if ovirt_scripts:
        warnings.warn(
            'Deprecated entry "ovirt-scripts" will not be supported in '
            'the future, replace with "deploy-scripts"'
        )
    # Bug fix: the original fell off the end here and implicitly returned
    # None, contradicting the documented "empty list if none found".
    return ovirt_scripts
Temporary method to retrieve the host scripts TODO: remove once the "ovirt-scripts" option gets deprecated Args: host_metadata(dict): host metadata to retrieve the scripts for Returns: list: deploy scripts for the host, empty if none found
juraj-google-style
def _preprocess_sqlite_index(asql_query, library, backend, connection):
    """Create a materialized view for each indexed partition found in the query.

    Args:
        asql_query (str): asql query.
        library (ambry.Library): library used to resolve partitions.
        backend (SQLiteBackend): backend that materializes partitions.
        connection (apsw.Connection): connection the table is installed on.

    Returns:
        str: converted SQL if the query is an index statement, otherwise
            the original ``asql_query`` unchanged.
    """
    new_query = None
    if asql_query.strip().lower().startswith('index'):
        logger.debug('_preprocess_index: create index query found.\n asql query: {}'.format(asql_query))
        index = parse_index(asql_query)
        # Materialize the partition so the index has a concrete table to target.
        partition = library.partition(index.source)
        table = backend.install(connection, partition, materialize=True)
        index_name = '{}_{}_ind'.format(partition.vid, '_'.join(index.columns))
        new_query = 'CREATE INDEX IF NOT EXISTS {index} ON {table} ({columns});'.format(index=index_name, table=table, columns=','.join(index.columns))
        logger.debug('_preprocess_index: preprocess finished.\n asql query: {}\n new query: {}'.format(asql_query, new_query))
    return (new_query or asql_query)
Creates materialized view for each indexed partition found in the query. Args: asql_query (str): asql query library (ambry.Library): backend (SQLiteBackend): connection (apsw.Connection): Returns: str: converted asql if it contains index query. If not, returns asql_query as is.
codesearchnet
def __init__(self, clslist):
    """Build an attribute filter from one or more AttributeMapper classes.

    Args:
        clslist (list): List of classes from which to build the filter.
            A single class is also accepted and wrapped in a list.
    """
    # Accept a single class by wrapping anything non-iterable in a list.
    if not hasattr(clslist, '__contains__'):
        clslist = [clslist]
    # Union of the `required` / `optional` attribute sets of every
    # AttributeMapper subclass in the list.
    self.required = reduce(set.union, (cls.required for cls in clslist if issubclass(cls, AttributeMapper)))
    self.optional = reduce(set.union, (cls.optional for cls in clslist if issubclass(cls, AttributeMapper)))
    # NOTE(review): symmetric_difference_update also *adds* required-only
    # items to `optional`; presumably required is a subset of optional so
    # this acts as "optional minus required" — TODO confirm.
    self.optional.symmetric_difference_update(self.required)
SCFilter(clslist) Args: clslist (list): List of classes from which to build the filter Returns: new SCFilter instance
juraj-google-style
def API_Retry(job, key=None, retries=3, wait=31):
    """API retry helper with exponential back off and common error handling.

    CAUTION: total timeout cannot exceed 5 minutes or the SSL token expires
    for all future calls.  Each retry doubles the wait, so the defaults give
    0:31 + 1:02 + 2:04 = 3:37 minutes.

    * Errors retried: 429, 500, 503 (and retriable transport/SSL errors).
    * Errors ignored: 409 - already exists (create only; returns None).
    * Errors raised: all others.

    Args:
        job: (object) API call path, everything before the execute()
            statement to retry.
        key: (string) Optional key from the JSON response to return.
        retries: (int) Number of times to try the job.
        wait: (seconds) Time to wait between retries (doubles each retry).

    Returns:
        JSON result of the job, or the ``key`` value from it, or None if
        the object already exists (409).

    Raises:
        Any exception not handled above.
    """
    try:
        data = job.execute()
        return data if not key else data.get(key, [])
    except HttpError as e:
        if e.resp.status in [403, 409, 429, 500, 503]:
            content = json.loads(e.content.decode())
            if content['error']['code'] == 409:
                # Already exists: treated as success with no payload.
                return None
            elif content.get('error', {}).get('status') == 'PERMISSION_DENIED' or content.get('error', {}).get('errors', [{}])[0].get('reason') == 'forbidden':
                # Permissions will not fix themselves: fail fast.
                print('ERROR DETAILS:', e.content.decode())
                raise
            elif retries > 0:
                print('API ERROR:', str(e))
                print('API RETRY / WAIT:', retries, wait)
                sleep(wait)
                return API_Retry(job, key, retries - 1, wait * 2)
            else:
                print('ERROR DETAILS:', e.content.decode())
                raise
        else:
            raise
    except RETRIABLE_EXCEPTIONS as e:
        if retries > 0:
            print('HTTP ERROR:', str(e))
            print('HTTP RETRY / WAIT:', retries, wait)
            sleep(wait)
            return API_Retry(job, key, retries - 1, wait * 2)
        else:
            raise
    except SSLError as e:
        # Bug fix: `e.message` is Python-2-only and raises AttributeError
        # on Python 3; use str(e) instead.
        if retries > 0 and 'timed out' in str(e):
            print('SSL ERROR:', str(e))
            print('SSL RETRY / WAIT:', retries, wait)
            sleep(wait)
            return API_Retry(job, key, retries - 1, wait * 2)
        else:
            raise
API retry that includes back off and some common error handling. CAUTION: Total timeout cannot exceed 5 minutes or the SSL token expires for all future calls. For critical but recoverable errors, the back off executes [retry] times. Each time the [wait] is doubled. By default retries are: 0:31 + 1:02 + 2:04 = 3:37 ( minutes ) The recommended minimum wait is 60 seconds for most APIs. * Errors retried: 429, 500, 503 * Errors ignored: 409 - already exists ( triggered by create only and also returns None ) * Errors raised: ALL OTHERS Args: * job: (object) API call path, everything before the execute() statement to retry. * key: (string) Optional key from json reponse to return. * retries: (int) Number of times to try the job. * wait: (seconds) Time to wait in seconds between retries. Returns: * JSON result of job or key value from JSON result if job succeed. * None if object already exists. Raises: * Any exceptions not listed in comments above.
github-repos
def reload_class_methods(self, class_, verbose=True):
    """Rebind all methods of ``class_`` onto the instance ``self``.

    Args:
        self (object): class instance to reload.
        class_ (type): type to reload as.
        verbose (bool): print progress when True.
    """
    if verbose:
        print('[util_class] Reloading self=%r as class_=%r' % (self, class_))
    # Swap the instance's class, then re-inject every method so stale
    # bound methods from the old class definition are replaced.
    self.__class__ = class_
    for key in dir(class_):
        func = getattr(class_, key)
        if isinstance(func, types.MethodType):
            inject_func_as_method(self, func, class_=class_, allow_override=True, verbose=verbose)
rebinds all class methods Args: self (object): class instance to reload class_ (type): type to reload as Example: >>> # DISABLE_DOCTEST >>> from utool.util_class import * # NOQA >>> self = '?' >>> class_ = '?' >>> result = reload_class_methods(self, class_) >>> print(result)
juraj-google-style
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    """Write the data encoding the KeyWrappingSpecification struct to a stream.

    Fields are encoded into a local buffer first (in KMIP-mandated order)
    so the total length is known before the struct header is written.

    Args:
        output_stream (stream): A data stream in which to encode object
            data, supporting a write method; usually a BytearrayStream
            object.
        kmip_version (KMIPVersion): An enumeration defining the KMIP
            version with which the object will be encoded. Optional,
            defaults to KMIP 1.0.

    Raises:
        ValueError: if the required wrapping method attribute is not set.
    """
    local_stream = BytearrayStream()
    # Wrapping method is the only mandatory field.
    if self._wrapping_method:
        self._wrapping_method.write(
            local_stream,
            kmip_version=kmip_version
        )
    else:
        raise ValueError(
            "Invalid struct missing the wrapping method attribute."
        )
    # All remaining fields are optional and written only when present.
    if self._encryption_key_information:
        self._encryption_key_information.write(
            local_stream,
            kmip_version=kmip_version
        )
    if self._mac_signature_key_information:
        self._mac_signature_key_information.write(
            local_stream,
            kmip_version=kmip_version
        )
    if self._attribute_names:
        for unique_identifier in self._attribute_names:
            unique_identifier.write(
                local_stream,
                kmip_version=kmip_version
            )
    if self._encoding_option:
        self._encoding_option.write(
            local_stream,
            kmip_version=kmip_version
        )
    # Header (written by super) needs the payload length computed above.
    self.length = local_stream.length()
    super(KeyWrappingSpecification, self).write(
        output_stream,
        kmip_version=kmip_version
    )
    output_stream.write(local_stream.buffer)
Write the data encoding the KeyWrappingSpecification struct to a stream. Args: output_stream (stream): A data stream in which to encode object data, supporting a write method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0.
juraj-google-style
def to_json_string(self, include_defaults):
    """Return a JSON string encoding the attributes of this object.

    References to other objects are serialized as references (just the
    object ID and type info); deserialization therefore needs the full
    attributes of those other objects separately, which is why there is no
    corresponding ``from_json_string()`` — deserialize via a Document.

    Args:
        include_defaults (bool): whether to include attributes that
            haven't been changed from the default.
    """
    json_like = self._to_json_like(include_defaults=include_defaults)
    # The ID is attached here rather than in _to_json_like so referenced
    # objects can be identified by the deserializer.
    json_like['id'] = self.id
    return serialize_json(json_like)
Returns a JSON string encoding the attributes of this object. References to other objects are serialized as references (just the object ID and type info), so the deserializer will need to separately have the full attributes of those other objects. There's no corresponding ``from_json_string()`` because to deserialize an object is normally done in the context of a Document (since the Document can resolve references). For most purposes it's best to serialize and deserialize entire documents. Args: include_defaults (bool) : whether to include attributes that haven't been changed from the default
codesearchnet
def locator_to_latlong(locator):
    """Convert a Maidenhead locator to the corresponding WGS84 coordinates.

    The returned point is the centre of the locator square/sub-square.

    Args:
        locator (str): Maidenhead locator, either 4 or 6 characters.

    Returns:
        tuple(float, float): (latitude, longitude).

    Raises:
        ValueError: when called with a malformed locator or one whose
            length is not 4 or 6.
        AttributeError: when ``locator`` is not a string.

    Note:
        Latitude (negative = South, positive = North),
        Longitude (negative = West, positive = East).
    """
    locator = locator.upper()
    # Bug fix: the original check `len == 5 or len < 4` silently accepted
    # locators of 7+ characters; only 4- and 6-character forms are valid.
    if len(locator) not in (4, 6):
        raise ValueError
    # Field: letters A-R.
    if not ('A' <= locator[0] <= 'R') or not ('A' <= locator[1] <= 'R'):
        raise ValueError
    # Square: ASCII digits 0-9.
    if not ('0' <= locator[2] <= '9') or not ('0' <= locator[3] <= '9'):
        raise ValueError
    if len(locator) == 6:
        # Sub-square: letters A-X.
        if not ('A' <= locator[4] <= 'X') or not ('A' <= locator[5] <= 'X'):
            raise ValueError
    longitude = ((ord(locator[0]) - ord('A')) * 20) - 180
    latitude = ((ord(locator[1]) - ord('A')) * 10) - 90
    longitude += (ord(locator[2]) - ord('0')) * 2
    latitude += ord(locator[3]) - ord('0')
    if len(locator) == 6:
        longitude += (ord(locator[4]) - ord('A')) * (2 / 24)
        latitude += (ord(locator[5]) - ord('A')) * (1 / 24)
        # Offset to the centre of the sub-square.
        longitude += 1 / 24
        latitude += 0.5 / 24
    else:
        # Offset to the centre of the square.
        longitude += 1
        latitude += 0.5
    return (latitude, longitude)
converts Maidenhead locator in the corresponding WGS84 coordinates

        Args:
            locator (string): Locator, either 4 or 6 characters

        Returns:
            tuple (float, float): Latitude, Longitude

        Raises:
            ValueError: When called with wrong or invalid input arg
            TypeError: When arg is not a string

        Example:
           The following example converts a Maidenhead locator into Latitude and Longitude

           >>> from pyhamtools.locator import locator_to_latlong
           >>> latitude, longitude = locator_to_latlong("JN48QM")
           >>> print latitude, longitude
           48.5208333333 9.375

        Note:
             Latitude (negative = South, positive = North)
             Longitude (negative = West, positive = East)
codesearchnet
def create_game(
    self, map_name, bot_difficulty=sc_pb.VeryEasy,
    bot_race=sc_common.Random, bot_first=False):
    """Create a game, one remote agent vs the specified bot.

    Args:
        map_name: The map to use.
        bot_difficulty: The difficulty of the bot to play against.
        bot_race: The race for the bot.
        bot_first: Whether the bot should be player 1 (else is player 2).
    """
    self._controller.ping()
    # Upload the map to the SC2 instance only once per process.
    map_inst = maps.get(map_name)
    map_data = map_inst.data(self._run_config)
    if map_name not in self._saved_maps:
        self._controller.save_map(map_inst.path, map_data)
        self._saved_maps.add(map_name)
    create = sc_pb.RequestCreateGame(
        local_map=sc_pb.LocalMap(map_path=map_inst.path, map_data=map_data),
        disable_fog=False)
    # Player slot order determines player 1 vs player 2.
    if not bot_first:
        create.player_setup.add(type=sc_pb.Participant)
    create.player_setup.add(
        type=sc_pb.Computer, race=bot_race, difficulty=bot_difficulty)
    if bot_first:
        create.player_setup.add(type=sc_pb.Participant)
    self._controller.create_game(create)
Create a game, one remote agent vs the specified bot. Args: map_name: The map to use. bot_difficulty: The difficulty of the bot to play against. bot_race: The race for the bot. bot_first: Whether the bot should be player 1 (else is player 2).
juraj-google-style
def __init__(self, item_id, desc, resources, uri, metadata_dict, music_service=None):
    """Initialise a music service item.

    Args:
        item_id (str): This is the Didl compatible id, NOT the music
            item id.
        desc (str): A DIDL descriptor, default
            ``'RINCON_AssociatedZPUDN'``.
        resources (list): List of DidlResource.
        uri (str): The uri for the location of the item.
        metadata_dict (dict): Mapping of metadata.
        music_service (MusicService): The MusicService instance the item
            originates from.
    """
    _LOG.debug('%s.__init__ with item_id=%s, desc=%s, resources=%s, '
               'uri=%s, metadata_dict=..., music_service=%s',
               self.__class__.__name__, item_id, desc, resources,
               uri, music_service)
    # The superclass consumes the metadata mapping; the DIDL specifics are
    # stored directly on the instance.
    super(MusicServiceItem, self).__init__(metadata_dict)
    self.item_id = item_id
    self.desc = desc
    self.resources = resources
    self.uri = uri
    self.music_service = music_service
Init music service item

        Args:
            item_id (str): This is the Didl compatible id NOT the music
                item id
            desc (str): A DIDL descriptor, default
                ``'RINCON_AssociatedZPUDN'``
            resources (list): List of DidlResource
            uri (str): The uri for the location of the item
            metadata_dict (dict): Mapping of metadata
            music_service (MusicService): The MusicService instance the item
                originates from
juraj-google-style
def get_model(servoid):
    """Get the Herkulex servo model number, given the servo id.

    Args:
        servoid (int): the id of the servo.

    Returns:
        int: an integer corresponding to the model number:
            0x06 for DRS-602, 0x04 for DRS-402, 0x02 for DRS-202.

    Raises:
        HerkulexError: if communication with the motor fails.
    """
    # EEP-read request packet for the model-number register.
    data = []
    data.append(0x09)
    data.append(servoid)
    data.append(EEP_READ_REQ)
    data.append(MODEL_NO1_EEP)
    data.append(BYTE1)
    send_data(data)
    try:
        rxdata = SERPORT.read(12)
        model = rxdata[9]
        # Python 3 serial reads return bytes (indexing yields int);
        # Python 2 returns str (indexing yields a 1-char string).
        if not isinstance(model, int):
            model = ord(model)
        return model & 0xFF
    except Exception:
        # Narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit.
        raise HerkulexError("could not communicate with motors")
Get the servo model This function gets the model of the herkules servo, provided its id Args: servoid(int): the id of the servo Returns: int: an integer corresponding to the model number 0x06 for DRS-602 0x04 for DRS-402 0x02 for DRS-202
juraj-google-style
def astype(self, col_dtypes, **kwargs):
    """Convert column dtypes to the given dtypes.

    Args:
        col_dtypes: Dictionary of ``{col: dtype, ...}`` where col is the
            column name and dtype is a numpy dtype.

    Returns:
        DataFrame (query compiler) with updated dtypes.
    """
    # Group target columns by dtype so each dtype's conversion is applied
    # to its columns in a single pass over the partitions.
    dtype_indices = {}
    columns = col_dtypes.keys()
    numeric_indices = list(self.columns.get_indexer_for(columns))
    new_dtypes = self.dtypes.copy()
    for (i, column) in enumerate(columns):
        dtype = col_dtypes[column]
        if ((not isinstance(dtype, type(self.dtypes[column]))) or (dtype != self.dtypes[column])):
            if (dtype in dtype_indices.keys()):
                dtype_indices[dtype].append(numeric_indices[i])
            else:
                dtype_indices[dtype] = [numeric_indices[i]]
            try:
                new_dtype = np.dtype(dtype)
            except TypeError:
                new_dtype = dtype
            # Widen 32-bit requests that weren't explicitly asked for, to
            # keep results consistent with pandas defaults on 64-bit.
            if ((dtype != np.int32) and (new_dtype == np.int32)):
                new_dtype = np.dtype('int64')
            elif ((dtype != np.float32) and (new_dtype == np.float32)):
                new_dtype = np.dtype('float64')
            new_dtypes[column] = new_dtype
    new_data = self.data
    for dtype in dtype_indices.keys():

        def astype(df, internal_indices=[]):
            # Applied per-partition: cast only the selected columns.
            block_dtypes = {}
            for ind in internal_indices:
                block_dtypes[df.columns[ind]] = dtype
            return df.astype(block_dtypes)
        new_data = new_data.apply_func_to_select_indices(0, astype, dtype_indices[dtype], keep_remaining=True)
    return self.__constructor__(new_data, self.index, self.columns, new_dtypes)
Converts columns dtypes to given dtypes. Args: col_dtypes: Dictionary of {col: dtype,...} where col is the column name and dtype is a numpy dtype. Returns: DataFrame with updated dtypes.
codesearchnet
def __init__(self, mount_path=None, path_specification=None):
    """Initializes a mount point.

    Args:
        mount_path (Optional[str]): path where the path specification is
            mounted, such as "/mnt/image" or "C:\\".
        path_specification (Optional[dfvfs.PathSpec]): path specification.
    """
    super(MountPoint, self).__init__()
    self.mount_path = mount_path
    self.path_specification = path_specification
Initializes a mount point. Args: mount_path (Optional[str]): path where the path specification is mounted, such as "/mnt/image" or "C:\\". path_specification (Optional[dfvfs.PathSpec]): path specification.
juraj-google-style
def ExpandWindowsEnvironmentVariables(data_string, knowledge_base):
    r"""Take a string and expand any Windows environment variables.

    Args:
        data_string: A string, e.g. "%SystemRoot%\\LogFiles".
        knowledge_base: A knowledgebase object exposing ``environ_*``
            attributes.

    Returns:
        A string with available environment variables expanded. Variables
        that cannot be resolved are left in place verbatim.
    """
    win_environ_regex = re.compile('%([^%]+?)%')
    components = []
    offset = 0
    for match in win_environ_regex.finditer(data_string):
        # Keep the literal text between variables.
        components.append(data_string[offset:match.start()])
        kb_value = getattr(knowledge_base, ('environ_%s' % match.group(1).lower()), None)
        if (isinstance(kb_value, string_types) and kb_value):
            components.append(kb_value)
        else:
            # Unknown variable: re-emit it unchanged.
            components.append(('%%%s%%' % match.group(1)))
        offset = match.end()
    components.append(data_string[offset:])
    return ''.join(components)
r"""Take a string and expand any windows environment variables. Args: data_string: A string, e.g. "%SystemRoot%\\LogFiles" knowledge_base: A knowledgebase object. Returns: A string with available environment variables expanded. If we can't expand we just return the string with the original variables.
codesearchnet
def char_style(self, style):
    """Set the printer's character style.

    Args:
        style: The desired character style; one of 'normal', 'outline',
            'shadow', 'outlineshadow'.

    Returns:
        None

    Raises:
        RuntimeError: if ``style`` is not a recognised character style.
    """
    codes = {'normal': 0,
             'outline': 1,
             'shadow': 2,
             'outlineshadow': 3}
    if style not in codes:
        raise RuntimeError('Invalid character style in function charStyle')
    # ESC q <n> selects the character style on the device.
    self.send(chr(27) + 'q' + chr(codes[style]))
Sets the character style. Args: style: The desired character style. Choose from 'normal', 'outline', 'shadow', and 'outlineshadow' Returns: None Raises: RuntimeError: Invalid character style
juraj-google-style
def as_numpy(dataset, graph=None):
    """Convert a possibly nested structure of `tf.data.Dataset`s/`tf.Tensor`s
    to iterables of NumPy arrays and NumPy arrays, respectively.

    Args:
        dataset: a possibly nested structure of `tf.data.Dataset`s and/or
            `tf.Tensor`s.
        graph: `tf.Graph`, optional, explicitly set the graph to use.

    Returns:
        A structure matching `dataset` where `tf.data.Dataset`s are
        converted to generators of NumPy arrays and `tf.Tensor`s are
        converted to NumPy arrays.
    """
    nested_ds = dataset
    # `del` guards against accidentally using the un-flattened argument below.
    del dataset
    flat_ds = tf.nest.flatten(nested_ds)
    flat_np = []
    for ds_el in flat_ds:
        types = [type(el) for el in flat_ds]
        types = tf.nest.pack_sequence_as(nested_ds, types)
        if (not (isinstance(ds_el, tf.Tensor) or tf_compat.is_dataset(ds_el))):
            raise ValueError(('Arguments to as_numpy must be tf.Tensors or tf.data.Datasets. Got: %s' % types))
    if tf.executing_eagerly():
        # Eager mode: tensors convert directly; datasets become generators.
        for ds_el in flat_ds:
            if isinstance(ds_el, tf.Tensor):
                np_el = ds_el.numpy()
            elif tf_compat.is_dataset(ds_el):
                np_el = _eager_dataset_iterator(ds_el)
            else:
                assert False
            flat_np.append(np_el)
    else:
        # Graph mode: build iterators and run tensors in a CPU-only session.
        with utils.maybe_with_graph(graph, create_if_none=False):
            ds_iters = [tf.compat.v1.data.make_one_shot_iterator(ds_el).get_next() for ds_el in flat_ds if tf_compat.is_dataset(ds_el)]
        ds_iters = [_graph_dataset_iterator(ds_iter, graph) for ds_iter in ds_iters]
        with utils.nogpu_session(graph) as sess:
            np_arrays = sess.run([tensor for tensor in flat_ds if (not tf_compat.is_dataset(tensor))])
        # Re-interleave dataset iterators and evaluated arrays in the
        # original flattened order.
        iter_ds = iter(ds_iters)
        iter_array = iter(np_arrays)
        flat_np = [(next(iter_ds) if tf_compat.is_dataset(ds_el) else next(iter_array)) for ds_el in flat_ds]
    return tf.nest.pack_sequence_as(nested_ds, flat_np)
Converts a `tf.data.Dataset` to an iterable of NumPy arrays. `as_numpy` converts a possibly nested structure of `tf.data.Dataset`s and `tf.Tensor`s to iterables of NumPy arrays and NumPy arrays, respectively. Args: dataset: a possibly nested structure of `tf.data.Dataset`s and/or `tf.Tensor`s. graph: `tf.Graph`, optional, explicitly set the graph to use. Returns: A structure matching `dataset` where `tf.data.Dataset`s are converted to generators of NumPy arrays and `tf.Tensor`s are converted to NumPy arrays.
codesearchnet
def _delete_from_hdx(self, object_type, id_field_name):
    """Helper method to delete an object from HDX.

    Args:
        object_type (str): Description of HDX object type (for messages).
        id_field_name (str): Name of field containing the HDX object
            identifier.

    Returns:
        None

    Raises:
        HDXError: if the mandatory identifier field is missing.
    """
    if (id_field_name not in self.data):
        raise HDXError(('No %s field (mandatory) in %s!' % (id_field_name, object_type)))
    self._save_to_hdx('delete', id_field_name)
Helper method to deletes a resource from HDX Args: object_type (str): Description of HDX object type (for messages) id_field_name (str): Name of field containing HDX object identifier Returns: None
codesearchnet
def wait_for(self, timeout=10000, interval=1000, asserter=(lambda x: x)):
    """Wait for the driver until the given condition is satisfied.

    Support: Android, iOS, Web(WebView).

    Args:
        timeout (int): How long (ms) we should keep retrying.
        interval (int): How long (ms) between retries.
        asserter (callable): The asserter func to determine the result;
            retried while it raises WebDriverException.

    Returns:
        The driver.

    Raises:
        TypeError: if ``asserter`` is not callable.
        WebDriverException: if the condition is never satisfied in time.
    """
    if (not callable(asserter)):
        raise TypeError('Asserter must be callable.')

    # Retry only on WebDriverException so genuine errors surface at once.
    @retry(retry_on_exception=(lambda ex: isinstance(ex, WebDriverException)), stop_max_delay=timeout, wait_fixed=interval)
    def _wait_for(driver):
        asserter(driver)
        return driver
    return _wait_for(self)
Wait for driver till satisfy the given condition Support: Android iOS Web(WebView) Args: timeout(int): How long we should be retrying stuff. interval(int): How long between retries. asserter(callable): The asserter func to determine the result. Returns: Return the driver. Raises: WebDriverException.
codesearchnet
def setattr(self, name, val):
    """Change an attribute value of the UI element.

    Args:
        name: attribute name.
        val: new attribute value to set.

    Raises:
        InvalidOperationException: when setting the attribute fails (e.g.
            immutable or non-existent attribute).
    """
    # Resolve to a single node first; setting attributes on a multi-match
    # query is not supported.
    nodes = self._do_query(multiple=False)
    try:
        return self.poco.agent.hierarchy.setAttr(nodes, name, val)
    except UnableToSetAttributeException as e:
        raise InvalidOperationException('"{}" of "{}"'.format(str(e), self))
Change the attribute value of the UI element. Not all attributes can be casted to text. If changing the immutable attributes or attributes which do not exist, the InvalidOperationException exception is raised. Args: name: attribute name val: new attribute value to cast Raises: InvalidOperationException: when it fails to set the attribute on UI element
codesearchnet
def get_accounts(cls, soco=None):
    """Get all accounts known to the Sonos system.

    Args:
        soco (`SoCo`, optional): a `SoCo` instance to query. If `None`,
            a random instance is used. Defaults to `None`.

    Returns:
        dict: account instances keyed by serial number. Accounts marked
            as deleted are excluded.

    Note:
        Any existing Account instance will have its attributes updated
        to those currently stored on the Sonos system.
    """
    root = XML.fromstring(cls._get_account_xml(soco))
    # Restored truncated XPath: select every <Account> element in the doc.
    xml_accounts = root.findall('.//Account')
    result = {}
    for xml_account in xml_accounts:
        serial_number = xml_account.get('SerialNum')
        is_deleted = xml_account.get('Deleted') == '1'
        if cls._all_accounts.get(serial_number):
            if is_deleted:
                # Known account now deleted: drop it from the cache.
                del cls._all_accounts[serial_number]
                continue
            else:
                account = cls._all_accounts.get(serial_number)
        else:
            if is_deleted:
                continue
            account = Account()
            account.serial_number = serial_number
            cls._all_accounts[serial_number] = account
        # Refresh attributes from the current system state.
        account.service_type = xml_account.get('Type')
        account.deleted = is_deleted
        account.username = xml_account.findtext('UN')
        account.metadata = xml_account.findtext('MD')
        account.nickname = xml_account.findtext('NN')
        account.oa_device_id = xml_account.findtext('OADevID')
        account.key = xml_account.findtext('Key')
        result[serial_number] = account
    # TuneIn is built in and not reported in the account list, so add a
    # synthetic account for it (service type 65031, serial '0').
    tunein = Account()
    tunein.service_type = '65031'
    tunein.deleted = False
    tunein.username = ''
    tunein.metadata = ''
    tunein.nickname = ''
    tunein.oa_device_id = ''
    tunein.key = ''
    tunein.serial_number = '0'
    result['0'] = tunein
    return result
Get all accounts known to the Sonos system. Args: soco (`SoCo`, optional): a `SoCo` instance to query. If `None`, a random instance is used. Defaults to `None`. Returns: dict: A dict containing account instances. Each key is the account's serial number, and each value is the related Account instance. Accounts which have been marked as deleted are excluded. Note: Any existing Account instance will have its attributes updated to those currently stored on the Sonos system.
codesearchnet
def number_occurences(self, proc):
    """Return the number of commands that contain the given text.

    Args:
        proc (str): text to look for; can match anywhere in the command
            path, name or arguments.

    Returns:
        int: The number of rows whose command contains ``proc``.
    """
    # sum() over a generator avoids materializing a throwaway list as the
    # original len([True for ...]) did.
    return sum(1 for row in self.data if proc in row[self.command_name])
Returns the number of occurrences of commands that contain given text

        Returns:
            int: The number of occurrences of commands with given text

        .. note::
           'proc' can match anywhere in the command path, name or arguments.
codesearchnet
def download_decompress(url: str, download_path: [Path, str], extract_paths=None):
    """Download and extract a .tar.gz, .gz or .zip file to one or several
    target locations. The archive is deleted after successful extraction
    unless a cache directory is configured.

    Args:
        url: URL for file downloading.
        download_path: path to the directory where the downloaded file is
            stored until the end of extraction.
        extract_paths: path or list of paths where the archive contents
            will be extracted.
    """
    file_name = Path(urlparse(url).path).name
    download_path = Path(download_path)
    if extract_paths is None:
        extract_paths = [download_path]
    elif isinstance(extract_paths, list):
        extract_paths = [Path(path) for path in extract_paths]
    else:
        extract_paths = [Path(extract_paths)]
    # Optional cache: archives (and their extracted form) are keyed by a
    # hash of the URL so repeated downloads are skipped.
    cache_dir = os.getenv('DP_CACHE_DIR')
    extracted = False
    if cache_dir:
        cache_dir = Path(cache_dir)
        url_hash = md5(url.encode('utf8')).hexdigest()[:15]
        arch_file_path = cache_dir / url_hash
        extracted_path = cache_dir / (url_hash + '_extracted')
        extracted = extracted_path.exists()
        if not extracted and not arch_file_path.exists():
            simple_download(url, arch_file_path)
    else:
        arch_file_path = download_path / file_name
        simple_download(url, arch_file_path)
        extracted_path = extract_paths.pop()
    if not extracted:
        log.info('Extracting {} archive into {}'.format(arch_file_path, extracted_path))
        extracted_path.mkdir(parents=True, exist_ok=True)
        # Pick the extractor from the file suffix.
        if file_name.endswith('.tar.gz'):
            untar(arch_file_path, extracted_path)
        elif file_name.endswith('.gz'):
            ungzip(arch_file_path, extracted_path / Path(file_name).with_suffix('').name)
        elif file_name.endswith('.zip'):
            with zipfile.ZipFile(arch_file_path, 'r') as zip_ref:
                zip_ref.extractall(extracted_path)
        else:
            raise RuntimeError(f'Trying to extract an unknown type of archive {file_name}')
        if not cache_dir:
            # No cache: the archive is no longer needed.
            arch_file_path.unlink()
    # Fan the extracted contents out to all remaining target paths.
    for extract_path in extract_paths:
        for src in extracted_path.iterdir():
            dest = extract_path / src.name
            if src.is_dir():
                copytree(src, dest)
            else:
                extract_path.mkdir(parents=True, exist_ok=True)
                shutil.copy(str(src), str(dest))
Download and extract .tar.gz or .gz file to one or several target locations. The archive is deleted if extraction was successful. Args: url: URL for file downloading download_path: path to the directory where downloaded file will be stored until the end of extraction extract_paths: path or list of paths where contents of archive will be extracted
juraj-google-style
def __init__(self, latitude, longitude, altitude=None, name=None, description=None):
    """Initialise a new ``Placemark`` object.

    Args:
        latitude (float): Placemark's latitude.
        longitude (float): Placemark's longitude.
        altitude (float): Placemark's altitude.
        name (str): Name for placemark.
        description (str): Placemark's description.
    """
    super(Placemark, self).__init__(latitude, longitude, altitude, name)
    # Bug fix: the original `if altitude:` skipped a legitimate altitude
    # of 0, leaving it un-normalised; test against None instead.
    if altitude is not None:
        self.altitude = float(altitude)
    self.description = description
Initialise a new ``Placemark`` object.

        Args:
            latitude (float): Placemark's latitude
            longitude (float): Placemark's longitude
            altitude (float): Placemark's altitude
            name (str): Name for placemark
            description (str): Placemark's description
juraj-google-style
def _dataset_merge_filestore_newresource(self, new_resource, ignore_fields, filestore_resources):
    """Helper method to add a new resource from a dataset, including
    filestore handling.

    Args:
        new_resource (hdx.data.Resource): New resource from dataset.
        ignore_fields (List[str]): Fields to ignore when checking the
            resource.
        filestore_resources (List[hdx.data.Resource]): Resources that use
            the filestore (appended to in place).

    Returns:
        None
    """
    new_resource.check_required_fields(ignore_fields=ignore_fields)
    self.resources.append(new_resource)
    if new_resource.get_file_to_upload():
        # File-backed resources get a placeholder URL until upload.
        filestore_resources.append(new_resource)
        new_resource['url'] = Dataset.temporary_url
Helper method to add new resource from dataset including filestore. Args: new_resource (hdx.data.Resource): New resource from dataset ignore_fields (List[str]): List of fields to ignore when checking resource filestore_resources (List[hdx.data.Resource]): List of resources that use filestore (to be appended to) Returns: None
juraj-google-style
def RemoveClass(self, class_name):
    """Remove an entry from the list of known classes.

    Args:
        class_name: A string with the class name that is to be removed.

    Raises:
        NonexistentMapping: if there is no class with the given name.
    """
    if class_name in self._class_mapping:
        del self._class_mapping[class_name]
    else:
        raise problems.NonexistentMapping(class_name)
Removes an entry from the list of known classes. Args: class_name: A string with the class name that is to be removed. Raises: NonexistentMapping if there is no class with the specified class_name.
juraj-google-style
def __init__(self, value=enums.CertificateType.X_509):
    """Construct a CertificateType object.

    Args:
        value (CertificateType): A CertificateType enumeration value,
            (e.g., CertificateType.PGP). Optional, defaults to
            CertificateType.X_509.
    """
    super(CertificateType, self).__init__(
        enums.CertificateType, value, Tags.CERTIFICATE_TYPE)
Construct a CertificateType object. Args: value (CertificateType): A CertificateType enumeration value, (e.g., CertificateType.PGP). Optional, defaults to CertificateType.X_509.
juraj-google-style
def RunValidation(feed, options, problems):
    """Validate a feed, returning the loaded Schedule and an exit code.

    Args:
        feed: GTFS file, either path of the file as a string or a file
            object.
        options: options object returned by optparse.
        problems: transitfeed.ProblemReporter instance.

    Returns:
        tuple: (transitfeed.Schedule, exit code). Exit code is 2 if an
            extension is provided but can't be loaded, 1 if problems are
            found and 0 if the Schedule is problem free.
    """
    util.CheckVersion(problems, options.latest_version)
    # Optionally swap in an extension module providing a custom GtfsFactory.
    if options.extension:
        try:
            __import__(options.extension)
            extension_module = sys.modules[options.extension]
        except ImportError:
            print(('Could not import extension %s! Please ensure it is a proper Python module.' % options.extension))
            exit(2)
    else:
        extension_module = transitfeed
    gtfs_factory = extension_module.GetGtfsFactory()
    print(('validating %s' % feed))
    print(('FeedValidator extension used: %s' % options.extension))
    loader = gtfs_factory.Loader(feed, problems=problems, extra_validation=False, memory_db=options.memory_db, check_duplicate_trips=options.check_duplicate_trips, gtfs_factory=gtfs_factory)
    schedule = loader.Load()
    schedule.Validate(service_gap_interval=options.service_gap_interval, validate_children=False)
    # Deliberate crash hook used to test the feed validator crash handler.
    if (feed == 'IWantMyvalidation-crash.txt'):
        raise Exception('For testing the feed validator crash handler.')
    accumulator = problems.GetAccumulator()
    if accumulator.HasIssues():
        print(('ERROR: %s found' % accumulator.FormatCount()))
        return (schedule, 1)
    else:
        print('feed validated successfully')
        return (schedule, 0)
Validate feed, returning the loaded Schedule and exit code. Args: feed: GTFS file, either path of the file as a string or a file object options: options object returned by optparse problems: transitfeed.ProblemReporter instance Returns: a transitfeed.Schedule object, exit code and plain text string of other problems Exit code is 2 if an extension is provided but can't be loaded, 1 if problems are found and 0 if the Schedule is problem free. plain text string is '' if no other problems are found.
codesearchnet
def __init__(self, x, name):
    """Construct DivideDelegateWithName.

    Args:
        x: Tensor to use as left operand in operator overloads.
        name: The name that is preferred for the op created.
    """
    self.x = x
    self.name = name
Construct DivideDelegateWithName. Args: x: Tensor to use as left operand in operator overloads name: The name that is preferred for the op created.
github-repos
def splitpath(self, path):
    """Mimic os.path.split using the configured path separator.

    Args:
        path: (str) The path to split.

    Returns:
        (str) A tuple (pathname, basename) for which pathname does not
        end with a slash, and basename does not contain a slash.
    """
    path = self.normcase(path)
    sep = self._path_separator(path)
    path_components = path.split(sep)
    if (not path_components):
        return ('', '')
    starts_with_drive = self._starts_with_drive_letter(path)
    # The last component is the candidate basename.
    basename = path_components.pop()
    colon = self._matching_string(path, ':')
    if (not path_components):
        # No separator at all: may still need to split off "C:".
        if starts_with_drive:
            components = basename.split(colon)
            return ((components[0] + colon), components[1])
        return ('', basename)
    for component in path_components:
        if component:
            # Strip trailing empty components (caused by trailing seps).
            while (not path_components[(- 1)]):
                path_components.pop()
            if starts_with_drive:
                if (not path_components):
                    components = basename.split(colon)
                    return ((components[0] + colon), components[1])
                if ((len(path_components) == 1) and path_components[0].endswith(colon)):
                    # Keep the separator after a bare drive ("C:\").
                    return ((path_components[0] + sep), basename)
            return (sep.join(path_components), basename)
    # All leading components empty: path is rooted at the separator.
    return (sep, basename)
Mimic os.path.splitpath using the specified path_separator. Mimics os.path.splitpath using the path_separator that was specified for this FakeFilesystem. Args: path: (str) The path to split. Returns: (str) A duple (pathname, basename) for which pathname does not end with a slash, and basename does not contain a slash.
codesearchnet
def today(boo):
    """Return today's date as either a number or a string.

    Args:
        boo: if true, return an int like 20151230; if false, return a
            string like "2015-12-30".

    Returns:
        int or str: today's date in the requested representation.
    """
    # The original round-tripped through strptime/str/replace/slicing;
    # strftime/isoformat produce the same values directly.
    current = datetime.today().date()
    if boo:
        return int(current.strftime('%Y%m%d'))
    return current.isoformat()
Return today's date as either a String or a Number, as specified by the User. Args: boo: if true, function returns Number (20151230); if false, returns String ("2015-12-30") Returns: either a Number or a string, dependent upon the user's input
juraj-google-style
def find_all(self, kw: YangIdentifier, pref: YangIdentifier=None) -> List['Statement']:
    """Return the list of all substatements with the given keyword and prefix.

    Args:
        kw: Statement keyword (local part for extensions).
        pref: Keyword prefix (``None`` for built-in statements).
    """
    matches = []
    for sub in self.substatements:
        if sub.keyword == kw and sub.prefix == pref:
            matches.append(sub)
    return matches
Return the list all substatements with the given keyword and prefix. Args: kw: Statement keyword (local part for extensions). pref: Keyword prefix (``None`` for built-in statements).
codesearchnet
def init(module_paths, work_db, config):
    """Clear and initialize a work-db with work items.

    Any existing data in the work-db will be cleared and replaced with
    entirely new work orders; in particular, any results in the db are
    removed.

    Args:
        module_paths: iterable of pathlib.Paths of modules to mutate.
        work_db: A `WorkDB` instance into which the work orders will be
            saved.
        config: The configuration for the new session.
    """
    operator_names = cosmic_ray.plugins.operator_names()
    work_db.set_config(config=config)
    work_db.clear()
    # One pass over each module AST per operator, recording mutation sites.
    for module_path in module_paths:
        module_ast = get_ast(module_path, python_version=config.python_version)
        for op_name in operator_names:
            operator = get_operator(op_name)(config.python_version)
            visitor = WorkDBInitVisitor(module_path, op_name, work_db, operator)
            visitor.walk(module_ast)
    apply_interceptors(work_db, config.sub('interceptors').get('enabled', ()))
Clear and initialize a work-db with work items. Any existing data in the work-db will be cleared and replaced with entirely new work orders. In particular, this means that any results in the db are removed. Args: module_paths: iterable of pathlib.Paths of modules to mutate. work_db: A `WorkDB` instance into which the work orders will be saved. config: The configuration for the new session.
codesearchnet
def __init__(self, usaf):
    """Open (downloading if needed) the TMY CSV for a USAF station.

    Args:
        usaf (str): USAF station identifier.
    """
    filename = env.WEATHER_DATA_PATH + '/' + usaf + 'TYA.csv'
    self.csvfile = None
    try:
        self.csvfile = open(filename)
    except IOError:
        # Not cached locally yet: fetch it, then reopen.
        logger.info("%s not found", filename)
        download(_tmy_url(usaf), filename)
        self.csvfile = open(filename)
    logging.debug('opened %s', self.csvfile.name)
    # First line is the station header; the rest is the TMY data table.
    header = self.csvfile.readline().split(',')
    self.tmy_data = csv.DictReader(self.csvfile)
    self.latitude = float(header[4])
    self.longitude = float(header[5])
    self.tz = float(header[3])
initialize. Args: usaf (str) Returns: (object)
juraj-google-style
def sample_measurements(
    self,
    indices: List[int],
    repetitions: int=1) -> List[List[bool]]:
    """Sample measurements in the computational basis.

    Note that this does not collapse the wave function.

    Args:
        indices: Which qubits are measured.
        repetitions: Number of samples to draw.

    Returns:
        Measurement results with True corresponding to the |1> state.
        The outer list is for repetitions, and the inner corresponds to
        measurements ordered by the input indices.

    Raises:
        ValueError: if repetitions is less than one.
    """
    # Indices are reversed to account for the simulator's qubit ordering.
    reversed_indices = [self._num_qubits - 1 - index for index in indices]
    return sim.sample_state_vector(self._current_state(), reversed_indices,
                                   repetitions)
Samples from measurements in the computational basis. Note that this does not collapse the wave function. Args: indices: Which qubits are measured. Returns: Measurement results with True corresponding to the |1> state. The outer list is for repetitions, and the inner corresponds to measurements ordered by the input indices. Raises: ValueError if repetitions is less than one.
juraj-google-style
def update_from_yaml(self, path=join('config', 'hdx_dataset_static.yml')):
    """Update dataset metadata with static metadata from a YAML file.

    Args:
        path (str): Path to YAML dataset metadata. Defaults to
            config/hdx_dataset_static.yml.

    Returns:
        None
    """
    super(Dataset, self).update_from_yaml(path)
    # Resource entries merged in from the YAML must be split back out
    # into Resource objects.
    self.separate_resources()
Update dataset metadata with static metadata from YAML file Args: path (str): Path to YAML dataset metadata. Defaults to config/hdx_dataset_static.yml. Returns: None
codesearchnet
def _get_logged_ops(graph, run_meta=None, add_trace=True, add_trainable_var=True):
    """Extract trainable model parameters and FLOPs for ops from a Graph.

    Args:
        graph: tf.Graph.
        run_meta: RunMetadata proto used to complete shape information.
        add_trace: Whether to add op trace information.
        add_trainable_var: Whether to assign
            tf.compat.v1.trainable_variables() op type
            '_trainable_variables'.

    Returns:
        logged_ops: dict mapping from op_name to OpLogEntry.
        string_to_id: dict mapping from string to id.
    """
    if run_meta:
        graph = _fill_missing_graph_shape(graph, run_meta)
    op_missing_shape = 0
    logged_ops = {}
    # Strings (filenames, function names, source lines) are interned into
    # an id table to keep the proto small.
    string_to_id = {}
    string_to_id['none'] = len(string_to_id)
    for op in graph.get_operations():
        try:
            stats = ops.get_stats_for_node_def(graph, op.node_def, REGISTERED_FLOP_STATS)
        except ValueError:
            # Fail over when statistics cannot be computed (e.g. shapes
            # are incomplete); counted and reported at the end.
            op_missing_shape += 1
            stats = None
        entry = tfprof_log_pb2.OpLogEntry()
        entry.name = op.name
        add_entry = False
        if stats and stats.value:
            entry.float_ops = int(stats.value)
            add_entry = True
        if add_trace:
            if op.traceback:
                for filename, lineno, funcname, line in op.traceback:
                    trace = entry.code_def.traces.add()
                    trace.file_id = _str_id(filename, string_to_id) if filename else 0
                    trace.lineno = lineno if lineno else -1
                    trace.function_id = _str_id(funcname, string_to_id) if funcname else 0
                    trace.line_id = _str_id(line, string_to_id) if line else 0
                    trace.func_start_line = -1
                add_entry = True
        if add_entry:
            logged_ops[entry.name] = entry
    if add_trainable_var:
        # Tag trainable variables so the profiler can select them by type.
        for v in graph.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES):
            if v.op.name not in logged_ops:
                entry = tfprof_log_pb2.OpLogEntry()
                entry.name = v.op.name
                entry.types.append(TRAINABLE_VARIABLES)
                logged_ops[entry.name] = entry
            else:
                logged_ops[v.op.name].types.append(TRAINABLE_VARIABLES)
    if op_missing_shape > 0 and (not run_meta):
        sys.stderr.write('%d ops no flops stats due to incomplete shapes.\n' % op_missing_shape)
    return (logged_ops, string_to_id)
Extract trainable model parameters and FLOPs for ops from a Graph. Args: graph: tf.Graph. run_meta: RunMetadata proto used to complete shape information. add_trace: Whether to add op trace information. add_trainable_var: Whether to assign tf.compat.v1.trainable_variables() op type '_trainable_variables'. Returns: logged_ops: dict mapping from op_name to OpLogEntry. string_to_id: dict mapping from string to id.
github-repos
def resolve_topic(topic):
    """Return class described by given topic.

    A topic has the form ``"<module path>#<attribute path>"``; the dump of this
    source lost the ``'#'`` separator literal (everything after it on the line
    was stripped as a comment), which is restored here.

    Args:
        topic: A string describing a class.

    Returns:
        A class.

    Raises:
        TopicResolutionError: If there is no such class.
    """
    try:
        # Split "module#Class" into its module path and attribute path.
        module_name, _, class_name = topic.partition('#')
        module = importlib.import_module(module_name)
    except ImportError as e:
        raise TopicResolutionError("{}: {}".format(topic, e))
    try:
        cls = resolve_attr(module, class_name)
    except AttributeError as e:
        raise TopicResolutionError("{}: {}".format(topic, e))
    return cls
Return class described by given topic. Args: topic: A string describing a class. Returns: A class. Raises: TopicResolutionError: If there is no such class.
juraj-google-style
def read_tracers_h5(xdmf_file, infoname, snapshot, position):
    """Extract tracers data from hdf5 files.

    Args:
        xdmf_file (:class:`pathlib.Path`): path of the xdmf file.
        infoname (str): name of information to extract.
        snapshot (int): snapshot number.
        position (bool): whether to extract position of tracers.

    Returns:
        dict of list of numpy.array: Tracers data organized by attribute and
        block (index 0/1 of each list is one of the two grid blocks).
    """
    xdmf_root = xmlET.parse(str(xdmf_file)).getroot()
    # tra[attribute] is a pair of dicts, one per block, keyed by core index.
    tra = {}
    tra[infoname] = [{}, {}]
    if position:
        for axis in 'xyz':
            tra[axis] = [{}, {}]
    for elt_subdomain in xdmf_root[0][0][snapshot].findall('Grid'):
        # Block index: 1 when the subdomain belongs to the 'meshYang' grid.
        ibk = int(elt_subdomain.get('Name').startswith('meshYang'))
        if position:
            for data_attr in elt_subdomain.findall('Geometry'):
                # Geometry DataItems are ordered x, y, z.
                for (data_item, axis) in zip(data_attr.findall('DataItem'), 'xyz'):
                    (icore, data) = _get_field(xdmf_file, data_item)
                    tra[axis][ibk][icore] = data
        for data_attr in elt_subdomain.findall('Attribute'):
            # Only the requested attribute is extracted.
            if (data_attr.get('Name') != infoname):
                continue
            (icore, data) = _get_field(xdmf_file, data_attr.find('DataItem'))
            tra[infoname][ibk][icore] = data
    for info in tra:
        # Drop empty blocks, then concatenate per-core arrays in core order.
        tra[info] = [trab for trab in tra[info] if trab]
        for (iblk, trab) in enumerate(tra[info]):
            tra[info][iblk] = np.concatenate([trab[icore] for icore in range(len(trab))])
    return tra
Extract tracers data from hdf5 files. Args: xdmf_file (:class:`pathlib.Path`): path of the xdmf file. infoname (str): name of information to extract. snapshot (int): snapshot number. position (bool): whether to extract position of tracers. Returns: dict of list of numpy.array: Tracers data organized by attribute and block.
codesearchnet
def create_vpc_flow_logs(self, account, region, vpc_id, iam_role_arn):
    """Create a new VPC Flow log.

    Args:
        account (:obj:`Account`): Account to create the flow in
        region (`str`): Region to create the flow in
        vpc_id (`str`): ID of the VPC to create the flow for
        iam_role_arn (`str`): ARN of the IAM role used to post logs to the
            log group

    Returns:
        `None`
    """
    try:
        flow = self.session.client('ec2', region)
        # Log group is named after the VPC itself.
        flow.create_flow_logs(ResourceIds=[vpc_id], ResourceType='VPC', TrafficType='ALL', LogGroupName=vpc_id, DeliverLogsPermissionArn=iam_role_arn)
        # Record the new state on the local VPC resource object.
        fvpc = VPC.get(vpc_id)
        fvpc.set_property('vpc_flow_logs_status', 'ACTIVE')
        self.log.info('Enabled VPC Logging {}/{}/{}'.format(account, region, vpc_id))
        auditlog(event='vpc_flow_logs.create_vpc_flow', actor=self.ns, data={'account': account.account_name, 'region': region, 'vpcId': vpc_id, 'arn': iam_role_arn})
    except Exception:
        # Best-effort: failures are logged (with traceback) but not raised.
        self.log.exception('Failed creating VPC Flow Logs for {}/{}/{}.'.format(account, region, vpc_id))
Create a new VPC Flow log Args: account (:obj:`Account`): Account to create the flow in region (`str`): Region to create the flow in vpc_id (`str`): ID of the VPC to create the flow for iam_role_arn (`str`): ARN of the IAM role used to post logs to the log group Returns: `None`
codesearchnet
def override_from_dict(self, values_dict):
    """Override existing hyperparameter values from a name-to-value mapping.

    Args:
        values_dict: Dictionary of name:value pairs.

    Returns:
        The `HParams` instance (for call chaining).

    Raises:
        KeyError: If a hyperparameter in `values_dict` doesn't exist.
        ValueError: If `values_dict` cannot be parsed.
    """
    for hp_name, hp_value in values_dict.items():
        self.set_hparam(hp_name, hp_value)
    # Fluent interface: allow hparams.override_from_dict(...).parse(...).
    return self
Override existing hyperparameter values, parsing new values from a dictionary. Args: values_dict: Dictionary of name:value pairs. Returns: The `HParams` instance. Raises: KeyError: If a hyperparameter in `values_dict` doesn't exist. ValueError: If `values_dict` cannot be parsed.
juraj-google-style
def docx_text_from_xml_node(node: ElementTree.Element, level: int, config: TextProcessingConfig) -> str:
    """Returns text from an XML node within a DOCX file.

    Args:
        node: an XML node
        level: current level in XML hierarchy (used for recursion; start
            level is 0)
        config: :class:`TextProcessingConfig` control object

    Returns:
        contents as a string
    """
    pieces = []
    tag = node.tag
    # Leaf-level text/whitespace elements.
    if tag == DOCX_TEXT:
        pieces.append(node.text or '')
    elif tag == DOCX_TAB:
        pieces.append('\t')
    elif tag in DOCX_NEWLINES:
        pieces.append('\n')
    elif tag == DOCX_NEWPARA:
        pieces.append('\n\n')
    # Tables are rendered whole; otherwise recurse into the children.
    if tag == DOCX_TABLE:
        pieces.append('\n\n' + docx_table_from_xml_node(node, level, config))
    else:
        pieces.extend(
            docx_text_from_xml_node(child, level + 1, config) for child in node
        )
    return ''.join(pieces)
Returns text from an XML node within a DOCX file. Args: node: an XML node level: current level in XML hierarchy (used for recursion; start level is 0) config: :class:`TextProcessingConfig` control object Returns: contents as a string
juraj-google-style
def convert(self):
    """Converts a keras model based on instance variables.

    Returns:
        The converted data in serialized format.

    Raises:
        ValueError:
            Multiple concrete functions are specified.
            Input shape is not specified.
            Invalid quantization parameters.
    """
    # Preferred path: convert via SavedModel when possible.
    saved_model_convert_result = self._convert_as_saved_model()
    if saved_model_convert_result:
        return saved_model_convert_result
    # Fallback: freeze the keras model into a GraphDef and convert that.
    graph_def, input_tensors, output_tensors, frozen_func = self._freeze_keras_model()
    graph_def = self._optimize_tf_model(graph_def, input_tensors, output_tensors, frozen_func)
    return super(TFLiteKerasModelConverterV2, self).convert(graph_def, input_tensors, output_tensors)
Converts a keras model based on instance variables. Returns: The converted data in serialized format. Raises: ValueError: Multiple concrete functions are specified. Input shape is not specified. Invalid quantization parameters.
github-repos
def __init__(self, node: cfg.CFGNode, ctx: _ContextType, f_code: blocks.OrderedCode, f_globals: abstract.LazyConcreteDict, f_locals: abstract.LazyConcreteDict, f_back: FrameType, callargs: dict[str, cfg.Variable], closure: tuple[cfg.Variable, ...] | None, func: cfg.Binding | None, first_arg: cfg.Variable | None, substs: Collection[dict[str, cfg.Variable]]):
    """Initialize a special frame as needed by TypegraphVirtualMachine.

    Args:
        node: The current CFG graph node.
        ctx: The owning abstract context.
        f_code: The code object to execute in this frame.
        f_globals: The global context to execute in as a SimpleValue as used by
            TypegraphVirtualMachine.
        f_locals: Local variables. Will be modified if callargs is passed.
        f_back: The frame above this one on the stack.
        callargs: Additional function arguments to store in f_locals.
        closure: A tuple containing the new co_freevars.
        func: Optionally, a binding to the function this frame corresponds to.
        first_arg: First argument to the function.
        substs: Maps from type parameter names in scope for this frame to their
            possible values.

    Raises:
        NameError: If we can't resolve any references into the outer frame.
    """
    super().__init__(ctx)
    self.node = node
    self.current_opcode = None
    self.f_code = f_code
    self.states = {}
    self.f_globals = f_globals
    self.f_locals = f_locals
    self.f_back = f_back
    # Inherit builtins from the calling frame when available; otherwise
    # resolve '__builtins__' out of the globals.
    if f_back and f_back.f_builtins:
        self.f_builtins = f_back.f_builtins
    else:
        _, bltin = self.ctx.attribute_handler.get_attribute(self.ctx.root_node, f_globals, '__builtins__')
        builtins_pu, = bltin.bindings
        self.f_builtins = builtins_pu.data
    self.f_lineno = f_code.firstlineno
    self.first_arg = first_arg
    self.allowed_returns = None
    self.check_return = False
    self.return_variable = self.ctx.program.NewVariable()
    self.yield_variable = self.ctx.program.NewVariable()
    self.current_block = None
    self.targets = collections.defaultdict(list)
    self.overloads = collections.defaultdict(list)
    self.closure = closure
    freevars = closure or []
    assert len(f_code.freevars) == len(freevars)
    # Cell layout changed in 3.11: cells live at the front of localsplus
    # (before the freevars) instead of in a dedicated cellvars tuple.
    if self.ctx.python_version < (3, 11):
        cell_names = f_code.cellvars
    elif freevars:
        cell_names = f_code.localsplus[:-len(freevars)]
    else:
        cell_names = f_code.localsplus
    self.cells = [self.ctx.program.NewVariable() for _ in cell_names]
    self.cells.extend(freevars)
    if callargs:
        # Sorted for deterministic processing order. Arguments captured by a
        # cell are stored in the cell; the rest go into f_locals.
        for name, value in sorted(callargs.items()):
            if name in f_code.cellvars:
                i = cell_names.index(name)
                self.cells[i].PasteVariable(value, node)
            else:
                self.ctx.attribute_handler.set_attribute(node, f_locals, name, value)
    # The closure cell a class-building function uses (for __class__), if any.
    self.class_closure_var = None
    if func and isinstance(func.data, abstract.InterpreterFunction):
        closure_name = abstract.BuildClass.CLOSURE_NAME
        if func.data.is_class_builder and closure_name in f_code.cellvars:
            self.class_closure_var = self.get_cell_by_name(closure_name)
    self.func = func
    self.substs = substs
    self.skip_in_tracebacks = False
    if f_code.filename:
        self.module_name = module_utils.path_to_module_name(f_code.filename)
    else:
        self.module_name = ''
    # Name -> functions defined while executing this frame.
    self.functions_created_in_frame: dict[str, list[abstract.InterpreterFunction]] = collections.defaultdict(list)
Initialize a special frame as needed by TypegraphVirtualMachine. Args: node: The current CFG graph node. ctx: The owning abstract context. f_code: The code object to execute in this frame. f_globals: The global context to execute in as a SimpleValue as used by TypegraphVirtualMachine. f_locals: Local variables. Will be modified if callargs is passed. f_back: The frame above this one on the stack. callargs: Additional function arguments to store in f_locals. closure: A tuple containing the new co_freevars. func: Optionally, a binding to the function this frame corresponds to. first_arg: First argument to the function. substs: Maps from type parameter names in scope for this frame to their possible values. Raises: NameError: If we can't resolve any references into the outer frame.
github-repos
def _xys(date):
    """Get the X, Y and s coordinates.

    Args:
        date (Date):

    Return:
        3-tuple of float: Values of X, Y and s, in radians
    """
    x_as, y_as, s_xy2 = _xysxy2(date)
    # Apply the EOP corrections (dx/dy are in milliarcseconds).
    x_as = x_as + date.eop.dx / 1000.
    y_as = y_as + date.eop.dy / 1000.
    # Convert arcseconds to radians.
    x_rad = np.radians(x_as / 3600.)
    y_rad = np.radians(y_as / 3600.)
    s_rad = np.radians(s_xy2 / 3600.) - x_rad * y_rad / 2
    return x_rad, y_rad, s_rad
Get The X, Y and s coordinates Args: date (Date): Return: 3-tuple of float: Values of X, Y and s, in radians
juraj-google-style
def determine_plasma_store_config(object_store_memory=None, plasma_directory=None, huge_pages=False): system_memory = ray.utils.get_system_memory() if (object_store_memory is None): object_store_memory = int((system_memory * 0.3)) if (object_store_memory > ray_constants.DEFAULT_OBJECT_STORE_MAX_MEMORY_BYTES): logger.warning(('Warning: Capping object memory store to {}GB. '.format((ray_constants.DEFAULT_OBJECT_STORE_MAX_MEMORY_BYTES object_store_memory = ray_constants.DEFAULT_OBJECT_STORE_MAX_MEMORY_BYTES if (plasma_directory is None): if ((sys.platform == 'linux') or (sys.platform == 'linux2')): shm_avail = ray.utils.get_shared_memory_bytes() if (shm_avail > object_store_memory): plasma_directory = '/dev/shm' else: plasma_directory = '/tmp' logger.warning("WARNING: The object store is using /tmp instead of /dev/shm because /dev/shm has only {} bytes available. This may slow down performance! You may be able to free up space by deleting files in /dev/shm or terminating any running plasma_store_server processes. If you are inside a Docker container, you may need to pass an argument with the flag '--shm-size' to 'docker run'.".format(shm_avail)) else: plasma_directory = '/tmp' if (object_store_memory > system_memory): raise Exception('The requested object store memory size is greater than the total available memory.') else: plasma_directory = os.path.abspath(plasma_directory) logger.warning('WARNING: object_store_memory is not verified when plasma_directory is set.') if (not os.path.isdir(plasma_directory)): raise Exception('The file {} does not exist or is not a directory.'.format(plasma_directory)) return (object_store_memory, plasma_directory)
Figure out how to configure the plasma object store. This will determine which directory to use for the plasma store (e.g., /tmp or /dev/shm) and how much memory to start the store with. On Linux, we will try to use /dev/shm unless the shared memory file system is too small, in which case we will fall back to /tmp. If any of the object store memory or plasma directory parameters are specified by the user, then those values will be preserved. Args: object_store_memory (int): The user-specified object store memory parameter. plasma_directory (str): The user-specified plasma directory parameter. huge_pages (bool): The user-specified huge pages parameter. Returns: A tuple of the object store memory to use and the plasma directory to use. If either of these values is specified by the user, then that value will be preserved.
codesearchnet
def parse_isoformat(timestamp):
    """Parse an ISO 8601 formatted time stamp.

    Args:
        timestamp (str): Timestamp to parse

    Returns:
        Timestamp: Parsed timestamp
    """
    # The total length tells us which zone representation is in use:
    #   20 -> '...Z' (UTC), 24 -> '...+hhmm', 25 -> '...+hh:mm'.
    size = len(timestamp)
    if size == 20:
        zone = TzOffset('+00:00')
        timestamp = timestamp[:-1]
    elif size == 24:
        zone = TzOffset('%s:%s' % (timestamp[-5:-2], timestamp[-2:]))
        timestamp = timestamp[:-5]
    elif size == 25:
        zone = TzOffset(timestamp[-6:])
        timestamp = timestamp[:-6]
    parsed = Timestamp.strptime(timestamp, '%Y-%m-%dT%H:%M:%S')
    return parsed.replace(tzinfo=zone)
Parse an ISO 8601 formatted time stamp. Args: timestamp (str): Timestamp to parse Returns: Timestamp: Parsed timestamp
codesearchnet
def get_op_or_tensor_by_name(name):
    """Get either tf.Operation or tf.Tensor from names.

    Args:
        name (list[str] or str): names of operations or tensors.

    Raises:
        KeyError, if the name doesn't exist
    """
    graph = tfv1.get_default_graph()

    def lookup(n):
        # A tensor name ends with ':<output index>'; anything else is an op.
        looks_like_tensor = len(n) >= 3 and n[-2] == ':'
        if looks_like_tensor:
            return graph.get_tensor_by_name(n)
        return graph.get_operation_by_name(n)

    if isinstance(name, list):
        return [lookup(n) for n in name]
    return lookup(name)
Get either tf.Operation of tf.Tensor from names. Args: name (list[str] or str): names of operations or tensors. Raises: KeyError, if the name doesn't exist
codesearchnet
def reset(self):
    """Reset the estimates of mean and variance.

    Resets the full state of this class.

    Returns:
        Operation.
    """
    with tf.name_scope((self._name + '/reset')):
        # Zero the sample count and both running statistics as one grouped op.
        return tf.group(self._count.assign(0), self._mean.assign(tf.zeros_like(self._mean)), self._var_sum.assign(tf.zeros_like(self._var_sum)))
Reset the estimates of mean and variance. Resets the full state of this class. Returns: Operation.
codesearchnet
def _make_ctx_options(ctx_options, config_cls=ContextOptions):
    """Helper to construct a ContextOptions object from keyword arguments.

    Args:
        ctx_options: A dict of keyword arguments.
        config_cls: Optional Configuration class to use, default
            ContextOptions.

    Returns:
        A Configuration object, or None if ctx_options is empty.
    """
    if not ctx_options:
        return None
    # Rewrite deprecated option names to their canonical equivalents in place.
    for original_key in list(ctx_options):
        canonical = _OPTION_TRANSLATIONS.get(original_key)
        if not canonical:
            continue
        if canonical in ctx_options:
            raise ValueError('Cannot specify %s and %s at the same time' % (original_key, canonical))
        ctx_options[canonical] = ctx_options.pop(original_key)
    return config_cls(**ctx_options)
Helper to construct a ContextOptions object from keyword arguments. Args: ctx_options: A dict of keyword arguments. config_cls: Optional Configuration class to use, default ContextOptions. Note that either 'options' or 'config' can be used to pass another Configuration object, but not both. If another Configuration object is given it provides default values. Returns: A Configuration object, or None if ctx_options is empty.
codesearchnet
def waitAndGet(self, event_name, timeout=DEFAULT_TIMEOUT):
    """Blocks until an event of the specified name has been received and
    return the event, or timeout.

    Args:
        event_name: string, name of the event to get.
        timeout: float, the number of seconds to wait before giving up.

    Returns:
        SnippetEvent, the oldest entry of the specified event.

    Raises:
        Error: If the specified timeout is longer than the max timeout
            supported.
        TimeoutError: The expected event does not occur within time limit.
    """
    if timeout:
        if (timeout > MAX_TIMEOUT):
            raise Error(self._ad, ('Specified timeout %s is longer than max timeout %s.' % (timeout, MAX_TIMEOUT)))
        # The RPC layer takes milliseconds.
        timeout_ms = int((timeout * 1000))
    # NOTE(review): if timeout is falsy (0/None), timeout_ms is never bound
    # and the call below raises NameError — confirm callers always pass a
    # positive timeout.
    try:
        raw_event = self._event_client.eventWaitAndGet(self._id, event_name, timeout_ms)
    except Exception as e:
        # The remote side signals a timeout via this exception message.
        if ('EventSnippetException: timeout.' in str(e)):
            raise TimeoutError(self._ad, ('Timed out after waiting %ss for event "%s" triggered by %s (%s).' % (timeout, event_name, self._method_name, self._id)))
        raise
    return snippet_event.from_dict(raw_event)
Blocks until an event of the specified name has been received and return the event, or timeout. Args: event_name: string, name of the event to get. timeout: float, the number of seconds to wait before giving up. Returns: SnippetEvent, the oldest entry of the specified event. Raises: Error: If the specified timeout is longer than the max timeout supported. TimeoutError: The expected event does not occur within time limit.
codesearchnet
def _get_common_params(self, user_id, attributes):
    """Get params which are used same in both conversion and impression events.

    Args:
        user_id: ID for user.
        attributes: Dict representing user attributes and values which need to
            be recorded.

    Returns:
        Dict consisting of parameters common to both impression and conversion
        events.
    """
    params = self.EventParams
    visitor = {
        params.END_USER_ID: user_id,
        params.SNAPSHOTS: [],
    }
    common = {
        params.PROJECT_ID: self._get_project_id(),
        params.ACCOUNT_ID: self._get_account_id(),
        params.USERS: [visitor],
    }
    # Getter calls are made in the same order as the original implementation,
    # in case any of them have side effects.
    visitor[params.ATTRIBUTES] = self._get_attributes(attributes)
    common[params.SOURCE_SDK_TYPE] = 'python-sdk'
    common[params.ENRICH_DECISIONS] = True
    common[params.SOURCE_SDK_VERSION] = version.__version__
    common[params.ANONYMIZE_IP] = self._get_anonymize_ip()
    common[params.REVISION] = self._get_revision()
    return common
Get params which are used same in both conversion and impression events. Args: user_id: ID for user. attributes: Dict representing user attributes and values which need to be recorded. Returns: Dict consisting of parameters common to both impression and conversion events.
codesearchnet
def load_model(self, the_metamodel, filename, is_main_model, encoding='utf-8', add_to_local_models=True):
    """Load a single model.

    Args:
        the_metamodel: the metamodel used to load the model
        filename: the model to be loaded (if not cached)
        is_main_model: whether this is the entry-point model of the load.
        encoding: text encoding used when parsing the file.
        add_to_local_models: whether to also register the model in the
            local cache.

    Returns:
        the loaded/cached model
    """
    if (not self.local_models.has_model(filename)):
        if self.all_models.has_model(filename):
            # Already parsed for another repository: reuse the shared instance.
            new_model = self.all_models.filename_to_model[filename]
        else:
            # Parse from file; the callback lets referenced models resolve
            # through this repository before reference resolution runs.
            new_model = the_metamodel.internal_model_from_file(filename, pre_ref_resolution_callback=(lambda other_model: self.pre_ref_resolution_callback(other_model)), is_main_model=is_main_model, encoding=encoding)
            self.all_models.filename_to_model[filename] = new_model
        if add_to_local_models:
            self.local_models.filename_to_model[filename] = new_model
    # Invariant: by now the model is registered in the global cache.
    assert self.all_models.has_model(filename)
    return self.all_models.filename_to_model[filename]
load a single model Args: the_metamodel: the metamodel used to load the model filename: the model to be loaded (if not cached) Returns: the loaded/cached model
codesearchnet