code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def logdet(x):
    """Computes the natural log of the determinant of a hermitian
    positive definite matrix.

    Args:
        x: Input matrix. It must be 2D and square.

    Returns:
        The natural log of the determinant of the matrix.
    """
    if any_symbolic_tensors((x,)):
        # Build a symbolic graph node instead of computing eagerly.
        return Logdet().symbolic_call(x)
    return backend.math.logdet(x)
Computes the log of the determinant of a hermitian positive definite matrix. Args: x: Input matrix. It must be 2D and square. Returns: The natural log of the determinant of the matrix.
github-repos
def _locate_elements_in_line(line, indices_list, ref_indices):
    """Determine the start and end columns of elements in a printed line.

    Args:
        line: (str) the line in which the elements are to be sought.
        indices_list: (list of list of int) indices of the elements to
            search for; assumed unique and sorted in ascending order.
        ref_indices: (list of int) indices of the first element
            represented in the line.

    Returns:
        (start_columns, end_columns): parallel lists of int or None; an
        entry is None when the element was not found. A found element
        occupies the closed-open interval [start_column, end_column].
    """
    batch_size = len(indices_list)
    # Offsets of each sought element relative to the line's first element,
    # along the innermost dimension.
    offsets = [indices[-1] - ref_indices[-1] for indices in indices_list]
    start_columns = [None] * batch_size
    end_columns = [None] * batch_size
    if _NUMPY_OMISSION in line:
        ellipsis_index = line.find(_NUMPY_OMISSION)
    else:
        ellipsis_index = len(line)
    matches_iter = re.finditer(_NUMBER_REGEX, line)
    batch_pos = 0
    offset_counter = 0
    for match in matches_iter:
        if match.start() > ellipsis_index:
            # Elements hidden behind the numpy "..." omission marker are
            # not locatable; stop scanning.
            break
        if offset_counter == offsets[batch_pos]:
            start_columns[batch_pos] = match.start()
            end_columns[batch_pos] = match.end() - 1
            batch_pos += 1
            if batch_pos >= batch_size:
                break
        offset_counter += 1
    return (start_columns, end_columns)
Determine the start and end indices of an element in a line. Args: line: (str) the line in which the element is to be sought. indices_list: (list of list of int) list of indices of the element to search for. Assumes that the indices in the batch are unique and sorted in ascending order. ref_indices: (list of int) reference indices, i.e., the indices of the first element represented in the line. Returns: start_columns: (list of int) start column indices, if found. If not found, None. end_columns: (list of int) end column indices, if found. If not found, None. If found, the element is represented in the left-closed-right-open interval [start_column, end_column].
github-repos
def match_hail_size_step_distributions(self, model_tracks, obs_tracks, track_pairings):
    """Attaches matched-observation hail statistics to each model track.

    For every model track timestep that has matched observations, gathers
    the observed hail sizes, records the max size and match count, and
    fits a gamma distribution to the combined sizes.

    Args:
        model_tracks: model storm tracks; each gains an ``observations``
            DataFrame indexed by track time.
        obs_tracks: observed storm tracks providing ``timesteps`` grids
            and ``masks``.
        track_pairings: DataFrame with per-step "Matched" flags and
            "Pairings" lists of (obs_track_index, step_index) tuples.
    """
    label_columns = ["Matched", "Max_Hail_Size", "Num_Matches", "Shape", "Location", "Scale"]
    # Running row index into track_pairings, advanced once per timestep
    # across all model tracks.
    s = 0
    for m, model_track in enumerate(model_tracks):
        model_track.observations = pd.DataFrame(index=model_track.times,
                                                columns=label_columns,
                                                dtype=np.float64)
        model_track.observations.loc[:, :] = 0
        model_track.observations["Matched"] = model_track.observations["Matched"].astype(np.int32)
        for t, time in enumerate(model_track.times):
            model_track.observations.loc[time, "Matched"] = track_pairings.loc[s, "Matched"]
            if model_track.observations.loc[time, "Matched"] > 0:
                all_hail_sizes = []
                step_pairs = track_pairings.loc[s, "Pairings"]
                for step_pair in step_pairs:
                    obs_step = obs_tracks[step_pair[0]].timesteps[step_pair[1]].ravel()
                    obs_mask = obs_tracks[step_pair[0]].masks[step_pair[1]].ravel()
                    # Keep only in-mask pixels above the observation threshold.
                    all_hail_sizes.append(obs_step[(obs_mask == 1) & (obs_step >= self.mrms_ew.min_thresh)])
                combined_hail_sizes = np.concatenate(all_hail_sizes)
                # Fix the gamma location just below the smallest observed size.
                min_hail = combined_hail_sizes.min() - 0.1
                model_track.observations.loc[time, "Max_Hail_Size"] = combined_hail_sizes.max()
                model_track.observations.loc[time, "Num_Matches"] = step_pairs.shape[0]
                model_track.observations.loc[time, ["Shape", "Location", "Scale"]] = gamma.fit(combined_hail_sizes, floc=min_hail)
            # NOTE(review): placed per-timestep based on the pairing-row
            # semantics; the flattened source was ambiguous — confirm.
            s += 1
Given a matching set of observed tracks for each model track, Args: model_tracks: obs_tracks: track_pairings: Returns:
juraj-google-style
def transpose(self):
    """Builds a new graph with the edges reversed.

    Returns:
        DAG: the transposed graph (same nodes, each edge a->b becomes b->a).
    """
    graph = self.graph
    transposed = DAG()
    # Register every node first so isolated nodes survive the transpose.
    for node, edges in graph.items():
        transposed.add_node(node)
    for node, edges in graph.items():
        for edge in edges:
            transposed.add_edge(edge, node)
    return transposed
Builds a new graph with the edges reversed. Returns: :class:`stacker.dag.DAG`: The transposed graph.
codesearchnet
def get_memberships(self):
    """Fetches all group memberships from the REST endpoint.

    Returns:
        dict: group name -> {'users': [names], 'groups': [names]},
        or None if the HTTP request failed.
    """
    response = self._get_xml(self.rest_url + '/group/membership')
    if not response.ok:
        return None
    xmltree = etree.fromstring(response.content)
    memberships = {}
    for mg in xmltree.findall('membership'):
        group = u'{}'.format(mg.get('group'))
        users = [u'{}'.format(u.get('name'))
                 for u in mg.find('users').findall('user')]
        groups = [u'{}'.format(g.get('name'))
                  for g in mg.find('groups').findall('group')]
        memberships[group] = {u'users': users, u'groups': groups}
    return memberships
Fetches all group memberships. Returns: dict: key: group name value: (array of users, array of groups)
codesearchnet
def distance_and_image_from_frac_coords(self, fcoords, jimage=None):
    """Gets the distance between this site and a fractional coordinate,
    assuming periodic boundary conditions.

    Args:
        fcoords (3x1 array): fractional coordinates to get distance from.
        jimage (3x1 array): specific periodic image in terms of lattice
            translations; if None, the nearest image is used.

    Returns:
        (distance, jimage): distance and the periodic lattice translations
        of the other site for which the distance applies.
    """
    # Delegate entirely to the lattice, which knows the periodic geometry.
    return self.lattice.get_distance_and_image(
        self.frac_coords, fcoords, jimage=jimage)
Gets distance between site and a fractional coordinate assuming periodic boundary conditions. If the index jimage of two sites atom j is not specified it selects the j image nearest to the i atom and returns the distance and jimage indices in terms of lattice vector translations. If the index jimage of atom j is specified it returns the distance between the i atom and the specified jimage atom, the given jimage is also returned. Args: fcoords (3x1 array): fcoords to get distance from. jimage (3x1 array): Specific periodic image in terms of lattice translations, e.g., [1,0,0] implies to take periodic image that is one a-lattice vector away. If jimage is None, the image that is nearest to the site is found. Returns: (distance, jimage): distance and periodic lattice translations of the other site for which the distance applies.
codesearchnet
def push(self, targets, jobs=None, remote=None, show_checksums=False):
    """Push data items in a cloud-agnostic way.

    Args:
        targets (list): targets to push to the cloud.
        jobs (int): number of simultaneous jobs.
        remote: optional remote to push to; defaults to the remote from
            the core.remote config option.
        show_checksums (bool): show checksums instead of file names.
    """
    resolved_remote = self._get_cloud(remote, "push")
    local_cache = self.repo.cache.local
    return local_cache.push(targets,
                            jobs=jobs,
                            remote=resolved_remote,
                            show_checksums=show_checksums)
Push data items in a cloud-agnostic way. Args: targets (list): list of targets to push to the cloud. jobs (int): number of jobs that can be running simultaneously. remote (dvc.remote.base.RemoteBase): optional remote to push to. By default remote from core.remote config option is used. show_checksums (bool): show checksums instead of file names in information messages.
juraj-google-style
def regression_signature_def(examples, predictions):
    """Creates a regression signature from given examples and predictions.

    Intended for the TensorFlow Serving Regress API, which constrains the
    input to string tensors and the output to float tensors.

    Args:
        examples: A string `Tensor`, expected to accept serialized
            tf.Examples.
        predictions: A float `Tensor`.

    Returns:
        A regression-flavored signature_def.

    Raises:
        ValueError: If examples/predictions are None or have wrong types.
    """
    if examples is None:
        raise ValueError('Regression `examples` cannot be None.')
    if not isinstance(examples, tensor_lib.Tensor):
        raise ValueError(f'Expected regression `examples` to be of type Tensor. Found `examples` of type {type(examples)}.')
    if predictions is None:
        raise ValueError('Regression `predictions` cannot be None.')
    input_tensor_info = utils.build_tensor_info(examples)
    if input_tensor_info.dtype != types_pb2.DT_STRING:
        raise ValueError(f'Regression input tensors must be of type string. Found tensors with type {input_tensor_info.dtype}.')
    signature_inputs = {signature_constants.REGRESS_INPUTS: input_tensor_info}
    output_tensor_info = utils.build_tensor_info(predictions)
    if output_tensor_info.dtype != types_pb2.DT_FLOAT:
        raise ValueError(f'Regression output tensors must be of type float. Found tensors with type {output_tensor_info.dtype}.')
    signature_outputs = {signature_constants.REGRESS_OUTPUTS: output_tensor_info}
    signature_def = build_signature_def(signature_inputs, signature_outputs,
                                        signature_constants.REGRESS_METHOD_NAME)
    return signature_def
Creates regression signature from given examples and predictions. This function produces signatures intended for use with the TensorFlow Serving Regress API (tensorflow_serving/apis/prediction_service.proto), and so constrains the input and output types to those allowed by TensorFlow Serving. Args: examples: A string `Tensor`, expected to accept serialized tf.Examples. predictions: A float `Tensor`. Returns: A regression-flavored signature_def. Raises: ValueError: If examples is `None`.
github-repos
def compute_one_decoding_video_metrics(iterator, feed_dict, num_videos):
    """Computes PSNR and SSIM for every video of one decoding.

    Args:
        iterator: dataset iterator yielding (output, target) pairs.
        feed_dict: feed dict used to initialize the iterator.
        num_videos: number of videos to evaluate.

    Returns:
        all_psnr: 2-D numpy array, shape=(num_samples, num_frames).
        all_ssim: 2-D numpy array, shape=(num_samples, num_frames).
    """
    output, target = iterator.get_next()
    metrics = psnr_and_ssim(output, target)
    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        initializer = iterator._initializer
        if initializer is not None:
            sess.run(initializer, feed_dict=feed_dict)
        all_psnr, all_ssim = [], []
        for i in range(num_videos):
            print("Computing video: %d" % i)
            psnr_np, ssim_np = sess.run(metrics)
            all_psnr.append(psnr_np)
            all_ssim.append(ssim_np)
        all_psnr = np.array(all_psnr)
        all_ssim = np.array(all_ssim)
        return all_psnr, all_ssim
Computes the average of all the metric for one decoding. Args: iterator: dataset iterator. feed_dict: feed dict to initialize iterator. num_videos: number of videos. Returns: all_psnr: 2-D Numpy array, shape=(num_samples, num_frames) all_ssim: 2-D Numpy array, shape=(num_samples, num_frames)
juraj-google-style
def compute_mel_filterbank_features(
        waveforms, sample_rate=16000, dither=1.0 / np.iinfo(np.int16).max,
        preemphasis=0.97, frame_length=25, frame_step=10, fft_length=None,
        window_fn=functools.partial(tf.contrib.signal.hann_window,
                                    periodic=True),
        lower_edge_hertz=80.0, upper_edge_hertz=7600.0, num_mel_bins=80,
        log_noise_floor=0.001, apply_mask=True):
    """Implement mel-filterbank extraction using tf ops.

    Args:
        waveforms: float32 tensor with shape [batch_size, max_len].
        sample_rate: sampling rate of the waveform.
        dither: stddev of Gaussian noise added to prevent quantization
            artefacts.
        preemphasis: waveform high-pass filtering constant.
        frame_length: frame length in ms.
        frame_step: frame step in ms.
        fft_length: number of fft bins; defaults to next power of two of
            the frame length.
        window_fn: windowing function.
        lower_edge_hertz: lowest frequency of the filterbank.
        upper_edge_hertz: highest frequency of the filterbank.
        num_mel_bins: filterbank size.
        log_noise_floor: clip small values to prevent numeric overflow
            in log.
        apply_mask: set padding frames to zero when batching.

    Returns:
        filterbanks: float32 tensor, shape [batch_size, len, num_bins, 1].
    """
    # Effective length of each waveform: index of last nonzero sample + 1
    # (zeros are assumed to be padding).
    wav_lens = tf.reduce_max(
        tf.expand_dims(tf.range(tf.shape(waveforms)[1]), 0) *
        tf.to_int32(tf.not_equal(waveforms, 0.0)),
        axis=-1) + 1
    if dither > 0:
        waveforms += tf.random_normal(tf.shape(waveforms), stddev=dither)
    if preemphasis > 0:
        waveforms = waveforms[:, 1:] - preemphasis * waveforms[:, :-1]
        wav_lens -= 1
    # Convert ms to samples.
    frame_length = int(frame_length * sample_rate / 1000.0)
    frame_step = int(frame_step * sample_rate / 1000.0)
    if fft_length is None:
        fft_length = int(2 ** np.ceil(np.log2(frame_length)))
    stfts = tf.contrib.signal.stft(waveforms, frame_length=frame_length,
                                   frame_step=frame_step,
                                   fft_length=fft_length,
                                   window_fn=window_fn, pad_end=True)
    # NOTE(review): the original expression was truncated in the source;
    # ceil-division by frame_step matches the pad_end=True frame count —
    # confirm against upstream.
    stft_lens = (wav_lens + (frame_step - 1)) // frame_step
    masks = tf.to_float(tf.less_equal(
        tf.expand_dims(tf.range(tf.shape(stfts)[1]), 0),
        tf.expand_dims(stft_lens, 1)))
    magnitude_spectrograms = tf.abs(stfts)
    num_spectrogram_bins = magnitude_spectrograms.shape[-1].value
    linear_to_mel_weight_matrix = tf.contrib.signal.linear_to_mel_weight_matrix(
        num_mel_bins, num_spectrogram_bins, sample_rate, lower_edge_hertz,
        upper_edge_hertz)
    mel_spectrograms = tf.tensordot(magnitude_spectrograms,
                                    linear_to_mel_weight_matrix, 1)
    # tensordot loses static shape information; restore it.
    mel_spectrograms.set_shape(
        magnitude_spectrograms.shape[:-1].concatenate(
            linear_to_mel_weight_matrix.shape[-1:]))
    log_mel_sgram = tf.log(tf.maximum(log_noise_floor, mel_spectrograms))
    if apply_mask:
        log_mel_sgram *= tf.expand_dims(tf.to_float(masks), -1)
    return tf.expand_dims(log_mel_sgram, -1, name='mel_sgrams')
Implement mel-filterbank extraction using tf ops. Args: waveforms: float32 tensor with shape [batch_size, max_len] sample_rate: sampling rate of the waveform dither: stddev of Gaussian noise added to waveform to prevent quantization artefacts preemphasis: waveform high-pass filtering constant frame_length: frame length in ms frame_step: frame step in ms fft_length: number of fft bins window_fn: windowing function lower_edge_hertz: lowest frequency of the filterbank upper_edge_hertz: highest frequency of the filterbank num_mel_bins: filterbank size log_noise_floor: clip small values to prevent numeric overflow in log apply_mask: When working on a batch of samples, set padding frames to zero Returns: filterbanks: a float32 tensor with shape [batch_size, len, num_bins, 1]
codesearchnet
def get_loss_reduction():
    """`tf.distribute.ReduceOp` corresponding to the last loss reduction.

    Returns:
        `tf.distribute.ReduceOp` for the last loss reduction in the
        estimator / v1 optimizer use case; `ReduceOp.SUM` otherwise.
    """
    if not distribute_lib.get_strategy()._scale_loss_for_estimator:
        # Outside the estimator path losses are always summed.
        return ReduceOp.SUM
    last_reduction = ops.get_default_graph()._last_loss_reduction
    if last_reduction == losses_impl.Reduction.SUM or last_reduction == 'sum':
        return ReduceOp.SUM
    return ReduceOp.MEAN
`tf.distribute.ReduceOp` corresponding to the last loss reduction. Returns: `tf.distribute.ReduceOp` corresponding to the last loss reduction for estimator and v1 optimizer use case. `tf.distribute.ReduceOp.SUM` otherwise.
github-repos
def call(command, collect_missing=False, silent=True):
    r"""Calls a task, as if it were called from the command line.

    Args:
        command (str): A route followed by params (as if entered in the
            shell).
        collect_missing (bool): Collect any missing argument for the
            command through the shell. Defaults to False.

    Returns:
        The return value of the called command.
    """
    # Silent mode routes through the internal executor variant.
    executor = _execCommand if silent else execCommand
    return executor(shlex.split(command), collect_missing)
r"""Calls a task, as if it were called from the command line. Args: command (str): A route followed by params (as if it were entered in the shell). collect_missing (bool): Collects any missing argument for the command through the shell. Defaults to False. Returns: The return value of the called command.
codesearchnet
def parse_numpy_printoption(kv_str):
    """Sets a single numpy printoption from a string of the form 'x=y'.

    See numpy.set_printoptions() for what x and y can take; x can be any
    option listed there other than 'formatter'.

    Args:
        kv_str: A string of the form 'x=y', e.g. 'threshold=100000'.

    Raises:
        argparse.ArgumentTypeError: If the string couldn't be used to set
            any numpy printoption.
    """
    k_v_str = kv_str.split('=', 1)
    if len(k_v_str) != 2 or not k_v_str[0]:
        raise argparse.ArgumentTypeError("'%s' is not in the form k=v." % kv_str)
    k, v_str = k_v_str
    printoptions = np.get_printoptions()
    if k not in printoptions:
        raise argparse.ArgumentTypeError("'%s' is not a valid printoption." % k)
    v_type = type(printoptions[k])
    if v_type is type(None):
        raise argparse.ArgumentTypeError("Setting '%s' from the command line is not supported." % k)
    try:
        v = (v_type(v_str) if v_type is not bool
             else flags.BooleanParser().parse(v_str))
    except ValueError as e:
        # Py3 fix: ValueError has no `.message` attribute; use the
        # exception text and preserve the cause chain.
        raise argparse.ArgumentTypeError(str(e)) from e
    np.set_printoptions(**{k: v})
Sets a single numpy printoption from a string of the form 'x=y'. See documentation on numpy.set_printoptions() for details about what values x and y can take. x can be any option listed there other than 'formatter'. Args: kv_str: A string of the form 'x=y', such as 'threshold=100000' Raises: argparse.ArgumentTypeError: If the string couldn't be used to set any numpy printoption.
github-repos
def execute_code_block(elem, doc):
    """Executes a code block by passing it to the executor.

    Args:
        elem: The AST element (a code block).
        doc: The document.

    Returns:
        str: the combined stdout/stderr of the command.
    """
    command = select_executor(elem, doc).split(' ')
    code = elem.text
    # A 'plt' attribute/class asks for the plot-saving wrapper.
    if ('plt' in elem.attributes) or ('plt' in elem.classes):
        code = save_plot(code, elem)
    command.append(code)
    if 'args' in elem.attributes:
        for arg in elem.attributes['args'].split():
            command.append(arg)
    cwd = elem.attributes['wd'] if 'wd' in elem.attributes else None
    # stderr is folded into stdout so callers get one output stream.
    return subprocess.run(command, encoding='utf8', stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT, cwd=cwd).stdout
Executes a code block by passing it to the executor. Args: elem The AST element. doc The document. Returns: The output of the command.
codesearchnet
async def do_upload(context, files):
    """Upload artifacts and return an exit status.

    Args:
        context (scriptworker.context.Context): the scriptworker context.
        files (list of str): files to be uploaded as artifacts.

    Raises:
        Exception: on unexpected exception (re-raised after logging).

    Returns:
        int: exit status (0 on success).
    """
    status = 0
    try:
        await upload_artifacts(context, files)
    except ScriptWorkerException as e:
        status = worst_level(status, e.exit_code)
        log.error('Hit ScriptWorkerException: {}'.format(e))
    except aiohttp.ClientError as e:
        # Network errors are treated as intermittent, not fatal.
        status = worst_level(status, STATUSES['intermittent-task'])
        log.error('Hit aiohttp error: {}'.format(e))
    except Exception as e:
        log.exception('SCRIPTWORKER_UNEXPECTED_EXCEPTION upload {}'.format(e))
        raise
    return status
Upload artifacts and return status. Returns the integer status of the upload. args: context (scriptworker.context.Context): the scriptworker context. files (list of str): list of files to be uploaded as artifacts Raises: Exception: on unexpected exception. Returns: int: exit status
codesearchnet
def new(cls, access_token, environment='prod'):
    """Create a new storage service client.

    Args:
        access_token (str): The access token used to authenticate with
            the service.
        environment (str): The service environment, 'prod' or 'dev'.

    Returns:
        A client instance wrapping a freshly built ApiClient.
    """
    api_client = ApiClient.new(access_token, environment)
    return cls(api_client)
Create new storage service client. Arguments: environment(str): The service environment to be used for the client. 'prod' or 'dev'. access_token(str): The access token used to authenticate with the service Returns: A storage_service.Client instance
codesearchnet
def set_status(self, status):
    """Updates the status text and status bar colour.

    Args:
        status (int): offline/starting/online status;
            0: offline, 1: starting, 2: online.
    """
    # NOTE(review): the original colour literals were lost in the source
    # (everything after '#' in the hex codes was stripped as a comment).
    # Red/orange/green are reconstructed values — confirm against VCS.
    text = ''
    colour = '#888888'
    if status == 0:
        text = 'OFFLINE'
        colour = '#ff0000'
    elif status == 1:
        text = 'STARTING'
        colour = '#ffaa00'
    elif status == 2:
        text = 'ONLINE'
        colour = '#00ff00'
    self.status.set(text)
    self.statusbar.config(background=colour)
Updates the status text Args: status (int): The offline/starting/online status of Modis 0: offline, 1: starting, 2: online
codesearchnet
def scroll(self, direction='vertical', percent=0.6, duration=2.0):
    """Touch down at point A and move to point B, then release.

    Args:
        direction (str): scrolling direction, "vertical" or "horizontal".
        percent (float): scrolling distance as a percentage of the
            selected UI height or width according to direction.
        duration (float): time interval in which the action is performed.

    Raises:
        ValueError: if direction is not "vertical" or "horizontal".
    """
    if direction not in ('vertical', 'horizontal'):
        raise ValueError('Argument `direction` should be one of "vertical" or "horizontal". Got {}'.format(repr(direction)))
    # Copy the stored focus: the original aliased self._focus and then
    # mutated it in place, so repeated scrolls drifted the stored focus.
    focus1 = list(self._focus or [0.5, 0.5])
    focus2 = list(focus1)
    half_distance = percent / 2
    if direction == 'vertical':
        focus1[1] += half_distance
        focus2[1] -= half_distance
    else:
        focus1[0] += half_distance
        focus2[0] -= half_distance
    return self.focus(focus1).drag_to(self.focus(focus2), duration=duration)
Simply touch down from point A and move to point B then release up finally. This action is performed within specific motion range and duration. Args: direction (:py:obj:`str`): scrolling direction. "vertical" or "horizontal" percent (:py:obj:`float`): scrolling distance percentage of selected UI height or width according to direction duration (:py:obj:`float`): time interval in which the action is performed Raises: PocoNoSuchNodeException: raised when the UI element does not exist
codesearchnet
def _unicode_def_src_to_str(srclist: List[Union[str, int]]) -> str:
    """Used to create :data:`UNICODE_CATEGORY_STRINGS`.

    Args:
        srclist: list of integers (Unicode code points) or hex range
            strings like ``"0061-007A"`` (inclusive).

    Returns:
        A string containing every character described by ``srclist``.
    """
    charlist = []  # type: List[str]
    for src in srclist:
        if isinstance(src, int):
            charlist.append(chr(src))
        else:
            # Hex range "XXXX-YYYY": expand inclusively.
            first, last = [int(x, 16) for x in src.split("-")]
            charlist += [chr(x) for x in range(first, last + 1)]
    return "".join(charlist)
Used to create :data:`UNICODE_CATEGORY_STRINGS`. Args: srclist: list of integers or hex range strings like ``"0061-007A"`` Returns: a string with all characters described by ``srclist``: either the character corresponding to the integer Unicode character number, or all characters corresponding to the inclusive range described
juraj-google-style
def bytestring_to_tar_tuple(filename, bytes):
    """Build a (TarInfo, BytesIO) pair for inserting bytes into a tar.

    Args:
        bytes (bytes): Bytestring representation of the file data.
        filename (str): Filepath relative to the tarfile root.

    Returns:
        tuple: (tarfile.TarInfo, io.BytesIO); can be passed directly to
        TarFile.addfile().
    """
    # NOTE: the parameter name `bytes` shadows the builtin; kept for
    # backward compatibility with keyword callers.
    info = tarfile.TarInfo(filename)
    info.size = len(bytes)
    return info, BytesIO(bytes)
Take a string + filename, return a (tarinfo, stringbuf) tuple for insertion. Args: bytes (bstring): Bytestring representation of the filedata. filename (string): Filepath relative to tarfile root. Returns: tuple: (tarfile.TarInfo,io.BytesIO). This can be passed directly to TarFile.addfile().
juraj-google-style
def tar_add_bytes(tf, filename, bytestring):
    """Add an in-memory file to a tar archive.

    Args:
        tf (tarfile.TarFile): archive to add the file to.
        filename (str): path of the entry within the archive.
        bytestring (bytes or str): file contents; str must be
            ascii-encodable and is encoded before writing.
    """
    if not isinstance(bytestring, bytes):
        bytestring = bytestring.encode('ascii')
    buff = io.BytesIO(bytestring)
    tarinfo = tarfile.TarInfo(filename)
    tarinfo.size = len(bytestring)
    tf.addfile(tarinfo, buff)
Add a file to a tar archive Args: tf (tarfile.TarFile): tarfile to add the file to filename (str): path within the tar file bytestring (bytes or str): file contents. Must be :class:`bytes` or ascii-encodable :class:`str`
codesearchnet
def _lookup_in_all_namespaces(self, symbol):
    """Helper for lookup_symbol that looks for symbols in all namespaces.

    Walks down the symbol's namespace path collecting nested namespace
    dicts, then tries a lookup from the innermost outward.

    Args:
        symbol: Symbol to resolve.

    Returns:
        The resolved symbol, or None if not found in any namespace.
    """
    namespace = self.namespaces
    namespace_stack = []
    for current in symbol.namespace_stack:
        namespace = namespace.get(current)
        if namespace is None or not isinstance(namespace, dict):
            break
        namespace_stack.append(namespace)
    # Innermost namespace wins; fall outward on lookup failure.
    for namespace in reversed(namespace_stack):
        try:
            return self._lookup_namespace(symbol, namespace)
        except Error:
            pass
    return None
Helper for lookup_symbol that looks for symbols in all namespaces. Args: symbol: Symbol
juraj-google-style
def CheckSupportedFormat(cls, path, check_readable_only=False):
    """Checks if the storage file format is supported.

    Args:
        path (str): path to the storage file.
        check_readable_only (Optional[bool]): whether the store should
            only be checked for readability; if False it is also checked
            for writability.

    Returns:
        bool: True if the format is supported.
    """
    try:
        connection = sqlite3.connect(
            path,
            detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)
        try:
            cursor = connection.cursor()
            query = 'SELECT * FROM metadata'
            cursor.execute(query)
            metadata_values = {row[0]: row[1] for row in cursor.fetchall()}
            cls._CheckStorageMetadata(
                metadata_values, check_readable_only=check_readable_only)
        finally:
            # Close even when the metadata checks raise, so the file
            # handle is never leaked.
            connection.close()
        result = True
    except (IOError, sqlite3.DatabaseError):
        result = False
    return result
Checks if the storage file format is supported. Args: path (str): path to the storage file. check_readable_only (Optional[bool]): whether the store should only be checked to see if it can be read. If False, the store will be checked to see if it can be read and written to. Returns: bool: True if the format is supported.
juraj-google-style
def _GetStructureValue(self, structure, key):
    """Retrieves a value from a parsed log line, removing empty results.

    Args:
        structure (pyparsing.ParseResults): parsed log line.
        key (str): results key to retrieve from the parsed log line.

    Returns:
        The value of the named key, or None if the value is itself a
        ParseResults object (an empty optional match).
    """
    value = structure.get(key)
    # pyparsing returns a ParseResults for missing optional tokens;
    # normalize those to None.
    return value if not isinstance(value, pyparsing.ParseResults) else None
Retrieves a value from a parsed log line, removing empty results. Args: structure (pyparsing.ParseResults): parsed log line. key (str): results key to retrieve from the parsed log line. Returns: type or None: the value of the named key in the parsed log line, or None if the value is a ParseResults object.
codesearchnet
def objects(self, prefix=None, delimiter=None):
    """Get an iterator for the objects within this bucket.

    Args:
        prefix: an optional prefix to match objects.
        delimiter: an optional string for directory-like semantics;
            names are truncated after the delimiter with duplicates
            removed (pseudo-directories).

    Returns:
        An iterable list of objects within this bucket.
    """
    return _object.Objects(self._name, prefix, delimiter,
                           context=self._context)
Get an iterator for the objects within this bucket. Args: prefix: an optional prefix to match objects. delimiter: an optional string to simulate directory-like semantics. The returned objects will be those whose names do not contain the delimiter after the prefix. For the remaining objects, the names will be returned truncated after the delimiter with duplicates removed (i.e. as pseudo-directories). Returns: An iterable list of objects within this bucket.
codesearchnet
def _check(self, check, radl):
    """Check type, operator and unit in a feature.

    Args:
        check (tuple):
            check[0]: expected type of the feature value.
            check[1]: list of allowed values, a predicate function, or
                None.
            check[2] (optional): list of allowed units; if None/absent,
                no unit is valid.
        radl: second argument passed when calling check[1].

    Returns:
        bool: True when all checks pass.

    Raises:
        RADLParseException: on any type/operator/value/unit violation.
    """
    # NOTE(review): `unicode` below is a Python 2 name; this module
    # appears to target py2 — do not run unmodified on py3.
    if check[0] == float:
        # Accept ints where a float is expected.
        if not isinstance(self.value, int) and not isinstance(self.value, float):
            raise RADLParseException("Invalid type; expected %s" % check[0],
                                     line=self.line)
    elif check[0] == str:
        if not isinstance(self.value, str) and not isinstance(self.value, unicode):
            raise RADLParseException("Invalid type; expected %s" % check[0],
                                     line=self.line)
    else:
        if not isinstance(self.value, check[0]):
            raise RADLParseException("Invalid type; expected %s" % check[0],
                                     line=self.line)
    # Operator checks: strings only allow '='; numbers and 'version'
    # properties allow comparisons; Features require 'contains'.
    if (isinstance(self.value, str) or isinstance(self.value, unicode)) and self.prop.find('version') == -1:
        if self.operator != "=":
            raise RADLParseException("Invalid operator; expected '='",
                                     line=self.line)
    elif isinstance(self.value, int) or isinstance(self.value, float) or self.prop.find('version') >= 0:
        if self.operator not in ["=", "<=", ">=", ">", "<"]:
            raise RADLParseException("Invalid operator; expected '=', '<=', " +
                                     "'>=', '>' or '<'", line=self.line)
    elif isinstance(self.value, Features):
        if self.operator != "contains":
            raise RADLParseException("Invalid operator; expected 'contains'",
                                     line=self.line)
    # Value checks: either a whitelist or a predicate.
    if isinstance(check[1], list):
        if self.value.upper() not in check[1]:
            raise RADLParseException("Invalid value; expected one of %s" % check[1],
                                     line=self.line)
    elif callable(check[1]):
        if not check[1](self, radl):
            raise RADLParseException("Invalid value in property '%s'" % self.prop,
                                     line=self.line)
    # Unit checks.
    if len(check) < 3 or check[2] is None:
        if self.unit:
            raise RADLParseException("Invalid unit; expected none",
                                     line=self.line)
    elif len(check) > 2 and check[2]:
        if self.unit.upper() not in check[2]:
            raise RADLParseException("Invalid unit; expected one of %s" % check[2],
                                     line=self.line)
    return True
Check type, operator and unit in a feature. Args: - check(tuple): - v[0]: expected type of the feature value. - v[1]: can be a list of possible values or a function to test the value or None. - v[2] (optional): can be a list of possible units; if None or not set the unit valid is none. - radl: second argument passed when calling v[1].
juraj-google-style
def setup_test_logger(log_path, prefix=None, filename=None):
    """Customizes the root logger for a test run.

    Args:
        log_path: Location of the report file.
        prefix: A prefix for each log line in terminal.
        filename: Name of the files. Currently unused by this function;
            kept for interface compatibility.
    """
    utils.create_dir(log_path)
    _setup_test_logger(log_path, prefix)
    logging.info('Test output folder: "%s"', log_path)
    create_latest_log_alias(log_path)
Customizes the root logger for a test run. Args: log_path: Location of the report file. prefix: A prefix for each log line in terminal. filename: Name of the files. The default is the time the objects are requested.
juraj-google-style
def _CreateDictReader(self, line_reader):
    """Returns a reader that processes each row and yields dictionaries.

    csv.DictReader handles single-character delimiters; parsers needing
    multi-character delimiters must override this method.

    Args:
        line_reader (iter): yields lines from a file-like object.

    Returns:
        iter: a reader of dictionaries, as returned by csv.DictReader().
    """
    delimiter = self.DELIMITER
    quotechar = self.QUOTE_CHAR
    magic_test_string = self._MAGIC_TEST_STRING
    # On Python 3 the csv module requires str, not bytes.
    if py2to3.PY_3:
        delimiter = delimiter.decode(self._encoding)
        quotechar = quotechar.decode(self._encoding)
        magic_test_string = magic_test_string.decode(self._encoding)
    # restkey/restval use a magic string so malformed rows are detectable.
    return csv.DictReader(
        line_reader, delimiter=delimiter, fieldnames=self.COLUMNS,
        quotechar=quotechar, restkey=magic_test_string,
        restval=magic_test_string)
Returns a reader that processes each row and yields dictionaries. csv.DictReader does this job well for single-character delimiters; parsers that need multi-character delimiters need to override this method. Args: line_reader (iter): yields lines from a file-like object. Returns: iter: a reader of dictionaries, as returned by csv.DictReader().
juraj-google-style
def run_example(example_cls: Example, args=None):
    """Run an example entering a blocking main loop.

    Args:
        example_cls: The example class to render.
        args: Override sys.argv (parsed by parse_args).
    """
    values = parse_args(args)
    window_cls = get_window_cls(values.window)
    window = window_cls(
        title=example_cls.title,
        size=example_cls.window_size,
        fullscreen=values.fullscreen,
        resizable=example_cls.resizable,
        gl_version=example_cls.gl_version,
        aspect_ratio=example_cls.aspect_ratio,
        vsync=values.vsync,
        samples=values.samples,
        cursor=values.cursor,
    )
    window.example = example_cls(ctx=window.ctx, wnd=window)
    start_time = time.time()
    current_time = start_time
    prev_time = start_time
    frame_time = 0
    while not window.is_closing:
        current_time, prev_time = time.time(), current_time
        # Clamp to at least 1 ms so frame_time is never zero.
        frame_time = max(current_time - prev_time, 1 / 1000)
        window.render(current_time - start_time, frame_time)
        window.swap_buffers()
    duration = time.time() - start_time
    window.destroy()
    print("Duration: {0:.2f}s @ {1:.2f} FPS".format(duration, window.frames / duration))
Run an example entering a blocking main loop Args: example_cls: The example class to render args: Override sys.argv
juraj-google-style
def _FormatSubjectOrProcessToken(self, token_data):
    """Formats a subject or process token as a dictionary of values.

    Args:
        token_data: AUT_SUBJECT32, AUT_PROCESS32, AUT_SUBJECT64 or
            AUT_PROCESS64 token data.

    Returns:
        dict[str, str]: token values.
    """
    ip_address = self._FormatPackedIPv4Address(token_data.ip_address)
    return {
        'aid': token_data.audit_user_identifier,
        'euid': token_data.effective_user_identifier,
        'egid': token_data.effective_group_identifier,
        'uid': token_data.real_user_identifier,
        'gid': token_data.real_group_identifier,
        'pid': token_data.process_identifier,
        'session_id': token_data.session_identifier,
        'terminal_port': token_data.terminal_port,
        'terminal_ip': ip_address}
Formats a subject or process token as a dictionary of values. Args: token_data (bsm_token_data_subject32|bsm_token_data_subject64): AUT_SUBJECT32, AUT_PROCESS32, AUT_SUBJECT64 or AUT_PROCESS64 token data. Returns: dict[str, str]: token values.
juraj-google-style
def add_residues_highlight_to_nglview(view, structure_resnums, chain, res_color='red'):
    """Highlight residue number(s) on chain(s) in an NGLWidget view.

    Args:
        view (NGLWidget): NGLWidget view object.
        structure_resnums (int, list): residue number(s) to highlight,
            structure numbering.
        chain (str, list): chain ID or IDs the residues belong to.
        res_color (str): color to highlight residues with.

    Raises:
        ValueError: if structure_resnums is neither an int nor a list.
    """
    chain = ssbio.utils.force_list(chain)
    if isinstance(structure_resnums, list):
        # De-duplicate requested residues.
        structure_resnums = list(set(structure_resnums))
    elif isinstance(structure_resnums, int):
        structure_resnums = ssbio.utils.force_list(structure_resnums)
    else:
        raise ValueError('Input must either be a residue number of a list of residue numbers')
    # Build NGL selection strings: "( :A or :B )" for chains.
    to_show_chains = '( '
    for c in chain:
        to_show_chains += ':{} or'.format(c)
    to_show_chains = to_show_chains.strip(' or ')
    to_show_chains += ' )'
    # "( 12 or 42 )" for residues.
    to_show_res = '( '
    for m in structure_resnums:
        to_show_res += '{} or '.format(m)
    to_show_res = to_show_res.strip(' or ')
    to_show_res += ' )'
    log.info('Selection: {} and not hydrogen and {}'.format(to_show_chains, to_show_res))
    view.add_ball_and_stick(selection='{} and not hydrogen and {}'.format(to_show_chains, to_show_res), color=res_color)
Add a residue number or numbers to an NGLWidget view object. Args: view (NGLWidget): NGLWidget view object structure_resnums (int, list): Residue number(s) to highlight, structure numbering chain (str, list): Chain ID or IDs of which residues are a part of. If not provided, all chains in the mapped_chains attribute will be used. If that is also empty, and exception is raised. res_color (str): Color to highlight residues with
juraj-google-style
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
    """Extract device information from the iPod plist.

    Args:
        parser_mediator: mediates interactions between parsers and other
            components, such as storage and dfvfs.
        match (Optional[dict[str, object]]): keys extracted from
            PLIST_KEYS.
    """
    devices = match.get('Devices', {})
    for device_identifier, device_information in iter(devices.items()):
        datetime_value = device_information.get('Connected', None)
        if not datetime_value:
            # Skip devices with no recorded connection time.
            continue
        event_data = IPodPlistEventData()
        event_data.device_id = device_identifier
        for key, value in iter(device_information.items()):
            if key == 'Connected':
                continue
            # Map plist keys like "Serial Number" to attribute names
            # like "serial_number".
            attribute_name = key.lower().replace(' ', '_')
            setattr(event_data, attribute_name, value)
        event = time_events.PythonDatetimeEvent(
            datetime_value, definitions.TIME_DESCRIPTION_LAST_CONNECTED)
        parser_mediator.ProduceEventWithEventData(event, event_data)
Extract device information from the iPod plist. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
juraj-google-style
def extract_random_video_patch(videos, num_frames=-1):
    """For every video, extract a random consecutive patch of num_frames.

    Args:
        videos: 5-D Tensor, (NTHWC).
        num_frames: Integer; if -1 the entire video is returned.

    Returns:
        video_patch: 5-D Tensor, (NTHWC) with T = num_frames.

    Raises:
        ValueError: If num_frames is greater than the total number of
            frames in the video.
    """
    if num_frames == -1:
        return videos
    batch_size, num_total_frames, h, w, c = common_layers.shape_list(videos)
    if num_total_frames < num_frames:
        raise ValueError("Expected num_frames <= %d, got %d" % (num_total_frames, num_frames))
    # Independent random start frame per video in the batch.
    frame_start = tf.random_uniform(
        shape=(batch_size,), minval=0,
        maxval=num_total_frames - num_frames + 1, dtype=tf.int32)
    # (batch, num_frames) absolute frame indices, flattened for gather_nd.
    range_inds = tf.expand_dims(tf.range(num_frames), axis=0)
    frame_inds = range_inds + tf.expand_dims(frame_start, axis=1)
    frame_inds = tf.reshape(frame_inds, [-1])
    batch_inds = tf.expand_dims(tf.range(batch_size), axis=1)
    batch_inds = tf.tile(batch_inds, [1, num_frames])
    batch_inds = tf.reshape(batch_inds, [-1])
    gather_inds = tf.stack((batch_inds, frame_inds), axis=1)
    video_patches = tf.gather_nd(videos, gather_inds)
    return tf.reshape(video_patches, (batch_size, num_frames, h, w, c))
For every video, extract a random consecutive patch of num_frames. Args: videos: 5-D Tensor, (NTHWC) num_frames: Integer, if -1 then the entire video is returned. Returns: video_patch: 5-D Tensor, (NTHWC) with T = num_frames. Raises: ValueError: If num_frames is greater than the number of total frames in the video.
juraj-google-style
def analogy_rank_score(analogies, word_vectors, no_threads=1):
    """Calculate the analogy rank score for the given set of analogies.

    A rank of zero denotes a perfect score; with random word vectors we
    would expect a rank of 0.5.

    Args:
        analogies: numpy array of word ids for the analogy tasks, as
            built by `construct_analogy_test_set`.
        word_vectors: numpy array holding the word vectors to use.
        no_threads: number of parallel threads used in the calculation.

    Returns:
        numpy array of the normalized rank of the target word in each
        analogy task (0 = returned first, 1 = returned last).
    """
    # a : b :: c : d  ->  the query vector is b + c - a.
    input_vectors = (word_vectors[analogies[:, 1]]
                     + word_vectors[analogies[:, 2]]
                     - word_vectors[analogies[:, 0]])
    word_vector_norms = np.linalg.norm(word_vectors, axis=1)
    rank_violations = np.zeros(input_vectors.shape[0], dtype=np.int32)
    compute_rank_violations(word_vectors, word_vector_norms, input_vectors,
                            analogies[:, 3], analogies, rank_violations,
                            no_threads)
    # Normalize violation counts by vocabulary size.
    return rank_violations / float(word_vectors.shape[0])
Calculate the analogy rank score for the given set of analogies. A rank of zero denotes a perfect score; with random word vectors we would expect a rank of 0.5. Arguments: - analogies: a numpy array holding the ids of the words in the analogy tasks, as constructed by `construct_analogy_test_set`. - word_vectors: numpy array holding the word vectors to use. - num_threads: number of parallel threads to use in the calculation. Returns: - ranks: a numpy array holding the normalized rank of the target word in each analogy task. Rank 0 means that the target words was returned first; rank 1 means it was returned last.
codesearchnet
def find(pcoll, regex, group=0):
    """Returns the matches if a portion of the line matches the Regex.

    Returns the entire match (group 0) by default.

    Args:
        regex: the regular expression string or (re.compile) pattern.
        group: (optional) group to return; an integer or a string name.
    """
    regex = Regex._regex_compile(regex)

    def _process(element):
        # Yield nothing for non-matching elements (FlatMap drops them).
        r = regex.search(element)
        if r:
            yield r.group(group)

    return pcoll | FlatMap(_process)
Returns the matches if a portion of the line matches the Regex. Returns the entire group (group 0 by default). Group can be integer value or a string value. Args: regex: the regular expression string or (re.compile) pattern. group: (optional) name of the group, it can be integer or a string value.
github-repos
def get_drives(self, id_or_uri):
    """Gets the list of drives allocated to this SAS logical JBOD.

    Args:
        id_or_uri: Either the SAS logical JBOD ID or its URI.

    Returns:
        list: A list of drives.
    """
    uri = self._client.build_uri(id_or_uri=id_or_uri) + self.DRIVES_PATH
    return self._client.get(id_or_uri=uri)
Gets the list of drives allocated to this SAS logical JBOD. Args: id_or_uri: Can be either the SAS logical JBOD ID or the SAS logical JBOD URI. Returns: list: A list of Drives
codesearchnet
def list_street_poi_parking(self, **kwargs):
    """Obtain a list of addresses and POIs near a given address.

    Args:
        lang (str): Language code (*es* or *en*).
        address (str): Address in which to perform the search.

    Returns:
        tuple: (True, list of ParkingPoi) on success, or
        (False, error message str) on failure.
    """
    url_args = {
        'language': util.language_code(kwargs.get('lang')),
        'address': kwargs.get('address', '')
    }
    result = self.make_request('list_street_poi_parking', url_args)
    if not util.check_result(result):
        return False, result.get('message', 'UNKNOWN ERROR')
    values = util.response_list(result, 'Data')
    return True, [emtype.ParkingPoi(**a) for a in values]
Obtain a list of addresses and POIs. This endpoint uses an address to perform the search Args: lang (str): Language code (*es* or *en*). address (str): Address in which to perform the search. Returns: Status boolean and parsed response (list[ParkingPoi]), or message string in case of error.
juraj-google-style
def save_json(obj, filename, **kwargs):
    """Save an object as a JSON file.

    Args:
        obj: The object to save. Must be JSON-serializable.
        filename: Path to the output file.
        **kwargs: Additional arguments passed through to `json.dump`
            (e.g. ``indent``, ``sort_keys``).
    """
    # utf-8 with a context manager guarantees the handle is closed and
    # non-ascii data round-trips.
    with open(filename, 'w', encoding='utf-8') as f:
        json.dump(obj, f, **kwargs)
Save an object as a JSON file. Args: obj: The object to save. Must be JSON-serializable. filename: Path to the output file. **kwargs: Additional arguments to `json.dump`.
juraj-google-style
def __init__(self, graph=None, op_log=None):
    """Constructor.

    Args:
        graph: tf.Graph. If None and eager execution is not enabled, the
            default graph is used.
        op_log: optional tensorflow::tfprof::OpLogProto proto, used to
            define extra op types.
    """
    if not graph and (not context.executing_eagerly()):
        graph = ops.get_default_graph()
    # Fraction of ops covered by the profile; starts at zero.
    self._coverage = 0.0
    self._graph = graph
    # Merge the user-supplied op log with the default one before handing it
    # to the native profiler.
    op_log = tfprof_logger.merge_default_with_oplog(self._graph, op_log=op_log)
    print_mdl.NewProfiler(_graph_string(self._graph), op_log.SerializeToString())
Constructor. Args: graph: tf.Graph. If None and eager execution is not enabled, use default graph. op_log: optional. tensorflow::tfprof::OpLogProto proto. Used to define extra op types.
github-repos
def __init__(self, domain_mapper, mode='classification', class_names=None, random_state=None):
    """Initializer.

    Args:
        domain_mapper: must inherit from DomainMapper class.
        mode: "classification" or "regression".
        class_names: list of class names (only used for classification).
        random_state: an integer or numpy.RandomState used to generate
            random numbers. If None, the internal numpy seed is used.

    Raises:
        LimeError: if `mode` is neither "classification" nor "regression".
    """
    self.random_state = random_state
    self.mode = mode
    self.domain_mapper = domain_mapper
    # Per-label local explanations; populated by the explainer.
    self.local_exp = {}
    self.intercept = {}
    self.score = None
    self.local_pred = None
    self.scaled_data = None
    if mode == 'classification':
        self.class_names = class_names
        self.top_labels = None
        self.predict_proba = None
    elif mode == 'regression':
        # Regression is internally treated as a binary problem with a
        # single dummy "positive" label.
        self.class_names = ['negative', 'positive']
        self.predicted_value = None
        self.min_value = 0.0
        self.max_value = 1.0
        self.dummy_label = 1
    else:
        raise LimeError('Invalid explanation mode "{}". '
                        'Should be either "classification" '
                        'or "regression".'.format(mode))
Initializer. Args: domain_mapper: must inherit from DomainMapper class mode: "classification" or "regression" class_names: list of class names (only used for classification) random_state: an integer or numpy.RandomState that will be used to generate random numbers. If None, the random state will be initialized using the internal numpy seed.
juraj-google-style
def load(self, train=True, test=True, shuffle=True) -> tuple:
    """Load the vectorized representations of the stored data files.

    Args:
        train: Whether to load train data.
        test: Whether to load test data.
        shuffle: Whether to shuffle the loaded data.

    Returns:
        tuple: the loaded data, as produced by the private loader.
    """
    # Delegates to the private loader with the configured file list.
    return self.__load(self.__load_files, train, test, shuffle=shuffle)
Load the vectorized representations of the stored data files Args: train: Whether to load train data test: Whether to load test data
juraj-google-style
def get_transcript_credentials_state_for_org(org, provider=None):
    """Return the transcript-credentials state for an org.

    Args:
        org (unicode): course organization.
        provider (unicode): optional transcript provider to restrict to.

    Returns:
        dict: mapping of provider name to whether credentials exist, e.g.
        ``{u'Cielo24': True}``.
    """
    lookup = dict(org=org)
    # A falsy provider means "all providers for this org".
    if provider:
        lookup['provider'] = provider
    records = ThirdPartyTranscriptCredentialsState.objects.filter(**lookup)
    return {record.provider: record.exists for record in records}
Returns transcript credentials state for an org Arguments: org (unicode): course organization provider (unicode): transcript provider Returns: dict: provider name and their credential existence map { u'Cielo24': True } { u'3PlayMedia': False, u'Cielo24': True }
juraj-google-style
def _init_from_bool(self, z, x):
    """Construct the Pauli from boolean arrays.

    Args:
        z (numpy.ndarray): boolean z vector.
        x (numpy.ndarray): boolean x vector.

    Returns:
        Pauli: self.

    Raises:
        QiskitError: if z or x is None, or their lengths differ.
    """
    for label, vector in (('z', z), ('x', x)):
        if vector is None:
            raise QiskitError("%s vector must not be None." % label)
    if len(z) != len(x):
        raise QiskitError("length of z and x vectors must be "
                          "the same. (z: {} vs x: {})".format(len(z), len(x)))
    self._z = _make_np_bool(z)
    self._x = _make_np_bool(x)
    return self
Construct pauli from boolean array. Args: z (numpy.ndarray): boolean, z vector x (numpy.ndarray): boolean, x vector Returns: Pauli: self Raises: QiskitError: if z or x are None or the length of z and x are different.
juraj-google-style
def set_smartplug_state(self, device_label, state):
    """Turn a smartplug on or off.

    Args:
        device_label (str): Smartplug device label.
        state (boolean): new status, True (on) or False (off).

    Raises:
        RequestError: if the HTTP request fails.
    """
    payload = json.dumps([{'deviceLabel': device_label, 'state': state}])
    headers = {
        'Content-Type': 'application/json',
        'Cookie': 'vid={}'.format(self._vid),
    }
    try:
        response = requests.post(urls.smartplug(self._giid),
                                 headers=headers,
                                 data=payload)
    except requests.exceptions.RequestException as ex:
        raise RequestError(ex)
    _validate_response(response)
Turn on or off smartplug Args: device_label (str): Smartplug device label state (boolean): new status, 'True' or 'False'
codesearchnet
def lookup_zone_exception(self, callsign, timestamp=None):
    """Return the CQ Zone exception for a callsign, if one exists.

    Args:
        callsign (string): Amateur radio callsign.
        timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC).
            Defaults to the current UTC time, computed per call.

    Returns:
        int: CQ Zone exception for this callsign at the given time.

    Raises:
        KeyError: no matching callsign found, or unsupported lookup type.
        APIKeyMissingError: Clublog API key missing or incorrect.
    """
    # BUG FIX: the original default `datetime.utcnow().replace(tzinfo=UTC)`
    # was evaluated ONCE at import time, freezing "now" at module load.
    # Use a None sentinel and compute the timestamp per call instead.
    if timestamp is None:
        timestamp = datetime.utcnow().replace(tzinfo=UTC)
    callsign = callsign.strip().upper()
    if self._lookuptype == 'clublogxml':
        return self._check_zone_exception_for_date(
            callsign, timestamp, self._zone_exceptions, self._zone_exceptions_index)
    elif self._lookuptype == 'redis':
        (data_dict, index) = self._get_dicts_from_redis(
            '_zone_ex_', '_zone_ex_index_', self._redis_prefix, callsign)
        return self._check_zone_exception_for_date(callsign, timestamp, data_dict, index)
    # Unsupported backend: behave like "no exception found".
    raise KeyError
Returns a CQ Zone if an exception exists for the given callsign Args: callsign (string): Amateur radio callsign timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC) Returns: int: Value of the CQ Zone exception which exists for this callsign (at the given time) Raises: KeyError: No matching callsign found APIKeyMissingError: API Key for Clublog missing or incorrect Example: The following code checks the Clublog XML database if a CQ Zone exception exists for the callsign DP0GVN. >>> from pyhamtools import LookupLib >>> my_lookuplib = LookupLib(lookuptype="clublogxml", apikey="myapikey") >>> print my_lookuplib.lookup_zone_exception("DP0GVN") 38 The prefix "DP" is assigned to Germany, but the station is located in Antarctica, and therefore in CQ Zone 38 Note: This method is available for - clublogxml - redis
codesearchnet
def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
    """Retrieve sequence ids from a token list that has no special tokens added.

    Args:
        token_ids_0 (`List[int]`): List of IDs.
        token_ids_1 (`List[int]`, *optional*): Optional second list of IDs
            for sequence pairs.
        already_has_special_tokens (`bool`, *optional*, defaults to `False`):
            Whether the token list is already formatted with special tokens.

    Returns:
        `List[int]`: 1 for a special token, 0 for a sequence token.
    """
    if already_has_special_tokens:
        return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
    # Each sequence is wrapped by one special token on each side.
    mask = [1] + [0] * len(token_ids_0) + [1]
    if token_ids_1 is not None:
        mask += [1] + [0] * len(token_ids_1) + [1]
    return mask
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
github-repos
def pairwise_iou(boxlist1, boxlist2):
    """Compute pairwise intersection-over-union between box collections.

    Args:
        boxlist1: Nx4 floatbox.
        boxlist2: Mx4 floatbox.

    Returns:
        A [N, M] tensor of pairwise IoU scores.
    """
    intersections = pairwise_intersection(boxlist1, boxlist2)
    # Broadcast the per-box areas against each other to get pairwise unions.
    unions = (tf.expand_dims(area(boxlist1), 1)
              + tf.expand_dims(area(boxlist2), 0)
              - intersections)
    # Guard the 0/0 case: zero intersection means zero IoU.
    return tf.where(tf.equal(intersections, 0.0),
                    tf.zeros_like(intersections),
                    tf.truediv(intersections, unions))
Computes pairwise intersection-over-union between box collections. Args: boxlist1: Nx4 floatbox boxlist2: Mx4 Returns: a tensor with shape [N, M] representing pairwise iou scores.
juraj-google-style
def read_from_directory(self, dataset_info_dir):
    """Update DatasetInfo from the JSON file in `dataset_info_dir`.

    Updates the dynamically generated fields (num_examples, hash, time of
    creation, ...) of this DatasetInfo, overwriting previous metadata.

    Args:
        dataset_info_dir: `str` directory containing the metadata file;
            should be the root directory of a specific dataset version.

    Raises:
        ValueError: if `dataset_info_dir` is falsy.
        AssertionError: if the restored proto's version does not match the
            builder's version.
    """
    if not dataset_info_dir:
        raise ValueError(
            "Calling read_from_directory with undefined dataset_info_dir.")
    json_filename = self._dataset_info_filename(dataset_info_dir)
    parsed_proto = read_from_json(json_filename)
    self._set_splits(splits_lib.SplitDict.from_proto(parsed_proto.splits))
    if self.features:
        self.features.load_metadata(dataset_info_dir)
    # Merge every proto field: values defined in code win over restored
    # values; otherwise take the restored value.
    for field_name, field in self.as_proto.DESCRIPTOR.fields_by_name.items():
        field_value = getattr(self._info_proto, field_name)
        field_value_restored = getattr(parsed_proto, field_name)
        # HasField raises ValueError for repeated fields; fall back to
        # truthiness for those.
        try:
            is_defined = self._info_proto.HasField(field_name)
        except ValueError:
            is_defined = bool(field_value)
        try:
            is_defined_in_restored = parsed_proto.HasField(field_name)
        except ValueError:
            is_defined_in_restored = bool(field_value_restored)
        if is_defined:
            if field_value != field_value_restored:
                logging.info(
                    "Field info.%s from disk and from code do not match. Keeping "
                    "the one from code.", field_name)
            continue
        if not is_defined_in_restored:
            continue
        # Only defined on disk: copy it over. Messages are merged in place.
        if field.type == field.TYPE_MESSAGE:
            field_value.MergeFrom(field_value_restored)
        else:
            setattr(self._info_proto, field_name, field_value_restored)
    if self._builder._version != self.version:
        raise AssertionError(
            "The constructed DatasetInfo instance and the restored proto version "
            "do not match. Builder version: {}. Proto version: {}".format(
                self._builder._version, self.version))
    self._fully_initialized = True
Update DatasetInfo from the JSON file in `dataset_info_dir`. This function updates all the dynamically generated fields (num_examples, hash, time of creation,...) of the DatasetInfo. This will overwrite all previous metadata. Args: dataset_info_dir: `str` The directory containing the metadata file. This should be the root directory of a specific dataset version.
juraj-google-style
def input_streams(self):
    """Return DataStream objects for all singular input streams.

    Selectors that match multiple streams (e.g. 'all outputs') are
    skipped; only individually-selected streams are returned.

    Returns:
        list(DataStream): one stream per singular input selector.
    """
    return [walker.selector.as_stream()
            for walker, _trigger in self.inputs
            if walker.selector is not None and walker.selector.singular]
Return a list of DataStream objects for all singular input streams. This function only returns individual streams, not the streams that would be selected from a selector like 'all outputs' for example. Returns: list(DataStream): A list of all of the individual DataStreams that are inputs of the node. Input selectors that select multiple streams are not included
codesearchnet
def webhook(self, webhook_url): if not webhook_url: raise Exception('Url can not be None') matcher = re.match(self.__webhook_url_format, webhook_url) if not matcher: raise Exception('Invalid url format, looking for: ' + self.__webhook_url_format) self.api_keys(int(matcher.group(1)), matcher.group(2))
Load object with webhook_url Args: webhook_url (str): full webhook url given by Discord 'create webhook' func
juraj-google-style
def WriteRow(self, values):
    """Write a single row to the underlying buffer.

    Args:
        values: A dictionary mapping column names to values to be inserted
            into the CSV output.

    Raises:
        ValueError: if a configured column is missing from `values`.
    """
    precondition.AssertDictType(values, text, text)
    row = []
    for column in self._columns:
        if column not in values:
            raise ValueError("Row does not contain required column `%s`" % column)
        row.append(values[column])
    self._writer.WriteRow(row)
Writes a single row to the underlying buffer. Args: values: A dictionary mapping column names to values to be inserted into the CSV output.
juraj-google-style
def check_configuration(ctx, base_key, needed_keys):
    """Confirm a valid configuration, exiting with an error otherwise.

    Args:
        ctx (invoke.context): the configuration context.
        base_key (str): the base configuration key everything is under.
        needed_keys (list): sub-keys of the base key that must exist.
    """
    if base_key not in ctx.keys():
        exit("[{}ERROR{}] missing configuration for '{}'"
             .format(ERROR_COLOR, RESET_COLOR, base_key))
    # BUG FIX: this previously checked the hard-coded `ctx.releaser`,
    # ignoring `base_key`; look up the configured base key instead.
    if ctx[base_key] is None:
        exit("[{}ERROR{}] empty configuration for '{}' found"
             .format(ERROR_COLOR, RESET_COLOR, base_key))
    for my_key in needed_keys:
        if my_key not in ctx[base_key].keys():
            exit("[{}ERROR{}] missing configuration key '{}.{}'"
                 .format(ERROR_COLOR, RESET_COLOR, base_key, my_key))
Confirm a valid configuration. Args: ctx (invoke.context): base_key (str): the base configuration key everything is under. needed_keys (list): sub-keys of the base key that are checked to make sure they exist.
juraj-google-style
def __RenderOurModuleKeyFlags(self, module, output_lines, prefix=''):
    """Generate a help string for the key flags of a given module.

    Args:
        module: A module object or a module name (a string).
        output_lines: A list of strings; generated help lines are appended.
        prefix: A string prepended to each generated help line.
    """
    key_flags = self._GetKeyFlagsForModule(module)
    # Nothing to render if the module defines no key flags.
    if not key_flags:
        return
    self.__RenderModuleFlags(module, key_flags, output_lines, prefix)
Generates a help string for the key flags of a given module. Args: module: A module object or a module name (a string). output_lines: A list of strings. The generated help message lines will be appended to this list. prefix: A string that is prepended to each generated help line.
codesearchnet
def find_by(cls, payload, require=False):
    """Search the model by AND-joining the given query parameters.

    Only the first hit is returned; no particular ordering is guaranteed
    by the server-side API method.

    Args:
        payload: `dict`. The attributes of a record to restrict the search to.
        require: `bool`. True raises RecordNotFound when nothing matches.

    Returns:
        `dict`: the JSON serialization of the record found, or `None`.

    Raises:
        ValueError: if `payload` is not a dict.
        RecordNotFound: if nothing was found and `require` is True.
    """
    if not isinstance(payload, dict):
        raise ValueError("The 'payload' parameter must be provided a dictionary object.")
    endpoint = os.path.join(cls.URL, 'find_by')
    payload = {'find_by': payload}
    cls.debug_logger.debug('Searching Pulsar {} for {}'.format(
        cls.__name__, json.dumps(payload, indent=4)))
    response = requests.post(url=endpoint, json=payload, headers=HEADERS, verify=False)
    response.raise_for_status()
    res_json = response.json()
    if res_json:
        # Unwrap the model envelope when present; otherwise keep as-is.
        try:
            res_json = res_json[cls.MODEL_NAME]
        except KeyError:
            pass
    elif require:
        raise RecordNotFound("Can't find any {} records with search criteria: '{}'.".format(cls.__name__, payload))
    return res_json
Searches the model in question by AND joining the query parameters. Implements a Railsy way of looking for a record using a method by the same name and passing in the query as a dict. Only the first hit is returned, and there is no particular ordering specified in the server-side API method. Args: payload: `dict`. The attributes of a record to restrict the search to. require: `bool`. True means to raise a `pulsarpy.models.RecordNotFound` exception if no record is found. Returns: `dict`: The JSON serialization of the record, if any, found by the API call. `None`: If the API call didn't return any results. Raises: `pulsarpy.models.RecordNotFound`: No records were found, and the `require` parameter is True.
codesearchnet
def _GetTimeValues(self, number_of_seconds):
    """Split a number of seconds into days, hours, minutes and seconds.

    Args:
        number_of_seconds (int|decimal.Decimal): number of seconds.

    Returns:
        tuple[int, int, int, int]: days, hours, minutes, seconds.
    """
    remainder = int(number_of_seconds)
    remainder, seconds = divmod(remainder, 60)
    remainder, minutes = divmod(remainder, 60)
    days, hours = divmod(remainder, 24)
    return days, hours, minutes, seconds
Determines time values. Args: number_of_seconds (int|decimal.Decimal): number of seconds. Returns: tuple[int, int, int, int]: days, hours, minutes, seconds.
juraj-google-style
class _BaseThresholdDoFn(beam.DoFn):
    """Applies a ThresholdFn to anomaly detection results.

    Abstract base for DoFns that use a `ThresholdFn` to convert anomaly
    scores into labels (e.g. normal or outlier), updating the prediction
    labels inside `AnomalyResult` objects.

    Args:
        threshold_fn_spec (Spec): Specification of the `ThresholdFn` to use.
    """

    def __init__(self, threshold_fn_spec: Spec):
        self._threshold_fn_spec = threshold_fn_spec

    def _apply_threshold_to_predictions(self, result: AnomalyResult) -> AnomalyResult:
        # Relabel each prediction with the thresholded score while recording
        # the threshold that produced the label.
        labeled = []
        for prediction in result.predictions:
            labeled.append(
                dataclasses.replace(
                    prediction,
                    label=self._threshold_fn.apply(prediction.score),
                    threshold=self._threshold_fn.threshold))
        return dataclasses.replace(result, predictions=labeled)
Applies a ThresholdFn to anomaly detection results. This abstract base class defines the structure for DoFns that use a `ThresholdFn` to convert anomaly scores into anomaly labels (e.g., normal or outlier). It handles the core logic of applying the threshold function and updating the prediction labels within `AnomalyResult` objects. Args: threshold_fn_spec (Spec): Specification defining the `ThresholdFn` to be used.
github-repos
def CreateDefaultPartition(client, ad_group_id):
    """Create a default product partition for the given ad group.

    Args:
        client: an AdWordsClient instance.
        ad_group_id: an integer ID for an ad group.
    """
    service = client.GetService('AdGroupCriterionService', version='v201809')
    operand = {
        'xsi_type': 'BiddableAdGroupCriterion',
        'adGroupId': ad_group_id,
        'criterion': {
            'xsi_type': 'ProductPartition',
            'partitionType': 'UNIT',
        },
        # Bid 0.50 (500000 micro-units) per click on the default partition.
        'biddingStrategyConfiguration': {
            'bids': [{
                'xsi_type': 'CpcBid',
                'bid': {'microAmount': 500000},
            }]
        },
    }
    result = service.mutate([{'operator': 'ADD', 'operand': operand}])['value'][0]
    print('Ad group criterion with ID "%d" in ad group with ID "%d" was added.'
          % (result['criterion']['id'], result['adGroupId']))
Creates a default partition. Args: client: an AdWordsClient instance. ad_group_id: an integer ID for an ad group.
juraj-google-style
def subspace_index(self, little_endian_bits_int: int) -> Tuple[(Union[(slice, int, 'ellipsis')], ...)]:
    """Return an index for the subspace where the target axes equal a value.

    Args:
        little_endian_bits_int: Desired value of the qubits at the targeted
            `axes`, packed into an integer (least significant bit first).

    Returns:
        A value usable to index into `target_tensor`/`available_buffer`,
        touching only the part of Hilbert space matching the bit assignment.
    """
    # Delegates entirely to the linalg helper using this object's axes.
    return linalg.slice_for_qubits_equal_to(self.axes, little_endian_bits_int)
An index for the subspace where the target axes equal a value. Args: little_endian_bits_int: The desired value of the qubits at the targeted `axes`, packed into an integer. The least significant bit of the integer is the desired bit for the first axis, and so forth in increasing order. Returns: A value that can be used to index into `target_tensor` and `available_buffer`, and manipulate only the part of Hilbert space corresponding to a given bit assignment. Example: If `target_tensor` is a 4 qubit tensor and `axes` is `[1, 3]` and then this method will return the following when given `little_endian_bits=0b01`: `(slice(None), 0, slice(None), 1, Ellipsis)` Therefore the following two lines would be equivalent: args.target_tensor[args.subspace_index(0b01)] += 1 args.target_tensor[:, 0, :, 1] += 1
codesearchnet
def __strip_extra_attributes(self, node: yaml.Node, known_attrs: List[str]) -> None:
    """Strip tags from attributes not in our data model.

    This keeps nodes under unknown attributes from being converted to
    objects; they stay plain maps for the user.

    Args:
        node: The node to process.
        known_attrs: The attributes to not strip.

    Raises:
        RecognitionError: if a mapping key is not a string scalar.
    """
    known_keys = list(known_attrs)
    known_keys.remove('self')
    if 'yatiml_extra' in known_keys:
        known_keys.remove('yatiml_extra')
    for key_node, value_node in node.value:
        is_string_key = (isinstance(key_node, yaml.ScalarNode)
                         and key_node.tag == 'tag:yaml.org,2002:str')
        if not is_string_key:
            raise RecognitionError(
                ('{}{}Mapping keys that are not of type'
                 ' string are not supported by YAtiML.').format(
                     node.start_mark, os.linesep))
        if key_node.value not in known_keys:
            self.__strip_tags(value_node)
Strips tags from extra attributes. This prevents nodes under attributes that are not part of our \ data model from being converted to objects. They'll be plain \ CommentedMaps instead, which then get converted to OrderedDicts \ for the user. Args: node: The node to process known_attrs: The attributes to not strip
juraj-google-style
def rename_keys(d: Dict[(str, Any)], mapping: Dict[(str, str)]) -> Dict[(str, Any)]:
    """Return a copy of ``d`` with keys renamed per ``mapping``.

    Keys absent from ``mapping`` are kept unchanged; the inputs are not
    modified.

    Args:
        d: the starting dictionary.
        mapping: ``{old_key_name: new_key_name}``.

    Returns:
        a new dictionary with renamed keys.
    """
    return {mapping.get(key, key): value for key, value in d.items()}
Returns a copy of the dictionary ``d`` with its keys renamed according to ``mapping``. Args: d: the starting dictionary mapping: a dictionary of the format ``{old_key_name: new_key_name}`` Returns: a new dictionary Keys that are not in ``mapping`` are left unchanged. The input parameters are not modified.
codesearchnet
def calc_checksum(sentence):
    """Calculate a NMEA 0183 checksum for the given sentence.

    The checksum is a simple XOR of all characters between the leading
    "$" symbol and the "*" checksum separator.

    Args:
        sentence (str): NMEA 0183 formatted sentence.

    Returns:
        int: the XOR of the character ordinals.
    """
    # Drop a single leading '$', then everything from '*' onwards.
    if sentence.startswith('$'):
        sentence = sentence[1:]
    body = sentence.partition('*')[0]
    checksum = 0
    for char in body:
        checksum ^= ord(char)
    return checksum
Calculate a NMEA 0183 checksum for the given sentence. NMEA checksums are a simple XOR of all the characters in the sentence between the leading "$" symbol, and the "*" checksum separator. Args: sentence (str): NMEA 0183 formatted sentence
juraj-google-style
def reset_sequence(cls, value=None, force=False):
    """Reset the sequence counter.

    Args:
        value (int or None): the new 'next' sequence value; if None, the
            next value is recomputed from _setup_next_sequence().
        force (bool): whether to force-reset parent sequence counters in a
            factory inheritance chain.
    """
    meta = cls._meta
    meta.reset_sequence(value, force=force)
Reset the sequence counter. Args: value (int or None): the new 'next' sequence value; if None, recompute the next value from _setup_next_sequence(). force (bool): whether to force-reset parent sequence counters in a factory inheritance chain.
juraj-google-style
def get_session_tensor(handle, dtype, name=None):
    """Get the tensor of type `dtype` by feeding a tensor handle.

    EXPERIMENTAL and subject to change. Retrieves the value of a persistent
    tensor produced by a previous run() from the session state.

    Args:
        handle: The string representation of a persistent tensor handle.
        dtype: The type of the output tensor.
        name: Optional name prefix for the return tensor.

    Returns:
        A pair (placeholder for feeding a handle, tensor keyed by the handle).
    """
    # Place the fetch ops on the same device that produced the handle.
    with ops.device(TensorHandle._get_device_name(handle)):
        holder = array_ops.placeholder(dtypes.string)
        _register_handle_feeder(holder.graph, holder, dtype)
        tensor = gen_data_flow_ops.get_session_tensor(holder, dtype, name=name)
    return (holder, tensor)
Get the tensor of type `dtype` by feeding a tensor handle. This is EXPERIMENTAL and subject to change. Get the value of the tensor from a tensor handle. The tensor is produced in a previous run() and stored in the state of the session. Args: handle: The string representation of a persistent tensor handle. dtype: The type of the output tensor. name: Optional name prefix for the return tensor. Returns: A pair of tensors. The first is a placeholder for feeding a tensor handle and the second is the tensor in the session state keyed by the tensor handle. Example: ```python c = tf.multiply(a, b) h = tf.compat.v1.get_session_handle(c) h = sess.run(h) p, a = tf.compat.v1.get_session_tensor(h.handle, tf.float32) b = tf.multiply(a, 10) c = sess.run(b, feed_dict={p: h.handle}) ```
github-repos
def tf_loss_per_instance(self, states, internals, actions, terminal, reward, next_states, next_internals, update, reference=None):
    """Create the TensorFlow operations for the per-instance batch loss.

    Abstract hook: subclasses must implement the actual loss computation.

    Args:
        states: Dict of state tensors.
        internals: Dict of prior internal state tensors.
        actions: Dict of action tensors.
        terminal: Terminal boolean tensor.
        reward: Reward tensor.
        next_states: Dict of successor state tensors.
        next_internals: List of posterior internal state tensors.
        update: Boolean tensor indicating whether this call happens during
            an update.
        reference: Optional reference tensor(s) for a comparative loss.

    Returns:
        Loss per instance tensor.

    Raises:
        NotImplementedError: always; must be overridden.
    """
    raise NotImplementedError
Creates the TensorFlow operations for calculating the loss per batch instance. Args: states: Dict of state tensors. internals: Dict of prior internal state tensors. actions: Dict of action tensors. terminal: Terminal boolean tensor. reward: Reward tensor. next_states: Dict of successor state tensors. next_internals: List of posterior internal state tensors. update: Boolean tensor indicating whether this call happens during an update. reference: Optional reference tensor(s), in case of a comparative loss. Returns: Loss per instance tensor.
codesearchnet
def merge_from(self, dev):
    """Merge the properties of "dev" into this `DeviceSpec`.

    Note: Will be removed in TensorFlow 2.x since DeviceSpecs will become
    immutable.

    Args:
        dev: a `DeviceSpec` whose set fields override this spec's fields.
    """
    # The helper computes the merged 5-tuple in one pass.
    self.job, self.replica, self.task, self.device_type, self.device_index = self._get_combined_properties(dev)
Merge the properties of "dev" into this `DeviceSpec`. Note: Will be removed in TensorFlow 2.x since DeviceSpecs will become immutable. Args: dev: a `DeviceSpec`.
github-repos
def __init__(self, request_builder, upload_url, current_content_length=0, is_last=False):
    """Initialize the IncrementalUpload.

    Args:
        request_builder: an AbstractUploadRequestBuilder instance.
        upload_url: a string url provided by the BatchJobService.
        current_content_length: an integer identifying the current content
            length of data uploaded to the Batch Job.
        is_last: a boolean indicating whether this is the final increment.

    Raises:
        GoogleAdsValueError: if the content length is lower than 0.
    """
    self._request_builder = request_builder
    if current_content_length < 0:
        raise googleads.errors.GoogleAdsValueError(
            'Current content length %s is < 0.' % current_content_length)
    self._current_content_length = current_content_length
    self._is_last = is_last
    # Build an opener honoring the client's proxy configuration.
    self._url_opener = urllib2.build_opener(
        *self._request_builder.client.proxy_config.GetHandlers())
    # Forward any custom headers configured on the client.
    if self._request_builder.client.custom_http_headers:
        self._url_opener.addheaders.extend(
            self._request_builder.client.custom_http_headers.items())
    # Resolve the resumable-upload URL for the current offset.
    self._upload_url = self._InitializeURL(upload_url, current_content_length)
Initializes the IncrementalUpload. Args: request_builder: an AbstractUploadRequestBuilder instance. upload_url: a string url provided by the BatchJobService. current_content_length: an integer identifying the current content length of data uploaded to the Batch Job. is_last: a boolean indicating whether this is the final increment. Raises: GoogleAdsValueError: if the content length is lower than 0.
juraj-google-style
def SetDayOfWeekHasService(self, dow, has_service=True):
    """Set service as running (or not) on a day of the week.

    By default the service does not run on any days.

    Args:
        dow: 0 for Monday through 6 for Sunday.
        has_service: True if this service operates on dow, False otherwise.

    Raises:
        ValueError: if dow is outside the range [0, 7).

    Returns:
        None
    """
    # Validate explicitly instead of using `assert`, which is silently
    # stripped when Python runs with -O.
    if not 0 <= dow < 7:
        raise ValueError('dow must be in range [0, 7), got %r' % (dow,))
    self.day_of_week[dow] = has_service
Set service as running (or not) on a day of the week. By default the service does not run on any days. Args: dow: 0 for Monday through 6 for Sunday has_service: True if this service operates on dow, False if it does not. Returns: None
juraj-google-style
def _get_shoulds(options):
    """Return the list of 'SHOULD' validators for the right STIX version.

    Args:
        options: ValidationOptions instance with validation options for this
            run, including the STIX spec version.
    """
    # Dispatch to the module matching the requested spec version.
    module = shoulds20 if options.version == '2.0' else shoulds21
    return module.list_shoulds(options)
Return the list of 'SHOULD' validators for the correct version of STIX. Args: options: ValidationOptions instance with validation options for this validation run, including the STIX spec version.
codesearchnet
def trivial_reward(example):
    """Reward for the trivial search space.

    The reward (i.e. fitness) is the value itself, so the goal of the
    search is simply to find the value 1.

    Args:
        example: a materialized value.

    Returns:
        The corresponding reward (the value itself).
    """
    return example
Reward for the trivial search space. The reward (i.e. fitness) is the value itself. The goal of the search, therefore, is to find the value 1. Args: example: a materialized value. Returns: The corresponding reward.
github-repos
def find_files(paths, file_predicate):
    """Locate files matching the predicate in the given directories.

    Args:
        paths: A list of directory paths to search (recursively).
        file_predicate: A function returning True if the file name and
            extension are desired.

    Returns:
        A list of (directory, name, extension) tuples for matching files.
    """
    matches = []
    for path in paths:
        root = abs_path(path)
        for dir_path, _, file_names in os.walk(root):
            for file_name in file_names:
                base, ext = os.path.splitext(file_name)
                if file_predicate(base, ext):
                    matches.append((dir_path, base, ext))
    return matches
Locate files whose names and extensions match the given predicate in the specified directories. Args: paths: A list of directory paths where to find the files. file_predicate: A function that returns True if the file name and extension are desired. Returns: A list of files that match the predicate.
juraj-google-style
def format_arguments(*args):
    """Split command-line arguments into positionals and keyword arguments.

    Handled keyword formats:
      * --argument=value
      * --argument value

    Dashes in keyword names become underscores.

    Args:
        *args (list): a list of arguments.

    Returns:
        ([positional_args], {kwargs})
    """
    positional = []
    keyword = {}
    pending_key = None
    for arg in args:
        if arg.startswith('--'):
            stripped = arg[2:]
            key, sep, value = stripped.partition('=')
            if sep:
                keyword[key.replace('-', '_')] = value
            else:
                # Value expected in the next argument.
                pending_key = stripped.replace('-', '_')
        elif pending_key:
            keyword[pending_key] = arg
            pending_key = None
        else:
            positional.append(arg)
    return (positional, keyword)
Converts a list of arguments from the command line into a list of positional arguments and a dictionary of keyword arguments. Handled formats for keyword arguments are: * --argument=value * --argument value Args: *args (list): a list of arguments Returns: ([positional_args], {kwargs})
codesearchnet
def __add__(self, other):
    """Combine two DistributedROC objects' contingency tables.

    Args:
        other: Another DistributedROC object.

    Returns:
        A new DistributedROC with summed contingency tables.
    """
    combined = DistributedROC(self.thresholds, self.obs_threshold)
    combined.contingency_tables = (
        self.contingency_tables + other.contingency_tables)
    return combined
Add two DistributedROC objects together and combine their contingency table values. Args: other: Another DistributedROC object.
juraj-google-style
def maybe_download(self, filename, work_directory, source_url):
    """Download the data from source url, unless it's already here.

    Args:
        filename: string, name of the file in the directory.
        work_directory: string, path to working directory.
        source_url: url to download from if file doesn't exist.

    Returns:
        Path to resulting file.
    """
    if not os.path.exists(work_directory):
        os.makedirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    # Cache hit: nothing to download.
    if os.path.exists(filepath):
        return filepath
    temp_file_name, _ = urllib.request.urlretrieve(source_url)
    copyfile(temp_file_name, filepath)
    print('Successfully downloaded', filename)
    return filepath
Download the data from source url, unless it's already here. Args: filename: string, name of the file in the directory. work_directory: string, path to working directory. source_url: url to download from if file doesn't exist. Returns: Path to resulting file.
juraj-google-style
def write_uint16(self, value, little_endian=True):
    """Pack the value as an unsigned 16-bit integer and write it.

    Args:
        value: the integer to write.
        little_endian (bool): endianness; defaults to little endian.

    Returns:
        int: the number of bytes written.
    """
    endian = '<' if little_endian else '>'
    return self.pack('%sH' % endian, value)
Pack the value as an unsigned integer and write 2 bytes to the stream. Args: value: little_endian (bool): specify the endianness. (Default) Little endian. Returns: int: the number of bytes written.
codesearchnet
def ConvertMessage(self, value, message):
    """Convert a JSON object into a protocol message.

    Args:
        value: A JSON object.
        message: A WKT or regular protocol message to record the data.

    Raises:
        ParseError: In case of conversion problems.
    """
    descriptor = message.DESCRIPTOR
    # Wrapper types (e.g. google.protobuf.Int32Value) are unwrapped.
    if _IsWrapperMessage(descriptor):
        self._ConvertWrapperMessage(value, message)
        return
    # Well-known types have dedicated converter methods.
    full_name = descriptor.full_name
    if full_name in _WKTJSONMETHODS:
        methodcaller(_WKTJSONMETHODS[full_name][1], value, message)(self)
        return
    self._ConvertFieldValuePair(value, message)
Convert a JSON object into a message. Args: value: A JSON object. message: A WKT or regular protocol message to record the data. Raises: ParseError: In case of conversion problems.
codesearchnet
def parse_docs(docs, marks):
    """Parse YAML content embedded in a docstring.

    If docs is None, return {}. If docs has no YAML content, return
    {"$desc": docs}. Otherwise parse the YAML content and return
    {"$desc": docs-before-YAML, **yaml}.

    Args:
        docs (str): docs to be parsed.
        marks (list): marks which indicate where YAML content starts.

    Returns:
        A dict containing information extracted from the docs.
    """
    if docs is None:
        return {}
    positions = []
    for mark in marks:
        position = docs.find(mark)
        if position >= 0:
            positions.append(position)
    if not positions:
        return {"$desc": textwrap.dedent(docs).strip()}
    # YAML starts at the line containing the earliest mark.
    start = docs.rfind("\n", 0, min(positions))
    meta = yaml.load(textwrap.dedent(docs[start + 1:]))
    meta["$desc"] = textwrap.dedent(docs[:start]).strip()
    return meta
Parse YAML syntax content from docs If docs is None, return {} If docs has no YAML content, return {"$desc": docs} Else, parse YAML content, return {"$desc": docs, YAML} Args: docs (str): docs to be parsed marks (list): list of marks which indicate where YAML content starts Returns: A dict containing information extracted from the docs
juraj-google-style
def RegisterHelper(cls, resolver_helper):
    """Register a path specification resolver helper.

    Args:
        resolver_helper (ResolverHelper): resolver helper.

    Raises:
        KeyError: if a helper is already set for the corresponding type
            indicator.
    """
    indicator = resolver_helper.type_indicator
    if indicator in cls._resolver_helpers:
        raise KeyError((
            'Resolver helper object already set for type indicator: '
            '{0!s}.').format(indicator))
    cls._resolver_helpers[indicator] = resolver_helper
Registers a path specification resolver helper. Args: resolver_helper (ResolverHelper): resolver helper. Raises: KeyError: if resolver helper object is already set for the corresponding type indicator.
juraj-google-style
def GetFrequencyStopTimes(self, problems=None):
    """Return a list of StopTime objects for each headway-based run.

    Returns:
        a list of lists of StopTime objects, one inner list per run. If
        this trip doesn't have headways, returns an empty list.
    """
    stoptimes_list = []
    stoptime_pattern = self.GetStopTimes()
    # Offsets within each run are measured from the first arrival.
    first_secs = stoptime_pattern[0].arrival_secs
    stoptime_class = self.GetGtfsFactory().StopTime
    for run_secs in self.GetFrequencyStartTimes():
        stoptimes = []
        for st in stoptime_pattern:
            (arrival_secs, departure_secs) = (None, None)
            # Shift each timed stop by the run's start time; untimed stops
            # (None) stay untimed.
            if (st.arrival_secs != None):
                arrival_secs = ((st.arrival_secs - first_secs) + run_secs)
            if (st.departure_secs != None):
                departure_secs = ((st.departure_secs - first_secs) + run_secs)
            stoptimes.append(stoptime_class(problems=problems, stop=st.stop, arrival_secs=arrival_secs, departure_secs=departure_secs, stop_headsign=st.stop_headsign, pickup_type=st.pickup_type, drop_off_type=st.drop_off_type, shape_dist_traveled=st.shape_dist_traveled, stop_sequence=st.stop_sequence, timepoint=st.timepoint))
        stoptimes_list.append(stoptimes)
    return stoptimes_list
Return a list of StopTime objects for each headway-based run. Returns: a list of list of StopTime objects. Each list of StopTime objects represents one run. If this trip doesn't have headways returns an empty list.
codesearchnet
def dummy_inputs(self) -> Dict[str, tf.Tensor]:
    """Dummy inputs to build the network.

    Returns:
        `Dict[str, tf.Tensor]`: The dummy inputs.
    """
    dummies = {}
    for key, spec in self.input_signature.items():
        # Unknown dims default to 2, except the batch dim which becomes 1.
        dummy_shape = [dim if dim is not None else 2 for dim in spec.shape]
        if spec.shape[0] is None:
            dummy_shape[0] = 1
        dummies[key] = tf.ones(shape=dummy_shape, dtype=spec.dtype)
        if key == 'token_type_ids':
            # Token type ids must be valid indices, so use zeros.
            dummies[key] = tf.zeros_like(dummies[key])
    # Cross-attention models also need encoder hidden states to build.
    if self.config.add_cross_attention and 'encoder_hidden_states' in inspect.signature(self.call).parameters:
        if 'encoder_hidden_states' not in dummies:
            if self.main_input_name == 'input_ids':
                dummies['encoder_hidden_states'] = tf.ones(shape=(1, 2, self.config.hidden_size), dtype=tf.float32, name='encoder_hidden_states')
            else:
                raise NotImplementedError("Model has cross-attention but we couldn't infer the shape for the encoder hidden states. Please manually override dummy_inputs!")
    return dummies
Dummy inputs to build the network. Returns: `Dict[str, tf.Tensor]`: The dummy inputs.
github-repos
def select_segments(self, jsonpath: str) -> List[Segment]:
    """Dereference the json path in the document and return the matches.

    Args:
        jsonpath (str): a valid JSON path.

    Returns:
        A list of Segment objects containing the elements selected by the
        json path.
    """
    compiled_path = self.etk.parse_json_path(jsonpath)
    return [Segment(str(match.full_path), match.value, self)
            for match in compiled_path.find(self.cdr_document)]
Dereferences the json_path inside the document and returns the selected elements. This method should compile and cache the compiled json_path in case the same path is reused by multiple extractors. Args: jsonpath (str): a valid JSON path. Returns: A list of Segments object that contains the elements selected by the json path.
juraj-google-style
def fetch(self, customer_id, data=None, **kwargs):
    """Fetch the Customer for the given Id.

    Args:
        customer_id: Id for which the customer object has to be retrieved.
        data: optional dict of query parameters.

    Returns:
        Customer dict for the given customer Id.
    """
    # Avoid a mutable default argument ({}): a shared dict would leak any
    # downstream mutation across calls. Create a fresh dict per call.
    if data is None:
        data = {}
    return super(Customer, self).fetch(customer_id, data, **kwargs)
Fetch Customer for given Id Args: customer_id : Id for which customer object has to be retrieved Returns: Customer dict for given customer Id
codesearchnet
def _get_single_variable(self, name, shape=None, dtype=dtypes.float32, initializer=None, regularizer=None, partition_info=None, reuse=None, trainable=None, caching_device=None, validate_shape=True, constraint=None, synchronization=vs.VariableSynchronization.AUTO, aggregation=vs.VariableAggregation.NONE):
    """Get or create a single Variable (e.g. a shard or entire variable).

    See the documentation of get_variable (ignoring partitioning
    components) for details on every argument.

    Returns:
        A Variable.

    Raises:
        ValueError: on reuse/shape/dtype mismatches; see get_variable.
    """
    # A non-callable initializer is treated as a constant value.
    initializing_from_value = False
    if initializer is not None and (not callable(initializer)):
        initializing_from_value = True
    if shape is not None and initializing_from_value:
        raise ValueError('If initializer is a constant, do not specify shape.')
    dtype = dtypes.as_dtype(dtype)
    shape = as_shape(shape)
    if name in self._vars:
        # Existing variable: only legal when reuse is allowed, and the
        # requested shape/dtype must be compatible with what was stored.
        if reuse is False:
            err_msg = 'Variable %s already exists, disallowed. Did you mean to set reuse=True or reuse=tf.AUTO_REUSE in VarScope?' % name
            raise ValueError(err_msg)
        found_var = self._vars[name]
        if not shape.is_compatible_with(found_var.get_shape()):
            raise ValueError('Trying to share variable %s, but specified shape %s and found shape %s.' % (name, shape, found_var.get_shape()))
        if not dtype.is_compatible_with(found_var.dtype):
            dtype_str = dtype.name
            found_type_str = found_var.dtype.name
            raise ValueError('Trying to share variable %s, but specified dtype %s and found dtype %s.' % (name, dtype_str, found_type_str))
        return found_var
    if reuse is True:
        raise ValueError('Variable %s does not exist, or was not created with tf.get_variable(). Did you mean to set reuse=tf.AUTO_REUSE in VarScope?'
                         % name)
    if initializer is None:
        initializer, initializing_from_value = self._get_default_initializer(name=name, shape=shape, dtype=dtype)
    # Build the initial value (or a deferred initializer callable) inside
    # init_scope so creation is lifted out of function-building graphs.
    with ops.init_scope():
        if initializing_from_value:
            init_val = initializer
            variable_dtype = None
        else:
            if tf_inspect.isclass(initializer):
                initializer = initializer()
            if shape.is_fully_defined():
                # Pass partition_info only to initializers that accept it.
                if 'partition_info' in tf_inspect.getargspec(initializer).args:
                    init_val = functools.partial(initializer, shape.as_list(), dtype=dtype, partition_info=partition_info)
                else:
                    init_val = functools.partial(initializer, shape.as_list(), dtype=dtype)
                variable_dtype = dtype.base_dtype
            else:
                init_val = initializer
                variable_dtype = None
    with ops.init_scope():
        v = variables.Variable(initial_value=init_val, name=name, trainable=trainable, caching_device=caching_device, dtype=variable_dtype, validate_shape=validate_shape, constraint=constraint, synchronization=synchronization, aggregation=aggregation)
    self._vars[name] = v
    logging.vlog(1, 'Created variable %s with shape %s and init %s', v.name, format(shape), initializer)
    if regularizer:
        self.add_regularizer(v, regularizer)
    return v
Get or create a single Variable (e.g. a shard or entire variable). See the documentation of get_variable above (ignore partitioning components) for details. Args: name: see get_variable. shape: see get_variable. dtype: see get_variable. initializer: see get_variable. regularizer: see get_variable. partition_info: _PartitionInfo object. reuse: see get_variable. trainable: see get_variable. caching_device: see get_variable. validate_shape: see get_variable. constraint: see get_variable. synchronization: see get_variable. aggregation: see get_variable. Returns: A Variable. See documentation of get_variable above. Raises: ValueError: See documentation of get_variable above.
github-repos
def generate_contour_data(pid):
    """Generate SNR contour data and write it out to a file.

    Reads in sensitivity curves and binary parameters, computes SNRs with a
    matched-filtering approach, and writes the contour data to a file.

    Args:
        pid (GenInput or dict): GenInput instance or dictionary containing
            all of the input information for the generation. Must provide
            the keys 'general', 'generate_info' and 'output_info'.
    """
    # Accept either a GenInput object or its plain-dict representation.
    if isinstance(pid, GenInput):
        pid = pid.return_dict()
    begin_time = time.time()
    # Default the working directory to the current directory if unset.
    WORKING_DIRECTORY = '.'
    if 'WORKING_DIRECTORY' not in pid['general'].keys():
        pid['general']['WORKING_DIRECTORY'] = WORKING_DIRECTORY
    running_process = GenProcess(**{**pid, **pid['generate_info']})
    running_process.set_parameters()
    running_process.run_snr()
    file_out = FileReadOut(running_process.xvals, running_process.yvals, running_process.final_dict, **{**pid['general'], **pid['generate_info'], **pid['output_info']})
    print('outputing file:', pid['general']['WORKING_DIRECTORY'] + '/' + pid['output_info']['output_file_name'])
    # Dispatch to the writer for the configured output type,
    # e.g. '<type>_read_out' — TODO confirm available writer methods.
    getattr(file_out, file_out.output_file_type + '_read_out')()
    # Report elapsed wall-clock time.
    print(time.time()-begin_time)
    return
Main function for this program. This will read in sensitivity_curves and binary parameters; calculate snrs with a matched filtering approach; and then read the contour data out to a file. Args: pid (obj or dict): GenInput class or dictionary containing all of the input information for the generation. See BOWIE documentation and example notebooks for usage of this class.
juraj-google-style
def delete(filething):
    """Remove tags from a file.

    Arguments:
        filething (filething)

    Raises:
        mutagen.MutagenError
    """
    flac = OggFLAC(filething)
    # Rewind before delegating, since constructing OggFLAC consumed the stream.
    filething.fileobj.seek(0)
    flac.delete(filething)
delete(filething) Arguments: filething (filething) Raises: mutagen.MutagenError Remove tags from a file.
juraj-google-style
def _determine_hpp_url(self, platform, action):
    """Return the Adyen HPP endpoint based on the provided platform and action.

    Args:
        platform (str): Adyen platform, i.e. 'live' or 'test'.
        action (str): the HPP action to perform.
            Possible actions: select, pay, skipDetails, directory.
    """
    base_uri = settings.BASE_HPP_URL.format(platform)
    return '/'.join([base_uri, action + '.shtml'])
This returns the Adyen HPP endpoint based on the provided platform, and action. Args: platform (str): Adyen platform, ie 'live' or 'test'. action (str): the HPP action to perform. possible actions: select, pay, skipDetails, directory
codesearchnet
def expand_char_ngrams(source, minn, maxn, itself='ASIS', name=None):
    """Split unicode strings into char ngrams.

    Ngram size is configured with minn and maxn.

    Args:
        source: `Tensor` or `SparseTensor` of any shape, strings to split.
        minn: Minimum length of char ngram.
        maxn: Maximum length of char ngram.
        itself: Scalar value, strategy for source word preserving.
            One of `"ASIS"`, `"NEVER"`, `"ALWAYS"`, `"ALONE"`.
        name: A name for the operation (optional).

    Returns:
        `SparseTensor` with an additional dimension of size 1 added.
    """
    with ops.name_scope(name, 'ExpandCharNgrams', [source]):
        source = convert_to_tensor_or_sparse_tensor(source, dtype=tf.string)
        if isinstance(source, tf.SparseTensor):
            # Expand only the values, then stitch the new sparse dimension
            # onto the parent's indices/shape.
            (child_indices, child_values, child_shape) = ops_module.expand_char_ngrams(source.values, minn, maxn, itself)
            result = _combine_sparse_successor(source.indices, source.dense_shape, child_indices, child_values, child_shape)
        else:
            # Dense input: the custom op directly yields sparse components.
            (indices, values, shape) = ops_module.expand_char_ngrams(source, minn, maxn, itself)
            result = tf.SparseTensor(indices=indices, values=values, dense_shape=shape)
        return result
Split unicode strings into char ngrams. Ngrams size configures with minn and max Args: source: `Tensor` or `SparseTensor` of any shape, strings to split minn: Minimum length of char ngram minn: Maximum length of char ngram itself: Scalar value, strategy for source word preserving. One of `"ASIS"`, `"NEVER"`, `"ALWAYS"`, `"ALONE"`. name: A name for the operation (optional). Returns: `SparseTensor` with an additional dimension of size 1 added.
codesearchnet
def unwrap_outputs(distribution_strategy, grouped_outputs, with_loss_tensor=False):
    """Unwrap the list of outputs contained in the PerReplica parameters.

    Calls `flatten_per_replica_values` to parse each of the input parameters
    into a list of outputs on the different devices. If `with_loss_tensor` is
    True, also reduces the per-device losses into one loss tensor.

    Args:
        distribution_strategy: DistributionStrategy used to distribute
            training and validation.
        grouped_outputs: PerReplica outputs returned from the train or test
            function that we ran on each device.
        with_loss_tensor: Boolean that indicates if we need to add the
            reduced loss tensor as one of the outputs.

    Returns:
        Values of each of the PerReplica outputs.
    """
    if not with_loss_tensor:
        return flatten_per_replica_values(distribution_strategy, grouped_outputs)
    if not isinstance(grouped_outputs, list):
        grouped_outputs = [grouped_outputs]
    # First element is the loss; sum it across replicas.
    loss = distribution_strategy.reduce(reduce_util.ReduceOp.SUM, grouped_outputs[0], axis=None)
    all_outputs = flatten_per_replica_values(distribution_strategy, grouped_outputs[1:])
    # On eager TPU strategies every replica reports identical outputs, so
    # keep only one per sync group.
    if backend.is_tpu_strategy(distribution_strategy) and ops.executing_eagerly_outside_functions():
        all_outputs = all_outputs[::distribution_strategy.num_replicas_in_sync]
    return [loss] + all_outputs
Unwrap the list of outputs contained in the PerReplica parameters. This function calls `flatten_per_replica_values` to parse each of the input parameters into a list of outputs on the different devices. If we set `with_loss_tensor` to be True, we also call `reduce` on the list of losses on the different devices to give us one loss tensor. Args: distribution_strategy: DistributionStrategy used to distribute training and validation. grouped_outputs: PerReplica outputs returned from the train or test function that we ran on each device. with_loss_tensor: Boolean that indicates if we need to add the reduced loss tensor as one of the outputs. Returns: Values of each of the PerReplica outputs.
github-repos
def dml_loss(pred, labels, weights_fn=_weights_one_third, reduce_sum=True):
    """Discretized mixture of logistics loss.

    Args:
        pred: A [batch, height, width, num_mixtures*10] tensor of floats
            comprising one unconstrained mixture probability, three means
            (one per channel), three standard deviations (one per channel),
            and three coefficients which linearly parameterize dependence
            across channels.
        labels: A [batch, height, width, channels] tensor of 8-bit pixel
            intensities. The computation assumes channels is 3.
        weights_fn: A function of labels, returning a Tensor of shape
            [batch, height, width] which weights each loss term. Default is
            to scale each loss term by 1/3 so that they capture the average
            across channels.
        reduce_sum: A boolean, to return scalar loss instead of per position.

    Returns:
        Tuple of loss tensors for numerator and denominator, each a scalar if
        reduce_sum else of shape [batch, height, width]. The sum of their
        divisions is the number of nats for each pixel in labels.
    """
    # Map 8-bit intensities into the symmetric real range the DML expects.
    labels_real = convert_rgb_to_symmetric_real(labels)
    per_position_loss = discretized_mix_logistic_loss(pred=pred, labels=labels_real)
    weights = weights_fn(labels)
    numerator = weights * per_position_loss
    denominator = weights_nonzero(weights)
    if reduce_sum:
        return (tf.reduce_sum(numerator), tf.reduce_sum(denominator))
    return (numerator, denominator)
Discretized mixture of logistics loss. Args: pred: A [batch, height, width, num_mixtures*10] tensor of floats comprising one unconstrained mixture probability, three means (one per channel), three standard deviations (one per channel), and three coefficients which linearly parameterize dependence across channels. labels: A [batch, height, width, channels] tensor of 8-bit pixel intensities. The computation assumes channels is 3. weights_fn: A function of labels, returning a Tensor of shape [batch, height, width] which weights each loss term. Default is to scale each loss term by 1/3 so that they capture the average across channels. reduce_sum: A boolean, to return scalar loss instead of per position. Returns: Tuple of loss tensors for numerator and denominator, each a scalar if reduce_sum else of shape [batch, height, width]. The sum of their divisions is the number of nats for each pixel in labels.
codesearchnet
def __init__(self, filename: str, mode: str = 'r+', *, validate: bool = True, spec_version: str = "2.0.1") -> None:
    """Establish a connection to a Loom file.

    Args:
        filename: Name of the .loom file to open.
        mode: read/write mode, accepts 'r+' (read/write) or 'r' (read-only),
            defaults to 'r+'.
        validate: Validate that the file conforms with the Loom specification.
        spec_version: Loom spec version to validate against.

    Raises:
        IOError: If the file does not exist.
        ValueError: If the mode is invalid or validation fails.
    """
    if not os.path.exists(filename):
        # Bug fix: the f-string placeholder had been mangled to the literal
        # text '(unknown)', so the error never reported the actual path.
        raise IOError(f"File '{filename}' not found")
    if mode != 'r+' and mode != 'r':
        raise ValueError("Mode must be either 'r' or 'r+'")
    self.filename = filename
    if validate:
        lv = loompy.LoomValidator(version=spec_version)
        if not lv.validate(filename):
            # Same placeholder fix as above; also fixes typo "appead".
            raise ValueError("\n".join(lv.errors) + f"\n{filename} does not appear to be a valid Loom file according to Loom spec version '{spec_version}'")
    self._file = h5py.File(filename, mode)
    self._closed = False
    # An empty/new file may not contain a matrix yet.
    if "matrix" in self._file:
        self.shape = self._file["/matrix"].shape
    else:
        self.shape = (0, 0)
    self.layers = loompy.LayerManager(self)
    self.view = loompy.ViewManager(self)
    self.ra = loompy.AttributeManager(self, axis=0)
    self.ca = loompy.AttributeManager(self, axis=1)
    self.attrs = loompy.FileAttributeManager(self._file)
    self.row_graphs = loompy.GraphManager(self, axis=0)
    self.col_graphs = loompy.GraphManager(self, axis=1)
    # Deprecated aliases kept for backward compatibility.
    self.layer = self.layers
    self.row_attrs = self.ra
    self.col_attrs = self.ca
Establish a connection to a Loom file. Args: filename: Name of the .loom file to open mode: read/write mode, accepts 'r+' (read/write) or 'r' (read-only), defaults to 'r+' without arguments, and to 'r' with incorrect arguments validate: Validate that the file conforms with the Loom specification Returns: Nothing.
juraj-google-style
def create_workspace(self, did, name, version_id=None):
    """Create a workspace in the specified document.

    Args:
        - did (str): the document id of where to create the new workspace
        - name (str): the new name of the copied workspace.
        - version_id (str): the ID of the version to be copied into a new
          workspace

    Returns:
        - requests.Response: Onshape response data
    """
    payload = {'isPublic': True, 'name': name}
    # Only include versionId when the caller supplied one.
    if version_id:
        payload['versionId'] = version_id
    endpoint = '/api/documents/d/' + did + '/workspaces'
    return self._api.request('post', endpoint, body=payload)
Create a workspace in the specified document. Args: - did (str): the document id of where to create the new workspace - name (str): the new name of the copied workspace. - version_id (str): the ID of the version to be copied into a new workspace Returns: - requests.Response: Onshape response data
juraj-google-style
def post_file(self, url, filename, file_stream, *args, **kwargs):
    """Uploads file to provided url and returns the response body as text.

    Args:
        **url**: address where to upload file
        **filename**: Name of the uploaded file
        **file_stream**: file like object to upload
        **additional_headers**: (optional) Additional headers to be used
            with request

    Returns:
        string, or None on a non-success status code
    """
    headers = self._prepare_headers(**kwargs)
    response = self._conn.post(url, files={filename: file_stream}, headers=headers)
    # Treat both 200 (OK) and 201 (Created) as success.
    if response.status_code in (200, 201):
        return response.text
    return None
Uploads file to provided url. Returns contents as text Args: **url**: address where to upload file **filename**: Name of the uploaded file **file_stream**: file like object to upload .. versionadded:: 0.3.2 **additional_headers**: (optional) Additional headers to be used with request Returns: string
juraj-google-style
def __init__(self, python_function, name, input_signature=None, autograph=True, jit_compile=None, reduce_retracing=False, experimental_implements=None, experimental_autograph_options=None, experimental_attributes=None):
    """Initializes a `Function`.

    Args:
      python_function: the function to be wrapped.
      name: the name given to it.
      input_signature: See the documentation for `tf.function`.
      autograph: See the documentation for `tf.function`.
      jit_compile: See the documentation for `tf.function`.
      reduce_retracing: See the documentation for `tf.function`.
      experimental_implements: See the documentation for `tf.function`.
      experimental_autograph_options: See the documentation for `tf.function`.
      experimental_attributes: See the documentation for `tf.function`.

    Raises:
      ValueError: if an unsupported attribute is passed.
    """
    self._lock = threading.RLock()
    self._python_function = python_function
    self._function_type, self._default_values = function_type_utils.make_function_type(python_function, input_signature)
    self._function_cache = function_cache.FunctionCache()
    self._function_captures = capture_container.FunctionCaptures()
    # Collect attributes from `implements` first, then merge explicit ones.
    self._attributes = {}
    if experimental_implements is not None:
        self._attributes = self._create_implements_attribute(experimental_implements)
    if experimental_attributes is not None:
        self._attributes.update(experimental_attributes)
    # Reject any attribute outside the supported allowlist.
    for attribute in self._attributes:
        if attribute not in attributes_lib.POLYMORPHIC_FUNCTION_ALLOWLIST:
            raise ValueError(f'`{attribute} is not supported by tf.function as an attribute.')
    self._is_pure = self._attributes and attributes_lib.IMPLEMENTS in self._attributes
    self._shared_rendezvous = None
    self._autograph = autograph
    self._experimental_autograph_options = experimental_autograph_options
    self._reduce_retracing = reduce_retracing
    self._jit_compile = jit_compile
    # Populated lazily on first trace.
    self._created_variables = None
    self._variable_creation_config = None
    self._no_variable_creation_config = None
    # Weak keys so cached descriptors don't keep instances alive.
    self._descriptor_cache = weakref.WeakKeyDictionary()
    self._name = name
    self._key_for_call_stats = self._get_key_for_call_stats()
    self._omit_frequent_tracing_warning = False
    # Record API usage for telemetry.
    ops._tf_function_api_gauge.get_cell().set(True)
Initializes a `Function`. Args: python_function: the function to be wrapped. name: the name given to it. input_signature: See the documentation for `tf.function`. autograph: See the documentation for `tf.function`. jit_compile: See the documentation for `tf.function`. reduce_retracing: See the documentation for `tf.function`. experimental_implements: See the documentation for `tf.function`. experimental_autograph_options: See the documentation for `tf.function`. experimental_attributes: See the documentation for `tf.function`. Raises: ValueError: if `input_signature` is not None and the `python_function`'s argspec has keyword arguments.
github-repos
def _read_signer(key_filename):
    """Reads the given file as a hex key and returns a signer.

    Args:
        key_filename: The filename where the key is stored. If None,
            defaults to the default key for the current user.

    Returns:
        Signer: the signer

    Raises:
        CliException: If unable to read the file or parse the key.
    """
    filename = key_filename
    # Default key path: ~/.sawtooth/keys/<username>.priv
    if filename is None:
        filename = os.path.join(os.path.expanduser('~'), '.sawtooth', 'keys', getpass.getuser() + '.priv')
    try:
        with open(filename, 'r') as key_file:
            signing_key = key_file.read().strip()
    except IOError as e:
        raise CliException('Unable to read key file: {}'.format(str(e)))
    try:
        private_key = Secp256k1PrivateKey.from_hex(signing_key)
    except ParseError as e:
        raise CliException('Unable to read key in file: {}'.format(str(e)))
    context = create_context('secp256k1')
    crypto_factory = CryptoFactory(context)
    return crypto_factory.new_signer(private_key)
Reads the given file as a hex key. Args: key_filename: The filename where the key is stored. If None, defaults to the default key for the current user. Returns: Signer: the signer Raises: CliException: If unable to read the file.
juraj-google-style
def synchronize_task(self, func, *args, **kwargs):
    """Run callable in the rpc thread and wait for it to finish.

    The callable ``func`` is wrapped in a coroutine and handed to the
    EmulationLoop; this call blocks until it completes and returns/raises
    whatever the callable returns/raises. Useful for activities that must
    be synchronized with the rpc thread for safety reasons.

    Args:
        func (callable): Called as ``func(*args, **kwargs)``.
        *args: Positional arguments forwarded to ``func``.
        **kwargs: Keyword arguments forwarded to ``func``.

    Returns:
        object: Whatever ``func`` returns.
    """
    async def _call_sync():
        return func(*args, **kwargs)
    coro = _call_sync()
    return self.emulator.run_task_external(coro)
Run callable in the rpc thread and wait for it to finish. The callable ``func`` will be passed into the EmulationLoop and run there. This method will block until ``func`` is finished and return/raise whatever that callable returns/raises. This method is mainly useful for performing an activity that needs to be synchronized with the rpc thread for safety reasons. If this method is called from the rpc thread itself, it will just run the task and return its result. Args: func (callable): A method with signature callable(*args, **kwargs), that will be called with the optional *args and **kwargs passed to this method. *args: Arguments that will be passed to callable. **kwargs: Keyword arguments that will be passed to callable. Returns: object: Whatever callable returns after it runs.
codesearchnet
def _calculateCrcString(inputstring):
    """Calculate CRC-16 for Modbus.

    Args:
        inputstring (str): An arbitrary-length message (without the CRC).

    Returns:
        A two-byte CRC string, where the least significant byte is first.
    """
    _checkString(inputstring, description='input CRC string')
    # Standard table-driven CRC-16/Modbus: preset 0xFFFF, one table lookup
    # per input byte.
    crc = 0xFFFF
    for character in inputstring:
        crc = (crc >> 8) ^ _CRC16TABLE[(crc ^ ord(character)) & 0xFF]
    return _numToTwoByteString(crc, LsbFirst=True)
Calculate CRC-16 for Modbus. Args: inputstring (str): An arbitrary-length message (without the CRC). Returns: A two-byte CRC string, where the least significant byte is first.
juraj-google-style
def extract_keywords(self, sentence, span_info=False):
    """Searches in the string for all keywords present in corpus.

    Keywords present are added to a list `keywords_extracted` and returned.

    Args:
        sentence (str): Line of text where we will search for keywords.
        span_info (bool): if True, return (keyword, start, end) tuples
            instead of bare keywords.

    Returns:
        keywords_extracted (list(str)): List of terms/keywords found in
            sentence that match our corpus.
    """
    keywords_extracted = []
    if (not sentence):
        # Empty sentence: nothing to extract.
        return keywords_extracted
    if (not self.case_sensitive):
        sentence = sentence.lower()
    # Walk the trie character by character; `current_dict` is the current
    # trie node, `self._keyword` marks a terminal node.
    current_dict = self.keyword_trie_dict
    sequence_start_pos = 0
    sequence_end_pos = 0
    reset_current_dict = False
    idx = 0
    sentence_len = len(sentence)
    while (idx < sentence_len):
        char = sentence[idx]
        if (char not in self.non_word_boundaries):
            # We hit a word boundary: a keyword may end here.
            if ((self._keyword in current_dict) or (char in current_dict)):
                sequence_found = None
                longest_sequence_found = None
                is_longer_seq_found = False
                if (self._keyword in current_dict):
                    # Shortest complete match ending at this boundary.
                    sequence_found = current_dict[self._keyword]
                    longest_sequence_found = current_dict[self._keyword]
                    sequence_end_pos = idx
                if (char in current_dict):
                    # Look ahead for a longer match continuing through
                    # this boundary character.
                    current_dict_continued = current_dict[char]
                    idy = (idx + 1)
                    while (idy < sentence_len):
                        inner_char = sentence[idy]
                        if ((inner_char not in self.non_word_boundaries) and (self._keyword in current_dict_continued)):
                            # Longer complete match found at this boundary.
                            longest_sequence_found = current_dict_continued[self._keyword]
                            sequence_end_pos = idy
                            is_longer_seq_found = True
                        if (inner_char in current_dict_continued):
                            current_dict_continued = current_dict_continued[inner_char]
                        else:
                            break
                        idy += 1
                    else:
                        # Reached end of sentence while still in the trie.
                        if (self._keyword in current_dict_continued):
                            longest_sequence_found = current_dict_continued[self._keyword]
                            sequence_end_pos = idy
                            is_longer_seq_found = True
                    if is_longer_seq_found:
                        # Jump past the longer match.
                        idx = sequence_end_pos
                current_dict = self.keyword_trie_dict
                if longest_sequence_found:
                    keywords_extracted.append((longest_sequence_found, sequence_start_pos, idx))
                    reset_current_dict = True
            else:
                # No match possible here: restart from the trie root.
                current_dict = self.keyword_trie_dict
                reset_current_dict = True
        elif (char in current_dict):
            # Inside a word and the trie can continue.
            current_dict = current_dict[char]
        else:
            # Mid-word mismatch: restart and skip to the end of this word.
            current_dict = self.keyword_trie_dict
            reset_current_dict = True
            idy = (idx + 1)
            while (idy < sentence_len):
                char = sentence[idy]
                if (char not in self.non_word_boundaries):
                    break
                idy += 1
            idx = idy
        # Handle a match that runs to the very end of the sentence.
        if ((idx + 1) >= sentence_len):
            if (self._keyword in current_dict):
                sequence_found = current_dict[self._keyword]
                keywords_extracted.append((sequence_found, sequence_start_pos, sentence_len))
        idx += 1
        if reset_current_dict:
            reset_current_dict = False
            sequence_start_pos = idx
    if span_info:
        return keywords_extracted
    return [value[0] for value in keywords_extracted]
Searches in the string for all keywords present in corpus. Keywords present are added to a list `keywords_extracted` and returned. Args: sentence (str): Line of text where we will search for keywords Returns: keywords_extracted (list(str)): List of terms/keywords found in sentence that match our corpus Examples: >>> from flashtext import KeywordProcessor >>> keyword_processor = KeywordProcessor() >>> keyword_processor.add_keyword('Big Apple', 'New York') >>> keyword_processor.add_keyword('Bay Area') >>> keywords_found = keyword_processor.extract_keywords('I love Big Apple and Bay Area.') >>> keywords_found ['New York', 'Bay Area']
codesearchnet
def get_metrics_namespace(self) -> str:
    """Return the namespace used for metrics collected by the RunInference
    transform."""
    return 'BeamML_PyTorch'
Returns: A namespace for metrics collected by the RunInference transform.
github-repos
def service_status(self, short_name):
    """Get the current status of a service.

    Returns information about the service such as the time since the last
    heartbeat and the service's numeric and string state.

    Args:
        short_name (string): The short name of the service to query.

    Returns:
        dict: A dictionary with the status of the service.

    Raises:
        ArgumentError: If the service name is unknown.
    """
    if short_name not in self.services:
        raise ArgumentError("Unknown service name", short_name=short_name)
    service_state = self.services[short_name]['state']
    return {
        'heartbeat_age': monotonic() - service_state.last_heartbeat,
        'numeric_status': service_state.state,
        'string_status': service_state.string_state,
    }
Get the current status of a service. Returns information about the service such as the time elapsed since the last heartbeat, any status messages that have been posted about the service and whether the heartbeat should be considered out of the ordinary. Args: short_name (string): The short name of the service to query Returns: dict: A dictionary with the status of the service
juraj-google-style
def values_override(self) -> Optional[Mapping[str, Any]]:
    """Dictionary of keys to override in the model's config before exporting.

    Returns:
        Dictionary with the keys (and their corresponding values) to
        override, or None when no override is needed.
    """
    # Exporting requires caching to be disabled when the config supports it.
    return {'use_cache': False} if hasattr(self._config, 'use_cache') else None
Dictionary of keys to override in the model's config before exporting Returns: Dictionary with the keys (and their corresponding values) to override
github-repos
def nic_b(msg):
    """Obtain NICb, navigation integrity category supplement-b.

    Args:
        msg (string): 28 bytes hexadecimal message string

    Returns:
        int: NICb number (0 or 1)

    Raises:
        RuntimeError: If the message is not an airborne position message.
    """
    tc = typecode(msg)
    # Airborne position messages have type codes 9 through 18.
    if not 9 <= tc <= 18:
        raise RuntimeError("%s: Not a airborne position message, expecting 8<TC<19" % msg)
    # NICb is the single bit at position 40 (index 39) of the message.
    return int(common.hex2bin(msg)[39])
Obtain NICb, navigation integrity category supplement-b Args: msg (string): 28 bytes hexadecimal message string Returns: int: NICb number (0 or 1)
juraj-google-style