code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def SetFileContext(self, file_name, row_num, row, headers):
    """Remember the current file position so later errors can report it.

    Args:
        file_name (str): name of the file being processed.
        row_num (int): index of the current row.
        row (list[str]): the raw row values.
        headers (list[str]): column headers aligned with ``row``.
    """
    self._context = (file_name, row_num, row, headers)
Save the current context to be output with any errors. Args: file_name: string row_num: int row: list of strings headers: list of column headers, its order corresponding to row's
codesearchnet
def assign(var, new_val, assign_fn=assign_slice):
    """Assign a new value to a variable.

    Args:
        var: either a Variable operation or its output Tensor.
        new_val: a Tensor holding the new value.
        assign_fn: a function from
            (mtf.Variable, tf.Variable, tf.Tensor) -> tf.Operation.

    Returns:
        an Operation.

    Raises:
        ValueError: if ``var`` does not resolve to a Variable.
    """
    target = var.operation if isinstance(var, Tensor) else var
    if not isinstance(target, Variable):
        raise ValueError('var must be a mtf.Variable or its output Tensor.')
    return Assign([target], [new_val], assign_fn=assign_fn)
Assign a new value to a variable. Args: var: either a Variable operation or its output Tensor. new_val: a Tensor assign_fn: a function from (mtf.Variable, tf.Variable, tf.Tensor) -> tf.Operation Returns: an Operation Raises: ValueError: if var is not a Variable and var.operation is not a Variable
codesearchnet
def any(self, predicate=None):
    """Return True if any element of the source sequence satisfies ``predicate``.

    Only enough of the sequence to satisfy the predicate once is consumed.
    Note: uses immediate execution.

    Args:
        predicate: optional single-argument test function; when omitted or
            None, returns True if the source has at least one element.

    Returns:
        True if at least one element satisfies the predicate, else False.

    Raises:
        ValueError: if the Queryable has been closed.
        TypeError: if ``predicate`` is not callable.
    """
    if self.closed():
        raise ValueError('Attempt to call any() on a closed Queryable.')
    if predicate is None:
        predicate = lambda x: True
    if not is_callable(predicate):
        raise TypeError('any() parameter predicate={predicate} is not callable'.format(predicate=repr(predicate)))
    # Stop at the first truthy selected item; False when none is found.
    return next((True for item in self.select(predicate) if item), False)
Determine if the source sequence contains any elements which satisfy the predicate. Only enough of the sequence to satisfy the predicate once is consumed. Note: This method uses immediate execution. Args: predicate: An optional single argument function used to test each element. If omitted, or None, this method returns True if there is at least one element in the source. Returns: True if the sequence contains at least one element which satisfies the predicate, otherwise False. Raises: ValueError: If the Queryable is closed()
codesearchnet
def unlock_kinetis(jlink):
    """Unlock a Freescale Kinetis K40 or K60 device.

    Args:
        jlink (JLink): a J-Link instance connected to a target.

    Returns:
        ``True`` if the device was successfully unlocked, otherwise ``False``.

    Raises:
        ValueError: if the J-Link is not connected to a target.
        NotImplementedError: if the target interface has no unlock method.
    """
    if not jlink.connected():
        raise ValueError('No target to unlock.')
    # Dispatch on the target interface type.
    unlock = UNLOCK_METHODS.get(jlink.tif, None)
    if unlock is None:
        raise NotImplementedError('Unsupported target interface for unlock.')
    return unlock(jlink)
Unlock for Freescale Kinetis K40 or K60 device. Args: jlink (JLink): an instance of a J-Link that is connected to a target. Returns: ``True`` if the device was successfully unlocked, otherwise ``False``. Raises: ValueError: if the J-Link is not connected to a target.
codesearchnet
def extract_block(content: str, indent_level: int = 0) -> str:
    """Return the first block in `content` with the indent level `indent_level`.

    The first line of `content` must be indented at `indent_level`, otherwise
    an error is raised. The search stops at the first non-empty line with an
    indent level below `indent_level`.

    Args:
        content (`str`): the content to parse.
        indent_level (`int`, *optional*, defaults to 0): the indent level of
            the block to search for.

    Returns:
        `str`: the first block found at `indent_level`.
    """
    collected = []
    end_markers = (')', ']', '}', '"""')
    for idx, line in enumerate(content.split('\n')):
        blank = is_empty_line(line)
        if idx == 0 and indent_level > 0 and not blank and find_indent(line) != indent_level:
            raise ValueError(f'When `indent_level > 0`, the first line in `content` should have indent level {indent_level}. Got {find_indent(line)} instead.')
        # A dedented non-empty line terminates the block.
        if not blank and find_indent(line) < indent_level:
            break
        if collected and not blank and not line.endswith(':') and find_indent(line) == indent_level:
            # A closing bracket/quote at the block's own indent belongs to it.
            if line.lstrip() in end_markers:
                collected.append(line)
            return '\n'.join(collected)
        collected.append(line)
    if collected:
        return '\n'.join(collected)
Return the first block in `content` with the indent level `indent_level`. The first line in `content` should be indented at `indent_level` level, otherwise an error will be thrown. This method will immediately stop the search when a (non-empty) line with indent level less than `indent_level` is encountered. Args: content (`str`): The content to parse indent_level (`int`, *optional*, default to 0): The indent level of the blocks to search for Returns: `str`: The first block in `content` with the indent level `indent_level`.
github-repos
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    """Write the data encoding the ProtocolVersion struct to a stream.

    Args:
        output_stream (stream): a data stream supporting a write method,
            usually a BytearrayStream, that receives the encoded data.
        kmip_version (KMIPVersion): KMIP version to encode with. Optional,
            defaults to KMIP 1.0.

    Raises:
        ValueError: if the major or minor version number is not set.
    """
    buffer = utils.BytearrayStream()
    if not self._major:
        raise ValueError('Invalid struct missing the major protocol version number.')
    self._major.write(buffer, kmip_version=kmip_version)
    if not self._minor:
        raise ValueError('Invalid struct missing the minor protocol version number.')
    self._minor.write(buffer, kmip_version=kmip_version)
    # Record the encoded payload length before writing the outer header.
    self.length = buffer.length()
    super(ProtocolVersion, self).write(output_stream, kmip_version=kmip_version)
    output_stream.write(buffer.buffer)
Write the data encoding the ProtocolVersion struct to a stream. Args: output_stream (stream): A data stream in which to encode object data, supporting a write method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0. Raises: ValueError: Raised if the data attribute is not defined.
codesearchnet
def __init__(self, resolver_context):
    """Initializes an encoded file system.

    Args:
        resolver_context (Context): a resolver context.
    """
    super(EncodedStreamFileSystem, self).__init__(resolver_context)
    # Unset at construction; presumably assigned when the file system is
    # opened — confirm against the Open() implementation.
    self._encoding_method = None
Initializes an encoded file system. Args: resolver_context (Context): a resolver context.
juraj-google-style
def _json_clean(d): result = {} compkeys = {} for (k, v) in d.items(): if (not isinstance(k, tuple)): result[k] = v else: key = 'c.{}'.format(id(k)) result[key] = v compkeys[key] = k return (result, compkeys)
Cleans the specified python `dict` by converting any tuple keys to strings so that they can be serialized by JSON. Args: d (dict): python dictionary to clean up. Returns: dict: cleaned-up dictionary.
codesearchnet
def read_core_state_eigen(self):
    """Read the core state eigenenergies at each ionic step from OUTCAR.

    Returns:
        A list of dicts over the atoms, such as [{"AO": [core state eig]}].
        The eigenenergy list for each AO spans all ionic steps. Example: the
        core state eigenenergy of the 2s AO of the 6th atom at the last
        ionic step is cl[5]["2s"][-1].
    """
    with zopen(self.filename, 'rt') as foutcar:
        line = foutcar.readline()
        while (line != ''):
            line = foutcar.readline()
            # NIONS gives the atom count; allocate one dict per atom.
            if ('NIONS =' in line):
                natom = int(line.split('NIONS =')[1])
                cl = [defaultdict(list) for i in range(natom)]
            if ('the core state eigen' in line):
                iat = (- 1)
                while (line != ''):
                    line = foutcar.readline()
                    # The eigenvalue block ends at the E-fermi line.
                    if ('E-fermi' in line):
                        break
                    data = line.split()
                    # An odd token count means the line starts a new atom
                    # (leading atom-index token); advance and drop it.
                    if ((len(data) % 2) == 1):
                        iat += 1
                        data = data[1:]
                    # Remaining tokens are (AO label, energy) pairs.
                    for i in range(0, len(data), 2):
                        cl[iat][data[i]].append(float(data[(i + 1)]))
    return cl
Read the core state eigenenergies at each ionic step. Returns: A list of dict over the atom such as [{"AO":[core state eig]}]. The core state eigenenergy list for each AO is over all ionic steps. Example: The core state eigenenergy of the 2s AO of the 6th atom of the structure at the last ionic step is [5]["2s"][-1]
codesearchnet
def tokenize_to_spacy_doc(self, text: str) -> Doc:
    """Tokenize the given text, returning a spacy Doc.

    Used for the spacy rule extractor.

    Args:
        text (str): the text to tokenize.

    Returns:
        Doc: the tokenized spacy document.
    """
    if not self.keep_multi_space:
        # Collapse runs of spaces into a single space before tokenizing.
        text = re.sub(' +', ' ', text)
    doc = self.nlp(text, disable=['parser'])
    for token in doc:
        self.custom_token(token)
    return doc
Tokenize the given text, returning a spacy doc. Used for spacy rule extractor Args: text (string): Returns: Doc
codesearchnet
def _Open(self, path_spec=None, mode='rb'):
    """Opens the file-like object defined by path specification.

    The path specification must name a table and column, and exactly one of
    a row_condition (column_name, operator, value) or a row_index; the blob
    in that single cell becomes the file contents.

    Args:
      path_spec (PathSpec): path specification.
      mode (Optional[str]): file access mode.

    Raises:
      IOError: if the file-like object could not be opened, the database is
          already set, or the query did not yield exactly one 1-column row.
      PathSpecError: if the path specification is incorrect.
      ValueError: if the path specification is invalid.
    """
    if not path_spec:
      raise ValueError('Missing path specification.')

    if not path_spec.HasParent():
      raise errors.PathSpecError(
          'Unsupported path specification without parent.')

    table_name = getattr(path_spec, 'table_name', None)
    if table_name is None:
      raise errors.PathSpecError('Path specification missing table name.')

    column_name = getattr(path_spec, 'column_name', None)
    if column_name is None:
      raise errors.PathSpecError('Path specification missing column name.')

    row_condition = getattr(path_spec, 'row_condition', None)
    if row_condition:
      if not isinstance(row_condition, tuple) or len(row_condition) != 3:
        raise errors.PathSpecError((
            'Unsupported row_condition not a tuple in the form: '
            '(column_name, operator, value).'))

    row_index = getattr(path_spec, 'row_index', None)
    if row_index is not None:
      if not isinstance(row_index, py2to3.INTEGER_TYPES):
        raise errors.PathSpecError(
            'Unsupported row_index not of integer type.')

    # One of the two row selectors is required to pick a unique row.
    if not row_condition and row_index is None:
      raise errors.PathSpecError(
          'Path specification requires either a row_condition or row_index.')

    if self._database_object:
      raise IOError('Database file already set.')

    file_object = resolver.Resolver.OpenFileObject(
        path_spec.parent, resolver_context=self._resolver_context)

    # The database copies what it needs; the file object is always closed.
    try:
      database_object = sqlite_database.SQLiteDatabaseFile()
      database_object.Open(file_object)
    finally:
      file_object.close()

    # Validate schema, then run the row-index or row-condition query.
    error_string = ''
    if not database_object.HasTable(table_name):
      error_string = 'Missing table: {0:s}'.format(table_name)

    elif not database_object.HasColumn(table_name, column_name):
      error_string = 'Missing column: {0:s} in table: {1:s}'.format(
          column_name, table_name)

    elif not row_condition:
      query = 'SELECT {0:s} FROM {1:s} LIMIT 1 OFFSET {2:d}'.format(
          column_name, table_name, row_index)
      rows = database_object.Query(query)

    elif not database_object.HasColumn(table_name, row_condition[0]):
      error_string = (
          'Missing row condition column: {0:s} in table: {1:s}'.format(
              row_condition[0], table_name))

    elif row_condition[1] not in self._OPERATORS:
      error_string = (
          'Unsupported row condition operator: {0:s}.'.format(
              row_condition[1]))

    else:
      # Value is bound as a parameter; only column/operator are interpolated
      # and the operator has been validated against self._OPERATORS.
      query = 'SELECT {0:s} FROM {1:s} WHERE {2:s} {3:s} ?'.format(
          column_name, table_name, row_condition[0], row_condition[1])
      rows = database_object.Query(query, parameters=(row_condition[2], ))

    # The query must select exactly one row with exactly one column.
    if not error_string and (len(rows) != 1 or len(rows[0]) != 1):
      if not row_condition:
        error_string = (
            'Unable to open blob in table: {0:s} and column: {1:s} '
            'for row: {2:d}.').format(table_name, column_name, row_index)
      else:
        row_condition_string = ' '.join([
            '{0!s}'.format(value) for value in iter(row_condition)])
        error_string = (
            'Unable to open blob in table: {0:s} and column: {1:s} '
            'where: {2:s}.').format(
                table_name, column_name, row_condition_string)

    if error_string:
      database_object.Close()
      raise IOError(error_string)

    self._blob = rows[0][0]
    self._current_offset = 0
    self._database_object = database_object
    self._size = len(self._blob)
    self._table_name = table_name
Opens the file-like object defined by path specification. Args: path_spec (PathSpec): path specification. mode (Optional[str]): file access mode. Raises: AccessError: if the access to open the file was denied. IOError: if the file-like object could not be opened. OSError: if the file-like object could not be opened. PathSpecError: if the path specification is incorrect. ValueError: if the path specification is invalid.
juraj-google-style
def _get_typed_value(self, key, target_type, type_convert, is_optional=False, is_secret=False, is_local=False, default=None, options=None):
    """Return the value corresponding to ``key`` converted to the given type.

    Args:
        key: the dict key.
        target_type: the type we expect the value to be in.
        type_convert: a callable that converts the raw value to target_type.
        is_optional: when False, a missing key raises.
        is_secret: whether the key is a secret.
        is_local: whether the key is local to this service.
        default: default value returned when is_optional is True.
        options: list/tuple; when provided, the value must be one of these.

    Returns:
        The corresponding value of the key converted to target_type.

    Raises:
        RheaError: if the key is missing and not optional, or the value
            cannot be converted to target_type.
    """
    try:
        value = self._get(key)
    except KeyError:
        if (not is_optional):
            raise RheaError('No value was provided for the non optional key `{}`.'.format(key))
        return default
    # String values go through the converter; registration and option
    # checking happen before conversion so failures are still recorded.
    if isinstance(value, six.string_types):
        try:
            self._add_key(key, is_secret=is_secret, is_local=is_local)
            self._check_options(key=key, value=value, options=options)
            return type_convert(value)
        except ValueError:
            raise RheaError('Cannot convert value `{}` (key: `{}`) to `{}`'.format(value, key, target_type))
    # Already the right type: register, validate options, return as-is.
    if isinstance(value, target_type):
        self._add_key(key, is_secret=is_secret, is_local=is_local)
        self._check_options(key=key, value=value, options=options)
        return value
    raise RheaError('Cannot convert value `{}` (key: `{}`) to `{}`'.format(value, key, target_type))
Return the value corresponding to the key converted to the given type. Args: key: the dict key. target_type: The type we expect the variable or key to be in. type_convert: A lambda expression that converts the key to the desired type. is_optional: To raise an error if key was not found. is_secret: If the key is a secret. is_local: If the key is a local to this service. default: default value if is_optional is True. options: list/tuple if provided, the value must be one of these values. Returns: The corresponding value of the key converted.
codesearchnet
def _AddSaveOps(self, filename_tensor, saveables):
    """Add ops to save variables that are on the same shard.

    Args:
        filename_tensor: String Tensor with the checkpoint filename.
        saveables: a list of SaveableObject objects.

    Returns:
        A tensor with the filename used to save, gated on the save op.
    """
    save_op = self.save_op(filename_tensor, saveables)
    # Return the filename only after the save has actually run.
    return control_flow_ops.with_dependencies([save_op], filename_tensor)
Add ops to save variables that are on the same shard. Args: filename_tensor: String Tensor. saveables: A list of SaveableObject objects. Returns: A tensor with the filename used to save.
github-repos
def inputs(self, name):
    """List input names and types of a step in the steps library.

    Args:
        name (str): name of a step in the steps library.

    Returns:
        The step's input names and types.
    """
    self._closed()
    return self._get_step(name, make_copy=False).list_inputs()
List input names and types of a step in the steps library. Args: name (str): name of a step in the steps library.
codesearchnet
def dynamic_rope_update(rope_forward):
    """Decorator to update RoPE parameters in the forward pass, if the model
    uses a dynamic RoPE (one that may recompute its frequencies per forward).

    Args:
        rope_forward (Callable): The forward pass of the RoPE implementation.

    Returns:
        The decorated forward pass.
    """

    def longrope_frequency_update(self, position_ids, device):
        """Swap between long and original inv_freq based on sequence length."""
        seq_len = torch.max(position_ids) + 1
        if hasattr(self.config, 'original_max_position_embeddings'):
            original_max_position_embeddings = self.config.original_max_position_embeddings
        else:
            original_max_position_embeddings = self.config.max_position_embeddings
        if seq_len > original_max_position_embeddings:
            if not hasattr(self, 'long_inv_freq'):
                # Lazily compute the long-sequence frequencies only once.
                self.long_inv_freq, _ = self.rope_init_fn(self.config, device, seq_len=original_max_position_embeddings + 1)
            self.register_buffer('inv_freq', self.long_inv_freq, persistent=False)
        else:
            # Short sequences fall back to the original frequencies.
            self.original_inv_freq = self.original_inv_freq.to(device)
            self.register_buffer('inv_freq', self.original_inv_freq, persistent=False)

    def dynamic_frequency_update(self, position_ids, device):
        """Grow inv_freq when seq_len exceeds the cached maximum; reset to the
        original frequencies when the sequence shrinks back below it."""
        seq_len = torch.max(position_ids) + 1
        if seq_len > self.max_seq_len_cached:
            inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len)
            self.register_buffer('inv_freq', inv_freq, persistent=False)
            self.max_seq_len_cached = seq_len
        if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len:
            self.original_inv_freq = self.original_inv_freq.to(device)
            self.register_buffer('inv_freq', self.original_inv_freq, persistent=False)
            self.max_seq_len_cached = self.original_max_seq_len

    @wraps(rope_forward)
    def wrapper(self, x, position_ids):
        # Dispatch on the RoPE variant; static variants pass straight through.
        if 'dynamic' in self.rope_type:
            dynamic_frequency_update(self, position_ids, device=x.device)
        elif self.rope_type == 'longrope':
            longrope_frequency_update(self, position_ids, device=x.device)
        return rope_forward(self, x, position_ids)
    return wrapper
Decorator function to update the RoPE parameters in the forward pass, if the model is using a dynamic RoPE (i.e. a RoPE implementation that may recompute its frequencies in the forward pass). Args: rope_forward (Callable): The forward pass of the RoPE implementation. Returns: The decorated forward pass.
github-repos
def _create_vocab_table_lookup_model_tf1(self, sess: session.Session) -> Tuple[core.Tensor, core.Tensor, core.Tensor]:
    """Creates a simple model that initializes and looks up a vocab table.

    Writes an asset file "vocab_file.txt" containing comma-separated
    vocabularies, initializes a `StaticVocabularyTable` from it, and performs
    a lookup with a 1D string placeholder, followed by a matmul.

    NOTE(review): ``sess`` is not referenced in this body — presumably the
    graph is built in the session's default graph by the caller; confirm.

    Args:
        sess: TensorFlow Session to create the model in.

    Returns:
        (input_vocabs_placeholder, lookup_vals, output_tensor), where
        input_vocabs_placeholder is a placeholder tensor of 1D strings,
        lookup_vals is the direct result of the table lookup, and
        output_tensor is a float matrix derived from the lookups.
    """
    asset_dir = self.create_tempdir('assets').full_path
    asset_file = os.path.join(asset_dir, 'vocab_file.txt')
    file_io.write_string_to_file(filename=asset_file, file_content='hello,model,quantization\n')
    vocab_file = asset.Asset(asset_file)
    raw_vocab = io_ops.read_file(vocab_file)
    vocabs = ragged_string_ops.string_split_v2(string_ops.string_strip(raw_vocab), sep=',')
    # Map the three vocab tokens to ids 0..2, with 5 OOV buckets.
    kv_init = lookup_ops.KeyValueTensorInitializer(keys=vocabs, values=np.array([0, 1, 2]), value_dtype=dtypes.int64)
    table = lookup_ops.StaticVocabularyTable(kv_init, num_oov_buckets=5)
    input_vocabs_placeholder = array_ops.placeholder(dtypes.string, shape=(None,), name='input_vocabs')
    lookup_vals = math_ops.cast(table.lookup(input_vocabs_placeholder), dtypes.float32)
    # Stack the lookups into a 2xN matrix and multiply by an all-ones Nx2
    # weight so the model has a matmul to quantize.
    matmul_input = array_ops_stack.stack([lookup_vals, lookup_vals])
    weight_row = array_ops.ones(shape=array_ops.shape(input_vocabs_placeholder), dtype=dtypes.float32)
    weight = array_ops.transpose_v2(array_ops_stack.stack([weight_row, weight_row]))
    output_tensor = math_ops.matmul(matmul_input, weight)
    return (input_vocabs_placeholder, lookup_vals, output_tensor)
Creates a simple model that initializes and lookups a vocab table. This model creates an asset file at "vocab_file.txt" containing comma-separated vocabularies. It also initializes a `StaticVocabularyTable` and performs a lookup with the input vocabs, which is a 1D tensor of strings. Args: sess: Tensorflow Session to create the model in. Returns: (input_vocabs_placeholder, lookup_vals, output_tensor), where * input_vocabs_placeholder is a placeholder tensor of 1D strings * lookup_vals is an output tensor that is a direct result of table lookup * output_tensor is a float 2x2 matrix
github-repos
def _StartWorkerProcess(self, process_name, storage_writer):
    """Creates, starts, monitors and registers an analysis worker process.

    Args:
      process_name (str): process name; must match a loaded analysis plugin.
      storage_writer (StorageWriter): storage writer for a session storage
          used to create task storage.

    Returns:
      AnalysisProcess: started analysis process, or None if the plugin is
          missing or monitoring could not be set up.
    """
    analysis_plugin = self._analysis_plugins.get(process_name, None)
    if not analysis_plugin:
      logger.error('Missing analysis plugin: {0:s}'.format(process_name))
      return None

    # The output queue feeds events to the worker; with ZeroMQ the worker
    # connects a separate pull queue to the bound port, otherwise both ends
    # share the same multiprocessing queue.
    if self._use_zeromq:
      queue_name = '{0:s} output event queue'.format(process_name)
      output_event_queue = zeromq_queue.ZeroMQPushBindQueue(
          name=queue_name, timeout_seconds=self._QUEUE_TIMEOUT)
      output_event_queue.Open()

    else:
      output_event_queue = multi_process_queue.MultiProcessingQueue(
          timeout=self._QUEUE_TIMEOUT)

    self._event_queues[process_name] = output_event_queue

    if self._use_zeromq:
      queue_name = '{0:s} input event queue'.format(process_name)
      input_event_queue = zeromq_queue.ZeroMQPullConnectQueue(
          name=queue_name, delay_open=True, port=output_event_queue.port,
          timeout_seconds=self._QUEUE_TIMEOUT)

    else:
      input_event_queue = output_event_queue

    process = analysis_process.AnalysisProcess(
        input_event_queue, storage_writer, self._knowledge_base,
        analysis_plugin, self._processing_configuration,
        data_location=self._data_location,
        event_filter_expression=self._event_filter_expression,
        name=process_name)

    process.start()

    logger.info('Started analysis plugin: {0:s} (PID: {1:d}).'.format(
        process_name, process.pid))

    # A process that cannot be monitored is terminated rather than left
    # running unobserved.
    try:
      self._StartMonitoringProcess(process)
    except (IOError, KeyError) as exception:
      logger.error((
          'Unable to monitor analysis plugin: {0:s} (PID: {1:d}) '
          'with error: {2!s}').format(process_name, process.pid, exception))

      process.terminate()
      return None

    self._RegisterProcess(process)
    return process
Creates, starts, monitors and registers a worker process. Args: process_name (str): process name. storage_writer (StorageWriter): storage writer for a session storage used to create task storage. Returns: MultiProcessWorkerProcess: extraction worker process or None on error.
juraj-google-style
def versions_from_trove(trove):
    """Find Python major versions from a list of trove classifiers.

    Args:
        trove: list of trove classifier strings.

    Returns:
        Sorted list of numeric major-version strings (e.g. ['2', '3']).
    """
    majors = set()
    marker = 'Programming Language :: Python ::'
    for classifier in trove:
        if marker not in classifier:
            continue
        # Take the last segment and keep only its major component.
        major = classifier.split('::')[-1].split('.')[0].strip()
        if major:
            majors.add(major)
    # Discard non-numeric entries such as 'Implementation'.
    return sorted(v for v in majors if v.replace('.', '', 1).isdigit())
Finds out python version from list of trove classifiers. Args: trove: list of trove classifiers Returns: python version string
juraj-google-style
def add_record_references(self, app_id, record_id, field_id, target_record_ids):
    """Bulk operation to directly add record references without making any
    additional requests.

    Warnings:
        Does not perform any app, record, or target app/record validation.

    Args:
        app_id (str): full App ID string.
        record_id (str): full parent Record ID string.
        field_id (str): full ID of the reference field on the parent Record.
        target_record_ids (list[str]): full target reference Record IDs.
    """
    endpoint = 'app/{0}/record/{1}/add-references'.format(app_id, record_id)
    payload = {'fieldId': field_id, 'targetRecordIds': target_record_ids}
    self._swimlane.request('post', endpoint, json=payload)
Bulk operation to directly add record references without making any additional requests Warnings: Does not perform any app, record, or target app/record validation Args: app_id (str): Full App ID string record_id (str): Full parent Record ID string field_id (str): Full field ID to target reference field on parent Record string target_record_ids (List(str)): List of full target reference Record ID strings
juraj-google-style
def select_rows(self, rows): self.values = self.values.iloc[rows] self.index = self.index.iloc[(rows, :)] for prop in self._property_columns: vals = getattr(self, prop)[rows] setattr(self, prop, vals)
Truncate internal arrays to keep only the specified rows. Args: rows (array): An integer or boolean array identifying the indices of rows to keep.
codesearchnet
def all_folders(path_name, keyword='', has_date=False, date_fmt=DATE_FMT) -> list:
    """Search all folders matching the criteria, sorted by last modified.

    Args:
        path_name: full path name.
        keyword: keyword to search.
        has_date: whether the folder name must contain a date (default False).
        date_fmt: date format checked when ``has_date`` is set.

    Returns:
        list: all folder names fulfilling the criteria.
    """
    if not os.path.exists(path=path_name):
        return []
    path_name = path_name.replace('\\', '/')
    if keyword:
        # Glob on the keyword; skip non-directories and '~'-prefixed names.
        candidates = [
            f.replace('\\', '/')
            for f in glob.iglob(f'{path_name}/*{keyword}*')
            if os.path.isdir(f) and f.replace('\\', '/').split('/')[-1][0] != '~'
        ]
    else:
        candidates = [
            f'{path_name}/{f}'
            for f in os.listdir(path=path_name)
            if os.path.isdir(f'{path_name}/{f}') and f[0] != '~'
        ]
    folders = sort_by_modified(candidates)
    if has_date:
        folders = filter_by_dates(folders, date_fmt=date_fmt)
    return folders
Search all folders with criteria Returned list will be sorted by last modified Args: path_name: full path name keyword: keyword to search has_date: whether has date in file name (default False) date_fmt: date format to check for has_date parameter Returns: list: all folder names fulfilled criteria
codesearchnet
def get_node_angle(self, node):
    """Get the angle between two nodes relative to the horizontal.

    Args:
        node (object): the other node.

    Returns:
        float: the angle in radians.
    """
    dx = self.pos[0] - node.pos[0]
    dy = self.pos[1] - node.pos[1]
    return atan2(dx, dy) - pi / 2
Get the angle between 2 nodes relative to the horizontal. Args: node (object): The other node. Returns: rad: The angle
juraj-google-style
def exec_resize(self, exec_id, height=None, width=None):
    """Resize the tty session used by the specified exec command.

    Args:
        exec_id (str): ID of the exec instance (or a dict with an 'Id' key).
        height (int): height of the tty session.
        width (int): width of the tty session.
    """
    if isinstance(exec_id, dict):
        exec_id = exec_id.get('Id')
    url = self._url('/exec/{0}/resize', exec_id)
    response = self._post(url, params={'h': height, 'w': width})
    self._raise_for_status(response)
Resize the tty session used by the specified exec command. Args: exec_id (str): ID of the exec instance height (int): Height of tty session width (int): Width of tty session
codesearchnet
def query(self, batch=False, query_functions=None, credential=None):
    """Send a Query request to the server.

    Args:
        batch (bool): when True, the operation is only queued to be sent
            with a batch of additional operations. Defaults to False.
        query_functions (list): QueryFunction enumerations indicating what
            information the client wants. Optional, defaults to None.
        credential (Credential): authentication information for the server.
            Optional, defaults to None.

    Returns:
        The processed result of the query, or None when batching.
    """
    batch_item = self._build_query_batch_item(query_functions)
    if batch:
        # Defer: queue the item for a later batched request.
        self.batch_items.append(batch_item)
        return
    request = self._build_request_message(credential, [batch_item])
    response = self._send_and_receive_message(request)
    return self._process_batch_items(response)[0]
Send a Query request to the server. Args: batch (boolean): A flag indicating if the operation should be sent with a batch of additional operations. Defaults to False. query_functions (list): A list of QueryFunction enumerations indicating what information the client wants from the server. Optional, defaults to None. credential (Credential): A Credential object containing authentication information for the server. Optional, defaults to None.
codesearchnet
def render(self, trajectories: Tuple[(NonFluents, Fluents, Fluents, Fluents, np.array)], batch: Optional[int]=None) -> None:
    """Render the simulated state-action `trajectories` for the Navigation domain.

    NOTE(review): the annotation declares a 5-tuple but the body unpacks 6
    items (including initial_state); the type hint looks stale — confirm.

    Args:
        trajectories: (non_fluents, initial_state, states, actions, interms,
            rewards) simulation results.
        batch: Number of batches to render.
            # NOTE(review): ``batch`` is unused in this body.
    """
    (non_fluents, initial_state, states, actions, interms, rewards) = trajectories
    non_fluents = dict(non_fluents)
    # Keep only the first batch entry of each fluent.
    states = dict(((name, fluent[0]) for (name, fluent) in states))
    actions = dict(((name, fluent[0]) for (name, fluent) in actions))
    rewards = rewards[0]
    idx = self._compiler.rddl.domain.state_fluent_ordering.index('location/1')
    start = initial_state[idx][0]
    g = non_fluents['GOAL/1']
    path = states['location/1']
    deltas = actions['move/1']
    centers = non_fluents['DECELERATION_ZONE_CENTER/2']
    decays = non_fluents['DECELERATION_ZONE_DECAY/1']
    # Pair each zone center with its decay factor.
    zones = [(x, y, d) for ((x, y), d) in zip(centers, decays)]
    self._ax1 = plt.gca()
    self._render_state_space()
    self._render_start_and_goal_positions(start, g)
    self._render_deceleration_zones(zones)
    self._render_state_action_trajectory(start, path, deltas)
    plt.title('Navigation', fontweight='bold')
    plt.legend(loc='lower right')
    plt.show()
Render the simulated state-action `trajectories` for Navigation domain. Args: stats: Performance statistics. trajectories: NonFluents, states, actions, interms and rewards. batch: Number of batches to render.
codesearchnet
def raster_erosion(rasterfile):
    """Erode the raster image.

    Each output pixel becomes the minimum of its 3x3 neighborhood in the
    input, with borders padded by the raster's maximum value.

    Args:
        rasterfile: input raster; a filename string, a pygeoc Raster, or a
            numpy.ndarray.

    Returns:
        numpy.ndarray with the eroded raster, or an error message string if
        the input type is unsupported.
    """
    if is_string(rasterfile):
        origin = RasterUtilClass.read_raster(str(rasterfile))
    elif isinstance(rasterfile, Raster):
        origin = rasterfile.data
    elif isinstance(rasterfile, numpy.ndarray):
        origin = rasterfile
    else:
        return "Your rasterfile has a wrong type. Type must be string or " \
               "numpy.array or class Raster in pygeoc."
    rows = origin.shape[0]
    cols = origin.shape[1]
    pad_value = origin.max()
    # Pad one cell on every side with the raster maximum so border pixels
    # see a full 3x3 window without affecting the minimum.
    expanded = numpy.full((rows + 2, cols + 2), pad_value)
    expanded[1:rows + 1, 1:cols + 1] = origin
    eroded = numpy.zeros((rows, cols))
    for r in range(rows):
        for c in range(cols):
            eroded[r, c] = expanded[r:r + 3, c:c + 3].min()
    return eroded
Erode the raster image. Find the min pixel's value in 8-neighborhood. Then change the compute pixel's value into the min pixel's value. Args: rasterfile: input original raster image, type can be filename(string, like "test1.tif"), rasterfile(class Raster) or numpy.ndarray. Returns: erosion_raster: raster image after erosion, type is numpy.ndarray.
juraj-google-style
def _create_tensor_watch_maps(self, device_name): self._watch_key_to_datum[device_name] = {} self._watch_key_to_rel_time[device_name] = {} self._watch_key_to_dump_size_bytes[device_name] = {} for datum in self._dump_tensor_data[device_name]: if datum.watch_key not in self._watch_key_to_devices: self._watch_key_to_devices[datum.watch_key] = {device_name} else: self._watch_key_to_devices[datum.watch_key].add(device_name) if datum.watch_key not in self._watch_key_to_datum[device_name]: self._watch_key_to_datum[device_name][datum.watch_key] = [datum] self._watch_key_to_rel_time[device_name][datum.watch_key] = [datum.timestamp - self._t0] self._watch_key_to_dump_size_bytes[device_name][datum.watch_key] = [datum.dump_size_bytes] else: self._watch_key_to_datum[device_name][datum.watch_key].append(datum) self._watch_key_to_rel_time[device_name][datum.watch_key].append(datum.timestamp - self._t0) self._watch_key_to_dump_size_bytes[device_name][datum.watch_key].append(datum.dump_size_bytes)
Create maps from tensor watch keys to datum and to timestamps. Create a map from watch key (tensor name + debug op) to `DebugTensorDatum` item. Also make a map from watch key to relative timestamp. "relative" means (absolute timestamp - t0). Args: device_name: (str) name of the device.
github-repos
def delete_s3_bucket(client, resource):
    """Delete an S3 bucket, when deletion is enabled in configuration.

    Args:
        client (:obj:`boto3.session.Session.client`): a boto3 client object.
        resource (:obj:`Resource`): the bucket resource to terminate.

    Returns:
        `ActionStatus` success tuple with the resource metrics.
    """
    deletion_enabled = dbconfig.get('enable_delete_s3_buckets', NS_AUDITOR_REQUIRED_TAGS, False)
    if deletion_enabled:
        client.delete_bucket(Bucket=resource.id)
    return ActionStatus.SUCCEED, resource.metrics()
Delete an S3 bucket This function will try to delete an S3 bucket Args: client (:obj:`boto3.session.Session.client`): A boto3 client object resource (:obj:`Resource`): The resource object to terminate Returns: `ActionStatus`
juraj-google-style
def deserialize(doc_xml, pyxb_binding=None):
    """Deserialize DataONE XML types to PyXB.

    Args:
        doc_xml: UTF-8 encoded ``bytes``.
        pyxb_binding: PyXB binding object. If not specified, the default
            DataONE types binding is used.

    Returns:
        PyXB object.

    Raises:
        ValueError: if the XML cannot be deserialized to PyXB.

    See Also:
        ``deserialize_d1_exception()`` for deserializing DataONE Exception
        types.
    """
    binding = pyxb_binding or d1_common.types.dataoneTypes
    try:
        return binding.CreateFromDocument(doc_xml)
    except pyxb.ValidationError as e:
        # Validation failures carry structured details.
        raise ValueError(
            'Unable to deserialize XML to PyXB. error="{}" xml="{}"'.format(
                e.details(), doc_xml
            )
        )
    except (pyxb.PyXBException, xml.sax.SAXParseException, Exception) as e:
        raise ValueError(
            'Unable to deserialize XML to PyXB. error="{}" xml="{}"'.format(
                str(e), doc_xml
            )
        )
Deserialize DataONE XML types to PyXB. Args: doc_xml: UTF-8 encoded ``bytes`` pyxb_binding: PyXB binding object. If not specified, the correct one should be selected automatically. Returns: PyXB object See Also: ``deserialize_d1_exception()`` for deserializing DataONE Exception types.
juraj-google-style
def _show_status_for_work(self, work):
    """Print completion status for the given work pieces.

    Args:
        work: instance of either AttackWorkPieces or DefenseWorkPieces.
    """
    total = len(work.work)
    per_worker = {}
    completed = 0
    for piece in itervalues(work.work):
        if not piece['is_completed']:
            continue
        completed += 1
        worker_id = piece['claimed_worker_id']
        stats = per_worker.setdefault(worker_id, {'completed_count': 0, 'last_update': 0.0})
        stats['completed_count'] += 1
        # Track the most recent claim time per worker.
        stats['last_update'] = max(stats['last_update'], piece['claimed_worker_start_time'])
    print('Completed {0}/{1} work'.format(completed, total))
    for worker_id in sorted(iterkeys(per_worker)):
        stats = per_worker[worker_id]
        last_update_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(stats['last_update']))
        print('Worker {0}: completed {1} last claimed work at {2}'.format(worker_id, stats['completed_count'], last_update_time))
Shows status for given work pieces. Args: work: instance of either AttackWorkPieces or DefenseWorkPieces
codesearchnet
def run_compiler(self, compiler=GCC, inputs=None, output=None):
    """Run a compiler in the working directory and require exit status 0.

    Args:
        compiler (tuple): the compiler program and its command-line
            arguments, including placeholders for output and input files.
        inputs (tuple): the list of input files for the compiler.
        output (str): the name of the output file.
    """
    cmdline = compiler_cmdline(compiler=compiler, inputs=inputs, output=output)
    RunningProgram(self, *cmdline).expect_exit_status(0)
Runs a compiler in the working directory. Args: compiler (tuple): The compiler program and its command-line arguments, including placeholders for output and input files. inputs (tuple): The list of input files for the compiler. output (str): The name of the output file.
juraj-google-style
def load_scatter_table(self, fn):
    """Load the scattering lookup tables.

    Load the scattering lookup tables saved with save_scatter_table.

    Args:
        fn: The name of the scattering table file.

    Returns:
        tuple: the (time, description) metadata stored with the table.
    """
    # ``file()`` was a Python 2 builtin removed in Python 3 and the original
    # never closed the handle; open the pickle in binary mode in a context
    # manager instead.
    with open(fn, 'rb') as f:
        data = pickle.load(f)
    if ('version' not in data) or (data['version'] != tmatrix_aux.VERSION):
        warnings.warn('Loading data saved with another version.', Warning)
    (self.num_points, self.D_max, self._psd_D, self._S_table, self._Z_table,
     self._angular_table, self._m_table, self.geometries) = data['psd_scatter']
    return (data['time'], data['description'])
Load the scattering lookup tables. Load the scattering lookup tables saved with save_scatter_table. Args: fn: The name of the scattering table file.
codesearchnet
def _MakeServiceDescriptor(self, service_proto, service_index, scope, package, file_desc):
    """Make a protobuf ServiceDescriptor given a ServiceDescriptorProto.

    Args:
        service_proto: The descriptor_pb2.ServiceDescriptorProto protobuf
            message.
        service_index: The index of the service in the File.
        scope: Dict mapping short and full symbols to message and enum types.
        package: Optional package name for the new message EnumDescriptor.
        file_desc: The file containing the service descriptor.

    Returns:
        The added descriptor.
    """
    # Fully-qualified service name includes the package prefix when present.
    if package:
        service_name = '.'.join((package, service_proto.name))
    else:
        service_name = service_proto.name
    methods = [self._MakeMethodDescriptor(method_proto, service_name, package, scope, index) for index, method_proto in enumerate(service_proto.method)]
    desc = descriptor.ServiceDescriptor(name=service_proto.name, full_name=service_name, index=service_index, methods=methods, options=_OptionsOrNone(service_proto), file=file_desc)
    # Cache by full name so later lookups can find this service.
    self._service_descriptors[service_name] = desc
    return desc
Make a protobuf ServiceDescriptor given a ServiceDescriptorProto. Args: service_proto: The descriptor_pb2.ServiceDescriptorProto protobuf message. service_index: The index of the service in the File. scope: Dict mapping short and full symbols to message and enum types. package: Optional package name for the new message EnumDescriptor. file_desc: The file containing the service descriptor. Returns: The added descriptor.
juraj-google-style
def config(self, name='skype'):
    """Configure this endpoint to allow setting presence.

    Args:
        name (str): display name for this endpoint.
    """
    url = '{0}/users/ME/endpoints/{1}/presenceDocs/messagingService'.format(self.conn.msgsHost, self.id)
    payload = {
        'id': 'messagingService',
        'type': 'EndpointPresenceDoc',
        'selfLink': 'uri',
        'privateInfo': {'epname': name},
        'publicInfo': {
            'capabilities': '',
            'type': 1,
            'skypeNameVersion': 'skype.com',
            'nodeInfo': 'xx',
            'version': '908/1.30.0.128',
        },
    }
    self.conn('PUT', url, auth=SkypeConnection.Auth.RegToken, json=payload)
Configure this endpoint to allow setting presence. Args: name (str): display name for this endpoint
codesearchnet
def _sign_threshold_signature_fulfillment(cls, input_, message, key_pairs):
    """Signs a ThresholdSha256.

    Args:
        input_ (:class:`~bigchaindb.common.transaction.Input`): the Input
            to be signed.
        message (str): the message to be signed.
        key_pairs (dict): mapping from public key to private key, used to
            sign the Transaction.

    Returns:
        The signed deep copy of ``input_``.

    Raises:
        KeypairMismatchException: if an owner's public key cannot be found
            in the fulfillment or has no matching private key.
    """
    # Work on a copy so the caller's input is not mutated.
    input_ = deepcopy(input_)
    message = sha3_256(message.encode())
    if input_.fulfills:
        # Bind the signature to the specific output being spent.
        message.update('{}{}'.format(input_.fulfills.txid, input_.fulfills.output).encode())
    for owner_before in set(input_.owners_before):
        ccffill = input_.fulfillment
        subffills = ccffill.get_subcondition_from_vk(base58.b58decode(owner_before))
        if (not subffills):
            raise KeypairMismatchException('Public key {} cannot be found in the fulfillment'.format(owner_before))
        try:
            private_key = key_pairs[owner_before]
        except KeyError:
            raise KeypairMismatchException('Public key {} is not a pair to any of the private keys'.format(owner_before))
        # Sign every subcondition that matches this owner's public key.
        for subffill in subffills:
            subffill.sign(message.digest(), base58.b58decode(private_key.encode()))
    return input_
Signs a ThresholdSha256. Args: input_ (:class:`~bigchaindb.common.transaction. Input`) The Input to be signed. message (str): The message to be signed key_pairs (dict): The keys to sign the Transaction with.
codesearchnet
def files_sharedPublicURL(self, *, id: str, **kwargs) -> SlackResponse:
    """Enables a file for public/external sharing.

    Args:
        id (str): The file id, e.g. 'F1234467890'.
    """
    self._validate_xoxp_token()
    kwargs["id"] = id
    return self.api_call("files.sharedPublicURL", json=kwargs)
Enables a file for public/external sharing. Args: id (str): The file id. e.g. 'F1234467890'
juraj-google-style
def _render_our_module_key_flags(self, module, output_lines, prefix=''):
    """Appends a help string for the key flags of a given module.

    Args:
        module: module|str, the module to render key flags for.
        output_lines: [str], a list of strings. The generated help message
            lines will be appended to this list.
        prefix: str, a string that is prepended to each generated help line.
    """
    key_flags = self.get_key_flags_for_module(module)
    if not key_flags:
        # Nothing to render for modules without key flags.
        return
    self._render_module_flags(module, key_flags, output_lines, prefix)
Returns a help string for the key flags of a given module. Args: module: module|str, the module to render key flags for. output_lines: [str], a list of strings. The generated help message lines will be appended to this list. prefix: str, a string that is prepended to each generated help line.
codesearchnet
def check_model_doc(overwrite: bool=False):
    """Check that the model API part of `_toctree.yml` is clean and optionally auto-clean it.

    "Clean" means no duplicate entries and alphabetical sorting within each
    modality section, as enforced by `clean_model_doc_toc`.

    Args:
        overwrite (`bool`, *optional*, defaults to `False`):
            Whether to just check if the TOC is clean or to auto-clean it
            (when `overwrite=True`).

    Raises:
        ValueError: if the TOC is not clean and `overwrite` is False.
    """
    with open(PATH_TO_TOC, encoding='utf-8') as f:
        content = yaml.safe_load(f.read())
    # Walk down to the "API" top-level section, then its "Models" subsection.
    api_idx = 0
    while content[api_idx]['title'] != 'API':
        api_idx += 1
    api_doc = content[api_idx]['sections']
    model_idx = 0
    while api_doc[model_idx]['title'] != 'Models':
        model_idx += 1
    model_doc = api_doc[model_idx]['sections']
    # Only entries with nested 'sections' are per-modality groups.
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if 'sections' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['sections']
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]['sections'] = new_modality_doc
    if diff:
        if overwrite:
            # Write the cleaned tree back into the TOC file.
            api_doc[model_idx]['sections'] = model_doc
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC, 'w', encoding='utf-8') as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError('The model doc part of the table of content is not properly sorted, run `make style` to fix this.')
Check that the content of the table of content in `_toctree.yml` is clean (no duplicates and sorted for the model API doc) and potentially auto-cleans it. Args: overwrite (`bool`, *optional*, defaults to `False`): Whether to just check if the TOC is clean or to auto-clean it (when `overwrite=True`).
github-repos
def saturate_kwargs(keys, **kwargs):
    """Expand kwargs into every combination of the listed iterable keys.

    Args:
        keys: key, or list of keys, in kwargs whose iterable values are
            expanded into a cross product of keyword-argument dicts.
        **kwargs: keyword arguments for the target function.

    Returns:
        list of dicts, one per combination; empty if no key applies.
    """
    if isinstance(keys, str):
        keys = [keys]
    # Keep only keys that are present and whose value is iterable.
    expandable = [k for k in keys if k in kwargs and hasattr(kwargs.get(k, None), '__iter__')]
    if not expandable:
        return []
    # Cross product over index positions of each expandable value.
    index_grid = product(*(range(len(kwargs[k])) for k in expandable))
    combos = []
    for indices in index_grid:
        combos.append({k: kwargs[k][i] for k, i in zip(expandable, indices)})
    # Remove the expanded keys, then merge the remaining kwargs into each combo.
    for k in expandable:
        kwargs.pop(k, None)
    return [{**combo, **kwargs} for combo in combos]
Expand keyword arguments into every combination of the given iterable keys.

Args:
    keys: key, or list of keys, in kwargs whose iterable values should be
        expanded into a cross product of keyword-argument dicts
    **kwargs: keyword arguments for the target function

Returns:
    A list of kwargs dicts, one per combination (empty if none of the keys
    maps to an iterable value).
codesearchnet
def evaluate(dataset, predictions, output_folder, **kwargs):
    """Evaluate a dataset, dispatching on the dataset's concrete type.

    Args:
        dataset: Dataset object (COCODataset or PascalVOCDataset).
        predictions (list[BoxList]): each item in the list represents the
            prediction results for one image.
        output_folder: output folder, to save evaluation files or results.
        **kwargs: other args forwarded to the dataset-specific evaluator.

    Returns:
        evaluation result from the dataset-specific evaluator.

    Raises:
        NotImplementedError: if the dataset type has no registered evaluator.
    """
    args = dict(
        dataset=dataset, predictions=predictions, output_folder=output_folder, **kwargs
    )
    if isinstance(dataset, datasets.COCODataset):
        return coco_evaluation(**args)
    elif isinstance(dataset, datasets.PascalVOCDataset):
        return voc_evaluation(**args)
    else:
        dataset_name = dataset.__class__.__name__
        raise NotImplementedError("Unsupported dataset type {}.".format(dataset_name))
evaluate dataset using different methods based on dataset type. Args: dataset: Dataset object predictions(list[BoxList]): each item in the list represents the prediction results for one image. output_folder: output folder, to save evaluation files or results. **kwargs: other args. Returns: evaluation result
juraj-google-style
def walk(self, walk_func):
    """Walk each node of the graph in reverse topological order.

    This can be used to perform a set of operations where the next
    operation depends on the previous operation. Walking happens serially
    and is not parallelized.

    Args:
        walk_func (:class:`types.FunctionType`): The function to be called
            on each node of the graph.
    """
    ordering = self.topological_sort()
    # Reverse in place, matching the original contract for the returned list.
    ordering.reverse()
    for node in ordering:
        walk_func(node)
Walks each node of the graph in reverse topological order. This can be used to perform a set of operations, where the next operation depends on the previous operation. It's important to note that walking happens serially, and is not paralellized. Args: walk_func (:class:`types.FunctionType`): The function to be called on each node of the graph.
juraj-google-style
def fulfill_order(self, order_number, site_code=None, email_opt_in=False):
    """Fulfill an order via the ecommerce API, retrying on transient errors.

    Args:
        order_number (str): Order number indicating which order to fulfill.
        site_code (str): Optional site code used to look up site-specific
            configuration and the ecommerce API client.
        email_opt_in (bool): Whether the user opted in to marketing emails.

    Returns:
        None

    Raises:
        Ignore: when the order was already fulfilled (HTTP 406), so the
            task is dropped without retrying.
    """
    max_fulfillment_retries = get_configuration('MAX_FULFILLMENT_RETRIES', site_code=site_code)
    api = get_ecommerce_client(site_code=site_code)
    try:
        logger.info('Requesting fulfillment of order [%s].', order_number)
        api.orders(order_number).fulfill.put(email_opt_in=email_opt_in)
    except exceptions.HttpClientError as exc:
        status_code = exc.response.status_code
        if (status_code == 406):
            # 406 means the order is already fulfilled -- nothing to retry.
            logger.info('Order [%s] has already been fulfilled. Ignoring.', order_number)
            raise Ignore()
        else:
            logger.warning('Fulfillment of order [%s] failed because of HttpClientError. Retrying', order_number, exc_info=True)
            _retry_order(self, exc, max_fulfillment_retries, order_number)
    except (exceptions.HttpServerError, exceptions.Timeout, SSLError) as exc:
        # Server-side/transport failures are considered transient: retry.
        _retry_order(self, exc, max_fulfillment_retries, order_number)
Fulfills an order.

Arguments:
    order_number (str): Order number indicating which order to fulfill.
    site_code (str): Optional site code used to look up site-specific
        configuration and the ecommerce API client.
    email_opt_in (bool): Whether the user opted in to marketing emails.

Returns:
    None
codesearchnet
def launch(self, task, **kwargs):
    """Build the input files and submit the task via the :class:`Qadapter`.

    Args:
        task: :class:`TaskObject` to submit. Must not be locked.

    Returns:
        Process object returned by the queue adapter on submission.

    Raises:
        ValueError: if the task is locked.
    """
    if task.status == task.S_LOCKED:
        raise ValueError("You shall not submit a locked task!")
    task.build()
    if isinstance(task, AbinitTask):
        # Pass the queue time limit to abinit so it can stop gracefully.
        args = kwargs.get("exec_args", [])
        if args is None: args = []
        args = args[:]  # copy so the caller's list is not mutated
        args.append("--timelimit %s" % qu.time2slurm(self.qadapter.timelimit))
        kwargs["exec_args"] = args
    script_file = self.write_jobfile(task, **kwargs)
    try:
        qjob, process = self.qadapter.submit_to_queue(script_file)
        task.set_status(task.S_SUB, msg='Submitted to queue')
        task.set_qjob(qjob)
        return process
    except self.qadapter.MaxNumLaunchesError as exc:
        # Mark the task as errored when the submission budget is exhausted.
        task.set_status(task.S_ERROR, msg="max_num_launches reached: %s" % str(exc))
        raise
Build the input files and submit the task via the :class:`Qadapter` Args: task: :class:`TaskObject` Returns: Process object.
juraj-google-style
def SerializeExclusiveData(self, writer):
    """Serialize the transaction-specific data of this object.

    Args:
        writer (neo.IO.BinaryWriter): target binary writer.
    """
    writer.WriteVarBytes(self.Script)
    if self.Version < 1:
        # Gas field only exists from version 1 onwards.
        return
    writer.WriteFixed8(self.Gas)
Serialize object. Args: writer (neo.IO.BinaryWriter):
juraj-google-style
def handle_metrics(split, metrics, output_dir):
    """Log metrics and persist them as JSON.

    Args:
        split: one of train, val, test
        metrics: metrics dict
        output_dir: directory in which to save the metrics JSON file
    """
    logger.info(f'***** {split} metrics *****')
    for key in sorted(metrics):
        logger.info(f'  {key} = {metrics[key]}')
    results_path = os.path.join(output_dir, f'{split}_results.json')
    save_json(metrics, results_path)
Log and save metrics Args: - split: one of train, val, test - metrics: metrics dict - output_dir: where to save the metrics
github-repos
def print_middleware_tree(self, *, EOL=os.linesep, **kwargs):
    """Print a unix-tree-like rendering of the application's middleware chain.

    Args:
        EOL (str): The character or string that ends each line.
        **kwargs: Arguments passed to the standard print function, allowing
            e.g. a target ``file=`` and ``flush=``.
    """
    def mask_to_method_name(mask):
        # ALL prints as a single word rather than a join of every method.
        if mask == HTTPMethod.ALL:
            return 'ALL'
        methods = set(HTTPMethod) - {HTTPMethod.ALL}
        names = (method.name for method in methods if method.value & mask)
        return '+'.join(names)

    def path_to_str(path):
        # Compiled regex paths are shown as their pattern, escapes removed.
        if isinstance(path, str):
            return path
        return path.pattern.replace('\\', '')

    def decend_into_tree(chain, level):
        # Recursively render each middleware; sub-chains indent one level.
        lines_ = []
        for mw in chain:
            info = (mask_to_method_name(mw.mask), path_to_str(mw.path), mw.func)
            prefix = "│   " * level
            lines_ += [prefix + "├── %s %s %s" % info]
            if mw.is_subchain:
                lines_ += decend_into_tree(mw.func, level + 1)
        if level:
            # The last entry of a nested chain gets the closing corner glyph.
            lines_[-1] = lines_[-1].replace('├', '└')
        return lines_

    lines = [self.name]
    lines += decend_into_tree(self.middleware, 0)
    lines.append('┴')
    print(EOL.join(lines), **kwargs)
Prints a unix-tree-like output of the structure of the web application to the file specified (stdout by default). Args: EOL (str): The character or string that ends the line. **kwargs: Arguments pass to the standard print function. This allows specifying the file to write to and the ability to flush output upon creation.
juraj-google-style
def errors(self):
    """Return the list of recent errors.

    Returns:
        list: of :obj:`.ErrorEvent` tuples.
    """
    raw_errors = (self.get('error').get('errors', None) or list())
    assert isinstance(raw_errors, list)
    return [
        ErrorEvent(parse_datetime(raw.get('when', None)), raw.get('message', ''))
        for raw in raw_errors
    ]
Returns the list of recent errors. Returns: list: of :obj:`.ErrorEvent` tuples.
codesearchnet
def remove(self, workflow_id):
    """Remove a workflow document and its GridFS files from the data store.

    Args:
        workflow_id (str): The id of the document that represents a
            workflow run.

    Returns:
        The delete result of the workflow document removal.

    Raises:
        DataStoreNotConnected: If the data store is not connected to the
            server.
    """
    try:
        db = self._client[self.database]
        fs = GridFSProxy(GridFS(db.unproxied_object))
        # Delete every GridFS file associated with this workflow first.
        for grid_doc in fs.find({'workflow_id': workflow_id}, no_cursor_timeout=True):
            fs.delete(grid_doc._id)
        col = db[WORKFLOW_DATA_COLLECTION_NAME]
        return col.delete_one({'_id': ObjectId(workflow_id)})
    except ConnectionFailure:
        raise DataStoreNotConnected()
Removes a document specified by its id from the data store. All associated GridFs documents are deleted as well. Args: workflow_id (str): The id of the document that represents a workflow run. Raises: DataStoreNotConnected: If the data store is not connected to the server.
codesearchnet
def stop(self, wait=True):
    """Stop the Bokeh Server application.

    Args:
        wait (bool): whether to wait for orderly cleanup (default: True)
            NOTE(review): `wait` is not referenced in this body -- confirm
            whether cleanup is expected to honour it.

    Returns:
        None
    """
    # Give each application a chance to run its unload hooks first.
    for context in self._applications.values():
        context.run_unload_hook()
    self._stats_job.stop()
    if self._mem_job is not None:
        self._mem_job.stop()
    self._cleanup_job.stop()
    if self._ping_job is not None:
        self._ping_job.stop()
    self._clients.clear()
Stop the Bokeh Server application. Args: wait (bool): whether to wait for orderly cleanup (default: True) Returns: None
juraj-google-style
def response_data_to_model_instance(self, response_data):
    """Convert response data to a task type model.

    Args:
        response_data (dict): The data from the request's response.

    Returns:
        :class:`saltant.models.base_task_type.BaseTaskType`:
            A model instance representing the task type from the response
            data.
    """
    # Parse the ISO datetime string in place before the base class builds
    # the model instance.
    response_data['datetime_created'] = dateutil.parser.parse(response_data['datetime_created'])
    return super(BaseTaskTypeManager, self).response_data_to_model_instance(response_data)
Convert response data to a task type model. Args: response_data (dict): The data from the request's response. Returns: :class:`saltant.models.base_task_type.BaseTaskType`: A model instance representing the task type from the reponse data.
codesearchnet
def char_matches(s1, s2, n=3):
    """Character-level n-grams that match between two strings.

    Args:
        s1: a string
        s2: another string
        n: an int for the n in n-gram

    Returns:
        set: the n-grams found in both strings
    """
    # Delegates to the module-level matcher with the character n-gram tokenizer.
    return __matches(s1, s2, char_ngrams, n=n)
Character-level n-grams that match between two strings Args: s1: a string s2: another string n: an int for the n in n-gram Returns: set: the n-grams found in both strings
juraj-google-style
def add_to_collection(self, name, value) -> None:
    """Store `value` in the collection with the given `name`.

    Collections are not sets, so a value may be added to a collection
    several times.

    Args:
        name: The key for the collection.
        value: The value to add to the collection.
    """
    self._check_not_finalized()
    with self._lock:
        # Create the list on first use, then append.
        self._collections.setdefault(name, []).append(value)
Stores `value` in the collection with the given `name`. Note that collections are not sets, so it is possible to add a value to a collection several times. Args: name: The key for the collection. The `GraphKeys` class contains many standard names for collections. value: The value to add to the collection.
github-repos
def _get_connection(self, uri, headers=None):
    """Open a socket connection to the server to set up an HTTP request.

    Args:
        uri: The full URL for the request as a Uri object.
        headers: A dict of string pairs containing the HTTP headers for the
            request (currently unused here).

    Returns:
        An httplib HTTP(S)Connection for the given host/port.
    """
    # Pick the connection class by scheme, then apply the optional port.
    if uri.scheme == 'https':
        connection_class = httplib.HTTPSConnection
    else:
        connection_class = httplib.HTTPConnection
    if not uri.port:
        return connection_class(uri.host)
    return connection_class(uri.host, int(uri.port))
Opens a socket connection to the server to set up an HTTP request. Args: uri: The full URL for the request as a Uri object. headers: A dict of string pairs containing the HTTP headers for the request.
juraj-google-style
def read(self, length, timeout):
    """Read 'length' bytes from this stream transport.

    Args:
        length: If not 0, read this many bytes from the stream, otherwise
            read all available data (at least one byte).
        timeout: timeouts.PolledTimeout to use for this read operation.

    Returns:
        The bytes read from this stream.
    """
    # Block until enough buffered data is available (any data if length==0).
    self._read_messages_until_true((lambda : (self._buffer_size and (self._buffer_size >= length))), timeout)
    with self._read_buffer_lock:
        (data, push_back) = (''.join(self._read_buffer), '')
        if length:
            # Split off excess bytes beyond the requested length.
            (data, push_back) = (data[:length], data[length:])
        self._read_buffer.clear()
        self._buffer_size = len(push_back)
        if push_back:
            # Return the excess to the front of the buffer for the next read.
            self._read_buffer.appendleft(push_back)
    return data
Read 'length' bytes from this stream transport. Args: length: If not 0, read this many bytes from the stream, otherwise read all available data (at least one byte). timeout: timeouts.PolledTimeout to use for this read operation. Returns: The bytes read from this stream.
codesearchnet
def as_tmpfile(self, tmpdir=None):
    """Copy the pseudopotential to a temporary file and return a new
    pseudopotential object.

    Useful for unit tests in which we have to change the content of the
    file without touching the original.

    Args:
        tmpdir: If None, a new temporary directory is created and files
            are copied here else tmpdir is used.

    Returns:
        A new pseudopotential object built from the copied file.
    """
    import tempfile, shutil
    tmpdir = tempfile.mkdtemp() if tmpdir is None else tmpdir
    new_path = os.path.join(tmpdir, self.basename)
    shutil.copy(self.filepath, new_path)
    # Copy the companion .djrepo file alongside, when present.
    root, ext = os.path.splitext(self.filepath)
    djrepo = root + ".djrepo"
    if os.path.exists(djrepo):
        shutil.copy(djrepo, os.path.join(tmpdir, os.path.basename(djrepo)))
    # Build the new object and carry over a deep copy of the dojo report.
    new = self.__class__.from_file(new_path)
    if self.has_dojo_report:
        new.dojo_report = self.dojo_report.deepcopy()
    return new
Copy the pseudopotential to a temporary a file and returns a new pseudopotential object. Useful for unit tests in which we have to change the content of the file. Args: tmpdir: If None, a new temporary directory is created and files are copied here else tmpdir is used.
juraj-google-style
def beginning_offsets(self, partitions):
    """Get the first offset for the given partitions.

    This method does not change the current consumer position of the
    partitions.

    Note:
        This method may block indefinitely if the partition does not exist.

    Arguments:
        partitions (list): List of TopicPartition instances to fetch
            offsets for.

    Returns:
        ``{TopicPartition: int}``: The earliest available offsets for the
        given partitions.
    """
    timeout_ms = self.config['request_timeout_ms']
    return self._fetcher.beginning_offsets(partitions, timeout_ms)
Get the first offset for the given partitions. This method does not change the current consumer position of the partitions. Note: This method may block indefinitely if the partition does not exist. Arguments: partitions (list): List of TopicPartition instances to fetch offsets for. Returns: ``{TopicPartition: int}``: The earliest available offsets for the given partitions. Raises: UnsupportedVersionError: If the broker does not support looking up the offsets by timestamp. KafkaTimeoutError: If fetch failed in request_timeout_ms.
codesearchnet
def pearson_correlation(y_true, y_pred, axis=-1):
    """Computes the Pearson correlation coefficient between labels and predictions.

    Both inputs are mean-centered and divided by their standard deviation
    along `axis`; the mean of the elementwise product of the normalized
    signals is the Pearson coefficient.

    Args:
        y_true: Tensor of true targets.
        y_pred: Tensor of predicted targets.
        axis: Axis along which to determine similarity. Defaults to `-1`.

    Returns:
        Pearson correlation coefficient tensor.
    """
    y_pred = ops.convert_to_tensor(y_pred)
    y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
    y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred)
    # Center both signals ...
    y_true_norm = y_true - ops.mean(y_true, axis=axis, keepdims=True)
    y_pred_norm = y_pred - ops.mean(y_pred, axis=axis, keepdims=True)
    # ... then scale each to unit standard deviation.
    y_true_norm = y_true_norm / ops.std(y_true_norm, axis=axis, keepdims=True)
    y_pred_norm = y_pred_norm / ops.std(y_pred_norm, axis=axis, keepdims=True)
    return ops.mean(y_true_norm * y_pred_norm, axis=axis)
Computes the Pearson correlation coefficient between labels and predictions.

Formula:

```python
loss = mean(((y_true - mean(y_true)) / std(y_true))
            * ((y_pred - mean(y_pred)) / std(y_pred)))
```

Args:
    y_true: Tensor of true targets.
    y_pred: Tensor of predicted targets.
    axis: Axis along which to determine similarity. Defaults to `-1`.

Returns:
    Pearson Correlation Coefficient tensor.

Example:

>>> y_true = [[0, 1, 0.5], [1, 1, 0.2]]
>>> y_pred = [[0.1, 0.9, 0.5], [1, 0.9, 0.2]]
>>> loss = keras.losses.pearson_correlation(
...     y_true, y_pred, axis=-1
... ).numpy()
[1. 0.99339927]
github-repos
def unpack(self, gpsd_socket_response):
    """Set new socket data as DataStream attributes in the initialised dictionaries.

    Arguments:
        gpsd_socket_response (json object): one JSON report from gpsd.

    Provides:
        self attribute dictionaries, e.g., self.TPV['lat'], self.SKY['gdop'].

    Errors (bad JSON, unknown report class) are written to stderr and the
    method returns without raising.
    """
    try:
        parsed = json.loads(gpsd_socket_response)
        report_class = parsed.pop('class', 'ERROR')
        # Resolve the matching attribute dict; falls back to the string
        # itself, which triggers AttributeError below for unknown classes.
        target = getattr(self, report_class, report_class)
        for field in target.keys():
            target[field] = parsed.get(field, 'n/a')
    except AttributeError:
        sys.stderr.write('There is an unexpected exception in DataStream.unpack')
        return
    except (ValueError, KeyError) as error:
        sys.stderr.write(str(error))
        return
Sets new socket data as DataStream attributes in those initialised dictionaries Arguments: gpsd_socket_response (json object): Provides: self attribute dictionaries, e.g., self.TPV['lat'], self.SKY['gdop'] Raises: AttributeError: 'str' object has no attribute 'keys' when the device falls out of the system ValueError, KeyError: most likely extra, or mangled JSON data, should not happen, but that applies to a lot of things.
juraj-google-style
def _relative_position_to_absolute_position_unmasked(x):
    """Converts a tensor from relative to absolute indexing for local attention.

    Uses the pad-flatten-reshape "skewing" trick so that relative position
    logits line up as absolute positions without an explicit gather.

    Args:
        x: a Tensor of shape
            [batch (or batch*num_blocks), heads, length, 2 * length - 1]

    Returns:
        A Tensor of shape [batch (or batch*num_blocks), heads, length, length]
    """
    x_shape = common_layers.shape_list(x)
    batch = x_shape[0]
    heads = x_shape[1]
    length = x_shape[2]
    # Append one zero column so each row becomes 2*length wide.
    col_pad = tf.zeros((batch, heads, length, 1))
    x = tf.concat([x, col_pad], axis=3)
    # Flatten rows, pad, and reshape so positions skew into alignment.
    flat_x = tf.reshape(x, [batch, heads, length * 2 * length])
    flat_pad = tf.zeros((batch, heads, length-1))
    flat_x_padded = tf.concat([flat_x, flat_pad], axis=2)
    final_x = tf.reshape(flat_x_padded, [batch, heads, length+1, 2*length-1])
    # Slice away the skew padding to recover [length, length].
    final_x = final_x[:, :, :, length-1:]
    final_x = final_x[:, :, :length, :]
    return final_x
Converts tensor from relative to aboslute indexing for local attention. Args: x: a Tensor of shape [batch (or batch*num_blocks), heads, length, 2 * length - 1] Returns: A Tensor of shape [batch (or batch*num_blocks), heads, length, length-1]
juraj-google-style
def Deserialize(self, reader):
    """Deserialize full object.

    Args:
        reader (neocore.IO.BinaryReader): source binary reader.

    Raises:
        Exception: if the state version is incorrect.
    """
    version = reader.ReadByte()
    if version != self.StateVersion:
        raise Exception("Incorrect State format")
Deserialize full object. Args: reader (neocore.IO.BinaryReader): Raises: Exception: if the state version is incorrect.
juraj-google-style
def _TrimNode(node, index, depth, flags):
    """Internal helper method to trim a Merkle tree node.

    Args:
        node (MerkleTreeNode): node to trim.
        index (int): flag index.
        depth (int): node tree depth to start trim from.
        flags (bytearray): of left/right pairs. 1 byte for the left node,
            1 byte for the right node. 00 to erase, 11 to keep. Will keep
            the node if either left or right is non-0.
    """
    # Leaves (or already-trimmed nodes) need no work.
    if ((depth == 1) or (node.LeftChild is None)):
        return
    if (depth == 2):
        # Both children unflagged: collapse this subtree into its hash.
        if ((not flags[(index * 2)]) and (not flags[((index * 2) + 1)])):
            node.LeftChild = None
            node.RightChild = None
    else:
        # Recurse, then collapse if both subtrees were fully trimmed.
        MerkleTree._TrimNode(node.LeftChild, (index * 2), (depth - 1), flags)
        MerkleTree._TrimNode(node.RightChild, (index * 2), (depth - 1), flags)
        if ((node.LeftChild.LeftChild is None) and (node.RightChild.RightChild is None)):
            node.LeftChild = None
            node.RightChild = None
Internal helper method to trim a node. Args: node (MerkleTreeNode): index (int): flag index. depth (int): node tree depth to start trim from. flags (bytearray): of left/right pairs. 1 byte for the left node, 1 byte for the right node. 00 to erase, 11 to keep. Will keep the node if either left or right is not-0
codesearchnet
def state_probability(self, direction, repertoire, purview):
    """Compute the probability of the purview in its current state given
    the repertoire.

    Collapses the dimensions of the repertoire that correspond to the
    purview nodes onto their state. All other dimensions are already
    singular and thus receive 0 as the conditioning index.

    Returns:
        float: A single probability.
    """
    purview_state = self.purview_state(direction)
    # Nodes outside the purview index position 0 (singleton dimensions).
    index = tuple(
        state if node in purview else 0
        for node, state in enumerate(purview_state)
    )
    return repertoire[index]
Compute the probability of the purview in its current state given the repertoire. Collapses the dimensions of the repertoire that correspond to the purview nodes onto their state. All other dimension are already singular and thus receive 0 as the conditioning index. Returns: float: A single probabilty.
codesearchnet
def add(self, arg, tag=None, name=None, aggregate=None, index_override=None):
    """Return a wrapped tensor of an input tensor as an argument.

    Args:
        arg: A TensorFlow tensor that should be considered an argument.
        tag: String tag to identify arguments that should be packed.
        name: Name of argument. This is included in the Identity hint op
            names.
        aggregate: Strategy to aggregate. Acceptable values are
            OpHint.AGGREGATE_FIRST, OpHint.AGGREGATE_LAST, and
            OpHint.AGGREGATE_STACK. Note, aggregate is only valid if tag is
            specified.
        index_override: Specify what input/output index should this be in
            the final stub.

    Returns:
        A tensor representing the wrapped argument.

    Raises:
        ValueError: When tag/aggregate are inconsistent or indices conflict.
    """
    # Find the appropriate index: untagged args get a fresh global index,
    # tagged args share a global index and get a per-tag sort index.
    if tag is None:
        if aggregate is not None:
            raise ValueError('You must specify `tag` if using aggregate.')
        global_index = self._get_new_global_index(index_override)
        sort_index = None
    else:
        if aggregate is None:
            raise ValueError('You must specify `aggregate` if using tag.')
        if tag not in self._tag_to_global_index:
            self._tag_to_global_index[tag] = self._get_new_global_index(index_override)
            self._tag_to_next_sort_index[tag] = 0
        elif index_override and index_override != self._tag_to_global_index[tag]:
            raise ValueError('Tag %r was called with two indices %r and %r' % (tag, index_override, self._tag_to_global_index[tag]))
        global_index = self._tag_to_global_index[tag]
        sort_index = self._tag_to_next_sort_index[tag]
        self._tag_to_next_sort_index[tag] += 1
    uuid = self._unique_function_id
    name = '%s-%s-%s-%r-%r-%s' % (self._node_name_prefix, self._function_name, uuid, global_index, sort_index, name)
    # The Identity op is the hint marker; all metadata rides on its attrs.
    identity_op = _array_ops.identity(arg, name=name)
    identity_op.op._set_attr(OpHint.FUNCTION_NAME_ATTR, _attr_value_pb2.AttrValue(s=_compat.as_bytes(self._function_name)))
    identity_op.op._set_attr(OpHint.FUNCTION_UUID_ATTR, _attr_value_pb2.AttrValue(s=_compat.as_bytes(self._unique_function_id)))
    identity_op.op._set_attr(self._attr_name, _attr_value_pb2.AttrValue(i=global_index))
    identity_op.op._set_attr(OpHint.FUNCTION_LEVEL_ATTR, _attr_value_pb2.AttrValue(i=self._level))
    if self._children_inputs_mappings:
        identity_op.op._set_attr(OpHint.CHILDREN_INPUTS_MAPPINGS, _attr_value_pb2.AttrValue(s=_compat.as_bytes(_json.dumps(self._children_inputs_mappings))))
    if sort_index is not None:
        identity_op.op._set_attr(OpHint.FUNCTION_SORT_INDEX_ATTR, _attr_value_pb2.AttrValue(i=sort_index))
    if aggregate is not None:
        identity_op.op._set_attr(OpHint.FUNCTION_AGGREGATE_ATTR, _attr_value_pb2.AttrValue(s=_compat.as_bytes(aggregate)))
    return identity_op
Return a wrapped tensor of an input tensor as an argument. Args: arg: A TensorFlow tensor that should be considered an argument. tag: String tag to identify arguments that should be packed. name: Name of argument. This is included in the Identity hint op names. aggregate: Strategy to aggregate. Acceptable values are OpHint.AGGREGATE_FIRST, OpHint.AGGREGATE_LAST, and OpHint.AGGREGATE_STACK. Note, aggregate is only valid if tag is specified. index_override: Specify what input/output index should this be in the final stub. i.e. add(arg0, index=1); add(arg1, index=0) will make the final stub be as stub_func(inputs[arg1, arg0], outputs=[]) rather than the default call order based ordering. Returns: A tensor representing the wrapped argument. Raises: ValueError: When indices are not consistent.
github-repos
def get_filename_safe_string(string):
    """Convert a string to one that is safe to use as a filename.

    Removes characters invalid in filenames on common platforms (path
    separators, wildcards, quotes, newlines) and strips trailing dots,
    which Windows disallows.

    Args:
        string (str): A string to make safe for a filename. ``None`` is
            replaced with the literal string ``"None"``.

    Returns:
        str: A string safe for a filename.
    """
    if string is None:
        string = "None"
    # Single C-level pass instead of a chain of .replace() calls;
    # the character set is identical to the previous implementation.
    invalid_filename_chars = '\\/:"*?|\n\r'
    string = string.translate(str.maketrans("", "", invalid_filename_chars))
    return string.rstrip(".")
Converts a string to a string that is safe for a filename Args: string (str): A string to make safe for a filename Returns: str: A string safe for a filename
juraj-google-style
def create(self, group, grouptype):
    """Create an LDAP Group.

    Args:
        group: name of the group to create.
        grouptype: type of the group, used to build the LDAP attributes.

    Errors are caught and reported on stderr rather than re-raised:
        ldap3.core.exceptions.LDAPNoSuchObjectResult: an object involved
            with the request is missing.
        ldap3.core.exceptions.LDAPEntryAlreadyExistsResult: the entity
            being created already exists.
    """
    try:
        self.client.add(self.__distinguished_name(group), API.__object_class(), self.__ldap_attr(group, grouptype))
    except ldap3.core.exceptions.LDAPNoSuchObjectResult:
        print('Error creating LDAP Group.\nRequest: ', self.__ldap_attr(group, grouptype), '\nDistinguished Name: ', self.__distinguished_name(group), file=sys.stderr)
    except ldap3.core.exceptions.LDAPEntryAlreadyExistsResult:
        print('Error creating LDAP Group. Group already exists. \nRequest: ', self.__ldap_attr(group, grouptype), '\nDistinguished Name: ', self.__distinguished_name(group), file=sys.stderr)
Create an LDAP Group. Raises: ldap3.core.exceptions.LDAPNoSuchObjectResult: an object involved with the request is missing ldap3.core.exceptions.LDAPEntryAlreadyExistsResult: the entity being created already exists
codesearchnet
def convert_gru_weights(weights, from_cudnn=True):
    """Converts the weights between CuDNNGRU and GRU.

    Args:
        weights: Original weights, a list of
            [kernels, recurrent_kernels, biases].
        from_cudnn: Indicates whether original weights are from CuDNN layer.

    Returns:
        Updated weights compatible with GRU.
    """
    kernels = transform_kernels(weights[0], transpose_input(from_cudnn), n_gates)
    recurrent_kernels = transform_kernels(weights[1], lambda k: k.T, n_gates)
    # CuDNN keeps two bias vectors (input and recurrent); reshape to match
    # the target layout in either direction.
    biases = np.array(weights[2]).reshape((2, -1) if from_cudnn else -1)
    return [kernels, recurrent_kernels, biases]
Converts the weights between CuDNNGRU and GRU. Args: weights: Original weights. from_cudnn: Indicates whether original weights are from CuDNN layer. Returns: Updated weights compatible with GRU.
github-repos
def set_vocabulary(self, vocabulary, idf_weights=None):
    """Sets vocabulary (and optionally document frequency) for this layer.

    Replaces any vocabulary already present in the layer, instead of
    analyzing a dataset through `adapt()`.

    Args:
        vocabulary: Either an array or a string path to a text file. If
            passing an array, can pass a tuple, list, 1D NumPy array, or 1D
            tensor containing the vocabulary terms. If passing a file path,
            the file should contain one line per term in the vocabulary.
        idf_weights: A tuple, list, 1D NumPy array, or 1D tensor of inverse
            document frequency weights with equal length to vocabulary.
            Must be set if `output_mode` is `"tf_idf"`. Should not be set
            otherwise.
    """
    # Delegates entirely to the wrapped lookup layer.
    self._lookup_layer.set_vocabulary(vocabulary, idf_weights=idf_weights)
Sets vocabulary (and optionally document frequency) for this layer. This method sets the vocabulary and IDF weights for this layer directly, instead of analyzing a dataset through `adapt()`. It should be used whenever the vocab (and optionally document frequency) information is already known. If vocabulary data is already present in the layer, this method will replace it. Args: vocabulary: Either an array or a string path to a text file. If passing an array, can pass a tuple, list, 1D NumPy array, or 1D tensor containing the vocabulary terms. If passing a file path, the file should contain one line per term in the vocabulary. idf_weights: A tuple, list, 1D NumPy array, or 1D tensor of inverse document frequency weights with equal length to vocabulary. Must be set if `output_mode` is `"tf_idf"`. Should not be set otherwise.
github-repos
def export_verified_variants(aggregate_variants, unique_callers):
    """Create the lines for an excel file with verified variants for an institute.

    One output line is produced per (variant, sample) pair.

    Args:
        aggregate_variants (list): a list of variants with aggregate case data.
        unique_callers (set): a unique list of available callers.

    Returns:
        document_lines (list): list of lines to include in the document.
    """
    document_lines = []
    for variant in aggregate_variants:
        samples = []  # NOTE(review): unused accumulator, kept as-is
        for sample in variant['samples']:
            line = []
            line.append(variant['institute'])
            line.append(variant['_id'])
            line.append(variant['category'])
            line.append(variant['variant_type'])
            # Truncate long display names for readability in the sheet.
            line.append(variant['display_name'][:30])
            case_name = variant['case_obj']['display_name']
            local_link = '/'.join([ '', variant['institute'], case_name, variant['_id'] ])
            line.append(local_link)
            line.append(variant.get('validation'))
            line.append(case_name)
            # Mark affected individuals (phenotype == 2) with an '(A)' suffix.
            case_individual = next(ind for ind in variant['case_obj']['individuals'] if ind['individual_id'] == sample['sample_id'])
            if case_individual['phenotype'] == 2:
                line.append(' '.join([sample.get('display_name'),'(A)']))
            else:
                line.append(sample.get('display_name'))
            line.append(''.join(['chr',variant['chromosome'],':',str(variant['position'])]))
            line.append('>'.join([variant.get('reference')[:10],variant.get('alternative')[:10]]))
            # Collect per-gene annotations: symbols, functional annotations
            # and canonical-transcript protein changes.
            genes = []
            prot_effect = []
            funct_anno = []
            for gene in variant.get('genes'):
                genes.append(gene.get('hgnc_symbol',''))
                funct_anno.append(gene.get('functional_annotation'))
                for transcript in gene.get('transcripts'):
                    if transcript.get('is_canonical') and transcript.get('protein_sequence_name'):
                        prot_effect.append(urllib.parse.unquote(transcript.get('protein_sequence_name')))
            line.append(','.join(prot_effect))
            line.append(','.join(funct_anno))
            line.append(','.join(genes))
            line.append(variant.get('rank_score'))
            line.append(variant.get('cadd_score'))
            line.append(sample.get('genotype_call'))
            line.append(sample['allele_depths'][0])
            line.append(sample['allele_depths'][1])
            line.append(sample['genotype_quality'])
            # One column per caller; '-' when the caller has no entry.
            for caller in unique_callers:
                if variant.get(caller):
                    line.append(variant.get(caller))
                else:
                    line.append('-')
            document_lines.append(line)
    return document_lines
Create the lines for an excel file with verified variants for an institute Args: aggregate_variants(list): a list of variants with aggregates case data unique_callers(set): a unique list of available callers Returns: document_lines(list): list of lines to include in the document
juraj-google-style
def convert_attribute_tag_to_name(value):
    """Convert an attribute tag into the corresponding attribute name string.

    For example: enums.Tags.STATE -> 'State'

    Args:
        value (enum): The Tags enumeration value of the attribute.

    Returns:
        string: The attribute name string that corresponds to the attribute
        tag.

    Raises:
        ValueError: if the attribute tag is not a Tags enumeration or if it
            is an unrecognized attribute tag.
    """
    if not isinstance(value, Tags):
        raise ValueError("The attribute tag must be a Tags enumeration.")
    # Linear scan of the (name, tag) table; first match wins.
    match = next(
        (entry[0] for entry in attribute_name_tag_table if entry[1] == value),
        None,
    )
    if match is not None:
        return match
    raise ValueError("Unrecognized attribute tag: {}".format(value))
A utility function that converts an attribute tag into the corresponding attribute name string. For example: enums.Tags.STATE -> 'State' Args: value (enum): The Tags enumeration value of the attribute. Returns: string: The attribute name string that corresponds to the attribute tag. Raises: ValueError: if the attribute tag is not a Tags enumeration or if it is unrecognized attribute tag
juraj-google-style
def call_rpc(self, rpc_id, payload=bytes()):
    """Call an RPC by its ID.

    Args:
        rpc_id (int): The number of the RPC.
        payload (bytes): A byte string of payload parameters up to 20 bytes.

    Returns:
        bytes: The response payload from the RPC.

    Raises:
        RPCInvalidIDError: if the id is outside the 16-bit range.
        RPCNotFoundError: if no handler is registered for the id.
    """
    if not 0 <= rpc_id <= 0xFFFF:
        raise RPCInvalidIDError("Invalid RPC ID: {}".format(rpc_id))
    if rpc_id not in self._rpcs:
        raise RPCNotFoundError("rpc_id: {}".format(rpc_id))
    handler = self._rpcs[rpc_id]
    return handler(payload)
Call an RPC by its ID. Args: rpc_id (int): The number of the RPC payload (bytes): A byte string of payload parameters up to 20 bytes Returns: bytes: The response payload from the RPC
juraj-google-style
def create(provider, count=1, name=None, **kwargs):
    r'''Create one or more cloud servers

    Args:
        * provider (str): Cloud provider, e.g. ec2, digitalocean
        * count (int) =1: Number of instances
        * name (str) =None: Name of server(s)
        * \**kwargs: Provider-specific flags

    Returns:
        whatever the provider's create_servers returns.
    '''
    count = int(count)
    provider = provider_by_name(provider)
    # Start from the provider defaults, overridden by the caller's flags.
    options = provider.create_server_defaults
    options.update(kwargs)
    # Every instance receives the same (possibly None) name.
    names = [name] * count
    provider.validate_create_options(**options)
    return provider.create_servers(count, names, **options)
r''' Create one or more cloud servers Args: * provider (str): Cloud provider, e.g. ec2, digitalocean * count (int) =1: Number of instances * name (str) =None: Name of server(s) * \**kwargs: Provider-specific flags
juraj-google-style
def GetSubkeyByPath(self, key_path):
    """Retrieves a subkey by path.

    Args:
        key_path (str): path of the subkey.

    Returns:
        WinRegistryKey: Windows Registry subkey or None if not found.
    """
    current_key = self
    for path_segment in key_paths.SplitKeyPath(key_path):
        current_key = current_key.GetSubkeyByName(path_segment)
        if not current_key:
            # A missing intermediate key means the whole path is absent.
            break
    return current_key
Retrieves a subkey by path. Args: key_path (str): path of the subkey. Returns: WinRegistryKey: Windows Registry subkey or None if not found.
juraj-google-style
def __init__(self, name, num_qubits, num_clbits, params):
    """Create a new instruction.

    Args:
        name (str): instruction name
        num_qubits (int): instruction's qubit width
        num_clbits (int): instruction's clbit width
        params (list[sympy.Basic|qasm.Node|int|float|complex|str|ndarray]):
            list of parameters

    Raises:
        QiskitError: when the register is not in the correct format.
    """
    if not isinstance(num_qubits, int) or not isinstance(num_clbits, int):
        raise QiskitError("num_qubits and num_clbits must be integer.")
    if num_qubits < 0 or num_clbits < 0:
        # BUG FIX: the two format arguments must be a tuple. Previously the
        # `%` operator bound only num_qubits ("... % num_qubits, num_clbits"),
        # which raised TypeError ("not enough arguments for format string")
        # instead of the intended QiskitError message.
        raise QiskitError(
            "bad instruction dimensions: %d qubits, %d clbits."
            % (num_qubits, num_clbits))
    self.name = name
    self.num_qubits = num_qubits
    self.num_clbits = num_clbits
    self._params = []
    self.control = None
    self._definition = None
    self.params = params
Create a new instruction.

Args:
    name (str): instruction name
    num_qubits (int): instruction's qubit width
    num_clbits (int): instruction's clbit width
    params (list[sympy.Basic|qasm.Node|int|float|complex|str|ndarray]): list of parameters

Raises:
    QiskitError: when the register is not in the correct format.
juraj-google-style
def download_file_content(self, file_id, etag=None):
    """Download file content, honouring conditional GET via ETag.

    Args:
        file_id (str): The UUID of the file whose content is requested.
        etag (str): If the content is not changed since the provided ETag,
            the content won't be downloaded. ETags should be enclosed in
            double quotes, e.g. ``'"71e1ed9ee52e565a56aec66bc648a32c"'``.

    Returns:
        A tuple ``(etag, content)`` if the content was retrieved, or
        ``(None, None)`` if an etag was provided and the content is
        unchanged (HTTP 304).

    Raises:
        StorageArgumentException: Invalid arguments.
        StorageException: 400-600 error codes or a missing ETag header.
    """
    if (not is_valid_uuid(file_id)):
        raise StorageArgumentException('Invalid UUID for file_id: {0}'.format(file_id))
    headers = {'Accept': '*/*'}
    if etag:
        # Conditional GET: server replies 304 when content is unchanged.
        headers['If-None-Match'] = etag
    resp = self._authenticated_request.to_endpoint('file/{}/content/'.format(file_id)).with_headers(headers).get()
    if (resp.status_code == 304):
        return (None, None)
    if ('ETag' not in resp.headers):
        raise StorageException('No ETag received from the service with the download')
    return (resp.headers['ETag'], resp.content)
Download file content. Args: file_id (str): The UUID of the file whose content is requested etag (str): If the content is not changed since the provided ETag, the content won't be downloaded. If the content is changed, it will be downloaded and returned with its new ETag. Note: ETags should be enclosed in double quotes:: my_etag = '"71e1ed9ee52e565a56aec66bc648a32c"' Returns: A tuple of ETag and content (etag, content) if the content was retrieved. If an etag was provided, and content didn't change returns (None, None):: ('"71e1ed9ee52e565a56aec66bc648a32c"', 'Hello world!') Raises: StorageArgumentException: Invalid arguments StorageForbiddenException: Server response code 403 StorageNotFoundException: Server response code 404 StorageException: other 400-600 error codes
codesearchnet
def sg_inject(path, mod_name):
    r"""Converts all functions in the given Python module to sugar functions
    so that they can be used in a chainable manner.

    Args:
        path: A string. Path to the Python module.
        mod_name: A string. The name of the Python module to inject.

    Returns:
        None
    """
    import sys
    if path not in list(sys.path):
        sys.path.append(path)
    # Import the module and expose it under this module's globals so the
    # exec statements below can reference it by name.
    globals()[mod_name] = importlib.import_module(mod_name)
    for func_name in dir(globals()[mod_name]):
        if isinstance(globals()[mod_name].__dict__.get(func_name), types.FunctionType):
            if not func_name.startswith('_'):
                # Monkey-patch every public function onto tf.Variable and
                # tf.Tensor so calls can be chained off tensors.
                exec('tf.Variable.%s = %s.%s' % (func_name, mod_name, func_name))
                exec('tf.Tensor.%s = %s.%s' % (func_name, mod_name, func_name))
r"""Converts all functions in the given Python module to sugar functions so that they can be used in a chainable manner. Args: path: A string. Path to the Python module mod_name: A string. The name of the Python module to inject. Returns: None
juraj-google-style
class StackedRNNCells(Layer):
    """Wrapper allowing a stack of RNN cells to behave as a single cell.

    Used to implement efficient stacked RNNs.

    Args:
        cells: List of RNN cell instances.
    """

    def __init__(self, cells, **kwargs):
        # Validate the minimal RNN-cell interface up front.
        for cell in cells:
            if not 'call' in dir(cell):
                raise ValueError('All cells must have a `call` method. received cells:', cells)
            if not 'state_size' in dir(cell):
                raise ValueError('All cells must have a `state_size` attribute. received cells:', cells)
        self.cells = cells
        # reverse_state_order=True keeps the legacy (reversed) state ordering;
        # it is deprecated in favour of the natural cell order.
        self.reverse_state_order = kwargs.pop('reverse_state_order', False)
        if self.reverse_state_order:
            logging.warning('reverse_state_order=True in StackedRNNCells will soon be deprecated. Please update the code to work with the natural order of states if you rely on the RNN states, eg RNN(return_state=True).')
        super(StackedRNNCells, self).__init__(**kwargs)

    @property
    def state_size(self):
        # Tuple of each cell's state size, optionally in reversed cell order.
        return tuple((c.state_size for c in (self.cells[::-1] if self.reverse_state_order else self.cells)))

    @property
    def output_size(self):
        # The stack's output is the last cell's output; fall back to the
        # (first component of the) state size when the cell exposes none.
        if getattr(self.cells[-1], 'output_size', None) is not None:
            return self.cells[-1].output_size
        elif _is_multiple_state(self.cells[-1].state_size):
            return self.cells[-1].state_size[0]
        else:
            return self.cells[-1].state_size

    def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
        """Build the tuple of initial states, one entry per cell."""
        initial_states = []
        for cell in self.cells[::-1] if self.reverse_state_order else self.cells:
            get_initial_state_fn = getattr(cell, 'get_initial_state', None)
            if get_initial_state_fn:
                initial_states.append(get_initial_state_fn(inputs=inputs, batch_size=batch_size, dtype=dtype))
            else:
                # Cell has no custom initializer: use a zero-filled state.
                initial_states.append(_generate_zero_filled_state_for_cell(cell, inputs, batch_size, dtype))
        return tuple(initial_states)

    def call(self, inputs, states, constants=None, training=None, **kwargs):
        """Run `inputs` through every cell, feeding each output to the next."""
        # Recover the per-cell nested state structure from the flat states.
        state_size = self.state_size[::-1] if self.reverse_state_order else self.state_size
        nested_states = nest.pack_sequence_as(state_size, nest.flatten(states))
        new_nested_states = []
        for cell, states in zip(self.cells, nested_states):
            states = states if nest.is_nested(states) else [states]
            # TF (non-Keras) RNN cells expect a bare state rather than a
            # single-element list.
            is_tf_rnn_cell = getattr(cell, '_is_tf_rnn_cell', None) is not None
            states = states[0] if len(states) == 1 and is_tf_rnn_cell else states
            if generic_utils.has_arg(cell.call, 'training'):
                kwargs['training'] = training
            else:
                kwargs.pop('training', None)
            # Prefer __call__ so Keras layers run their full build/call machinery.
            cell_call_fn = cell.__call__ if callable(cell) else cell.call
            if generic_utils.has_arg(cell.call, 'constants'):
                inputs, states = cell_call_fn(inputs, states, constants=constants, **kwargs)
            else:
                inputs, states = cell_call_fn(inputs, states, **kwargs)
            new_nested_states.append(states)
        return (inputs, nest.pack_sequence_as(state_size, nest.flatten(new_nested_states)))

    @tf_utils.shape_type_conversion
    def build(self, input_shape):
        """Build each cell in order, threading the output shape through."""
        if isinstance(input_shape, list):
            input_shape = input_shape[0]
        for cell in self.cells:
            if isinstance(cell, Layer) and (not cell.built):
                with backend.name_scope(cell.name):
                    cell.build(input_shape)
                    cell.built = True
            if getattr(cell, 'output_size', None) is not None:
                output_dim = cell.output_size
            elif _is_multiple_state(cell.state_size):
                output_dim = cell.state_size[0]
            else:
                output_dim = cell.state_size
            # The next cell consumes this cell's output shape.
            input_shape = tuple([input_shape[0]] + tensor_shape.TensorShape(output_dim).as_list())
        self.built = True

    def get_config(self):
        """Serialize the stack: config of every cell plus base-layer config."""
        cells = []
        for cell in self.cells:
            cells.append(generic_utils.serialize_keras_object(cell))
        config = {'cells': cells}
        base_config = super(StackedRNNCells, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    @classmethod
    def from_config(cls, config, custom_objects=None):
        """Rebuild the stack from a `get_config()` dictionary."""
        # Imported here to avoid a circular import at module load time.
        from tensorflow.python.keras.layers import deserialize as deserialize_layer
        cells = []
        for cell_config in config.pop('cells'):
            cells.append(deserialize_layer(cell_config, custom_objects=custom_objects))
        return cls(cells, **config)
Wrapper allowing a stack of RNN cells to behave as a single cell. Used to implement efficient stacked RNNs. Args: cells: List of RNN cell instances. Examples: ```python batch_size = 3 sentence_max_length = 5 n_features = 2 new_shape = (batch_size, sentence_max_length, n_features) x = tf.constant(np.reshape(np.arange(30), new_shape), dtype = tf.float32) rnn_cells = [tf.keras.layers.LSTMCell(128) for _ in range(2)] stacked_lstm = tf.keras.layers.StackedRNNCells(rnn_cells) lstm_layer = tf.keras.layers.RNN(stacked_lstm) result = lstm_layer(x) ```
github-repos
def _get_profile_data_generator(self):
    """Build a closure that yields `ProfileDatum` objects for step stats.

    Returns:
        A function mapping a `device_step_stats` proto to a generator of
        `ProfileDatum` objects, one per (non-synthetic) node.
    """
    file_paths = {}
    line_numbers = {}
    func_names = {}
    op_types = {}
    for op in self._graph.get_operations():
        # Walk the traceback from the innermost frame outwards and remember
        # the deepest frame that still lives inside the TensorFlow library;
        # stop at the first frame that does not.
        for frame in reversed(op.traceback):
            path = frame[0]
            lineno = frame[1]
            fn_name = frame[2]
            if not source_utils.guess_is_tensorflow_py_library(path):
                break
            file_paths[op.name] = path
            line_numbers[op.name] = lineno
            func_names[op.name] = fn_name
        op_types[op.name] = op.type

    def profile_data_generator(device_step_stats):
        for node_stats in device_step_stats.node_stats:
            # Skip the synthetic source/sink nodes.
            if node_stats.node_name in ('_SOURCE', '_SINK'):
                continue
            yield profiling.ProfileDatum(
                device_step_stats.device,
                node_stats,
                file_paths.get(node_stats.node_name, ''),
                line_numbers.get(node_stats.node_name, 0),
                func_names.get(node_stats.node_name, ''),
                op_types.get(node_stats.node_name, ''))

    return profile_data_generator
Get function that generates `ProfileDatum` objects. Returns: A function that generates `ProfileDatum` objects.
github-repos
def delete(self, resource, force=False, timeout=(- 1)):
    """Delete a Deployment Server object identified by its UUID or URI.

    Args:
        resource (dict): Object to delete.
        force: When True, complete the operation despite connectivity
            problems or errors on the resource itself. Defaults to False.
        timeout: Timeout in seconds; waits for task completion by default.
            A timeout only stops waiting — it does not abort the operation
            in OneView.

    Returns:
        bool: Whether the resource was successfully deleted.
    """
    # Plain delegation to the underlying REST client.
    client = self._client
    return client.delete(resource, force=force, timeout=timeout)
Deletes a Deployment Server object based on its UUID or URI. Args: resource (dict): Object to delete. force: If set to true, the operation completes despite any problems with network connectivity or errors on the resource itself. The default is false. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: bool: Indicates if the Deployment Server was successfully deleted.
codesearchnet
def _get_output_columns(nodes, context):
    """Collect the labeled output columns for a list of SqlNodes.

    Args:
        nodes: List[SqlNode], the nodes to get output columns from.
        context: CompilationContext, global compilation state and metadata.

    Returns:
        List[Column], SQLAlchemy columns to output for this query, each
        labeled with its declared output name.
    """
    return [
        sql_context_helpers.get_column(output.field_name, node, context)
                           .label(output.output_name)
        for node in nodes
        for output in sql_context_helpers.get_outputs(node, context)
    ]
Get the output columns for a list of SqlNodes. Args: nodes: List[SqlNode], the nodes to get output columns from. context: CompilationContext, global compilation state and metadata. Returns: List[Column], list of SqlAlchemy Columns to output for this query.
codesearchnet
def inspect_commit(self, commit):
    """Return info about a specific commit.

    Args:
        commit: A tuple, string, or Commit object representing the commit.
    """
    return self.stub.InspectCommit(
        proto.InspectCommitRequest(commit=commit_from(commit)),
        metadata=self.metadata)
Returns info about a specific Commit. Params: * commit: A tuple, string, or Commit object representing the commit.
juraj-google-style
def greedy_coloring(adj):
    """Determine a vertex coloring with a greedy heuristic.

    Args:
        adj (dict): Edge structure of the graph, ``{node: neighbors, ...}``
            where ``neighbors`` is a set.

    Returns:
        dict: The coloring ``{node: color, ...}``.
        dict: The color classes ``{color: {node, ...}, ...}``.

    Note:
        Greedy heuristic only — the resulting coloring is not necessarily
        minimal.
    """
    coloring = {}
    color_classes = {}
    # Initially every node may take any of len(adj) colors.
    candidates = {node: set(range(len(adj))) for node in adj}
    while candidates:
        # Color the most constrained node first (fewest remaining colors);
        # ties break by dict insertion order, as before.
        node = min(candidates, key=lambda v: len(candidates[v]))
        color = min(candidates[node])
        coloring[node] = color
        color_classes.setdefault(color, set()).add(node)
        # The chosen color is no longer available to uncolored neighbours.
        for neighbor in adj[node]:
            remaining = candidates.get(neighbor)
            if remaining is not None:
                remaining.discard(color)
        del candidates[node]
    return (coloring, color_classes)
Determines a vertex coloring. Args: adj (dict): The edge structure of the graph to be colored. `adj` should be of the form {node: neighbors, ...} where neighbors is a set. Returns: dict: the coloring {node: color, ...} dict: the colors {color: [node, ...], ...} Note: This is a greedy heuristic: the resulting coloring is not necessarily minimal.
codesearchnet
def _load_info(self): url = ('%s/prefix?duration=36000' % self.base_url) r = self.gbdx_connection.get(url) r.raise_for_status() return r.json()
Get user info for GBDX S3, put into instance vars for convenience. Args: None. Returns: Dictionary with S3 access key, S3 secret key, S3 session token, user bucket and user prefix (dict).
codesearchnet
def typing(self, *, channel: str):
    """Send a typing indicator to the given channel.

    Indicates that this app is currently writing a message to the channel.

    Args:
        channel (str): The channel id, e.g. ``'C024BE91L'``.

    Raises:
        SlackClientNotConnectedError: Websocket connection is closed.
    """
    message = {
        'type': 'typing',
        'id': self._next_msg_id(),
        'channel': channel,
    }
    self.send_over_websocket(payload=message)
Sends a typing indicator to the specified channel. This indicates that this app is currently writing a message to send to a channel. Args: channel (str): The channel id. e.g. 'C024BE91L' Raises: SlackClientNotConnectedError: Websocket connection is closed.
juraj-google-style
def sd(line, cell=None):
    """Implements the stackdriver cell magic for ipython notebooks.

    Args:
        line: the contents of the %sd magic line.
        cell: the (optional) contents of the cell body.

    Returns:
        The results of executing the cell.
    """
    description = ('Execute various Stackdriver related operations. Use "%sd '
                   '<stackdriver_product> -h" for help on a specific Stackdriver product.')
    parser = google.datalab.utils.commands.CommandParser(prog='%sd', description=description)
    _create_monitoring_subparser(parser)
    return google.datalab.utils.commands.handle_magic_line(line, cell, parser)
Implements the stackdriver cell magic for ipython notebooks. Args: line: the contents of the %sd magic line. cell: the optional contents of the cell body. Returns: The results of executing the cell.
juraj-google-style
def report_uninitialized_variables(var_list=None, name='report_uninitialized_variables'):
    """Adds ops to list the names of uninitialized variables.

    When run, it returns a 1-D tensor containing the names of uninitialized
    variables if there are any, or an empty array if there are none.

    Args:
        var_list: List of `Variable` objects to check. Defaults to the value
            of `global_variables() + local_variables()`.
        name: Optional name of the `Operation`.

    Returns:
        A 1-D tensor containing names of the uninitialized variables, or an
        empty 1-D tensor if there are no variables or no uninitialized
        variables.
    """
    if var_list is None:
        var_list = global_variables() + local_variables()
        if not var_list:
            var_list = []
            # No variables registered in the usual collections: fall back to
            # scanning the default graph for variable ops.
            for op in ops.get_default_graph().get_operations():
                if op.type in ['Variable', 'VariableV2', 'AutoReloadVariable']:
                    var_list.append(op.outputs[0])
    with ops.name_scope(name):
        if var_list:
            init_vars = [state_ops.is_variable_initialized(v) for v in var_list]
        # Device for the report op; overridable via environment variable.
        local_device = os.environ.get('TF_DEVICE_FOR_UNINITIALIZED_VARIABLE_REPORTING', '/cpu:0')
        with ops.device(local_device):
            if not var_list:
                # Empty tensor so callers only need to check for size 0 as an
                # indication of model readiness.
                return array_ops.constant([], dtype=dtypes.string)
            else:
                # Boolean mask of which variables are NOT initialized, applied
                # to the tensor of variable op names.
                variables_mask = math_ops.logical_not(array_ops_stack.stack(init_vars))
                variable_names_tensor = array_ops.constant([s.op.name for s in var_list])
                return array_ops.boolean_mask(variable_names_tensor, variables_mask)
Adds ops to list the names of uninitialized variables. When run, it returns a 1-D tensor containing the names of uninitialized variables if there are any, or an empty array if there are none. Args: var_list: List of `Variable` objects to check. Defaults to the value of `global_variables() + local_variables()` name: Optional name of the `Operation`. Returns: A 1-D tensor containing names of the uninitialized variables, or an empty 1-D tensor if there are no variables or no uninitialized variables.
github-repos
def query_properties_with_values(self, query, include_defaults=True):
    """Query property values of |HasProps| instances with a predicate.

    Args:
        query (callable): Accepts a property descriptor and returns True or
            False.
        include_defaults (bool, optional): Whether to include properties
            that have not been explicitly set by a user (default: True).

    Returns:
        dict: Mapping of property names to serializable values for all
        matching properties.
    """
    if include_defaults:
        keys = self.properties()
    else:
        # Only properties that were explicitly set or carry unstable defaults.
        keys = set(self._property_values.keys()) | set(self._unstable_default_values.keys())

    themed = self.themed_values()
    themed_keys = set(themed.keys()) if themed else set()
    # Themed properties are always candidates.
    keys |= themed_keys

    result = {}
    for key in keys:
        descriptor = self.lookup(key)
        if not query(descriptor):
            continue
        value = descriptor.serializable_value(self)
        if not include_defaults and key not in themed_keys:
            # Skip unstable container defaults that were never explicitly set.
            if isinstance(value, PropertyValueContainer) and key in self._unstable_default_values:
                continue
        result[key] = value
    return result
Query the properties values of |HasProps| instances with a predicate. Args: query (callable) : A callable that accepts property descriptors and returns True or False include_defaults (bool, optional) : Whether to include properties that have not been explicitly set by a user (default: True) Returns: dict : mapping of property names and values for matching properties
codesearchnet
def add(self, method_mask, path, func):
    """Append a middleware function to the chain.

    The stored function is yielded when iterating over the chain with a
    matching method and path.

    Args:
        method_mask (growler.http.HTTPMethod): Bitwise mask matched against
            request methods.
        path (str or regex): Object compared against request urls.
        func (callable): The function to be yielded from the generator upon
            a request matching the method_mask and path.
    """
    # A 3-parameter callable is, by convention, an error handler.
    handles_errors = len(signature(func).parameters) == 3
    node = MiddlewareNode(
        func=func,
        mask=method_mask,
        path=path,
        is_errorhandler=handles_errors,
        is_subchain=isinstance(func, MiddlewareChain),
    )
    self.mw_list.append(node)
Add a function to the middleware chain. This function is returned when iterating over the chain with matching method and path. Args: method_mask (growler.http.HTTPMethod): A bitwise mask intended to match specific request methods. path (str or regex): An object with which to compare request urls func (callable): The function to be yieled from the generator upon a request matching the method_mask and path
juraj-google-style
def trigger(self, target: str, trigger: str, parameters: Dict[str, Any]={}):
    """Call the named Trigger of another Area with optional parameters.

    Args:
        target: Name of the target Area.
        trigger: Name of the Trigger to invoke.
        parameters: Parameters of the trigger call.

    NOTE(review): the mutable default ``{}`` is harmless here because the
    body never mutates it, but it is a smell if an implementation is added.
    """
    # No-op in this implementation — presumably a hook overridden or
    # implemented elsewhere; confirm against subclasses/callers.
    pass
Calls the specified Trigger of another Area with the optionally given parameters. Args: target: The name of the target Area. trigger: The name of the Trigger. parameters: The parameters of the function call.
juraj-google-style
def synthesize(self, duration, tick_frequency):
    """Synthesize periodic "ticks": white noise shaped by a decaying envelope.

    Args:
        duration (numpy.timedelta64): Total duration of the sound to be
            synthesized.
        tick_frequency (numpy.timedelta64): Interval between successive
            ticks.
    """
    sr = self.samplerate.samples_per_second
    # A tick is 100ms of white noise with a linear fade-out envelope.
    tick = np.random.uniform(low=(- 1.0), high=1.0, size=int((sr * 0.1)))
    tick *= np.linspace(1, 0, len(tick))
    samples = np.zeros(int((sr * (duration / Seconds(1)))))
    ticks_per_second = (Seconds(1) / tick_frequency)
    # NOTE(review): this statement was garbled in the source; reconstructed
    # as the sample step between tick onsets (sample rate divided by ticks
    # per second) — confirm against upstream.
    step = int(sr // ticks_per_second)
    # Add a (possibly truncated) tick at each onset.
    for i in range(0, len(samples), step):
        size = len(samples[i:(i + len(tick))])
        samples[i:(i + len(tick))] += tick[:size]
    return AudioSamples(samples, self.samplerate)
Synthesize periodic "ticks", generated from white noise and an envelope Args: duration (numpy.timedelta64): The total duration of the sound to be synthesized tick_frequency (numpy.timedelta64): The frequency of the ticking sound
codesearchnet
def _update_fetch_positions(self, partitions): self._fetcher.reset_offsets_if_needed(partitions) if not self._subscription.has_all_fetch_positions(): if (self.config['api_version'] >= (0, 8, 1) and self.config['group_id'] is not None): self._coordinator.refresh_committed_offsets_if_needed() self._fetcher.update_fetch_positions(partitions)
Set the fetch position to the committed position (if there is one) or reset it using the offset reset policy the user has configured. Arguments: partitions (List[TopicPartition]): The partitions that need updating fetch positions. Raises: NoOffsetForPartitionError: If no offset is stored for a given partition and no offset reset policy is defined.
juraj-google-style
def WriteRow(self, values):
    """Write a single row to the underlying CSV buffer.

    Args:
        values: A list of string values to be inserted into the CSV output.
    """
    precondition.AssertIterableType(values, text)
    if compatibility.PY2:
        # Python 2's csv module works on byte strings.
        encoded = [value.encode("utf-8") for value in values]
        self._csv.writerow(encoded)
    else:
        self._csv.writerow(values)
Writes a single row to the underlying buffer. Args: values: A list of string values to be inserted into the CSV output.
juraj-google-style
def hotkey(*args, **kwargs):
    """Press the given keys in order, then release them in reverse order.

    Calling ``hotkey('ctrl', 'shift', 'c')`` performs a "Ctrl-Shift-C"
    keyboard-shortcut press.

    Args:
        key(s) (str): The series of keys to press, in order.
        interval (float, optional): Seconds to pause between each press or
            release. Defaults to 0.0 (no pause).

    Returns:
        None
    """
    interval = float(kwargs.get('interval', 0.0))
    _failSafeCheck()

    def _normalize(key):
        # Multi-character key names are case-insensitive.
        return key.lower() if len(key) > 1 else key

    for key in args:
        platformModule._keyDown(_normalize(key))
        time.sleep(interval)
    for key in reversed(args):
        platformModule._keyUp(_normalize(key))
        time.sleep(interval)

    _autoPause(kwargs.get('pause', None), kwargs.get('_pause', True))
Performs key down presses on the arguments passed in order, then performs key releases in reverse order. The effect is that calling hotkey('ctrl', 'shift', 'c') would perform a "Ctrl-Shift-C" hotkey/keyboard shortcut press. Args: key(s) (str): The series of keys to press, in order. This can also be a list of key strings to press. interval (float, optional): The number of seconds in between each press. 0.0 by default, for no pause in between presses. Returns: None
codesearchnet
def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool=True):
    """Serialize this instance to a JSON file.

    Args:
        json_file_path (`str` or `os.PathLike`): Path of the JSON file in
            which this instance's parameters will be saved.
        use_diff (`bool`, *optional*, defaults to `True`): If `True`, only
            the difference between this instance and the default config is
            serialized.
    """
    serialized = self.to_json_string(use_diff=use_diff)
    with open(json_file_path, 'w', encoding='utf-8') as writer:
        writer.write(serialized)
Save this instance to a JSON file. Args: json_file_path (`str` or `os.PathLike`): Path to the JSON file in which this configuration instance's parameters will be saved. use_diff (`bool`, *optional*, defaults to `True`): If set to `True`, only the difference between the config instance and the default `GenerationConfig()` is serialized to JSON file.
github-repos
def CheckOperatorSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing around operators.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Mask out overloaded operator names (e.g. operator<<) so the symbol in
  # the name is not mistaken for a real operator missing spaces.
  while True:
    match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
    if match:
      line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)
    else:
      break

  # Missing spaces around '='; '==', '<=', compound assignments and
  # `operator=` are legitimate, and if/while/for headers are skipped.
  if ((Search(r'[\w.]=', line) or Search(r'=[\w.]', line)) and not Search(r'\b(if|while|for) ', line) and not Search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line) and not Search(r'operator=', line)):
    error(filename, linenum, 'whitespace/operators', 4, 'Missing spaces around =')

  # Missing spaces around binary comparison / logical operators.
  match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around %s' % match.group(1))
  elif not Match(r'#.*include', line):
    # NOTE(review): the condition above was truncated in the source; it is
    # reconstructed from upstream cpplint (skip #include lines, whose
    # <header> brackets would otherwise trip the '<'/'>' checks) — confirm.
    # '<' with no space on either side is only flagged when the expression
    # does not close on this line (i.e. it is not a template argument list).
    match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
    if match:
      (_, _, end_pos) = CloseExpression(
          clean_lines, linenum, len(match.group(1)))
      if end_pos <= -1:
        error(filename, linenum, 'whitespace/operators', 3,
              'Missing spaces around <')

    # Same idea for '>': only flagged when no matching '<' is found.
    match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
    if match:
      (_, _, start_pos) = ReverseCloseExpression(
          clean_lines, linenum, len(match.group(1)))
      if start_pos <= -1:
        error(filename, linenum, 'whitespace/operators', 3,
              'Missing spaces around >')

  # '<<' needs spaces unless it is a numeric shift (e.g. 10<<20) or an
  # `operator;` artifact of the masking above.
  match = Search(r'(operator|[^\s(<])(?:L|UL|ULL|l|ul|ull)?<<([^\s,=<])', line)
  if (match and not (match.group(1).isdigit() and match.group(2).isdigit()) and not (match.group(1) == 'operator' and match.group(2) == ';')):
    error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around <<')

  # '>>' immediately followed by an identifier is a shift missing spaces.
  match = Search(r'>>[a-zA-Z_]', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around >>')

  # Unary '!' / '~' should not be followed by a space; '--' / '++' should
  # not be separated from their operand.
  match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 4, 'Extra space for operator %s' % match.group(1))
Checks for horizontal spacing around operators. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
juraj-google-style
def _get_current_tf_device():
    """Return the device of the current context as a DeviceSpec.

    Returns:
        The device spec captured from the current scope's device functions
        (a `DeviceSpecV2` under TF2, otherwise a `DeviceSpecV1`).
    """
    graph = get_graph()
    capture_op = _TfDeviceCaptureOp()
    # Applying the graph's device functions stamps the op with the device
    # of the enclosing scope.
    graph._apply_device_functions(capture_op)
    spec_cls = device_spec.DeviceSpecV2 if tf2.enabled() else device_spec.DeviceSpecV1
    return spec_cls.from_string(capture_op.device)
Return explicit device of current context, otherwise returns `None`. Returns: If the current device scope is explicitly set, it returns a string with the device (`CPU` or `GPU`). If the scope is not explicitly set, it will return `None`.
github-repos
def _ExpectedKeysForEntry(self, entry): return [entry.name]
Generate a list of expected cache keys for this type of map. Args: entry: A ShadowMapEntry Returns: A list of strings
github-repos
def get_firmware_version(self, cached=True):
    """Return the SK8 device firmware version string.

    Args:
        cached (bool): When True, return the locally cached copy of the
            firmware version if one exists; otherwise read it from the
            device over BLE.

    Returns:
        str: The current firmware version, or None if the characteristic
        handle could not be found.
    """
    if cached and self.firmware_version != 'unknown':
        return self.firmware_version

    handle = self.get_characteristic_handle_from_uuid(UUID_FIRMWARE_REVISION)
    if handle is None:
        logger.warn('Failed to find handle for firmware version')
        return None

    # Read the attribute from the device and cache it for next time.
    self.firmware_version = self.dongle._read_attribute(self.conn_handle, handle)
    return self.firmware_version
Returns the SK8 device firmware version. Args: cached (bool): if True, returns the locally cached copy of the firmware version. If this is set to False, or the version is not cached, it will read from the device instead. Returns: str. The current firmware version string. May be `None` if an error occurs.
codesearchnet
def market_normal(self, session, after_open, before_close) -> Session:
    """Time interval between market open and close, trimmed at both ends.

    Args:
        session: [allday, day, am, pm, night]
        after_open: mins after open
        before_close: mins before close

    Returns:
        Session of start_time and end_time (or SessNA when unknown /
        degenerate).
    """
    logger = logs.get_logger(self.market_normal)

    if session not in self.exch:
        return SessNA
    ss = self.exch[session]

    start_time = shift_time(ss[0], int(after_open) + 1)
    end_time = shift_time(ss[-1], -int(before_close))

    request_cross = pd.Timestamp(start_time) >= pd.Timestamp(end_time)
    session_cross = pd.Timestamp(ss[0]) >= pd.Timestamp(ss[1])
    # A crossed request is only an error when the session itself does not
    # legitimately cross midnight.
    if request_cross and (not session_cross):
        logger.warning(f'end time {end_time} is earlier than {start_time} ...')
        return SessNA

    return Session(start_time, end_time)
Time intervals between market Args: session: [allday, day, am, pm, night] after_open: mins after open before_close: mins before close Returns: Session of start_time and end_time
codesearchnet
def _setweights(self): for name_w in self.weights: raw_w = getattr(self.module, name_w + '_raw') w = torch.nn.functional.dropout(raw_w, p=self.dropout, training=self.training) if hasattr(self.module, name_w): delattr(self.module, name_w) setattr(self.module, name_w, w)
Uses pytorch's built-in dropout function to apply dropout to the parameters of the wrapped module. Args: None Returns: None
juraj-google-style
def get_compatible_systems(self, id_or_uri):
    """Retrieve all storage systems applicable to this storage volume template.

    Args:
        id_or_uri: The resource id or URI.

    Returns:
        list: Storage systems.
    """
    base_uri = self._client.build_uri(id_or_uri)
    return self._client.get(base_uri + "/compatible-systems")
Retrieves a collection of all storage systems that is applicable to this storage volume template. Args: id_or_uri: Can be either the storage volume template id or its URI. Returns: list: Storage systems.
juraj-google-style