code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def launch_run(self, command, project=None, entity=None, run_id=None):
    """Launch a run in the cloud via a GraphQL mutation.

    Args:
        command (str): The command to run.
        project (str, optional): Project to scope the run to; falls back
            to the ``project`` setting.
        entity (str, optional): Entity to scope the run to; falls back to
            the ``entity`` setting.
        run_id (str, optional): The run_id to scope to.

    Returns:
        The GraphQL response, ``[{"podName", "status"}]``.
    """
    # NOTE(review): gql() is called with no query document here -- the
    # mutation string appears to have been lost in extraction; confirm
    # against the original source before relying on this.
    query = gql()
    # Capture uncommitted changes as a git diff so the remote run can
    # re-apply the local working-tree state.
    patch = BytesIO()
    if self.git.dirty:
        self.git.repo.git.execute(['git', 'diff'], output_stream=patch)
        patch.seek(0)
    cwd = "."
    if self.git.enabled:
        # Path of the current directory relative to the repository root.
        cwd = cwd + os.getcwd().replace(self.git.repo.working_dir, "")
    return self.gql(query, variable_values={
        'entity': entity or self.settings('entity'),
        'model': project or self.settings('project'),
        'command': command,
        'runId': run_id,
        'patch': patch.read().decode("utf8"),
        'cwd': cwd
    })
Launch a run in the cloud. Args: command (str): The command to run program (str): The file to run project (str): The project to scope the runs to entity (str, optional): The entity to scope this project to. Defaults to public models run_id (str, optional): The run_id to scope to Returns: [{"podName","status"}]
juraj-google-style
def abs_path(path):
    """Resolve ``~`` and relative components to an absolute path.

    Args:
        path: The path to expand.

    Returns:
        The absolute path of the input path.
    """
    expanded = os.path.expanduser(path)
    return os.path.abspath(expanded)
Resolve the '.' and '~' in a path to get the absolute path. Args: path: The path to expand. Returns: The absolute path of the input path.
github-repos
class InformerFeatureEmbedder(nn.Module):
    """Embed a sequence of categorical features.

    Each categorical feature gets its own ``nn.Embedding``; the
    per-feature embeddings are concatenated along the last dimension.

    Args:
        cardinalities (`list[int]`):
            List of cardinalities of the categorical features.
        embedding_dims (`list[int]`):
            List of embedding dimensions of the categorical features.
    """

    def __init__(self, cardinalities: List[int], embedding_dims: List[int]) -> None:
        super().__init__()
        self.num_features = len(cardinalities)
        embedders = [
            nn.Embedding(cardinality, dim)
            for cardinality, dim in zip(cardinalities, embedding_dims)
        ]
        self.embedders = nn.ModuleList(embedders)

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        # Split the trailing axis into one slice per categorical feature;
        # a single feature needs no split.
        if self.num_features > 1:
            feature_slices = torch.chunk(features, self.num_features, dim=-1)
        else:
            feature_slices = [features]
        embedded = [
            embedder(feature_slice.squeeze(-1))
            for embedder, feature_slice in zip(self.embedders, feature_slices)
        ]
        return torch.cat(embedded, dim=-1)
Embed a sequence of categorical features. Args: cardinalities (`list[int]`): List of cardinalities of the categorical features. embedding_dims (`list[int]`): List of embedding dimensions of the categorical features.
github-repos
def init_backend(self, phonemizer_lang: str):
    """Initialize the phonemizer backend for a given language.

    Args:
        phonemizer_lang (`str`): The language to be used.
    """
    # Fails fast with a helpful error when the optional `phonemizer`
    # dependency is not installed.
    requires_backends(self, 'phonemizer')
    from phonemizer.backend import BACKENDS
    # 'remove-flags' drops language-switch markers from the output.
    self.backend = BACKENDS[self.phonemizer_backend](phonemizer_lang, language_switch='remove-flags')
Initializes the backend. Args: phonemizer_lang (`str`): The language to be used.
github-repos
def serialCmdPwdAuth(self, password_str):
    """Password step of set commands.

    This method is normally called within another serial command, so it
    does not issue a termination string.  Any default password is set in
    the caller parameter list, never here.

    Args:
        password_str (str): Required password.

    Returns:
        bool: True on completion and ACK.
    """
    result = False
    try:
        # Frame: SOH 'P' '1' STX '(' + hex(password) + ')' ETX, followed
        # by a CRC16 over everything after the SOH byte.
        # NOTE(review): str.decode("hex") and hexlify-on-str are
        # Python 2-only idioms; this block will not run under Python 3
        # without converting to binascii.unhexlify/bytes -- confirm the
        # target interpreter before reuse.
        req_start = "0150310228" + binascii.hexlify(password_str) + "2903"
        req_crc = self.calc_crc16(req_start[2:].decode("hex"))
        req_str = req_start + req_crc
        self.m_serial_port.write(req_str.decode("hex"))
        # A single ACK byte (0x06) means the meter accepted the password.
        if self.m_serial_port.getResponse(self.getContext()).encode("hex") == "06":
            ekm_log("Password accepted (" + self.getContext() + ")")
            result = True
        else:
            ekm_log("Password call failure no 06(" + self.getContext() + ")")
    except:
        # Bare except: deliberately swallows any failure and reports
        # False; the exception is logged for diagnosis.
        ekm_log("Password call failure by exception(" + self.getContext() + ")")
        ekm_log(traceback.format_exc(sys.exc_info()))
    return result
Password step of set commands This method is normally called within another serial command, so it does not issue a termination string. Any default password is set in the caller parameter list, never here. Args: password_str (str): Required password. Returns: bool: True on completion and ACK.
juraj-google-style
def decode_conjure_union_type(cls, obj, conjure_type):
    """Decodes json into a conjure union type.

    Args:
        obj: the json object to decode.
        conjure_type: a class object which is the union type we're
            decoding into.

    Returns:
        An instance of type conjure_type.

    Raises:
        ValueError: if the 'type' tag names no known union option.
    """
    type_of_union = obj['type']
    # Find the union option whose wire identifier matches the 'type' tag;
    # the for/else raises when no option matches.
    for (attr, conjure_field) in conjure_type._options().items():
        if (conjure_field.identifier == type_of_union):
            attribute = attr
            conjure_field_definition = conjure_field
            break
    else:
        raise ValueError('unknown union type {0} for {1}'.format(type_of_union, conjure_type))
    deserialized = {}
    if ((type_of_union not in obj) or (obj[type_of_union] is None)):
        # Value absent or null: let the field definition decide whether
        # null is acceptable for this option.
        cls.check_null_field(obj, deserialized, conjure_field_definition)
    else:
        value = obj[type_of_union]
        field_type = conjure_field_definition.field_type
        deserialized[attribute] = cls.do_decode(value, field_type)
    return conjure_type(**deserialized)
Decodes json into a conjure union type. Args: obj: the json object to decode conjure_type: a class object which is the union type we're decoding into Returns: An instance of type conjure_type.
codesearchnet
def init_config_json(config_file):
    """Deserialize a JSON configuration file.

    Args:
        config_file (str): The path to the JSON file.

    Returns:
        dict: A dictionary object containing the JSON data, or ``None``
        if ``config_file`` does not exist.

    Raises:
        ArcRestHelperError: if reading or parsing the file fails.
    """
    json_data = None
    try:
        if os.path.exists(config_file):
            with open(config_file) as json_file:
                json_data = json.load(json_file)
            return unicode_convert(json_data)
        else:
            return None
    # FIX: use `except Exception` instead of a bare `except:` so that
    # SystemExit and KeyboardInterrupt are not swallowed and re-raised
    # as helper errors.
    except Exception:
        line, filename, synerror = trace()
        raise ArcRestHelperError({
            "function": "init_config_json",
            "line": line,
            "filename": filename,
            "synerror": synerror,
        })
    finally:
        # Explicitly drop the reference and collect; runs after the
        # return value has already been computed.
        json_data = None
        del json_data
        gc.collect()
Deserializes a JSON configuration file. Args: config_file (str): The path to the JSON file. Returns: dict: A dictionary object containing the JSON data. If ``config_file`` does not exist, returns ``None``.
juraj-google-style
def add_sync_methods(cls):
    """Class decorator adding synchronous methods for async ones.

    This modifies the class in place: for every attribute named
    ``foo_async`` a synchronous ``foo`` is added, unless a method of
    that name already exists.

    Args:
        cls: A class.

    Returns:
        The same class, modified in place.
    """
    suffix = '_async'
    for async_name in list(cls.__dict__):
        if not async_name.endswith(suffix):
            continue
        sync_name = async_name[:-len(suffix)]
        # Never clobber an explicitly defined synchronous method.
        if not hasattr(cls, sync_name):
            setattr(cls, sync_name, _make_sync_method(async_name))
    return cls
Class decorator to add synchronous methods corresponding to async methods. This modifies the class in place, adding additional methods to it. If a synchronous method of a given name already exists it is not replaced. Args: cls: A class. Returns: The same class, modified in place.
juraj-google-style
def load_validation_plugin(name=None):
    """Find and load the chosen validation plugin.

    Args:
        name (string): the name of the entry_point, as advertised in the
            setup.py of the providing package.

    Returns:
        An uninstantiated subclass of ``BaseValidationRules``; the
        default rules when ``name`` is not given.

    Raises:
        ResolutionError: if no entry point with the given name exists.
        TypeError: if the plugin does not subclass BaseValidationRules.
    """
    if not name:
        return BaseValidationRules
    plugin = None
    # If several entry points share the name, the last one loaded wins.
    for entry_point in iter_entry_points('bigchaindb.validation', name):
        plugin = entry_point.load()
    if not plugin:
        raise ResolutionError(
            'No plugin found in group `bigchaindb.validation` with name `{}`'.
            format(name))
    # The plugin must implement the expected validation interface.
    if not issubclass(plugin, (BaseValidationRules,)):
        raise TypeError('object of type "{}" does not implement `bigchaindb.'
                        'validation.BaseValidationRules`'.format(type(plugin)))
    return plugin
Find and load the chosen validation plugin. Args: name (string): the name of the entry_point, as advertised in the setup.py of the providing package. Returns: an uninstantiated subclass of ``bigchaindb.validation.BaseValidationRules``
juraj-google-style
async def selfplay(state, flagfile='selfplay'):
    """Run selfplay and write a training chunk to the fsdb golden_chunk_dir.

    Args:
        state: the RL loop State instance.
        flagfile: the name of the flagfile to use for selfplay, either
            'selfplay' (the default) or 'bootstrap'.
    """
    output_dir = os.path.join(fsdb.selfplay_dir(), state.output_model_name)
    holdout_dir = os.path.join(fsdb.holdout_dir(), state.output_model_name)
    lines = await run(
        'bazel-bin/cc/selfplay',
        '--flagfile={}.flags'.format(os.path.join(FLAGS.flags_dir, flagfile)),
        '--model={}'.format(state.best_model_path),
        '--output_dir={}'.format(output_dir),
        '--holdout_dir={}'.format(holdout_dir),
        '--seed={}'.format(state.seed))
    # Only the last few lines of the subprocess output contain the
    # win-rate summary table.
    result = '\n'.join(lines[-6:])
    logging.info(result)
    stats = parse_win_stats_table(result, 1)[0]
    num_games = stats.total_wins
    logging.info('Black won %0.3f, white won %0.3f',
                 stats.black_wins.total / num_games,
                 stats.white_wins.total / num_games)
    # Gather the examples written by selfplay into one golden chunk.
    pattern = os.path.join(output_dir, '*', '*.zz')
    # Seed all RNGs so shuffling into the chunk is reproducible.
    random.seed(state.seed)
    tf.set_random_seed(state.seed)
    np.random.seed(state.seed)
    buffer = example_buffer.ExampleBuffer(sampling_frac=1.0)
    logging.info('Writing golden chunk from "{}"'.format(pattern))
    buffer.parallel_fill(tf.gfile.Glob(pattern))
    buffer.flush(os.path.join(fsdb.golden_chunk_dir(),
                              state.output_model_name + '.tfrecord.zz'))
Run selfplay and write a training chunk to the fsdb golden_chunk_dir. Args: state: the RL loop State instance. flagfile: the name of the flagfile to use for selfplay, either 'selfplay' (the default) or 'bootstrap'.
juraj-google-style
def _group_and_publish_tasks_statistics(self, result):
    """Group statistics of instances of the same task by summing them.

    Also adds an 'instances_count' statistic recording how many instances
    of each task are running on the server, then publishes the result.

    Args:
        result: result of a mesos query: list of dictionaries with
            'executor_id' and 'framework_id' as strings and 'statistics'
            as a dictionary of labeled numbers.  Mutated in place.
    """
    # Normalize executor ids: strip the trailing ".<instance>" suffix so
    # all instances of the same task share one id, and seed every entry
    # with an instance count of 1 (summed during grouping below).
    for i in result:
        executor_id = i['executor_id']
        i['executor_id'] = executor_id[:executor_id.rfind('.')]
        i['statistics']['instances_count'] = 1
    # Merge entries that now share an executor_id by summing statistics.
    r = {}
    for i in result:
        executor_id = i['executor_id']
        r[executor_id] = r.get(executor_id, {})
        r[executor_id]['framework_id'] = i['framework_id']
        r[executor_id]['statistics'] = r[executor_id].get('statistics', {})
        r[executor_id]['statistics'] = self._sum_statistics(
            i['statistics'], r[executor_id]['statistics'])
    # Derive percentage metrics from the summed raw counters, then publish.
    self._add_cpu_usage(r)
    self._add_cpu_percent(r)
    self._add_mem_percent(r)
    self._publish(r)
This function groups statistics of the same tasks by adding them. It also adds an 'instances_count' statistic to provide information about how many instances are running on the server Args: result: result of mesos query. List of dictionaries with 'executor_id', 'framework_id' as strings and 'statistics' as a dictionary of labeled numbers
codesearchnet
def find_dependencies(self, dataset_keys, **dfilter):
    """Create the dependency tree.

    Args:
        dataset_keys (iterable): Strings or DatasetIDs to find
            dependencies for.  Mutated in place: requested keys are
            replaced with the resolved dataset names.
        **dfilter (dict): Additional filter parameters. See
            `satpy.readers.get_key` for more details.

    Returns:
        set: Datasets that could not be found.
    """
    unknown_datasets = set()
    # Iterate over a copy: keys are removed/replaced during the loop.
    for key in dataset_keys.copy():
        n, unknowns = self._find_dependencies(key, **dfilter)
        dataset_keys.discard(key)
        if n is not None:
            # Replace the requested key with the resolved dataset name.
            dataset_keys.add(n.name)
        if unknowns:
            unknown_datasets.update(unknowns)
            continue
        # NOTE(review): add_child is invoked with `self` as an explicit
        # first argument, and `n` may be None here when resolution failed
        # without unknowns -- confirm against add_child's contract.
        self.add_child(self, n)
    return unknown_datasets
Create the dependency tree. Args: dataset_keys (iterable): Strings or DatasetIDs to find dependencies for **dfilter (dict): Additional filter parameters. See `satpy.readers.get_key` for more details. Returns: (Node, set): Root node of the dependency tree and a set of unknown datasets
juraj-google-style
def run_cm(cm, time_scale):
    """Iterate a connectivity matrix the specified number of steps.

    Args:
        cm (np.ndarray): A connectivity matrix.
        time_scale (int): The number of steps to run.

    Returns:
        np.ndarray: The connectivity matrix at the new timescale.
    """
    powered = np.linalg.matrix_power(cm, time_scale)
    # Re-binarize: a path count above 1 just means "connected".
    powered[powered > 1] = 1
    return powered
Iterate a connectivity matrix the specified number of steps. Args: cm (np.ndarray): A connectivity matrix. time_scale (int): The number of steps to run. Returns: np.ndarray: The connectivity matrix at the new timescale.
codesearchnet
def CheckVlogArguments(filename, clean_lines, linenum, error):
    """Checks that VLOG() is only used for defining a logging level.

    For example, VLOG(2) is correct.  VLOG(INFO), VLOG(WARNING),
    VLOG(ERROR), and VLOG(FATAL) are not.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    line = clean_lines.elided[linenum]
    symbolic_vlog = Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line)
    if symbolic_vlog:
        error(filename, linenum, 'runtime/vlog', 5,
              'VLOG() should be used with numeric verbosity level. '
              'Use LOG() if you want symbolic severity levels.')
Checks that VLOG() is only used for defining a logging level. For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and VLOG(FATAL) are not. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
juraj-google-style
def clean_output_files(self, follow_parents=True):
    """Remove output files that are no longer needed.

    This method is called when the task reaches S_OK.  It removes all the
    output files produced by the task that are not needed by its children
    as well as the output files produced by its parents if no other node
    needs them.

    Args:
        follow_parents: If true, the output files of the parents nodes
            will be removed if possible.

    Return:
        list with the absolute paths of the files that have been removed.
    """
    paths = []
    if self.status != self.S_OK:
        logger.warning("Calling task.clean_output_files on a task whose status != S_OK")
    # Temporary files are always safe to remove.
    self.tmpdir.clean()
    # Collect the extensions still required by children that have not
    # completed yet; files with these extensions must be kept.
    except_exts = set()
    for child in self.get_children():
        if child.status == self.S_OK:
            continue
        i = [dep.node for dep in child.deps].index(self)
        except_exts.update(child.deps[i].exts)
    # Remove everything the garbage-collection policy allows, minus the
    # extensions that are still needed.
    exts = self.gc.exts.difference(except_exts)
    paths += self.outdir.remove_exts(exts)
    if not follow_parents:
        return paths
    # Try to clean the parents too: a parent's output can go only if no
    # unfinished child of that parent still depends on the extension.
    for parent in self.get_parents():
        ext2nodes = collections.defaultdict(list)
        for child in parent.get_children():
            if child.status == child.S_OK:
                continue
            i = [d.node for d in child.deps].index(parent)
            for ext in child.deps[i].exts:
                ext2nodes[ext].append(child)
        except_exts = [k for k, lst in ext2nodes.items() if lst]
        exts = self.gc.exts.difference(except_exts)
        paths += parent.outdir.remove_exts(exts)
    self.history.info("Removed files: %s" % paths)
    return paths
This method is called when the task reaches S_OK. It removes all the output files produced by the task that are not needed by its children as well as the output files produced by its parents if no other node needs them. Args: follow_parents: If true, the output files of the parents nodes will be removed if possible. Return: list with the absolute paths of the files that have been removed.
juraj-google-style
def GetSysFeeAmountByHeight(self, height):
    """Get the system fee for the specified block.

    Args:
        height (int): block height.

    Returns:
        int: the system fee amount for the block at ``height``.
    """
    # FIX: renamed the local from `hash`, which shadowed the builtin.
    block_hash = self.GetBlockHash(height)
    return self.GetSysFeeAmount(block_hash)
Get the system fee for the specified block. Args: height (int): block height. Returns: int:
juraj-google-style
def merge_csv(filenames: List[str],
              outfile: TextIO = sys.stdout,
              input_dialect: str = 'excel',
              output_dialect: str = 'excel',
              debug: bool = False,
              headers: bool = True) -> None:
    """Amalgamate multiple CSV/TSV/similar files into one.

    Args:
        filenames: list of filenames to process
        outfile: file-like object to write output to
        input_dialect: dialect of input files, as passed to ``csv.reader``
        output_dialect: dialect to write, as passed to ``csv.writer``
        debug: be verbose?
        headers: do the files have header lines?

    Raises:
        ValueError: if ``headers`` is True and a later file's header row
            differs from the first file's.
    """
    writer = csv.writer(outfile, dialect=output_dialect)
    written_header = False
    header_items = []
    for filename in filenames:
        log.info("Processing file " + repr(filename))
        with open(filename, 'r') as f:
            reader = csv.reader(f, dialect=input_dialect)
            if headers:
                if not written_header:
                    # First file: its header becomes the reference header.
                    header_items = next(reader)
                    if debug:
                        log.debug("Header row: {!r}", header_items)
                    writer.writerow(header_items)
                    written_header = True
                else:
                    new_headers = next(reader)
                    if new_headers != header_items:
                        # FIX: the message previously hard-coded
                        # "(unknown)" while passing `filename` as an
                        # unused format argument; name the file.
                        raise ValueError(
                            "Header line in file {filename} doesn't match - "
                            "it was {new} but previous was {old}".format(
                                filename=repr(filename),
                                new=repr(new_headers),
                                old=repr(header_items),
                            ))
                    if debug:
                        log.debug("Header row matches previous")
            else:
                if debug:
                    log.debug("No headers in use")
            for row in reader:
                if debug:
                    log.debug("Data row: {!r}", row)
                writer.writerow(row)
Amalgamate multiple CSV/TSV/similar files into one. Args: filenames: list of filenames to process outfile: file-like object to write output to input_dialect: dialect of input files, as passed to ``csv.reader`` output_dialect: dialect to write, as passed to ``csv.writer`` debug: be verbose? headers: do the files have header lines?
juraj-google-style
def parse_keys(self, sn: 'DataNode') -> Dict[(InstanceName, ScalarValue)]:
    """Parse the key dictionary in the context of a schema node.

    Args:
        sn: Schema node corresponding to a list.

    Returns:
        Mapping from instance name to the parsed (cooked) key value.

    Raises:
        NonexistentSchemaNode: if a key is not a data child of ``sn``.
        InvalidKeyValue: if a raw key value fails to parse.
    """
    res = {}
    for k in self.keys:
        knod = sn.get_data_child(*k)
        if (knod is None):
            raise NonexistentSchemaNode(sn.qual_name, *k)
        # Convert the raw (string) key value using the leaf's type.
        kval = knod.type.parse_value(self.keys[k])
        if (kval is None):
            raise InvalidKeyValue(self.keys[k])
        res[knod.iname()] = kval
    return res
Parse key dictionary in the context of a schema node. Args: sn: Schema node corresponding to a list.
codesearchnet
def van_enc_2d(x, first_depth, reuse=False):
    """The higher level structure encoder for the VAN.

    The high level structure is a vector instead of an image.

    Args:
        x: The higher level structure to encode (a vector).
        first_depth: The depth of the first layer. Depth is increased in
            subsequent layers.
        reuse: To reuse in variable scope or not.

    Returns:
        Tuple of the encoded feature map and the concatenation of the
        input with the flattened intermediate features.
    """
    with tf.variable_scope('van_enc', reuse=reuse):
        # Spatial size of the first feature map produced from the vector.
        a = 4
        b = 4
        enc = tf.nn.relu(x)
        enc = tf.layers.dense(enc, first_depth * a * b, tf.nn.relu)
        enc = tf.contrib.layers.layer_norm(enc)
        # Reshape the dense activations into an a x b feature map.
        enc = tf.reshape(enc, [-1, a, b, first_depth])
        enc = tf.layers.conv2d_transpose(
            enc, first_depth, 3, padding='same', activation=tf.nn.relu, strides=1)
        enc = tf.contrib.layers.layer_norm(enc)
        # Upsample 2x while doubling depth.
        enc = tf.layers.conv2d_transpose(
            enc, first_depth * 2, 3, padding='same', activation=tf.nn.relu,
            strides=2)
        van_higher_level_2 = tf.reshape(enc, [-1, a * 2 * b * 2 * first_depth * 2])
        enc = tf.layers.conv2d_transpose(
            enc, first_depth * 2, 3, padding='same', activation=tf.nn.relu,
            strides=1)
        enc = tf.contrib.layers.layer_norm(enc)
        enc = tf.layers.conv2d_transpose(
            enc, first_depth * 4, 3, padding='same', activation=tf.nn.relu,
            strides=1)
        van_higher_level_4 = tf.reshape(enc, [-1, a * 2 * b * 2 * first_depth * 4])
        # Skip-style concatenation of the raw input with the flattened
        # intermediate features.
        van_higher_level = tf.concat([x, van_higher_level_2, van_higher_level_4], 1)
        return enc, van_higher_level
The higher level structure encoder for the VAN. The high level structure is a vector instead of an image. Args: x: The higher level structure to encode. first_depth: The depth of the first layer. Depth is increased in subsequent layers. reuse: To reuse in variable scope or not. Returns: The encoded image.
juraj-google-style
def get_descriptor_defaults(self, api_info, hostname=None):
    """Gets a default configuration for a service.

    Args:
        api_info: _ApiInfo object for this service.
        hostname: string, Hostname of the API, to override the value set
            on the current service. Defaults to None.

    Returns:
        A dictionary with the default configuration.
    """
    hostname = (hostname or endpoints_util.get_app_hostname() or api_info.hostname)
    # Plain http only for local development servers.
    protocol = 'http' if ((hostname and hostname.startswith('localhost')) or
                          endpoints_util.is_running_on_devserver()) else 'https'
    base_path = api_info.base_path.strip('/')
    defaults = {
        'extends': 'thirdParty.api',
        # NOTE(review): these two URL templates were garbled in the
        # source dump; reconstructed as protocol://hostname/base_path --
        # confirm against the original endpoints api_config source.
        'root': '{0}://{1}/{2}'.format(protocol, hostname, base_path),
        'name': api_info.name,
        'version': api_info.api_version,
        'api_version': api_info.api_version,
        'path_version': api_info.path_version,
        'defaultVersion': True,
        'abstract': False,
        'adapter': {
            'bns': '{0}://{1}/{2}'.format(protocol, hostname, base_path),
            'type': 'lily',
            'deadline': 10.0
        }
    }
    # Optional metadata: only include keys the API actually set.
    if api_info.canonical_name:
        defaults['canonicalName'] = api_info.canonical_name
    if api_info.owner_domain:
        defaults['ownerDomain'] = api_info.owner_domain
    if api_info.owner_name:
        defaults['ownerName'] = api_info.owner_name
    if api_info.package_path:
        defaults['packagePath'] = api_info.package_path
    if api_info.title:
        defaults['title'] = api_info.title
    if api_info.documentation:
        defaults['documentation'] = api_info.documentation
    return defaults
Gets a default configuration for a service. Args: api_info: _ApiInfo object for this service. hostname: string, Hostname of the API, to override the value set on the current service. Defaults to None. Returns: A dictionary with the default configuration.
juraj-google-style
def query_icao(icao: str):
    """Queries AWC for the METAR of a given station.

    Args:
        icao: station ID as a four letters-digits ICAO code.

    Returns:
        AWC result for the station.

    Raises:
        AWCRequestFailed: if the AWC request cannot be completed.
    """
    AWC._validate_icao(icao)
    request_params = {
        'dataSource': 'metars',
        'requestType': 'retrieve',
        'format': 'csv',
        'hoursBeforeNow': 24,
        'stationString': icao,
    }
    try:
        return AWC._query(request_params)
    except RequestsConnectionError:
        raise AWCRequestFailed('failed to obtain requested data from AWC')
Queries AWC for the METAR of a given station Args: icao: station ID as a four letters-digits ICAO code Returns: AWC result for the station
juraj-google-style
def recipe_dv360_segmentology(config, auth_read, recipe_timezone, auth_write, recipe_name, date_range, recipe_slug, partners, advertisers):
    """DV360 funnel analysis using Census data.

    Args:
        config: StarThinker configuration object.
        auth_read (authentication): Credentials used for reading data.
        recipe_timezone (timezone): Timezone for report dates.
        auth_write (authentication): Authorization used for writing data.
        recipe_name (string): Name of report, not needed if ID used.
        date_range (choice): Timeframe to run the report for.
        recipe_slug (string): Name of Google BigQuery dataset to create.
        partners (integer_list): DV360 partner id.
        advertisers (integer_list): Comma delimited list of DV360
            advertiser ids.
    """
    # 1) Ensure the destination BigQuery dataset exists.
    dataset(config, {'description': 'Create a dataset for bigquery tables.', 'hour': [4], 'auth': auth_write, 'dataset': recipe_slug})
    # 2) Install the Pearson significance test UDF into the dataset.
    bigquery(config, {'auth': auth_write, 'function': 'Pearson Significance Test', 'to': {'dataset': recipe_slug}})
    # 3) Schedule a weekly DV360 report keyed by partner/advertiser/zip.
    dbm(config, {'auth': auth_read, 'report': {'filters': {'FILTER_PARTNER': {'values': partners}, 'FILTER_ADVERTISER': {'values': advertisers}}, 'body': {'timezoneCode': recipe_timezone, 'metadata': {'title': recipe_name, 'dataRange': date_range, 'format': 'CSV'}, 'params': {'type': 'TYPE_CROSS_PARTNER', 'groupBys': ['FILTER_PARTNER', 'FILTER_PARTNER_NAME', 'FILTER_ADVERTISER', 'FILTER_ADVERTISER_NAME', 'FILTER_MEDIA_PLAN', 'FILTER_MEDIA_PLAN_NAME', 'FILTER_ZIP_POSTAL_CODE'], 'metrics': ['METRIC_BILLABLE_IMPRESSIONS', 'METRIC_CLICKS', 'METRIC_TOTAL_CONVERSIONS']}, 'schedule': {'frequency': 'WEEKLY'}}}})
    # 4) Load the report results into the DV360_KPI BigQuery table.
    dbm(config, {'auth': auth_read, 'report': {'name': recipe_name}, 'out': {'bigquery': {'auth': auth_write, 'dataset': recipe_slug, 'table': 'DV360_KPI', 'header': True, 'schema': [{'name': 'Partner_Id', 'type': 'INTEGER', 'mode': 'REQUIRED'}, {'name': 'Partner', 'type': 'STRING', 'mode': 'REQUIRED'}, {'name': 'Advertiser_Id', 'type': 'INTEGER', 'mode': 'REQUIRED'}, {'name': 'Advertiser', 'type': 'STRING', 'mode': 'REQUIRED'}, {'name': 'Campaign_Id', 'type': 'INTEGER', 'mode': 'REQUIRED'}, {'name': 'Campaign', 'type': 'STRING', 'mode': 'REQUIRED'}, {'name': 'Zip', 'type': 'STRING', 'mode': 'NULLABLE'}, {'name': 'Impressions', 'type': 'FLOAT', 'mode': 'NULLABLE'}, {'name': 'Clicks', 'type': 'FLOAT', 'mode': 'NULLABLE'}, {'name': 'Conversions', 'type': 'FLOAT', 'mode': 'NULLABLE'}]}}})
    # 5) Create a view normalizing KPIs per advertiser for comparability.
    bigquery(config, {'auth': auth_write, 'from': {'query': 'SELECT\n Partner_Id,\n Partner,\n Advertiser_Id,\n Advertiser,\n Campaign_Id,\n Campaign,\n Zip,\n SAFE_DIVIDE(Impressions, SUM(Impressions) OVER(PARTITION BY Advertiser_Id)) AS Impression,\n SAFE_DIVIDE(Clicks, Impressions) AS Click,\n SAFE_DIVIDE(Conversions, Impressions) AS Conversion,\n Impressions AS Impressions FROM\n `{dataset}.DV360_KPI`; ', 'parameters': {'dataset': recipe_slug}, 'legacy': False}, 'to': {'dataset': recipe_slug, 'view': 'DV360_KPI_Normalized'}})
    # 6) Normalize Census zip-code data into the same dataset.
    census(config, {'auth': auth_write, 'normalize': {'census_geography': 'zip_codes', 'census_year': '2018', 'census_span': '5yr'}, 'to': {'dataset': recipe_slug, 'type': 'view'}})
    # 7) Correlate the normalized KPIs with Census data by zip code.
    census(config, {'auth': auth_write, 'correlate': {'join': 'Zip', 'pass': ['Partner_Id', 'Partner', 'Advertiser_Id', 'Advertiser', 'Campaign_Id', 'Campaign'], 'sum': ['Impressions'], 'correlate': ['Impression', 'Click', 'Conversion'], 'dataset': recipe_slug, 'table': 'DV360_KPI_Normalized', 'significance': 80}, 'to': {'dataset': recipe_slug, 'type': 'view'}})
DV360 funnel analysis using Census data. Args: auth_read (authentication) - Credentials used for reading data. recipe_timezone (timezone) - Timezone for report dates. auth_write (authentication) - Authorization used for writing data. recipe_name (string) - Name of report, not needed if ID used. date_range (choice) - Timeframe to run the report for. recipe_slug (string) - Name of Google BigQuery dataset to create. partners (integer_list) - DV360 partner id. advertisers (integer_list) - Comma delimited list of DV360 advertiser ids.
github-repos
def delete(self, *, auto_commit=False):
    """Remove the resource from the database.

    Args:
        auto_commit (bool): Automatically commit the transaction.
            Default: `False`.

    Returns:
        `None`
    """
    try:
        db.session.delete(self.resource)
        if auto_commit:
            db.session.commit()
    except SQLAlchemyError:
        # Log and roll back; the failure is intentionally not propagated
        # to the caller.
        self.log.exception('Failed deleting resource: {}'.format(self.id))
        db.session.rollback()
Removes a resource from the database Args: auto_commit (bool): Automatically commit the transaction. Default: `False` Returns: `None`
codesearchnet
class AriaSharedExpertsMLP(LlamaMLP):
    """Shared Expert MLP for shared experts.

    Unlike routed experts, shared experts process all tokens without
    routing.  This class reconfigures the intermediate size in comparison
    to the LlamaMLP.

    Args:
        config (`AriaTextConfig`): Configuration object for the Aria
            language model.
    """

    def __init__(self, config: AriaTextConfig):
        # FIX: the parent constructor was previously called with `self`
        # instead of the config object (LlamaMLP.__init__ takes a config).
        super().__init__(config)
        self.intermediate_size = config.intermediate_size * config.moe_num_shared_experts
Shared Expert MLP for shared experts. Unlike routed experts, shared experts process all tokens without routing. This class reconfigures the intermediate size in comparison to the LlamaMLP. Args: config (`AriaTextConfig`): Configuration object for the Aria language model.
github-repos
def take_bug_reports(ads, test_name, begin_time, destination=None):
    """Takes bug reports on a list of android devices.

    Bug reports are taken on all the devices in the list concurrently.
    Taking a bug report is relatively slow, so use this cautiously.

    Args:
        ads: A list of AndroidDevice instances.
        test_name: Name of the test method that triggered this bug report.
        begin_time: timestamp taken when the test started; can be either
            string or int.
        destination: string, path to the directory where the bugreport
            should be saved.
    """
    normalized_begin_time = mobly_logger.normalize_log_line_timestamp(str(begin_time))

    def _take_one(name, start, device, dest):
        device.take_bug_report(name, start, destination=dest)

    work_items = [(test_name, normalized_begin_time, device, destination)
                  for device in ads]
    utils.concurrent_exec(_take_one, work_items)
Takes bug reports on a list of android devices. If you want to take a bug report, call this function with a list of android_device objects in on_fail. Bug reports will be taken on all the devices in the list concurrently. A bug report takes a relatively long time to take, so use this cautiously. Args: ads: A list of AndroidDevice instances. test_name: Name of the test method that triggered this bug report. begin_time: timestamp taken when the test started, can be either string or int. destination: string, path to the directory where the bugreport should be saved.
codesearchnet
def setup(options):
    """Initialize debug/logging in third party libraries correctly.

    Args:
        options (:class:`nyawc.Options`): The options to use for the
            current crawling runtime.
    """
    if not options.misc.debug:
        # Quiet urllib3's InsecureRequestWarning unless debugging: the
        # crawler makes unverified HTTPS requests by design.
        requests.packages.urllib3.disable_warnings(
            requests.packages.urllib3.exceptions.InsecureRequestWarning
        )
Initialize debug/logging in third party libraries correctly. Args: options (:class:`nyawc.Options`): The options to use for the current crawling runtime.
juraj-google-style
def _virtual_molecule(self, mol, ilabels, eq_atoms):
    """Create a virtual molecule from the unique atoms plus the centroids
    of the groups of equivalent atoms.

    Args:
        mol: The molecule. OpenBabel OBMol object.
        ilabels: inchi label map.
        eq_atoms: equivalent atom labels (groups of InChI labels).

    Return:
        The virtual molecule (OpenBabel OBMol object).
    """
    vmol = ob.OBMol()
    # Atoms belonging to any equivalence group are not unique.
    non_unique_atoms = set([a for g in eq_atoms for a in g])
    all_atoms = set(range(1, (len(ilabels) + 1)))
    unique_atom_labels = sorted((all_atoms - non_unique_atoms))
    # Copy each unique atom verbatim into the virtual molecule.
    for i in unique_atom_labels:
        orig_idx = ilabels[(i - 1)]
        oa1 = mol.GetAtom(orig_idx)
        a1 = vmol.NewAtom()
        a1.SetAtomicNum(oa1.GetAtomicNum())
        a1.SetVector(oa1.GetVector())
    # With fewer than 3 atoms an alignment is under-determined: add each
    # equivalence group's centroid as a dummy fluorine atom (atomic
    # number 9), provided it is not too close to an existing atom.
    if (vmol.NumAtoms() < 3):
        for symm in eq_atoms:
            (c1x, c1y, c1z) = self._group_centroid(mol, ilabels, symm)
            min_distance = float('inf')
            for i in range(1, (vmol.NumAtoms() + 1)):
                va = vmol.GetAtom(i)
                distance = math.sqrt(((((c1x - va.x()) ** 2) + ((c1y - va.y()) ** 2)) + ((c1z - va.z()) ** 2)))
                if (distance < min_distance):
                    min_distance = distance
            # 0.2 angstrom threshold keeps the dummy atom from
            # coinciding with a real one.
            if (min_distance > 0.2):
                a1 = vmol.NewAtom()
                a1.SetAtomicNum(9)
                a1.SetVector(c1x, c1y, c1z)
    return vmol
Create a virtual molecule from the unique atoms and the centroids of the equivalent atoms Args: mol: The molecule. OpenBabel OBMol object ilabels: inchi label map eq_atoms: equivalent atom labels farthest_group_idx: The equivalent atom group index in which there is the farthest atom to the centroid Return: The virtual molecule
codesearchnet
def lu_slogdet(LU):
    r"""Natural logarithm of the determinant from an LU decomposition.

    Args:
        LU (tuple): LU decomposition as ``(lu_matrix, pivot_indices)``.

    Returns:
        tuple: sign and log of the absolute determinant.
    """
    lu_matrix = asarray(LU[0], float)
    pivots = asarray(LU[1], float)
    diagonal = lu_matrix.diagonal()
    log_abs_det = _sum(log(_abs(diagonal)))
    det_sign = prod(sign(diagonal))
    # Every row exchange recorded in the pivot vector flips the sign:
    # entries equal to their own index mean "no exchange".
    identity_pivots = arange(pivots.size, dtype='int32')
    n_exchanges = pivots.size - _sum(pivots == identity_pivots)
    if (n_exchanges % 2) == 1:
        det_sign *= (-1.0)
    return (det_sign, log_abs_det)
r"""Natural logarithm of a LU decomposition. Args: LU (tuple): LU decomposition. Returns: tuple: sign and log-determinant.
codesearchnet
def save(self, filething=None, padding=None):
    """save(filething=None, padding=None)

    Save tag changes back to the loaded file.

    Args:
        filething (filething)
        padding (:obj:`mutagen.PaddingFunction`)

    Raises:
        mutagen.MutagenError
    """
    # Partition tags into the four ASF metadata objects according to the
    # constraints each object imposes (size, value type, language,
    # stream association).
    self.to_content_description = {}
    self.to_extended_content_description = {}
    self.to_metadata = {}
    self.to_metadata_library = []
    for name, value in self.tags:
        # Values too large for a 16-bit size field, or GUID values, can
        # only be stored in the Metadata Library Object.
        library_only = (value.data_size() > 0xFFFF or value.TYPE == GUID)
        # Only plain unicode values may go in the Content Description.
        can_cont_desc = value.TYPE == UNICODE
        if library_only or value.language is not None:
            self.to_metadata_library.append((name, value))
        elif value.stream is not None:
            if name not in self.to_metadata:
                self.to_metadata[name] = value
            else:
                # Duplicate names overflow to the library object.
                self.to_metadata_library.append((name, value))
        elif name in ContentDescriptionObject.NAMES:
            if name not in self.to_content_description and can_cont_desc:
                self.to_content_description[name] = value
            else:
                self.to_metadata_library.append((name, value))
        else:
            if name not in self.to_extended_content_description:
                self.to_extended_content_description[name] = value
            else:
                self.to_metadata_library.append((name, value))
    # Ensure every object we may write tags into exists in the header.
    header = self._header
    if header.get_child(ContentDescriptionObject.GUID) is None:
        header.objects.append(ContentDescriptionObject())
    if header.get_child(ExtendedContentDescriptionObject.GUID) is None:
        header.objects.append(ExtendedContentDescriptionObject())
    header_ext = header.get_child(HeaderExtensionObject.GUID)
    if header_ext is None:
        header_ext = HeaderExtensionObject()
        header.objects.append(header_ext)
    if header_ext.get_child(MetadataObject.GUID) is None:
        header_ext.objects.append(MetadataObject())
    if header_ext.get_child(MetadataLibraryObject.GUID) is None:
        header_ext.objects.append(MetadataLibraryObject())
    # Render the new header and splice it over the old one, resizing the
    # file in place as needed.
    fileobj = filething.fileobj
    old_size = header.parse_size(fileobj)[0]
    data = header.render_full(self, fileobj, old_size, padding)
    size = len(data)
    resize_bytes(fileobj, old_size, size, 0)
    fileobj.seek(0)
    fileobj.write(data)
save(filething=None, padding=None) Save tag changes back to the loaded file. Args: filething (filething) padding (:obj:`mutagen.PaddingFunction`) Raises: mutagen.MutagenError
juraj-google-style
def read_submissions_from_directory(dirname, use_gpu):
    """Scans directory and reads all submissions.

    Args:
        dirname: directory to scan.
        use_gpu: whether submissions should use GPU. This argument is
            used to pick the proper Docker container for each submission
            and create an instance of the Attack or Defense class.

    Returns:
        List with submissions (subclasses of Submission class).
    """
    result = []
    for sub_dir in os.listdir(dirname):
        submission_path = os.path.join(dirname, sub_dir)
        try:
            # Skip anything that is not a submission directory with
            # metadata.
            if not os.path.isdir(submission_path):
                continue
            if not os.path.exists(os.path.join(submission_path, 'metadata.json')):
                continue
            with open(os.path.join(submission_path, 'metadata.json')) as f:
                metadata = json.load(f)
            # Prefer the GPU container when requested and available.
            if use_gpu and ('container_gpu' in metadata):
                container = metadata['container_gpu']
            else:
                container = metadata['container']
            entry_point = metadata['entry_point']
            submission_type = metadata['type']
            if submission_type == 'attack' or submission_type == 'targeted_attack':
                submission = Attack(submission_path, container, entry_point, use_gpu)
            elif submission_type == 'defense':
                submission = Defense(submission_path, container, entry_point, use_gpu)
            else:
                raise ValueError('Invalid type of submission: %s' % submission_type)
            result.append(submission)
        except (IOError, KeyError, ValueError):
            # A malformed submission is reported and skipped, not fatal.
            print('Failed to read submission from directory ', submission_path)
    return result
Scans directory and read all submissions. Args: dirname: directory to scan. use_gpu: whether submissions should use GPU. This argument is used to pick proper Docker container for each submission and create instance of Attack or Defense class. Returns: List with submissions (subclasses of Submission class).
juraj-google-style
def from_api_repr(cls, resource, client):
    """Construct an UnknownJob from the JSON representation.

    Args:
        resource (dict): JSON representation of a job.
        client (google.cloud.bigquery.client.Client):
            Client connected to BigQuery API.

    Returns:
        UnknownJob: Job corresponding to the resource.
    """
    # Fall back to the client's project when the resource carries no job
    # reference of its own.
    job_ref_properties = resource.get("jobReference", {"projectId": client.project})
    job_ref = _JobReference._from_api_repr(job_ref_properties)
    job = cls(job_ref, client)
    # Ensure the stored properties always contain a jobReference.
    resource["jobReference"] = job_ref_properties
    job._properties = resource
    return job
Construct an UnknownJob from the JSON representation. Args: resource (dict): JSON representation of a job. client (google.cloud.bigquery.client.Client): Client connected to BigQuery API. Returns: UnknownJob: Job corresponding to the resource.
juraj-google-style
def set(self, value):
    """Atomically set the gauge cell's value.

    Args:
        value: bool value to store in the cell.
    """
    # Delegates to the C API for the atomic update.
    pywrap_tfe.TFE_MonitoringBoolGaugeCellSet(self._cell, value)
Atomically set the value. Args: value: bool value.
github-repos
def delete_handler(Model, name=None, **kwds):
    """Factory returning an action handler that deletes a model record.

    The handler fires when a delete action following nautilus conventions
    is received.

    Args:
        Model (nautilus.BaseModel): The model to delete when the action
            is received.
        name (str, optional): Override for the action name; defaults to
            the model.

    Returns:
        function(type, payload): The action handler for this model.
    """
    from nautilus.database import db

    async def action_handler(service, action_type, payload, props, notify=True, **kwds):
        # Only react to this model's delete action.
        if (action_type == get_crud_action('delete', (name or Model))):
            try:
                # Propagate the correlation id so the caller can match
                # the response to its request.
                message_props = {}
                if ('correlation_id' in props):
                    message_props['correlation_id'] = props['correlation_id']
                # Accept either 'id' or 'pk' as the record identifier.
                record_id = (payload['id'] if ('id' in payload) else payload['pk'])
                try:
                    model_query = Model.select().where((Model.primary_key() == record_id))
                except KeyError:
                    raise RuntimeError('Could not find appropriate id to remove service record.')
                model_query.get().delete_instance()
                # Acknowledge success over the event broker.
                if notify:
                    (await service.event_broker.send(payload='{"status":"ok"}', action_type=change_action_status(action_type, success_status()), **message_props))
            except Exception as err:
                # Report the failure over the broker, or re-raise when
                # notifications are disabled.
                if notify:
                    (await service.event_broker.send(payload=str(err), action_type=change_action_status(action_type, error_status()), **message_props))
                else:
                    raise err
    return action_handler
This factory returns an action handler that deletes an instance of the specified model when a delete action is received, assuming the action follows nautilus conventions. Args: Model (nautilus.BaseModel): The model to delete when the action is received. Returns: function(type, payload): The action handler for this model
codesearchnet
def _post(self, url, data, scope): self._create_session(scope) response = self.session.post(url, data=data) return (response.status_code, response.text)
Make a POST request using the session object to a Degreed endpoint. Args: url (str): The url to send a POST request to. data (str): The json encoded payload to POST. scope (str): Must be one of the scopes Degreed expects: - `CONTENT_PROVIDER_SCOPE` - `COMPLETION_PROVIDER_SCOPE`
codesearchnet
def delete(self, main_type, sub_type, unique_id, owner=None):
    """Delete the Indicator/Group/Victim or Security Label.

    Args:
        main_type: The main API branch (e.g. ``indicators``).
        sub_type: The sub type, or a falsy value when there is none.
        unique_id: The unique id of the object to delete.
        owner: Optional owner added as a query parameter.

    Returns:
        The response of the DELETE request.
    """
    query_params = {}
    if owner:
        query_params['owner'] = owner
    if sub_type:
        url = '/v2/{}/{}/{}'.format(main_type, sub_type, unique_id)
    else:
        url = '/v2/{}/{}'.format(main_type, unique_id)
    return self.tcex.session.delete(url, params=query_params)
Deletes the Indicator/Group/Victim or Security Label Args: main_type: sub_type: unique_id: owner:
juraj-google-style
def set_string(self, option, value):
    """Set a string option.

    Args:
        option (str): name of option.
        value (str): value of the option.

    Raises:
        TypeError: Value must be a string.
    """
    if isinstance(value, str):
        self.options[option] = value
    else:
        raise TypeError(('%s must be a string' % option))
Set a string option. Args: option (str): name of option. value (str): value of the option. Raises: TypeError: Value must be a string.
codesearchnet
def group_associations(self, main_type, sub_type, unique_id, owner=None, params=None):
    """Yield the groups associated with the given object.

    Args:
        owner: Optional owner added to the query parameters.
        main_type: The main API branch.
        sub_type: The sub type, or a falsy value when there is none.
        unique_id: The unique id of the object.
        params: Optional extra query parameters.

    Yields:
        One associated group per iteration.
    """
    query_params = params or {}
    if owner:
        query_params['owner'] = owner
    if sub_type:
        url = '/v2/{}/{}/{}/groups'.format(main_type, sub_type, unique_id)
    else:
        url = '/v2/{}/{}/groups'.format(main_type, unique_id)
    yield from self._iterate(url, query_params, 'group')
Args: owner: main_type: sub_type: unique_id: params: Return:
juraj-google-style
def get_cached_filename(self, filename, extention, settings_list=None):
    """Create a filename carrying the md5 cache hash of the settings.

    Args:
        filename (str): the filename without extention.
        extention (str): the file extention without dot (i.e. 'pkl').
        settings_list (dict|list): the settings list as list (optional);
            currently unused by this method.

    Returns:
        str: ``"<filename>_<hash>.<extention>"``.
    """
    stem_with_hash = '{}_{}'.format(filename, self.get_hash())
    return '{}.{}'.format(stem_with_hash, extention)
Creates a filename with md5 cache string based on settings list Args: filename (str): the filename without extention extention (str): the file extention without dot. (i.e. 'pkl') settings_list (dict|list): the settings list as list (optional) NB! The dictionaries have to be sorted or the hash id will change arbitrarily.
codesearchnet
def delete(self, table_name):
    """Delete a table in user's CARTO account.

    Args:
        table_name (str): Name of table to delete.

    Returns:
        bool: `True` if table is removed.

    Raises:
        CartoException: if the table could not be deleted.
    """
    dataset = Dataset(self, table_name)
    deleted = dataset.delete()
    if deleted:
        return deleted
    # FIX: the original raised CartoException(.format(table_name)) -- a
    # syntax error caused by a lost string literal; restore a message.
    raise CartoException(
        'The table `{}` could not be deleted'.format(table_name))
Delete a table in user's CARTO account. Args: table_name (str): Name of table to delete Returns: bool: `True` if table is removed
juraj-google-style
def relu6(x):
    """Rectified linear unit activation function with upper bound of 6.

    It is defined as `f(x) = np.clip(x, 0, 6)`.

    Args:
        x: Input tensor.

    Returns:
        A tensor with the same shape as `x`.

    Example:

    >>> x = keras.ops.convert_to_tensor([-3.0, -2.0, 0.1, 0.2, 6.0, 8.0])
    >>> keras.ops.relu6(x)
    array([0.0, 0.0, 0.1, 0.2, 6.0, 6.0], dtype=float32)
    """
    # Defer to the symbolic op during graph construction; otherwise
    # compute eagerly via the active backend.
    if any_symbolic_tensors((x,)):
        return Relu6().symbolic_call(x)
    return backend.nn.relu6(x)
Rectified linear unit activation function with upper bound of 6. It is defined as `f(x) = np.clip(x, 0, 6)`. Args: x: Input tensor. Returns: A tensor with the same shape as `x`. Example: >>> x = keras.ops.convert_to_tensor([-3.0, -2.0, 0.1, 0.2, 6.0, 8.0]) >>> keras.ops.relu6(x) array([0.0, 0.0, 0.1, 0.2, 6.0, 6.0], dtype=float32)
github-repos
def normalized_start(self): namespaces_after_key = list(self.make_datastore_query().Run(limit=1)) if (not namespaces_after_key): return None namespace_after_key = (namespaces_after_key[0].name() or '') return NamespaceRange(namespace_after_key, self.namespace_end, _app=self.app)
Returns a NamespaceRange with leading non-existant namespaces removed. Returns: A copy of this NamespaceRange whose namespace_start is adjusted to exclude the portion of the range that contains no actual namespaces in the datastore. None is returned if the NamespaceRange contains no actual namespaces in the datastore.
codesearchnet
def create_binary_descriptor(descriptor): func_names = {0: 'copy_latest_a', 1: 'average_a', 2: 'copy_all_a', 3: 'sum_a', 4: 'copy_count_a', 5: 'trigger_streamer', 6: 'call_rpc', 7: 'subtract_afromb'} func_codes = {y: x for x, y in func_names.items()} node, inputs, processing = parse_node_descriptor(descriptor, DeviceModel()) func_code = func_codes.get(processing) if func_code is None: raise ArgumentError("Unknown processing function", function=processing) stream_a, trigger_a = inputs[0] stream_a = stream_a.encode() if len(inputs) == 2: stream_b, trigger_b = inputs[1] stream_b = stream_b.encode() else: stream_b, trigger_b = 0xFFFF, None if trigger_a is None: trigger_a = TrueTrigger() if trigger_b is None: trigger_b = TrueTrigger() ref_a = 0 if isinstance(trigger_a, InputTrigger): ref_a = trigger_a.reference ref_b = 0 if isinstance(trigger_b, InputTrigger): ref_b = trigger_b.reference trigger_a = _create_binary_trigger(trigger_a) trigger_b = _create_binary_trigger(trigger_b) combiner = node.trigger_combiner bin_desc = struct.pack("<LLHHHBBBB2x", ref_a, ref_b, node.stream.encode(), stream_a, stream_b, func_code, trigger_a, trigger_b, combiner) return bin_desc
Convert a string node descriptor into a 20-byte binary descriptor. This is the inverse operation of parse_binary_descriptor and composing the two operations is a noop. Args: descriptor (str): A string node descriptor Returns: bytes: A 20-byte binary node descriptor.
juraj-google-style
def world_info(world_name, world_config=None, initial_indent="", next_indent=" "): if world_config is None: for config, _ in _iter_packages(): for world in config["maps"]: if world["name"] == world_name: world_config = world if world_config is None: raise HolodeckException("Couldn't find world " + world_name) second_indent = initial_indent + next_indent agent_indent = second_indent + next_indent sensor_indent = agent_indent + next_indent print(initial_indent, world_config["name"]) print(second_indent, "Resolution:", world_config["window_width"], "x", world_config["window_height"]) print(second_indent, "Agents:") for agent in world_config["agents"]: print(agent_indent, "Name:", agent["agent_name"]) print(agent_indent, "Type:", agent["agent_type"]) print(agent_indent, "Sensors:") for sensor in agent["sensors"]: print(sensor_indent, sensor)
Gets and prints the information of a world. Args: world_name (str): the name of the world to retrieve information for world_config (dict optional): A dictionary containing the world's configuration. Will find the config if None. Defaults to None. initial_indent (str optional): This indent will apply to each output line. Defaults to "". next_indent (str optional): This indent will be applied within each nested line. Defaults to " ".
juraj-google-style
def clip_gradient(net, clip_value_min, clip_value_max, name=None): if (not net.dtype.is_floating): raise ValueError('clip_gradient does not support non-float `net` inputs.') with tf.name_scope(name, 'clip_gradient', values=[net]): dtype = net.dtype.base_dtype min_tensor = tf.convert_to_tensor(clip_value_min, dtype=dtype) max_tensor = tf.convert_to_tensor(clip_value_max, dtype=dtype) clip_gradient_op = _clip_gradient_op(dtype) output = clip_gradient_op(net, min_tensor, max_tensor) output.set_shape(net.get_shape()) return output
Clips respective gradients of a given tensor. Acts as identity for the forward pass, but clips gradient tensor element-wise by value during the backward pass. Any gradient values less than `clip_value_min` or greater than `clip_values_max` are set to the respective limit values. Args: net: A `tf.Tensor`. clip_value_min: A 0-D Tensor or scalar. The minimum value to clip by. clip_value_max: A 0-D Tensor or scalar. The maximum value to clip by. name: A name for the operation (optional, default 'clip_gradient'). Returns: A `tf.Tensor` with the same type as the input tensor. Raises: ValueError: If `net` dtype is non-float.
codesearchnet
def _get_oauth2_client_id_and_secret(settings_instance): secret_json = getattr(settings_instance, 'GOOGLE_OAUTH2_CLIENT_SECRETS_JSON', None) if (secret_json is not None): return _load_client_secrets(secret_json) else: client_id = getattr(settings_instance, 'GOOGLE_OAUTH2_CLIENT_ID', None) client_secret = getattr(settings_instance, 'GOOGLE_OAUTH2_CLIENT_SECRET', None) if ((client_id is not None) and (client_secret is not None)): return (client_id, client_secret) else: raise exceptions.ImproperlyConfigured('Must specify either GOOGLE_OAUTH2_CLIENT_SECRETS_JSON, or both GOOGLE_OAUTH2_CLIENT_ID and GOOGLE_OAUTH2_CLIENT_SECRET in settings.py')
Initializes client id and client secret based on the settings. Args: settings_instance: An instance of ``django.conf.settings``. Returns: A 2-tuple, the first item is the client id and the second item is the client secret.
codesearchnet
def tanh(x): return nn.tanh(x)
Hyperbolic tangent activation function. For example: >>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32) >>> b = tf.keras.activations.tanh(a) >>> b.numpy() array([-0.9950547, -0.7615942, 0., 0.7615942, 0.9950547], dtype=float32) Args: x: Input tensor. Returns: Tensor of same shape and dtype of input `x`, with tanh activation: `tanh(x) = sinh(x)/cosh(x) = ((exp(x) - exp(-x))/(exp(x) + exp(-x)))`.
github-repos
def __init__(self, value, ctype=None): if isinstance(value, str) and value == 'INFINITY': self._value = np.inf elif isinstance(value, str) and value == '-INFINITY': self._value = -np.inf else: self._value = np.array(value) self._ctype = ctype or dtype_to_ctype(self._value.dtype) self._mot_float_dtype = None
A kernel input scalar. This will insert the given value directly into the kernel's source code, and will not load it as a buffer. Args: value (number): the number to insert into the kernel as a scalar. ctype (str): the desired c-type for in use in the kernel, like ``int``, ``float`` or ``mot_float_type``. If None it is implied from the value.
juraj-google-style
def prepare_data(data_dir, fileroot, block_pct_tokens_thresh=0.1): if (not (0.0 <= block_pct_tokens_thresh <= 1.0)): raise ValueError('block_pct_tokens_thresh must be in the range [0.0, 1.0]') html = read_html_file(data_dir, fileroot) blocks = read_gold_standard_blocks_file(data_dir, fileroot, split_blocks=True) content_blocks = [] comments_blocks = [] for block in blocks: block_split = block.split('\t') num_block_tokens = len(block_split[2].split()) content_blocks.append((float(block_split[0]), num_block_tokens, block_split[3].split())) comments_blocks.append((float(block_split[1]), num_block_tokens, block_split[4].split())) parsed_content_blocks = _parse_content_or_comments_blocks(content_blocks, block_pct_tokens_thresh) parsed_comments_blocks = _parse_content_or_comments_blocks(comments_blocks, block_pct_tokens_thresh) return (html, parsed_content_blocks, parsed_comments_blocks)
Prepare data for a single HTML + gold standard blocks example, uniquely identified by ``fileroot``. Args: data_dir (str) fileroot (str) block_pct_tokens_thresh (float): must be in [0.0, 1.0] Returns: Tuple[str, Tuple[np.array[int], np.array[int], List[str]], Tuple[np.array[int], np.array[int], List[str]]]: The first element is simply the raw html as a string. The second and third elements are 3-tuples for content and comments, respectively, where the first element is a numpy array of 1s and 0s whose values correspond to whether or not a given block is considered non-content or not; the second element is a numpy integer array whose values are the total number of tokens in each block; and the third element is a flat list of content or comment tokens as strings, concatenated from all blocks. See Also: :func:`prepare_all_data`
codesearchnet
def run_multiple(self, eventLoops): self.nruns += len(eventLoops) return self.communicationChannel.put_multiple(eventLoops)
run the event loops in the background. Args: eventLoops (list): a list of event loops to run
codesearchnet
def enqueue_tpu_embedding_integer_batch(batch, device_ordinal, mode_override=None, name=None): if mode_override is None: mode_override = 'unspecified' return gen_tpu_ops.enqueue_tpu_embedding_integer_batch(batch=batch, device_ordinal=device_ordinal, mode_override=mode_override, name=name)
A placeholder op for enqueueing embedding IDs to the TPU. Args: batch: A list of 1D tensors, one for each embedding table, containing the indices into the tables. device_ordinal: The TPU device to use. Should be >= 0 and less than the number of TPU cores in the task on which the node is placed. mode_override: A string input that overrides the mode specified in the TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', 'train', 'backward_pass_only'}. When set to 'unspecified', the mode set in TPUEmbeddingConfiguration is used, otherwise mode_override is used (optional). name: A name for the operation (optional). Returns: An EnqueueTPUEmbeddingIntegerBatch operation.
github-repos
def output_classes(self): return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_classes(), self._element_spec)
Returns the class of each component of an element of this iterator. The expected values are `tf.Tensor` and `tf.sparse.SparseTensor`. Returns: A (nested) structure of Python `type` objects corresponding to each component of an element of this dataset.
github-repos
def in_labelset(xmrs, nodeids, label=None): nodeids = set(nodeids) if (label is None): label = xmrs.ep(next(iter(nodeids))).label return nodeids.issubset(xmrs._vars[label]['refs']['LBL'])
Test if all nodeids share a label. Args: nodeids: iterable of nodeids label (str, optional): the label that all nodeids must share Returns: bool: `True` if all nodeids share a label, otherwise `False`
codesearchnet
def zpath(filename): for ext in ["", '.gz', '.GZ', '.bz2', '.BZ2', '.z', '.Z']: zfilename = "{}{}".format(filename, ext) if os.path.exists(zfilename): return zfilename return filename
Returns an existing (zipped or unzipped) file path given the unzipped version. If no path exists, returns the filename unmodified. Args: filename: filename without zip extension Returns: filename with a zip extension (unless an unzipped version exists). If filename is not found, the same filename is returned unchanged.
juraj-google-style
def fillPelicanHole(site, username, password, tstat_name, start_time, end_time): start = datetime.strptime(start_time, _INPUT_TIME_FORMAT).replace(tzinfo=pytz.utc).astimezone(_pelican_time) end = datetime.strptime(end_time, _INPUT_TIME_FORMAT).replace(tzinfo=pytz.utc).astimezone(_pelican_time) heat_needs_fan = _lookupHeatNeedsFan(site, username, password, tstat_name) if (heat_needs_fan is None): return None history_blocks = [] while (start < end): block_start = start block_end = min((start + timedelta(days=30)), end) blocks = _lookupHistoricalData(site, username, password, tstat_name, block_start, block_end) if (blocks is None): return None history_blocks.extend(blocks) start += timedelta(days=30, minutes=1) output_rows = [] for block in history_blocks: runStatus = block.find('runStatus').text if runStatus.startswith('Heat'): fanState = (heatNeedsFan == 'Yes') else: fanState = (runStatus != 'Off') api_time = datetime.strptime(block.find('timestamp').text, '%Y-%m-%dT%H:%M').replace(tzinfo=_pelican_time) timestamp = int((api_time.timestamp() * (10 ** 9))) output_rows.append({'temperature': float(block.find('temperature').text), 'relative_humidity': float(block.find('humidity').text), 'heating_setpoint': float(block.find('heatSetting').text), 'cooling_setpoint': float(block.find('coolSetting').text), 'override': (block.find('setBy').text != 'Schedule'), 'fan': fanState, 'mode': _mode_name_mappings[block.find('system').text], 'state': _state_mappings.get(runStatus, 0), 'time': timestamp}) df = pd.DataFrame(output_rows) df.drop_duplicates(subset='time', keep='first', inplace=True) return df
Fill a hole in a Pelican thermostat's data stream. Arguments: site -- The thermostat's Pelican site name username -- The Pelican username for the site password -- The Pelican password for the site tstat_name -- The name of the thermostat, as identified by Pelican start_time -- The start of the data hole in UTC, e.g. "2018-01-29 15:00:00" end_time -- The end of the data hole in UTC, e.g. "2018-01-29 16:00:00" Returns: A Pandas dataframe with historical Pelican data that falls between the specified start and end times. Note that this function assumes the Pelican thermostat's local time zone is US/Pacific. It will properly handle PST vs. PDT.
codesearchnet
def getent(refresh=False): if 'group.getent' in __context__ and not refresh: return __context__['group.getent'] ret = [] results = _get_all_groups() for result in results: group = {'gid': __salt__['file.group_to_gid'](result.Name), 'members': [_get_username(x) for x in result.members()], 'name': result.Name, 'passwd': 'x'} ret.append(group) __context__['group.getent'] = ret return ret
Return info on all groups Args: refresh (bool): Refresh the info for all groups in ``__context__``. If False only the groups in ``__context__`` will be returned. If True the ``__context__`` will be refreshed with current data and returned. Default is False Returns: A list of groups and their information CLI Example: .. code-block:: bash salt '*' group.getent
juraj-google-style
def get_sitej(self, site_index, image_index): atoms_n_occu = self.s[site_index].species lattice = self.s.lattice coords = self.s[site_index].frac_coords + self.offsets[image_index] return PeriodicSite(atoms_n_occu, coords, lattice)
Assuming there is some value in the connectivity array at indices (1, 3, 12). sitei can be obtained directly from the input structure (structure[1]). sitej can be obtained by passing 3, 12 to this function Args: site_index (int): index of the site (3 in the example) image_index (int): index of the image (12 in the example)
juraj-google-style
def get_roles(self): prefix = (_IDENTITY_NS + _ROLE_NS) rolelist_list = [_create_from_bytes(d, identity_pb2.RoleList) for (_, d) in self._state_view.leaves(prefix=prefix)] roles = [] for role_list in rolelist_list: for role in role_list.roles: roles.append(role) return sorted(roles, key=(lambda r: r.name))
Return all the Roles under the Identity namespace. Returns: (list): A list containing all the Roles under the Identity namespace.
codesearchnet
def get_items_by_ids(self, item_ids, item_type=None): urls = [urljoin(self.item_url, F"{i}.json") for i in item_ids] result = self._run_async(urls=urls) items = [Item(r) for r in result if r] if item_type: return [item for item in items if item.item_type == item_type] else: return items
Given a list of item ids, return all the Item objects Args: item_ids (obj): List of item IDs to query item_type (str): (optional) Item type to filter results with Returns: List of `Item` objects for given item IDs and given item type
juraj-google-style
def parse_readable_time_str(time_str): def parse_positive_float(value_str): value = float(value_str) if value < 0: raise ValueError('Invalid time %s. Time value must be positive.' % value_str) return value time_str = time_str.strip() if time_str.endswith('us'): return int(parse_positive_float(time_str[:-2])) elif time_str.endswith('ms'): return int(parse_positive_float(time_str[:-2]) * 1000.0) elif time_str.endswith('s'): return int(parse_positive_float(time_str[:-1]) * 1000000.0) return int(parse_positive_float(time_str))
Parses a time string in the format N, Nus, Nms, Ns. Args: time_str: (`str`) string consisting of an integer time value optionally followed by 'us', 'ms', or 's' suffix. If suffix is not specified, value is assumed to be in microseconds. (e.g. 100us, 8ms, 5s, 100). Returns: Microseconds value.
github-repos
def __init__(self, msg, exception_details=None): message = '%s with exceptions %s' % (msg, exception_details) super().__init__(message) self.exception_details = exception_details
Class representing the errors thrown in the batch file operations. Args: msg: Message string for the exception thrown exception_details: Optional map of individual input to exception for failed operations in batch. This parameter is optional so if specified the user can assume that the all errors in the filesystem operation have been reported. When the details are missing then the operation may have failed anywhere so the user should use match to determine the current state of the system.
github-repos
def replace_in_file(filename: str, text_from: str, text_to: str) -> None: log.info('Amending {}: {} -> {}', filename, repr(text_from), repr(text_to)) with open(filename) as infile: contents = infile.read() contents = contents.replace(text_from, text_to) with open(filename, 'w') as outfile: outfile.write(contents)
Replaces text in a file. Args: filename: filename to process (modifying it in place) text_from: original text to replace text_to: replacement text
codesearchnet
async def client_event_handler(self, client_id, event_tuple, user_data): conn_string, event_name, event = event_tuple if event_name == 'report': report = event.serialize() report['encoded_report'] = base64.b64encode(report['encoded_report']) msg_payload = dict(connection_string=conn_string, serialized_report=report) msg_name = OPERATIONS.NOTIFY_REPORT elif event_name == 'trace': encoded_payload = base64.b64encode(event) msg_payload = dict(connection_string=conn_string, payload=encoded_payload) msg_name = OPERATIONS.NOTIFY_TRACE elif event_name == 'progress': msg_payload = dict(connection_string=conn_string, operation=event.get('operation'), done_count=event.get('finished'), total_count=event.get('total')) msg_name = OPERATIONS.NOTIFY_PROGRESS elif event_name == 'device_seen': msg_payload = event msg_name = OPERATIONS.NOTIFY_DEVICE_FOUND elif event_name == 'broadcast': report = event.serialize() report['encoded_report'] = base64.b64encode(report['encoded_report']) msg_payload = dict(connection_string=conn_string, serialized_report=report) msg_name = OPERATIONS.NOTIFY_BROADCAST else: self._logger.debug("Not forwarding unknown event over websockets: %s", event_tuple) return try: self._logger.debug("Sending event %s: %s", msg_name, msg_payload) await self.server.send_event(user_data, msg_name, msg_payload) except websockets.exceptions.ConnectionClosed: self._logger.debug("Could not send notification because connection was closed for client %s", client_id)
Forward an event on behalf of a client. This method is called by StandardDeviceServer when it has an event that should be sent to a client. Args: client_id (str): The client that we should send this event to event_tuple (tuple): The conn_string, event_name and event object passed from the call to notify_event. user_data (object): The user data passed in the call to :meth:`setup_client`.
juraj-google-style
def RegisterRecordType(cls, record_class): record_type = record_class.MatchType() if record_type not in UpdateRecord.KNOWN_CLASSES: UpdateRecord.KNOWN_CLASSES[record_type] = [] UpdateRecord.KNOWN_CLASSES[record_type].append(record_class)
Register a known record type in KNOWN_CLASSES. Args: record_class (UpdateRecord): An update record subclass.
juraj-google-style
def create_graph_from_data(self, data, **kwargs): self.arguments['{VERBOSE}'] = str(self.verbose).upper() results = self._run_ccdr(data, verbose=self.verbose) return nx.relabel_nodes(nx.DiGraph(results), {idx: i for idx, i in enumerate(data.columns)})
Apply causal discovery on observational data using CCDr. Args: data (pandas.DataFrame): DataFrame containing the data Returns: networkx.DiGraph: Solution given by the CCDR algorithm.
juraj-google-style
def validate_instance(instance, options=None): if 'type' not in instance: raise ValidationError("Input must be an object with a 'type' property.") if not options: options = ValidationOptions() error_gens = [] if instance['type'] == 'bundle' and 'objects' in instance: for sdo in instance['objects']: if 'type' not in sdo: raise ValidationError("Each object in bundle must have a 'type' property.") error_gens += _schema_validate(sdo, options) else: error_gens += _schema_validate(instance, options) must_checks = _get_musts(options) should_checks = _get_shoulds(options) output.info("Running the following additional checks: %s." % ", ".join(x.__name__ for x in chain(must_checks, should_checks))) try: errors = _iter_errors_custom(instance, must_checks, options) warnings = _iter_errors_custom(instance, should_checks, options) if options.strict: chained_errors = chain(errors, warnings) warnings = [] else: chained_errors = errors warnings = [pretty_error(x, options.verbose) for x in warnings] except schema_exceptions.RefResolutionError: raise SchemaInvalidError('Invalid JSON schema: a JSON reference ' 'failed to resolve') error_gens += [(chained_errors, '')] error_list = [] for gen, prefix in error_gens: for error in gen: msg = prefix + pretty_error(error, options.verbose) error_list.append(SchemaError(msg)) if error_list: valid = False else: valid = True return ObjectValidationResults(is_valid=valid, object_id=instance.get('id', ''), errors=error_list, warnings=warnings)
Perform STIX JSON Schema validation against STIX input. Find the correct schema by looking at the 'type' property of the `instance` JSON object. Args: instance: A Python dictionary representing a STIX object with a 'type' property. options: ValidationOptions instance with validation options for this validation run. Returns: A dictionary of validation results
juraj-google-style
def extract_string_pairs_in_ib_file(file_path, special_ui_components_prefix): try: results = [] xmldoc = minidom.parse(file_path) element_name_to_add_func = {'label': add_string_pairs_from_label_element, 'button': add_string_pairs_from_button_element, 'textField': add_string_pairs_from_text_field_element, 'textView': add_string_pairs_from_text_view_element} for element_name in element_name_to_add_func: add_func = element_name_to_add_func[element_name] elements = xmldoc.getElementsByTagName(element_name) for element in elements: add_func(file_path, results, element, special_ui_components_prefix) jtl_brackets_find_results = re.findall(JTL_REGEX, open(file_path).read()) unescaped_jtl_brackets_find_results = [(unescape(x), unescape(y)) for (x, y) in jtl_brackets_find_results] results += unescaped_jtl_brackets_find_results if len(results) > 0: results = [(None, os.path.basename(file_path))] + results return results except Exception, e: logging.warn("ERROR: Error processing %s (%s: %s)", file_path, type(e), str(e)) return []
Extract the strings pairs (key and comment) from a xib file. Args: file_path (str): The path to the xib file. special_ui_components_prefix (str): If not None, extraction will not warn about internationalized UI components with this class prefix. Returns: list: List of tuples representing the string pairs.
juraj-google-style
def __init__(self, name, fn, dataFormat = DataFormats.DEFAULT): Aggregator.__init__(self, name) self.fn = fn
Creates a highlight aggregator - this will pick one of the values to highlight. Args: name: The name of this aggregator. fn: Callable that takes (a, b) and returns True if b should be selected as the highlight, where as is the previous chosen highlight.
juraj-google-style
def line_distance_similarity(p1a, p1b, p2a, p2b, T=CLOSE_DISTANCE_THRESHOLD): d1 = distance_similarity(p1a, p1b, p2a, T=T) d2 = distance_similarity(p1a, p1b, p2b, T=T) return abs(d1 + d2) * 0.5
Line distance similarity between two line segments Args: p1a ([float, float]): x and y coordinates. Line A start p1b ([float, float]): x and y coordinates. Line A end p2a ([float, float]): x and y coordinates. Line B start p2b ([float, float]): x and y coordinates. Line B end Returns: float: between 0 and 1. Where 1 is very similar and 0 is completely different
juraj-google-style
def moma(self, wt_fluxes): reactions = set(self._adjustment_reactions()) v = self._v obj_expr = 0 for f_reaction, f_value in iteritems(wt_fluxes): if f_reaction in reactions: obj_expr += (f_value - v[f_reaction])**2 self._prob.set_objective(obj_expr) self._solve(lp.ObjectiveSense.Minimize)
Minimize the redistribution of fluxes using Euclidean distance. Minimizing the redistribution of fluxes using a quadratic objective function. The distance is minimized by minimizing the sum of (wild type - knockout)^2. Args: wt_fluxes: Dictionary of all the wild type fluxes that will be used to find a close MOMA solution. Fluxes can be expiremental or calculated using :meth: get_fba_flux(objective).
juraj-google-style
def set_of_vars(arg_plot): sovs = set(tuple((var + '+').split('+')[:2]) for var in arg_plot.split(',')) sovs.discard(('', '')) return sovs
Build set of needed field variables. Each var is a tuple, first component is a scalar field, second component is either: - a scalar field, isocontours are added to the plot. - a vector field (e.g. 'v' for the (v1,v2,v3) vector), arrows are added to the plot. Args: arg_plot (str): string with variable names separated with ``,`` (figures), and ``+`` (same plot). Returns: set of str: set of needed field variables.
juraj-google-style
def _default_ising_beta_range(h, J): abs_h = [abs(hh) for hh in h.values() if hh != 0] abs_J = [abs(jj) for jj in J.values() if jj != 0] abs_biases = abs_h + abs_J if not abs_biases: return [0.1, 1.0] min_delta_energy = min(abs_biases) abs_bias_dict = {k: abs(v) for k, v in h.items()} for (k1, k2), v in J.items(): abs_bias_dict[k1] += abs(v) abs_bias_dict[k2] += abs(v) max_delta_energy = max(abs_bias_dict.values()) hot_beta = np.log(2) / max_delta_energy cold_beta = np.log(100) / min_delta_energy return [hot_beta, cold_beta]
Determine the starting and ending beta from h J Args: h (dict) J (dict) Assume each variable in J is also in h. We use the minimum bias to give a lower bound on the minimum energy gap, such at the final sweeps we are highly likely to settle into the current valley.
juraj-google-style
def get_mac_dot_app_dir(directory): return os.path.dirname(os.path.dirname(os.path.dirname(directory)))
Returns parent directory of mac .app Args: directory (str): Current directory Returns: (str): Parent directory of mac .app
juraj-google-style
def index(self, text, terms=None, **kwargs): self.clear() terms = (terms or text.terms.keys()) pairs = combinations(terms, 2) count = comb(len(terms), 2) for (t1, t2) in bar(pairs, expected_size=count, every=1000): score = text.score_braycurtis(t1, t2, **kwargs) self.set_pair(t1, t2, score)
Index all term pair distances. Args: text (Text): The source text. terms (list): Terms to index.
codesearchnet
def get_checkpoint_path(model_path): if (os.path.basename(model_path) == model_path): model_path = os.path.join('.', model_path) if (os.path.basename(model_path) == 'checkpoint'): assert tfv1.gfile.Exists(model_path), model_path model_path = tf.train.latest_checkpoint(os.path.dirname(model_path)) new_path = model_path if ('00000-of-00001' in model_path): new_path = model_path.split('.data')[0] elif model_path.endswith('.index'): new_path = model_path.split('.index')[0] if (new_path != model_path): logger.info('Checkpoint path {} is auto-corrected to {}.'.format(model_path, new_path)) model_path = new_path assert (tfv1.gfile.Exists(model_path) or tfv1.gfile.Exists((model_path + '.index'))), model_path return model_path
Work around TF problems in checkpoint path handling. Args: model_path: a user-input path Returns: str: the argument that can be passed to NewCheckpointReader
codesearchnet
def create_config(sections, section_contents): (sections_length, section_contents_length) = (len(sections), len(section_contents)) if (sections_length != section_contents_length): raise ValueError('Mismatch between argument lengths.\nlen(sections) = {}\nlen(section_contents) = {}'.format(sections_length, section_contents_length)) config = configparser.ConfigParser() for (section, section_content) in zip(sections, section_contents): config[section] = section_content return config
Create a config file from the provided sections and key value pairs. Args: sections (List[str]): A list of section keys. key_value_pairs (Dict[str, str]): A list of of dictionaries. Must be as long as the list of sections. That is to say, if there are two sections, there should be two dicts. Returns: configparser.ConfigParser: A ConfigParser. Raises: ValueError
codesearchnet
def Aggregated(self, request, global_params=None): config = self.GetMethodConfig('Aggregated') return self._RunMethod(config, request, global_params=global_params)
List the jobs of a project across all regions. **Note:** This method doesn't support filtering the list of jobs by name. Args: request: (DataflowProjectsJobsAggregatedRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (ListJobsResponse) The response message.
github-repos
def _date_to_datetime(value): if (not isinstance(value, datetime.date)): raise TypeError(('Cannot convert to datetime expected date value; received %s' % value)) return datetime.datetime(value.year, value.month, value.day)
Convert a date to a datetime for Cloud Datastore storage. Args: value: A datetime.date object. Returns: A datetime object with time set to 0:00.
codesearchnet
def get_by_alias(self, alias): if alias not in self._aliases: raise DataInvalidAlias('A dataset with alias {} does not exist'.format(alias)) return self.get_by_index(self._aliases[alias])
Return a dataset by its alias. Args: alias (str): The alias of the dataset that should be returned. Raises: DataInvalidAlias: If the alias does not represent a valid dataset.
juraj-google-style
def PyParseJoinList(string, location, tokens): join_list = [] for token in tokens: try: join_list.append(str(token)) except UnicodeDecodeError: join_list.append(repr(token)) tokens[0] = ''.join(join_list) del tokens[1:]
Return a joined token from a list of tokens. This is a callback method for pyparsing setParseAction that modifies the returned token list to join all the elements in the list to a single token. Args: string (str): original string. location (int): location in the string where the match was made. tokens (list[str]): extracted tokens, where the string to be converted is stored.
juraj-google-style
def rep(parser: Union[(Parser, Sequence[Input])]) -> RepeatedParser: if isinstance(parser, str): parser = lit(parser) return RepeatedParser(parser)
Match a parser zero or more times repeatedly. This matches ``parser`` multiple times in a row. A list is returned containing the value from each match. If there are no matches, an empty list is returned. Args: parser: Parser or literal
codesearchnet
def _normalize_pattern(pattern): if pattern.startswith('regex:'): pattern_type = 'regex' pattern = pattern[len('regex:'):] elif pattern.startswith('wildcard:'): pattern_type = 'wildcard' pattern = pattern[len('wildcard:'):] elif pattern.startswith('literal:'): pattern_type = 'literal' pattern = pattern[len('literal:'):] elif RegexRoute.like(pattern): pattern_type = 'regex' elif WildcardRoute.like(pattern): pattern_type = 'wildcard' else: pattern_type = 'literal' return pattern_type, pattern
Return a normalized form of the pattern. Normalize the pattern by removing pattern type prefix if it exists in the pattern. Then return the pattern type and the pattern as a tuple of two strings. Arguments: pattern (str): Route pattern to match request paths Returns: tuple: Ruple of pattern type (str) and pattern (str)
juraj-google-style
def trunc_normal_tf_(tensor: torch.Tensor, mean: float=0.0, std: float=1.0, a: float=-2.0, b: float=2.0) -> torch.Tensor:
    """Fill ``tensor`` in place with truncated-normal values, TF/JAX style.

    Samples from a standard truncated normal (mean 0, std 1, truncated to
    [a, b]) and then scales by ``std`` and shifts by ``mean``, so the
    truncation bounds apply *before* scaling — matching the Tensorflow/JAX
    initializer semantics rather than torch's nn.init.trunc_normal_.

    Args:
        tensor: an n-dimensional torch.Tensor, modified in place.
        mean: mean of the final distribution.
        std: standard deviation of the final distribution.
        a: lower truncation bound in standard-normal units.
        b: upper truncation bound in standard-normal units.
    """
    with torch.no_grad():
        # Sample in standard form first; _trunc_normal_ is a module-level
        # helper defined elsewhere in this file.
        _trunc_normal_(tensor, 0, 1.0, a, b)
        # Scale/shift afterwards so [a, b] applies to the unit distribution.
        tensor.mul_(std).add_(mean)
Fills the input Tensor with values drawn from a truncated normal distribution. The values are effectively drawn from the normal distribution :math:`\mathcal{N}( ext{mean}, ext{std}^2)` with values outside :math:`[a, b]` redrawn until they are within the bounds. The method used for generating the random values works best when :math:`a \leq ext{mean} \leq b`. NOTE: this 'tf' variant behaves closer to Tensorflow / JAX impl where the bounds [a, b] are applied when sampling the normal distribution with mean=0, std=1.0 and the result is subsequently scaled and shifted by the mean and std args. Args: tensor: an n-dimensional `torch.Tensor` mean: the mean of the normal distribution std: the standard deviation of the normal distribution a: the minimum cutoff value b: the maximum cutoff value
github-repos
def serialize_example(transformed_json_data, info_dict):
    """Make a serialized tf.Example from one transformed data row.

    Args:
        transformed_json_data: dict of transformed data, keyed by feature name.
        info_dict: per-feature info dicts carrying a 'dtype' entry.

    Returns:
        The serialized tf.Example built from transformed_json_data.

    Raises:
        ValueError: if the key sets differ or a dtype is unsupported.
    """
    import six
    import tensorflow as tf

    def _int64_feature(values):
        return tf.train.Feature(int64_list=tf.train.Int64List(value=values))

    def _bytes_feature(values):
        return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))

    def _float_feature(values):
        return tf.train.Feature(float_list=tf.train.FloatList(value=values))

    # Both mappings must describe exactly the same set of features.
    if sorted(six.iterkeys(transformed_json_data)) != sorted(six.iterkeys(info_dict)):
        raise ValueError('Keys do not match %s, %s' % (list(six.iterkeys(transformed_json_data)), list(six.iterkeys(info_dict))))

    feature_map = {}
    for name, info in six.iteritems(info_dict):
        dtype = info['dtype']
        if dtype == tf.int64:
            feature_map[name] = _int64_feature(transformed_json_data[name])
        elif dtype == tf.float32:
            feature_map[name] = _float_feature(transformed_json_data[name])
        elif dtype == tf.string:
            feature_map[name] = _bytes_feature(transformed_json_data[name])
        else:
            raise ValueError('Unsupported data type %s' % dtype)
    example = tf.train.Example(features=tf.train.Features(feature=feature_map))
    return example.SerializeToString()
Makes a serialized tf.example. Args: transformed_json_data: dict of transformed data. info_dict: output of feature_transforms.get_transfrormed_feature_info() Returns: The serialized tf.example version of transformed_json_data.
juraj-google-style
def handle_document_error(self, item_session: ItemSession) -> Actions:
    """Callback for when the document only describes a server error.

    Counts the error, lets the response handler decide the action, and
    marks the session as errored when processing continues normally.

    Returns:
        A value from :class:`.hook.Actions`.
    """
    self._waiter.increment()
    self._statistics.errors[ServerError] += 1
    outcome = self.handle_response(item_session)
    if outcome == Actions.NORMAL:
        item_session.set_status(Status.error)
    return outcome
Callback for when the document only describes a server error. Returns: A value from :class:`.hook.Actions`.
codesearchnet
def sort_prefixes(orig, prefixes='@+'):
    """Return the prefixes present in ``orig``, ordered by priority.

    Args:
        orig (str): Unsorted collection of prefix characters.
        prefixes (str): Priority order, from highest-priv to lowest.

    Returns:
        str: The prefixes of ``orig`` in priority order.
    """
    # Walk the priority list and keep only the prefixes actually present.
    return ''.join(prefix for prefix in prefixes if prefix in orig)
Returns a string of the prefixes present in `orig`, ordered from highest to lowest privilege. Args: orig (str): Unsorted collection of prefix characters. prefixes (str): Priority order of prefixes, from highest-priv to lowest.
juraj-google-style
def send_tpu_embedding_gradients(inputs, config, learning_rates=None, name=None):
    """A placeholder op for feeding per-sample gradients to the embedding layer.

    Args:
        inputs: A TensorList of gradients with which to update embedding
            tables; same length/shapes as RecvTPUEmbeddingActivations' result.
        config: Serialized TPUEmbeddingConfiguration proto.
        learning_rates: A TensorList of float32 scalars, one per dynamic
            learning rate tag; empty (the default) when all rates are constant.
        name: A name for the operation (optional).

    Returns:
        A SendTPUEmbeddingGradients operation.
    """
    return gen_tpu_ops.send_tpu_embedding_gradients(
        inputs=inputs,
        # The underlying op expects a (possibly empty) list, never None.
        learning_rates=[] if learning_rates is None else learning_rates,
        config=config,
        name=name)
A placeholder op for feeding per-sample gradients to the embedding layer. Args: inputs: A TensorList of gradients with which to update embedding tables. This argument has the same length and shapes as the return value of RecvTPUEmbeddingActivations, but contains gradients of the model's loss with respect to the embedding activations. The embedding tables are updated from these gradients via the optimizers specified in the TPU embedding configuration given to tpu.initialize_system. config: Serialized TPUEmbeddingConfiguration proto. learning_rates: A TensorList of float32 scalars, one for each dynamic learning rate tag: see the comments in //third_party/tensorflow/core/protobuf/tpu/ optimization_parameters.proto. Multiple tables can share the same dynamic learning rate tag as specified in the configuration. If the learning rates for all tables are constant, this list should be empty. name: A name for the operation (optional). Returns: A SendTPUEmbeddingGradients operation.
github-repos
def shift(self, time: int) -> 'Timeslot':
    """Return a new Timeslot shifted by ``time``.

    Args:
        time: amount of time to shift by.
    """
    # Shift the interval; the channel is unchanged.
    shifted_interval = self.interval.shift(time)
    return Timeslot(shifted_interval, self.channel)
Return a new Timeslot shifted by `time`. Args: time: time to be shifted
codesearchnet
def protein_only_and_noH(self, keep_ligands=None, force_rerun=False):
    """Isolate the receptor by stripping everything except protein and specified ligands.

    Runs Chimera in no-GUI mode on the dockprep output to write a receptor
    mol2 file and a hydrogen-free receptor PDB file, then records their paths
    on ``self.receptormol2_path`` / ``self.receptorpdb_path``.

    Args:
        keep_ligands (str, list): Ligand residue name(s) to keep in the PDB file.
        force_rerun (bool): If the method should be rerun even if the output file exists.

    Raises:
        ValueError: If dockprep has not been run (``self.dockprep_path`` unset).
    """
    log.debug('{}: running protein receptor isolation...'.format(self.id))

    if not self.dockprep_path:
        # Bug fix: this previously *returned* a ValueError instance instead of
        # raising it, so callers silently continued with an exception object.
        raise ValueError('Please run dockprep')

    receptor_mol2 = op.join(self.dock_dir, '{}_receptor.mol2'.format(self.id))
    receptor_noh = op.join(self.dock_dir, '{}_receptor_noH.pdb'.format(self.id))
    prly_com = op.join(self.dock_dir, 'prly.com')

    if ssbio.utils.force_rerun(flag=force_rerun, outfile=receptor_noh):
        # Build the Chimera command script.
        with open(prly_com, 'w') as f:
            f.write('open {}\n'.format(self.dockprep_path))
            keep_str = 'delete ~protein'
            if keep_ligands:
                keep_ligands = ssbio.utils.force_list(keep_ligands)
                for res in keep_ligands:
                    # Exclude each kept ligand residue from the delete command.
                    keep_str += ' & ~:{} '.format(res)
            keep_str = keep_str.strip() + '\n'
            f.write(keep_str)
            f.write('write format mol2 0 {}\n'.format(receptor_mol2))
            f.write('delete element.H\n')
            f.write('write format pdb 0 {}\n'.format(receptor_noh))

        # NOTE(review): relies on the `chimera` binary being on PATH.
        cmd = 'chimera --nogui {}'.format(prly_com)
        os.system(cmd)
        os.remove(prly_com)

    if ssbio.utils.is_non_zero_file(receptor_mol2) and ssbio.utils.is_non_zero_file(receptor_noh):
        self.receptormol2_path = receptor_mol2
        self.receptorpdb_path = receptor_noh
        log.debug('{}: successful receptor isolation (mol2)'.format(self.receptormol2_path))
        log.debug('{}: successful receptor isolation (pdb)'.format(self.receptorpdb_path))
    else:
        log.critical('{}: protein_only_and_noH failed to run on dockprep file'.format(self.dockprep_path))
Isolate the receptor by stripping everything except protein and specified ligands. Args: keep_ligands (str, list): Ligand(s) to keep in PDB file force_rerun (bool): If method should be rerun even if output file exists
codesearchnet
def limit(self, accountID, **kwargs):
    """Shortcut to create a Limit Order in an Account.

    Args:
        accountID: The ID of the Account.
        kwargs: The arguments to create a LimitOrderRequest.

    Returns:
        v20.response.Response containing the results from submitting
        the request.
    """
    order_request = LimitOrderRequest(**kwargs)
    return self.create(accountID, order=order_request)
Shortcut to create a Limit Order in an Account Args: accountID : The ID of the Account kwargs : The arguments to create a LimitOrderRequest Returns: v20.response.Response containing the results from submitting the request
juraj-google-style
def unexpected_disconnect(self, conn_or_internal_id):
    """Notify that there was an unexpected disconnection of the device.

    Queues an asynchronous force_disconnect action so in-progress
    operations are canceled cleanly and the device transitions to a
    disconnected state.

    Args:
        conn_or_internal_id (string, int): Either an integer connection id
            or a string internal_id.
    """
    action = ConnectionAction(
        'force_disconnect',
        {'id': conn_or_internal_id},
        sync=False)
    self._actions.put(action)
Notify that there was an unexpected disconnection of the device. Any in progress operations are canceled cleanly and the device is transitioned to a disconnected state. Args: conn_or_internal_id (string, int): Either an integer connection id or a string internal_id
juraj-google-style
def copy_update(pb_message, **kwds):
    """Return a copy of the PB message with the given fields overwritten.

    Args:
        pb_message: The protobuf message to copy.
        **kwds: Field names mapped to the values to set on the copy.

    Returns:
        A new message of the same type with the requested fields updated.
    """
    duplicate = type(pb_message)()
    duplicate.CopyFrom(pb_message)
    for field_name, field_value in kwds.items():
        setattr(duplicate, field_name, field_value)
    return duplicate
Returns a copy of the PB object, with some fields updated. Args: pb_message: The protobuf message to copy. **kwds: Field names mapped to the new values to set on the copy. Returns: A new message of the same type with the requested fields updated.
codesearchnet
def _extract_nn_info(self, structure, nns):
    """Given Voronoi NNs, extract the NN info in the form needed by NearestNeighbors.

    Args:
        structure (Structure): Structure being evaluated.
        nns ([dicts]): Nearest neighbor information for a structure,
            keyed stats dicts each containing at least 'site' and
            the weight attribute named by ``self.weight``.

    Returns:
        list[dict]: One entry per qualifying neighbor with keys
        'site', 'image', 'weight' (normalized to the max weight),
        'site_index', and optionally 'poly_info'.
    """
    # Restrict to the configured target elements, or all elements when unset.
    if (self.targets is None):
        targets = structure.composition.elements
    else:
        targets = self.targets
    siw = []
    # Normalize weights against the largest one so the tolerance is relative.
    max_weight = max((nn[self.weight] for nn in nns.values()))
    for nstats in nns.values():
        site = nstats['site']
        if ((nstats[self.weight] > (self.tol * max_weight)) and self._is_in_targets(site, targets)):
            nn_info = {'site': site, 'image': self._get_image(structure, site), 'weight': (nstats[self.weight] / max_weight), 'site_index': self._get_original_site(structure, site)}
            if self.extra_nn_info:
                # NOTE(review): poly_info aliases nstats, so the `del` below
                # removes 'site' from the caller's nns dict in place — confirm
                # callers do not reuse nns afterwards.
                poly_info = nstats
                del poly_info['site']
                nn_info['poly_info'] = poly_info
            siw.append(nn_info)
    return siw
Given Voronoi NNs, extract the NN info in the form needed by NearestNeighbors Args: structure (Structure): Structure being evaluated nns ([dicts]): Nearest neighbor information for a structure Returns: (list of tuples (Site, array, float)): See nn_info
codesearchnet
def orthologize(ast, bo, species_id: str):
    """Recursively orthologize BEL entities in a BEL AST.

    Walks the AST; for each NSArg with a known ortholog for ``species_id``,
    rewrites its namespace/value to the orthologized form and records the
    species on ``bo.ast.species``. Validation warnings are appended to
    ``bo.validation_messages`` when no species id is given or no ortholog
    is found.

    Args:
        ast: BEL AST node (mutated in place).
        bo: BEL object carrying ``ast.species`` and ``validation_messages``.
        species_id (str): target species identifier.

    Returns:
        The (mutated) AST node that was passed in.
    """
    # Without a target species there is nothing to orthologize.
    if (not species_id):
        bo.validation_messages.append(('WARNING', 'No species id was provided for orthologization'))
        return ast
    if isinstance(ast, NSArg):
        if ast.orthologs:
            if ast.orthologs.get(species_id, None):
                # Rewrite the namespace arg to the ortholog's decanonical form.
                orthologized_nsarg_val = ast.orthologs[species_id]['decanonical']
                (ns, value) = orthologized_nsarg_val.split(':')
                ast.change_nsvalue(ns, value)
                ast.canonical = ast.orthologs[species_id]['canonical']
                ast.decanonical = ast.orthologs[species_id]['decanonical']
                ast.orthologized = True
                bo.ast.species.add((species_id, ast.orthologs[species_id]['species_label']))
            else:
                # No ortholog for the requested species: keep original species.
                bo.ast.species.add((ast.species_id, ast.species_label))
                bo.validation_messages.append(('WARNING', f'No ortholog found for {ast.namespace}:{ast.value}'))
        elif ast.species_id:
            bo.ast.species.add((ast.species_id, ast.species_label))
    # Recurse into child arguments, if any.
    if hasattr(ast, 'args'):
        for arg in ast.args:
            orthologize(arg, bo, species_id)
    return ast
Recursively orthologize BEL Entities in BEL AST using API endpoint NOTE: - will take first ortholog returned in BEL.bio API result (which may return more than one ortholog) Args: ast (BEL): BEL AST endpoint (str): endpoint url with a placeholder for the term_id Returns: BEL: BEL AST
codesearchnet
def CreateSmartCampaign(client, budget_id, merchant_id):
    """Adds a new Smart Shopping campaign via the AdWords CampaignService.

    Args:
        client: an AdWordsClient instance.
        budget_id: the str ID of the budget to associate with the campaign.
        merchant_id: the str ID of the merchant account to associate.

    Returns:
        The str ID of the created campaign.
    """
    campaign_service = client.GetService('CampaignService', version='v201809')
    # NOTE(review): the campaign dict literal below appears truncated
    # (unterminated string / missing fields such as budget and shopping
    # settings using budget_id and merchant_id) — likely lost during
    # extraction. Recover the full literal from the original example
    # before running this code.
    campaign = {'name': ('Shopping campaign
    campaign_operations = [{'operator': 'ADD', 'operand': campaign}]
    result = campaign_service.mutate(campaign_operations)['value'][0]
    print(('Smart Shopping campaign with name "%s" and ID "%s" was added.' % (result['name'], result['id'])))
    return result['id']
Adds a new Smart Shopping campaign. Args: client: an AdWordsClient instance. budget_id: the str ID of the budget to be associated with the Shopping campaign. merchant_id: the str ID of the merchant account to be associated with the Shopping campaign. Returns: A campaign ID.
codesearchnet
def update_course(self, course, enterprise_customer, enterprise_context):
    """Update course metadata of the given course and return the updated course.

    Arguments:
        course (dict): Course metadata returned by the course catalog API.
        enterprise_customer (EnterpriseCustomer): enterprise customer instance.
        enterprise_context (dict): Enterprise context to be added to course
            runs and URLs.

    Returns:
        dict: Updated course metadata.
    """
    existing_runs = course.get('course_runs') or []
    course['course_runs'] = self.update_course_runs(
        course_runs=existing_runs,
        enterprise_customer=enterprise_customer,
        enterprise_context=enterprise_context)

    marketing_url = course.get('marketing_url')
    if marketing_url:
        # Append enterprise context plus UTM tracking to the marketing URL.
        query_parameters = dict(enterprise_context, **utils.get_enterprise_utm_context(enterprise_customer))
        course['marketing_url'] = utils.update_query_parameters(marketing_url, query_parameters)

    course.update(enterprise_context)
    return course
Update course metadata of the given course and return updated course. Arguments: course (dict): Course Metadata returned by course catalog API enterprise_customer (EnterpriseCustomer): enterprise customer instance. enterprise_context (dict): Enterprise context to be added to course runs and URLs. Returns: (dict): Updated course metadata
codesearchnet
def beam_row_from_dict(row: dict, schema):
    """Converts a dictionary row to a Beam Row.

    Nested records (RECORD/STRUCT fields) and repeated fields are supported.
    Unlike the previous revision, the caller's ``row`` dict is not mutated.

    Args:
        row (dict): The row to convert.
        schema (str, dict,
            ~apache_beam.io.gcp.internal.clients.bigquery.bigquery_v2_messages.TableSchema):
            The table schema used to drive the conversion.

    Returns:
        ~apache_beam.pvalue.Row: The converted row.

    Raises:
        KeyError: If a REQUIRED field is missing from ``row``.
    """
    if not isinstance(schema, (bigquery.TableSchema, bigquery.TableFieldSchema)):
        schema = get_bq_tableschema(schema)
    beam_row = {}
    for field in schema.fields:
        name = field.name
        mode = field.mode.upper()
        # Renamed from `type` to avoid shadowing the builtin.
        field_type = field.type.upper()
        if name not in row and mode == 'REQUIRED':
            # Preserve original behavior: a missing REQUIRED field raises.
            raise KeyError(name)
        # Missing optional/nullable fields become None without writing
        # back into the caller's dict (the old code mutated `row`).
        value = row.get(name)
        if field_type in ('RECORD', 'STRUCT') and value:
            if mode == 'REPEATED':
                beam_row[name] = [beam_row_from_dict(record, field) for record in value]
            else:
                beam_row[name] = beam_row_from_dict(value, field)
        else:
            beam_row[name] = value
    return apache_beam.pvalue.Row(**beam_row)
Converts a dictionary row to a Beam Row. Nested records and lists are supported. Args: row (dict): The row to convert. schema (str, dict, ~apache_beam.io.gcp.internal.clients.bigquery.bigquery_v2_messages.TableSchema): The table schema. Will be used to help convert the row. Returns: ~apache_beam.pvalue.Row: The converted row.
github-repos
def match(self, message: Message) -> bool:
    """Match a message against the behaviour's template.

    Args:
        message (spade.message.Message): the message to match with.

    Returns:
        bool: whether the message matches (always True when no template
        is set).
    """
    # No template means every message matches.
    if not self.template:
        return True
    return self.template.match(message)
Matches a message with the behaviour's template Args: message(spade.message.Message): the message to match with Returns: bool: whether the message matches or not
codesearchnet
def write_data(worksheet, data):
    """Write data into a worksheet.

    Dispatches on the shape of the data: a list of dicts gets a
    title-cased header row plus one row per dict; a list of lists is
    written row by row with normalized cells; anything else is written
    as one single-cell row per value. Empty data is a no-op.

    Args:
        worksheet: worksheet to write into.
        data: data to be written.
    """
    if not data:
        return
    rows = data if isinstance(data, list) else [data]
    first = rows[0]
    if isinstance(first, dict):
        keys = get_keys(rows)
        header = [utilities.convert_snake_to_title_case(key) for key in keys]
        worksheet.append(header)
        for row in rows:
            worksheet.append([get_value_from_row(row, key) for key in keys])
    elif isinstance(first, list):
        for row in rows:
            worksheet.append([utilities.normalize_cell_value(value) for value in row])
    else:
        for row in rows:
            worksheet.append([utilities.normalize_cell_value(row)])
Writes data into worksheet. Args: worksheet: worksheet to write into data: data to be written
juraj-google-style
def crop_and_resize(image, boxes, box_ind, crop_size, pad_border=True):
    """Aligned version of tf.image.crop_and_resize for floating-point boxes.

    Args:
        image: NCHW tensor.
        boxes: nx4 tensor of x1y1x2y2 boxes in pixel coordinates.
        box_ind: (n,) tensor mapping each box to a batch image.
        crop_size (int): output spatial size.
        pad_border (bool): symmetrically pad the image by 1 pixel so boxes
            touching the border sample valid data.

    Returns:
        n,C,size,size tensor of crops.
    """
    assert isinstance(crop_size, int), crop_size
    # Boxes are treated as constants; no gradient flows into them.
    boxes = tf.stop_gradient(boxes)

    if pad_border:
        image = tf.pad(image, [[0, 0], [0, 0], [1, 1], [1, 1]], mode='SYMMETRIC')
        # Shift box coordinates to account for the 1-pixel padding.
        boxes = boxes + 1

    @under_name_scope()
    def transform_fpcoor_for_tf(boxes, image_shape, crop_shape):
        # Convert our floating-point pixel boxes into the normalized
        # y1x1y2x2 coordinates tf.image.crop_and_resize expects, adjusting
        # by half a sample spacing so sample centers line up.
        x0, y0, x1, y1 = tf.split(boxes, 4, axis=1)
        spacing_w = (x1 - x0) / tf.cast(crop_shape[1], tf.float32)
        spacing_h = (y1 - y0) / tf.cast(crop_shape[0], tf.float32)
        imshape = [tf.cast(image_shape[0] - 1, tf.float32), tf.cast(image_shape[1] - 1, tf.float32)]
        nx0 = (x0 + spacing_w / 2 - 0.5) / imshape[1]
        ny0 = (y0 + spacing_h / 2 - 0.5) / imshape[0]
        nw = spacing_w * tf.cast(crop_shape[1] - 1, tf.float32) / imshape[1]
        nh = spacing_h * tf.cast(crop_shape[0] - 1, tf.float32) / imshape[0]
        return tf.concat([ny0, nx0, ny0 + nh, nx0 + nw], axis=1)

    image_shape = tf.shape(image)[2:]
    boxes = transform_fpcoor_for_tf(boxes, image_shape, [crop_size, crop_size])
    # crop_and_resize works on NHWC; transpose in and back out.
    image = tf.transpose(image, [0, 2, 3, 1])
    ret = tf.image.crop_and_resize(
        image, boxes, tf.cast(box_ind, tf.int32),
        crop_size=[crop_size, crop_size])
    ret = tf.transpose(ret, [0, 3, 1, 2])
    return ret
Aligned version of tf.image.crop_and_resize, following our definition of floating point boxes. Args: image: NCHW boxes: nx4, x1y1x2y2 box_ind: (n,) crop_size (int): Returns: n,C,size,size
juraj-google-style
def cn_occupation_energy(self, delta_occupation=None):
    """The coordination-number dependent energy for this site.

    Args:
        delta_occupation (dict(str: int), optional): A dictionary of a
            change in (site-type specific) coordination number, e.g.
            { 'A': 1, 'B': -1 }. If given, the energy is calculated
            including these changes in neighbour-site occupations.
            Defaults to None.

    Returns:
        float: The coordination-number dependent energy for this site.

    Raises:
        ValueError: If delta_occupation refers to a site type that is
            not a neighbour of this site.
    """
    nn_occupations = self.site_specific_nn_occupation()
    if delta_occupation:
        for site, delta in delta_occupation.items():
            if site not in nn_occupations:
                # Raise instead of assert: asserts are stripped under -O.
                raise ValueError('Unknown neighbour site type: {}'.format(site))
            nn_occupations[site] += delta
    # Generator expression avoids building an intermediate list.
    return sum(self.cn_occupation_energies[s][n] for s, n in nn_occupations.items())
The coordination-number dependent energy for this site. Args: delta_occupation (:obj:Dict(Str:Int), optional): A dictionary of a change in (site-type specific) coordination number, e.g. { 'A' : 1, 'B' : -1 }. If this is not None, the coordination-number dependent energy is calculated including these changes in neighbour-site occupations. Defaults to None Returns: (Float): The coordination-number dependent energy for this site.
juraj-google-style