code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def is_github_repo_owner_the_official_one(context, repo_owner): official_repo_owner = context.config['official_github_repos_owner'] if (not official_repo_owner): raise ConfigError('This worker does not have a defined owner for official GitHub repositories. Given "official_github_repos_owner": {}'.format...
Given a repo_owner, check if it matches the one configured to be the official one. Args: context (scriptworker.context.Context): the scriptworker context. repo_owner (str): the repo_owner to verify Raises: scriptworker.exceptions.ConfigError: when no official owner was defined Returns: bool: True when ``repo_owner``...
codesearchnet
def get_key(self, key, request_only=False): values = {} requested_names = [x.name for x in self._package_requests if (not x.conflict)] for pkg in self.resolved_packages: if ((not request_only) or (pkg.name in requested_names)): value = getattr(pkg, key) if (value is not None)...
Get a data key value for each resolved package. Args: key (str): String key of property, eg 'tools'. request_only (bool): If True, only return the key from resolved packages that were also present in the request. Returns: Dict of {pkg-name: (variant, value)}.
codesearchnet
def validate(self, config): if (not isinstance(config, dict)): raise errors.SchemeValidationError('Scheme can only validate a dictionary config, but was given {} (type: {})'.format(config, type(config))) for arg in self.args: if (arg.name in config): arg.validate(config[arg.name]) ...
Validate the given config against the `Scheme`. Args: config (dict): The configuration to validate. Raises: errors.SchemeValidationError: The configuration fails validation against the `Schema`.
codesearchnet
def get_label_set(self, type_str=None):
    """Collect the labels of nodes in the tree rooted at this node.

    Args:
        type_str: SUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only
            include information from nodes of that type.

    Returns:
        set: The ``label_str`` values of the matching nodes.
    """
    labels = set()
    for node in self.node_gen:
        if type_str is None or node.type_str == type_str:
            labels.add(node.label_str)
    return labels
Get a set of label_str for the tree rooted at this node. Args: type_str: SUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only include information from nodes of that type. Returns: set: The labels of the nodes leading up to this node from the root.
juraj-google-style
def test_src_dir_path(relative_path): return os.path.join(os.environ['TEST_SRCDIR'], 'org_tensorflow/tensorflow', relative_path)
Creates an absolute test srcdir path given a relative path. Args: relative_path: a path relative to tensorflow root. e.g. "contrib/session_bundle/example". Returns: An absolute path to the linked in runfiles.
github-repos
def extractSchedule(self, schedule, period): ret = namedtuple('ret', ['Hour', 'Min', 'Tariff', 'Period', 'Schedule']) work_table = self.m_schd_1_to_4 if (Schedules.Schedule_5 <= schedule <= Schedules.Schedule_6): work_table = self.m_schd_5_to_6 period += 1 schedule += 1 ret.Period = str(...
Read a single schedule tariff from meter object buffer. Args: schedule (int): A :class:`~ekmmeters.Schedules` value or in range(Extent.Schedules). tariff (int): A :class:`~ekmmeters.Tariffs` value or in range(Extent.Tariffs). Returns: bool: True on completion.
codesearchnet
def _parse_logging(log_values: dict, service_config: dict):
    """Copy logging configuration into a service specification.

    Args:
        log_values (dict): logging configuration values.
        service_config (dict): service specification, updated in place.
    """
    for key, value in log_values.items():
        # Substring matching preserved: any key containing 'driver' or
        # 'options' is picked up, as in the original.
        if 'driver' in key:
            service_config['log_driver'] = value
        if 'options' in key:
            service_config['log_driver_options'] = value
Parse log key. Args: log_values (dict): logging configuration values service_config (dict): Service specification
juraj-google-style
def getWindow(title, exact=False):
    """Return a Window object if 'title' (or part of it) is found among
    visible window titles, else return None.

    Only the first window found is returned.

    Args:
        title: unicode string to search for.
        exact (bool): True to accept only an exact title match.

    Returns:
        Window or None: the first matching window, if any.
    """
    titles = getWindows()
    handle = titles.get(title)
    if not handle and not exact:
        # Fall back to the first title containing the requested text.
        handle = next((v for k, v in titles.items() if title in k), None)
    return Window(handle) if handle else None
Return Window object if 'title' or its part found in visible windows titles, else return None Return only 1 window found first Args: title: unicode string exact (bool): True if search only exact match
codesearchnet
def connectivity_array(self): cart_coords = np.array(self.s.cart_coords) all_sites = (cart_coords[(:, None, :)] + self.cart_offsets[(None, :, :)]) vt = Voronoi(all_sites.reshape(((- 1), 3))) n_images = all_sites.shape[1] cs = (len(self.s), len(self.s), len(self.cart_offsets)) connectivity = np.z...
Provides connectivity array. Returns: connectivity: An array of shape [atomi, atomj, imagej]. atomi is the index of the atom in the input structure. Since the second atom can be outside of the unit cell, it must be described by both an atom index and an image index. Array data is the solid angle of polygon between ato...
codesearchnet
def run_config_diagnostics(config_path=CONFIG_PATH): config = read_config(config_path) missing_sections = set() malformed_entries = defaultdict(set) for (section, expected_section_keys) in SECTION_KEYS.items(): section_content = config.get(section) if (not section_content): m...
Run diagnostics on the configuration file. Args: config_path (str): Path to the configuration file. Returns: str, Set[str], dict(str, Set[str]): The path to the configuration file, a set of missing sections and a dict that maps each section to the entries that have either missing or empty options.
codesearchnet
def add_arguments(self, parser):
    """Add the unlock command arguments to the parser.

    Args:
        self (UnlockCommand): the ``UnlockCommand`` instance.
        parser (argparse.ArgumentParser): the parser to add the arguments to.

    Returns:
        ``None``
    """
    parser.add_argument('name',
                        nargs=1,
                        choices=['kinetis'],
                        help='name of MCU to unlock')
    return self.add_common_arguments(parser, True)
Adds the unlock command arguments to the parser. Args: self (UnlockCommand): the ``UnlockCommand`` instance parser (argparse.ArgumentParser): the parser to add the arguments to Returns: ``None``
juraj-google-style
def acquaint_insides(swap_gate: ops.Gate, acquaintance_gate: ops.Operation, qubits: Sequence[ops.Qid], before: bool, layers: Layers, mapping: Dict[(ops.Qid, int)]) -> None: max_reach = _get_max_reach(len(qubits), round_up=before) reaches = itertools.chain(range(1, (max_reach + 1)), range(max_reach, (- 1), (- 1)...
Acquaints each of the qubits with another set specified by an acquaintance gate. Args: qubits: The list of qubits of which half are individually acquainted with another list of qubits. layers: The layers to put gates into. acquaintance_gate: The acquaintance gate that acquaints the end qubit with another list of qubit...
codesearchnet
def log(self, metric):
    """Format a metric and emit it through the configured logger.

    Args:
        metric (dict): complete metric; must supply every field used by
            ``LOGFMT`` plus a 'context' key (which may be falsy).
    """
    parts = [self.LOGFMT.format(**metric)]
    context = metric['context']
    if context:
        parts.append(' context: {context}'.format(context=context))
    self._logger.log(self.level, ''.join(parts))
Format and output metric. Args: metric (dict): Complete metric.
codesearchnet
def split_input(cls, mapper_spec): batch_size = int(_get_params(mapper_spec).get( cls.BATCH_SIZE_PARAM, cls._BATCH_SIZE)) shard_count = mapper_spec.shard_count namespace_ranges = namespace_range.NamespaceRange.split(shard_count, contig...
Returns a list of input readers for the input spec. Args: mapper_spec: The MapperSpec for this InputReader. Returns: A list of InputReaders.
juraj-google-style
def Validate(self, expression): parsed = self._Load(expression) if not parsed: raise DefinitionError("Empty StatFilter expression.") bad_keys = set(parsed) - self._KEYS if bad_keys: raise DefinitionError("Invalid parameters: %s" % ",".join(bad_keys)) if self.cfg.mask and not self...
Validates that a parsed rule entry is valid for fschecker. Args: expression: A rule expression. Raises: DefinitionError: If the filter definition could not be validated. Returns: True if the expression validated OK.
juraj-google-style
def gpio_get(self, pins=None): if (pins is None): pins = range(4) size = len(pins) indices = (ctypes.c_uint8 * size)(*pins) statuses = (ctypes.c_uint8 * size)() result = self._dll.JLINK_EMU_GPIO_GetState(ctypes.byref(indices), ctypes.byref(statuses), size) if (result < 0): raise ...
Returns a list of states for the given pins. Defaults to the first four pins if an argument is not given. Args: self (JLink): the ``JLink`` instance pins (list): indices of the GPIO pins whose states are requested Returns: A list of states. Raises: JLinkException: on error.
codesearchnet
def add_primitives_path(path):
    """Add a new path to look for primitives.

    The new path is inserted at the front of the search list, so any
    primitive found in this folder takes precedence over any other
    primitive with the same name that existed before.

    Args:
        path (str): path to add.

    Raises:
        ValueError: if the path is not a valid directory.
    """
    abs_path = os.path.abspath(path)
    # Fix: compare the normalized path. The original compared the raw
    # argument but stored the absolute path, so the same directory could
    # be inserted twice under different spellings.
    if abs_path not in _PRIMITIVES_PATHS:
        if not os.path.isdir(path):
            raise ValueError('Invalid path: {}'.format(path))
        LOGGER.debug('Adding new primitives path %s', path)
        _PRIMITIVES_PATHS.insert(0, abs_path)
Add a new path to look for primitives. The new path will be inserted in the first place of the list, so any primitive found in this new folder will take precedence over any other primitive with the same name that existed in the system before. Args: path (str): path to add Raises: ValueError: A `ValueError` will be r...
juraj-google-style
def is45(msg): if allzeros(msg): return False d = hex2bin(data(msg)) if wrongstatus(d, 1, 2, 3): return False if wrongstatus(d, 4, 5, 6): return False if wrongstatus(d, 7, 8, 9): return False if wrongstatus(d, 10, 11, 12): return False ...
Check if a message is likely to be BDS code 4,5. Meteorological hazard report Args: msg (String): 28 bytes hexadecimal message string Returns: bool: True or False
juraj-google-style
def get_reduced_structure(self, reduction_algo='niggli'): if (reduction_algo == 'niggli'): reduced_latt = self._lattice.get_niggli_reduced_lattice() elif (reduction_algo == 'LLL'): reduced_latt = self._lattice.get_lll_reduced_lattice() else: raise ValueError('Invalid reduction algo :...
Get a reduced structure. Args: reduction_algo (str): The lattice reduction algorithm to use. Currently supported options are "niggli" or "LLL".
codesearchnet
def trace(src, options=None): options = options or config.Options.create() with config.verbosity_from(options): loader = load_pytd.create_loader(options) ret = analyze.infer_types(src=src, options=options, loader=loader) pytd_module = ret.ast raw_traces = [] for op, symbo...
Generates type traces for the given source code. Args: src: The source text. options: A pytype.config.Options object that can be used to specify options such as the target Python version. Returns: A source.Code object.
github-repos
def get_attr_location(self, name, location): line, _ = location src_line = self.line(line) attr = name.split('.')[-1] dot_attr = '.' + attr if dot_attr in src_line: col = src_line.index(dot_attr) return (Location(line, col + 1), len(attr)) else: attr_loc = self._get_multi...
Returns the location and span of the attribute in an attribute access. Args: name: The attribute name. location: The location of the value the attribute is accessed on.
github-repos
def __init__(self, server_id): data = datatools.get_data() self.server_id = server_id self.logger = logging.getLogger("{}.{}".format(__name__, self.server_id)) self.songcache_dir = "{}/{}".format(_root_songcache_dir, self.server_id) self.songcache_next...
Locks onto a server for easy management of various UIs Args: server_id (str): The Discord ID of the server to lock on to
juraj-google-style
def flip_channel_order(self, image):
    """Flip the channel order of `image` from RGB to BGR, or vice versa.

    Note that this will trigger a conversion of `image` to a NumPy array
    if it's a PIL Image.

    Args:
        image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
            The image whose color channels to flip; the channel axis is
            assumed to come first.

    Returns:
        The image with its first axis (channels) reversed.
    """
    self._ensure_format_supported(image)
    # PIL images carry no indexable channel axis, so convert first.
    array = self.to_numpy_array(image) if isinstance(image, PIL.Image.Image) else image
    return array[::-1, :, :]
Flips the channel order of `image` from RGB to BGR, or vice versa. Note that this will trigger a conversion of `image` to a NumPy array if it's a PIL Image. Args: image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`): The image whose color channels to flip. If `np.ndarray` or `torch.Tensor`, the channel dimensio...
github-repos
def get_random_value(length=10, character_sets=(string.ascii_uppercase, string.ascii_lowercase)):
    """Get a random string with the given length.

    Args:
        length (int): The length of the string to return.
        character_sets: Iterable of strings whose characters form the
            candidate alphabet. Defaults to ASCII upper- and lowercase.

    Returns:
        str: The random string.
    """
    # Fix: the default was a mutable list (a classic Python pitfall); a
    # tuple is equivalent for callers and immutable. Also build the
    # alphabet once instead of re-joining it for every character.
    alphabet = ''.join(character_sets)
    return ''.join(random.choice(alphabet) for _ in range(length))
Get a random string with the given length. Args: length (int): The length of the string to return. character_sets list(str): The caracter sets to use. Returns: str: The random string.
codesearchnet
def _DownloadAuthUrl(self, url, dest_dir): dest_file = tempfile.NamedTemporaryFile(dir=dest_dir, delete=False) dest_file.close() dest = dest_file.name self.logger.info( 'Downloading url from %s to %s using authentication token.', url, dest) if not self.token: response = self.wat...
Download a Google Storage URL using an authentication token. If the token cannot be fetched, fallback to unauthenticated download. Args: url: string, the URL to download. dest_dir: string, the path to a directory for storing metadata scripts. Returns: string, the path to the file storing the metadata script.
juraj-google-style
def uses_star_args_in_call(node):
    """Check if an ast.Call node uses arbitrary-length positional *args.

    Works with the AST call node format of Python 3.5+ (starred arguments
    appear as ``ast.Starred`` entries in ``node.args``) as well as the
    older AST format (a dedicated ``starargs`` attribute).

    Args:
        node: The ast.Call node to check arg values for.

    Returns:
        True if the node uses starred variadic positional args.
    """
    if sys.version_info[:2] < (3, 5):
        return bool(node.starargs)
    return any(isinstance(arg, ast.Starred) for arg in node.args)
Check if an ast.Call node uses arbitrary-length positional *args. This function works with the AST call node format of Python3.5+ as well as the different AST format of earlier versions of Python. Args: node: The ast.Call node to check arg values for. Returns: True if the node uses starred variadic positional args o...
github-repos
def sign(self, byts):
    """Compute the ECDSA signature for the given bytestream.

    Args:
        byts (bytes): The bytes to sign.

    Returns:
        bytes: The ECC signature bytes.
    """
    algorithm = c_hashes.SHA256()
    digester = c_hashes.Hash(algorithm, default_backend())
    digester.update(byts)
    # Sign the pre-computed SHA-256 digest rather than the raw bytes.
    return self.priv.sign(digester.finalize(),
                          c_ec.ECDSA(c_utils.Prehashed(algorithm)))
Compute the ECC signature for the given bytestream. Args: byts (bytes): The bytes to sign. Returns: bytes: The ECC signature bytes.
codesearchnet
def load_metrics(event_dir, epoch): metrics = {} for filename in tf.gfile.ListDirectory(event_dir): path = os.path.join(event_dir, filename) for event in tf.train.summary_iterator(path): if ((event.step == epoch) and event.HasField('summary')): value = event.summary.v...
Loads metrics for this epoch if they have already been written. This reads the entire event file but it's small with just per-epoch metrics. Args: event_dir: TODO(koz4k): Document this. epoch: TODO(koz4k): Document this. Returns: metrics.
codesearchnet
def RegisterMessage(self, message):
    """Register the given message type in the local database.

    Calls to GetSymbol() and GetMessages() will return messages
    registered here.

    Args:
        message: a message.Message, to be registered.

    Returns:
        The provided message, unchanged.
    """
    descriptor = message.DESCRIPTOR
    self.pool.AddDescriptor(descriptor)
    self._classes[descriptor.full_name] = message
    return message
Registers the given message type in the local database. Calls to GetSymbol() and GetMessages() will return messages registered here. Args: message: a message.Message, to be registered. Returns: The provided message.
juraj-google-style
def GetSectionByIndex(self, section_index):
    """Retrieve a specific section based on its index.

    Args:
        section_index (int): index of the section.

    Returns:
        The section, or None if the index is out of range.
    """
    if not self._is_parsed:
        # Parse lazily, only once.
        self._Parse()
        self._is_parsed = True
    if 0 <= section_index < len(self._sections):
        return self._sections[section_index]
    return None
Retrieves a specific section based on the index. Args: section_index (int): index of the section. Returns: The section, or None if not available.
juraj-google-style
def run_commands(self, commands, encoding='json', send_enable=True, **kwargs): commands = make_iterable(commands) commands = [({'cmd': c.split('MULTILINE:')[0], 'input': ('%s\n' % c.split('MULTILINE:')[1].strip())} if ('MULTILINE:' in c) else c) for c in commands] if send_enable: if self._enablepwd:...
Sends the commands over the transport to the device This method sends the commands to the device using the nodes transport. This is a lower layer function that shouldn't normally need to be used, preferring instead to use config() or enable(). Args: commands (list): The ordered list of commands to send to the device...
codesearchnet
def parse(self, filepath, content):
    """Parse opened settings content using the YAML parser.

    Args:
        filepath (str): Settings file path, used in error messages.
        content (str): Settings content from the opened file.

    Raises:
        boussole.exceptions.SettingsBackendError: if no valid YAML object
            can be decoded from the content.

    Returns:
        dict: Dictionary containing parsed settings.
    """
    try:
        # Fix: safe_load avoids arbitrary Python object construction from
        # settings files; bare yaml.load without an explicit Loader is
        # unsafe and deprecated since PyYAML 5.1.
        parsed = yaml.safe_load(content)
    except yaml.YAMLError as exc:
        msg = "No YAML object could be decoded from file: {}\n{}"
        raise SettingsBackendError(msg.format(filepath, exc))
    return parsed
Parse opened settings content using YAML parser. Args: filepath (str): Settings object, depends from backend content (str): Settings content from opened file, depends from backend. Raises: boussole.exceptions.SettingsBackendError: If parser can not decode a valid YAML object. Returns: dict: Dictionnary containing pa...
juraj-google-style
def __get_state_by_id(cls, job_id):
    """Get job state by id.

    Args:
        job_id: job id.

    Returns:
        model.MapreduceState for the job.

    Raises:
        ValueError: if the job state is missing.
    """
    state = model.MapreduceState.get_by_job_id(job_id)
    if state is not None:
        return state
    raise ValueError("Job state for job %s is missing." % job_id)
Get job state by id. Args: job_id: job id. Returns: model.MapreduceState for the job. Raises: ValueError: if the job state is missing.
juraj-google-style
def _print_test_names(test_classes): for test_class in test_classes: cls = test_class(config_parser.TestRunConfig()) test_names = [] try: cls._pre_run() if cls.tests: test_names = list(cls.tests) else: test_names = cls.get_e...
Prints the names of all the tests in all test classes. Args: test_classes: classes, the test classes to print names from.
github-repos
def _get_prop_from_modelclass(modelclass, name): if name == '__key__': return modelclass._key parts = name.split('.') part, more = parts[0], parts[1:] prop = modelclass._properties.get(part) if prop is None: if issubclass(modelclass, model.Expando): prop = model.GenericProperty(part) els...
Helper for FQL parsing to turn a property name into a property object. Args: modelclass: The model class specified in the query. name: The property name. This may contain dots which indicate sub-properties of structured properties. Returns: A Property object. Raises: KeyError if the property doesn't exist and the m...
juraj-google-style
def run_idle(self): if ((not self.idlers) or (self.inactive >= len(self.idlers))): return False idler = self.idlers.popleft() (callback, args, kwds) = idler _logging_debug('idler: %s', callback.__name__) res = callback(*args, **kwds) if (res is not None): if res: self...
Run one of the idle callbacks. Returns: True if one was called, False if no idle callback was called.
codesearchnet
def _MakeFieldDescriptor(self, field_proto, message_name, index, is_extension=False): if message_name: full_name = '.'.join((message_name, field_proto.name)) else: full_name = field_proto.name return descriptor.FieldDescriptor(name=field_proto.name, full_name=full_name, index=index, number=f...
Creates a field descriptor from a FieldDescriptorProto. For message and enum type fields, this method will do a look up in the pool for the appropriate descriptor for that type. If it is unavailable, it will fall back to the _source function to create it. If this type is still unavailable, construction will fail. Arg...
codesearchnet
def __init__(self, qobj_model, **run_config):
    """Create a new converter.

    Args:
        qobj_model (QobjInstruction): marshmallow model to serialize to object.
        run_config (dict): experimental configuration.
    """
    # Keep the model and the free-form run configuration for later use.
    self._run_config = run_config
    self._qobj_model = qobj_model
Create new converter. Args: qobj_model (QobjInstruction): marshmallow model to serialize to object. run_config (dict): experimental configuration.
juraj-google-style
def __init__(self, column_names=None, title=None):
    """Initialize a command line table view.

    Args:
        column_names (Optional[list[str]]): column names.
        title (Optional[str]): title.
    """
    super(CLITableView, self).__init__(column_names=column_names, title=title)
    # Width of the first column name, or 0 when there are no columns.
    self._column_width = len(self._columns[0]) if self._columns else 0
Initializes a command line table view. Args: column_names (Optional[list[str]]): column names. title (Optional[str]): title.
juraj-google-style
def xldate_as_datetime(xldate, datemode=0, option="to_datetime"): if option == "to_float": d = (xldate - 25589) * 86400.0 else: try: d = datetime.datetime(1899, 12, 30) + \ datetime.timedelta(days=xldate + 1462 * datemode) ...
Converts a xls date stamp to a more sensible format. Args: xldate (str): date stamp in Excel format. datemode (int): 0 for 1900-based, 1 for 1904-based. option (str): option in ("to_datetime", "to_float", "to_string"), return value Returns: datetime (datetime object, float, or string).
juraj-google-style
def table_chains(self, table='filter'):
    """Map every chain of the given table to its set of defined rules.

    Args:
        table (str): table name, defaults to ``filter``.

    Returns:
        dict: chain name -> set of rules defined for that chain.
    """
    chains = {}
    for entry in self.get_table(table):
        name = entry['name']
        chains[name] = self.get_chain(name, table)
    return chains
Get a dict where the keys are all the chains for the given table and each value is the set of rules defined for the given chain. Args: table (str): table name, defaults to ``filter`` Returns: dict: chains with set of defined rules
codesearchnet
def cutting_indices(self, independent_decision_points: List[pg.geno.DecisionPoint], global_state: pg.geno.AttributeDict, step: int) -> List[int]:
Implementation of getting the indices of the cutting points. Args: independent_decision_points: A list of independent decision points. global_state: An optional keyword argument as the global state. Subclass can omit. step: An optional keyword argument as the curent step. Subclass can omit. Returns: A list of integer...
github-repos
def scroll(clicks, x=None, y=None, pause=None, _pause=True):
    """Perform a scroll of the mouse scroll wheel.

    Whether this is a vertical or horizontal scroll depends on the
    underlying operating system.

    Args:
        clicks: number of scroll wheel clicks.
        x, y: where the mouse event happens; None uses the current mouse
            position. ``x`` may also be a (x, y) pair.
        pause: extra pause after the action.
        _pause: whether the automatic pause is applied.
    """
    _failSafeCheck()
    # Allow the coordinates to be passed as a single (x, y) pair.
    # type() is kept (not isinstance) to match the original's exact-type check.
    if type(x) in (tuple, list):
        x, y = x[0], x[1]
    x, y = position(x, y)
    platformModule._scroll(clicks, x, y)
    _autoPause(pause, _pause)
Performs a scroll of the mouse scroll wheel. Whether this is a vertical or horizontal scroll depends on the underlying operating system. The x and y parameters detail where the mouse event happens. If None, the current mouse position is used. If a float value, it is rounded down. If outside the boundaries of the scre...
codesearchnet
def sample(self, bqm, num_reads=10): values = tuple(bqm.vartype.value) def _itersample(): for __ in range(num_reads): sample = {v: choice(values) for v in bqm.linear} energy = bqm.energy(sample) yield sample, energy samples,...
Give random samples for a binary quadratic model. Variable assignments are chosen by coin flip. Args: bqm (:obj:`.BinaryQuadraticModel`): Binary quadratic model to be sampled from. num_reads (int, optional, default=10): Number of reads. Returns: :obj:`.SampleSet`
juraj-google-style
def get_property_dict(entity_proto):
    """Convert datastore.Entity to a dict of property name -> datastore.Value.

    Args:
        entity_proto: datastore.Entity proto message.

    Returns:
        dict of entity properties.
    """
    return {prop.key: prop.value for prop in entity_proto.property}
Convert datastore.Entity to a dict of property name -> datastore.Value. Args: entity_proto: datastore.Entity proto message. Usage: >>> get_property_dict(entity_proto) {'foo': {string_value='a'}, 'bar': {integer_value=2}} Returns: dict of entity properties.
codesearchnet
def _read_mptcp_join(self, bits, size, kind): if (self._syn and self._ack): return self._read_join_synack(bits, size, kind) elif self._syn: return self._read_join_syn(bits, size, kind) elif self._ack: return self._read_join_ack(bits, size, kind) else: temp = self._read_fi...
Read Join Connection option. Positional arguments: * bits - str, 4-bit data * size - int, length of option * kind - int, 30 (Multipath TCP) Returns: * dict -- extracted Join Connection (MP_JOIN) option Structure of MP_JOIN [RFC 6824]: Octets Bits Name Description 0 0 ...
codesearchnet
def op_nodes(self, op=None):
    """Get the list of "op" nodes in the dag.

    Args:
        op (Type): Instruction subclass of op nodes to return. If
            op=None, return all op nodes.

    Returns:
        list[DAGNode]: the op nodes (optionally filtered by the given op
        class), in graph iteration order.
    """
    return [node for node in self._multi_graph.nodes()
            if node.type == "op" and (op is None or isinstance(node.op, op))]
Get the list of "op" nodes in the dag. Args: op (Type): Instruction subclass op nodes to return. if op=None, return all op nodes. Returns: list[DAGNode]: the list of node ids containing the given op.
juraj-google-style
def get_plugin_asset(plugin_asset_cls, graph=None): if graph is None: graph = ops.get_default_graph() if not plugin_asset_cls.plugin_name: raise ValueError('Class %s has no plugin_name' % plugin_asset_cls.__name__) name = _PLUGIN_ASSET_PREFIX + plugin_asset_cls.plugin_name container = gr...
Acquire singleton PluginAsset instance from a graph. PluginAssets are always singletons, and are stored in tf Graph collections. This way, they can be defined anywhere the graph is being constructed, and if the same plugin is configured at many different points, the user can always modify the same instance. Args: plu...
github-repos
def __init__(self, loss_scale_value): super(FixedLossScale, self).__init__() if not isinstance(loss_scale_value, (int, float)): raise ValueError('loss_scale_value must be a Python int or float.') if loss_scale_value < 1: raise ValueError('loss_scale_value must be at least 1.') self._loss...
Creates the fixed loss scale. Args: loss_scale_value: A Python float. Its ideal value varies depending on models to run. Choosing a too small loss_scale might affect model quality; a too big loss_scale might cause inf or nan. There is no single right loss_scale to apply. There is no harm choosing a relatively big numb...
github-repos
def _print_download_progress_msg(self, msg, flush=False): if self._interactive_mode(): self._max_prog_str = max(self._max_prog_str, len(msg)) sys.stdout.write(('\r%-{}s'.format(self._max_prog_str) % msg)) sys.stdout.flush() if flush: print('\n') else: logging....
Prints a message about download progress either to the console or TF log. Args: msg: Message to print. flush: Indicates whether to flush the output (only used in interactive mode).
codesearchnet
def _address_content(self, x): mem_keys = tf.layers.dense(self.mem_vals, self.key_depth, bias_initializer=tf.constant_initializer(1.0), name="mem_key") mem_query = tf.layers.dense(x, self.key_depth, bias_initializ...
Address the memory based on content similarity. Args: x: a tensor in the shape of [batch_size, length, depth]. Returns: the logits for each memory entry [batch_size, length, memory_size].
juraj-google-style
def register_user(self, user):
    """Append a new user's information into the dictionaries.

    Args:
        user (User): User to register.
    """
    # Each user starts with an empty set of known items.
    new_entry = {'known_items': set()}
    self.users[user.index] = new_entry
    self.n_user = self.n_user + 1
For new users, append their information into the dictionaries. Args: user (User): User.
juraj-google-style
def write_fasta_file_from_dict(indict, outname, outdir=None, outext='.faa', force_rerun=False): if (not outdir): outdir = '' outfile = ssbio.utils.outfile_maker(inname='', outname=outname, outdir=outdir, outext=outext) if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile): seqs = [] ...
Write a FASTA file for a dictionary of IDs and their sequence strings. Args: indict: Input dictionary with keys as IDs and values as sequence strings outname: Name of the output file which will have outext appended to it outdir: Path to directory to output sequences to outext: Extension of FASTA file, default ".faa" f...
codesearchnet
def Push(cls, connection, datafile, filename, st_mode=DEFAULT_PUSH_MODE, mtime=0, progress_callback=None): fileinfo = ('{},{}'.format(filename, int(st_mode))).encode('utf-8') cnxn = FileSyncConnection(connection, b'<2I') cnxn.Send(b'SEND', fileinfo) if progress_c...
Push a file-like object to the device. Args: connection: ADB connection datafile: File-like object for reading from filename: Filename to push to st_mode: stat mode for filename mtime: modification time progress_callback: callback method that accepts filename, bytes_written and total_bytes Raises: PushFailedError: Ra...
juraj-google-style
def _tuple_of_big_endian_int(bit_groups: Tuple[(np.ndarray, ...)]) -> Tuple[(int, ...)]:
    """Return the big-endian integers specified by groups of bits.

    Args:
        bit_groups: Groups of descending bits, each specifying a big-endian
            integer with the 1s bit at the end.

    Returns:
        A tuple containing the integer for each group.
    """
    return tuple(map(_big_endian_int, bit_groups))
Returns the big-endian integers specified by groups of bits. Args: bit_groups: Groups of descending bits, each specifying a big endian integer with the 1s bit at the end. Returns: A tuple containing the integer for each group.
codesearchnet
def register_for_auto_class(cls, auto_class='AutoProcessor'): if not isinstance(auto_class, str): auto_class = auto_class.__name__ import transformers.models.auto as auto_module if not hasattr(auto_module, auto_class): raise ValueError(f'{auto_class} is not a valid auto class.') cls._aut...
Register this class with a given auto class. This should only be used for custom feature extractors as the ones in the library are already mapped with `AutoProcessor`. Args: auto_class (`str` or `type`, *optional*, defaults to `"AutoProcessor"`): The auto class to register this new feature extractor with.
github-repos
def exe_cmd(*cmds, timeout=DEFAULT_TIMEOUT_SEC): cmd = ' '.join(cmds) ret, out, err = utils.run_command(cmd=cmd, stdout=PIPE, stderr=PIPE, shell=True, timeout=timeout) logging.debug('cmd: %s, stdout: %s, stderr: %s, ret: %s', utils.cli_cmd_to_string(cmds), out, err, ret) if not err: return out ...
Executes commands in a new shell. Directing stderr to PIPE, with timeout. This is fastboot's own exe_cmd because of its peculiar way of writing non-error info to stderr. Args: cmds: A sequence of commands and arguments. timeout: The number of seconds to wait before timing out. Returns: The output of the command run,...
github-repos
def is_distributing_by_cloning(model):
    """Decide whether this model is going to be distributed via cloning.

    We are going to distribute the model by cloning in graph mode.

    Args:
        model: Keras model to distribute.

    Returns:
        True if the `model` is going to be distributed using cloning and
        False otherwise.
    """
    # NOTE(review): `context.executing_eagerly` is referenced without being
    # called, so this operand is a function object and always truthy --
    # the condition reduces to the TPU-strategy check alone. Confirm
    # whether `context.executing_eagerly()` was intended.
    if backend.is_tpu_strategy(model._distribution_strategy) and context.executing_eagerly: return False
    elif ops.executing_eagerly_outside_functions(): return bool(model._compile_distribution)
    return True
Decide whether this model is going to be distributed via cloning. We are going to distribute the model by cloning in graph mode. Args: model: Keras model to distribute. Returns: True if the `model` is going to be distributed using cloning and False otherwise.
github-repos
def has_chosen(state, correct, msgs):
    """Verify exercises of the type MultipleChoiceExercise.

    Args:
        state: State instance describing student and solution code. Can be
            omitted if used with Ex().
        correct: index of the correct option, where 1 is the first option.
        msgs: list of feedback messages corresponding to each option.

    Returns:
        The (possibly updated) state.
    """
    namespace = {}
    # Run the student submission to find which option was selected.
    # (Deliberate use of exec on student code -- this is an SCT checker.)
    exec(state.student_code, globals(), namespace)
    selected = namespace['selected_option']
    if selected == correct:
        # Options are 1-based while the message list is 0-based.
        state.reporter.success_msg = msgs[correct - 1]
    else:
        state.report(Feedback(msgs[selected - 1]))
    return state
Verify exercises of the type MultipleChoiceExercise Args: state: State instance describing student and solution code. Can be omitted if used with Ex(). correct: index of correct option, where 1 is the first option. msgs : list of feedback messages corresponding to each option. :Example: The following SCT is f...
codesearchnet
def gather(strategy, value):
    """Gathers value from all workers.

    This is intended for tests before we implement an official all-gather
    API.

    Args:
        strategy: a `tf.distribute.Strategy`.
        value: a nested structure of n-dim `tf.distribute.DistributedValue`
            of `tf.Tensor`, or of a `tf.Tensor` if the strategy only has
            one replica.

    Returns:
        The gathered structure.
    """
    gather_fn = functools.partial(_gather, strategy)
    return nest.map_structure(gather_fn, value)
Gathers value from all workers. This is intended for tests before we implement an official all-gather API. Args: strategy: a `tf.distribute.Strategy`. value: a nested structure of n-dim `tf.distribute.DistributedValue` of `tf.Tensor`, or of a `tf.Tensor` if the strategy only has one replica. Cannot contain tf.sparse....
github-repos
def convert_dropout(params, w_name, scope_name, inputs, layers, weights, names): print('Converting dropout ...') if names == 'short': tf_name = 'DO' + random_string(6) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) dropout = keras....
Convert dropout. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
juraj-google-style
def update_defaults(self, new_defaults, respect_none=False): for key, value in six.iteritems(new_defaults): item = self.get_item(key) if item is None: raise YapconfItemNotFound("Cannot update default for {0}, " "there is ...
Update items defaults to the values in the new_defaults dict. Args: new_defaults (dict): A key-value pair of new defaults to be applied. respect_none (bool): Flag to indicate if ``None`` values should constitute an update to the default.
juraj-google-style
def cumulative_probabilities(self):
    """Cumulative sum of the relative probabilities for all possible jumps.

    Returns:
        (np.array): Cumulative sum of relative jump probabilities,
        normalised so that the final entry is 1.
    """
    # Normalise by the partition function (the sum of all weights).
    return np.cumsum(self.p) / np.sum(self.p)
Cumulative sum of the relative probabilities for all possible jumps. Args: None Returns: (np.array): Cumulative sum of relative jump probabilities.
codesearchnet
def Append(self, item):
    """Add an item to the ring buffer, overwriting the oldest entry once
    the buffer has reached its maximum size.

    Args:
        item (object): item.
    """
    if self._index >= self._size:
        # Wrap around to the start of the ring buffer.
        self._index %= self._size
    if self._index < len(self._list):
        self._list[self._index] = item
    else:
        self._list.append(item)
    self._index += 1
Add an item to the list. Args: item (object): item.
juraj-google-style
def where(self, *constraints: column_expression_builder.ColumnExpressionBuilder) -> 'View': for constraint in constraints: if constraint.node.return_type != _fhir_path_data_types.Boolean: raise ValueError(('view `where` expressions must be boolean predicates', f' got `{constraint.node.to_fhir_pa...
Returns a new View instance with these added constraints. Args: *constraints: a list of FHIRPath expressions to conjuctively constrain the underlying data. The returned view will apply the both the current and additional constraints defined here.
github-repos
def _add_bound_method(self, bound_method, identify_observed): inst = bound_method.__self__ method_name = bound_method.__name__ key = self.make_key(bound_method) if (key not in self.observers): self.observers[key] = ObserverBoundMethod(inst, method_name, identify_observed, (key, self.observers)) ...
Add an bound method as an observer. Args: bound_method: The bound method to add as an observer. identify_observed: See the docstring for add_observer. Returns: True if the bound method is added, otherwise False.
codesearchnet
def forward(self, inputs_embeds, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states =...
Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's...
github-repos
def filter(self, *query_filter):
    """Set the query filter to perform the query with.

    Args:
        *query_filter: Simplified Query Language filter(s); each one is
            appended to the pending query list.

    Returns:
        The instance itself, so calls can be chained.
    """
    for query_part in query_filter:
        self.query.append(query_part)
    return self
Set the query filter to perform the query with Args: *query_filter: Simplified Query Language filter
codesearchnet
def _CreateAdGroup(client, campaign_id): ad_group_service = client.GetService('AdGroupService') operations = [{ 'operator': 'ADD', 'operand': { 'campaignId': campaign_id, 'adGroupType': 'SEARCH_DYNAMIC_ADS', 'name': 'Earth to Mars Cruises 'status': 'PAUSED', ...
Creates an ad group. Args: client: an AdWordsClient instance. campaign_id: an integer campaign ID. Returns: An integer ad group ID.
juraj-google-style
def inverse_transform(self, y):
    """Revert the transformation of transform.

    Args:
        y: np.ndarray
            Transformed numpy array.

    Returns:
        np.ndarray
            If the transformer was fit to a 1D numpy array and a 2D array
            with a singleton second dimension is passed, it is squeezed
            back to 1D; otherwise it is returned untouched.
    """
    sklearn.base.check_is_fitted(self)
    # Resolve the array-API namespace so squeezing works for any backend.
    xp, _ = sklearn.utils._array_api.get_namespace(y)
    needs_squeeze = self.ndim_ == 1 and y.ndim == 2
    return xp.squeeze(y, axis=1) if needs_squeeze else y
Revert the transformation of transform. Args: y: np.ndarray Transformed numpy array. Returns: np.ndarray If the transformer was fit to a 1D numpy array, and a 2D numpy array with a singleton second dimension is passed, it will be squeezed back to 1D. Otherwise, it will be left untouched.
github-repos
def prepare_soap_body(self, method, parameters, namespace): tags = [] for (name, value) in parameters: tag = '<{name}>{value}</{name}>'.format(name=name, value=escape(('%s' % value), {'"': '&quot;'})) tags.append(tag) wrapped_params = ''.join(tags) if (namespace is not None): soa...
Prepare the SOAP message body for sending. Args: method (str): The name of the method to call. parameters (list): A list of (name, value) tuples containing the parameters to pass to the method. namespace (str): The XML namespace to use for the method. Returns: str: A properly formatted SOAP Body.
codesearchnet
class Iterator(PyDataset): white_list_formats = ('png', 'jpg', 'jpeg', 'bmp', 'ppm', 'tif', 'tiff') def __init__(self, n, batch_size, shuffle, seed): self.n = n self.batch_size = batch_size self.seed = seed self.shuffle = shuffle self.batch_index = 0 self.total_b...
Base class for image data iterators. DEPRECATED. Every `Iterator` must implement the `_get_batches_of_transformed_samples` method. Args: n: Integer, total number of samples in the dataset to loop over. batch_size: Integer, size of a batch. shuffle: Boolean, whether to shuffle the data between epochs. seed: Random se...
github-repos
def main(args): if (not args): raise Exception('Please specify at least one JSON config path') inputs = [] program = [] outputs = [] for arg in args: with open(arg) as fd: config = json.load(fd) inputs.extend(config.get('inputs', [])) program.extend(config...
Invokes run function using a JSON file config. Args: args: CLI args, which can be a JSON file containing an object whose attributes are the parameters to the run function. If multiple JSON files are passed, their contents are concatenated. Returns: 0 if succeeded or nonzero if failed. Raises: Exception: If input data ...
codesearchnet
def request(self,message,message_type):
    """Send a request message of the given type.

    Args:
        message: the message to publish.
        message_type: the type of message being sent.

    Raises:
        Exception: if ``message_type`` is MULTIPART, which is not
            supported for requests.
    """
    # MULTIPART payloads are rejected up front; only single-part messages
    # can travel through the request channel.
    if message_type == MULTIPART:
        raise Exception("Unsupported request type")
    # Delegate the actual send to the parent class implementation.
    super(Requestor,self).send(message,message_type)
Send a request message of the given type Args: - message: the message to publish - message_type: the type of message being sent
juraj-google-style
def GetSourceStrings(cls, event):
    """Retrieves the formatted source strings for a specific event object.

    Args:
        event (EventObject): event.

    Returns:
        list[str, str]: short and long version of the source of the event.
    """
    # Look up the formatter registered for this event's data type and let
    # it render the (short, long) source pair.
    return cls.GetFormatterObject(event.data_type).GetSources(event)
Retrieves the formatted source strings for a specific event object. Args: event (EventObject): event. Returns: list[str, str]: short and long version of the source of the event.
juraj-google-style
def qrandom(n):
    """Creates an array of n true random numbers obtained from the quantum
    random number generator at qrng.anu.edu.au.

    This function requires the package quantumrandom and an internet
    connection.

    Args:
        n (int): length of the random array

    Returns:
        array of ints: array of truly random unsigned 16 bit int values
    """
    import quantumrandom
    # The service hands out values in blocks of 1024; fetch enough whole
    # blocks to cover n, then trim the surplus.
    blocks = [
        quantumrandom.get_data(data_type='uint16', array_length=1024)
        for _ in range(int(np.ceil(n / 1024.0)))
    ]
    return np.concatenate(blocks)[:n]
Creates an array of n true random numbers obtained from the quantum random number generator at qrng.anu.edu.au This function requires the package quantumrandom and an internet connection. Args: n (int): length of the random array Return: array of ints: array of truly random unsigned 16 bit int values
codesearchnet
def convert_slice(params, w_name, scope_name, inputs, layers, weights, names): print('Converting slice ...') if len(params['axes']) > 1: raise AssertionError('Cannot convert slice by multiple dimensions') if params['axes'][0] not in [0, 1, 2, 3]: raise AssertionError('Slice by dimensi...
Convert slice operation. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
juraj-google-style
def make_bubble_surface(dims=DEFAULT_DIMS, repeat=3):
    """Makes a surface from the product of sine functions on each axis.

    Args:
        dims (pair): the dimensions of the surface to create
        repeat (int): the frequency of the waves is set to ensure this many
            repetitions of the function

    Returns:
        surface: A surface.
    """
    grads = make_gradients(dims)
    # Centre each gradient around zero, take one sine wave per axis, and
    # multiply them to produce the bubble pattern.
    wave_x = np.sin((grads[0] - 0.5) * repeat * np.pi)
    wave_y = np.sin((grads[1] - 0.5) * repeat * np.pi)
    return wave_x * wave_y
Makes a surface from the product of sine functions on each axis. Args: dims (pair): the dimensions of the surface to create repeat (int): the frequency of the waves is set to ensure this many repetitions of the function Returns: surface: A surface.
juraj-google-style
def discovery(self, logfile=None, tracefile=None): self._enable_logging(logfile=logfile, tracefile=tracefile) self.log("'discovery' method is deprecated. Please 'connect' with force_discovery=True.") self.log('Device discovery process started') self.connect(logfile=logfile, force_discovery=True, tracefi...
Discover the device details. This method discovers several device attributes. Args: logfile (file): Optional file descriptor for session logging. The file must be open for write. The session is logged only if ``log_session=True`` was passed to the constructor. If the parameter is not passed then the default *session.l...
codesearchnet
def ctc_loss_and_grad(logits, labels, label_length, logit_length, unique=None): num_labels = _get_dim(logits, 2) max_label_seq_length = _get_dim(labels, 1) ilabel_log_probs = nn_ops.log_softmax(logits) state_log_probs = _ilabel_to_state(labels, num_labels, ilabel_log_probs) state_trans_probs = _ctc_...
Computes the CTC loss and gradients. Most users will want fwd_bwd.ctc_loss This function returns the computed gradient, it does not have a gradient of its own defined. Args: logits: tensor of shape [frames, batch_size, num_labels] labels: tensor of shape [batch_size, max_label_seq_length] label_length: tensor of sha...
github-repos
def _chunk_query(l, n, cn, conn, table, db_type):
    """Insert SQL rows in chunks of ``n`` via ``insert_query_m``.

    Args:
        l (list): List of tuples (the rows to insert).
        n (int): Number of rows per chunk.
        cn (str): Column names.
        conn (connection object): Database connection object.
        table (str): Table name.
        db_type (str): If "sqlite" or "mysql".
    """
    # A plain loop, not a comprehension: the calls are executed purely for
    # their side effects and the previous result list was discarded.
    for i in range(0, len(l), n):
        insert_query_m(l[i:i + n], table, conn, cn, db_type)
Call for inserting SQL query in chunks based on n rows Args: l (list): List of tuples n (int): Number of rows cn (str): Column names conn (connection object): Database connection object table (str): Table name db_type (str): If "sqlite" or "mysql"
juraj-google-style
def correlation_vector(self, value):
    """The correlation_vector property (setter).

    Args:
        value (string). the property value.
    """
    key = 'ai.operation.correlationVector'
    # Writing the default value back removes the stored override so the
    # serialized payload stays minimal; anything else is stored as-is.
    if value == self._defaults[key] and key in self._values:
        del self._values[key]
    else:
        self._values[key] = value
The correlation_vector property. Args: value (string). the property value.
juraj-google-style
def emit(self, record): if record.levelno < logging.getLevelName(self.min_level): return evt = LogEvent() evt.level = record.levelname evt.levelno = record.levelno evt.timestamp = datetime.fromtimestamp(record.created) evt.message = record.m...
Persist a record into the database Args: record (`logging.Record`): The logging.Record object to store Returns: `None`
juraj-google-style
def _ot_make_closed(self, access_string): self.observation_table.sm_vector.append(access_string) for i in self.alphabet: self.observation_table.smi_vector.append(access_string + i) for e in self.observation_table.em_vector: self._fill_table_entry(access_s...
Given a state input_string in Smi that is not equivalent with any state in Sm this method will move that state in Sm create a corresponding Smi state and fill the corresponding entries in the table. Args: access_string (str): State access string Returns: None
juraj-google-style
def rpc(self, address, rpc_id, *args, **kwargs): if isinstance(rpc_id, RPCDeclaration): arg_format = rpc_id.arg_format resp_format = rpc_id.resp_format rpc_id = rpc_id.rpc_id else: arg_format = kwargs.get('arg_format', None) resp_format = kwargs.get('resp_format', None) ...
Immediately dispatch an RPC inside this EmulatedDevice. This function is meant to be used for testing purposes as well as by tiles inside a complex EmulatedDevice subclass that need to communicate with each other. It should only be called from the main virtual device thread where start() was called from. **Backgroun...
codesearchnet
def label(self, main_type, sub_type, unique_id, label, action='ADD', owner=None, params=None): params = params or {} if owner: params['owner'] = owner action = action.upper() if not sub_type: url = '/v2/{}/{}/securityLabels/{}'.format(main_type, unique...
Args: owner: main_type: sub_type: unique_id: label: action: params: Return:
juraj-google-style
def from_key_counter(cls, key, counter, alg): counter = _convert_to_state_tensor(counter) key = _convert_to_state_tensor(key) alg = random_ops_util.convert_alg_to_int(alg) counter.shape.assert_is_compatible_with([_get_state_size(alg) - 1]) key.shape.assert_is_compatible_with([]) key = array_ops....
Creates a generator from a key and a counter. This constructor only applies if the algorithm is a counter-based algorithm. See method `key` for the meaning of "key" and "counter". Args: key: the key for the RNG, a scalar of type STATE_TYPE. counter: a vector of dtype STATE_TYPE representing the initial counter for th...
github-repos
def HandleBlockReceived(self, inventory): block = IOHelper.AsSerializableWithType(inventory, 'neo.Core.Block.Block') if not block: return blockhash = block.Hash.ToBytes() try: if blockhash in BC.Default().BlockRequests: BC.Default().Block...
Process a Block inventory payload. Args: inventory (neo.Network.Inventory):
juraj-google-style
def plane_xz(size=(10, 10), resolution=(10, 10)) -> VAO: (sx, sz) = size (rx, rz) = resolution (dx, dz) = ((sx / rx), (sz / rz)) (ox, oz) = (((- sx) / 2), ((- sz) / 2)) def gen_pos(): for z in range(rz): for x in range(rx): (yield (ox + (x * dx))) ...
Generates a plane on the xz axis of a specific size and resolution. Normals and texture coordinates are also included. Args: size: (x, y) tuple resolution: (x, y) tuple Returns: A :py:class:`demosys.opengl.vao.VAO` instance
codesearchnet
def clear_errors():
    """Clears the errors register of all Herkulex servos.

    Args:
        none
    """
    # Broadcast a RAM write of two zero bytes to the status-error
    # register, wiping the error state on every servo on the bus.
    packet = [
        0x0B,
        BROADCAST_ID,
        RAM_WRITE_REQ,
        STATUS_ERROR_RAM,
        BYTE2,
        0x00,
        0x00,
    ]
    send_data(packet)
Clears the errors register of all Herkulex servos Args: none
juraj-google-style
def _decorate_run_options_for_debug(self, run_options, debug_urls, debug_ops='DebugIdentity', node_name_regex_allowlist=None, op_type_regex_allowlist=None, tensor_dtype_regex_allowlist=None, tolerate_debug_op_creation_failures=False): run_options.output_partition_graphs = True debug_utils.watch_graph(run_option...
Modify a RunOptions object for debug tensor watching. Specifies request for outputting partition graphs. Adds debug_tensor_watch_opts with proper debug URLs. Args: run_options: (RunOptions) the modified RunOptions object. debug_urls: (list of str) debug URLs to be entered in run_options. debug_tensor_watch_opts. debu...
github-repos
def parse_arguments(argv): parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent()) parser.add_argument('--cloud', action='store_true', help='Analysis will use cloud services.') parser.add_argu...
Parse command line arguments. Args: argv: list of command line arguments, including program name. Returns: An argparse Namespace object. Raises: ValueError: for bad parameters
juraj-google-style
def create_struct(name): sid = idc.GetStrucIdByName(name) if sid != idaapi.BADADDR: raise exceptions.SarkStructAlreadyExists("A struct names {!r} already exists.".format(name)) sid = idc.AddStrucEx(-1, name, 0) if sid == idaapi.BADADDR: raise exceptions.SarkStructCreationF...
Create a structure. Args: name: The structure's name Returns: The sturct ID Raises: exceptions.SarkStructAlreadyExists: A struct with the same name already exists exceptions.SarkCreationFailed: Struct creation failed
juraj-google-style
def deal_with_changeset_stack_policy(self, fqn, stack_policy):
    """Set a stack policy when using changesets.

    ChangeSets don't allow you to set stack policies in the same call to
    update them. This sets it before executing the changeset if the stack
    policy is passed in.

    Args:
        stack_policy (:class:`stacker.providers.base.Template`): A template
            object representing a stack policy.
    """
    # Nothing to do when no policy was supplied.
    if not stack_policy:
        return
    kwargs = generate_stack_policy_args(stack_policy)
    kwargs["StackName"] = fqn
    logger.debug("Setting stack policy on %s.", fqn)
    self.cloudformation.set_stack_policy(**kwargs)
Set a stack policy when using changesets. ChangeSets don't allow you to set stack policies in the same call to update them. This sets it before executing the changeset if the stack policy is passed in. Args: stack_policy (:class:`stacker.providers.base.Template`): A template object representing a stack policy.
juraj-google-style
def get_review(review_struct): review_fn = _resource_context("review.rst") with open(review_fn) as f: review = f.read() with NamedTemporaryFile(suffix=".png") as qr_file: url = pyqrcode.create(review_struct.internal_url) url.png(qr_file.name, scale=5) ...
Generate review from `review_struct`. Args: review_struct (obj): :class:`.GenerateReview` instance. Returns: obj: StringIO file instance containing PDF file.
juraj-google-style
def get_dataset_split(tmp_dir, split, use_control_set): if (not use_control_set): dataset_split = {problem.DatasetSplit.TRAIN: [f for f in tf.gfile.Glob(os.path.join(tmp_dir, 'train-novels*.txt'))], problem.DatasetSplit.EVAL: [os.path.join(tmp_dir, 'lambada_control_test_data_plain_text.txt')]} return da...
Gives the file paths with regards to the given split. Args: tmp_dir: temp directory split: dataset split use_control_set: uses control dataset if true. Returns: list of file paths.
codesearchnet
def summarize_tensors(tensor_dict, tag=None):
    """Summarize the tensors.

    Args:
        tensor_dict: a dictionary of tensors.
        tag: name scope of the summary; defaults to tensors/.
    """
    prefix = 'tensors/' if tag is None else tag
    # Iterate over a snapshot of the keys (mirrors the original list()).
    for tensor_name in list(tensor_dict):
        tf.summary.histogram(prefix + tensor_name, tensor_dict[tensor_name])
Summarize the tensors. Args: tensor_dict: a dictionary of tensors. tag: name scope of the summary; defaults to tensors/.
codesearchnet
def rename(self, container, name):
    """Rename a container. Similar to the ``docker rename`` command.

    Args:
        container (str): ID of the container to rename
        name (str): New name for the container

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    endpoint = self._url("/containers/{0}/rename", container)
    response = self._post(endpoint, params={'name': name})
    # Surface any non-success HTTP status as an APIError.
    self._raise_for_status(response)
Rename a container. Similar to the ``docker rename`` command. Args: container (str): ID of the container to rename name (str): New name for the container Raises: :py:class:`docker.errors.APIError` If the server returns an error.
juraj-google-style
def predict_array(self, arr):
    """Predict on a raw numpy array with precomputation disabled.

    This over-ride is necessary because otherwise the learner method
    accesses the wrong model when it is called with precompute set to
    true.

    Args:
        arr: a numpy array to be used as input to the model for
            prediction purposes

    Returns:
        a numpy array containing the predictions from the model
    """
    saved_precompute = self.precompute
    self.precompute = False
    try:
        return super().predict_array(arr)
    finally:
        # Restore even if prediction raises, so the learner is not left
        # permanently switched out of precompute mode.
        self.precompute = saved_precompute
This over-ride is necessary because otherwise the learner method accesses the wrong model when it is called with precompute set to true Args: arr: a numpy array to be used as input to the model for prediction purposes Returns: a numpy array containing the predictions from the model
juraj-google-style
def play_alert(zones, alert_uri, alert_volume=20, alert_duration=0, fade_back=False): for zone in zones: zone.snap = Snapshot(zone) zone.snap.snapshot() print('snapshot of zone: {}'.format(zone.player_name)) for zone in zones: if zone.is_coordinator: if (not zone.is_p...
Demo function using soco.snapshot across multiple Sonos players. Args: zones (set): a set of SoCo objects alert_uri (str): uri that Sonos can play as an alert alert_volume (int): volume level for playing alert (0 to 100) alert_duration (int): length of alert (if zero then length of track) fade_back (bool): on reinstat...
codesearchnet