| code (string, lengths 20-4.93k) | docstring (string, lengths 33-1.27k) | source (string, 3 classes) |
|---|---|---|
def __init__(self, saved_model_checksum: int=None, graph_def_program_hash: int=None, signature_def_hash: int=None, saved_object_graph_hash: int=None, checkpoint_hash: int=None, version: int=None):
self.saved_model_checksum = saved_model_checksum
self.graph_def_program_hash = graph_def_program_hash
self.signature_def_hash = signature_def_hash
self.saved_object_graph_hash = saved_object_graph_hash
self.checkpoint_hash = checkpoint_hash
self.version = version
|
Initializes the instance based on values in the SavedModel fingerprint.
Args:
saved_model_checksum: Value of the `saved_model_checksum`.
graph_def_program_hash: Value of the `graph_def_program_hash`.
signature_def_hash: Value of the `signature_def_hash`.
saved_object_graph_hash: Value of the `saved_object_graph_hash`.
checkpoint_hash: Value of the `checkpoint_hash`.
version: Value of the producer field of the VersionDef.
|
github-repos
|
def pivot_samples(self, values, index="ID_REF"):
data = []
for gsm in self.gsms.values():
tmp_data = gsm.table.copy()
tmp_data["name"] = gsm.name
data.append(tmp_data)
ndf = concat(data).pivot(index=index, values=values, columns="name")
return ndf
|
Pivot samples by specified column.
Construct a table in which columns (names) are the samples, index
is a specified column, e.g. ID_REF, and values in the columns are of one
specified type.
Args:
values (:obj:`str`): Column name present in all GSMs.
index (:obj:`str`, optional): Column name that will become an index in
pivoted table. Defaults to "ID_REF".
Returns:
:obj:`pandas.DataFrame`: Pivoted data
|
juraj-google-style
|
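For illustration, a minimal sketch of the underlying pandas operation, using two toy tables in place of real GSM objects (the sample names here are made up):
import pandas as pd
t1 = pd.DataFrame({"ID_REF": ["a", "b"], "VALUE": [1.0, 2.0], "name": "GSM1"})
t2 = pd.DataFrame({"ID_REF": ["a", "b"], "VALUE": [3.0, 4.0], "name": "GSM2"})
# Same concat-then-pivot step the method performs over gsm.table copies:
ndf = pd.concat([t1, t2]).pivot(index="ID_REF", values="VALUE", columns="name")
# ndf has one column per sample (GSM1, GSM2), indexed by ID_REF.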
def from_list(cls, vals: List[Value]=[], reverse: bool=False) -> 'LinkedList':
res = EmptyList()
for v in (vals if reverse else vals[::(- 1)]):
res = cls(v, res)
return res
|
Create an instance from a standard list.
Args:
vals: Python list of instance values.
reverse: if True, build the linked list in reverse order; by default the first element of `vals` becomes the head.
|
codesearchnet
|
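A self-contained sketch of the cons-cell pattern this relies on; `LinkedList` and `EmptyList` below are stand-in definitions, not the library's actual classes:
class EmptyList:
    pass

class LinkedList:
    def __init__(self, value, rest):
        self.value, self.rest = value, rest

    @classmethod
    def from_list(cls, vals, reverse=False):
        res = EmptyList()
        # Iterating the reversed input makes vals[0] the final head.
        for v in (vals if reverse else vals[::-1]):
            res = cls(v, res)
        return res

lst = LinkedList.from_list([1, 2, 3])
assert (lst.value, lst.rest.value, lst.rest.rest.value) == (1, 2, 3)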
def n_feature_hash(feature, dims, seeds):
vec = np.zeros(sum(dims))
offset = 0
for seed, dim in zip(seeds, dims):
vec[offset:(offset + dim)] = feature_hash(feature, dim, seed)
offset += dim
return vec
|
N-hot-encoded feature hashing.
Args:
feature (str): Target feature represented as string.
dims (list of int): Number of dimensions for each hash value.
seeds (list of float): Seed of each hash function (mmh3).
Returns:
numpy 1d array: n-hot-encoded feature vector for `feature`.
|
juraj-google-style
|
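The helper `feature_hash` is not shown above; a plausible mmh3-based one-hot version (an assumption, not necessarily the library's exact code) makes the segmenting concrete:
import mmh3
import numpy as np

def feature_hash(feature, dim, seed):
    # Hypothetical helper: one-hot vector with the hashed index set.
    vec = np.zeros(dim)
    vec[mmh3.hash(feature, seed) % dim] = 1.0
    return vec

v = n_feature_hash("user_123", dims=[4, 4], seeds=[11, 13])
# v has length 8: one hashed segment of length 4 per (seed, dim) pair.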
def get_arrays(self, ji_win):
if isinstance(ji_win, dict):
ji_windows = ji_win
else:
ji_windows = self.ji_windows(ji_win)
arrays = []
for filename, res in zip(self._layer_files, self._layer_resolution):
with rasterio.open(filename) as src:
arr = src.read(1, window=ji_windows[res])
arrays.append(arr)
if self.dst_res is not None:
arrays = self._resample(arrays=arrays, ji_windows=ji_windows)
return arrays
|
Get the data of a window given the ji_windows derived with :meth:`ji_windows`.
Arguments:
ji_win {int or dict} -- The index of the window or the (multi-resolution) windows returned by :meth:`ji_windows`.
Returns:
(list of) array(s) -- List of 2D arrays in native resolution in case `dst_res` is `None`
or a 3D array where all layers are resampled to `dst_res` resolution.
|
juraj-google-style
|
def _test_connection(url):
import psycopg2
try:
with closing(psycopg2.connect(dsn=url)) as conn:
conn.cursor()
except psycopg2.OperationalError as e:
raise ValidationError(e)
|
Attempt to connect to postgres
Args:
url: string in the form "postgres://[user]:[password]@[host][:port][/database]"
|
juraj-google-style
|
def _encode_choice_type_exclusivity(self, builder: expressions.Builder) -> List[validation_pb2.SqlRequirement]:
if not builder.return_type.returns_polymorphic():
return []
field_name = _last_path_token(builder)
constraint_key = f'{field_name}-choice-type-exclusivity'
if constraint_key in self._options.skip_keys:
return []
type_codes = _utils.element_type_codes(builder.return_type.root_element_definition)
if len(type_codes) <= 1:
return []
num_choices_exist: expressions.Builder = _num_fields_exist((builder.ofType(choice_field) for choice_field in type_codes))
exclusivity_constraint: expressions.Builder = num_choices_exist <= 1
parent_builder = builder.get_parent_builder()
result = self._encode_fhir_path_builder_constraint(exclusivity_constraint, parent_builder)
if result is None:
return []
choice_type_path = self._abs_path_invocation(builder)
column_name = _path_to_sql_column_name(choice_type_path)
parent_path = self._abs_path_invocation(parent_builder)
description = f'Choice type {choice_type_path} has more than one of its possible choice data types set.'
return [validation_pb2.SqlRequirement(column_name=column_name, sql_expression=result.sql, fhir_path_sql_expression=result.fhir_path_sql, severity=validation_pb2.ValidationSeverity.SEVERITY_ERROR, type=validation_pb2.ValidationType.VALIDATION_TYPE_CHOICE_TYPE, element_path=parent_path, description=description, fhir_path_key=constraint_key, fhir_path_expression=result.builder.fhir_path, fields_referenced_by_expression=[field_name])]
|
Encodes a constraint ensuring the choice type has only one value set.
If `builder` represents a choice type, encodes SQL ensuring that at most one
of the columns representing that choice type's possible data types is not
null.
Args:
builder: The builder representing a path to a choice type.
Returns:
An empty sequence if `builder` is not a path to a choice type or the
constraint can not be encoded for other reasons. Otherwise, a sequence
containing a single `SqlRequirement` for the choice type.
|
github-repos
|
def register_watched_variable_resolver(resolver):
global _variables_override
assert _variables_override is default_get_variables
_variables_override = resolver
|
Registers the resolver to be used to get the list of variables to watch.
Args:
resolver: callable, takes a Variable and returns a list of Variables that
shall be watched.
|
github-repos
|
def GetMessages(file_protos):
for file_proto in file_protos:
_FACTORY.pool.Add(file_proto)
return _FACTORY.GetMessages([file_proto.name for file_proto in file_protos])
|
Builds a dictionary of all the messages available in a set of files.
Args:
file_protos: A sequence of file protos to build messages out of.
Returns:
A dictionary mapping proto names to the message classes. This will include
any dependent messages as well as any messages defined in the same file as
a specified message.
|
juraj-google-style
|
def preprocess_examples(self, texts: Union[TextInput, List[TextInput]], images: ImageInput=None, bboxes: BboxInput=None, num_image_tokens: Optional[int]=64) -> Union[str, List[str]]:
img_tokens = [self.boi_token] * num_image_tokens
img_info_tokens = ' '.join([self.boi_token] + img_tokens + [self.eoi_token])
batched = True
if isinstance(texts, str):
batched = False
texts = [texts]
if images is None:
images = [None] * len(texts)
elif not is_batched(images):
images = [images]
if len(texts) != len(images):
raise ValueError(f'The number of examples in `texts` and `images` should be the same. Got {len(texts)} v.s. {len(images)} instead.')
if not batched:
self._check_bboxes_for_single_text(bboxes)
bboxes = [bboxes]
elif bboxes is not None:
if not isinstance(bboxes, list):
raise ValueError('`bboxes` should be `None` or a list (as a batch) when `texts` is passed as a batch.')
for x in bboxes:
self._check_bboxes_for_single_text(x)
else:
bboxes = [None] * len(texts)
if len(bboxes) != len(texts):
raise ValueError(f'The number of examples in `texts` and `bboxes` should be the same. Got {len(texts)} v.s. {len(bboxes)} instead.')
result = [self._preprocess_single_example(text, image, bbox, img_info_tokens) for text, image, bbox in zip(texts, images, bboxes)]
if not batched:
result = result[0]
return result
|
Add image and bounding box information to `texts` as image and patch index tokens.
Args:
texts (`Union[TextInput, List[TextInput]]`): The texts to be processed.
images (`ImageInput`, *optional*): The images associated to `texts`.
bboxes (`Union[List[Tuple[int]], List[Tuple[float]], List[List[Tuple[int]]], List[List[Tuple[float]]]]`, *optional*):
The bounding bboxes associated to `texts`.
num_image_tokens (`int`, *optional*, defaults to 64):
The number of image tokens (used as latent queries). This should correspond to the `latent_query_num`
attribute in `Kosmos2Config`.
Returns:
`Union[TextInput, List[TextInput]]`: The processed texts with image and patch index tokens.
|
github-repos
|
def makedirs(self, dir_name, mode=PERM_DEF, exist_ok=False):
ends_with_sep = self.ends_with_path_separator(dir_name)
dir_name = self.absnormpath(dir_name)
if (ends_with_sep and self.is_macos and self.exists(dir_name, check_link=True) and (not self.exists(dir_name))):
self.remove_object(dir_name)
path_components = self._path_components(dir_name)
current_dir = self.root
for component in path_components:
if ((component not in current_dir.contents) or (not isinstance(current_dir.contents, dict))):
break
else:
current_dir = current_dir.contents[component]
try:
self.create_dir(dir_name, (mode & (~ self.umask)))
except (IOError, OSError) as e:
if ((not exist_ok) or (not isinstance(self.resolve(dir_name), FakeDirectory))):
if (self.is_windows_fs and (e.errno == errno.ENOTDIR)):
e.errno = errno.ENOENT
self.raise_os_error(e.errno, e.filename)
|
Create a leaf Fake directory and create any non-existent
parent dirs.
Args:
dir_name: (str) Name of directory to create.
mode: (int) Mode to create directory (and any necessary parent
directories) with. This argument defaults to 0o777.
The umask is applied to this mode.
exist_ok: (boolean) If exist_ok is False (the default), an OSError is
raised if the target directory already exists.
New in Python 3.2.
Raises:
OSError: if the directory already exists and exist_ok=False,
or as per :py:meth:`create_dir`.
|
codesearchnet
|
def add_file_locations(self, file_locations=[]):
if not hasattr(self, '__file_locations__'):
self.__file_locations__ = copy.copy(file_locations)
else:
self.__file_locations__ += copy.copy(file_locations)
|
Adds a list of file locations to the current list
Args:
file_locations: list of file location tuples
|
juraj-google-style
|
def _publish_response(self, slug, message):
resp_topic = self.topics.gateway_topic(slug, 'data/response')
self._logger.debug('Publishing response message: (topic=%s) (message=%s)', resp_topic, message)
self.client.publish(resp_topic, message)
|
Publish a response message for a device
Args:
slug (string): The device slug that we are publishing on behalf of
message (dict): A set of key value pairs that are used to create the message
that is sent.
|
codesearchnet
|
def _ExtractExtensionInstallEvents(self, settings_dict, parser_mediator):
for extension_id, extension in sorted(settings_dict.items()):
install_time = extension.get('install_time', None)
if not install_time:
parser_mediator.ProduceExtractionWarning(
'installation time missing for extension ID {0:s}'.format(
extension_id))
continue
try:
install_time = int(install_time, 10)
except ValueError:
parser_mediator.ProduceExtractionWarning((
'unable to convert installation time for extension ID '
'{0:s}').format(extension_id))
continue
manifest = extension.get('manifest', None)
if not manifest:
parser_mediator.ProduceExtractionWarning(
'manifest missing for extension ID {0:s}'.format(extension_id))
continue
event_data = ChromeExtensionInstallationEventData()
event_data.extension_id = extension_id
event_data.extension_name = manifest.get('name', None)
event_data.path = extension.get('path', None)
date_time = dfdatetime_webkit_time.WebKitTime(timestamp=install_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Extract extension installation events.
Args:
settings_dict (dict[str: object]): settings data from a Preferences file.
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
|
juraj-google-style
|
def raise_for_api_error(headers: MutableMapping, data: MutableMapping) -> None:
if not data["ok"]:
raise exceptions.SlackAPIError(data.get("error", "unknown_error"), headers, data)
if "warning" in data:
LOG.warning("Slack API WARNING: %s", data["warning"])
|
Check request response for Slack API error
Args:
headers: Response headers
data: Response data
Raises:
:class:`slack.exceptions.SlackAPIError`
|
juraj-google-style
|
def start(self, interval_s):
if self.running:
return False
self.stopped.clear()
def _execute():
if not self.method() and self.stop_if_false:
return
while not self.stopped.wait(interval_s):
if not self.method() and self.stop_if_false:
return
self.thread = threading.Thread(target=_execute)
self.thread.daemon = True
self.thread.start()
return True
|
Starts executing the method at the specified interval.
Args:
interval_s: The amount of time between executions of the method.
Returns:
False if the interval was already running.
|
juraj-google-style
|
def EnableNetworkInterfaces(
self, interfaces, logger, dhclient_script=None):
interfaces_to_up = [i for i in interfaces if i != 'eth0']
if interfaces_to_up:
logger.info('Enabling the Ethernet interfaces %s.', interfaces_to_up)
self._WriteIfcfg(interfaces_to_up, logger)
self._Ifup(interfaces_to_up, logger)
|
Enable the list of network interfaces.
Args:
interfaces: list of string, the output device names to enable.
logger: logger object, used to write to SysLog and serial port.
dhclient_script: string, the path to a dhclient script used by dhclient.
|
juraj-google-style
|
def get_embedded_object(self, signature_id):
request = self._get_request()
return request.get(self.EMBEDDED_OBJECT_GET_URL + signature_id)
|
Retrieves an embedded signing object.
Retrieves an embedded object containing a signature url that can be opened in an iFrame.
Args:
signature_id (str): The id of the signature to get a signature url for
Returns:
An Embedded object
|
juraj-google-style
|
def apply_rot_fn(self, fn: Callable[[Rotation], Rotation]) -> Rigid:
return Rigid(fn(self._rots), self._trans)
|
Applies a Rotation -> Rotation function to the stored rotation object.
Args:
fn: A function of type Rotation -> Rotation
Returns:
A transformation object with a transformed rotation.
|
github-repos
|
def add_functions(spec_dict: Mapping[str, Any]) -> Mapping[str, Any]:
spec_dict["functions"]["list"] = []
spec_dict["functions"]["list_long"] = []
spec_dict["functions"]["list_short"] = []
spec_dict["functions"]["primary"] = {}
spec_dict["functions"]["primary"]["list_long"] = []
spec_dict["functions"]["primary"]["list_short"] = []
spec_dict["functions"]["modifier"] = {}
spec_dict["functions"]["modifier"]["list_long"] = []
spec_dict["functions"]["modifier"]["list_short"] = []
spec_dict["functions"]["to_short"] = {}
spec_dict["functions"]["to_long"] = {}
for func_name in spec_dict["functions"]["info"]:
abbreviated_name = spec_dict["functions"]["info"][func_name]["abbreviation"]
spec_dict["functions"]["list"].extend((func_name, abbreviated_name))
spec_dict["functions"]["list_long"].append(func_name)
spec_dict["functions"]["list_short"].append(abbreviated_name)
if spec_dict["functions"]["info"][func_name]["type"] == "primary":
spec_dict["functions"]["primary"]["list_long"].append(func_name)
spec_dict["functions"]["primary"]["list_short"].append(abbreviated_name)
else:
spec_dict["functions"]["modifier"]["list_long"].append(func_name)
spec_dict["functions"]["modifier"]["list_short"].append(abbreviated_name)
spec_dict["functions"]["to_short"][abbreviated_name] = abbreviated_name
spec_dict["functions"]["to_short"][func_name] = abbreviated_name
spec_dict["functions"]["to_long"][abbreviated_name] = func_name
spec_dict["functions"]["to_long"][func_name] = func_name
return spec_dict
|
Add function keys to spec_dict
Args:
spec_dict (Mapping[str, Any]): bel specification dictionary
Returns:
Mapping[str, Any]: bel specification dictionary with added function keys
|
juraj-google-style
|
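A minimal illustration of the mappings this builds (the two BEL function names are just examples):
spec = {"functions": {"info": {
    "proteinAbundance": {"abbreviation": "p", "type": "primary"},
    "variant": {"abbreviation": "var", "type": "modifier"},
}}}
spec = add_functions(spec)
assert spec["functions"]["to_short"]["proteinAbundance"] == "p"
assert spec["functions"]["to_long"]["p"] == "proteinAbundance"
assert spec["functions"]["primary"]["list_short"] == ["p"]
assert spec["functions"]["modifier"]["list_long"] == ["variant"]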
def _EncodeString(self, string):
try:
encoded_string = string.encode(self._encoding, errors=self._errors)
except UnicodeEncodeError:
if self._errors == 'strict':
logging.error(
'Unable to properly write output due to encoding error. '
'Switching to error tolerant encoding which can result in '
'non Basic Latin (C0) characters to be replaced with "?" or '
'"\\ufffd".')
self._errors = 'replace'
encoded_string = string.encode(self._encoding, errors=self._errors)
return encoded_string
|
Encodes the string.
Args:
string (str): string to encode.
Returns:
bytes: encoded string.
|
juraj-google-style
|
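The error-tolerant fallback it switches to is plain `str.encode` behavior, for example:
# After the switch to errors='replace', unsupported characters become '?':
assert 'r\u00e9sum\u00e9'.encode('ascii', errors='replace') == b'r?sum?'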
def forward_ports(remote_host, local_host, local_listen_ports,
remote_listen_ports):
if ":" in local_host and not local_host.startswith("["):
local_host = "[%s]" % local_host
ssh = whichcraft.which("ssh") or whichcraft.which("plink")
if not ssh:
raise ValueError("Couldn't find an ssh client.")
args = [ssh, remote_host]
for local_port in local_listen_ports:
args += ["-L", "%s:%s:%s:%s" % (local_host, local_port,
local_host, local_port)]
for remote_port in remote_listen_ports:
args += ["-R", "%s:%s:%s:%s" % (local_host, remote_port,
local_host, remote_port)]
logging.info("SSH port forwarding: %s", " ".join(args))
return subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
stdin=subprocess.PIPE, close_fds=(os.name == "posix"))
|
Forwards ports such that multiplayer works between machines.
Args:
remote_host: Where to ssh to.
local_host: "127.0.0.1" or "::1".
local_listen_ports: Which ports to listen on locally to forward remotely.
remote_listen_ports: Which ports to listen on remotely to forward locally.
Returns:
The ssh process.
Raises:
ValueError: if it can't find ssh.
|
juraj-google-style
|
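For reference, a sketch of the command line this assembles (the host name here is a placeholder):
# forward_ports("example.com", "127.0.0.1", [1234], [5678]) spawns roughly:
#   ssh example.com -L 127.0.0.1:1234:127.0.0.1:1234 \
#                   -R 127.0.0.1:5678:127.0.0.1:5678
# IPv6 local_host values such as "::1" are first bracketed to "[::1]".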
def generate_build(self, image, targetname, rebuilds=None, cache_repo='', cache_tag='', buildargs=None, **kwargs):
from_image = self.get_external_base_image(image)
if (cache_repo or cache_tag):
cache_from = utils.generate_name(image, cache_repo, cache_tag)
else:
cache_from = None
if (from_image is None):
raise errors.NoBaseError(("No base image found in %s's dependencies" % image))
if isinstance(from_image, ExternalDockerfile):
build_first = from_image
base_image = from_image.tag
else:
base_image = from_image
build_first = None
build_steps = []
istep = 0
sourceimages = set()
if (rebuilds is None):
rebuilds = []
else:
rebuilds = set(rebuilds)
for base_name in self.sort_dependencies(image):
istep += 1
buildname = ('dmkbuild_%s_%d' % (image, istep))
secret_files = self.ymldefs[base_name].get('secret_files', None)
squash = self.ymldefs[base_name].get('squash', bool(secret_files))
build_steps.append(dockermake.step.BuildStep(base_name, base_image, self.ymldefs[base_name], buildname, bust_cache=(base_name in rebuilds), build_first=build_first, cache_from=cache_from, buildargs=buildargs, squash=squash, secret_files=secret_files))
base_image = buildname
build_first = None
for (sourceimage, files) in iteritems(self.ymldefs[base_name].get('copy_from', {})):
sourceimages.add(sourceimage)
for (sourcepath, destpath) in iteritems(files):
istep += 1
buildname = ('dmkbuild_%s_%d' % (image, istep))
build_steps.append(dockermake.step.FileCopyStep(sourceimage, sourcepath, destpath, base_name, base_image, self.ymldefs[base_name], buildname, bust_cache=(base_name in rebuilds), build_first=build_first, cache_from=cache_from))
base_image = buildname
sourcebuilds = [self.generate_build(img, img, cache_repo=cache_repo, cache_tag=cache_tag, **kwargs) for img in sourceimages]
return builds.BuildTarget(imagename=image, targetname=targetname, steps=build_steps, sourcebuilds=sourcebuilds, from_image=from_image, **kwargs)
|
Separate the build into a series of one or more intermediate steps.
Each specified build directory gets its own step
Args:
image (str): name of the image as defined in the dockermake.py file
targetname (str): name to tag the final built image with
rebuilds (List[str]): list of image layers to rebuild (i.e., without docker's cache)
cache_repo (str): repository to get images for caches in builds
cache_tag (str): tags to use from repository for caches in builds
buildargs (dict): build-time dockerfile arguments
**kwargs (dict): extra keyword arguments for the BuildTarget object
|
codesearchnet
|
def _collect_paths(element):
output = []
path = vectors.el_to_path_vector(element)
root = path[0]
params = (element.params if element.params else None)
match = root.find(element.getTagName(), params)
if (len(match) == 1):
output.append(PathCall('find', 0, [element.getTagName(), params]))
output.extend(path_patterns.neighbours_pattern(element))
output.extend(path_patterns.predecesors_pattern(element, root))
index_backtrack = []
last_index_backtrack = []
params_backtrack = []
last_params_backtrack = []
for el in reversed(path):
if (not el.parent):
continue
tag_name = el.getTagName()
match = el.parent.wfind(tag_name).childs
index = match.index(el)
index_backtrack.append(PathCall('wfind', index, [tag_name]))
last_index_backtrack.append(PathCall('wfind', (index - len(match)), [tag_name]))
if el.params:
match = el.parent.wfind(tag_name, el.params).childs
index = match.index(el)
params_backtrack.append(PathCall('wfind', index, [tag_name, el.params]))
last_params_backtrack.append(PathCall('wfind', (index - len(match)), [tag_name, el.params]))
else:
params_backtrack.append(PathCall('wfind', index, [tag_name]))
last_params_backtrack.append(PathCall('wfind', (index - len(match)), [tag_name]))
output.extend([Chained(reversed(params_backtrack)), Chained(reversed(last_params_backtrack)), Chained(reversed(index_backtrack)), Chained(reversed(last_index_backtrack))])
return output
|
Collect all possible paths which lead to `element`.
The function returns the standard path from the root element to `element`, a
reverse path which uses negative indexes, and some pattern matches, like
"this is the element which has a neighbour with id 7" and so on.
Args:
element (obj): HTMLElement instance.
Returns:
list: List of :class:`.PathCall` and :class:`.Chained` objects.
|
codesearchnet
|
def max_error(grad1, grad2):
error = 0
for j_t, j_n in zip(grad1, grad2):
if j_t.size or j_n.size:
error = np.maximum(error, np.fabs(j_t - j_n).max())
return error
|
Computes maximum elementwise gap.
Computes the maximum elementwise gap between two lists of tensors of the same
shape.
Args:
grad1: a list of tensors.
grad2: a list of tensors with the same shape as grad1.
Returns:
The maximum elementwise gap between the two.
|
github-repos
|
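A small worked example (values chosen to avoid float noise):
import numpy as np

g1 = [np.array([1.0, 2.0]), np.array([[0.5]])]
g2 = [np.array([1.0, 2.5]), np.array([[0.25]])]
assert max_error(g1, g2) == 0.5  # max of |2.0 - 2.5| and |0.5 - 0.25|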
def correlation_matrix(df):
columns = df.columns.tolist()
corr = pd.DataFrame(np.corrcoef(df, rowvar=0), columns=columns, index=columns)
return corr
|
Returns a pandas DataFrame with the pair-wise correlations of the columns.
Args:
df: pandas DataFrame with columns to run diagnostics on
|
codesearchnet
|
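Example:
import pandas as pd

df = pd.DataFrame({"x": [1.0, 2.0, 3.0], "y": [2.0, 4.0, 6.0], "z": [3.0, 2.0, 1.0]})
corr = correlation_matrix(df)
# corr.loc["x", "y"] ~ 1.0 (y = 2x); corr.loc["x", "z"] ~ -1.0 (z = 4 - x)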
def SetDocumentType(self, document_type):
self._document_type = document_type
logger.debug('Elasticsearch document type: {0:s}'.format(document_type))
|
Sets the document type.
Args:
document_type (str): document type.
|
juraj-google-style
|
def __init__(self, resolver_context):
super(NTFSFile, self).__init__(resolver_context)
self._file_system = None
self._fsntfs_data_stream = None
self._fsntfs_file_entry = None
|
Initializes a file-like object.
Args:
resolver_context (Context): resolver context.
|
juraj-google-style
|
def section(self, regex, config='running_config'):
if (config in ['running_config', 'startup_config']):
config = getattr(self, config)
match = re.search(regex, config, re.M)
if (not match):
raise TypeError('config section not found')
(block_start, line_end) = match.regs[0]
match = re.search('^[^\\s]', config[line_end:], re.M)
if (not match):
raise TypeError('could not find end block')
(_, block_end) = match.regs[0]
block_end = (line_end + block_end)
return config[block_start:block_end]
|
Returns a section of the config
Args:
regex (str): A valid regular expression used to select sections
of configuration to return
config (str): The configuration to return. Valid values for config
are "running_config" or "startup_config". The default value
is "running_config"
Returns:
The configuration section as a string object.
|
codesearchnet
|
def update_state(self, y_true, y_pred, sample_weight=None):
y_true = math_ops.cast(y_true, self._dtype)
y_pred = math_ops.cast(y_pred, self._dtype)
if y_pred.shape.ndims > 1:
y_pred = array_ops.reshape(y_pred, [-1])
if y_true.shape.ndims > 1:
y_true = array_ops.reshape(y_true, [-1])
if sample_weight is not None:
sample_weight = math_ops.cast(sample_weight, self._dtype)
if sample_weight.shape.ndims > 1:
sample_weight = array_ops.reshape(sample_weight, [-1])
current_cm = confusion_matrix.confusion_matrix(y_true, y_pred, self.num_classes, weights=sample_weight, dtype=self._dtype)
return self.total_cm.assign_add(current_cm)
|
Accumulates the confusion matrix statistics.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Defaults to 1. Can be a
`Tensor` whose rank is either 0, or the same rank as `y_true`, and must
be broadcastable to `y_true`.
Returns:
Update op.
|
github-repos
|
def word_ngrams(s, n=3, token_fn=tokens.on_whitespace):
tokens = token_fn(s)
return __ngrams(tokens, n=min(len(tokens), n))
|
Word-level n-grams in a string
By default, whitespace is assumed to be a word boundary.
>>> ng.word_ngrams('This is not a test!')
[('This', 'is', 'not'), ('is', 'not', 'a'), ('not', 'a', 'test!')]
If the sequence's length is less than or equal to n, the n-grams are
simply the sequence itself.
>>> ng.word_ngrams('Test!')
[('Test!')]
Args:
s: a string
n: maximum n-gram length (defaults to 3)
token_fn: function used to split `s` into tokens (defaults to whitespace tokenization)
Returns:
list: tuples of word-level n-grams
|
juraj-google-style
|
async def get_pushlog_info(decision_link):
source_env_prefix = decision_link.context.config['source_env_prefix']
repo = get_repo(decision_link.task, source_env_prefix)
rev = get_revision(decision_link.task, source_env_prefix)
context = decision_link.context
pushlog_url = context.config['pushlog_url'].format(
repo=repo, revision=rev
)
log.info("Pushlog url {}".format(pushlog_url))
file_path = os.path.join(context.config["work_dir"], "{}_push_log.json".format(decision_link.name))
pushlog_info = await load_json_or_yaml_from_url(
context, pushlog_url, file_path, overwrite=False
)
if len(pushlog_info['pushes']) != 1:
log.warning("Pushlog error: expected a single push at {} but got {}!".format(
pushlog_url, pushlog_info['pushes']
))
return pushlog_info
|
Get pushlog info for a decision LinkOfTrust.
Args:
decision_link (LinkOfTrust): the decision link to get pushlog info about.
Returns:
dict: pushlog info.
|
juraj-google-style
|
def getSimilarTerms(self, textOrFingerprint):
expression = self._createDictionary(textOrFingerprint)
terms = self._fullClient.getSimilarTermsForExpression(json.dumps(expression), maxResults=20)
return [t.term for t in terms]
|
Get the similar terms for a given text or fingerprint
Args:
textOrFingerprint, str OR list of integers
Returns:
list of str: the 20 most similar terms
Raises:
CorticalioException: if the request was not successful
|
juraj-google-style
|
def _load_element_spec(path: str) -> Any:
dataset_spec_filename = os.path.join(path, dataset_ops.DATASET_SPEC_FILENAME)
if not gfile.Exists(dataset_spec_filename):
raise errors.NotFoundError(node_def=None, op=None, message=f'tf.data snapshot element_spec file not found: {dataset_spec_filename}.')
with gfile.GFile(dataset_spec_filename, 'rb') as f:
encoded_spec = f.read()
try:
return _parse_element_spec(encoded_spec)
except nested_structure_coder.NotEncodableError as e:
raise errors.NotFoundError(node_def=None, op=None, message=f'tf.data snapshot element_spec file not found or invalid: {dataset_spec_filename}.') from e
|
Loads the dataset element spec.
Args:
path: Base path of the snapshot.
Returns:
Dataset element_spec.
Raises:
NotFoundError if the element spec file does not exist or cannot be decoded.
|
github-repos
|
def start_server_on_separate_thread(dump_to_filesystem=True, server_start_delay_sec=0.0, poll_server=False, blocking=True, toggle_watch_on_core_metadata=None):
server_port = portpicker.pick_unused_port()
debug_server_url = 'grpc://localhost:%d' % server_port
server_dump_dir = tempfile.mkdtemp() if dump_to_filesystem else None
server = EventListenerTestServicer(server_port=server_port, dump_dir=server_dump_dir, toggle_watch_on_core_metadata=toggle_watch_on_core_metadata)
def delay_then_run_server():
time.sleep(server_start_delay_sec)
server.run_server(blocking=blocking)
server_thread = threading.Thread(target=delay_then_run_server)
server_thread.start()
if poll_server:
if not _poll_server_till_success(50, 0.2, debug_server_url, server_dump_dir, server, gpu_memory_fraction=0.1):
raise ValueError('Failed to start test gRPC debug server at port %d' % server_port)
server.clear_data()
return (server_port, debug_server_url, server_dump_dir, server_thread, server)
|
Create a test gRPC debug server and run on a separate thread.
Args:
dump_to_filesystem: (bool) whether the debug server will dump debug data
to the filesystem.
server_start_delay_sec: (float) amount of time (in sec) to delay the server
start up for.
poll_server: (bool) whether the server will be polled till success on
startup.
blocking: (bool) whether the server should be started in a blocking mode.
toggle_watch_on_core_metadata: A list of
(node_name, output_slot, debug_op) tuples to toggle the
watchpoint status during the on_core_metadata calls (optional).
Returns:
server_port: (int) Port on which the server runs.
debug_server_url: (str) grpc:// URL to the server.
server_dump_dir: (str) The debug server's dump directory.
server_thread: The server Thread object.
server: The `EventListenerTestServicer` object.
Raises:
ValueError: If polling the server process for ready state is not successful
within maximum polling count.
|
github-repos
|
def _register_preallocated_ips(self, conf):
for (dom_name, dom_spec) in conf.get('domains', {}).items():
for (idx, nic) in enumerate(dom_spec.get('nics', [])):
if ('ip' not in nic):
continue
net = conf['nets'][nic['net']]
if self._subnet_store.is_leasable_subnet(net['gw']):
nic['ip'] = _create_ip(net['gw'], int(nic['ip'].split('.')[(- 1)]))
dom_name = dom_spec['name']
if (not _ip_in_subnet(net['gw'], nic['ip'])):
raise RuntimeError(("%s:nic%d's IP [%s] is outside the subnet [%s]" % (dom_name, dom_spec['nics'].index(nic), nic['ip'], net['gw'])))
if (nic['ip'] in net['mapping'].values()):
conflict_list = [name for (name, ip) in net['mapping'].items() if (ip == nic['ip'])]
raise RuntimeError(('IP %s was assigned to several domains: %s %s' % (nic['ip'], dom_name, ' '.join(conflict_list))))
self._add_nic_to_mapping(net, dom_spec, nic)
|
Parse all the domains in the given conf and preallocate all their ips
into the networks mappings, raising exception on duplicated ips or ips
out of the allowed ranges
See Also:
:mod:`lago.subnet_lease`
Args:
conf (dict): Configuration spec to parse
Returns:
None
Raises:
RuntimeError: if there are any duplicated ips or any ip out of the
allowed range
|
codesearchnet
|
def make_es_id(uri):
try:
uri = uri.clean_uri
except AttributeError:
pass
return sha1(uri.encode()).hexdigest()
|
Creates the id based off of the uri value
Args:
-----
uri: the uri to convert to an elasticsearch id
|
juraj-google-style
|
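Example of the resulting id (plain hashlib, so it is stable across runs; the URI is a placeholder):
from hashlib import sha1

uri = "http://example.org/resource/1"
assert make_es_id(uri) == sha1(uri.encode()).hexdigest()  # 40-char hex digest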
def extract_ranges(index_list, range_size_limit=32):
if not index_list:
return [], []
first = index_list[0]
last = first
ranges = []
singles = []
for i in index_list[1:]:
if i == last + 1 and (last - first) <= range_size_limit:
last = i
else:
if last > first:
ranges.append([first, last])
else:
singles.append(first)
first = i
last = i
if last > first:
ranges.append([first, last])
else:
singles.append(first)
return ranges, singles
|
Extract consecutive ranges and singles from index_list.
Args:
index_list: List of monotone increasing non-negative integers.
range_size_limit: Largest size range to return. If a larger
consecutive range exists it will be returned as multiple
ranges.
Returns:
ranges, singles where ranges is a list of [first, last] pairs of
consecutive elements in index_list, and singles is all of the
other elements, in original order.
|
juraj-google-style
|
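Example:
ranges, singles = extract_ranges([1, 2, 3, 7, 9, 10])
assert ranges == [[1, 3], [9, 10]]
assert singles == [7]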
def SetCredential(self, path_spec, identifier, data):
supported_credentials = manager.CredentialsManager.GetCredentials(path_spec)
if identifier not in supported_credentials.CREDENTIALS:
raise KeyError((
'Unsupported credential: {0:s} for path specification type: '
'{1:s}').format(identifier, path_spec.type_indicator))
credentials = self._credentials_per_path_spec.get(path_spec.comparable, {})
credentials[identifier] = data
self._credentials_per_path_spec[path_spec.comparable] = credentials
|
Sets a specific credential for the path specification.
Args:
path_spec (PathSpec): path specification.
identifier (str): credential identifier.
data (object): credential data.
Raises:
KeyError: if the credential is not supported by the path specification
type.
|
juraj-google-style
|
def files_comments_delete(self, *, file: str, id: str, **kwargs) -> SlackResponse:
kwargs.update({'file': file, 'id': id})
return self.api_call('files.comments.delete', json=kwargs)
|
Deletes an existing comment on a file.
Args:
file (str): The file id. e.g. 'F1234467890'
id (str): The file comment id. e.g. 'Fc1234567890'
|
codesearchnet
|
def _build_graph(self, tags):
graph = SimpleGraph()
for tag_index in xrange(len(tags)):
for entity_index in xrange(len(tags[tag_index].get('entities'))):
a_entity_name = graph_key_from_tag(tags[tag_index], entity_index)
tokens = self.tokenizer.tokenize(tags[tag_index].get('entities', [])[entity_index].get('match'))
for tag in tags[tag_index + 1:]:
start_token = tag.get('start_token')
if start_token >= tags[tag_index].get('start_token') + len(tokens):
for b_entity_index in xrange(len(tag.get('entities'))):
b_entity_name = graph_key_from_tag(tag, b_entity_index)
graph.add_edge(a_entity_name, b_entity_name)
return graph
|
Builds a graph from the entities included in the tags.
Note this is used internally.
Args:
tags (list): A list of the tags to include in graph
Returns:
graph : this is the resulting graph of the tagged entities.
|
juraj-google-style
|
def AddArguments(cls, argument_group):
argument_group.add_argument(
'--user', dest='username', type=str, action='store',
default=cls._DEFAULT_USERNAME, metavar='USERNAME', required=False,
help='The username used to connect to the database.')
argument_group.add_argument(
'--password', dest='password', type=str, action='store',
default=cls._DEFAULT_PASSWORD, metavar='PASSWORD', help=(
'The password for the database user.'))
argument_group.add_argument(
'--db_name', '--db-name', dest='db_name', action='store',
type=str, default=cls._DEFAULT_NAME, required=False, help=(
'The name of the database to connect to.'))
server_config.ServerArgumentsHelper.AddArguments(argument_group)
|
Adds command line arguments the helper supports to an argument group.
This function takes an argument parser or an argument group object and adds
to it all the command line arguments this helper supports.
Args:
argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):
argparse group.
|
juraj-google-style
|
def _ParseFileVersion(file_version):
tokens = file_version.split('brain.Event:')
try:
return float(tokens[(- 1)])
except ValueError:
logger.warn('Invalid event.proto file_version. Defaulting to use of out-of-order event.step logic for purging expired events.')
return (- 1)
|
Convert the string file_version in event.proto into a float.
Args:
file_version: String file_version from event.proto
Returns:
Version number as a float.
|
codesearchnet
|
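Example (within the module, where `logger` is defined):
assert _ParseFileVersion('brain.Event:2') == 2.0
assert _ParseFileVersion('not-a-version') == -1  # logs a warning, returns -1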
def sync(self, raw_data, row_change_callback=None):
return self._update(raw_data, row_change_callback, delete_rows=True)
|
Equivalent to the inject method but will delete rows from the
google spreadsheet if their key is not found in the input (raw_data)
dictionary.
Args:
raw_data (dict): See inject method
row_change_callback (Optional) (func): See inject method
Returns:
UpdateResults (object): See inject method
|
juraj-google-style
|
def transpose(self, permutation: Optional[List[int]]=None) -> 'TensorFluent':
if (permutation == []):
return self
t = tf.transpose(self.tensor, permutation)
scope = self.scope.as_list()
batch = self.batch
return TensorFluent(t, scope, batch=batch)
|
Returns a TensorFluent for the transpose operation with given `permutation`.
Args:
permutation: The output's shape permutation.
Returns:
A TensorFluent wrapping the transpose operation.
|
codesearchnet
|
def __init__(self, wait_until_step):
self._wait_until_step = wait_until_step
|
Initializes a `GlobalStepWaiterHook`.
Args:
wait_until_step: an `int`, the global step to wait for before proceeding.
|
github-repos
|
def add_block_parser(subparsers, parent_parser):
parser = subparsers.add_parser('block', description='Provides subcommands to display information about the blocks in the current blockchain.', help='Displays information on blocks in the current blockchain')
grand_parsers = parser.add_subparsers(title='subcommands', dest='subcommand')
grand_parsers.required = True
description = 'Displays information for all blocks on the current blockchain, including the block id and number, public keys of all signers, and the number of transactions and batches.'
list_parser = grand_parsers.add_parser('list', help='Displays information for all blocks on the current blockchain', description=description, parents=[base_http_parser(), base_list_parser()], formatter_class=argparse.RawDescriptionHelpFormatter)
list_parser.add_argument('-n', '--count', default=100, type=int, help='the number of blocks to list')
description = 'Displays information about the specified block on the current blockchain'
show_parser = grand_parsers.add_parser('show', help=description, description=(description + '.'), parents=[base_http_parser(), base_show_parser()], formatter_class=argparse.RawDescriptionHelpFormatter)
show_parser.add_argument('block_id', type=str, help='id (header_signature) of the block')
|
Adds argument parsers for the block list and block show commands
Args:
subparsers: Add parsers to this subparser object
parent_parser: The parent argparse.ArgumentParser object
|
codesearchnet
|
def _GetDistinctValues(self, field_name):
self._cursor.execute(
'SELECT {0:s}, COUNT({0:s}) FROM log2timeline GROUP BY {0:s}'.format(
field_name))
result = {}
row = self._cursor.fetchone()
while row:
if row[0]:
result[row[0]] = row[1]
row = self._cursor.fetchone()
return result
|
Query database for unique field types.
Args:
field_name (str): name of the field to retrieve.
Returns:
dict[str, int]: counts of field types by name.
|
juraj-google-style
|
def configure(screen_name=None, config_file=None, app=None, **kwargs):
dirs = kwargs.pop('default_directories', None)
bases = kwargs.pop('default_bases', None)
file_config = {}
if (config_file is not False):
config_file = find_file(config_file, dirs, bases)
file_config = parse(config_file)
config = {k: v for (k, v) in file_config.items() if (k not in ('apps', 'users'))}
user_conf = file_config.get('users', {}).get(screen_name, {})
app = (app or user_conf.get('app'))
app_conf = file_config.get('apps', {}).get(app, {})
config.update(app_conf)
config.update(user_conf)
config.update({k: v for (k, v) in kwargs.items() if (v is not None)})
return config
|
Set up a config dictionary using a bots.yaml config file and optional keyword args.
Args:
screen_name (str): screen_name of user to search for in config file
config_file (str): Path to read for the config file
app (str): Name of the app to look for in the config file. Defaults to the one set in users.{screen_name}.
default_directories (str): Directories to read for the bots.yaml/json file. Defaults to CONFIG_DIRS.
default_bases (str): File names to look for in the directories. Defaults to CONFIG_BASES.
|
codesearchnet
|
def _ReplaceByOuterIfNecessary(self, item, substitutions):
containing_union = self._AllContaining(item.type_param)
if not containing_union:
return [item]
class_type_parameters = [type_param for type_param in containing_union if self.IsClassTypeParameter(type_param)]
if class_type_parameters:
substitutions[item.type_param] = pytd_utils.JoinTypes(class_type_parameters)
return []
else:
return [item]
|
Potentially replace a function type param with a class type param.
Args:
item: A pytd.TemplateItem
substitutions: A dictionary to update with what we replaced.
Returns:
Either [item] or [].
|
github-repos
|
def unitized(unit):
def wrap(f):
def wrapped_f(*args, **kwargs):
val = f(*args, **kwargs)
unit_type = _UNAME2UTYPE[unit]
if (isinstance(val, FloatWithUnit) or isinstance(val, ArrayWithUnit)):
return val.to(unit)
elif isinstance(val, collections.abc.Sequence):
return val.__class__([FloatWithUnit(i, unit_type=unit_type, unit=unit) for i in val])
elif isinstance(val, collections.abc.Mapping):
for (k, v) in val.items():
val[k] = FloatWithUnit(v, unit_type=unit_type, unit=unit)
elif isinstance(val, numbers.Number):
return FloatWithUnit(val, unit_type=unit_type, unit=unit)
elif (val is None):
pass
else:
raise TypeError(("Don't know how to assign units to %s" % str(val)))
return val
return wrapped_f
return wrap
|
Useful decorator to assign units to the output of a function. You can also
use it to standardize the output units of a function that already returns
a FloatWithUnit or ArrayWithUnit. For sequences, all values in the sequences
are assigned the same unit. It works with Python sequences only. The creation
of numpy arrays loses all unit information. For mapping types, the values
are assigned units.
Args:
unit: Specific unit (eV, Ha, m, ang, etc.).
Example usage::
@unitized(unit="kg")
def get_mass():
return 123.45
|
codesearchnet
|
def get_vpc_id(account, region):
url = '{0}/networks/aws'.format(API_URL)
response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
if (not response.ok):
raise SpinnakerVPCNotFound(response.text)
vpcs = response.json()
for vpc in vpcs:
LOG.debug('VPC: %(name)s, %(account)s, %(region)s => %(id)s', vpc)
if (('name' in vpc) and all([(vpc['name'] == 'vpc'), (vpc['account'] == account), (vpc['region'] == region)])):
LOG.info('Found VPC ID for %s in %s: %s', account, region, vpc['id'])
vpc_id = vpc['id']
break
else:
LOG.fatal('VPC list: %s', vpcs)
raise SpinnakerVPCIDNotFound('No VPC available for {0} [{1}].'.format(account, region))
return vpc_id
|
Get VPC ID configured for ``account`` in ``region``.
Args:
account (str): AWS account name.
region (str): Region name, e.g. us-east-1.
Returns:
str: VPC ID for the requested ``account`` in ``region``.
Raises:
:obj:`foremast.exceptions.SpinnakerVPCIDNotFound`: VPC ID not found for
``account`` in ``region``.
:obj:`foremast.exceptions.SpinnakerVPCNotFound`: Spinnaker has no VPCs
configured.
|
codesearchnet
|
def imsave(path, img, channel_first=False, as_uint16=False, auto_scale=True):
img = _imsave_before(img, channel_first, auto_scale)
if ((img.dtype == np.uint16) or as_uint16):
raise ValueError('Pillow only supports saving uint8 images. Cast img to uint8. If you want to save the image as uint16, install pypng or cv2, and nnabla.utils.image_utils will automatically change backend to use these modules.')
if (auto_scale and (img.dtype != np.uint8)):
img = (img * 255).astype(np.uint8)
Image.fromarray(img).save(path)
|
Save image by pillow module.
Currently, pillow supports only uint8 to save.
Args:
path (str): output filename
img (numpy.ndarray): Image array to save. Image shape is considered as (height, width, channel) by default.
channel_first (bool):
This argument specifies the shape of img is whether (height, width, channel) or (channel, height, width).
Default value is False, which means the img shape is considered as (height, width, channel)
as_uint16 (bool):
In this backend, this argument is always False because pillow does not support uint16.
If True, exception will be raised.
auto_scale (bool) :
Whether upscale pixel values or not.
If you want to save float image, this argument must be True.
In pillow backend, only float ([0, 1]) to uint8 ([0, 255]) is supported.
|
codesearchnet
|
def delete_rule(name, localport=None, protocol=None, dir=None, remoteip=None):
ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''}
if __salt__['firewall.rule_exists'](name):
ret['changes'] = {'delete rule': name}
else:
ret['comment'] = 'A rule with that name does not exist'
return ret
if __opts__['test']:
ret['result'] = ((not ret['changes']) or None)
ret['comment'] = ret['changes']
ret['changes'] = {}
return ret
try:
__salt__['firewall.delete_rule'](name, localport, protocol, dir, remoteip)
except CommandExecutionError:
ret['comment'] = 'Could not delete rule'
return ret
|
Delete an existing firewall rule identified by name and optionally by ports,
protocols, direction, and remote IP.
.. versionadded:: Neon
Args:
name (str): The name of the rule to delete. If the name ``all`` is used
you must specify additional parameters.
localport (Optional[str]): The port of the rule. If protocol is not
specified, protocol will be set to ``tcp``
protocol (Optional[str]): The protocol of the rule. Default is ``tcp``
when ``localport`` is specified
dir (Optional[str]): The direction of the rule.
remoteip (Optional[str]): The remote IP of the rule.
Example:
.. code-block:: yaml
delete_smb_port_rule:
win_firewall.delete_rule:
- name: SMB (445)
|
codesearchnet
|
def _get_apis(self, apis):
ret = []
for data in apis:
ret.append(SpecificationAPI(specification=self, data=data))
return sorted(ret, key=(lambda x: x.rest_name[1:]))
|
Process the apis for the given specification model.
Args:
apis: the list of apis available for the current model
|
codesearchnet
|
def snapshot(self, filename='tmp.png'):
if (not filename):
filename = 'tmp.png'
if self.handle:
try:
screenshot(filename, self.handle)
except win32gui.error:
self.handle = None
screenshot(filename)
else:
screenshot(filename)
img = aircv.imread(filename)
os.remove(filename)
return img
|
Take a screenshot and save it to `tmp.png` filename by default
Args:
filename: name of file where to store the screenshot
Returns:
the screenshot image as a numpy array (read back via `aircv.imread`)
|
codesearchnet
|
def add_pop_block_targets(bytecode: list[opcodes.Opcode]) -> None:
if not bytecode:
return
for op in bytecode:
op.block_target = None
setup_except_op = (opcodes.SETUP_FINALLY, opcodes.SETUP_EXCEPT_311)
todo = [(bytecode[0], ())]
seen = set()
while todo:
op, block_stack = todo.pop()
if op in seen:
continue
else:
seen.add(op)
if isinstance(op, opcodes.POP_BLOCK):
assert block_stack, 'POP_BLOCK without block.'
op.block_target = block_stack[-1].target
block_stack = block_stack[0:-1]
elif isinstance(op, opcodes.RAISE_VARARGS):
for b in reversed(block_stack):
if isinstance(b, setup_except_op):
op.block_target = b.target
break
elif isinstance(op, opcodes.BREAK_LOOP):
for i in reversed(range(len(block_stack))):
b = block_stack[i]
if isinstance(b, opcodes.SETUP_LOOP):
op.block_target = b.target
assert b.target != op
todo.append((op.block_target, block_stack[0:i]))
break
elif isinstance(op, setup_except_op):
todo.append((op.target, block_stack))
block_stack += (op,)
elif op.pushes_block():
assert op.target, f'{op.name} without target'
block_stack += (op,)
elif op.does_jump() and op.target:
if op.push_exc_block:
setup_op = op.target
while not isinstance(setup_op, setup_except_op):
setup_op = setup_op.prev
block_stack += (setup_op,)
todo.append((op.target, block_stack))
if not op.no_next():
assert op.next, f'Bad instruction at end of bytecode: {op!r}.'
todo.append((op.next, block_stack))
|
Modifies bytecode so that each POP_BLOCK has a block_target.
This is to achieve better initial ordering of try/except and try/finally code.
try:
i = 1
a[i]
except IndexError:
return i
By connecting a CFG edge from the end of the block (after the "a[i]") to the
except handler, our basic block ordering algorithm knows that the except block
needs to be scheduled last, whereas if there only was an edge before the
"i = 1", it would be able to schedule it too early and thus encounter an
undefined variable. This is only for ordering. The actual analysis of the
code happens later, in vm.py.
Args:
bytecode: An array of bytecodes.
|
github-repos
|
def _open_tracing_interface(self, connection_id, callback):
try:
context = self.connections.get_context(connection_id)
except ArgumentError:
callback(connection_id, self.id, False, "Could not find connection information")
return
self._logger.info("Attempting to enable tracing")
self.connections.begin_operation(connection_id, 'open_interface', callback, self.get_config('default_timeout'))
try:
characteristic = context['services'][TileBusService][TracingChar]
except KeyError:
self.connections.finish_operation(
connection_id,
False,
"Can't find characteristic to open tracing interface"
)
return
self._register_notification_callback(
context['connection_handle'],
characteristic.value_handle,
lambda trace_chunk: self._trigger_callback('on_trace', connection_id, bytearray(trace_chunk))
)
self.bable.set_notification(
enabled=True,
connection_handle=context['connection_handle'],
characteristic=characteristic,
on_notification_set=[self._on_interface_opened, context],
on_notification_received=self._on_notification_received,
timeout=1.0,
sync=False
)
|
Enable the tracing interface for this IOTile device
Args:
connection_id (int): The unique identifier for the connection
callback (callback): Callback to be called when this command finishes
callback(conn_id, adapter_id, success, failure_reason)
|
juraj-google-style
|
def read(self, n):
if self._EOF:
return ''
while (self._seg_index <= self._last_seg_index):
result = self._read_from_seg(n)
if (result != ''):
return result
else:
self._next_seg()
self._EOF = True
return ''
|
Read data from file segs.
Args:
n: max bytes to read. Must be positive.
Returns:
some bytes. May be smaller than n bytes. "" when no more data is left.
|
codesearchnet
|
def set_task(project_, task_):
global project, task
project = project_
task = task_
msg.okay('Set project name to {}.{}'.format(project, task), 2)
|
Sets the active project and task. All subsequent logging will be saved to
the database with that project and task.
Args:
project_ (str): active project name; a project can have multiple tasks.
task_ (str): active task name. Logging is separated at the project and task
level.
|
codesearchnet
|
def disassemble_instruction(self, instruction):
if (not util.is_integer(instruction)):
raise TypeError('Expected instruction to be an integer.')
buf_size = self.MAX_BUF_SIZE
buf = (ctypes.c_char * buf_size)()
res = self._dll.JLINKARM_DisassembleInst(ctypes.byref(buf), buf_size, instruction)
if (res < 0):
raise errors.JLinkException('Failed to disassemble instruction.')
return ctypes.string_at(buf).decode()
|
Disassembles and returns the assembly instruction string.
Args:
self (JLink): the ``JLink`` instance.
instruction (int): the instruction address.
Returns:
A string corresponding to the assembly instruction string at the
given instruction address.
Raises:
JLinkException: on error.
TypeError: if ``instruction`` is not a number.
|
codesearchnet
|
def parseInt(self, words):
words = words.replace(' and ', ' ').lower()
words = re.sub('(\\b)a(\\b)', '\\g<1>one\\g<2>', words)
def textToNumber(s):
'\n Converts raw number string to an integer.\n Based on text2num.py by Greg Hewill.\n '
a = re.split('[\\s-]+', s)
n = 0
g = 0
for w in a:
x = NumberService.__small__.get(w, None)
if (x is not None):
g += x
elif (w == 'hundred'):
g *= 100
else:
x = NumberService.__magnitude__.get(w, None)
if (x is not None):
n += (g * x)
g = 0
else:
raise NumberService.NumberException(('Unknown number: ' + w))
return (n + g)
return textToNumber(words)
|
Parses words to the integer they describe.
Args:
words (str): Description of the integer.
Returns:
An integer representation of the words.
|
codesearchnet
|
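Example, assuming the class's `__small__`/`__magnitude__` lookup tables cover these words:
service = NumberService()
assert service.parseInt('two hundred and six') == 206
assert service.parseInt('three thousand fifty') == 3050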
def is_attribute_supported(self, attribute):
if attribute not in self._attribute_rule_sets.keys():
return False
rule_set = self._attribute_rule_sets.get(attribute)
if self._version >= rule_set.version_added:
return True
else:
return False
|
Check if the attribute is supported by the current KMIP version.
Args:
attribute (string): The name of the attribute
(e.g., 'Cryptographic Algorithm'). Required.
Returns:
bool: True if the attribute is supported by the current KMIP
version. False otherwise.
|
juraj-google-style
|
def __getIp6Address(self, addressType):
addrType = ['link local', 'global', 'rloc', 'mesh EID']
addrs = []
globalAddr = []
linkLocal64Addr = ''
rlocAddr = ''
meshEIDAddr = ''
addrs = self.__sendCommand(WPANCTL_CMD + 'getprop -v IPv6:AllAddresses')
for ip6AddrItem in addrs:
if re.match('\[|\]', ip6AddrItem):
continue
if re.match(WPAN_CARRIER_PROMPT, ip6AddrItem, re.M|re.I):
break
ip6AddrItem = ip6AddrItem.strip()
ip6Addr = self.__stripValue(ip6AddrItem).split(' ')[0]
ip6AddrPrefix = ip6Addr.split(':')[0]
if ip6AddrPrefix == 'fe80':
if ip6Addr.split(':')[4] != '0':
linkLocal64Addr = ip6Addr
elif ip6Addr.startswith(self.meshLocalPrefix):
if ip6Addr.split(':')[4] == '0':
rlocAddr = ip6Addr
else:
meshEIDAddr = ip6Addr
print 'meshEIDAddr:' + meshEIDAddr
else:
if ip6Addr:
print 'globalAddr: ' + ip6Addr
globalAddr.append(ip6Addr)
else:
pass
if addressType == addrType[0]:
return linkLocal64Addr
elif addressType == addrType[1]:
return globalAddr
elif addressType == addrType[2]:
return rlocAddr
elif addressType == addrType[3]:
return meshEIDAddr
else:
pass
|
get specific type of IPv6 address configured on OpenThread_WpanCtl
Args:
addressType: the specific type of IPv6 address
link local: link local unicast IPv6 address that's within one-hop scope
global: global unicast IPv6 address
rloc: mesh local unicast IPv6 address for routing in thread network
mesh EID: mesh Endpoint Identifier
Returns:
IPv6 address string
|
juraj-google-style
|
def get_user_info(self, dn, _connection=None):
return self.get_object(dn=dn, filter=self.config.get('LDAP_USER_OBJECT_FILTER'), attributes=self.config.get('LDAP_GET_USER_ATTRIBUTES'), _connection=_connection)
|
Gets info about a user specified at dn.
Args:
dn (str): The dn of the user to find
_connection (ldap3.Connection): A connection object to use when
searching. If not given, a temporary connection will be
created, and destroyed after use.
Returns:
dict: A dictionary of the user info from LDAP
|
codesearchnet
|
def dbmin20years(self, value=None):
if (value is not None):
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float for field `dbmin20years`'.format(value))
self._dbmin20years = value
|
Corresponds to IDD Field `dbmin20years`
20-year return period values for minimum extreme dry-bulb temperature
Args:
value (float): value for IDD Field `dbmin20years`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
codesearchnet
|
def check_valid_values(function):
def decorated(self, X, *args, **kwargs):
if isinstance(X, pd.DataFrame):
W = X.values
else:
W = X
if (not len(W)):
raise ValueError('Your dataset is empty.')
if (W.dtype not in [np.dtype('float64'), np.dtype('int64')]):
raise ValueError('There are non-numerical values in your data.')
if np.isnan(W).any().any():
raise ValueError('There are nan values in your data.')
return function(self, X, *args, **kwargs)
return decorated
|
Raises an exception if the given values are not supported.
Args:
function (callable): method whose first argument (after ``self``) is a numpy.array-like object.
Returns:
callable: Decorated function
Raises:
ValueError: If there are missing or invalid values or if the dataset is empty.
|
codesearchnet
|
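Example usage on a hypothetical model class:
import numpy as np

class Model:
    @check_valid_values
    def fit(self, X):
        return X.mean()

Model().fit(np.array([[1.0, 2.0]]))        # passes validation
# Model().fit(np.array([[1.0, np.nan]]))   # ValueError: nan values in data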
def create_from_wkt(self, wkt, item_type, ingest_source, **attributes):
geojson = load_wkt(wkt).__geo_interface__
vector = {
'type': "Feature",
'geometry': geojson,
'properties': {
'item_type': item_type,
'ingest_source': ingest_source,
'attributes': attributes
}
}
return self.create(vector)[0]
|
Create a single vector in the vector service
Args:
wkt (str): wkt representation of the geometry
item_type (str): item_type of the vector
ingest_source (str): source of the vector
attributes: a set of key-value pairs of attributes
Returns:
id (str): string identifier of the vector created
|
juraj-google-style
|
async def run_tasks(context):
running_tasks = RunTasks()
context.running_tasks = running_tasks
status = await running_tasks.invoke(context)
context.running_tasks = None
return status
|
Run any tasks returned by claimWork.
Returns the integer status of the task that was run, or None if no task was
run.
args:
context (scriptworker.context.Context): the scriptworker context.
Raises:
Exception: on unexpected exception.
Returns:
int: exit status
None: if no task run.
|
juraj-google-style
|
def reboot(self):
if self.is_bootloader:
self.fastboot.reboot()
return
with self.handle_reboot():
self.adb.reboot()
|
Reboots the device.
Generally one should use this method to reboot the device instead of
directly calling `adb.reboot`. Because this method gracefully handles
the teardown and restoration of running services.
This method is blocking and only returns when the reboot has completed
and the services restored.
Raises:
Error: Waiting for completion timed out.
|
github-repos
|
def __setRouterUpgradeThreshold(self, iThreshold):
    print('call __setRouterUpgradeThreshold')
    try:
        cmd = 'routerupgradethreshold %s' % str(iThreshold)
        print(cmd)
        return self.__sendCommand(cmd) == 'Done'
    except Exception as e:
        ModuleHelper.WriteIntoDebugLogger("setRouterUpgradeThreshold() Error: " + str(e))
|
Set the router upgrade threshold.
Args:
iThreshold: the number of active routers on the Thread network
partition below which a REED may decide to become a Router.
Returns:
True: successfully set the ROUTER_UPGRADE_THRESHOLD
False: failed to set the ROUTER_UPGRADE_THRESHOLD
|
juraj-google-style
|
def make_2d_block_raster_mask(query_shape, memory_flange):
query_triangle = common_layers.ones_matrix_band_part(np.prod(query_shape), np.prod(query_shape), (- 1), 0)
split_query_masks = tf.split(query_triangle, query_shape[0], axis=1)
mask_pieces = [tf.concat([tf.ones([np.prod(query_shape), memory_flange[1]]), split_query_masks[i], tf.zeros([np.prod(query_shape), memory_flange[1]])], axis=1) for i in range(query_shape[0])]
final_mask = tf.concat([tf.ones([np.prod(query_shape), ((query_shape[1] + (2 * memory_flange[1])) * memory_flange[0])]), tf.concat(mask_pieces, axis=1)], axis=1)
return (1.0 - final_mask)
|
Creates a mask for 2d block raster scan.
The query mask can look to the left, top left, top, and top right, but
not to the right. Inside the query, we have the standard raster scan
masking.
Args:
query_shape: A tuple of ints (query_height, query_width)
memory_flange: A tuple of ints
(memory_flange_height, memory_flange_width)
Returns:
A tensor of shape query_size, memory_size
|
codesearchnet
|
def start_app_and_connect(self):
    raise NotImplementedError()
|
Starts the server app on the android device and connects to it.
After this, the self.host_port and self.device_port attributes must be
set.
Must be implemented by subclasses.
Raises:
AppStartError: When the app was not able to be started.
|
github-repos
|
def __init__(self, enum_type, name=None, default=None, choices=None, **kwds):
self._enum_type = enum_type
if default is not None:
self._validate(default)
    if choices is not None:
        for choice in choices:
            self._validate(choice)
super(EnumProperty, self).__init__(name, default=default,
choices=choices, **kwds)
|
Constructor.
Args:
enum_type: A subclass of protorpc.messages.Enum.
name: Optional datastore name (defaults to the property name).
Additional keyword arguments specify the same options as
supported by IntegerProperty.
|
juraj-google-style
|
def primitive_wrapper_from_json_value(self, json_value: Optional[Any], primitive_cls: Type[message.Message], *, default_timezone: str=_primitive_time_utils.SIMPLE_ZULU) -> _primitive_wrappers.PrimitiveWrapper:
    raise NotImplementedError()
|
Parses json_value into a FHIR protobuf primitive and wraps.
The wrapper provides necessary information on how to parse json_value into a
corresponding FHIR protobuf message. Afterwards, this is wrapped to provide
stateful information to the parent parser and/or printer.
Args:
json_value: The FHIR json value to parse and wrap.
primitive_cls: The type of FHIR primitive to parse json_value into.
default_timezone: The default timezone string to use when parsing date/
time-like primitives when there is no timezone information available.
Defaults to 'Z'.
Raises:
ValueError: In the event that primitive_cls is not actually a primitive
FHIR type.
Returns:
A wrapper around an instance of primitive_cls parsed from json_value.
|
github-repos
|
def unlock_kinetis_identified(identity, flags):
if (flags.version_code != identity.version_code):
return False
if (flags.part_no != identity.part_no):
return False
return flags.valid
|
Checks whether the given flags are a valid identity.
Args:
identity (Identity): the identity to validate against
flags (register.IDCodeRegisterFlags): the set idcode flags
Returns:
``True`` if the given ``flags`` correctly identify the debug
interface, otherwise ``False``.
|
codesearchnet
|
def __init__(self, logdir, options=None):
self._logdir = logdir
self._options = options
|
Creates a context manager object for profiler API.
Args:
logdir: profile data will save to this directory.
options: An optional `tf.profiler.experimental.ProfilerOptions` can be
provided to fine tune the profiler's behavior.
|
github-repos
|
def append(self, tp, timestamp_ms, key, value, headers, max_time_to_block_ms, estimated_size=0):
assert isinstance(tp, TopicPartition), 'not TopicPartition'
assert (not self._closed), 'RecordAccumulator is closed'
self._appends_in_progress.increment()
try:
if (tp not in self._tp_locks):
with self._tp_locks[None]:
if (tp not in self._tp_locks):
self._tp_locks[tp] = threading.Lock()
with self._tp_locks[tp]:
dq = self._batches[tp]
if dq:
last = dq[(- 1)]
future = last.try_append(timestamp_ms, key, value, headers)
if (future is not None):
batch_is_full = ((len(dq) > 1) or last.records.is_full())
return (future, batch_is_full, False)
size = max(self.config['batch_size'], estimated_size)
log.debug('Allocating a new %d byte message buffer for %s', size, tp)
buf = self._free.allocate(size, max_time_to_block_ms)
with self._tp_locks[tp]:
assert (not self._closed), 'RecordAccumulator is closed'
if dq:
last = dq[(- 1)]
future = last.try_append(timestamp_ms, key, value, headers)
if (future is not None):
self._free.deallocate(buf)
batch_is_full = ((len(dq) > 1) or last.records.is_full())
return (future, batch_is_full, False)
records = MemoryRecordsBuilder(self.config['message_version'], self.config['compression_attrs'], self.config['batch_size'])
batch = ProducerBatch(tp, records, buf)
future = batch.try_append(timestamp_ms, key, value, headers)
if (not future):
raise Exception()
dq.append(batch)
self._incomplete.add(batch)
batch_is_full = ((len(dq) > 1) or batch.records.is_full())
return (future, batch_is_full, True)
finally:
self._appends_in_progress.decrement()
|
Add a record to the accumulator, return the append result.
The append result will contain the future metadata, and flag for
whether the appended batch is full or a new batch is created
Arguments:
tp (TopicPartition): The topic/partition to which this record is
being sent
timestamp_ms (int): The timestamp of the record (epoch ms)
key (bytes): The key for the record
value (bytes): The value for the record
headers (List[Tuple[str, bytes]]): The header fields for the record
max_time_to_block_ms (int): The maximum time in milliseconds to
block for buffer memory to be available
Returns:
tuple: (future, batch_is_full, new_batch_created)
|
codesearchnet
|
def save_json(py_obj, json_path):
with open(json_path, 'w', encoding='utf-8') as f:
f.write(serialize_to_normalized_pretty_json(py_obj))
|
Serialize a native object to JSON and save it, normalized and pretty-printed,
to a file.
The JSON string is normalized by sorting any dictionary keys.
Args:
py_obj: object
Any object that can be represented in JSON. Some types, such as datetimes are
automatically converted to strings.
json_path: str
File path to which to write the JSON file. The path must exist. The
filename will normally end with ".json".
See Also:
ToJsonCompatibleTypes()
|
codesearchnet
|
def complete(command_line,
current_token,
position,
shell: arg(choices=('bash', 'fish'))):
position = int(position)
tokens = shlex.split(command_line[:position])
all_argv, run_argv, command_argv = run.partition_argv(tokens[1:])
run_args = run.parse_args(run_argv)
module = run_args.get('commands_module')
module = module or DEFAULT_COMMANDS_MODULE
module = normalize_path(module)
try:
collection = Collection.load_from_module(module)
except Exception:
collection = {}
found_command = find_command(collection, tokens) or run
if current_token:
if current_token.startswith('-'):
if current_token not in found_command.option_map:
print_command_options(found_command, current_token)
else:
print_commands(collection, shell)
path = os.path.expanduser(current_token)
path = os.path.expandvars(path)
paths = glob.glob('%s*' % path)
if paths:
for entry in paths:
if os.path.isdir(entry):
print('%s/' % entry)
else:
print(entry)
else:
option = found_command.option_map.get(tokens[-1])
if option and option.takes_value:
if option.choices:
for choice in option.choices:
print(choice)
else:
for entry in os.listdir():
if os.path.isdir(entry):
print('%s/' % entry)
else:
print(entry)
else:
print_command_options(found_command)
print_commands(collection, shell)
|
Find completions for current command.
This assumes that we'll handle all completion logic here and that
the shell's automatic file name completion is disabled.
Args:
command_line: Command line
current_token: Token at cursor
position: Current cursor position
shell: Name of shell
|
juraj-google-style
|
def get_definition_name_from_ref(ref):
    p = re.compile('#/definitions/(.*)')
    definition_name = re.sub(p, r'\1', ref)
return definition_name
|
Get the definition name of the given $ref value(Swagger value).
Args:
ref: ref value (ex: "#/definitions/CustomDefinition")
Returns:
The definition name corresponding to the ref.
|
juraj-google-style
|
def verify_unused_iterator(self, ds_fn, num_outputs, sparse_tensors=False, verify_exhausted=True, assert_items_equal=False):
self.verify_run_with_breaks(ds_fn, [0], num_outputs, sparse_tensors=sparse_tensors, verify_exhausted=verify_exhausted, assert_items_equal=assert_items_equal)
|
Verifies that saving and restoring an unused iterator works.
Args:
ds_fn: 0-argument function that returns a Dataset.
num_outputs: Total number of outputs expected from this Dataset.
sparse_tensors: Whether dataset is built from SparseTensor(s).
verify_exhausted: Whether to verify that the iterator has been exhausted
after producing `num_outputs` elements.
assert_items_equal: Tests the output has the expected elements regardless
of order.
Raises:
AssertionError if any test fails.
|
github-repos
|
def check(self, key, value):
key = key.lower().strip()
    if isinstance(key, bytes):
        key = key.decode("utf-8")
key = self._remove_accents(key)
if self.keyword in key.split():
self.value = value
return True
return False
|
Check whether `key` matches the :attr:`keyword`. If so, set the
:attr:`value` to `value`.
Args:
key (str): Key which will be matched with :attr:`keyword`.
value (str): Value which will be assigned to :attr:`value` if keys
matches.
Returns:
True/False: Whether the key matched :attr:`keyword`.
|
juraj-google-style
|
def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, layer_head_mask: tf.Tensor, is_index_masked: tf.Tensor, is_index_global_attn: tf.Tensor, is_global_attn: bool, training=False):
residual = hidden_states
layer_outputs = self.self_attn([hidden_states, attention_mask, layer_head_mask, is_index_masked, is_index_global_attn, is_global_attn], training=training)
hidden_states = layer_outputs[0]
tf.debugging.assert_equal(shape_list(hidden_states), shape_list(residual), message=f'Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}')
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = self.activation_dropout(hidden_states, training=training)
hidden_states = self.fc2(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
return (hidden_states,) + layer_outputs[1:]
|
Args:
hidden_states (`tf.Tensor`): input to the layer of shape *(batch, seq_len, embed_dim)*
attention_mask (`tf.Tensor`): attention mask of size
*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
*(config.encoder_attention_heads,)*.
|
github-repos
|
def read_passwd_file(pass_file):
with open(pass_file) as fin:
passwd = fin.read().strip()
return passwd
|
Read password from an external file and return it as a string. The file
should contain just a single line. Prevents hard-coding the password
anywhere in this script. IMPORTANT! The password is stored as plain text!
Do NOT use with your personal account!
Args:
pass_file (str): /path/to/pass_file
|
juraj-google-style
|
def _CheckUnresolved(cls):
with cls._unresolved_subjects_lock:
if cls._unresolved_subjects:
msg = ['The following assertions were unresolved. Perhaps you called "AssertThat(thing.IsEmpty())" instead of "AssertThat(thing).IsEmpty()".']
for u in sorted(cls._unresolved_subjects):
msg.append(' * {0}'.format(u))
raise UnresolvedAssertionError('\n'.join(msg))
|
Ensures that all created subjects were eventually resolved.
A subject is considered resolved when at least one proposition has been
executed on it. An unresolved or dangling assertion is almost certainly a
test author error.
Raises:
UnresolvedAssertionError: if any subjects remain unresolved at the time of
this function call.
|
github-repos
|
def add_weights(self, object_name, weights):
if not isinstance(weights, dict):
raise ValueError(f"Argument `weights` should be a dict where keys are weight names (usually '0', '1', etc.) and values are NumPy arrays. Received: type(weights)={type(weights)}")
def add_weight_fn(weights_dict, source_name, target_name=None):
weights_dict[source_name].update(weights)
self._edit_object(add_weight_fn, object_name)
|
Add one or more new weights to an existing object.
Args:
object_name: String, name or path of the
object to add the weights to
(e.g. `"dense_2"` or `"layers/dense_2"`).
weights: Dict mapping weight names to weight
values (arrays),
e.g. `{"0": kernel_value, "1": bias_value}`.
|
github-repos
|
def add_layer(self, label, change_layer=True):
self.layer_stack.insert(self.last_layer() + 1, label)
if change_layer:
self.set_current_layer(self.last_layer())
return None
|
Add new mesh layer to the end of the stack
Args:
label (str): new label for the mesh layer
change_layer (bool): change to the newly created layer
|
juraj-google-style
|
def mtf_transformer_paper_tr(size):
n = 2 ** size
hparams = mtf_transformer_base()
hparams.label_smoothing = 0.1
hparams.batch_size = 128
hparams.d_model = 1024
hparams.d_ff = int(4096 * n)
hparams.num_heads = int(8 * n)
hparams.shared_embedding_and_softmax_weights = False
hparams.learning_rate_decay_steps = 51400
return hparams
|
Config for translation experiments.
Train these on translate_enfr_wmt32k_packed for 154000 steps (3 epochs)
The size parameter is an integer that controls the number of heads and the
size of the feedforward hidden layers. Increasing size by 1 doubles each
of these.
Args:
size: an integer
Returns:
a hparams object
|
juraj-google-style
|
def min_count(self, n=1):
word_count = {w: c for (w, c) in iteritems(self.word_count) if (c >= n)}
return CountedVocabulary(word_count=word_count)
|
Returns a vocabulary after eliminating the words that appear fewer than `n` times.
Args:
n (integer): specifies the minimum word frequency allowed.
|
codesearchnet
|
def get(self):
resource = dict()
resource.update(self._parse_config())
resource.update(self._parse_interfaces())
return resource
|
Returns the Mlag configuration as a resource dict
Returns:
dict: A dict object containing the Mlag resource attributes.
|
codesearchnet
|
def select_update_method(self, force_interactive, force_change_set):
if (self.interactive or force_interactive):
return self.interactive_update_stack
elif force_change_set:
return self.noninteractive_changeset_update
else:
return self.default_update_stack
|
Select the correct update method when updating a stack.
Args:
force_interactive (bool): Whether or not to force interactive mode
no matter what mode the provider is in.
force_change_set (bool): Whether or not to force change set use.
Returns:
function: The correct object method to use when updating.
|
codesearchnet
|
def read(self, size=None):
if not self._is_open:
raise IOError('Not opened.')
if size is None:
size = self._size - self._file_object.tell()
return self._file_object.read(size)
|
Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed.
|
juraj-google-style
|
def get_all(self, include_archived=False):
return [conv for conv in self._conv_dict.values()
if not conv.is_archived or include_archived]
|
Get all the conversations.
Args:
include_archived (bool): (optional) Whether to include archived
conversations. Defaults to ``False``.
Returns:
List of all :class:`.Conversation` objects.
|
juraj-google-style
|
def get_struct(name):
sid = idc.GetStrucIdByName(name)
if (sid == idaapi.BADADDR):
raise exceptions.SarkStructNotFound()
return sid
|
Get a struct by its name.
Args:
name: The name of the struct
Returns:
The struct's id
Raises:
exceptions.SarkStructNotFound: if the struct does not exist.
|
codesearchnet
|
def encode(self, label):
label = super().encode(label)
return torch.tensor(self.stoi.get(label, self.unknown_index))
|
Encodes a ``label``.
Args:
label (object): Label to encode.
Returns:
torch.Tensor: Encoding of the label.
|
juraj-google-style
|
def convert(self, inp):
inp = self._preprocess(inp)
n = NumberService().longestNumber(inp)
units = self.extractUnits(inp)
quantity = pq.Quantity(float(n), units[0])
quantity.units = units[1]
return quantity
|
Converts a string representation of some quantity of units into a
quantities object.
Args:
inp (str): A textual representation of some quantity of units,
e.g., "fifty kilograms".
Returns:
A quantities object representing the described quantity and its
units.
|
codesearchnet
|
def search_env_paths(fname, key_list=None, verbose=None):
import utool as ut
if (key_list is None):
key_list = [key for key in os.environ if (key.find('PATH') > (- 1))]
print(('key_list = %r' % (key_list,)))
found = ut.ddict(list)
for key in key_list:
dpath_list = os.environ[key].split(os.pathsep)
for dpath in dpath_list:
matches = ut.glob(dpath, fname)
found[key].extend(matches)
return dict(found)
|
r"""
Searches your PATH to see if fname exists
Args:
fname (str): file name to search for (can be glob pattern)
CommandLine:
python -m utool search_env_paths --fname msvcr*.dll
python -m utool search_env_paths --fname '*flann*'
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_cplat import * # NOQA
>>> import utool as ut
>>> fname = 'opencv2/highgui/libopencv_highgui.so'
>>> fname = ut.get_argval('--fname', default='*')
>>> print('fname = %r' % (fname,))
>>> key_list = None # ['PATH']
>>> found = search_env_paths(fname, key_list)
>>> print(ut.repr4(found, nl=True, strvals=True))
Ignore:
OpenCV_DIR:PATH={share_opencv}
OpenCV_CONFIG_PATH:FILEPATH={share_opencv}
|
codesearchnet
|
def count_nonzero(x, axis=None):
if any_symbolic_tensors((x,)):
return CountNonzero(axis=axis).symbolic_call(x)
return backend.numpy.count_nonzero(x, axis=axis)
|
Counts the number of non-zero values in `x` along the given `axis`.
If no axis is specified then all non-zeros in the tensor are counted.
Args:
x: Input tensor.
axis: Axis or tuple of axes along which to count the number of
non-zeros. Defaults to `None`.
Returns:
int or tensor of ints.
Examples:
>>> x = keras.ops.array([[0, 1, 7, 0], [3, 0, 2, 19]])
>>> keras.ops.count_nonzero(x)
5
>>> keras.ops.count_nonzero(x, axis=0)
array([1, 1, 2, 1], dtype=int64)
>>> keras.ops.count_nonzero(x, axis=1)
array([2, 3], dtype=int64)
|
github-repos
|
def get_image_features(self, pixel_values_images: torch.FloatTensor, vision_feature_layer: Optional[Union[int, List[int]]]=None, vision_feature_select_strategy: Optional[str]=None):
vision_feature_layer = vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
vision_feature_select_strategy = vision_feature_select_strategy if vision_feature_select_strategy is not None else self.config.vision_feature_select_strategy
if vision_feature_select_strategy not in ['default', 'full']:
raise ValueError(f'Unexpected select feature strategy: {self.config.vision_feature_select_strategy}')
image_outputs = self.image_tower(pixel_values_images, output_hidden_states=True)
if isinstance(vision_feature_layer, int):
image_outputs = image_outputs.hidden_states[vision_feature_layer]
if vision_feature_select_strategy == 'default':
image_outputs = image_outputs[:, 1:]
else:
hs_pool = [image_outputs.hidden_states[layer_idx] for layer_idx in vision_feature_layer]
if vision_feature_select_strategy == 'default':
hs_pool = [hs[:, 1:] for hs in hs_pool]
image_outputs = torch.cat(hs_pool, dim=-1)
image_features = self.multi_modal_projector(image_outputs)
return image_features
|
Obtains image last hidden states from the vision tower and apply multimodal projection.
Args:
pixel_values_images (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`)
The tensors corresponding to the input images.
vision_feature_layer (`Union[int, List[int]]`, *optional*):
The index of the layer to select the vision feature. If multiple indices are provided,
the vision feature of the corresponding indices will be concatenated to form the
vision features.
vision_feature_select_strategy (`str`, *optional*):
The feature selection strategy used to select the vision feature from the vision backbone.
Can be one of `"default"` or `"full"`
Returns:
image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`).
|
github-repos
|