code | docstring | source
|---|---|---|
def keyDown(key, pause=None, _pause=True):
if len(key) > 1:
key = key.lower()
_failSafeCheck()
platformModule._keyDown(key)
_autoPause(pause, _pause)
|
Performs a keyboard key press without the release. This will put that
key in a held down state.
NOTE: For some reason, this does not seem to cause key repeats, as would
happen if a keyboard key were held down on a text field.
Args:
key (str): The key to be pressed down. The valid names are listed in
KEYBOARD_KEYS.
Returns:
None
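Example (an illustrative sketch; assumes the companion `keyUp` and `press`
functions from the same module):
>>> keyDown('shift')  # hold down shift
>>> press('left')     # extend a selection one character to the left
>>> keyUp('shift')    # release shift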
|
juraj-google-style
|
def read(self, size):
now = time.time()
missing_dt = self._sleep_until - now
if missing_dt > 0:
time.sleep(missing_dt)
self._sleep_until = time.time() + self._sleep_time(size)
data = (self._wavep.readframes(size)
if self._wavep
else self._fp.read(size))
if not data:
return b'\x00' * size
return data
|
Read bytes from the stream and block until sample rate is achieved.
Args:
size: number of bytes to read from the stream.
|
juraj-google-style
|
def merge_resources(resource1, resource2):
merged = resource1.copy()
merged.update(resource2)
return merged
|
Updates a copy of resource1 with resource2 values and returns the merged dictionary.
Args:
resource1: original resource
resource2: resource to update resource1
Returns:
dict: merged resource
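Example:
>>> merge_resources({'a': 1, 'b': 2}, {'b': 3, 'c': 4})
{'a': 1, 'b': 3, 'c': 4}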
|
codesearchnet
|
def get_image_features(self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.FloatTensor]=None, vision_feature_layer: int=-1):
vision_feature_layer = vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
patch_attention_mask = self._create_patch_attention_mask(pixel_mask)
image_outputs = self.vision_tower(pixel_values, patch_attention_mask=patch_attention_mask, output_hidden_states=True)
image_attn_mask = None
if patch_attention_mask is not None:
flattened_mask = patch_attention_mask.flatten(1)
image_attn_mask = torch.logical_not(flattened_mask)
selected_image_feature = image_outputs.hidden_states[vision_feature_layer]
image_features = self.multi_modal_projector(selected_image_feature, attn_mask=image_attn_mask)
return image_features
|
Obtains image last hidden states from the vision tower and applies multimodal projection.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, channels, height, width)`):
The tensors corresponding to the input images.
pixel_mask (`torch.FloatTensor`, *optional*):
The tensors corresponding to the input image mask.
vision_feature_layer (`Union[int, List[int]]`, *optional*):
The index of the layer to select the vision feature. If multiple indices are provided,
the vision feature of the corresponding indices will be concatenated to form the
vision features.
Returns:
image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`.
|
github-repos
|
def __init__(self,
url,
params,
name=None,
eta=None,
countdown=None,
parent=None,
headers=None):
self.url = url
self.name = name
self.eta = eta
self.countdown = countdown
self._headers = {
"Content-Type": "application/octet-stream",
self.PAYLOAD_VERSION_HEADER: self.PAYLOAD_VERSION
}
if headers:
self._headers.update(headers)
payload_str = urllib.urlencode(params)
compressed_payload = ""
if len(payload_str) > self.MAX_TASK_PAYLOAD:
compressed_payload = zlib.compress(payload_str)
if not compressed_payload:
self._payload = payload_str
elif len(compressed_payload) < self.MAX_TASK_PAYLOAD:
self._payload = self.PAYLOAD_PARAM + compressed_payload
elif len(compressed_payload) > self.MAX_DB_PAYLOAD:
raise ValueError(
"Payload from %s to big to be stored in database: %s" %
(self.name, len(compressed_payload)))
else:
if not parent:
raise ValueError("Huge tasks should specify parent entity.")
payload_entity = _HugeTaskPayload(payload=compressed_payload,
parent=parent)
payload_key = payload_entity.put()
self._payload = self.PAYLOAD_KEY_PARAM + str(payload_key)
|
Init.
Args:
url: task url in str.
params: a dict from str to str.
name: task name.
eta: task eta.
countdown: task countdown.
parent: parent entity of huge task's payload.
headers: a dict of headers for the task.
Raises:
ValueError: when payload is too big even for datastore, or parent is
not specified when payload is stored in datastore.
|
juraj-google-style
|
def Serialize(self, writer):
self.SerializeUnsigned(writer)
writer.WriteByte(1)
self.Script.Serialize(writer)
|
Serialize full object.
Args:
writer (neo.IO.BinaryWriter):
|
juraj-google-style
|
def post_async(self, path, params=None):
request = Post(self._get_next_id(), path, params)
request.set_callback(self._q.put)
future = self._dispatch_request(request)
return future
|
Asynchronously calls a function on a child block
Args:
path (list): The path to post to
params (dict): parameters for the call
Returns:
Future: a single Future that will resolve to the result
|
codesearchnet
|
def __init__(self, channel):
self.BatchAnnotateImages = channel.unary_unary(
"/google.cloud.vision.v1p4beta1.ImageAnnotator/BatchAnnotateImages",
request_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_image__annotator__pb2.BatchAnnotateImagesRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_image__annotator__pb2.BatchAnnotateImagesResponse.FromString,
)
self.BatchAnnotateFiles = channel.unary_unary(
"/google.cloud.vision.v1p4beta1.ImageAnnotator/BatchAnnotateFiles",
request_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_image__annotator__pb2.BatchAnnotateFilesRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_image__annotator__pb2.BatchAnnotateFilesResponse.FromString,
)
self.AsyncBatchAnnotateImages = channel.unary_unary(
"/google.cloud.vision.v1p4beta1.ImageAnnotator/AsyncBatchAnnotateImages",
request_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_image__annotator__pb2.AsyncBatchAnnotateImagesRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.AsyncBatchAnnotateFiles = channel.unary_unary(
"/google.cloud.vision.v1p4beta1.ImageAnnotator/AsyncBatchAnnotateFiles",
request_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_image__annotator__pb2.AsyncBatchAnnotateFilesRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
|
Constructor.
Args:
channel: A grpc.Channel.
|
juraj-google-style
|
def verify(self, byts, sign):
try:
chosen_hash = c_hashes.SHA256()
hasher = c_hashes.Hash(chosen_hash, default_backend())
hasher.update(byts)
digest = hasher.finalize()
self.publ.verify(sign,
digest,
c_ec.ECDSA(c_utils.Prehashed(chosen_hash))
)
return True
except InvalidSignature:
logger.exception('Error in publ.verify')
return False
|
Verify the signature for the given bytes using the ECC
public key.
Args:
byts (bytes): The data bytes.
sign (bytes): The signature bytes.
Returns:
bool: True if the data was verified, False otherwise.
|
juraj-google-style
|
def get_op(self, id: str, **kwargs: str) -> dict:
path = self._get_path_for_op_id(id)
return self.get_path(path, kwargs)
|
Queries the ESI by looking up an operation id.
Endpoints are cached, so calls to this method
for the same op and args will return the data
from the cache instead of making the API call.
Args:
id: operation id
kwargs: data to populate the endpoint's URL variables
Returns:
ESI data
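Example (a sketch; the operation id and parameter are illustrative):
>>> client.get_op('get_markets_region_id_orders', region_id='10000002')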
|
codesearchnet
|
def _UpdateSudoer(self, user, sudoer=False):
if sudoer:
self.logger.info('Adding user %s to the Google sudoers group.', user)
command = self.gpasswd_add_cmd.format(
user=user, group=self.google_sudoers_group)
else:
self.logger.info('Removing user %s from the Google sudoers group.', user)
command = self.gpasswd_remove_cmd.format(
user=user, group=self.google_sudoers_group)
try:
subprocess.check_call(command.split(' '))
except subprocess.CalledProcessError as e:
self.logger.warning('Could not update user %s. %s.', user, str(e))
return False
else:
    self.logger.debug('Updated user %s in the Google sudoers group.', user)
return True
|
Update sudoer group membership for a Linux user account.
Args:
user: string, the name of the Linux user account.
sudoer: bool, True if the user should be a sudoer.
Returns:
bool, True if user update succeeded.
|
juraj-google-style
|
def dump_values(self, with_defaults=True, dict_cls=dict, flat=False):
values = dict_cls()
if flat:
for (str_path, item) in self.iter_items(recursive=True, key='str_path'):
if item.has_value:
if (with_defaults or (not item.is_default)):
values[str_path] = item.value
else:
for (item_name, item) in self._tree.items():
if is_config_section(item):
section_values = item.dump_values(with_defaults=with_defaults, dict_cls=dict_cls)
if section_values:
values[item_name] = section_values
elif item.has_value:
if (with_defaults or (not item.is_default)):
values[item.name] = item.value
return values
|
Export values of all items contained in this section to a dictionary.
Items with no values set (and no defaults set if ``with_defaults=True``) will be excluded.
Returns:
dict: A dictionary of key-value pairs, where for sections values are dictionaries
of their contents.
|
codesearchnet
|
def get_boards(self, **query_params):
boards = self.get_boards_json(self.base_uri, query_params=query_params)
boards_list = []
for board_json in boards:
boards_list.append(self.create_board(board_json))
return boards_list
|
Get all the boards for this organisation. Returns a list of Board objects.
Returns:
list(Board): The boards attached to this organisation
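Example (a sketch; assumes an authenticated organisation client, and the
board names are illustrative):
>>> [board.name for board in organisation.get_boards()]
['Engineering', 'Marketing']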
|
codesearchnet
|
def find_file_in_load_dirs(relpath):
if relpath.startswith(os.path.sep):
relpath = relpath.lstrip(os.path.sep)
for ld in settings.DATA_DIRECTORIES:
possible_path = os.path.join(ld, relpath)
if os.path.exists(possible_path):
return possible_path
|
If the given relative path exists in one of the DevAssistant load paths,
return its full path.
Args:
relpath: a relative path, e.g. "assistants/crt/test.yaml"
Returns:
absolute path of the file, e.g. "/home/x/.devassistant/assistants/crt/test.yaml",
or None if the file is not found
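Example (illustrative; the load directory and file are hypothetical):
>>> find_file_in_load_dirs('files/snippet.yaml')
'/home/user/.devassistant/files/snippet.yaml'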
|
codesearchnet
|
def execute_rex_code(self, code, filename=None, shell=None, parent_environ=None, **Popen_args):
def _actions_callback(executor):
executor.execute_code(code, filename=filename)
return self.execute_shell(shell=shell, parent_environ=parent_environ, command='', block=False, actions_callback=_actions_callback, **Popen_args)
|
Run some rex code in the context.
Note:
This is just a convenience form of `execute_shell`.
Args:
code (str): Rex code to execute.
filename (str): Filename to report if there are syntax errors.
shell: Shell type, for eg 'bash'. If None, the current shell type
is used.
parent_environ: Environment to run the shell process in, if None
then the current environment is used.
Popen_args: args to pass to the shell process object constructor.
Returns:
`subprocess.Popen` object for the shell process.
|
codesearchnet
|
def retry(self, **kwargs):
path = ('%s/%s/retry' % (self.manager.path, self.get_id()))
self.manager.gitlab.http_post(path)
|
Retry the job.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabJobRetryError: If the job could not be retried
|
codesearchnet
|
def build_graph(path, term_depth=1000, skim_depth=10,
d_weights=False, **kwargs):
click.echo('\nTokenizing text...')
t = Text.from_file(path)
click.echo('Extracted %d tokens' % len(t.tokens))
m = Matrix()
click.echo('\nIndexing terms:')
m.index(t, t.most_frequent_terms(term_depth), **kwargs)
g = Skimmer()
click.echo('\nGenerating graph:')
g.build(t, m, skim_depth, d_weights)
return g
|
Tokenize a text, index a term matrix, and build out a graph.
Args:
path (str): The file path.
term_depth (int): Consider the N most frequent terms.
skim_depth (int): Connect each word to the N closest siblings.
d_weights (bool): If true, give "close" nodes low weights.
Returns:
Skimmer: The indexed graph.
|
juraj-google-style
|
def MakeSelfExtractingZip(self, payload_data, output_path):
context = (self.context + ['Client Context'])
src_zip = zipfile.ZipFile(io.BytesIO(payload_data), mode='r')
zip_data = io.BytesIO()
output_zip = zipfile.ZipFile(zip_data, mode='w', compression=zipfile.ZIP_DEFLATED)
config_file_name = config.CONFIG.Get('ClientBuilder.config_filename', context=context)
for template_file in src_zip.namelist():
if (template_file != config_file_name):
CopyFileInZip(src_zip, template_file, output_zip)
client_config_content = self.GetClientConfig(context)
output_zip.writestr(config_file_name, client_config_content.encode('utf-8'), compress_type=zipfile.ZIP_STORED)
output_zip.comment = (b'$AUTORUN$>%s' % config.CONFIG.Get('ClientBuilder.autorun_command_line', context=context).encode('utf-8'))
output_zip.close()
utils.EnsureDirExists(os.path.dirname(output_path))
with open(output_path, 'wb') as fd:
stub_data = io.BytesIO()
unzipsfx_stub = config.CONFIG.Get('ClientBuilder.unzipsfx_stub', context=context)
stub_raw = open(unzipsfx_stub, 'rb').read()
if (b'level="requireAdministrator' not in stub_raw):
        raise RuntimeError('Bad unzip binary in use. Not compiled with the requireAdministrator manifest option.')
stub_data.write(stub_raw)
SetPeSubsystem(stub_data, console=config.CONFIG.Get('ClientBuilder.console', context=context))
end_of_file = (zip_data.tell() + stub_data.tell())
offset_to_rsrc = stub_data.getvalue().find(b'.rsrc')
stub_data.seek((offset_to_rsrc + 20))
start_of_rsrc_section = struct.unpack('<I', stub_data.read(4))[0]
stub_data.seek((offset_to_rsrc + 16))
stub_data.write(struct.pack('<I', (end_of_file - start_of_rsrc_section)))
out_data = io.BytesIO()
out_data.write(stub_data.getvalue())
out_data.write(zip_data.getvalue())
fd.write(out_data.getvalue())
if self.signer:
self.signer.SignFile(output_path)
logging.info('Deployable binary generated at %s', output_path)
return output_path
|
Repack the installer into the payload.
Args:
payload_data: data payload for zip file
output_path: filename for the zip output
Raises:
RuntimeError: if the ClientBuilder.unzipsfx_stub doesn't require admin.
Returns:
output_path: filename string of zip output file
|
codesearchnet
|
def combine_heads(self, x):
with tf.name_scope('combine_heads'):
batch_size = tf.shape(x)[0]
length = tf.shape(x)[2]
x = tf.transpose(x, [0, 2, 1, 3])
return tf.reshape(x, [batch_size, length, self.hidden_size])
|
Combine tensor that has been split.
Args:
x: A tensor [batch_size, num_heads, length, hidden_size/num_heads]
Returns:
A tensor with shape [batch_size, length, hidden_size]
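Example (illustrative shapes only, assuming hidden_size=64):
>>> # x has shape [batch_size=2, num_heads=4, length=5, depth=16]
>>> # combine_heads(x) has shape [2, 5, 64]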
|
codesearchnet
|
def connect_async(self, connection_id, connection_string, callback):
if callback is not None:
callback(connection_id, self.id, False, "connect command is not supported in device adapter")
|
Asynchronously connect to a device
Args:
connection_id (int): A unique identifier that will refer to this connection
connection_string (string): A DeviceAdapter specific string that can be used to connect to
a device using this DeviceAdapter.
callback (callable): A function that will be called when the connection attempt finishes as
callback(connection_id, adapter_id, success: bool, failure_reason: string or None)
|
juraj-google-style
|
def pack_eager_tensors(self, tensors):
self.ensure_initialized()
return pywrap_tfe.TFE_Py_PackEagerTensors(self._handle, tensors)
|
Pack multiple `EagerTensor`s of the same dtype and shape.
Args:
tensors: a list of EagerTensors to pack.
Returns:
A packed EagerTensor.
|
github-repos
|
def objects_delete(self, bucket, key):
url = (Api._ENDPOINT + (Api._OBJECT_PATH % (bucket, Api._escape_key(key))))
datalab.utils.Http.request(url, method='DELETE', credentials=self._credentials, raw_response=True)
|
Deletes the specified object.
Args:
bucket: the name of the bucket.
key: the key of the object within the bucket.
Raises:
Exception if there is an error performing the operation.
|
codesearchnet
|
def configure(self, sbi_config: str):
config_dict = json.loads(sbi_config)
self.debug_stream('SBI configuration:\n%s', json.dumps(config_dict, indent=2))
try:
sbi = Subarray(self.get_name()).configure_sbi(config_dict)
except jsonschema.exceptions.ValidationError as error:
return json.dumps(dict(path=error.absolute_path.__str__(), schema_path=error.schema_path.__str__(), message=error.message), indent=2)
except RuntimeError as error:
return json.dumps(dict(error=str(error)), indent=2)
return 'Accepted SBI: {}'.format(sbi.id)
|
Configure an SBI for this subarray.
Args:
sbi_config (str): SBI configuration JSON
Returns:
str: status message, either the accepted SBI id or a JSON-encoded error.
|
codesearchnet
|
def prod(x, axis=None, keepdims=False, dtype=None):
if any_symbolic_tensors((x,)):
return Prod(axis=axis, keepdims=keepdims, dtype=dtype).symbolic_call(x)
return backend.numpy.prod(x, axis=axis, keepdims=keepdims, dtype=dtype)
|
Return the product of tensor elements over a given axis.
Args:
x: Input tensor.
axis: Axis or axes along which a product is performed. The default,
`axis=None`, will compute the product of all elements
in the input tensor.
keepdims: If this is set to `True`, the axes which are reduced
are left in the result as dimensions with size one.
dtype: Data type of the returned tensor.
Returns:
Product of elements of `x` over the given axis or axes.
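Example (a minimal sketch, assuming the Keras 3 `ops` namespace):
>>> from keras import ops
>>> ops.prod([[1., 2.], [3., 4.]], axis=1)  # -> [2., 12.]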
|
github-repos
|
def _make_model_class(message_type, indexed_fields, **props):
analyzed = _analyze_indexed_fields(indexed_fields)
for (field_name, sub_fields) in analyzed.iteritems():
if (field_name in props):
raise ValueError(('field name %s is reserved' % field_name))
try:
field = message_type.field_by_name(field_name)
except KeyError:
raise ValueError(('Message type %s has no field named %s' % (message_type.__name__, field_name)))
if isinstance(field, messages.MessageField):
if (not sub_fields):
raise ValueError(('MessageField %s cannot be indexed, only sub-fields' % field_name))
sub_model_class = _make_model_class(field.type, sub_fields)
prop = model.StructuredProperty(sub_model_class, field_name, repeated=field.repeated)
else:
if (sub_fields is not None):
raise ValueError(('Unstructured field %s cannot have indexed sub-fields' % field_name))
if isinstance(field, messages.EnumField):
prop = EnumProperty(field.type, field_name, repeated=field.repeated)
elif isinstance(field, messages.BytesField):
prop = model.BlobProperty(field_name, repeated=field.repeated, indexed=True)
else:
prop = model.GenericProperty(field_name, repeated=field.repeated)
props[field_name] = prop
return model.MetaModel(('_%s__Model' % message_type.__name__), (model.Model,), props)
|
Construct a Model subclass corresponding to a Message subclass.
Args:
message_type: A Message subclass.
indexed_fields: A list of dotted and undotted field names.
**props: Additional properties with which to seed the class.
Returns:
A Model subclass whose properties correspond to those fields of
message_type whose field name is listed in indexed_fields, plus
the properties specified by the **props arguments. For dotted
field names, a StructuredProperty is generated using a Model
subclass created by a recursive call.
Raises:
Whatever _analyze_indexed_fields() raises.
ValueError if a field name conflicts with a name in **props.
ValueError if a field name is not valid field of message_type.
ValueError if an undotted field name designates a MessageField.
|
codesearchnet
|
def _update_annotations(discretized_pulse: Callable) -> Callable:
undecorated_annotations = list(discretized_pulse.__annotations__.items())
decorated_annotations = undecorated_annotations[1:]
decorated_annotations.insert(0, ('duration', int))
discretized_pulse.__annotations__ = dict(decorated_annotations)
return discretized_pulse
|
Update annotations of discretized continuous pulse function with duration.
Args:
discretized_pulse: Discretized decorated continuous pulse.
|
juraj-google-style
|
def setNetworkKey(self, key):
masterKey = ''
print '%s call setNetworkKey' % self.port
try:
if not isinstance(key, str):
masterKey = self.__convertLongToString(key)
if len(masterKey) < 32:
masterKey = masterKey.zfill(32)
cmd = WPANCTL_CMD + 'setprop Network:Key %s' % masterKey
datasetCmd = WPANCTL_CMD + 'setprop Dataset:MasterKey %s' % masterKey
else:
masterKey = key
cmd = WPANCTL_CMD + 'setprop Network:Key %s' % masterKey
datasetCmd = WPANCTL_CMD + 'setprop Dataset:MasterKey %s' % masterKey
self.networkKey = masterKey
self.hasActiveDatasetToCommit = True
return self.__sendCommand(cmd)[0] != 'Fail' and self.__sendCommand(datasetCmd)[0] != 'Fail'
except Exception, e:
ModuleHelper.WriteIntoDebugLogger('setNetworkkey() Error: ' + str(e))
|
Set the Thread Network master key.
Args:
key: Thread Network master key used to secure MLE/802.15.4 packets
Returns:
True: successful to set the Thread Network master key
False: fail to set the Thread Network master key
|
juraj-google-style
|
def markdown_to_text(body):
md = markdown.markdown(body, extensions=['markdown.extensions.extra'])
soup = BeautifulSoup(md, 'html.parser')
return soup.get_text()
|
Converts markdown to text.
Args:
body: markdown (or plaintext, or maybe HTML) input
Returns:
Plaintext with all tags and frills removed
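Example:
>>> markdown_to_text('Some *emphasised* **markdown** text')
'Some emphasised markdown text'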
|
codesearchnet
|
def intraday(ticker, dt, session='', **kwargs) -> pd.DataFrame:
from xbbg.core import intervals
cur_data = bdib(ticker=ticker, dt=dt, typ=kwargs.get('typ', 'TRADE'))
if cur_data.empty:
return pd.DataFrame()
fmt = '%H:%M:%S'
ss = intervals.SessNA
ref = kwargs.get('ref', None)
exch = (pd.Series() if (ref is None) else const.exch_info(ticker=ref))
if session:
ss = intervals.get_interval(ticker=kwargs.get('ref', ticker), session=session)
start_time = kwargs.get('start_time', None)
end_time = kwargs.get('end_time', None)
if (ss != intervals.SessNA):
start_time = pd.Timestamp(ss.start_time).strftime(fmt)
end_time = pd.Timestamp(ss.end_time).strftime(fmt)
if (start_time and end_time):
kw = dict(start_time=start_time, end_time=end_time)
if (not exch.empty):
cur_tz = cur_data.index.tz
res = cur_data.tz_convert(exch.tz).between_time(**kw)
if kwargs.get('keep_tz', False):
res = res.tz_convert(cur_tz)
return pd.DataFrame(res)
return pd.DataFrame(cur_data.between_time(**kw))
return cur_data
|
Bloomberg intraday bar data within market session
Args:
ticker: ticker
dt: date
session: examples include
day_open_30, am_normal_30_30, day_close_30, allday_exact_0930_1000
**kwargs:
ref: reference ticker or exchange for timezone
keep_tz: whether to keep the original timezone when a reference ticker / exchange is given
start_time: start time
end_time: end time
typ: [TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID, BEST_ASK]
Returns:
pd.DataFrame
|
codesearchnet
|
def pop(self, identifier, default=None):
if (identifier in self.children):
item = self[identifier]
self.__delitem__(identifier)
return item
else:
return default
|
Pop a node of the AttrTree using its path string.
Args:
identifier: Path string of the node to return
default: Value to return if no node is found
Returns:
The node that was removed from the AttrTree
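Example (a sketch; the path string is illustrative):
>>> tree.pop('Parent.Child', default=None)  # the removed node, or None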
|
codesearchnet
|
def get_index(self, prefix=''):
if prefix:
prefixed = ('%s_index' % prefix)
else:
prefixed = 'index'
if ((prefixed in self.__cli) and self.__cli[prefixed]):
index = self.__cli.get(prefixed)
from_conf = False
else:
index = self.__config.get(prefixed)
from_conf = True
return self.__abspath(index, from_conf)
|
Retrieve the absolute path to an index, according to
`prefix`.
Args:
prefix: str, the desired prefix or `None`.
Returns:
str: An absolute path, or `None`
|
codesearchnet
|
def _get_no_split_modules(self, device_map: str):
_no_split_modules = set()
modules_to_check = [self]
while len(modules_to_check) > 0:
module = modules_to_check.pop(-1)
if module.__class__.__name__ not in _no_split_modules:
if isinstance(module, PreTrainedModel):
if module._no_split_modules is None:
raise ValueError(f"{module.__class__.__name__} does not support `device_map='{device_map}'`. To implement support, the model class needs to implement the `_no_split_modules` attribute.")
else:
_no_split_modules = _no_split_modules | set(module._no_split_modules)
modules_to_check += list(module.children())
return list(_no_split_modules)
|
Get the modules of the model that should not be split when using device_map. We iterate through the modules to
get the underlying `_no_split_modules`.
Args:
device_map (`str`):
The device map value. Options are ["auto", "balanced", "balanced_low_0", "sequential"]
Returns:
`List[str]`: List of modules that should not be split
|
github-repos
|
def submodules(self):
return tuple(self._flatten(predicate=_is_module))
|
Sequence of all sub-modules.
Submodules are modules which are properties of this module, or found as
properties of modules which are properties of this module (and so on).
>>> a = tf.Module()
>>> b = tf.Module()
>>> c = tf.Module()
>>> a.b = b
>>> b.c = c
>>> list(a.submodules) == [b, c]
True
>>> list(b.submodules) == [c]
True
>>> list(c.submodules) == []
True
Returns:
A sequence of all submodules.
|
github-repos
|
async def update_state(self, name, state):
await self.send_command(OPERATIONS.CMD_UPDATE_STATE,
{'name': name, 'new_status': state},
MESSAGES.UpdateStateResponse, timeout=5.0)
|
Update the state for a service.
Args:
name (string): The name of the service
state (int): The new state of the service
|
juraj-google-style
|
def mnist_common_generator(tmp_dir,
training,
how_many,
data_filename,
label_filename,
start_from=0):
data_path = os.path.join(tmp_dir, data_filename)
labels_path = os.path.join(tmp_dir, label_filename)
images = _extract_mnist_images(data_path, 60000 if training else 10000)
labels = _extract_mnist_labels(labels_path, 60000 if training else 10000)
data = list(zip(images, labels))
random.shuffle(data)
images, labels = list(zip(*data))
return image_utils.image_generator(images[start_from:start_from + how_many],
labels[start_from:start_from + how_many])
|
Image generator for MNIST.
Args:
tmp_dir: path to temporary storage directory.
training: a Boolean; if true, we use the train set, otherwise the test set.
how_many: how many images and labels to generate.
data_filename: file that contains features data.
label_filename: file that contains labels.
start_from: from which image to start.
Returns:
An instance of image_generator that produces MNIST images.
|
juraj-google-style
|
def __init__(self, latent_size, hidden_size):
super(EncoderDynamicFactorized, self).__init__()
self.latent_size = latent_size
self.hidden_size = hidden_size
self.dense = tf.keras.layers.Dense(hidden_size, activation=tf.nn.leaky_relu)
self.output_layer = tf.keras.layers.Dense(2*latent_size)
|
Constructs a "factorized" encoder for `z_t`.
Args:
latent_size: An integer corresponding to the
dimensionality of the distribution.
hidden_size: Dimensionality of the affine function parameters.
|
juraj-google-style
|
def _create_hash_str(self, input_arg, output_arg, node_def):
hasher = hashlib.sha1()
def update_num(n):
hasher.update(compat.as_bytes('%x' % n))
def update_str(s):
update_num(len(s))
hasher.update(compat.as_bytes(s))
def update_strs(slist):
update_num(len(slist))
for s in slist:
update_str(s)
for adef in input_arg:
update_str(adef.SerializeToString())
for adef in output_arg:
update_str(adef.SerializeToString())
for n in sorted(node_def, key=lambda n: n.name):
update_str(n.name)
update_str(n.op)
update_strs(n.input)
update_num(len(n.attr))
for k in sorted(n.attr):
update_str(k)
update_str(n.attr[k].SerializeToString())
return hasher.hexdigest()[:8]
|
Creates an 8-character string unique to this input.
Args:
input_arg: the input_arg field of an OpDef
(e.g. self._definition.signature.input_arg)
output_arg: the output_arg field of an OpDef
(e.g. self._definition.signature.output_arg)
node_def: the node_def field of a FunctionDef
(e.g. self._definition.node_def)
Returns:
The unique string for this input
|
github-repos
|
def __init__(self, semantic_config: Optional[Dict]=None, coarse_acoustics_config: Optional[Dict]=None, fine_acoustics_config: Optional[Dict]=None, sample_rate=24000, codebook_size=1024, **kwargs):
if semantic_config is None:
semantic_config = {}
logger.info('semantic_config is None. initializing the semantic model with default values.')
if coarse_acoustics_config is None:
coarse_acoustics_config = {}
logger.info('coarse_acoustics_config is None. initializing the coarse model with default values.')
if fine_acoustics_config is None:
fine_acoustics_config = {}
logger.info('fine_acoustics_config is None. initializing the fine model with default values.')
self.semantic_config = BarkSemanticGenerationConfig(**semantic_config)
self.coarse_acoustics_config = BarkCoarseGenerationConfig(**coarse_acoustics_config)
self.fine_acoustics_config = BarkFineGenerationConfig(**fine_acoustics_config)
self.sample_rate = sample_rate
self.codebook_size = codebook_size
|
Class that holds a generation configuration for [`BarkModel`].
The [`BarkModel`] does not have a `generate` method, but uses this class to generate speech with a nested
[`BarkGenerationConfig`] which uses [`BarkSemanticGenerationConfig`], [`BarkCoarseGenerationConfig`],
[`BarkFineGenerationConfig`].
This configuration inherits from [`GenerationConfig`] and can be used to control the model generation. Read the
documentation from [`GenerationConfig`] for more information.
Args:
semantic_config (`Dict`, *optional*):
Semantic generation configuration.
coarse_acoustics_config (`Dict`, *optional*):
Coarse generation configuration.
fine_acoustics_config (`Dict`, *optional*):
Fine generation configuration.
sample_rate (`int`, *optional*, defaults to 24_000):
Sample rate.
codebook_size (`int`, *optional*, defaults to 1024):
Vector length for each codebook.
|
github-repos
|
def speed_metrics(split, start_time, num_samples=None, num_steps=None, num_tokens=None):
runtime = time.time() - start_time
result = {f'{split}_runtime': round(runtime, 4)}
if runtime == 0:
return result
if num_samples is not None:
samples_per_second = num_samples / runtime
result[f'{split}_samples_per_second'] = round(samples_per_second, 3)
if num_steps is not None:
steps_per_second = num_steps / runtime
result[f'{split}_steps_per_second'] = round(steps_per_second, 3)
if num_tokens is not None:
tokens_per_second = num_tokens / runtime
result[f'{split}_tokens_per_second'] = round(tokens_per_second, 3)
return result
|
Measure and return speed performance metrics.
This function requires a time snapshot `start_time` before the operation to be measured starts and this function
should be run immediately after the operation to be measured has completed.
Args:
- split: name to prefix metric (like train, eval, test...)
- start_time: operation start time
- num_samples: number of samples processed
- num_steps: number of steps processed
- num_tokens: number of tokens processed
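Example (the timing numbers are illustrative):
>>> start = time.time()
>>> # ... run evaluation ...
>>> speed_metrics('eval', start, num_samples=1000)
{'eval_runtime': 12.5, 'eval_samples_per_second': 80.0}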
|
github-repos
|
def setModelData(self, editor, model, index):
model.setData(index, editor.itemText(editor.currentIndex()))
|
Updates the model after changing data in the editor.
Args:
editor (QtGui.QComboBox): The current editor for the item. Should be
a `QtGui.QComboBox` as defined in `createEditor`.
model (ColumnDtypeModel): The model which holds the displayed data.
index (QtCore.QModelIndex): The index of the current item of the model.
|
juraj-google-style
|
def ValidateServiceGaps(self, problems, validation_start_date, validation_end_date, service_gap_interval):
if (service_gap_interval is None):
return
departures = self.GenerateDateTripsDeparturesList(validation_start_date, validation_end_date)
first_day_without_service = validation_start_date
last_day_without_service = validation_start_date
consecutive_days_without_service = 0
for (day_date, day_trips, _) in departures:
if (day_trips == 0):
if (consecutive_days_without_service == 0):
first_day_without_service = day_date
consecutive_days_without_service += 1
last_day_without_service = day_date
else:
if (consecutive_days_without_service >= service_gap_interval):
problems.TooManyDaysWithoutService(first_day_without_service, last_day_without_service, consecutive_days_without_service)
consecutive_days_without_service = 0
if (consecutive_days_without_service >= service_gap_interval):
problems.TooManyDaysWithoutService(first_day_without_service, last_day_without_service, consecutive_days_without_service)
|
Validate consecutive dates without service in the feed.
Issue a warning if it finds service gaps of at least
"service_gap_interval" consecutive days in the date range
[validation_start_date, validation_end_date)
Args:
problems: The problem reporter object
validation_start_date: A date object representing the date from which the
validation should take place
validation_end_date: A date object representing the first day the feed is
active
service_gap_interval: An integer indicating how many consecutive days the
service gaps need to have for a warning to be issued
Returns:
None
|
codesearchnet
|
def add_edge_bias(x, filter_size):
x_shape = common_layers.shape_list(x)
if filter_size[0] == 1 and filter_size[1] == 1:
return x
a = (filter_size[0] - 1)
b = (filter_size[1] - 1)
padding = [[0, 0], [a, a], [b, b], [0, 0]]
x_bias = tf.zeros(x_shape[:-1] + [1])
x = tf.pad(x, padding)
x_pad = tf.pad(x_bias, padding, constant_values=1)
return tf.concat([x, x_pad], axis=3)
|
Pad x and concatenates an edge bias across the depth of x.
The edge bias can be thought of as a binary feature which is unity when
the filter is being convolved over an edge and zero otherwise.
Args:
x: Input tensor, shape (NHWC)
filter_size: filter_size to determine padding.
Returns:
x_pad: Input tensor, shape (NHW(c+1))
|
juraj-google-style
|
def generate_states(state_count, process_matrix, process_covariance, initial_state=None):
process_matrix = np.atleast_2d(process_matrix)
process_covariance = np.atleast_2d(process_covariance)
state_dim = process_matrix.shape[0]
if (process_matrix.shape != (state_dim, state_dim)):
raise ValueError('Process matrix has inconsistent shape: {}'.format(process_matrix.shape))
if (process_covariance.shape != (state_dim, state_dim)):
raise ValueError('Process covariance has inconsistent shape: {}'.format(process_covariance.shape))
if (initial_state is None):
initial_state = np.zeros(process_matrix.shape[0])
states = [initial_state]
while (len(states) < state_count):
states.append((process_matrix.dot(states[(- 1)]) + np.random.multivariate_normal(mean=np.zeros(state_dim), cov=process_covariance)))
return np.vstack(states)
|
Generate states by simulating a linear system with constant process matrix
and process noise covariance.
Args:
state_count (int): Number of states to generate.
process_matrix (array): Square array
process_covariance (array): Square array specifying process noise
covariance.
initial_state (array or None): If omitted, use zero-filled vector as
initial state.
|
codesearchnet
|
def curie_search(self, curie:str) -> dict:
ilx_row = self.curie2row.get(curie)
if not ilx_row:
return None
else:
return ilx_row
|
Returns the row in InterLex associated with the curie
Note:
Presumed not to have duplicate curies in InterLex
Args:
curie: The "prefix:fragment_id" of the existing_id pertaining to the ontology
Returns:
None or dict
|
juraj-google-style
|
def generate_code(max_length, max_nest, ops):
stack = []
def fetch_one():
if stack:
return stack.pop()
else:
value = random.randint(10 ** (max_length - 1), 10 ** max_length - 1)
code = str(value)
return value, code
def fetch(num_operands):
values, codes = zip(*[fetch_one() for _ in six.moves.range(num_operands)])
return values, codes
for _ in six.moves.range(max_nest):
op = random.choice(ops)
values, codes = fetch(op.num_operands)
new_value = op.eval(values)
new_code = op.get_code(codes)
stack.append((new_value, "(" + new_code + ")"))
final_value, final_code = stack.pop()
  final_code = final_code[1:-1]  # strip the outermost parentheses
if not op.is_memory:
final_value = int(final_value) % 10 ** (max_length+1)
return str(final_value), final_code
|
Generates code samples.
Args:
max_length: int. max literal length.
max_nest: int. max nesting level.
ops: CodeOp. set of allowable operations.
Returns:
1. (str) output value.
2. (str) Code operation.
|
juraj-google-style
|
def read_committed_file(gitref, filename):
repo = Repo()
commitobj = repo.commit(gitref)
blob = commitobj.tree[(_delta_dir() + filename)]
return blob.data_stream.read()
|
Retrieve the content of a file in an old commit and returns it.
Keyword Arguments:
:gitref: (str) -- full reference of the git commit
:filename: (str) -- name (full path) of the file
Returns:
str -- content of the file
|
codesearchnet
|
def get_records(self, name):
if (name in self._cache):
return self._cache[name].values()
else:
return []
|
Return all the records for the given name in the cache.
Args:
name (string): The name which the required models are stored under.
Returns:
list: A list of :class:`cinder_data.model.CinderModel` models.
|
codesearchnet
|
def _get_storage_model():
storage_model_settings = getattr(django.conf.settings, 'GOOGLE_OAUTH2_STORAGE_MODEL', None)
if (storage_model_settings is not None):
return (storage_model_settings['model'], storage_model_settings['user_property'], storage_model_settings['credentials_property'])
else:
return (None, None, None)
|
This configures whether the credentials will be stored in the session
or the Django ORM based on the settings. By default, the credentials
will be stored in the session, unless `GOOGLE_OAUTH2_STORAGE_MODEL`
is found in the settings. Usually, the ORM storage is used to integrate
credentials into an existing Django user system.
Returns:
A tuple containing three strings, or None. If
``GOOGLE_OAUTH2_STORAGE_MODEL`` is configured, the tuple
will contain the fully qualifed path of the `django.db.model`,
the name of the ``django.contrib.auth.models.User`` field on the
model, and the name of the
:class:`oauth2client.contrib.django_util.models.CredentialsField`
field on the model. If Django ORM storage is not configured,
this function returns None.
|
codesearchnet
|
def encode_json_body(data):
if hasattr(data, "read"):
return data
response.content_type = "application/json; charset=utf-8"
return json.dumps(
data,
indent=4,
separators=(',', ': ')
)
|
Return prettified JSON `data`, set ``response.content_type`` to
``application/json; charset=utf-8``.
Args:
data (any): Any basic python data structure.
Returns:
str: Data converted to prettified JSON.
|
juraj-google-style
|
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
if ('RememberedNetworks' not in match):
return
for wifi in match['RememberedNetworks']:
ssid = wifi.get('SSIDString', 'UNKNOWN_SSID')
security_type = wifi.get('SecurityType', 'UNKNOWN_SECURITY_TYPE')
event_data = plist_event.PlistTimeEventData()
event_data.desc = '[WiFi] Connected to network: <{0:s}> using security {1:s}'.format(ssid, security_type)
event_data.key = 'item'
event_data.root = '/RememberedNetworks'
datetime_value = wifi.get('LastConnected', None)
if datetime_value:
event = time_events.PythonDatetimeEvent(datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
else:
date_time = dfdatetime_semantic_time.SemanticTime('Not set')
event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Extracts relevant Airport entries.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
|
codesearchnet
|
def list_offers(access_token, subscription_id, location, publisher):
endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.Compute/', 'locations/', location, '/publishers/', publisher, '/artifacttypes/vmimage/offers?api-version=', COMP_API])
return do_get(endpoint, access_token)
|
List available VM image offers from a publisher.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
location (str): Azure data center location. E.g. westus.
publisher (str): Publisher name, e.g. Canonical.
Returns:
HTTP response with JSON list of image offers.
|
codesearchnet
|
def _get_batch_data(self, batch_size, num_objects, num_features):
all_inputs = []
all_labels = []
for _ in six.moves.range(batch_size):
inputs, labels = self._get_single_set(num_objects, num_features)
all_inputs += [inputs]
all_labels += [labels]
input_data = np.concatenate(all_inputs, axis=0)
label_data = np.concatenate(all_labels, axis=0)
return input_data, label_data
|
Assembles a batch of input tensors and output labels.
Args:
batch_size: int. number of sequence batches.
num_objects: int. number of objects in the sequence.
num_features: int. feature size of each object.
Returns:
1. np.ndarray (`batch_size`, `num_objects`,
(`num_features` + 3 * `num_objects`)).
2. np.ndarray (`batch_size`). Output object reference label.
|
juraj-google-style
|
def write_top_half(f, row_metadata_df, col_metadata_df, metadata_null, filler_null):
size_of_top_half_df = (1 + col_metadata_df.shape[1],
1 + row_metadata_df.shape[1] + col_metadata_df.shape[0])
top_half_df = pd.DataFrame(np.full(size_of_top_half_df, filler_null, dtype=object))
top_half_df.iloc[0, :] = np.hstack(("id", row_metadata_df.columns.values, col_metadata_df.index.values))
top_half_df.iloc[range(1, top_half_df.shape[0]), 0] = col_metadata_df.columns.values
col_metadata_indices = (range(1, top_half_df.shape[0]),
range(1 + row_metadata_df.shape[1], top_half_df.shape[1]))
top_half_df.at[col_metadata_indices[0], col_metadata_indices[1]] = (
col_metadata_df.astype(str).replace("nan", value=metadata_null).T.values)
top_half_df.to_csv(f, header=False, index=False, sep="\t")
|
Write the top half of the gct file: top-left filler values, row metadata
headers, and top-right column metadata.
Args:
f (file handle): handle for output file
row_metadata_df (pandas df)
col_metadata_df (pandas df)
metadata_null (string): how to represent missing values in the metadata
filler_null (string): what value to fill the top-left filler block with
Returns:
None
|
juraj-google-style
|
def _fn(arg0, arg1, deprecated=True):
return arg0 + arg1 if deprecated else arg1 + arg0
|
fn doc.
Args:
arg0: Arg 0.
arg1: Arg 1.
deprecated: Deprecated!
Returns:
Sum of args.
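Example:
>>> _fn(1, 2)                    # deprecated=True: arg0 + arg1
3
>>> _fn(1, 2, deprecated=False)  # arg1 + arg0
3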
|
github-repos
|
def add_request(self, request):
queue_item = QueueItem(request, Response(request.url))
self.add(queue_item)
return queue_item
|
Add a request to the queue.
Args:
request (:class:`nyawc.http.Request`): The request to add.
Returns:
:class:`nyawc.QueueItem`: The created queue item.
|
codesearchnet
|
def __init__(self, options, queue_item):
self.__options = options
self.__queue_item = queue_item
self.__queue_item.response = self.__make_request(
self.__queue_item.request.url,
self.__queue_item.request.method,
self.__queue_item.request.data,
self.__queue_item.request.auth,
self.__queue_item.request.cookies,
self.__queue_item.request.headers,
self.__queue_item.request.proxies,
self.__queue_item.request.timeout,
self.__queue_item.request.verify
)
self.__queue_item.response.url = str(self.__queue_item.response.url)
|
Construct the HTTP handler.
Args:
options (:class:`nyawc.Options`): The settings/options object.
queue_item (:class:`nyawc.QueueItem`): The queue item containing the request.
|
juraj-google-style
|
def _add_result(self, dict_entry, entry, dt, start_time):
time_entry = {}
time_entry['dt'] = dt
time_entry['start_time'] = start_time
dict_entry[entry] = time_entry
|
Adds a result to the dictionary.
Args:
dict_entry: main dict to add entry
entry: slot for this entry (likely an integer)
dt: the timing for the entry
start_time: when the entry started (unix time, float)
|
juraj-google-style
|
def __init__(self, name, lim_low=85., lim_high=95., **kwargs):
self.lim_low = lim_low
self.lim_high = lim_high
super(DayNightCompositor, self).__init__(name, **kwargs)
|
Collect custom configuration values.
Args:
lim_low (float): lower limit of Sun zenith angle for the
blending of the given channels
lim_high (float): upper limit of Sun zenith angle for the
blending of the given channels
|
juraj-google-style
|
def __init__(self, parent=None):
super(DelimiterSelectionWidget, self).__init__(parent)
self.semicolonRadioButton = None
self.commaRadioButton = None
self.tabRadioButton = None
self.otherRadioButton = None
self.otherSeparatorLineEdit = None
self._initUI()
|
Constructs the object with the given parent.
Args:
parent (QObject, optional): Causes the objected to be owned
by `parent` instead of Qt. Defaults to `None`.
|
juraj-google-style
|
def get_battery_level(self):
battery_level = self.get_characteristic_handle_from_uuid(UUID_BATTERY_LEVEL)
if (battery_level is None):
logger.warn('Failed to find handle for battery level')
return None
level = self.dongle._read_attribute(self.conn_handle, battery_level)
if (level is None):
return (- 1)
return ord(level)
|
Reads the battery level descriptor on the device.
Returns:
int or None. If successful this will be a positive value representing the
current battery level as a percentage. On a read error, -1 is returned; if
the battery level handle cannot be found, None is returned.
|
codesearchnet
|
def _get_output_tensors(self, interpreter: _interpreter.Interpreter) -> List[np.ndarray]:
outputs = []
for output_detail in interpreter.get_output_details():
tensor = interpreter.get_tensor(output_detail['index'])
if output_detail['dtype'] == np.int8:
quant_params = _get_quant_params(output_detail)
if quant_params:
scale, zero_point = quant_params
tensor = ((tensor.astype(np.float32) - zero_point) * scale).astype(np.float32)
outputs.append(tensor)
return outputs
|
Returns output tensors of given TFLite model Interpreter.
Args:
interpreter: a tf.lite.Interpreter object with allocated tensors.
Returns:
a list of numpy arrays representing output tensor results.
|
github-repos
|
def getattr_sdk(attr, name):
if inspect.isroutine(attr):
if hasattr(attr, '_sdkmeta'):
return attr
raise AttributeError(name)
|
Filter SDK attributes
Args:
attr(attribute): Attribute as returned by :func:`getattr`.
name(str): Attribute name.
Returns:
`attr` if it passes the SDK filter.
|
codesearchnet
|
def _UpdateCounters(self, event):
self._session.parsers_counter['total'] += 1
parser_name = getattr(event, 'parser', '')
_, _, parser_name = parser_name.rpartition('/')
if not parser_name:
parser_name = 'N/A'
self._session.parsers_counter[parser_name] += 1
|
Updates the counters.
Args:
event (EventObject): event.
|
juraj-google-style
|
def __init__(self, initializer=None):
if initializer is None:
self.data = []
return
if isinstance(initializer, Timeseries):
self.data = copy.deepcopy(initializer.data)
return
raise RuntimeError("Unrecognized initializer.")
|
Create a timeseries with an optional initializer.
Args:
initializer: An optional Timeseries to clone.
Raises:
RuntimeError: If initializer is not understood.
|
juraj-google-style
|
def put(cls, obj):
return PyarrowOnRayFramePartition(ray.put(pyarrow.Table.from_pandas(obj)))
|
Put an object in the Plasma store and wrap it in this object.
Args:
obj: The object to be put.
Returns:
A `RayRemotePartition` object.
|
juraj-google-style
|
def dot(A, B):
try:
result = A.__matmul__(B)
if result is NotImplemented:
result = B.__rmatmul__(A)
except AttributeError:
result = B.__rmatmul__(A)
return result
|
Matrix multiplication between A and B
This function is equivalent to ``A @ B``, which is unfortunately
not possible under python 2.x.
Args:
A (sequence):
B (sequence):
Returns:
sequence:
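Example:
>>> import numpy as np
>>> dot(np.eye(2), np.array([1., 2.]))
array([1., 2.])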
|
juraj-google-style
|
def dframe(self, dimensions=None, multi_index=False):
import pandas as pd
if dimensions is None:
outer_dimensions = self.kdims
inner_dimensions = None
else:
outer_dimensions = [self.get_dimension(d) for d in dimensions
if d in self.kdims]
inner_dimensions = [d for d in dimensions
if d not in outer_dimensions]
inds = [(d, self.get_dimension_index(d)) for d in outer_dimensions]
dframes = []
for key, element in self.data.items():
df = element.dframe(inner_dimensions, multi_index)
names = [d.name for d in outer_dimensions]
key_dims = [(d.name, key[i]) for d, i in inds]
if multi_index:
length = len(df)
indexes = [[v]*length for _, v in key_dims]
if df.index.names != [None]:
indexes += [df.index]
names += list(df.index.names)
df = df.set_index(indexes)
df.index.names = names
else:
for dim, val in key_dims:
dimn = 1
while dim in df:
dim = dim+'_%d' % dimn
if dim in df:
dimn += 1
df.insert(0, dim, val)
dframes.append(df)
return pd.concat(dframes)
|
Convert dimension values to DataFrame.
Returns a pandas dataframe of columns along each dimension,
either completely flat or indexed by key dimensions.
Args:
dimensions: Dimensions to return as columns
multi_index: Convert key dimensions to (multi-)index
Returns:
DataFrame of columns corresponding to each dimension
|
juraj-google-style
|
def tcp_ping(
task: Task, ports: List[int], timeout: int = 2, host: Optional[str] = None
) -> Result:
if isinstance(ports, int):
ports = [ports]
if isinstance(ports, list):
if not all(isinstance(port, int) for port in ports):
raise ValueError("Invalid value for 'ports'")
else:
raise ValueError("Invalid value for 'ports'")
host = host or task.host.hostname
result = {}
for port in ports:
s = socket.socket()
s.settimeout(timeout)
try:
status = s.connect_ex((host, port))
if status == 0:
connection = True
else:
connection = False
except (socket.gaierror, socket.timeout, socket.error):
connection = False
finally:
s.close()
result[port] = connection
return Result(host=task.host, result=result)
|
Tests connection to a tcp port and tries to establish a three way
handshake. To be used for network discovery or testing.
Arguments:
ports (list of int): tcp ports to ping
timeout (int, optional): defaults to 2
host (string, optional): defaults to ``hostname``
Returns:
Result object with the following attributes set:
* result (``dict``): Contains port numbers as keys with True/False as values
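Example (a sketch; assumes a configured Nornir runner `nr`):
>>> result = nr.run(task=tcp_ping, ports=[22, 443])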
|
juraj-google-style
|
def formula_html(self, reversed_=False):
if (self.H_count == 1):
text = 'H'
elif (self.H_count > 1):
text = 'H<sub>{}</sub>'.format(self.H_count)
else:
text = ''
seq = [self.symbol, text, self.charge_sign_html()]
if reversed_:
seq = reversed(seq)
return ''.join(seq)
|
Chemical formula HTML
Args:
reversed_ (bool): if True, reverse the text for leftmost atom groups
|
codesearchnet
|
def _add_collection_def(meta_graph_def, key, export_scope=None):
meta_graph.add_collection_def(meta_graph_def, key, export_scope=export_scope)
|
Adds a collection to MetaGraphDef protocol buffer.
Args:
meta_graph_def: MetaGraphDef protocol buffer.
key: One of the GraphKeys or user-defined string.
export_scope: Optional `string`. Name scope to remove.
|
github-repos
|
def __getattr__(cls, item):
if item in cls._meta.settings.keys():
return cls._meta.settings[item]
raise AttributeError("'%s' class has no attribute '%s'" % (cls.__name__, item))
|
Return a setting object if it is in the ``_meta.settings`` dictionary.
Args:
item (str):
the name of the setting variable (not the setting's name).
Returns:
``Setting``: the setting object.
Raises:
AttributeError if the setting does not exist.
|
juraj-google-style
|
def load_img(path, grayscale=False, target_size=None):
img = io.imread(path, grayscale)
if target_size:
img = transform.resize(img, target_size, preserve_range=True).astype('uint8')
return img
|
Utility function to load an image from disk.
Args:
path: The image file path.
grayscale: True to convert to grayscale image (Default value = False)
target_size: (w, h) to resize. (Default value = None)
Returns:
The loaded numpy image.
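Example (the file path is hypothetical):
>>> img = load_img('cat.png', target_size=(224, 224))
>>> img.shape  # (224, 224, 3) for an RGB source image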
|
juraj-google-style
|
def set_mac_addr_adv_interval(self, name, vrid, value=None, disable=False, default=False, run=True):
if ((not default) and (not disable)):
if ((not int(value)) or (int(value) < 1) or (int(value) > 3600)):
raise ValueError("vrrp property 'mac_addr_adv_interval' must be in the range 1-3600")
cmd = self.command_builder(('vrrp %d mac-address advertisement-interval' % vrid), value=value, default=default, disable=disable)
if run:
result = self.configure_interface(name, cmd)
if (result is False):
return self.error
return result
return cmd
|
Set the mac_addr_adv_interval property of the vrrp
Args:
name (string): The interface to configure.
vrid (integer): The vrid number for the vrrp to be managed.
value (integer): mac-address advertisement-interval value to
assign to the vrrp.
disable (boolean): Unset mac-address advertisement-interval
if True.
default (boolean): Set mac-address advertisement-interval to
default if True.
run (boolean): Set to True to execute the command, False to
return a string with the formatted command.
Returns:
If run is True, returns True if the command executed successfully,
error if failure.
If run is False, returns the formatted command string which can
be passed to the node
|
codesearchnet
|
def is_table(engine, sql):
if engine.dialect.has_table(engine, sql):
return True
return False
|
Check whether the given sql arg is a query or a table
Args:
engine: SQLAlchemy connection engine
sql: SQL query or table name
Returns:
True for table or False if not
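Example (a sketch; assumes a SQLAlchemy engine):
>>> from sqlalchemy import create_engine
>>> is_table(create_engine('sqlite://'), 'users')
False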
|
codesearchnet
|
def root_cause(binding, node, seen=()):
if isinstance(binding, (list, tuple)):
bindings = list(binding)
else:
bindings = [binding]
del binding
key = frozenset(bindings)
if key in seen:
return (next(iter(bindings), None), node)
for b in bindings:
if not node.HasCombination([b]):
for o in b.origins:
for source_set in o.source_sets:
cause, n = root_cause(list(source_set), o.where)
if cause is not None:
return (cause, n)
return (b, node)
return (None, None)
|
Tries to determine why a binding isn't possible at a node.
This tries to find the innermost source that's still impossible. It only works
if the failure isn't due to a combination of bindings.
Args:
binding: A binding, or a list of bindings.
node: The node at which (one of the) binding(s) is impossible.
seen: Internal. Bindings already looked at.
Returns:
A tuple (binding, node), with "binding" the innermost binding that's
not possible, and "node" the CFG node at which it isn't.
|
github-repos
|
def EnrolFleetspeakClient(self, client_id):
client_urn = rdf_client.ClientURN(client_id)
if data_store.RelationalDBEnabled():
try:
data_store.REL_DB.ReadClientMetadata(client_id)
return False
except db.UnknownClientError:
pass
else:
if aff4.FACTORY.ExistsWithType(
client_urn, aff4_type=aff4_grr.VFSGRRClient, token=self.token):
return False
logging.info("Enrolling a new Fleetspeak client: %r", client_id)
if data_store.RelationalDBEnabled():
now = rdfvalue.RDFDatetime.Now()
data_store.REL_DB.WriteClientMetadata(
client_id, first_seen=now, fleetspeak_enabled=True, last_ping=now)
if data_store.AFF4Enabled():
with aff4.FACTORY.Create(
client_urn,
aff4_type=aff4_grr.VFSGRRClient,
mode="rw",
token=self.token) as client:
client.Set(client.Schema.FLEETSPEAK_ENABLED, rdfvalue.RDFBool(True))
index = client_index.CreateClientIndex(token=self.token)
index.AddClient(client)
if data_store.RelationalDBEnabled():
client_obj = rdf_objects.ClientSnapshot(
client_id=client_urn.Basename())
index = client_index.ClientIndex()
index.AddClient(client_obj)
events.Events.PublishEvent("ClientEnrollment", client_urn, token=self.token)
return True
|
Enrols a Fleetspeak-enabled client for use with GRR.
Args:
client_id: GRR client-id for the client.
Returns:
True if the client is new, and actually got enrolled. This method
is a no-op if the client already exists (in which case False is returned).
|
juraj-google-style
|
def min_sequence_length(self, dataset_split):
return {problem.DatasetSplit.TRAIN: 8, problem.DatasetSplit.EVAL: 65, problem.DatasetSplit.TEST: 65}[dataset_split]
|
Determine the minimum sequence length given a dataset_split.
Args:
dataset_split: A problem.DatasetSplit.
Returns:
The minimum length that a sequence can be for this dataset_split.
|
codesearchnet
|
def rApply(d, f):
remainingDicts = [(d, ())]
while len(remainingDicts) > 0:
current, prevKeys = remainingDicts.pop()
for k, v in current.iteritems():
keys = prevKeys + (k,)
if isinstance(v, dict):
remainingDicts.insert(0, (v, keys))
else:
f(v, keys)
|
Recursively applies f to the values in dict d.
Args:
d: The dict to recurse over.
f: A function to apply to values in d that takes the value and a list of
keys from the root of the dict to the value.
|
juraj-google-style
|
def run(self, feed_dict=None, session=None) -> None:
_run_using_default_session(self, feed_dict, self.graph, session)
|
Runs this operation in a `Session`.
Calling this method will execute all preceding operations that
produce the inputs needed for this operation.
*N.B.* Before invoking `Operation.run()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values. See
`tf.Session.run` for a description of the valid feed values.
session: (Optional.) The `Session` to be used to run to this operation. If
none, the default session will be used.
|
github-repos
|
def stop(self):
self.log.debug('Stopping snippet package %s.', self.package)
self.close_connection()
self._stop_server()
self._destroy_event_client()
self.log.debug('Snippet package %s stopped.', self.package)
|
Releases all the resources acquired in `initialize`.
This function releases following resources:
* Close the socket connection.
* Stop forwarding the device port to host.
* Stop the standing server subprocess running on the host side.
* Stop the snippet server running on the device side.
* Stop the event client and set `self._event_client` to None.
Raises:
android_device_lib_errors.DeviceError: if the server exited with errors on
the device side.
|
github-repos
|
def __init__(self, code, message=None, command=None):
super().__init__(message)
self.code = code
self.command = command
|
Initializes a new instance of SMTPCommandFailedError.
Args:
code (int): Error code returned by the SMTP server.
message (str): Exception message, ideally providing help for the
user.
command (str): Command sent to the server that originated the
exception.
|
juraj-google-style
|
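A hypothetical handling sketch; the 550 reply code and the RCPT command below are fabricated for illustration:

try:
    raise SMTPCommandFailedError(550, 'mailbox unavailable',
                                 'RCPT TO:<user@example.com>')
except SMTPCommandFailedError as err:
    print(err.code, err.command)  # 550 RCPT TO:<user@example.com>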
def run(components=None, broker=None):
components = (components or COMPONENTS[GROUPS.single])
components = _determine_components(components)
broker = (broker or Broker())
for component in run_order(components):
start = time.time()
try:
if ((component not in broker) and (component in DELEGATES) and is_enabled(component)):
log.info(('Trying %s' % get_name(component)))
result = DELEGATES[component].process(broker)
broker[component] = result
except MissingRequirements as mr:
if log.isEnabledFor(logging.DEBUG):
name = get_name(component)
reqs = stringify_requirements(mr.requirements)
log.debug(('%s missing requirements %s' % (name, reqs)))
broker.add_exception(component, mr)
except SkipComponent:
pass
except Exception as ex:
tb = traceback.format_exc()
            log.warning(tb)
broker.add_exception(component, ex, tb)
finally:
broker.exec_times[component] = (time.time() - start)
broker.fire_observers(component)
return broker
|
Executes components in an order that satisfies their dependency
relationships.
Keyword Args:
components: Can be one of a dependency graph, a single component, a
component group, or a component type. If it's anything other than a
dependency graph, the appropriate graph is built for you before
evaluation.
broker (Broker): Optionally pass a broker to use for evaluation. One is
created by default, but it's often useful to seed a broker with an
initial dependency.
Returns:
Broker: The broker after evaluation.
|
codesearchnet
|
def create_raw(self, key, value):
data = None
if key is not None and value is not None:
data = self.db.create(key.strip(), value)
else:
self.tcex.log.warning(u'The key or value field was None.')
return data
|
Create method of CRUD operation for raw data.
Args:
key (string): The variable to write to the DB.
value (any): The data to write to the DB.
Returns:
(string): Result of DB write.
|
juraj-google-style
|
def create(self, algorithm, length, operation_policy_name=None, name=None, cryptographic_usage_mask=None):
if (not isinstance(algorithm, enums.CryptographicAlgorithm)):
raise TypeError('algorithm must be a CryptographicAlgorithm enumeration')
elif ((not isinstance(length, six.integer_types)) or (length <= 0)):
raise TypeError('length must be a positive integer')
if (cryptographic_usage_mask is not None):
if ((not isinstance(cryptographic_usage_mask, list)) or (all((isinstance(item, enums.CryptographicUsageMask) for item in cryptographic_usage_mask)) is False)):
raise TypeError('cryptographic_usage_mask must be a list of CryptographicUsageMask enumerations')
common_attributes = self._build_common_attributes(operation_policy_name)
key_attributes = self._build_key_attributes(algorithm, length, cryptographic_usage_mask)
key_attributes.extend(common_attributes)
if name:
key_attributes.extend(self._build_name_attribute(name))
template = cobjects.TemplateAttribute(attributes=key_attributes)
result = self.proxy.create(enums.ObjectType.SYMMETRIC_KEY, template)
status = result.result_status.value
if (status == enums.ResultStatus.SUCCESS):
return result.uuid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
|
Create a symmetric key on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the symmetric key.
length (int): The length in bits for the symmetric key.
operation_policy_name (string): The name of the operation policy
to use for the new symmetric key. Optional, defaults to None
name (string): The name to give the key. Optional, defaults to None
cryptographic_usage_mask (list): list of CryptographicUsageMask
enumerations to apply to the symmetric key. Optional, defaults to
None
Returns:
string: The uid of the newly created symmetric key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
|
codesearchnet
|
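A hypothetical usage sketch, assuming `client` is a connected instance of the class above and PyKMIP-style enums are available:

from kmip.core import enums

uid = client.create(
    enums.CryptographicAlgorithm.AES,
    256,
    name='demo-key',
    cryptographic_usage_mask=[
        enums.CryptographicUsageMask.ENCRYPT,
        enums.CryptographicUsageMask.DECRYPT,
    ],
)
print('Created symmetric key:', uid)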
def search(self, filepath=None, basedir=None, kind=None):
    # Check before applying defaults; otherwise basedir is always truthy
    # and this guard can never fire.
    if (not basedir) and (not filepath):
        msg = 'Either basedir or filepath is required for discovering'
        raise SettingsDiscoveryError(msg)
    if filepath is None:
        filepath = ''
    if basedir is None:
        basedir = '.'
if (kind and (kind not in self.engines)):
        msg = 'Given settings format is unknown: {}'
raise SettingsDiscoveryError(msg.format(kind))
if (not filepath):
(filename, engine) = self.guess_filename(basedir, kind)
filepath = os.path.join(basedir, filename)
else:
if os.path.isabs(filepath):
(basedir, filename) = os.path.split(filepath)
else:
filepath = os.path.join(basedir, filepath)
if (not os.path.exists(filepath)):
msg = 'Given settings file does not exists: {}'
raise SettingsDiscoveryError(msg.format(filepath))
engine = self.get_engine(filepath, kind)
return (filepath, engine)
|
Search for a settings file.
Keyword Arguments:
filepath (string): Path to a config file, either absolute or
relative. If absolute set its directory as basedir (omitting
given basedir argument). If relative join it to basedir.
basedir (string): Directory path where to search for.
kind (string): Backend engine kind name (value of attribute
``_kind_name``) to help discovery with an empty or relative
filepath. Also, if an explicit absolute filepath is given, this
will enforce the backend engine (e.g. the yaml kind will be
forced even for a ``foo.json`` file).
Returns:
tuple: Absolute filepath and backend engine class.
|
codesearchnet
|
def _parse_trunk_native_vlan(self, config):
match = re.search('switchport trunk native vlan (\\d+)', config)
return dict(trunk_native_vlan=match.group(1))
|
Scans the specified config and parse the trunk native vlan value
Args:
config (str): The interface configuration block to scan
Returns:
dict: A Python dict object with the value of switchport trunk
native vlan value. The dict returned is intended to be
merged into the resource dict
|
codesearchnet
|
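The regex at the heart of the parser can be checked in isolation with a fabricated config line:

import re

config = 'switchport trunk native vlan 100'
match = re.search('switchport trunk native vlan (\\d+)', config)
assert dict(trunk_native_vlan=match.group(1)) == {'trunk_native_vlan': '100'}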
def get_video_features(self, pixel_values_videos: torch.FloatTensor, vision_feature_layer: Optional[Union[int, List[int]]]=None):
vision_feature_layer = vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
batch_size_vid, num_frames, channels, height, width = pixel_values_videos.shape
pixel_values = pixel_values_videos.reshape(batch_size_vid * num_frames, channels, height, width)
video_outputs = self.video_tower(pixel_values, output_hidden_states=True)
if isinstance(vision_feature_layer, int):
video_features = video_outputs.hidden_states[vision_feature_layer]
else:
hs_pool = [video_outputs.hidden_states[layer_idx] for layer_idx in vision_feature_layer]
video_features = torch.cat(hs_pool, dim=-1)
video_features = self.multi_modal_projector(video_features)
return (video_features, num_frames)
|
Obtains video last hidden states from the vision tower and apply multimodal projection.
Args:
pixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_frames, channels, height, width)`):
The tensors corresponding to the input videos.
vision_feature_layer (`Union[int, List[int]]`, *optional*):
The index of the layer to select the vision feature. If multiple indices are provided,
the vision feature of the corresponding indices will be concatenated to form the
vision features.
Returns:
video_features (`torch.Tensor`): Video feature tensor of shape `(num_videos * num_frames, image_length, embed_dim)`.
frames (`int`): Number of frames the videos have.
|
github-repos
|
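The frame-folding step can be illustrated standalone with fabricated shapes, assuming torch is installed (no model weights needed):

import torch

batch, frames, c, h, w = 2, 8, 3, 224, 224
pixel_values_videos = torch.randn(batch, frames, c, h, w)
# Frames are folded into the batch axis before the vision tower runs:
flat = pixel_values_videos.reshape(batch * frames, c, h, w)
assert flat.shape == (16, 3, 224, 224)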
def _schema_line(args):
name = args['table'] if args['table'] else args['view']
if name is None:
raise Exception('No table or view specified; cannot show schema')
schema = _get_schema(name)
if schema:
html = _repr_html_table_schema(schema)
return IPython.core.display.HTML(html)
else:
raise Exception('%s is not a schema and does not appear to have a schema member' % name)
|
Implements the BigQuery schema magic used to display table/view schemas.
Args:
args: the arguments following '%bigquery schema'.
Returns:
The HTML rendering for the schema.
|
juraj-google-style
|
def get_file(self, url, path_or_file=None, headers=None, filename=None):
path_or_file = (path_or_file or filename)
if self.debug:
print(('GET FILE: %s, headers=%s' % (url, headers)))
self.headers = self._get_default_headers()
if (headers is not None):
self.headers.update(headers)
response = requests.get(url, headers=self.headers, auth=self.auth, verify=self.verify_ssl)
self.http_status_code = response.status_code
try:
self._check_error(response)
try:
path_or_file.write(response.content)
except AttributeError:
fd = os.open(path_or_file, (os.O_CREAT | os.O_RDWR))
with os.fdopen(fd, 'w+b') as f:
f.write(response.content)
    except Exception:
return False
return True
|
Get a file from a url and save it as `filename`
Args:
url (str): URL to send the request to
path_or_file (str or file): A writable File-like object or a path to save the file to.
filename (str): [DEPRECATED] File name to save the file as, this can be either
a full path or a relative path
headers (str, optional): custom headers
Returns:
True if file is downloaded and written successfully, False
otherwise.
|
codesearchnet
|
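A hypothetical usage sketch, assuming `client` is an instance of the class above and the URL is reachable:

with open('report.pdf', 'wb') as fh:
    ok = client.get_file('https://example.com/report.pdf', path_or_file=fh)
print('downloaded' if ok else 'failed')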
def pipelines(self, **kwargs):
path = '%s/%s/pipelines' % (self.manager.path, self.get_id())
return self.manager.gitlab.http_get(path, **kwargs)
|
List the merge request pipelines.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabListError: If the list could not be retrieved
Returns:
RESTObjectList: List of merge request pipelines
|
juraj-google-style
|
def mount(self, app=None):
for endpoint in self._routes:
endpoint.register_app(app)
return self
|
Mounts all registered routes to a bottle.py application instance.
Args:
app (instance): A `bottle.Bottle()` application instance.
Returns:
The Router instance (for chaining purposes).
|
codesearchnet
|
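A hypothetical usage sketch, assuming bottle is installed and `router` is a Router instance with routes already registered:

import bottle

app = bottle.Bottle()
router.mount(app)  # registers every route on the bottle app
app.run(host='localhost', port=8080)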
def get_mysql_vars(mysql: str, host: str, port: int, user: str) -> Dict[str, str]:
cmdargs = [mysql, '-h', host, '-P', str(port), '-e', 'SHOW VARIABLES; SHOW STATUS', '-u', user, '-p']
log.info('Connecting to MySQL with user: {}', user)
log.debug(cmdargs)
process = subprocess.Popen(cmdargs, stdout=subprocess.PIPE)
(out, err) = process.communicate()
lines = out.decode('utf8').splitlines()
mysqlvars = {}
for line in lines:
(var, val) = line.split('\t')
mysqlvars[var] = val
return mysqlvars
|
Asks MySQL for its variables and status.
Args:
mysql: ``mysql`` executable filename
host: host name
port: TCP/IP port number
user: username
Returns:
dictionary of MySQL variables/values
|
codesearchnet
|
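The tab-separated parsing step can be exercised without a live server, using fabricated command output:

out = b'max_connections\t151\nversion\t8.0.36\n'
mysqlvars = {}
for line in out.decode('utf8').splitlines():
    var, val = line.split('\t')
    mysqlvars[var] = val
assert mysqlvars == {'max_connections': '151', 'version': '8.0.36'}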
def proc_ovrds(**kwargs):
return [
(k, v) for k, v in kwargs.items()
if k not in list(ELEM_KEYS.keys()) + list(ELEM_KEYS.values()) + PRSV_COLS
]
|
Bloomberg overrides
Args:
**kwargs: overrides
Returns:
list of tuples
Examples:
>>> proc_ovrds(DVD_Start_Dt='20180101')
[('DVD_Start_Dt', '20180101')]
>>> proc_ovrds(DVD_Start_Dt='20180101', cache=True, has_date=True)
[('DVD_Start_Dt', '20180101')]
|
juraj-google-style
|
def unshare(self, group_id, **kwargs):
path = '/projects/%s/share/%s' % (self.get_id(), group_id)
self.manager.gitlab.http_delete(path, **kwargs)
|
Delete a shared project link within a group.
Args:
group_id (int): ID of the group.
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabDeleteError: If the server failed to perform the request
|
juraj-google-style
|
def ones_like(array, dtype=None, keepmeta=True):
if keepmeta:
return xr.ones_like(array, dtype)
else:
return dc.ones(array.shape, dtype)
|
Create an array of ones with the same shape and type as the input array.
Args:
array (xarray.DataArray): The shape and data-type of it define
these same attributes of the output array.
dtype (data-type, optional): If specified, this function overrides
the data-type of the output array.
keepmeta (bool, optional): Whether *coords, attrs, and name of the input
array are kept in the output one. Default is True.
Returns:
array (decode.array): Decode array filled with ones.
|
codesearchnet
|
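A usage sketch with a plain xarray.DataArray, assuming xarray and numpy are installed; this is enough to exercise the keepmeta=True branch:

import numpy as np
import xarray as xr

arr = xr.DataArray(np.zeros((2, 3)), dims=('x', 'y'), name='spec')
ones = ones_like(arr)  # keeps dims, coords, attrs, and name
assert ones.shape == (2, 3) and float(ones.sum()) == 6.0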
def initGP(self, fast=False):
if fast:
assert self.n_terms==2, 'CVarianceDecomposition: for fast inference number of terms must be == 2'
assert self.P>1, 'CVarianceDecomposition: for fast inference number of traits must be > 1'
self.vd.initGPkronSum()
else:
self.vd.initGP()
    self.gp = self.vd.getGP()
    self.init = True
    self.fast = fast
|
Initialize the GP object.
Args:
fast: if fast is True, initialize a gpkronSum GP
|
juraj-google-style
|
def call_requests(
requests: Union[Request, Iterable[Request]], methods: Methods, debug: bool
) -> Response:
    if isinstance(requests, collections.abc.Iterable):
return BatchResponse(safe_call(r, methods, debug=debug) for r in requests)
return safe_call(requests, methods, debug=debug)
|
Takes a request or list of Requests and calls them.
Args:
requests: Request object, or a collection of them.
methods: The list of methods that can be called.
debug: Include more information in error responses.
Returns:
A single Response, or a BatchResponse when a collection of requests
is passed.
|
juraj-google-style
|
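A simplified, self-contained sketch of the same single-vs-batch dispatch, with plain dicts standing in for Request objects and a stand-in safe_call:

def safe_call(request, methods):
    return methods[request['method']](*request.get('params', []))

def dispatch(requests, methods):
    if isinstance(requests, list):       # a batch
        return [safe_call(r, methods) for r in requests]
    return safe_call(requests, methods)  # a single request

methods = {'add': lambda a, b: a + b}
assert dispatch({'method': 'add', 'params': [1, 2]}, methods) == 3
assert dispatch([{'method': 'add', 'params': [1, 2]}] * 2, methods) == [3, 3]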
def _bit_list_to_bytes(bit_list):
num_bits = len(bit_list)
byte_vals = bytearray()
for start in six.moves.xrange(0, num_bits, 8):
curr_bits = bit_list[start:(start + 8)]
char_val = sum(((val * digit) for (val, digit) in six.moves.zip(_POW2, curr_bits)))
byte_vals.append(char_val)
return bytes(byte_vals)
|
Converts an iterable of 1s and 0s to bytes.
Combines the list 8 at a time, treating each group of 8 bits
as a single byte.
Args:
bit_list (Sequence): Sequence of 1s and 0s.
Returns:
bytes: The decoded bytes.
|
codesearchnet
|
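An illustrative check, assuming _POW2 holds the descending powers of two (128, 64, ..., 1), i.e. most-significant bit first, as the zip over each 8-bit chunk suggests:

bits = [0, 1, 0, 0, 0, 0, 0, 1,  # 0b01000001 == 65 == b'A'
        0, 1, 0, 0, 0, 0, 1, 0]  # 0b01000010 == 66 == b'B'
assert _bit_list_to_bytes(bits) == b'AB'  # expected under that assumption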
def _assert_splits_match(nested_splits_lists):
error_msg = 'Inputs must have identical ragged splits'
for splits_list in nested_splits_lists:
if len(splits_list) != len(nested_splits_lists[0]):
raise ValueError(error_msg)
return [check_ops.assert_equal(s1, s2, message=error_msg) for splits_list in nested_splits_lists[1:] for s1, s2 in zip(nested_splits_lists[0], splits_list)]
|
Checks that the given splits lists are identical.
Performs static tests to ensure that the given splits lists are identical,
and returns a list of control dependency op tensors that check that they are
fully identical.
Args:
nested_splits_lists: A list of nested_splits_lists, where each split_list is
a list of `splits` tensors from a `RaggedTensor`, ordered from outermost
ragged dimension to innermost ragged dimension.
Returns:
A list of control dependency op tensors.
Raises:
ValueError: If the splits are not identical.
|
github-repos
|
from Bio import SeqIO  # Biopython import; needed by SeqIO.parse below

def load_fasta_file(filename):
    with open(filename, 'r') as handle:
        records = list(SeqIO.parse(handle, 'fasta'))
    return records
|
Load a FASTA file and return the sequences as a list of SeqRecords
Args:
filename (str): Path to the FASTA file to load
Returns:
list: list of all sequences in the FASTA file as Biopython SeqRecord objects
|
codesearchnet
|
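A usage sketch, assuming Biopython is installed and 'proteins.fasta' is a hypothetical path to an existing FASTA file:

records = load_fasta_file('proteins.fasta')
for rec in records:
    print(rec.id, len(rec.seq))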