| code (string, 20–4.93k chars) | docstring (string, 33–1.27k chars) | source (string, 3 classes) |
|---|---|---|
def loss_masks(self, masks_queries_logits: Tensor, mask_labels: List[Tensor], indices: Tuple[np.array], num_masks: int) -> Dict[str, Tensor]:
src_idx = self._get_predictions_permutation_indices(indices)
tgt_idx = self._get_targets_permutation_indices(indices)
pred_masks = masks_queries_logits[src_idx]
target_masks, _ = self._pad_images_to_max_in_batch(mask_labels)
target_masks = target_masks[tgt_idx]
pred_masks = pred_masks[:, None]
target_masks = target_masks[:, None]
with torch.no_grad():
point_coords = self.sample_points_using_uncertainty(pred_masks, self.calculate_uncertainty, self.num_points, self.oversample_ratio, self.importance_sample_ratio)
point_labels = sample_point(target_masks, point_coords, align_corners=False).squeeze(1)
point_logits = sample_point(pred_masks, point_coords, align_corners=False).squeeze(1)
losses = {'loss_mask': sigmoid_cross_entropy_loss(point_logits, point_labels, num_masks), 'loss_dice': dice_loss(point_logits, point_labels, num_masks)}
del pred_masks
del target_masks
return losses
|
Compute the losses related to the masks using sigmoid cross-entropy and dice loss.
Args:
masks_queries_logits (`torch.Tensor`):
A tensor of shape `(batch_size, num_queries, height, width)`.
mask_labels (`List[torch.Tensor]`):
List of mask labels of shape `(labels, height, width)`.
indices (`Tuple[np.array]`):
The indices computed by the Hungarian matcher.
num_masks (`int`):
The number of masks, used for normalization.
Returns:
`Dict[str, Tensor]`: A dict of `torch.Tensor` containing two keys:
- **loss_mask** -- The loss computed using sigmoid cross-entropy loss on the predicted and ground truth masks.
- **loss_dice** -- The loss computed using dice loss on the predicted and ground truth
masks.
|
github-repos
|
def _get_configured_module(option_name, known_modules=None):
from furious.job_utils import path_to_reference
config = get_config()
option_value = config[option_name]
if not known_modules:
known_modules = {}
module_path = known_modules.get(option_value) or option_value
return path_to_reference(module_path)
|
Get the module specified by the value of option_name. The value of the
configuration option will be used to load the module by name from the known
module list, or treated as a module path if not found in known_modules.
Args:
option_name: name of the persistence module option
known_modules: dictionary mapping module names to module paths,
e.g. {'ndb': 'furious.extras.appengine.ndb_persistence'}
Returns:
the module at the path matching the name in known_modules, or at the
option value itself if the name is not known
|
juraj-google-style
|
def __init__(self, variant_type='snv'):
super(VcfPlugin, self).__init__()
self.individual_objs = []
self.case_objs = []
self.variant_type = variant_type
logger.info("Setting variant type to {0}".format(variant_type))
self.variant_columns = ['CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER']
self.head = None
self.vep_header = None
self.snpeff_header = None
self.filters.can_filter_gene = True
self.filters.can_filter_frequency = True
self.filters.can_filter_cadd = True
self.filters.can_filter_consequence = True
self.filters.can_filter_impact_severity = True
self.filters.can_filter_sv = True
self.filters.can_filter_sv_len = True
self.filters.can_filter_inheritance = True
|
Initialize a vcf adapter.
When instantiating, all cases are found.
Args:
variant_type (str): 'snv' or 'sv'
|
juraj-google-style
|
def char_ngrams(s, n=3, token_fn=tokens.on_whitespace):
tokens = token_fn(s)
ngram_tuples = [__ngrams(t, n=min(len(t), n)) for t in tokens]
def unpack(l):
return sum(l, [])
def untuple(l):
return [''.join(t) for t in l]
return untuple(unpack(ngram_tuples))
|
Character-level n-grams from within the words in a string.
By default, the word boundary is assumed to be whitespace. n-grams are
not taken across word boundaries, only within words.
If a word's length is less than or equal to n, the n-grams are simply a
list with the word itself.
>>> ng.char_ngrams('This is not a test!')
['Thi', 'his', 'is', 'not', 'a', 'tes', 'est', 'st!']
Therefore some n-grams may have a length less than n, like 'is' and 'a'
in this example.
Args:
s: a string
n: an int for the n in n-gram
token_fn: a function that splits a string into a list of strings
Returns:
list: strings of char-level n-grams
|
codesearchnet
|
def remove_waiter(self, waiter_handle):
spec, waiter = waiter_handle
self._remove_waiter(spec, waiter)
|
Remove a message callback.
This call will remove a callback previously registered using
every_match.
Args:
waiter_handle (object): The opaque handle returned by the
previous call to every_match().
|
juraj-google-style
|
def stop_dag(self, name=None):
return self._client.send(
Request(
action='stop_dag',
payload={'name': name if name is not None else self._dag_name}
)
).success
|
Send a stop signal to the specified dag or the dag that hosts this task.
Args:
name (str): The name of the dag that should be stopped. If no name is given,
the dag that hosts this task is stopped.
Upon receiving the stop signal, the dag will not queue any new tasks and will
wait for running tasks to terminate.
Returns:
bool: True if the signal was sent successfully.
|
juraj-google-style
|
def __init__(self, action_type=None, nw_addr=None):
super().__init__(action_type, length=8)
self.nw_addr = nw_addr
|
Create an ActionNWAddr with the optional parameters below.
Args:
action_type (:class:`~pyof.v0x01.common.action.ActionType`):
:attr:`~ActionType.OFPAT_SET_NW_SRC` or
:attr:`~ActionType.OFPAT_SET_NW_DST`.
nw_addr (int): IP Address.
|
juraj-google-style
|
def InsertData(self, table_id, fd, schema, job_id):
configuration = {'schema': {'fields': schema}, 'destinationTable': {'projectId': self.project_id, 'tableId': table_id, 'datasetId': self.dataset_id}, 'sourceFormat': 'NEWLINE_DELIMITED_JSON'}
body = {'configuration': {'load': configuration}, 'jobReference': {'projectId': self.project_id, 'jobId': job_id}}
mediafile = http.MediaFileUpload(fd.name, mimetype='application/octet-stream')
job = self.service.jobs().insert(projectId=self.project_id, body=body, media_body=mediafile)
try:
response = job.execute()
return response
except errors.HttpError as e:
if self.GetDataset(self.dataset_id):
logging.exception('Error with job: %s', job_id)
else:
logging.info('Attempting to create dataset: %s', self.dataset_id)
self.CreateDataset()
return self.RetryUpload(job, job_id, e)
|
Insert data into a bigquery table.
If the table specified doesn't exist, it will be created with the specified
schema.
Args:
table_id: string table id
fd: open file descriptor containing the newline separated JSON
schema: BigQuery schema dict
job_id: string job id
Returns:
API response object on success, None on failure
|
codesearchnet
|
def __init__(self, fetches, contraction_fn):
self._unique_fetches = []
for fetch in fetches:
try:
self._unique_fetches.append(ops.get_default_graph().as_graph_element(fetch, allow_tensor=True, allow_operation=True))
except TypeError as e:
raise TypeError(f'Argument `fetch` = {fetch} has invalid type "{type(fetch).__name__}" must be a string or Tensor. ({str(e)})')
except ValueError as e:
raise ValueError(f'Argument `fetch` = {fetch} cannot be interpreted as a Tensor. ({str(e)})')
except KeyError as e:
raise ValueError(f'Argument `fetch` = {fetch} cannot be interpreted as a Tensor. ({str(e)})')
self._contraction_fn = contraction_fn
|
Creates an _ElementFetchMapper.
This is the fetch mapper used for leaves in the fetch struct. Because of
the expansions mechanism, a leaf can actually fetch more than one tensor.
Also note that the fetches here can be just strings (tensor or op names) or
any other object that the graph knows how to convert to a tensor, such as a
Variable. So we have to run each fetch through `as_graph_element()` to get
the corresponding tensor or op.
Args:
fetches: List of objects, as returned by a fetch_fn defined in
_REGISTERED_EXPANSIONS.
contraction_fn: Callable as returned by a fetch_fn.
|
github-repos
|
def view(value: Any, *, name: Optional[str]=None, root_path: Optional[utils.KeyPath]=None, view_id: str='html-tree-view', **kwargs) -> Content:
if isinstance(value, Content):
return value
with view_options(**kwargs) as options:
view_object = View.create(view_id)
return view_object.render(value, name=name, root_path=root_path or utils.KeyPath(), **options)
|
Views an object by generating content based on a specific view.
Args:
value: The value to view.
name: The name of the value.
root_path: The root path of the value.
view_id: The ID of the view to use. See `pg.View.dir()` for all available
view IDs.
**kwargs: Additional keyword arguments passed to the view, which
will be used as the preset arguments for the View and Extension methods.
Returns:
The rendered `Content` object.
|
github-repos
|
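A minimal usage sketch for `view` above. It assumes the function is exposed as `pg.view` in the pyglove package and that the default `'html-tree-view'` is registered; the names are illustrative, not verified.

import pyglove as pg  # assumed host package for `view`; illustrative only

# Render a plain dict through the default 'html-tree-view'.
content = pg.view({'learning_rate': 0.1, 'optimizer': 'adam'}, name='config')
# `content` is the rendered Content object described above.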
def inner_join(df, other, **kwargs):
(left_on, right_on, suffixes) = get_join_parameters(kwargs)
joined = df.merge(other, how='inner', left_on=left_on, right_on=right_on, suffixes=suffixes)
return joined
|
Joins on values present in both DataFrames.
Args:
df (pandas.DataFrame): Left DataFrame (passed in via pipe)
other (pandas.DataFrame): Right DataFrame
Kwargs:
by (str or list): Columns to join on. If a single string, will join
on that column. If a list of lists which contain strings or
integers, the right/left columns to join on.
suffixes (list): String suffixes to append to column names in left
and right DataFrames.
Example:
a >> inner_join(b, by='x1')
x1 x2 x3
0 A 1 True
1 B 2 False
|
codesearchnet
|
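A runnable sketch of `inner_join` above in plain pandas, with a minimal stand-in for the `get_join_parameters` helper (the real helper's behavior is assumed, and the `>>` pipe operator of the host library is omitted):

import pandas as pd

def get_join_parameters(kwargs):
    # Stand-in: map `by` to both left_on and right_on; the real helper may differ.
    by = kwargs.get('by')
    return (by, by, kwargs.get('suffixes', ('_x', '_y')))

a = pd.DataFrame({'x1': ['A', 'B', 'C'], 'x2': [1, 2, 3]})
b = pd.DataFrame({'x1': ['A', 'B', 'D'], 'x3': [True, False, True]})
print(inner_join(a, b, by='x1'))
#   x1  x2     x3
# 0  A   1   True
# 1  B   2  False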
def neighborhood_probability(self, threshold, radius, sigmas=None):
if (sigmas is None):
sigmas = [0]
weights = disk(radius)
filtered_prob = []
for sigma in sigmas:
filtered_prob.append(EnsembleConsensus(np.zeros(self.data.shape[1:], dtype=np.float32), 'neighbor_prob_r_{0:d}_s_{1:d}'.format(radius, sigma), self.ensemble_name, self.run_date, (self.variable + '_{0:0.2f}'.format(threshold)), self.start_date, self.end_date, ''))
thresh_data = np.zeros(self.data.shape[2:], dtype=np.uint8)
neighbor_prob = np.zeros(self.data.shape[2:], dtype=np.float32)
for t in range(self.data.shape[1]):
for m in range(self.data.shape[0]):
thresh_data[(self.data[(m, t)] >= threshold)] = 1
maximized = fftconvolve(thresh_data, weights, mode='same')
maximized[(maximized > 1)] = 1
maximized[(maximized < 1)] = 0
neighbor_prob += fftconvolve(maximized, weights, mode='same')
neighbor_prob[(neighbor_prob < 1)] = 0
thresh_data[:] = 0
neighbor_prob /= (self.data.shape[0] * float(weights.sum()))
for (s, sigma) in enumerate(sigmas):
if (sigma > 0):
filtered_prob[s].data[t] = gaussian_filter(neighbor_prob, sigma=sigma)
else:
filtered_prob[s].data[t] = neighbor_prob
neighbor_prob[:] = 0
return filtered_prob
|
Hourly probability of exceeding a threshold based on model values within a specified radius of a point.
Args:
threshold (float): the threshold value; probabilities are computed for exceeding it.
radius (int): distance from point in number of grid points to include in neighborhood calculation.
sigmas (array of ints): Radii for Gaussian filter used to smooth neighborhood probabilities.
Returns:
list of EnsembleConsensus objects containing neighborhood probabilities for each forecast hour.
|
codesearchnet
|
def remove_codeblock_syntax_sentinals(code_text):
flags = (re.MULTILINE | re.DOTALL)
code_text_ = code_text
# The three substitution patterns were cut off at the '#' character in the
# source dump; the sentinel-comment patterns below are a best-effort
# reconstruction, not the verbatim originals.
code_text_ = re.sub(r'^ *# *REM [^\n]*\n?', '', code_text_, flags=flags)
code_text_ = re.sub(r'^ *# STARTBLOCK *$\n?', '', code_text_, flags=flags)
code_text_ = re.sub(r'^ *# ENDBLOCK *$\n?', '', code_text_, flags=flags)
code_text_ = code_text_.rstrip()
return code_text_
|
r"""
Removes template comments and vim sentinels
Args:
code_text (str):
Returns:
str: code_text_
|
codesearchnet
|
def restores(self):
if (not self.__restores):
self.__restores = Restores(self.__connection)
return self.__restores
|
Gets the Restores API client.
Returns:
Restores:
|
codesearchnet
|
def from_str(cls, label: str) -> int:
label_norm = label.replace('1', 'one').upper()
if (label_norm in cls.__members__):
return DecayType[label_norm]
else:
raise NotImplementedError
|
Convert a given string label of a decay type to the corresponding index
Args:
label: name of decay type.
Set of values: `"linear"`, `"cosine"`, `"exponential"`,
`"onecycle"`, `"trapezoid"`, `["polynomial", K]`, where K is a polynomial power
Returns:
index of decay type
|
codesearchnet
|
def patch_so(srcs_dir: str) -> None:
to_patch = {'tensorflow/python/_pywrap_tensorflow_internal.so': '$ORIGIN/../../tensorflow/compiler/xla/tsl/python/lib/core', 'tensorflow/compiler/mlir/quantization/tensorflow/python/pywrap_function_lib.so': '$ORIGIN/../../../../../python', 'tensorflow/compiler/mlir/quantization/tensorflow/python/pywrap_quantize_model.so': '$ORIGIN/../../../../../python', 'tensorflow/compiler/mlir/tensorflow_to_stablehlo/python/pywrap_tensorflow_to_stablehlo.so': '$ORIGIN/../../../../python', 'tensorflow/compiler/mlir/lite/python/_pywrap_converter_api.so': '$ORIGIN/../../../../python'}
for file, path in to_patch.items():
rpath = subprocess.check_output(['patchelf', '--print-rpath', '{}/{}'.format(srcs_dir, file)]).decode().strip()
new_rpath = rpath + ':' + path
subprocess.run(['patchelf', '--set-rpath', new_rpath, '{}/{}'.format(srcs_dir, file)], check=True)
subprocess.run(['patchelf', '--shrink-rpath', '{}/{}'.format(srcs_dir, file)], check=True)
|
Patch .so files.
We must patch some of .so files otherwise auditwheel will fail.
Args:
srcs_dir: target directory with .so files to patch.
|
github-repos
|
def _DrawTrips(self, triplist, colpar=''):
stations = []
if ((not self._stations) and triplist):
self._stations = self._CalculateYLines(self._TravelTimes(triplist))
if (not self._stations):
self._AddWarning('Failed to use traveltimes for graph')
self._stations = self._CalculateYLines(self._Uniform(triplist))
if (not self._stations):
self._AddWarning('Failed to calculate station distances')
return
stations = self._stations
tmpstrs = []
servlist = []
for t in triplist:
if (not colpar):
if (t.service_id not in servlist):
servlist.append(t.service_id)
shade = int(((servlist.index(t.service_id) * (200 / len(servlist))) + 55))
color = ('#%02x%02x%02x' % (shade, shade, shade))
else:
color = colpar
start_offsets = [0]
first_stop = t.GetTimeStops()[0]
for (j, freq_offset) in enumerate(start_offsets):
if ((j > 0) and (not colpar)):
color = 'purple'
scriptcall = ('onmouseover="LineClick(\'%s\',\'Trip %s starting %s\')"' % (t.trip_id, t.trip_id, transitfeed.FormatSecondsSinceMidnight(t.GetStartTime())))
tmpstrhead = ('<polyline class="T" id="%s" stroke="%s" %s points="' % (str(t.trip_id), color, scriptcall))
tmpstrs.append(tmpstrhead)
for (i, s) in enumerate(t.GetTimeStops()):
arr_t = s[0]
dep_t = s[1]
if ((arr_t is None) or (dep_t is None)):
continue
arr_x = (int(((arr_t / 3600.0) * self._hour_grid)) - (self._hour_grid * self._offset))
dep_x = (int(((dep_t / 3600.0) * self._hour_grid)) - (self._hour_grid * self._offset))
tmpstrs.append(('%s,%s ' % (int((arr_x + 20)), int((stations[i] + 20)))))
tmpstrs.append(('%s,%s ' % (int((dep_x + 20)), int((stations[i] + 20)))))
tmpstrs.append('" />')
return ''.join(tmpstrs)
|
Generates svg polylines for each transit trip.
Args:
# Class Trip is defined in transitfeed.py
[Trip, Trip, ...]
Returns:
# A string containing a polyline tag for each trip
' <polyline class="T" stroke="#336633" points="433,0 ...'
|
codesearchnet
|
def oauth2decorator_from_clientsecrets(filename, scope, message=None, cache=None):
return OAuth2DecoratorFromClientSecrets(filename, scope, message=message, cache=cache)
|
Creates an OAuth2Decorator populated from a clientsecrets file.
Args:
filename: string, File name of client secrets.
scope: string or list of strings, scope(s) of the credentials being
requested.
message: string, A friendly string to display to the user if the
clientsecrets file is missing or invalid. The message may
contain HTML and will be presented on the web interface for
any method that uses the decorator.
cache: An optional cache service client that implements get() and set()
methods. See clientsecrets.loadfile() for details.
Returns: An OAuth2Decorator
|
codesearchnet
|
def stop_loss_replace(self, accountID, orderID, **kwargs):
return self.replace(
accountID,
orderID,
order=StopLossOrderRequest(**kwargs)
)
|
Shortcut to replace a pending Stop Loss Order in an Account
Args:
accountID : The ID of the Account
orderID : The ID of the Stop Loss Order to replace
kwargs : The arguments to create a StopLossOrderRequest
Returns:
v20.response.Response containing the results from submitting
the request
|
juraj-google-style
|
def create(window, root):
notifications = {}
_id = root.get_property("id")
from foxpuppet.windows.browser.notifications import addons
notifications.update(addons.NOTIFICATIONS)
return notifications.get(_id, BaseNotification)(window, root)
|
Create a notification object.
Args:
window (:py:class:`BrowserWindow`): Window object this region
appears in.
root
(:py:class:`~selenium.webdriver.remote.webelement.WebElement`):
WebDriver element object that serves as the root for the
notification.
Returns:
:py:class:`BaseNotification`: Firefox notification.
|
juraj-google-style
|
def _load_data(self, resource, default=DEFAULT_VALUE_SAFEGUARD, **kwargs):
default_val = (default if (default != self.DEFAULT_VALUE_SAFEGUARD) else {})
try:
return (get_edx_api_data(api_config=CatalogIntegration.current(), resource=resource, api=self.client, **kwargs) or default_val)
except (SlumberBaseException, ConnectionError, Timeout) as exc:
LOGGER.exception('Failed to load data from resource [%s] with kwargs [%s] due to: [%s]', resource, kwargs, str(exc))
return default_val
|
Load data from API client.
Arguments:
resource(string): type of resource to load
default(any): value to return if API query returned empty result. Sensible values: [], {}, None etc.
Returns:
dict: Deserialized response from Course Catalog API
|
codesearchnet
|
def attention_bias_proximal(length):
r = tf.to_float(tf.range(length))
diff = (tf.expand_dims(r, 0) - tf.expand_dims(r, 1))
return tf.expand_dims(tf.expand_dims((- tf.log1p(tf.abs(diff))), 0), 0)
|
Bias for self-attention to encourage attention to close positions.
Args:
length: an integer scalar.
Returns:
a Tensor with shape [1, 1, length, length]
|
codesearchnet
|
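The bias above is just `-log(1 + |i - j|)` broadcast to shape `[1, 1, length, length]`; a NumPy sketch of the same computation:

import numpy as np

length = 4
r = np.arange(length, dtype=np.float32)
diff = r[None, :] - r[:, None]              # pairwise position differences
bias = -np.log1p(np.abs(diff))[None, None]  # shape (1, 1, 4, 4)
print(bias[0, 0, 0])  # [ 0. -0.6931 -1.0986 -1.3863]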
def list_asgs_all(access_token, subscription_id):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/providers/Microsoft.Network/applicationSecurityGroups/',
'?api-version=', NETWORK_API])
return do_get(endpoint, access_token)
|
Get details about the application security groups in a subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. ASG JSON body.
|
juraj-google-style
|
def text_colour_for_hex(hexx, percent=50, dark='#000000', light='#ffffff'):
return (light if hex_is_dark(hexx, percent=percent) else dark)
|
Function to decide what colour to use for a given hex colour.
Args:
hexx (str): A hexadecimal colour, starting with '#'.
Returns:
str: The dark colour if the background is light, otherwise the light colour.
|
codesearchnet
|
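A usage sketch with a stand-in for the `hex_is_dark` helper that the function depends on (the real helper's brightness formula is assumed):

def hex_is_dark(hexx, percent=50):
    # Stand-in: average the RGB channels and compare to the percent threshold.
    r, g, b = (int(hexx[i:i + 2], 16) for i in (1, 3, 5))
    return (r + g + b) / 3 < 255 * percent / 100

print(text_colour_for_hex('#112233'))  # '#ffffff' (light text on a dark colour)
print(text_colour_for_hex('#eeeeee'))  # '#000000' (dark text on a light colour)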
def symlink(src, link):
if (sys.getwindowsversion().major < 6):
raise SaltInvocationError('Symlinks are only supported on Windows Vista or later.')
if (not os.path.exists(src)):
raise SaltInvocationError('The given source path does not exist.')
if (not os.path.isabs(src)):
raise SaltInvocationError('File path must be absolute.')
src = os.path.normpath(src)
link = os.path.normpath(link)
is_dir = os.path.isdir(src)
try:
win32file.CreateSymbolicLink(link, src, int(is_dir))
return True
except pywinerror as exc:
raise CommandExecutionError("Could not create '{0}' - [{1}] {2}".format(link, exc.winerror, exc.strerror))
|
Create a symbolic link to a file
This is only supported with Windows Vista or later and must be executed by
a user with the SeCreateSymbolicLink privilege.
The behavior of this function matches the Unix equivalent, with one
exception - invalid symlinks cannot be created. The source path must exist.
If it doesn't, an error will be raised.
Args:
src (str): The path to a file or directory
link (str): The path to the link
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' file.symlink /path/to/file /path/to/link
|
codesearchnet
|
def trace_set_buffer_capacity(self, size):
cmd = enums.JLinkTraceCommand.SET_CAPACITY
data = ctypes.c_uint32(size)
res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))
if (res == 1):
raise errors.JLinkException('Failed to set trace buffer size.')
return None
|
Sets the capacity for the trace buffer.
Args:
self (JLink): the ``JLink`` instance.
size (int): the new capacity for the trace buffer.
Returns:
``None``
|
juraj-google-style
|
def supported_tifs(self):
buf = ctypes.c_uint32()
self._dll.JLINKARM_TIF_GetAvailable(ctypes.byref(buf))
return buf.value
|
Returns a bitmask of the supported target interfaces.
Args:
self (JLink): the ``JLink`` instance
Returns:
Bitfield specifying which target interfaces are supported.
|
juraj-google-style
|
def _ConvertAttributeContainerToDict(cls, attribute_container):
if (not isinstance(attribute_container, containers_interface.AttributeContainer)):
raise TypeError('{0!s} is not an attribute container type.'.format(type(attribute_container)))
container_type = getattr(attribute_container, 'CONTAINER_TYPE', None)
if (not container_type):
raise ValueError('Unsupported attribute container type: {0!s}.'.format(type(attribute_container)))
json_dict = {'__type__': 'AttributeContainer', '__container_type__': container_type}
for (attribute_name, attribute_value) in attribute_container.GetAttributes():
json_dict[attribute_name] = cls._ConvertAttributeValueToDict(attribute_value)
return json_dict
|
Converts an attribute container object into a JSON dictionary.
The resulting dictionary of the JSON serialized objects consists of:
{
'__type__': 'AttributeContainer'
'__container_type__': ...
...
}
Here '__type__' indicates the object base type. In this case
'AttributeContainer'.
'__container_type__' indicates the container type and rest of the elements
of the dictionary make up the attributes of the container.
Args:
attribute_container (AttributeContainer): attribute container.
Returns:
dict[str, object]: JSON serialized objects.
Raises:
TypeError: if not an instance of AttributeContainer.
ValueError: if the attribute container type is not supported.
|
codesearchnet
|
def parse_columns(lines):
data = []
index = []
for line in lines:
line = line.rstrip()
if line.startswith('#'):
tmp = __parse_entry(line)
data.append(tmp[1])
index.append(tmp[0])
return DataFrame(data, index=index, columns=['description'])
|
Parse list of lines with columns description from SOFT file.
Args:
lines (:obj:`Iterable`): Iterator over the lines.
Returns:
:obj:`pandas.DataFrame`: Columns description.
|
codesearchnet
|
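A sketch of the expected input, assuming SOFT column-description lines take the form `#NAME = description` and that the `__parse_entry` helper splits on the first `=` (both assumptions, not verified against the source):

lines = [
    '#ID_REF = probe set identifier',
    '#VALUE = normalized signal intensity',
    'ID_REF\tVALUE',  # non-'#' lines are skipped by parse_columns
]
columns = parse_columns(lines)
print(columns.loc['ID_REF', 'description'])  # 'probe set identifier'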
def GetEventTagByIdentifier(self, identifier):
event_tag = self._GetAttributeContainerByIndex(self._CONTAINER_TYPE_EVENT_TAG, (identifier.row_identifier - 1))
if event_tag:
event_identifier = identifiers.SQLTableIdentifier(self._CONTAINER_TYPE_EVENT, event_tag.event_row_identifier)
event_tag.SetEventIdentifier(event_identifier)
del event_tag.event_row_identifier
return event_tag
|
Retrieves a specific event tag.
Args:
identifier (SQLTableIdentifier): event tag identifier.
Returns:
EventTag: event tag or None if not available.
|
codesearchnet
|
def kill_reporter(self, check_alive=True):
if PY3:
self._kill_process_type(
ray_constants.PROCESS_TYPE_REPORTER, check_alive=check_alive)
|
Kill the reporter.
Args:
check_alive (bool): Raise an exception if the process was already
dead.
|
juraj-google-style
|
def csv_to_matrix(csv_file_path):
mtx = []
with open(csv_file_path) as csv_data_file:
for row in csv_data_file:
mtx.append(row.split(','))
return mtx
|
Load a CSV file into a Python matrix of strings.
Args:
csv_file_path: Full path to a valid CSV file (e.g. c:/ladybug/test.csv)
|
juraj-google-style
|
def softplus(x, scale=1.0, name=None):
if scale == 1:
return tf.nn.softplus(x)
else:
with tf.name_scope(name, 'softplus', [x]):
scale = tf.convert_to_tensor(scale, dtype=x.dtype.base_dtype)
return tf.nn.softplus(x * scale) / scale
|
Computes softplus with a scale factor that sharpens the hinge.
This is an alternate non-linearity to relu. It has a similar shape, but
it has a smooth transition from the linear part to 0.
Args:
x: A tensor.
scale: A float that sharpens the curve.
name: Optional name.
Returns:
y = log(1 + exp(scale * x)) / scale
|
juraj-google-style
|
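A NumPy sketch of the formula `y = log(1 + exp(scale * x)) / scale`, showing how a larger scale sharpens the hinge toward relu:

import numpy as np

def softplus_np(x, scale=1.0):
    # Same formula as above, without the TensorFlow plumbing.
    return np.log1p(np.exp(scale * x)) / scale

x = np.array([-2.0, 0.0, 2.0])
print(softplus_np(x, scale=1.0))   # [0.1269 0.6931 2.1269]
print(softplus_np(x, scale=10.0))  # [~0.     0.0693 2.    ] -- close to relu(x)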
def prep_parallel(self, binary_args, other_args):
if self.length < 100:
raise Exception("Run this across 1 processor by setting num_processors kwarg to None.")
if self.num_processors == -1:
self.num_processors = mp.cpu_count()
split_val = int(np.ceil(self.length/self.num_splits))
split_inds = [self.num_splits*i for i in np.arange(1, split_val)]
inds_split_all = np.split(np.arange(self.length), split_inds)
self.args = []
for i, ind_split in enumerate(inds_split_all):
trans_args = []
for arg in binary_args:
try:
trans_args.append(arg[ind_split])
except TypeError:
trans_args.append(arg)
self.args.append((i, tuple(trans_args)) + other_args)
return
|
Prepare the parallel calculations
Prepares the arguments to be run in parallel.
It will divide up arrays according to num_splits.
Args:
binary_args (list): List of binary arguments for input into the SNR function.
other_args (tuple of obj): tuple of other args for input into parallel snr function.
|
juraj-google-style
|
def __init__(self, job_id, context):
super(GCPJob, self).__init__(job_id)
if context is None:
context = google.datalab.Context.default()
self._context = context
self._api = self._create_api(context)
|
Initializes an instance of a Job.
Args:
job_id: the BigQuery job ID corresponding to this job.
context: a Context object providing project_id and credentials.
|
juraj-google-style
|
def preprocess(self, raw_inputs):
image_arrays = []
for raw_im in raw_inputs:
im = raw_im.convert('L')
im = im.resize(MNIST_DIM, Image.ANTIALIAS)
arr = np.array(im)
image_arrays.append(arr)
inputs = np.array(image_arrays)
return (inputs.reshape(len(inputs), MNIST_DIM[0], MNIST_DIM[1], 1).astype('float32') / 255)
|
Convert images into the format required by our model.
Our model requires that inputs be grayscale (mode 'L'), be resized to
`MNIST_DIM`, and be represented as float32 numpy arrays in range
[0, 1].
Args:
raw_inputs (list of Images): a list of PIL Image objects
Returns:
array (float32): num images * height * width * num channels
|
codesearchnet
|
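A self-contained sketch of the same transformation for one synthetic image, assuming `MNIST_DIM = (28, 28)` (the original constant is defined elsewhere):

import numpy as np
from PIL import Image

MNIST_DIM = (28, 28)  # assumed value

raw = Image.new('RGB', (100, 100), color=(200, 30, 30))   # synthetic input image
im = raw.convert('L').resize(MNIST_DIM, Image.ANTIALIAS)  # Image.LANCZOS in newer Pillow
inputs = np.array([np.array(im)], dtype='float32')
inputs = inputs.reshape(1, MNIST_DIM[0], MNIST_DIM[1], 1) / 255
print(inputs.shape, float(inputs.min()), float(inputs.max()))  # (1, 28, 28, 1), values in [0, 1]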
def _page_to_text(page):
start_pos = page.find(u'<text')
assert (start_pos != (- 1))
end_tag_pos = page.find(u'>', start_pos)
assert (end_tag_pos != (- 1))
end_tag_pos += len(u'>')
end_pos = page.find(u'</text>')
if (end_pos == (- 1)):
return u''
return page[end_tag_pos:end_pos]
|
Extract the text from a page.
Args:
page: a unicode string
Returns:
a unicode string
|
codesearchnet
|
def rated_movies(self, **kwargs):
path = self._get_guest_session_id_path('rated_movies')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
|
Get a list of rated movies for a specific guest session id.
Args:
page: (optional) Minimum 1, maximum 1000.
sort_by: (optional) 'created_at.asc' | 'created_at.desc'
language: (optional) ISO 639-1 code.
Returns:
A dict representation of the JSON returned from the API.
|
codesearchnet
|
def execute(self):
for name_context, spec in zip(self._map_task.name_contexts, self._map_task.operations):
op = create_operation(name_context, spec, self._counter_factory, None, self._state_sampler, test_shuffle_source=self._test_shuffle_source, test_shuffle_sink=self._test_shuffle_sink)
self._ops.append(op)
if hasattr(op.spec, 'input'):
producer, output_index = op.spec.input
self._ops[producer].add_receiver(op, output_index)
if hasattr(op.spec, 'inputs'):
for producer, output_index in op.spec.inputs:
self._ops[producer].add_receiver(op, output_index)
for ix, op in reversed(list(enumerate(self._ops))):
_LOGGER.debug('Starting op %d %s', ix, op)
op.start()
for op in self._ops:
op.finish()
|
Executes all the operation_specs.Worker* instructions in a map task.
We update the map_task with the execution status, expressed as counters.
Raises:
RuntimeError: if we find more than one read instruction in task spec.
TypeError: if the spec parameter is not an instance of the recognized
operation_specs.Worker* classes.
|
github-repos
|
def _ragged_split(tensor, pieces):
shape = tensor.shape
if 1 != len(shape):
raise ValueError('input tensor must be 1D')
tensor_len = shape.dims[0].value
chunk_size = tensor_len // pieces
with ops.colocate_with(tensor):
if tensor_len != pieces * chunk_size:
assert pieces > 1
last_chunk_size = tensor_len - (pieces - 1) * chunk_size
assert last_chunk_size > 0
piece_lens = [chunk_size for _ in range(pieces - 1)] + [last_chunk_size]
return array_ops.split(tensor, piece_lens)
else:
return array_ops.split(tensor, pieces)
|
Like split for 1D tensors but allows case where len % pieces != 0.
Args:
tensor: `tf.Tensor` that must be 1D.
pieces: a positive integer specifying the number of pieces into which
tensor should be split.
Returns:
list of `tf.Tensor` of length pieces, which hold the values of
the input tensor, in order. The final tensor absorbs any remainder,
so it may differ in length from the others, which are all of equal length.
Raises:
ValueError: input tensor must be 1D.
|
github-repos
|
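A plain-Python check of the piece lengths `_ragged_split` produces, e.g. for a length-10 tensor split into 3 pieces:

def ragged_piece_lens(tensor_len, pieces):
    # Mirrors the piece-length arithmetic in _ragged_split above.
    chunk_size = tensor_len // pieces
    if tensor_len != pieces * chunk_size:
        last = tensor_len - (pieces - 1) * chunk_size
        return [chunk_size] * (pieces - 1) + [last]
    return [chunk_size] * pieces

print(ragged_piece_lens(10, 3))  # [3, 3, 4] -- final piece absorbs the remainder
print(ragged_piece_lens(9, 3))   # [3, 3, 3]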
class PatchTSMixerForRegressionOutput(ModelOutput):
loss: Optional[torch.FloatTensor] = None
regression_outputs: Optional[torch.FloatTensor] = None
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
Output type of [`PatchTSMixerForRegressionOutput`].
Args:
regression_outputs (`torch.FloatTensor` of shape `(batch_size, num_targets)`):
Prediction output from the regression head.
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_input_channels, num_patches, d_model)`):
Backbone embeddings before passing through the head.
hidden_states (`tuple(torch.FloatTensor)`, *optional*):
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
loss (*optional*, returned when `y` is provided, `torch.FloatTensor` of shape `()`):
Total loss.
|
github-repos
|
def GetContainingWhileContext(ctxt, stop_ctxt=None):
while ctxt:
if ctxt.IsWhileContext() or ctxt == stop_ctxt:
return ctxt
ctxt = ctxt.outer_context
return None
|
Returns the first ancestor WhileContext of `ctxt`.
Returns `ctxt` if `ctxt` is a WhileContext, or None if `ctxt` is not in a
while loop.
Args:
ctxt: ControlFlowContext
stop_ctxt: ControlFlowContext, optional. If provided, the search will end
if it sees stop_ctxt.
Returns:
`ctxt` if `ctxt` is a WhileContext, the most nested WhileContext containing
`ctxt`, or None if `ctxt` is not in a while loop. If `stop_ctxt` is not
`None`, this returns `ctxt` if it matches `stop_ctxt` in its traversal.
|
github-repos
|
def Create(conf, map_name, automount_mountpoint=None):
global _cache_implementations
if not _cache_implementations:
raise RuntimeError('no cache implementations exist')
cache_name = conf['name']
if cache_name not in _cache_implementations:
raise RuntimeError('cache not implemented: %r' % (cache_name,))
if map_name not in _cache_implementations[cache_name]:
raise RuntimeError('map %r not supported by cache %r' % (map_name, cache_name))
return _cache_implementations[cache_name][map_name](conf, map_name, automount_mountpoint=automount_mountpoint)
|
Cache creation factory method.
Args:
conf: a dictionary of configuration key/value pairs, including one
required attribute 'name'
map_name: a string identifying the map name to handle
automount_mountpoint: A string containing the automount mountpoint, used only
by automount maps.
Returns:
an instance of a Cache
Raises:
RuntimeError: problem instantiating the requested cache
|
github-repos
|
def structure_2_lmpdata(structure, ff_elements=None, atom_style="charge"):
s = structure.get_sorted_structure()
a, b, c = s.lattice.abc
m = s.lattice.matrix
xhi = a
xy = np.dot(m[1], m[0] / xhi)
yhi = np.sqrt(b ** 2 - xy ** 2)
xz = np.dot(m[2], m[0] / xhi)
yz = (np.dot(m[1], m[2]) - xy * xz) / yhi
zhi = np.sqrt(c ** 2 - xz ** 2 - yz ** 2)
box_bounds = [[0.0, xhi], [0.0, yhi], [0.0, zhi]]
box_tilt = [xy, xz, yz]
box_tilt = None if not any(box_tilt) else box_tilt
box = LammpsBox(box_bounds, box_tilt)
new_latt = Lattice([[xhi, 0, 0], [xy, yhi, 0], [xz, yz, zhi]])
s.lattice = new_latt
symbols = list(s.symbol_set)
if ff_elements:
symbols.extend(ff_elements)
elements = sorted(Element(el) for el in set(symbols))
mass_info = [tuple([i.symbol] * 2) for i in elements]
ff = ForceField(mass_info)
topo = Topology(s)
return LammpsData.from_ff_and_topologies(box=box, ff=ff, topologies=[topo],
atom_style=atom_style)
|
Converts a structure to a LammpsData object with no force field
parameters and topologies.
Args:
structure (Structure): Input structure.
ff_elements ([str]): List of strings of elements that must be
present due to force field settings but not necessarily in
the structure. Default to None.
atom_style (str): Choose between "atomic" (neutral) and
"charge" (charged). Default to "charge".
Returns:
LammpsData
|
juraj-google-style
|
def lookup(self, keys, name=None):
with ops.name_scope(name, '%s_lookup_table_find' % self.name, [self.resource_handle, keys]):
keys = ops.convert_to_tensor(keys, dtype=self._key_dtype, name='keys')
with ops.colocate_with(self.resource_handle):
values = gen_lookup_ops.lookup_table_find_v2(self.resource_handle, keys, self._default_value)
return values
|
Looks up `keys` in a table, outputs the corresponding values.
The `default_value` is used for keys not present in the table.
Args:
keys: Keys to look up. Can be a tensor of any shape. Must match the
table's key_dtype.
name: A name for the operation (optional).
Returns:
A tensor containing the values in the same shape as `keys` using the
table's value type.
Raises:
TypeError: when `keys` do not match the table data types.
|
github-repos
|
def __init__(self, ball_radius=10, n_chains=4, chain_length=10, monomer=None):
super(Tnp, self).__init__()
if not monomer:
monomer = Bead(particle_kind='t')
n = 129
self.add(Sphere(n=n, radius=ball_radius, port_distance_from_surface=0.7), label="np")
pattern = mb.SpherePattern(n_chains)
pattern.scale(ball_radius)
chain_proto = mb.Polymer(monomer, n=chain_length)
chain_protos, empty_backfill = pattern.apply_to_compound(chain_proto,
guest_port_name="down", host=self['np'])
self.add(chain_protos)
self.generate_bonds('np', 'np', sqrt(4 * ball_radius ** 2 * pi / n) - 0.5,
sqrt(4 * ball_radius**2 * pi / n) + 0.5)
self.generate_bonds('np', 't', 0.1, 0.3)
self.generate_bonds('t', 'np', 0.1, 0.3)
|
Initialize a tethered nanoparticle.
Args:
ball_radius (float): Radius of the nanoparticle.
n_chains (int): Number of chains to attach to the nanoparticle.
chain_length (int): Length of the chains being attached.
monomer (Compound, optional): Type of chain being attached.
|
juraj-google-style
|
def save_plot(self, filename, img_format="eps", ylim=None, units="thz"):
plt = self.get_plot(ylim=ylim, units=units)
plt.savefig(filename, format=img_format)
plt.close()
|
Save matplotlib plot to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to EPS.
ylim: Specifies the y-axis limits.
units: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1.
|
juraj-google-style
|
def upload_dict(s3_conn, s3_prefix, data_to_sync):
(bucket_name, prefix) = split_s3_path(s3_prefix)
bucket = s3_conn.get_bucket(bucket_name)
for (key, value) in data_to_sync.items():
full_name = '{}/{}.json'.format(prefix, key)
s3_key = boto.s3.key.Key(bucket=bucket, name=full_name)
logging.info('uploading key %s', full_name)
s3_key.set_contents_from_string(json.dumps(value))
|
Syncs a dictionary to an S3 bucket, serializing each value in the
dictionary as a JSON file with the key as its name.
Args:
s3_conn: (boto.s3.connection) an s3 connection
s3_prefix: (str) the destination prefix
data_to_sync: (dict)
|
codesearchnet
|
def initial_sql(self, value):
self._initial_sql = value
if value is None:
try:
del self._connectionXML.attrib['one-time-sql']
except KeyError:
pass
else:
self._connectionXML.set('one-time-sql', value)
|
Set the connection's initial_sql property.
Args:
value: New initial_sql value. String.
Returns:
Nothing.
|
juraj-google-style
|
def get_unbound_arg_names(arg_names, arg_binding_keys):
bound_arg_names = [abk._arg_name for abk in arg_binding_keys]
return [arg_name for arg_name in arg_names
if arg_name not in bound_arg_names]
|
Determines which args have no arg binding keys.
Args:
arg_names: a sequence of the names of possibly bound args
arg_binding_keys: a sequence of ArgBindingKey each of whose arg names is
in arg_names
Returns:
a sequence of arg names that is a (possibly empty, possibly non-proper)
subset of arg_names
|
juraj-google-style
|
def add_file_to_tree(tree, file_path, file_contents, is_executable=False):
record = {
"path": file_path,
"mode": "100755" if is_executable else "100644",
"type": "blob",
"content": file_contents,
}
tree.append(record)
return tree
|
Add a file to a tree.
Args:
tree
A list of dicts containing info about each blob in a tree.
file_path
The path of the new file in the tree.
file_contents
The (UTF-8 encoded) contents of the new file.
is_executable
If ``True``, the new file will get executable permissions (0755).
Otherwise, it will get 0644 permissions.
Returns:
The provided tree, but with the new file added.
|
juraj-google-style
|
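A usage sketch building a GitHub-style tree payload:

tree = []
tree = add_file_to_tree(tree, 'scripts/run.sh', '#!/bin/sh\necho hi\n',
                        is_executable=True)
tree = add_file_to_tree(tree, 'README.md', '# Demo\n')
print(tree[0]['mode'], tree[1]['mode'])  # 100755 100644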
def save_prefixed_metrics(results, output_dir, file_name: str='all_results.json', metric_key_prefix: str='eval'):
for key in list(results.keys()):
if not key.startswith(f'{metric_key_prefix}_'):
results[f'{metric_key_prefix}_{key}'] = results.pop(key)
with open(os.path.join(output_dir, file_name), 'w') as f:
json.dump(results, f, indent=4)
|
Save results while prefixing metric names.
Args:
results: (:obj:`dict`):
A dictionary of results.
output_dir: (:obj:`str`):
An output directory.
file_name: (:obj:`str`, `optional`, defaults to :obj:`all_results.json`):
An output file name.
metric_key_prefix: (:obj:`str`, `optional`, defaults to :obj:`eval`):
A metric name prefix.
|
github-repos
|
def full(shape, fill_value, dtype=None):
if any_symbolic_tensors((fill_value,)):
return Full(shape=shape, dtype=dtype).symbolic_call(fill_value)
return backend.numpy.full(shape, fill_value, dtype=dtype)
|
Return a new tensor of given shape and type, filled with `fill_value`.
Args:
shape: Shape of the new tensor.
fill_value: Fill value.
dtype: Desired data type of the tensor.
Returns:
Output tensor.
|
github-repos
|
def safe_tag(self, tag, errors='strict'):
if tag is not None:
try:
tag = quote(self.s(tag, errors=errors), safe='~')[:128]
except KeyError as e:
warn = 'Failed converting tag to safetag ({})'.format(e)
self.log.warning(warn)
return tag
|
URL Encode and truncate tag to match limit (128 characters) of ThreatConnect API.
Args:
tag (string): The tag to be truncated
Returns:
(string): The truncated tag
|
juraj-google-style
|
def _preprocess_params(cls, kwargs):
for attr, val in list(kwargs.items()):  # iterate over a copy; keys may be deleted below
if cls.is_the_primary_key(attr) and cls._prevent_primary_key_initialization_:
del kwargs[attr]
continue
if val == "":
kwargs[attr] = None
continue
if attr in class_mapper(cls).relationships and attr not in cls._no_overwrite_:
rel = class_mapper(cls).relationships[attr]
if rel.uselist:
if isinstance(val, list):
if all(isinstance(v, dict) for v in val):
rel_cls = cls.mapped_rel_class(attr)
kwargs[attr] = rel_cls.update_or_new_all(
list_of_kwargs=val, keys=[rel_cls.primary_key_name()])
elif isinstance(val, dict):
rel_cls = cls.mapped_rel_class(attr)
mapping_col = rel.collection_class().keyfunc.name
list_of_kwargs = [merge(v, {mapping_col: k}) for k, v in val.items()]
kwargs[attr] = {getattr(obj, mapping_col): obj for obj in rel_cls.update_or_new_all(
list_of_kwargs=list_of_kwargs, keys=[rel_cls.primary_key_name()])}
elif isinstance(val, dict):
rel_cls = cls.mapped_rel_class(attr)
kwargs[attr] = rel_cls.update_or_new(
**merge(val, {'keys': [rel_cls.primary_key_name()]}))
return kwargs
|
Returns a preprocessed dictionary of parameters.
Use this to filter the kwargs passed to `new`, `create`,
`build` methods.
Args:
**kwargs: a dictionary of parameters
|
juraj-google-style
|
def make_innermost_setter(setter):
@functools.wraps(setter)
def _new_setter(kernel_results, *args, **kwargs):
'Wrapped setter.'
results_stack = []
while hasattr(kernel_results, 'inner_results'):
results_stack.append(kernel_results)
kernel_results = kernel_results.inner_results
new_kernel_results = setter(kernel_results, *args, **kwargs)
for outer_results in reversed(results_stack):
new_kernel_results = outer_results._replace(inner_results=new_kernel_results)
return new_kernel_results
return _new_setter
|
Wraps a setter so it applies to the inner-most results in `kernel_results`.
The wrapped setter unwraps `kernel_results` and applies `setter` to the first
results without an `inner_results` attribute.
Args:
setter: A callable that takes the kernel results as well as some `*args` and
`**kwargs` and returns a modified copy of those kernel results.
Returns:
new_setter: A wrapped `setter`.
|
codesearchnet
|
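A minimal sketch of the unwrapping behavior, with namedtuples standing in for kernel results:

import collections

Inner = collections.namedtuple('Inner', ['step_size'])
Outer = collections.namedtuple('Outer', ['inner_results'])

@make_innermost_setter
def set_step_size(kernel_results, step_size):
    return kernel_results._replace(step_size=step_size)

nested = Outer(Outer(Inner(step_size=0.1)))
updated = set_step_size(nested, 0.5)
print(updated.inner_results.inner_results.step_size)  # 0.5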
def make_2d_block_raster_mask(query_shape, memory_flange):
query_triangle = common_layers.ones_matrix_band_part(
np.prod(query_shape), np.prod(query_shape), -1, 0)
split_query_masks = tf.split(query_triangle, query_shape[0], axis=1)
mask_pieces = [
tf.concat(
[tf.ones([np.prod(query_shape), memory_flange[1]]),
split_query_masks[i],
tf.zeros([np.prod(query_shape), memory_flange[1]])],
axis=1) for i in range(query_shape[0])
]
final_mask = tf.concat(
[
tf.ones([
np.prod(query_shape),
(query_shape[1] + 2 * memory_flange[1]) * memory_flange[0]
]),
tf.concat(mask_pieces, axis=1)
],
axis=1)
return 1. - final_mask
|
Creates a mask for 2d block raster scan.
The query mask can look to the left, top left, top, and top right, but
not to the right. Inside the query, we have the standard raster scan
masking.
Args:
query_shape: A tuple of ints (query_height, query_width)
memory_flange: A tuple of ints
(memory_flange_height, memory_flange_width)
Returns:
A tensor of shape query_size, memory_size
|
juraj-google-style
|
def _parse_slices(slicing_string):
parsed = []
for slice_string in slicing_string[1:-1].split(','):
indices = slice_string.split(':')
if len(indices) == 1:
parsed.append(int(indices[0].strip()))
elif 2 <= len(indices) <= 3:
parsed.append(slice(*[int(index.strip()) if index.strip() else None for index in indices]))
else:
raise ValueError('Invalid tensor-slicing string.')
return tuple(parsed)
|
Construct a tuple of slices from the slicing string.
The string must be a valid slicing string.
Args:
slicing_string: (str) Input slicing string to be parsed.
Returns:
tuple(slice1, slice2, ...)
Raises:
ValueError: If tensor_slicing is not a valid numpy ndarray slicing str.
|
github-repos
|
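Example inputs and the tuples they produce:

print(_parse_slices('[1, 2:5, :]'))
# (1, slice(2, 5, None), slice(None, None, None))
print(_parse_slices('[::2, -1]'))
# (slice(None, None, 2), -1)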
def save_intraday(data: pd.DataFrame, ticker: str, dt, typ='TRADE'):
cur_dt = pd.Timestamp(dt).strftime('%Y-%m-%d')
logger = logs.get_logger(save_intraday, level='debug')
info = f'{ticker} / {cur_dt} / {typ}'
data_file = hist_file(ticker=ticker, dt=dt, typ=typ)
if (not data_file):
return
if data.empty:
logger.warning(f'data is empty for {info} ...')
return
exch = const.exch_info(ticker=ticker)
if exch.empty:
return
end_time = pd.Timestamp(const.market_timing(ticker=ticker, dt=dt, timing='FINISHED')).tz_localize(exch.tz)
now = (pd.Timestamp('now', tz=exch.tz) - pd.Timedelta('1H'))
if (end_time > now):
logger.debug(f'skip saving because market close ({end_time}) is later than now - 1H ({now}) ...')
return
logger.info(f'saving data to {data_file} ...')
files.create_folder(data_file, is_file=True)
data.to_parquet(data_file)
|
Check whether data is done for the day and save
Args:
data: data
ticker: ticker
dt: date
typ: [TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID, BEST_ASK]
Examples:
>>> os.environ['BBG_ROOT'] = 'xbbg/tests/data'
>>> sample = pd.read_parquet('xbbg/tests/data/aapl.parq')
>>> save_intraday(sample, 'AAPL US Equity', '2018-11-02')
>>> # Invalid exchange
>>> save_intraday(sample, 'AAPL XX Equity', '2018-11-02')
>>> # Invalid empty data
>>> save_intraday(pd.DataFrame(), 'AAPL US Equity', '2018-11-02')
>>> # Invalid date - too close
>>> cur_dt = utils.cur_time()
>>> save_intraday(sample, 'AAPL US Equity', cur_dt)
|
codesearchnet
|
def _TestGetItem(self, rt, slice_spec, expected, expected_shape=None):
tensor_slice_spec1 = _make_tensor_slice_spec(slice_spec, True)
tensor_slice_spec2 = _make_tensor_slice_spec(slice_spec, False)
value1 = rt.__getitem__(slice_spec)
value2 = rt.__getitem__(tensor_slice_spec1)
value3 = rt.__getitem__(tensor_slice_spec2)
self.assertAllEqual(value1, expected, 'slice_spec=%s' % (slice_spec,))
self.assertAllEqual(value2, expected, 'slice_spec=%s' % (slice_spec,))
self.assertAllEqual(value3, expected, 'slice_spec=%s' % (slice_spec,))
if expected_shape is not None:
value1.shape.assert_is_compatible_with(expected_shape)
value2.shape.assert_is_compatible_with(expected_shape)
value3.shape.assert_is_compatible_with(expected_shape)
|
Helper function for testing RaggedTensor.__getitem__.
Checks that calling `rt.__getitem__(slice_spec) returns the expected value.
Checks three different configurations for each slice spec:
* Call __getitem__ with the slice spec as-is (with int values)
* Call __getitem__ with int values in the slice spec wrapped in
`tf.constant()`.
* Call __getitem__ with int values in the slice spec wrapped in
`tf.compat.v1.placeholder()` (so value is not known at graph
construction time).
Args:
rt: The RaggedTensor to test.
slice_spec: The slice spec.
expected: The expected value of rt.__getitem__(slice_spec), as a python
list; or an exception class.
expected_shape: The expected shape for `rt.__getitem__(slice_spec)`.
|
github-repos
|
def get_num_bytes(self, batch: Sequence[pandas.DataFrame]) -> int:
return sum((df.memory_usage(deep=True).sum() for df in batch))
|
Returns:
The number of bytes of data for a batch of pandas DataFrames.
|
github-repos
|
def coalescence_waiting_times(self, backward=True):
if (not isinstance(backward, bool)):
raise TypeError('backward must be a bool')
times = list()
lowest_leaf_dist = float('-inf')
for (n, d) in self.distances_from_root():
if (len(n.children) > 1):
times.append(d)
elif ((len(n.children) == 0) and (d > lowest_leaf_dist)):
lowest_leaf_dist = d
times.append(lowest_leaf_dist)
times.sort(reverse=backward)
for i in range((len(times) - 1)):
(yield abs((times[i] - times[(i + 1)])))
|
Generator over the waiting times of successive coalescence events
Args:
``backward`` (``bool``): ``True`` to go backward in time (i.e., leaves to root), otherwise ``False``
|
codesearchnet
|
def style_str(cls, style: Union[str, Dict[str, Any], None]) -> Optional[str]:
if not style:
return None
if isinstance(style, str):
return style
else:
assert isinstance(style, dict), style
return ''.join([f'{k.replace("_", "-")}:{v};' for k, v in style.items() if v is not None]) or None
|
Gets a string representing an inline CSS style.
Args:
style: A single CSS style string, or a dictionary for CSS properties.
When dictionary form is used, underscore in the key name will be
replaced by dash in the generated CSS style string.
For example, `background_color` will be converted to `background-color`.
Returns:
A CSS style string or None if no CSS property is provided.
|
github-repos
|
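Example outputs; since the method is shown unbound above, a placeholder is passed for `cls` (it is unused in the body):

print(style_str(None, {'background_color': 'red', 'font_size': '12px',
                       'border': None}))
# 'background-color:red;font-size:12px;'  (None-valued properties are dropped)
print(style_str(None, 'color:red;'))  # strings pass through unchanged
print(style_str(None, None))          # None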
def confab_conformers(self, forcefield='mmff94', freeze_atoms=None, rmsd_cutoff=0.5, energy_cutoff=50.0, conf_cutoff=100000, verbose=False):
if (self._obmol.GetDimension() != 3):
self.make3d()
else:
self.add_hydrogen()
ff = ob.OBForceField_FindType(forcefield)
if (ff == 0):
print("Could not find forcefield {} in openbabel, the forcefield will be reset as default 'mmff94'".format(forcefield))
ff = ob.OBForceField_FindType('mmff94')
if freeze_atoms:
print('{} atoms will be frozen'.format(len(freeze_atoms)))
constraints = ob.OBFFConstraints()
for atom in ob.OBMolAtomIter(self._obmol):
atom_id = (atom.GetIndex() + 1)
if (atom_id in freeze_atoms):
constraints.AddAtomConstraint(atom_id)
ff.SetConstraints(constraints)
ff.DiverseConfGen(rmsd_cutoff, conf_cutoff, energy_cutoff, verbose)
ff.GetConformers(self._obmol)
conformer_num = self._obmol.NumConformers()
conformers = []
for i in range(conformer_num):
self._obmol.SetConformer(i)
conformer = copy.deepcopy(BabelMolAdaptor(self._obmol).pymatgen_mol)
conformers.append(conformer)
self._obmol.SetConformer(0)
return conformers
|
Conformer generation based on Confab to generate all diverse low-energy
conformers for molecules. This is different from rotor_conformer or
gen3d_conformer as it aims not simply to find a low energy
conformation but to generate several different conformations.
Args:
forcefield (str): Default is mmff94. Options are 'gaff', 'ghemical',
'mmff94', 'mmff94s', and 'uff'.
freeze_atoms ([int]): index of atoms to be frozen when performing
conformer search, default is None.
rmsd_cutoff (float): rmsd_cutoff, default is 0.5 Angstrom.
energy_cutoff (float): energy_cutoff, default is 50.0 kcal/mol.
conf_cutoff (int): max number of conformers to test,
default is 100,000.
verbose (bool): whether to display information on torsions found,
default is False.
Returns:
(list): list of pymatgen Molecule objects for generated conformers.
|
codesearchnet
|
def get_gradebook_id(self, gbuuid):
gradebook = self.get('gradebook', params={'uuid': gbuuid})
if ('data' not in gradebook):
failure_message = 'Error in get_gradebook_id for {0} - no data'.format(gradebook)
log.error(failure_message)
raise PyLmodUnexpectedData(failure_message)
return gradebook['data']['gradebookId']
|
Return gradebookid for a given gradebook uuid.
Args:
gbuuid (str): gradebook uuid, i.e. ``STELLAR:/project/gbngtest``
Raises:
PyLmodUnexpectedData: No gradebook id returned
requests.RequestException: Exception connection error
ValueError: Unable to decode response content
Returns:
str: value of gradebook id
|
codesearchnet
|
def _collective_with_groups(self, x, mesh_axes, collective):
if not mesh_axes:
return x
x = x.to_laid_out_tensor()
if len(mesh_axes) == self.ndims:
return self.LaidOutTensor(collective(x.tensor_list, self._devices))
else:
groups = mtf.processor_groups(self.shape, mesh_axes)
ret = [None] * self.size
for g in groups:
inputs = [x.tensor_list[pnum] for pnum in g]
devices = [self._devices[pnum] for pnum in g]
reduced = collective(inputs, devices)
for pnum, y in zip(g, reduced):
ret[pnum] = y
return self.LaidOutTensor(ret)
|
Grouped collective, (across the given dimensions).
Args:
x: a LaidOutTensor
mesh_axes: a list of integers - the mesh dimensions to be reduced
collective: fn from list(tf.Tensor), list(device) -> list(tf.Tensor)
Returns:
a LaidOutTensor
|
juraj-google-style
|
def render(engine, format, filepath, renderer=None, formatter=None, quiet=False):
(cmd, rendered) = command(engine, format, filepath, renderer, formatter)
run(cmd, capture_output=True, check=True, quiet=quiet)
return rendered
|
Render file with Graphviz ``engine`` into ``format``, return result filename.
Args:
engine: The layout command used for rendering (``'dot'``, ``'neato'``, ...).
format: The output format used for rendering (``'pdf'``, ``'png'``, ...).
filepath: Path to the DOT source file to render.
renderer: The output renderer used for rendering (``'cairo'``, ``'gd'``, ...).
formatter: The output formatter used for rendering (``'cairo'``, ``'gd'``, ...).
quiet (bool): Suppress ``stderr`` output.
Returns:
The (possibly relative) path of the rendered file.
Raises:
ValueError: If ``engine``, ``format``, ``renderer``, or ``formatter`` are not known.
graphviz.RequiredArgumentError: If ``formatter`` is given but ``renderer`` is None.
graphviz.ExecutableNotFound: If the Graphviz executable is not found.
subprocess.CalledProcessError: If the exit status is non-zero.
|
codesearchnet
|
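A usage sketch, assuming the Graphviz binaries are installed and a DOT source file exists on disk:

with open('example.gv', 'w') as f:
    f.write('digraph { a -> b }')

out_path = render('dot', 'png', 'example.gv')
print(out_path)  # 'example.gv.png'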
def findall_operations_with_gate_type(
self,
gate_type: Type[T_DESIRED_GATE_TYPE]
) -> Iterable[Tuple[int,
ops.GateOperation,
T_DESIRED_GATE_TYPE]]:
result = self.findall_operations(lambda operation: bool(
ops.op_gate_of_type(operation, gate_type)))
for index, op in result:
gate_op = cast(ops.GateOperation, op)
yield index, gate_op, cast(T_DESIRED_GATE_TYPE, gate_op.gate)
|
Find the locations of all gate operations of a given type.
Args:
gate_type: The type of gate to find, e.g. XPowGate or
MeasurementGate.
Returns:
An iterator (index, operation, gate)'s for operations with the given
gate type.
|
juraj-google-style
|
def is_flaky(max_attempts: int=5, wait_before_retry: Optional[float]=None, description: Optional[str]=None):
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
logger.error(f'Test failed with {err} at try {retry_count}/{max_attempts}.')
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return unittest.skipUnless(_run_flaky_tests, 'test is flaky')(wrapper)
return decorator
|
To decorate flaky tests. They will be retried on failures.
Please note that our push tests use `pytest-rerunfailures`, which prompts the CI to rerun certain types of
failed tests. More specifically, if the test exception contains any substring in `FLAKY_TEST_FAILURE_PATTERNS`
(in `.circleci/create_circleci_config.py`), it will be rerun. If you find a recurrent pattern of failures,
expand `FLAKY_TEST_FAILURE_PATTERNS` in our CI configuration instead of using `is_flaky`.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
|
github-repos
|
def datasets_list(self, project_id=None, max_results=0, page_token=None):
if project_id is None:
project_id = self._project_id
url = Api._ENDPOINT + (Api._DATASETS_PATH % (project_id, ''))
args = {}
if max_results != 0:
args['maxResults'] = max_results
if page_token is not None:
args['pageToken'] = page_token
return datalab.utils.Http.request(url, args=args, credentials=self._credentials)
|
Issues a request to list the datasets in the project.
Args:
project_id: the project id to use to fetch the results; use None for the default project.
max_results: an optional maximum number of tables to retrieve.
page_token: an optional token to continue the retrieval.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
|
juraj-google-style
|
def get_public_key_pem(cert_obj):
return cert_obj.public_key().public_bytes(
encoding=cryptography.hazmat.primitives.serialization.Encoding.PEM,
format=cryptography.hazmat.primitives.serialization.PublicFormat.PKCS1,
)
|
Extract public key from certificate as PEM encoded PKCS#1.
Args:
cert_obj: cryptography.Certificate
Returns:
bytes: PEM encoded PKCS#1 public key.
|
juraj-google-style
|
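A usage sketch with the `cryptography` package, assuming a PEM-encoded certificate for an RSA key (PKCS#1 applies to RSA public keys); the file path is illustrative:

import cryptography.x509

with open('cert.pem', 'rb') as f:  # assumed path to a PEM certificate
    cert_obj = cryptography.x509.load_pem_x509_certificate(f.read())

print(get_public_key_pem(cert_obj).splitlines()[0])
# b'-----BEGIN RSA PUBLIC KEY-----'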
def set_unrecognized_field(self, key, value, variant):
if (not isinstance(variant, Variant)):
raise TypeError(('Variant type %s is not valid.' % variant))
self.__unrecognized_fields[key] = (value, variant)
|
Set an unrecognized field, used when decoding a message.
Args:
key: The name or number used to refer to this unknown value.
value: The value of the field.
variant: Type information needed to interpret the value or re-encode
it.
Raises:
TypeError: If the variant is not an instance of messages.Variant.
|
codesearchnet
|
def _conform_to_outputs(self, outputs, struct):
struct = map_to_output_names(outputs, self._output_names, struct)
struct = map_missing_dict_keys(outputs, struct)
if not nest.is_nested(struct) and nest.is_nested(outputs):
struct = nest.map_structure(lambda _: struct, outputs)
return struct
|
Convenience method to conform `struct` to `outputs` structure.
Mappings performed:
(1) Map a dict to a list of outputs, using the output names.
(2) Fill missing keys in a dict w/ `None`s.
(3) Map a single item to all outputs.
Args:
outputs: Model predictions.
struct: Arbitrary nested structure (e.g. of labels, sample_weights,
losses, or metrics).
Returns:
Mapping of `struct` to `outputs` structure.
|
github-repos
|
def seek(self, offset, whence=os.SEEK_SET):
if not self._is_open:
raise IOError('Not opened.')
if self._current_offset < 0:
raise IOError(
'Invalid current offset: {0:d} value less than zero.'.format(
self._current_offset))
if whence == os.SEEK_CUR:
offset += self._current_offset
elif whence == os.SEEK_END:
if self._uncompressed_stream_size is None:
self._uncompressed_stream_size = self._GetUncompressedStreamSize()
if self._uncompressed_stream_size is None:
raise IOError('Invalid uncompressed stream size.')
offset += self._uncompressed_stream_size
elif whence != os.SEEK_SET:
raise IOError('Unsupported whence.')
if offset < 0:
raise IOError('Invalid offset value less than zero.')
if offset != self._current_offset:
self._current_offset = offset
self._realign_offset = True
|
Seeks to an offset within the file-like object.
Args:
offset (int): offset to seek to.
whence (Optional(int)): value that indicates whether offset is an absolute
or relative position within the file.
Raises:
IOError: if the seek failed.
OSError: if the seek failed.
|
juraj-google-style
|
def replace(self, **kwargs):
init_kwargs = dict(job=self.job, replica=self.replica, task=self.task, device_type=self.device_type, device_index=self.device_index)
init_kwargs.update(kwargs)
return self.__class__(**init_kwargs)
|
Convenience method for making a new DeviceSpec by overriding fields.
For instance:
```
my_spec = DeviceSpec(job="my_job", device_type="CPU")
my_updated_spec = my_spec.replace(device_type="GPU")
my_other_spec = my_spec.replace(device_type=None)
```
Args:
**kwargs: This method takes the same args as the DeviceSpec constructor
Returns:
A DeviceSpec with the fields specified in kwargs overridden.
|
github-repos
|
def _validate_fhir_constraints(msg: message.Message, base_name: str, primitive_handler_: primitive_handler.PrimitiveHandler) -> None:
if annotation_utils.is_primitive_type(msg):
_ = primitive_handler_.primitive_wrapper_from_primitive(msg)
return
if proto_utils.is_message_type(msg, any_pb2.Any):
return
for field in msg.DESCRIPTOR.fields:
field_name = f'{base_name}.{proto_utils.json_field_name(field)}'
_validate_field(msg, field, field_name, primitive_handler_)
for oneof in msg.DESCRIPTOR.oneofs:
if msg.WhichOneof(oneof.name) is None and (not oneof.GetOptions().HasExtension(annotations_pb2.fhir_oneof_is_optional)):
raise fhir_errors.InvalidFhirError(f'Empty oneof: `{oneof.full_name}`.')
|
Iterates over fields of the provided message and validates constraints.
Args:
msg: The message to validate.
base_name: The root message name for recursive validation of nested message
fields.
primitive_handler_: Responsible for returning PrimitiveWrappers.
Raises:
fhir_errors.InvalidFhirError: In the event that a field is found to be
violating FHIR constraints or a required oneof is not set.
|
github-repos
|
def _handle_stop_dag(self, request):
if ((request.payload['name'] is not None) and (request.payload['name'] not in self._stop_dags)):
self._stop_dags.append(request.payload['name'])
return Response(success=True, uid=request.uid)
|
The handler for the stop_dag request.
The stop_dag request adds a dag to the list of dags that should be stopped.
The dag will then stop queueing new tasks and will eventually stop running.
Args:
request (Request): Reference to a request object containing the
incoming request. The payload has to contain the
following fields:
'name': the name of the dag that should be stopped
Returns:
Response: A response object containing the following fields:
- success: True if the dag was added successfully to the list
of dags that should be stopped.
|
codesearchnet
|
def from_json(cls, json_data):
data = json.loads(_helpers._from_bytes(json_data))
if (data.get('token_expiry') and
not isinstance(data['token_expiry'], datetime.datetime)):
try:
data['token_expiry'] = datetime.datetime.strptime(
data['token_expiry'], EXPIRY_FORMAT)
except ValueError:
data['token_expiry'] = None
retval = cls(
data['access_token'],
data['client_id'],
data['client_secret'],
data['refresh_token'],
data['token_expiry'],
data['token_uri'],
data['user_agent'],
revoke_uri=data.get('revoke_uri', None),
id_token=data.get('id_token', None),
id_token_jwt=data.get('id_token_jwt', None),
token_response=data.get('token_response', None),
scopes=data.get('scopes', None),
token_info_uri=data.get('token_info_uri', None))
retval.invalid = data['invalid']
return retval
|
Instantiate a Credentials object from a JSON description of it.
The JSON should have been produced by calling .to_json() on the object.
Args:
json_data: string or bytes, JSON to deserialize.
Returns:
An instance of a Credentials subclass.
|
juraj-google-style
|
def RegionalWebhook(self, request, global_params=None):
config = self.GetMethodConfig('RegionalWebhook')
return self._RunMethod(config, request, global_params=global_params)
|
ReceiveRegionalWebhook is called when the API receives a regional GitHub webhook.
Args:
request: (CloudbuildLocationsRegionalWebhookRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
|
github-repos
|
def multinomial_sample(x, vocab_size=None, sampling_method="random",
temperature=1.0):
vocab_size = vocab_size or common_layers.shape_list(x)[-1]
if sampling_method == "random" and temperature > 0.0:
samples = tf.multinomial(tf.reshape(x, [-1, vocab_size]) / temperature, 1)
else:
samples = tf.argmax(x, axis=-1)
reshaped_samples = tf.reshape(samples, common_layers.shape_list(x)[:-1])
return reshaped_samples
|
Multinomial sampling from an n-dimensional tensor.
Args:
x: Tensor of shape [..., vocab_size]. Parameterizes logits of multinomial.
vocab_size: Number of classes in multinomial distribution.
sampling_method: String, "random" or otherwise deterministic.
temperature: Positive float.
Returns:
Tensor of shape [...].
|
juraj-google-style
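A minimal usage sketch, assuming TF1-style graph mode (the code calls the TF1 `tf.multinomial` op); shapes and values are illustrative:
```python
import tensorflow as tf

logits = tf.random.normal([4, 10, 50])  # [..., vocab_size]
# "random" with temperature > 0 samples from softmax(logits / temperature)
sampled = multinomial_sample(logits, vocab_size=50,
                             sampling_method="random", temperature=0.7)
# any other method (or temperature == 0) falls through to argmax
greedy = multinomial_sample(logits, sampling_method="argmax")
# both results have shape [4, 10]
```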
|
def sawtooth(duration: int, amp: complex, period: float=None, phase: float=0, name: str=None) -> SamplePulse:
if (period is None):
period = duration
return _sampled_sawtooth_pulse(duration, amp, period, phase=phase, name=name)
|
Generates sawtooth wave `SamplePulse`.
Args:
duration: Duration of pulse. Must be greater than zero.
amp: Pulse amplitude. Wave range is [-amp, amp].
period: Pulse period, units of dt. If `None` defaults to single cycle.
phase: Pulse phase.
name: Name of pulse.
|
codesearchnet
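A small call sketch using the signature above; all values are illustrative:
```python
# duration is in units of dt; amp is complex per the signature; leaving
# period as None would default to a single cycle over the full duration.
pulse = sawtooth(duration=128, amp=0.5 + 0.0j, period=32, name='saw4')
# -> a SamplePulse tracing four sawtooth cycles within [-0.5, 0.5]
```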
|
def delete(self, key):
try:
del self._collection(key)[key]
if len(self._collection(key)) == 0:
del self._items[str(key.path)]
except KeyError:
pass
|
Removes the object named by `key`.
Removes the object from the collection corresponding to ``key.path``.
Args:
key: Key naming the object to remove.
|
juraj-google-style
|
def _protobuf_value_type(value):
if value.HasField("number_value"):
return api_pb2.DATA_TYPE_FLOAT64
if value.HasField("string_value"):
return api_pb2.DATA_TYPE_STRING
if value.HasField("bool_value"):
return api_pb2.DATA_TYPE_BOOL
return None
|
Returns the type of the google.protobuf.Value message as an api.DataType.
Returns None if the type of 'value' is not one of the types supported in
api_pb2.DataType.
Args:
value: google.protobuf.Value message.
|
juraj-google-style
|
def _ParseEntryObjectOffsets(self, file_object, file_offset):
entry_array_object = self._ParseEntryArrayObject(file_object, file_offset)
entry_object_offsets = list(entry_array_object.entry_object_offsets)
while (entry_array_object.next_entry_array_offset != 0):
entry_array_object = self._ParseEntryArrayObject(file_object, entry_array_object.next_entry_array_offset)
entry_object_offsets.extend(entry_array_object.entry_object_offsets)
return entry_object_offsets
|
Parses entry array objects for the offset of the entry objects.
Args:
file_object (dfvfs.FileIO): a file-like object.
file_offset (int): offset of the first entry array object relative to
the start of the file-like object.
Returns:
list[int]: offsets of the entry objects.
|
codesearchnet
|
def expand_dims(x, axis=-1):
return array_ops.expand_dims(x, axis)
|
Adds a 1-sized dimension at index "axis".
Args:
x: A tensor or variable.
axis: Position where to add a new axis.
Returns:
A tensor with expanded dimensions.
|
github-repos
|
def get_setting(name):
current_settings = get_settings(category='All')
for setting in current_settings:
if (name.lower() == setting.lower()):
return current_settings[setting]
raise KeyError('Invalid name: {0}'.format(name))
|
Get the current configuration for the named audit setting
Args:
name (str): The name of the setting to retrieve
Returns:
str: The current configuration for the named setting
Raises:
KeyError: On invalid setting name
CommandExecutionError: If an error is encountered retrieving the settings
Usage:
.. code-block:: python
import salt.utils.win_lgpo_auditpol
# Get current state of the "Credential Validation" setting
salt.utils.win_lgpo_auditpol.get_setting(name='Credential Validation')
|
codesearchnet
|
def _build_ring_gather(input_tensors, devices, num_subchunks, pred_by_s_d, rank_by_s_d, red_op):
num_devices = len(input_tensors)
if num_devices == 0:
return []
if num_devices == 1:
return input_tensors
shape = input_tensors[0].shape
if len(shape) != 1:
raise ValueError('input tensors must be 1D')
num_chunks = num_devices * num_subchunks
num_ticks = num_devices - 1
chunks_by_dev = []
split_pad_len = 0
for d in range(0, num_devices):
with ops.device(devices[d]):
splits, split_pad_len = _padded_split(input_tensors[d], num_chunks)
chunks_by_dev.append(splits)
for tick in range(0, num_ticks):
new_partial_reductions = [None for _ in range(0, num_chunks)]
for d in range(0, num_devices):
with ops.device(devices[d]):
for s in range(0, num_subchunks):
rank = rank_by_s_d[s][d]
seg_index = (rank + num_devices - (2 + tick)) % num_devices
pred_dev = pred_by_s_d[s][d]
chunk_index = seg_index * num_subchunks + s
new_partial_reductions[chunk_index] = red_op(chunks_by_dev[pred_dev][chunk_index], chunks_by_dev[d][chunk_index])
for d in range(0, num_devices):
for s in range(0, num_subchunks):
rank = rank_by_s_d[s][d]
seg_index = (rank + num_devices - (2 + tick)) % num_devices
chunk_index = seg_index * num_subchunks + s
chunks_by_dev[d][chunk_index] = new_partial_reductions[chunk_index]
return (chunks_by_dev, split_pad_len)
|
Construct a subgraph for the first (reduction) pass of ring all-reduce.
Args:
input_tensors: a list of `tf.Tensor` 1D input tensors of same
shape and type.
devices: array of device name strings
num_subchunks: number of subchunks each device should process in one tick.
pred_by_s_d: as produced by _ring_permutations
rank_by_s_d: as produced by _ring_permutations
red_op: a binary operator for elementwise reduction
Raises:
ValueError: tensors must all be one dimensional.
Returns:
list of list of `tf.Tensor` of (partially) reduced values where
exactly num_subchunks chunks at each device are fully reduced.
|
github-repos
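The segment scheduling is the subtle part; here is a standalone sketch of the index arithmetic for a small ring, with no tensors involved:
```python
num_devices, num_subchunks = 3, 1
num_ticks = num_devices - 1
for tick in range(num_ticks):
    for rank in range(num_devices):
        # same formula as the reduction loop above (subchunk s = 0 here)
        seg_index = (rank + num_devices - (2 + tick)) % num_devices
        print('tick={} rank={} reduces segment {}'.format(tick, rank, seg_index))
# Every segment visits each device once over num_devices - 1 ticks, so each
# device ends up holding exactly one fully reduced segment.
```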
|
def distance(self, method='haversine'):
distances = []
for segment in self:
if (len(segment) < 2):
distances.append([])
else:
distances.append(segment.distance(method))
return distances
|
Calculate distances between locations in segments.
Args:
method (str): Method used to calculate distance
Returns:
list of list of float: Groups of distance between points in
segments
|
codesearchnet
|
def use_pcm(self, pcm_params=None, solvent_key="solvent", solvent_params=None,
radii_force_field=None):
self.params["pcm"] = dict()
self.params[solvent_key] = dict()
default_pcm_params = {"Theory": "SSVPE",
"vdwScale": 1.1,
"Radii": "UFF"}
if not solvent_params:
solvent_params = {"Dielectric": 78.3553}
if pcm_params:
for k, v in pcm_params.items():
self.params["pcm"][k.lower()] = v.lower() \
if isinstance(v, str) else v
for k, v in default_pcm_params.items():
if k.lower() not in self.params["pcm"].keys():
self.params["pcm"][k.lower()] = v.lower() \
if isinstance(v, str) else v
for k, v in solvent_params.items():
self.params[solvent_key][k.lower()] = v.lower() \
if isinstance(v, str) else copy.deepcopy(v)
self.params["rem"]["solvent_method"] = "pcm"
if radii_force_field:
self.params["pcm"]["radii"] = "bondi"
self.params["rem"]["force_fied"] = radii_force_field.lower()
|
Set the solvent model to PCM. Default parameters aim to match the
Gaussian defaults.
Args:
pcm_params (dict): The parameters of "$pcm" section.
solvent_key (str): for versions < 4.2 the section name is "pcm_solvent"
solvent_params (dict): The parameters of solvent_key section
radii_force_field (str): The force field used to set the solute
radii. Defaults to UFF.
|
juraj-google-style
|
def get_namespace(self, name_seq):
namespaces = self.namespaces
result = []
for name in name_seq:
namespaces = namespaces.get(name)
if not namespaces:
break
result.append(name)
return result
|
Returns the prefix of names from name_seq that are known namespaces.
Args:
name_seq: ['names', 'of', 'possible', 'namespace', 'to', 'find']
Returns:
['names', 'that', 'are', 'namespaces', 'possibly', 'empty', 'list']
|
juraj-google-style
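A standalone sketch of the prefix walk, assuming `namespaces` is a nested dict in which each value is the sub-namespace mapping (the tree below is hypothetical):
```python
namespaces = {'google': {'cloud': {'storage': {}}}}
result = []
node = namespaces
for name in ['google', 'cloud', 'unknown']:
    node = node.get(name)
    if not node:  # unknown name (or empty subtree) ends the prefix
        break
    result.append(name)
print(result)  # ['google', 'cloud']
```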
|
def subscribe(self, requested_timeout=None, auto_renew=False):
class AutoRenewThread(threading.Thread):
"""Used by the auto_renew code to renew a subscription from within a thread."""
def __init__(self, interval, stop_flag, sub, *args, **kwargs):
super(AutoRenewThread, self).__init__(*args, **kwargs)
self.interval = interval
self.sub = sub
self.stop_flag = stop_flag
self.daemon = True
def run(self):
sub = self.sub
stop_flag = self.stop_flag
interval = self.interval
while (not stop_flag.wait(interval)):
log.info('Autorenewing subscription %s', sub.sid)
sub.renew()
self.requested_timeout = requested_timeout
if self._has_been_unsubscribed:
raise SoCoException('Cannot resubscribe instance once unsubscribed')
service = self.service
if (not event_listener.is_running):
event_listener.start(service.soco)
(ip_address, port) = event_listener.address
if config.EVENT_ADVERTISE_IP:
ip_address = config.EVENT_ADVERTISE_IP
headers = {'Callback': '<http://{}:{}>'.format(ip_address, port), 'NT': 'upnp:event'}
if (requested_timeout is not None):
headers['TIMEOUT'] = 'Second-{}'.format(requested_timeout)
with _subscriptions_lock:
response = requests.request('SUBSCRIBE', (service.base_url + service.event_subscription_url), headers=headers)
response.raise_for_status()
self.sid = response.headers['sid']
timeout = response.headers['timeout']
if (timeout.lower() == 'infinite'):
self.timeout = None
else:
self.timeout = int(timeout.lstrip('Second-'))
self._timestamp = time.time()
self.is_subscribed = True
log.info('Subscribed to %s, sid: %s', (service.base_url + service.event_subscription_url), self.sid)
_subscriptions[self.sid] = self
atexit.register(self.unsubscribe)
if (not auto_renew):
return
interval = ((self.timeout * 85) / 100)
auto_renew_thread = AutoRenewThread(interval, self._auto_renew_thread_flag, self)
auto_renew_thread.start()
|
Subscribe to the service.
If requested_timeout is provided, a subscription valid for that number
of seconds will be requested, but not guaranteed. Check
`timeout` on return to find out what period of validity is
actually allocated.
Note:
SoCo will try to unsubscribe any subscriptions which are still
subscribed on program termination, but it is good practice for
you to clean up by making sure that you call :meth:`unsubscribe`
yourself.
Args:
requested_timeout(int, optional): The timeout to be requested.
auto_renew (bool, optional): If `True`, renew the subscription
automatically shortly before timeout. Default `False`.
|
codesearchnet
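A hypothetical usage sketch with SoCo; it requires a Sonos device reachable on the local network:
```python
import soco

device = soco.discovery.any_soco()  # first device found, if any
sub = device.avTransport.subscribe(requested_timeout=600, auto_renew=True)
print(sub.sid, sub.timeout)  # the granted timeout may differ from 600
# ... consume events from sub.events ...
sub.unsubscribe()  # clean up explicitly, per the note above
```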
|
def _get_ami_dict(json_url):
LOG.info('Getting AMI from %s', json_url)
response = requests.get(json_url)
assert response.ok, 'Error getting ami info from {}'.format(json_url)
ami_dict = response.json()
LOG.debug('AMI json contents: %s', ami_dict)
return ami_dict
|
Get AMI information from a web URL.
Args:
json_url (str): URL of the JSON document describing the AMI.
Returns:
dict: Contents in dictionary format.
|
codesearchnet
|
def get_subset_counts(self, *keys):
if self.prepickle:
key_set = [pickle.dumps(key) for key in set(keys)]
else:
key_set = list(set(keys))
hashtables = [unordered_storage({'type': 'dict'}) for _ in
range(self.b)]
Hss = self.keys.getmany(*key_set)
for key, Hs in zip(key_set, Hss):
for H, hashtable in zip(Hs, hashtables):
hashtable.insert(H, key)
return [hashtable.itemcounts() for hashtable in hashtables]
|
Returns the bucket allocation counts (see :func:`~datasketch.MinHashLSH.get_counts` above)
restricted to the list of keys given.
Args:
keys (hashable) : the keys for which to get the bucket allocation
counts
|
juraj-google-style
|
def poisson(data):
data = np.hstack(([0.0], np.array(data)))
cumm = np.cumsum(data)
def cost(s, t):
diff = cumm[t]-cumm[s]
if diff == 0:
return -2 * diff * (- np.log(t-s) - 1)
else:
return -2 * diff * (np.log(diff) - np.log(t-s) - 1)
return cost
|
Creates a segment cost function for a time series drawn from a
Poisson distribution with changing mean
Args:
data (:obj:`list` of float): 1D time series data
Returns:
function: Function with signature
(int, int) -> float
where the first argument is the segment's start index and the
second is its end index. Returns the cost of that segment
|
juraj-google-style
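A self-contained sketch: brute-force a single changepoint by minimizing total segment cost, using the convention above that segment (s, t] covers data[s:t]:
```python
# Hypothetical event counts whose rate jumps midway.
counts = [2, 3, 2, 2, 3, 9, 10, 8, 9, 11]
cost = poisson(counts)

n = len(counts)
best_t = min(range(1, n), key=lambda t: cost(0, t) + cost(t, n))
print(best_t)  # expected near index 5, where the rate shifts
```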
|
def indicator(self, data):
try:
ip = ipaddress.ip_address(data)
except ValueError:
ip = ipaddress.ip_address(u'{}'.format(data))
if ip.version == 6:
data = ip.exploded
sections = []
for s in data.split(':'):
if s == '0000':
s = '0'
else:
s = s.lstrip('0')
sections.append(s)
data = ':'.join(sections)
super(Address, self).indicator(data)
|
Update the request URI to include the Indicator for specific indicator retrieval.
Overloaded to handle formatting of IPv6 addresses.
Args:
data (string): The indicator value
|
juraj-google-style
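A standalone sketch of the IPv6 normalization the method performs: explode to the full form, then trim leading zeros per hextet:
```python
import ipaddress

addr = ipaddress.ip_address(u'2001:db8::1')
exploded = addr.exploded  # '2001:0db8:0000:0000:0000:0000:0000:0001'
sections = [('0' if s == '0000' else s.lstrip('0'))
            for s in exploded.split(':')]
print(':'.join(sections))  # '2001:db8:0:0:0:0:0:1'
```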
|
def dbmax20years(self, value=None):
if (value is not None):
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float for field `dbmax20years`'.format(value))
self._dbmax20years = value
|
Corresponds to IDD Field `dbmax20years`
20-year return period values for maximum extreme dry-bulb temperature
Args:
value (float): value for IDD Field `dbmax20years`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
codesearchnet
|
def set_zone(timezone):
if timezone.lower() in mapper.win_to_unix:
win_zone = timezone
elif timezone.lower() in mapper.unix_to_win:
win_zone = mapper.get_win(timezone)
else:
raise CommandExecutionError('Invalid timezone passed: {0}'.format(timezone))
cmd = ['tzutil', '/s', win_zone]
res = __salt__['cmd.run_all'](cmd, python_shell=False)
if res['retcode']:
raise CommandExecutionError('tzutil encountered an error setting '
'timezone: {0}'.format(timezone),
info=res)
return zone_compare(timezone)
|
Sets the timezone using the tzutil.
Args:
timezone (str): A valid timezone
Returns:
bool: ``True`` if successful, otherwise ``False``
Raises:
CommandExecutionError: If invalid timezone is passed
CLI Example:
.. code-block:: bash
salt '*' timezone.set_zone 'America/Denver'
|
juraj-google-style
|
def _binary_3d_label_to_sparse_value(labels):
indices = []
values = []
for d0, labels_d0 in enumerate(labels):
for d1, labels_d1 in enumerate(labels_d0):
d2 = 0
for class_id, label in enumerate(labels_d1):
if label == 1:
values.append(class_id)
indices.append([d0, d1, d2])
d2 += 1
else:
assert label == 0
shape = [len(labels), len(labels[0]), len(labels[0][0])]
return sparse_tensor.SparseTensorValue(np.array(indices, np.int64), np.array(values, np.int64), np.array(shape, np.int64))
|
Convert dense 3D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
labels: Dense 3D binary indicator tensor.
Returns:
`SparseTensorValue` whose values are indices along the last dimension of
`labels`.
|
github-repos
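A minimal sketch on a `[1, 2, 3]` indicator tensor; each 1 becomes its class index in the sparse values:
```python
labels = [[[0, 1, 0],
           [1, 0, 1]]]
sp = _binary_3d_label_to_sparse_value(labels)
print(sp.values)       # [1 0 2] -- class ids of the set indicator bits
print(sp.indices)      # [[0 0 0] [0 1 0] [0 1 1]]
print(sp.dense_shape)  # [1 2 3]
```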
|
def Get(self, project_id):
if project_id in self._emulators:
return self._emulators[project_id]
emulator = self.Create(project_id)
self._emulators[project_id] = emulator
return emulator
|
Returns an existing emulator instance for the provided project_id.
If an emulator instance doesn't yet exist, it creates one.
Args:
project_id: project ID
Returns:
a DatastoreEmulator
|
juraj-google-style
|
def consume(self, message):
if (('jsonrpc' not in message) or (message['jsonrpc'] != JSONRPC_VERSION)):
log.warning('Unknown message type %s', message)
return
if ('id' not in message):
log.debug('Handling notification from client %s', message)
self._handle_notification(message['method'], message.get('params'))
elif ('method' not in message):
log.debug('Handling response from client %s', message)
self._handle_response(message['id'], message.get('result'), message.get('error'))
else:
try:
log.debug('Handling request from client %s', message)
self._handle_request(message['id'], message['method'], message.get('params'))
except JsonRpcException as e:
log.exception('Failed to handle request %s', message['id'])
self._consumer({'jsonrpc': JSONRPC_VERSION, 'id': message['id'], 'error': e.to_dict()})
except Exception:
log.exception('Failed to handle request %s', message['id'])
self._consumer({'jsonrpc': JSONRPC_VERSION, 'id': message['id'], 'error': JsonRpcInternalError.of(sys.exc_info()).to_dict()})
|
Consume a JSON RPC message from the client.
Args:
message (dict): The JSON RPC message sent by the client
|
codesearchnet
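A sketch of the three message shapes the dispatcher distinguishes (payloads are hypothetical):
```python
# Request: has both "id" and "method" -> _handle_request
request = {'jsonrpc': '2.0', 'id': 1, 'method': 'initialize', 'params': {}}
# Notification: no "id" -> _handle_notification
notification = {'jsonrpc': '2.0', 'method': 'initialized', 'params': {}}
# Response: has "id" but no "method" -> _handle_response
response = {'jsonrpc': '2.0', 'id': 1, 'result': {'capabilities': {}}}
# endpoint.consume(request)  # hypothetical endpoint instance
```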
|