| code | docstring |
|---|---|
def check_if_ok_to_update(self):
    """Check if it is ok to perform an http request.

    Returns True once at least ``refresh_rate`` seconds have elapsed
    since ``last_refresh``; a missing ``last_refresh`` counts as 0.
    """
    now = int(time.time())
    last = self.last_refresh if self.last_refresh is not None else 0
    return now >= last + self.refresh_rate
def release(no_master, release_type):
    """Releases a new version.

    :param no_master: when true, allow releasing from a non-master branch.
    :param release_type: the type of release to perform (stored in the
        config passed to the release UI).
    """
    try:
        locale.setlocale(locale.LC_ALL, '')
    except locale.Error:
        # Bug fix: only locale failures are expected here; the original
        # bare ``except:`` also swallowed KeyboardInterrupt and real bugs.
        print("Warning: Unable to set locale. Expect encoding problems.")
    git.is_repo_clean(master=(not no_master))
    config = utils.get_config()
    config.update(utils.get_dist_metadata())
    config['project_dir'] = Path(os.getcwd())
    config['release_type'] = release_type
    with tempfile.TemporaryDirectory(prefix='ap_tmp') as tmp_dir:
        config['tmp_dir'] = tmp_dir
        values = release_ui(config)
        # The UI returns a str message on abort, or a dict of options.
        if not isinstance(values, str):
            utils.release(project_name=config['project_name'], tmp_dir=tmp_dir,
                          project_dir=config['project_dir'],
                          pypi_servers=config['pypi_servers'], **values)
            print('New release options:')
            pprint.pprint(values)
        else:
            print(values)
def stratify_by_features(features, n_strata, **kwargs):
    """Stratify by clustering the items in feature space.

    Parameters
    ----------
    features : array-like, shape=(n_items, n_features)
        Feature matrix for the pool; rows are items, columns features.
    n_strata : int
        Number of strata to create.
    **kwargs :
        Passed to sklearn.cluster.KMeans.

    Returns
    -------
    Strata instance
    """
    clusterer = KMeans(n_clusters=n_strata, **kwargs)
    labels = clusterer.fit_predict(X=features)
    return Strata(labels)
def sub_working_days(self, day, delta,
                     extra_working_days=None, extra_holidays=None,
                     keep_datetime=False):
    """Subtract `delta` working days from the date.

    This method is a shortcut / helper. Users may want to use either::

        cal.add_working_days(my_date, -7)
        cal.sub_working_days(my_date, 7)

    The other parameters are to be used exactly as in the
    ``add_working_days`` method.

    A negative ``delta`` argument will be converted into its absolute
    value, hence ``sub_working_days(d, -7)`` == ``sub_working_days(d, 7)``.

    As in ``add_working_days()`` you can set ``keep_datetime=True`` to make
    sure a ``datetime`` input yields a ``datetime`` output.
    """
    return self.add_working_days(
        day, -abs(delta),
        extra_working_days, extra_holidays, keep_datetime=keep_datetime)
def _set_suffix_links(self):
    """Sets all suffix links in all nodes in this trie.

    Walks the trie breadth-first (parents before children) and, for every
    non-root node:
      * propagates ``longest_prefix`` from the parent, replacing it with
        the parent itself when the parent holds a value;
      * computes the suffix link by following the parent's suffix chain
        until a node with a child edge matching ``current.uplink`` is
        found, falling back to the root;
      * computes ``dict_suffix`` — the nearest suffix node holding a
        value — by following suffix links from the new suffix.
    """
    self._suffix_links_set = True
    for current, parent in self.bfs():
        if parent is None:
            # The root has no parent and needs no links.
            continue
        current.longest_prefix = parent.longest_prefix
        if parent.has_value:
            current.longest_prefix = parent
        if current.has_suffix:
            # Suffix link already set for this node.
            continue
        suffix = parent
        while True:
            if not suffix.has_suffix:
                # Ran out of suffix links: fall back to the root.
                current.suffix = self.root
                break
            else:
                suffix = suffix.suffix
                if current.uplink in suffix:
                    current.suffix = suffix[current.uplink]
                    break
        suffix = current.suffix
        # Walk suffix links to the nearest value-bearing node.
        while not suffix.has_value and suffix.has_suffix:
            suffix = suffix.suffix
        if suffix.has_value:
            current.dict_suffix = suffix
def compose(*fs):
    """Creates composition of the functions passed in.

    :param fs: One-argument functions, with the possible exception of the
        last one, which can accept arbitrary arguments
    :return: Function returning a result of functions from ``fs``
        applied consecutively to the argument(s), in reverse order
    """
    ensure_argcount(fs, min_=1)
    fs = list(imap(ensure_callable, fs))

    # Fast paths for the common small arities avoid the generic loop.
    if len(fs) == 1:
        return fs[0]
    if len(fs) == 2:
        outer, inner = fs
        return lambda *args, **kwargs: outer(inner(*args, **kwargs))
    if len(fs) == 3:
        outer, middle, inner = fs
        return lambda *args, **kwargs: outer(middle(inner(*args, **kwargs)))

    fs.reverse()

    def composed(*args, **kwargs):
        value = fs[0](*args, **kwargs)
        for fn in fs[1:]:
            value = fn(value)
        return value
    return composed
def set(self, instance, value, **kw):
    """Decodes base64 value and set the file object.

    NOTE(review): ``str(value).decode("base64")`` is Python 2-only; on
    Python 3 ``str`` has no ``decode`` method — confirm the target
    runtime before reusing this code.
    """
    value = str(value).decode("base64")
    if "filename" not in kw:
        # Fall back to the object's id or title when no filename was given.
        logger.debug("FielFieldManager::set: No Filename detected "
                     "-> using title or id")
        kw["filename"] = kw.get("id") or kw.get("title")
    self._set(instance, value, **kw)
def ActiveDates(self):
    """Return dates this service period is active as a list of "YYYYMMDD"."""
    (earliest, latest) = self.GetDateRange()
    if earliest is None:
        # No service dates at all: nothing to enumerate.
        return []
    active = []
    cursor = util.DateStringToDateObject(earliest)
    end = util.DateStringToDateObject(latest)
    one_day = datetime.timedelta(days=1)
    while cursor <= end:
        as_string = cursor.strftime("%Y%m%d")
        if self.IsActiveOn(as_string, cursor):
            active.append(as_string)
        cursor += one_day
    return active
def sync(self, data):
    """Overwrite local customer payment profile data with remote data."""
    billing = data.get('billing', {})
    for field, remote_value in billing.items():
        setattr(self, field, remote_value)
    # Keep the existing card number when the remote payload omits it.
    self.card_number = data.get('credit_card', {}).get(
        'card_number', self.card_number)
    self.save(sync=False)
def apply(self, arr):
    """Apply all CPU transforms on an array."""
    result = arr
    for transform in self.cpu_transforms:
        result = transform.apply(result)
    return result
def log_likelihood(covariance, precision):
    """Computes the log-likelihood between the covariance and precision
    estimate.

    Parameters
    ----------
    covariance : 2D ndarray (n_features, n_features)
        Maximum Likelihood Estimator of covariance
    precision : 2D ndarray (n_features, n_features)
        The precision matrix of the covariance model to be tested

    Returns
    -------
    log-likelihood
    """
    assert covariance.shape == precision.shape
    dim = precision.shape[0]
    trace_term = np.sum(covariance * precision)
    value = -trace_term + fast_logdet(precision) - dim * np.log(2 * np.pi)
    return value / 2.
def setup_default_permissions(session, instance):
    """Setup default permissions on newly created entities according to
    :attr:`Entity.__default_permissions__`.
    """
    is_new_entity = instance in session.new and isinstance(instance, Entity)
    if not is_new_entity:
        return
    if not current_app:
        # Outside an application context there is nothing to set up.
        return
    _setup_default_permissions(instance)
def RattributesBM(dataset, database, host=rbiomart_host):
    """Lists BioMart attributes through a RPY2 connection.

    :param dataset: a dataset listed in RdatasetsBM()
    :param database: a database listed in RdatabasesBM()
    :param host: address of the host server, default='www.ensembl.org'
    :returns: nothing
    """
    biomaRt = importr("biomaRt")
    # Bug fix: honour the caller-supplied ``host`` instead of always
    # using the module-level ``rbiomart_host`` default.
    ensemblMart = biomaRt.useMart(database, host=host)
    ensembl = biomaRt.useDataset(dataset, mart=ensemblMart)
    print(biomaRt.listAttributes(ensembl))
def delete_file_or_tree(*args):
    """For every path in args, try to delete it as a file or a directory
    tree. Ignores deletion errors.
    """
    for path in args:
        try:
            os.unlink(path)
        except OSError:
            # Not removable as a file: fall back to removing it as a
            # directory tree, ignoring failures.
            shutil.rmtree(path, ignore_errors=True)
def _estimate_runner_memory(json_file):
with open(json_file) as in_handle:
sinfo = json.load(in_handle)
num_parallel = 1
for key in ["config__algorithm__variantcaller", "description"]:
item_counts = []
n = 0
for val in (sinfo.get(key) or []):
n += 1
if val:
if isinstance(val, (list, tuple)):
item_counts.append(len(val))
else:
item_counts.append(1)
print(key, n, item_counts)
if n and item_counts:
num_parallel = n * max(item_counts)
break
if num_parallel < 25:
return "3g"
if num_parallel < 150:
return "6g"
elif num_parallel < 500:
return "12g"
else:
return "24g" | Estimate Java memory requirements based on number of samples.
A rough approach to selecting correct allocated memory for Cromwell. |
def min_ems(self, value: float) -> 'Size':
    """Set the minimum size in ems.

    :param value: the minimum size, in em units.
    :return: this instance, to allow chaining.
    """
    # Validate before mutating so an invalid value leaves state unchanged.
    raise_not_number(value)
    self.minimum = '{}em'.format(value)
    return self
def generate_all_aliases(fieldfile, include_global):
    """Generate all of a file's aliases.

    :param fieldfile: A ``FieldFile`` instance.
    :param include_global: A boolean which determines whether to generate
        thumbnails for project-wide aliases in addition to field, model,
        and app specific aliases.
    """
    all_options = aliases.all(fieldfile, include_global=include_global)
    if not all_options:
        return
    thumbnailer = get_thumbnailer(fieldfile)
    for alias, options in six.iteritems(all_options):
        options['ALIAS'] = alias
        thumbnailer.get_thumbnail(options)
def add_bookmark(self, url, favorite=False, archive=False, allow_duplicates=True):
    """Adds given bookmark to the authenticated user.

    :param url: URL of the article to bookmark
    :param favorite: whether or not the bookmark should be favorited
    :param archive: whether or not the bookmark should be archived
    :param allow_duplicates: whether or not to allow duplicate bookmarks
        to be created for a given url
    """
    endpoint = self._generate_url('bookmarks')
    # The API expects booleans as 0/1 integers.
    payload = {
        "url": url,
        "favorite": int(favorite),
        "archive": int(archive),
        "allow_duplicates": int(allow_duplicates),
    }
    return self.post(endpoint, payload)
def set(self, document_data, merge=False):
    """Replace the current document in the Firestore database.

    Overwrites all content for the document with the fields in
    ``document_data``; performs almost the same functionality as
    :meth:`create`, except that it makes no requirement on the existence
    of the document (whereas :meth:`create` fails if it already exists).

    Args:
        document_data (dict): Property names and values to use for
            replacing the document.
        merge (Optional[bool] or Optional[List<apispec>]):
            If True, apply merging instead of overwriting the state
            of the document.

    Returns:
        google.cloud.firestore_v1beta1.types.WriteResult: The write
        result corresponding to the committed document; it contains an
        ``update_time`` field.
    """
    batch = self._client.batch()
    batch.set(self, document_data, merge=merge)
    return _first_write_result(batch.commit())
def set_block_name(self, index, name):
    """Set a block's string name at the specified index."""
    if name is not None:
        self.GetMetaData(index).Set(vtk.vtkCompositeDataSet.NAME(), name)
        self.Modified()
def dimension(self):
    """Compute the dimension of the sampling space and identify the
    slices belonging to each stochastic.
    """
    self.dim = 0
    self._slices = {}
    for stochastic in self.stochastics:
        value = stochastic.value
        # Matrices and arrays contribute their flattened length; anything
        # else is treated as a scalar.
        if isinstance(value, np.matrix):
            length = value.A.ravel().shape[0]
        elif isinstance(value, np.ndarray):
            length = value.ravel().shape[0]
        else:
            length = 1
        self._slices[stochastic] = slice(self.dim, self.dim + length)
        self.dim += length
def write_values(self):
    """Return the dictionary with which to write values.

    Secret and empty inputs are excluded.
    """
    return {
        name: field.value
        for name, field in self._inputs.items()
        if not field.is_secret and not field.is_empty(False)
    }
def push_tx(self, crypto, tx_hex):
    """This method is untested.

    POSTs the raw transaction hex to the service's ``pushtx`` endpoint
    and returns the raw response body.
    """
    endpoint = "%s/pushtx" % self.base_url
    response = self.post_url(endpoint, {'hex': tx_hex})
    return response.content
def GetConfiguredUsers(self):
    """Retrieve the list of configured Google user accounts.

    Returns:
      list, the username strings of users configured by Google.
    """
    if not os.path.exists(self.google_users_file):
        return []
    # Bug fix: use a context manager so the file handle is closed
    # deterministically (the original leaked the handle).
    with open(self.google_users_file) as users_file:
        return [user.strip() for user in users_file]
def extract_date(cls, date_str):
    """Tries to extract a `datetime` object from the given string,
    expecting date information only.

    Raises `DateTimeFormatterException` if the extraction fails.
    """
    # Reject empty/None input early with a descriptive error.
    if not date_str:
        raise DateTimeFormatterException('date_str must a valid string {}.'.format(date_str))
    try:
        return cls._extract_timestamp(date_str, cls.DATE_FORMAT)
    except (TypeError, ValueError):
        # Normalize parsing failures into the formatter's own exception.
        raise DateTimeFormatterException('Invalid date string {}.'.format(date_str))
def _get_basin_response_term(self, C, z2pt5):
    """Returns the basin response term defined in equation 20.

    :param C: coefficient table for the IMT (uses keys c14, c15, c16, k3).
    :param z2pt5: numpy array of depths to the 2.5 km/s velocity horizon.
    """
    f_sed = np.zeros(len(z2pt5))
    # Shallow-sediment branch (z2.5 < 1).
    idx = z2pt5 < 1.0
    f_sed[idx] = (C["c14"] + C["c15"] * float(self.CONSTS["SJ"])) *\
        (z2pt5[idx] - 1.0)
    # Deep-basin branch (z2.5 > 3); 1 <= z2.5 <= 3 contributes zero.
    idx = z2pt5 > 3.0
    f_sed[idx] = C["c16"] * C["k3"] * exp(-0.75) *\
        (1.0 - np.exp(-0.25 * (z2pt5[idx] - 3.0)))
    return f_sed
def _get_case_file_paths(tmp_dir, case, training_fraction=0.95):
    """Obtain a list of image paths corresponding to training or eval case.

    Args:
      tmp_dir: str, the root path to which raw images were written, at the
        top level having meta/ and raw/ subdirs.
      case: bool, whether obtaining file paths for training (true) or eval
        (false).
      training_fraction: float, the fraction of the sub-image path list to
        consider as the basis for training examples.

    Returns:
      list: A list of file paths.

    Raises:
      ValueError: if images not found in tmp_dir, or if training_fraction
        would leave no examples for eval.
    """
    paths = tf.gfile.Glob("%s/*.jpg" % tmp_dir)
    if not paths:
        # Bug fix: the original passed three separate positional args to
        # ValueError, producing a tuple-shaped message; build one string.
        raise ValueError("Search of tmp_dir (%s) "
                         "for subimage paths yielded an empty list, "
                         "can't proceed with returning training/eval "
                         "split." % tmp_dir)
    split_index = int(math.floor(len(paths) * training_fraction))
    if split_index >= len(paths):
        raise ValueError("For a path list of size %s "
                         "and a training_fraction of %s "
                         "the resulting split_index of the paths list, "
                         "%s, would leave no elements for the eval "
                         "condition." % (len(paths),
                                         training_fraction,
                                         split_index))
    # Training takes the head of the list, eval the tail.
    return paths[:split_index] if case else paths[split_index:]
def debug(self, msg, *args, **kwargs) -> Task:
    """Log msg with severity 'DEBUG'.

    To pass exception information, use the keyword argument exc_info with
    a true value, e.g.

        await logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)

    Returns the Task wrapping the asynchronous log call.
    """
    # Positional args are forwarded as one tuple, matching the helper's
    # signature.
    return self._make_log_task(logging.DEBUG, msg, args, **kwargs)
def calculate_output(self, variable_name, period):
    """Calculate the value of a variable using the ``calculate_output``
    attribute of the variable.

    Falls back to a plain ``calculate`` when the variable does not define
    a ``calculate_output`` function.
    """
    variable = self.tax_benefit_system.get_variable(variable_name, check_existence = True)
    if variable.calculate_output is None:
        return self.calculate(variable_name, period)
    return variable.calculate_output(self, variable_name, period)
def evaluate_dir(sample_dir):
    """Evaluate all recordings in `sample_dir`.

    Parameters
    ----------
    sample_dir : string
        The path to a directory with *.inkml files.

    Returns
    -------
    list of dictionaries
        Each dictionary contains the keys 'filename' and 'results', where
        'results' itself is a list of dictionaries. Each of the results
        has the keys 'latex' and 'probability'.
    """
    # Drop a single trailing slash so the glob pattern stays clean.
    if sample_dir[-1] == "/":
        sample_dir = sample_dir[:-1]
    pattern = "%s/*.inkml" % sample_dir
    return [evaluate_inkml(path) for path in glob.glob(pattern)]
def cleanup(logger, *args):
    """Environment's cleanup routine.

    Calls ``cleanup()`` on every non-None object that provides it,
    logging (but not propagating) unexpected failures.
    """
    for candidate in args:
        if candidate is None or not hasattr(candidate, 'cleanup'):
            continue
        try:
            candidate.cleanup()
        except NotImplementedError:
            # Optional cleanup: silently skip objects that opt out.
            pass
        except Exception:
            logger.exception("Unable to cleanup %s object", candidate)
def compress(data, compresslevel=9):
    """Returns the data compressed at gzip level compression.

    :param data: payload to compress; on Python 3 a str is encoded with
        the detected system encoding first.
    :param compresslevel: gzip compression level, 0-9 (default 9).
    :return: the compressed payload as bytes.
    """
    buf = BytesIO()
    with open_fileobj(buf, 'wb', compresslevel) as ogz:
        if six.PY3 and not isinstance(data, bytes):
            # gzip requires bytes input.
            data = data.encode(__salt_system_encoding__)
        ogz.write(data)
    # Read the buffer after the gzip stream closes so the trailer is
    # flushed.
    compressed = buf.getvalue()
    return compressed
def prepare(self, data_batch, sparse_row_id_fn=None):
    """Prepares two modules for processing a data batch.

    Usually involves switching bucket and reshaping. For modules that
    contain `row_sparse` parameters in KVStore, it prepares the
    `row_sparse` parameters based on the sparse_row_id_fn.

    When KVStore is used to update parameters for multi-device or
    multi-machine training, a copy of the parameters is stored in
    KVStore. For `row_sparse` parameters, ``update()`` updates the copy
    in KVStore but doesn't broadcast the updated parameters to all
    devices / machines; ``prepare`` broadcasts them with the next batch.

    Parameters
    ----------
    data_batch : DataBatch
        The current batch of data for forward computation.
    sparse_row_id_fn : A callback function
        Takes `data_batch` as input and returns a dict of str -> NDArray,
        used for pulling row_sparse parameters from the kvstore (key is
        the param name, value the row ids to pull).
    """
    # Prepare the main module, then mirror the call on the auxiliary
    # SVRG module so both stay in sync.
    super(SVRGModule, self).prepare(data_batch, sparse_row_id_fn=sparse_row_id_fn)
    self._mod_aux.prepare(data_batch, sparse_row_id_fn=sparse_row_id_fn)
def statistic_recommend(classes, P):
    """Return recommend parameters which are more suitable due to the input
    dataset characteristics.

    :param classes: all classes name
    :type classes : list
    :param P: condition positive
    :type P : dict
    :return: recommendation_list as list
    """
    # Imbalance dominates the recommendation; otherwise pick by arity.
    if imbalance_check(P):
        return IMBALANCED_RECOMMEND
    return BINARY_RECOMMEND if binary_check(classes) else MULTICLASS_RECOMMEND
def _load_scalar_fit(self, fit_key=None, h5file=None, fit_data=None):
    """Loads a single fit.

    Either pass both ``fit_key`` and ``h5file`` (the fit is read from the
    HDF5 group), or pass ``fit_data`` directly — exactly one of the two
    sources must be provided.
    """
    # fit_key and h5file must be given together (XOR catches a lone one).
    if (fit_key is None) ^ (h5file is None):
        raise ValueError("Either specify both fit_key and h5file, or"
                         " neither")
    # Exactly one of the two sources (fit_key+h5file vs fit_data).
    if not ((fit_key is None) ^ (fit_data is None)):
        raise ValueError("Specify exactly one of fit_key and fit_data.")
    if fit_data is None:
        fit_data = self._read_dict(h5file[fit_key])
    # GPR fits carry error estimators; everything else is a plain fit.
    if 'fitType' in fit_data.keys() and fit_data['fitType'] == 'GPR':
        fit = _eval_pysur.evaluate_fit.getGPRFitAndErrorEvaluator(fit_data)
    else:
        fit = _eval_pysur.evaluate_fit.getFitEvaluator(fit_data)
    return fit
async def lock(self, container = None):
    """Wait for lock acquire.

    :param container: routine container used to send the lock request;
        when None, the scheduler's default container is used.
    """
    if container is None:
        container = RoutineContainer.get_container(self.scheduler)
    if self.locked:
        # Already held: nothing to wait for.
        pass
    elif self.lockroutine:
        # Another of our routines is acquiring: wait for its LockedEvent.
        await LockedEvent.createMatcher(self)
    else:
        # First acquirer: request the lock via the container.
        await container.wait_for_send(LockEvent(self.context, self.key, self))
    self.locked = True
def restore(self):
    """Restore the file.

    Currently unsupported: always raises ``NotImplementedError`` (or
    ``JFSError`` when the file is not deleted). The code below the raise
    is the retained legacy implementation and is unreachable.
    """
    if not self.deleted:
        raise JFSError('Tried to restore a not deleted file')
    raise NotImplementedError('Jottacloud has changed the restore API. Please use jottacloud.com in a browser, for now.')
    # --- unreachable legacy implementation, kept for reference ---
    url = 'https://www.jottacloud.com/rest/webrest/%s/action/restore' % self.jfs.username
    data = {'paths[]': self.path.replace(JFS_ROOT, ''),
            'web': 'true',
            'ts': int(time.time()),
            'authToken': 0}
    r = self.jfs.post(url, content=data)
    return r
def iam_device_info(self, apdu):
    """Create a device information record based on the contents of an
    IAmRequest and put it in the cache.
    """
    if _debug: DeviceInfoCache._debug("iam_device_info %r", apdu)
    # Only I-Am requests carry the fields read below.
    if not isinstance(apdu, IAmRequest):
        raise ValueError("not an IAmRequest: %r" % (apdu,))
    device_instance = apdu.iAmDeviceIdentifier[1]
    # Look up an existing record by instance number first, then by the
    # source address; create a fresh record only when both miss.
    device_info = self.cache.get(device_instance, None)
    if not device_info:
        device_info = self.cache.get(apdu.pduSource, None)
    if not device_info:
        device_info = self.device_info_class(device_instance, apdu.pduSource)
    # Refresh the record from the request contents.
    device_info.deviceIdentifier = device_instance
    device_info.address = apdu.pduSource
    device_info.maxApduLengthAccepted = apdu.maxAPDULengthAccepted
    device_info.segmentationSupported = apdu.segmentationSupported
    device_info.vendorID = apdu.vendorID
    self.update_device_info(device_info)
def get_point_cloud(self, pair):
    """Get 3D point cloud from image pair."""
    disparity = self.block_matcher.get_disparity(pair)
    coordinates = self.block_matcher.get_3d(
        disparity, self.calibration.disp_to_depth_mat)
    # Colors come from the left image, converted BGR -> RGB.
    rgb = cv2.cvtColor(pair[0], cv2.COLOR_BGR2RGB)
    return PointCloud(coordinates, rgb)
def send_text(self, text, **options):
    """Send a text message to the chat.

    :param str text: Text of the message to send
    :param options: Additional sendMessage options (see
        https://core.telegram.org/bots/api#sendmessage)
    """
    bot = self.bot
    return bot.send_message(self.id, text, **options)
def generate_proto(source, require = True):
    """Invokes the Protocol Compiler to generate a _pb2.py from the given
    .proto file. Does nothing if the output already exists and is newer
    than the input.
    """
    if not require and not os.path.exists(source):
        return
    # Sources under ../src map to outputs in the current directory.
    output = source.replace(".proto", "_pb2.py").replace("../src/", "")
    # Regenerate when the output is missing or older than the source.
    if (not os.path.exists(output) or
        (os.path.exists(source) and
         os.path.getmtime(source) > os.path.getmtime(output))):
        print("Generating %s..." % output)
        if not os.path.exists(source):
            sys.stderr.write("Can't find required file: %s\n" % source)
            sys.exit(-1)
        if protoc is None:
            sys.stderr.write(
                "protoc is not installed nor found in ../src. Please compile it "
                "or install the binary package.\n")
            sys.exit(-1)
        protoc_command = [ protoc, "-I../src", "-I.", "--python_out=.", source ]
        if subprocess.call(protoc_command) != 0:
            sys.exit(-1)
def race(iterable, loop=None, timeout=None, *args, **kw):
    """Runs coroutines from a given iterable concurrently without waiting
    until the previous one has completed.

    Once any of the tasks completes, the main coroutine is immediately
    resolved, yielding the first resolved value; the remaining tasks are
    cancelled.

    All coroutines will be executed in the same loop.
    This function is a coroutine.

    Arguments:
        iterable (iterable): an iterable collection yielding
            coroutines functions or coroutine objects.
        *args (mixed): variadic arguments to pass to coroutine functions,
            if provided.
        loop (asyncio.BaseEventLoop): optional event loop to use.
        timeout (int|float): maximum number of seconds to wait before
            returning; None (the default) means no limit.

    Raises:
        TypeError: if ``iterable`` argument is not iterable.
        asyncio.TimeoutError: if wait timeout is exceeded.

    Returns:
        mixed: the value of the first coroutine to resolve (None if the
        timeout expires before any resolves).

    Usage::

        async def coro1():
            await asyncio.sleep(2)
            return 1

        async def coro2():
            return 2

        async def coro3():
            await asyncio.sleep(1)
            return 3

        await paco.race([coro1, coro2, coro3])
        # => 2
    """
    assert_iter(iterable=iterable)
    coros = []
    resolved = False
    result = None

    @asyncio.coroutine
    def resolver(index, coro):
        # First finisher wins: record its value and cancel the others.
        nonlocal result
        nonlocal resolved
        value = yield from coro
        if not resolved:
            resolved = True
            result = value
            for _index, future in enumerate(coros):
                if _index != index:
                    future.cancel()

    for index, coro in enumerate(iterable):
        isfunction = asyncio.iscoroutinefunction(coro)
        if not isfunction and not asyncio.iscoroutine(coro):
            raise TypeError(
                'paco: coro must be a coroutine or coroutine function')
        # Coroutine functions are invoked with the variadic arguments.
        if isfunction:
            coro = coro(*args, **kw)
        coros.append(ensure_future(resolver(index, coro)))

    yield from asyncio.wait(coros, timeout=timeout, loop=loop)
    return result
def stretch(image, mask=None):
    """Normalize an image to make the minimum zero and maximum one.

    image - pixel data to be normalized
    mask - optional mask of relevant pixels. None = don't mask

    returns the stretched image (the input array is never modified;
    ``np.array`` makes a working copy)
    """
    image = np.array(image, float)
    # Bug fix: np.product was removed in NumPy 2.0; np.prod is the
    # long-standing equivalent.
    if np.prod(image.shape) == 0:
        return image
    if mask is None:
        minval = np.min(image)
        maxval = np.max(image)
        if minval == maxval:
            # Flat image: clamp into [0, 1] without dividing by zero.
            if minval < 0:
                return np.zeros_like(image)
            elif minval > 1:
                return np.ones_like(image)
            return image
        return (image - minval) / (maxval - minval)
    significant_pixels = image[mask]
    if significant_pixels.size == 0:
        return image
    minval = np.min(significant_pixels)
    maxval = np.max(significant_pixels)
    if minval == maxval:
        # Flat masked region: collapse to the constant value.
        transformed_image = minval
    else:
        transformed_image = ((significant_pixels - minval) /
                             (maxval - minval))
    # Bug fix: the original built an unused ``result = image.copy()``;
    # write the normalized pixels back into the working copy directly.
    image[mask] = transformed_image
    return image
def cn(shape, dtype=None, impl='numpy', **kwargs):
    """Return a space of complex tensors.

    Parameters
    ----------
    shape : positive int or sequence of positive ints
        Number of entries per axis for elements in this space. A
        single integer results in a space with 1 axis.
    dtype : optional
        Data type of each element. Can be provided in any way the
        `numpy.dtype` function understands, e.g. as built-in type or
        as a string. Only complex floating-point data types are allowed.
        For ``None``, the `TensorSpace.default_dtype` of the
        created space is used in the form
        ``default_dtype(ComplexNumbers())``.
    impl : str, optional
        Implementation back-end for the space. See
        `odl.space.entry_points.tensor_space_impl_names` for available
        options.
    kwargs :
        Extra keyword arguments passed to the space constructor.

    Returns
    -------
    cn : `TensorSpace`

    Examples
    --------
    Space of complex 3-tuples with ``complex64`` entries:

    >>> odl.cn(3, dtype='complex64')
    cn(3, dtype='complex64')

    Complex 2x3 tensors with ``complex64`` entries:

    >>> odl.cn((2, 3), dtype='complex64')
    cn((2, 3), dtype='complex64')

    The default data type depends on the implementation. For
    ``impl='numpy'``, it is ``'complex128'``:

    >>> space = odl.cn((2, 3))
    >>> space
    cn((2, 3))
    >>> space.dtype
    dtype('complex128')

    See Also
    --------
    tensor_space : Space of tensors with arbitrary scalar data type.
    rn : Real tensor space.
    """
    cn_cls = tensor_space_impl(impl)
    if dtype is None:
        # Fall back to the implementation's default complex dtype.
        dtype = cn_cls.default_dtype(ComplexNumbers())
    cn = cn_cls(shape=shape, dtype=dtype, **kwargs)
    # Guard against a caller passing a non-complex dtype.
    if not cn.is_complex:
        raise ValueError('data type {!r} not a complex floating-point type.'
                         ''.format(dtype))
    return cn
def ns(ns):
    """Class decorator that sets default tags namespace to use with its
    instances.
    """
    def decorate(cls):
        # Record the namespace on the class itself.
        setattr(cls, ENTITY_DEFAULT_NS_ATTR, ns)
        return cls
    return decorate
def _maybe_apply_time_shift(da, time_offset=None, **DataAttrs):
    """Correct off-by-one error in GFDL instantaneous model data.

    Instantaneous data that is outputted by GFDL models is generally off
    by one timestep. For example, a netCDF file that is supposed to
    correspond to 6 hourly data for the month of January, will have its
    last time value be in February.
    """
    if time_offset is not None:
        # Explicit offset from the caller takes precedence.
        time = times.apply_time_offset(da[TIME_STR], **time_offset)
        da[TIME_STR] = time
    else:
        if DataAttrs['dtype_in_time'] == 'inst':
            if DataAttrs['intvl_in'].endswith('hr'):
                # E.g. '6hr' -> shift back 6 hours.
                offset = -1 * int(DataAttrs['intvl_in'][0])
            else:
                offset = 0
            time = times.apply_time_offset(da[TIME_STR], hours=offset)
            da[TIME_STR] = time
    return da
def parse_command(self, string):
    """Parse out any possible valid command from an input string.

    Returns ``(event, args)`` for a recognized command, or
    ``(None, None)`` when the first word is not a known command.
    """
    head, _, tail = string.partition(" ")
    command = head.lower()
    if command not in self.commands:
        return None, None
    return self.commands[command]["event"], shlex.split(tail.strip())
def _get_initial_request(self):
    """Return the initial request for the RPC.

    This defines the initial request that must always be sent to Pub/Sub
    immediately upon opening the subscription.

    Returns:
        google.cloud.pubsub_v1.types.StreamingPullRequest: A request
        suitable for being the first request on the stream (and not
        suitable for any other purpose).
    """
    # Re-assert any outstanding leases so the server keeps them alive
    # across stream reconnects.
    if self._leaser is not None:
        lease_ids = list(self._leaser.ack_ids)
    else:
        lease_ids = []
    request = types.StreamingPullRequest(
        modify_deadline_ack_ids=list(lease_ids),
        modify_deadline_seconds=[self.ack_deadline] * len(lease_ids),
        # The 99th percentile of observed ack latencies drives the
        # stream-wide ack deadline.
        stream_ack_deadline_seconds=self.ack_histogram.percentile(99),
        subscription=self._subscription,
    )
    return request
def convert_dicts(d, to_class=AttrDictWrapper, from_class=dict):
    """Recursively convert dict and UserDict types.

    Note that `d` is unchanged.

    Args:
        to_class (type): Dict-like type to convert values to, usually
            UserDict subclass, or dict.
        from_class (type): Dict-like type to convert values from. If a
            tuple, multiple types are converted.

    Returns:
        Converted data as `to_class` instance.
    """
    d_ = to_class()
    # Bug fix: dict.iteritems() is Python 2-only; items() works on both
    # Python 2 and 3 (and on UserDict subclasses).
    for key, value in d.items():
        if isinstance(value, from_class):
            d_[key] = convert_dicts(value, to_class=to_class,
                                    from_class=from_class)
        else:
            d_[key] = value
    return d_
def get_medium_attachment(self, name, controller_port, device):
    """Returns a medium attachment which corresponds to the controller with
    the given name, on the given port and device slot.

    in name of type str
    in controller_port of type int
    in device of type int
    return attachment of type :class:`IMediumAttachment`
    raises :class:`VBoxErrorObjectNotFound`
        No attachment exists for the given controller/port/device
        combination.
    """
    # basestring / baseinteger are the py2/py3 compatibility aliases
    # used throughout this binding.
    if not isinstance(name, basestring):
        raise TypeError("name can only be an instance of type basestring")
    if not isinstance(controller_port, baseinteger):
        raise TypeError("controller_port can only be an instance of type baseinteger")
    if not isinstance(device, baseinteger):
        raise TypeError("device can only be an instance of type baseinteger")
    attachment = self._call("getMediumAttachment",
                            in_p=[name, controller_port, device])
    # Wrap the raw result in its interface class.
    attachment = IMediumAttachment(attachment)
    return attachment
async def body(self):
    """A helper function which blocks until the body has been read
    completely.

    Returns the bytes of the body which the user should decode. If the
    request does not have a body part (i.e. it is a GET request) this
    function returns None.
    """
    if not isinstance(self._body, bytes):
        # First access: the body is still an awaitable; resolve and
        # cache it so later calls return instantly.
        self._body = await self._body
    return self._body
def _pprint(dic):
    """Prints a dictionary with one indentation level."""
    for name, val in dic.items():
        print(" {0}: {1}".format(name, val))
def friendly_load(parser, token):
    """Tries to load a custom template tag set. Non existing tag libraries
    are ignored.

    This means that, if used in conjunction with ``if_has_tag``, you can
    try to load the comments template tag library to enable comments even
    if the comments framework is not installed.

    For example::

        {% load friendly_loader %}
        {% friendly_load comments webdesign %}

        {% if_has_tag render_comment_list %}
            {% render_comment_list for obj %}
        {% else %}
            {% if_has_tag lorem %}
                {% lorem %}
            {% endif_has_tag %}
        {% endif_has_tag %}
    """
    bits = token.contents.split()
    if len(bits) >= 4 and bits[-2] == "from":
        # "{% friendly_load tag1 tag2 from library %}" form: load only a
        # subset of tags from one library.
        name = bits[-1]
        try:
            lib = find_library(parser, name)
            subset = load_from_library(lib, name, bits[1:-2])
            parser.add_library(subset)
        except TemplateSyntaxError:
            # Missing library: silently ignore — that is the point.
            pass
    else:
        # Plain "{% friendly_load lib1 lib2 %}" form.
        for name in bits[1:]:
            try:
                lib = find_library(parser, name)
                parser.add_library(lib)
            except TemplateSyntaxError:
                pass
    return LoadNode()
def fit(self, X, y, **kwargs):
    """A simple pass-through method; calls fit on the estimator and then
    draws the alpha-error plot.
    """
    estimator = self.estimator
    estimator.fit(X, y, **kwargs)
    self.draw()
    return self
def tarball_files(work_dir, tar_name, uuid=None, files=None):
    """Tars a group of files together into a tarball.

    work_dir: str Current Working Directory
    tar_name: str Name of tarball
    uuid: str UUID to stamp files with
    files: str(s) List of filenames to place in the tarball from working
        directory
    """
    tar_path = os.path.join(work_dir, tar_name)
    with tarfile.open(tar_path, 'w:gz') as tar:
        for fname in files:
            # Prefix member names with the UUID when one is supplied.
            arcname = uuid + '.' + fname if uuid else fname
            tar.add(os.path.join(work_dir, fname), arcname=arcname)
def safe_request(fct):
    """Return json messages instead of raising errors.

    Decorator: wraps a function returning a ``requests`` response and
    converts connection errors and HTTP failures into plain dicts with
    ``error``/``status`` keys.
    """
    def inner(*args, **kwargs):
        try:
            _data = fct(*args, **kwargs)
        except requests.exceptions.ConnectionError as error:
            # Network failure: report it as a 404-style payload.
            return {'error': str(error), 'status': 404}
        if _data.ok:
            if _data.content:
                safe_data = _data.json()
            else:
                # Empty 2xx body: synthesize a success marker.
                safe_data = {'success': True}
        else:
            safe_data = {'error': _data.reason, 'status': _data.status_code}
        return safe_data
    return inner
def write_to_fitsfile(self, fitsfile, clobber=True):
    """Write this mapping to a FITS file, to avoid having to recompute it."""
    from fermipy.skymap import Map
    hpx_header = self._hpx.make_header()
    index_map = Map(self.ipixs, self.wcs)
    mult_map = Map(self.mult_val, self.wcs)
    prim_hdu = index_map.create_primary_hdu()
    # NOTE(review): ``mult_map`` is built but never used, and this HDU is
    # created from ``index_map`` — possibly it was meant to be
    # ``mult_map.create_image_hdu()``. Confirm before changing.
    mult_hdu = index_map.create_image_hdu()
    # Copy the HEALPix bookkeeping keywords into both HDUs.
    for key in ['COORDSYS', 'ORDERING', 'PIXTYPE',
                'ORDERING', 'ORDER', 'NSIDE',
                'FIRSTPIX', 'LASTPIX']:
        prim_hdu.header[key] = hpx_header[key]
        mult_hdu.header[key] = hpx_header[key]
    hdulist = fits.HDUList([prim_hdu, mult_hdu])
    hdulist.writeto(fitsfile, overwrite=clobber)
def install_via_requirements(requirements_str, force=False):
    """Download multiple Quilt data packages via quilt.xml requirements file.

    :param requirements_str: either inline YAML, or ``@path`` pointing at
        a requirements file on disk.
    :param force: passed through to :func:`install` for each package.
    """
    if requirements_str[0] == '@':
        path = requirements_str[1:]
        if os.path.isfile(path):
            yaml_data = load_yaml(path)
            if 'packages' not in yaml_data.keys():
                # Bug fix: the original messages contained a literal
                # "(unknown)" and a .format() call whose substitution was
                # never applied; include the actual filename.
                raise CommandException(
                    'Error in {filename}: missing "packages" node'.format(filename=path))
        else:
            raise CommandException(
                "Requirements file not found: {filename}".format(filename=path))
    else:
        yaml_data = yaml.safe_load(requirements_str)
    for pkginfo in yaml_data['packages']:
        info = parse_package_extended(pkginfo)
        install(info.full_name, info.hash, info.version, info.tag, force=force)
def human_size(bytes, units=(' bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB')):
    """Returns a human readable string representation of bytes.

    Repeatedly integer-divides by 1024 until the value drops below 1024
    or the unit list is exhausted. Fixes two defects in the original:
    a mutable default argument, and an IndexError for values beyond EB.
    """
    if bytes < 1024 or len(units) == 1:
        return str(bytes) + units[0]
    return human_size(bytes >> 10, units[1:])
def category_count(self):
    """Return a mapping from each category in ``categories`` to the
    number of items it contains."""
    return {name: len(members)
            for name, members in self.categories.items()}
def _filter_filecommands(self, filecmd_iter):
    """Return the filecommands filtered by includes & excludes.

    :param filecmd_iter: callable returning an iterable of FileCommand
        objects to filter.
    :return: a list of FileCommand objects
    """
    # Fast path: no filters configured, keep everything untouched.
    if self.includes is None and self.excludes is None:
        return list(filecmd_iter())
    result = []
    for fc in filecmd_iter():
        if (isinstance(fc, commands.FileModifyCommand) or
            isinstance(fc, commands.FileDeleteCommand)):
            # Keep only paths that pass the filters, rewritten relative
            # to the new root.
            if self._path_to_be_kept(fc.path):
                fc.path = self._adjust_for_new_root(fc.path)
            else:
                continue
        elif isinstance(fc, commands.FileDeleteAllCommand):
            # Path-independent command: always kept as-is.
            pass
        elif isinstance(fc, commands.FileRenameCommand):
            # May return None (command dropped by the is-not-None check).
            fc = self._convert_rename(fc)
        elif isinstance(fc, commands.FileCopyCommand):
            # May return None (command dropped by the is-not-None check).
            fc = self._convert_copy(fc)
        else:
            self.warning("cannot handle FileCommands of class %s - ignoring",
                fc.__class__)
            continue
        if fc is not None:
            result.append(fc)
    return result
:return: a list of FileCommand objects |
def notify_change(self, change):
    """Called when a property has changed.

    Sends the updated property value to the front end over the comm
    channel (when one is open) before delegating to the base class.

    :param change: traitlets change dict; only 'name' is read here.
    """
    name = change['name']
    # Only sync properties that are part of the widget state and whose
    # value the serializer decides should actually be sent.
    if self.comm is not None and self.comm.kernel is not None:
        if name in self.keys and self._should_send_property(name, getattr(self, name)):
            self.send_state(key=name)
    super(Widget, self).notify_change(change)
def tanimoto_coefficient(a, b):
    """Measured similarity between two points in a multi-dimensional space.

    Returns:
        1.0 if the two points completely overlap,
        0.0 if the two points are infinitely far apart.
    """
    # T(a, b) = a.b / (|a|^2 + |b|^2 - a.b)
    # Rewritten without Python-2-only tuple-parameter lambdas
    # (`lambda (x, y): ...` is a SyntaxError on Python 3), and computing
    # the dot product once instead of twice.
    dot = sum(float(x) * float(y) for x, y in zip(a, b))
    norm_a = sum(float(x) ** 2 for x in a)
    norm_b = sum(float(y) ** 2 for y in b)
    return dot / (norm_a + norm_b - dot)
Returns:
1.0 if the two points completely overlap,
0.0 if the two points are infinitely far apart. |
def dump_engines(target=sys.stderr):
    """Print successfully imported templating engines.

    :param target: writable stream the listing is printed to
        (default: stderr, bound at import time).
    """
    print("Available templating engines:", file=target)
    width = max(len(engine) for engine in engines.engines)
    for handle, engine in sorted(engines.engines.items()):
        # BUG FIX: split('\n', 0) performs no split, so the "description"
        # was the entire docstring; maxsplit=1 yields just the first line.
        description = engine.__doc__.split('\n', 1)[0]
        print(" %-*s - %s" % (width, handle, description), file=target)
def get_points(self, measurement=None, tags=None):
    """Return a generator for all the points that match the given filters.

    :param measurement: The measurement name
    :type measurement: str
    :param tags: Tags to look for
    :type tags: dict
    :return: Points generator
    """
    # type(b''.decode()) is the native text type (py2/py3 compatible).
    if not isinstance(measurement,
                      (bytes, type(b''.decode()), type(None))):
        raise TypeError('measurement must be an str or None')
    for series in self._get_series():
        series_name = series.get('measurement',
                                 series.get('name', 'results'))
        if series_name is None:
            # Nameless ("system") series: yielded only when no tag
            # filter was requested.
            if tags is None:
                for item in self._get_points_for_series(series):
                    yield item
        elif measurement in (None, series_name):
            # A point matches when no tags were requested, or when either
            # the point itself or the series-level tags match the filter.
            series_tags = series.get('tags', {})
            for item in self._get_points_for_series(series):
                if tags is None or \
                        self._tag_matches(item, tags) or \
                        self._tag_matches(series_tags, tags):
                    yield item
:param measurement: The measurement name
:type measurement: str
:param tags: Tags to look for
:type tags: dict
:return: Points generator |
def split_bits(value, *bits):
    """Split integer value into list of ints, according to `bits` list.

    For example, split_bits(0x1234, 4, 8, 4) == [0x1, 0x23, 0x4]
    """
    total = sum(bits)
    # Any bits beyond the requested widths indicate a caller error.
    assert value >> total == 0
    fields = []
    remaining = total
    for width in bits:
        remaining -= width
        fields.append((value >> remaining) & ((1 << width) - 1))
    return fields
For example, split_bits(0x1234, 4, 8, 4) == [0x1, 0x23, 0x4] |
def is_excluded_path(args, filepath):
    """Return True if *filepath* falls under one of the exclude paths.

    Precedence: regexp excludes first, then the --include whitelist,
    then the most specific matching include/exclude rule.

    :param args: parsed CLI options (uses .regexp, .include, .root).
    :param filepath: path of the file to test.
    """
    # 1) Regular-expression based exclusion.
    for regexp_exclude_path in args.regexp:
        if re.match(regexp_exclude_path, filepath):
            return True
    abspath = os.path.abspath(filepath)
    # 2) If include dirs were given, anything outside all of them is
    # excluded outright.
    if args.include:
        out_of_include_dirs = True
        for incl_path in args.include:
            absolute_include_path = os.path.abspath(os.path.join(args.root, incl_path))
            if is_child_dir(absolute_include_path, abspath):
                out_of_include_dirs = False
                break
        if out_of_include_dirs:
            return True
    # 3) Scan exclude rules; when a rule's directory contains abspath,
    # keep consuming the following (deeper) rules that still contain it
    # so the most specific rule wins.
    # NOTE(review): assumes create_exclude_rules returns rules ordered
    # shallow-to-deep — confirm against its implementation.
    excl_rules = create_exclude_rules(args)
    for i, rule in enumerate(excl_rules):
        if rule[0] == abspath:
            return rule[1]
        if is_child_dir(rule[0], abspath):
            last_result = rule[1]
            for j in range(i + 1, len(excl_rules)):
                rule_deep = excl_rules[j]
                if not is_child_dir(rule_deep[0], abspath):
                    break
                last_result = rule_deep[1]
            return last_result
    return False
def search(self, **kwargs):
    """Method to search vlan's based on extends search.

    :param search: Dict containing QuerySets to find vlan's.
    :param include: Array containing fields to include on response.
    :param exclude: Array containing fields to exclude on response.
    :param fields: Array containing fields to override default fields.
    :param kind: Determine if result will be detailed ('detail') or basic ('basic').
    :return: Dict containing vlan's
    """
    return super(ApiVlan, self).get(self.prepare_url('api/v3/vlan/',
                                                     kwargs))
:param search: Dict containing QuerySets to find vlan's.
:param include: Array containing fields to include on response.
:param exclude: Array containing fields to exclude on response.
:param fields: Array containing fields to override default fields.
:param kind: Determine if result will be detailed ('detail') or basic ('basic').
:return: Dict containing vlan's |
def expected_h(nvals, fit="RANSAC"):
    """Uses expected_rs to calculate the expected value for the Hurst
    exponent h based on the values of n used for the calculation.

    Args:
        nvals (iterable of int): the values of n used to calculate the
            individual (R/S)_n
    KWargs:
        fit (str): fitting method for the line fit, either 'poly' for
            least-squares polynomial fitting or 'RANSAC' for RANSAC
            fitting (more robust to outliers)
    Returns:
        float: expected h for white noise
    """
    log_n = np.log(nvals)
    log_rs = np.log([expected_rs(n) for n in nvals])
    # The slope of the log-log fit is the expected Hurst exponent.
    return poly_fit(log_n, log_rs, 1, fit=fit)[0]
based on the values of n used for the calculation.
Args:
nvals (iterable of int):
the values of n used to calculate the individual (R/S)_n
KWargs:
fit (str):
the fitting method to use for the line fit, either 'poly' for normal
least squares polynomial fitting or 'RANSAC' for RANSAC-fitting which
is more robust to outliers
Returns:
float:
expected h for white noise |
def _process_string_token(self, token, start_row, start_col):
    """Internal method for identifying and checking string tokens
    from the token stream.

    Args:
        token: the token to check.
        start_row: the line on which the token was found.
        start_col: the column on which the token was found.
    """
    # Find the first quote character, skipping any prefix (r, b, u, ...).
    for i, char in enumerate(token):
        if char in QUOTES:
            break
    norm_quote = token[i:]
    # Triple-quoted strings are only recorded here (checked elsewhere).
    if len(norm_quote) >= 3 and norm_quote[:3] in TRIPLE_QUOTE_OPTS.values():
        self._tokenized_triple_quotes[start_row] = (token, norm_quote[:3], start_row, start_col)
        return
    preferred_quote = SMART_QUOTE_OPTS.get(self.config.string_quote)
    if self.config.string_quote in SMART_CONFIG_OPTS:
        # "Smart" mode: prefer the other quote when it avoids escaping —
        # i.e. the preferred quote occurs in the string body but the
        # alternative does not.
        other_quote = next(q for q in QUOTES if q != preferred_quote)
        if preferred_quote in token[i + 1:-1] and other_quote not in token[i + 1:-1]:
            preferred_quote = other_quote
    if norm_quote[0] != preferred_quote:
        self._invalid_string_quote(
            quote=norm_quote[0],
            row=start_row,
            correct_quote=preferred_quote,
            col=start_col,
        )
from the token stream.
Args:
token: the token to check.
start_row: the line on which the token was found.
start_col: the column on which the token was found. |
def dataset_list_cli(self,
                     sort_by=None,
                     size=None,
                     file_type=None,
                     license_name=None,
                     tag_ids=None,
                     search=None,
                     user=None,
                     mine=False,
                     page=1,
                     csv_display=False):
    """A wrapper to dataset_list for the client; prints the result.

    Parameters
    ==========
    sort_by: how to sort the result, see valid_sort_bys for options
    size: the size of the dataset, see valid_sizes for string options
    file_type: the format, see valid_file_types for string options
    license_name: string descriptor for license, see valid_license_names
    tag_ids: tag identifiers to filter the search
    search: a search term to use (default is empty string)
    user: username to filter the search to
    mine: boolean if True, group is changed to "my" to return personal
    page: the page to return (default is 1)
    csv_display: if True, print comma separated values instead of table
    """
    datasets = self.dataset_list(sort_by, size, file_type, license_name,
                                 tag_ids, search, user, mine, page)
    # Columns shown in either output format.
    fields = ['ref', 'title', 'size', 'lastUpdated', 'downloadCount']
    if datasets:
        if csv_display:
            self.print_csv(datasets, fields)
        else:
            self.print_table(datasets, fields)
    else:
        print('No datasets found')
are described here, see dataset_list for others.
Parameters
==========
sort_by: how to sort the result, see valid_sort_bys for options
size: the size of the dataset, see valid_sizes for string options
file_type: the format, see valid_file_types for string options
license_name: string descriptor for license, see valid_license_names
tag_ids: tag identifiers to filter the search
search: a search term to use (default is empty string)
user: username to filter the search to
mine: boolean if True, group is changed to "my" to return personal
page: the page to return (default is 1)
csv_display: if True, print comma separated values instead of table |
def close(self):
    """Close the client: the Session, the CBS authentication layer and
    the Connection.

    If the client was opened using an external Connection, that
    Connection is left intact. No further messages can be sent or
    received and the client cannot be re-opened. All pending, unsent
    messages remain uncleared so they can be inspected and queued to a
    new client.
    """
    if self.message_handler:
        self.message_handler.destroy()
        self.message_handler = None
    # Signal the keep-alive thread to stop, then wait for it to exit.
    self._shutdown = True
    if self._keep_alive_thread:
        self._keep_alive_thread.join()
        self._keep_alive_thread = None
    if not self._session:
        return  # already closed
    if not self._connection.cbs:
        _logger.debug("Closing non-CBS session.")
        self._session.destroy()
    else:
        # CBS session: destruction deferred (presumably handled by the
        # CBS auth layer teardown — confirm).
        _logger.debug("CBS session pending.")
    self._session = None
    if not self._ext_connection:
        _logger.debug("Closing exclusive connection.")
        self._connection.destroy()
    else:
        # Externally supplied connection stays open for the caller.
        _logger.debug("Shared connection remaining open.")
    self._connection = None
and CBS authentication layer as well as the Connection.
If the client was opened using an external Connection,
this will be left intact.
No further messages can be sent or received and the client
cannot be re-opened.
All pending, unsent messages will remain uncleared to allow
them to be inspected and queued to a new client. |
def monitor(app):
    """Set up application monitoring.

    Opens the Heroku dashboard, the MTurk HIT manager, the app logs and
    the database, then prints a refreshed status summary every 10 seconds
    until _keep_running() returns False.

    :param app: the Dallinger experiment UID to monitor.
    """
    heroku_app = HerokuApp(dallinger_uid=app)
    webbrowser.open(heroku_app.dashboard_url)
    webbrowser.open("https://requester.mturk.com/mturk/manageHITs")
    heroku_app.open_logs()
    # NOTE(review): `open` is the macOS launcher command — this line is
    # presumably macOS-only; confirm intended platforms.
    check_call(["open", heroku_app.db_uri])
    while _keep_running():
        summary = get_summary(app)
        click.clear()
        click.echo(header)
        click.echo("\nExperiment {}\n".format(app))
        click.echo(summary)
        time.sleep(10)
def multi_split(txt, delims):
    """Split *txt* by multiple delimiter characters, applied in turn.

    :param txt: the string to split.
    :param delims: iterable of single-character delimiters.
    :return: list of fragments.
    """
    res = [txt]
    for delim_char in delims:
        txt, res = res, []
        for word in txt:
            if len(word) > 1:
                res += word.split(delim_char)
            else:
                # BUG FIX: fragments of length <= 1 were silently dropped
                # on every pass after the one that produced them; keep
                # them so no input characters are lost.
                res.append(word)
    return res
def derive_key_block(self, master_secret, server_random,
                     client_random, req_len):
    """Perform the derivation of master_secret into a key_block of
    req_len requested length. See RFC 5246, section 6.3.
    """
    randomness = server_random + client_random
    if self.tls_version > 0x0300:
        # TLS 1.0+ PRF takes an explicit label.
        return self.prf(master_secret, b"key expansion", randomness, req_len)
    # SSLv3 (and below) PRF takes no label.
    return self.prf(master_secret, randomness, req_len)
requested length. See RFC 5246, section 6.3. |
def get_templates_per_page(self, per_page=1000, page=1, params=None):
    """Get templates per page.

    :param per_page: How many objects per page. Default: 1000
    :param page: Which page. Default: 1
    :param params: Search parameters. Default: None
    :return: list
    """
    return self._get_resource_per_page(resource=TEMPLATES, per_page=per_page, page=page, params=params)
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:param params: Search parameters. Default: {}
:return: list |
def list_inactive_vms(**kwargs):
    """Return a list of names for inactive virtual machines on the minion.

    :param connection: libvirt connection URI, overriding defaults
    :param username: username to connect with, overriding defaults
    :param password: password to connect with, overriding defaults

    CLI Example:

    .. code-block:: bash

        salt '*' virt.list_inactive_vms
    """
    conn = __get_conn(**kwargs)
    names = [dom.name()
             for dom in _get_domain(conn, iterable=True, active=False)]
    conn.close()
    return names
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.list_inactive_vms |
def join_group_with_token(self, group_hashtag, group_jid, join_token):
    """Try to join a specific group, using a cryptographic token that was
    received earlier from a search.

    :param group_hashtag: The public hashtag of the group to join
        (like '#Music')
    :param group_jid: The JID of the same group
    :param join_token: token extracted in the on_group_search_response
        callback, after calling search_group()
    """
    log.info("[+] Trying to join the group '{}' with JID {}".format(group_hashtag, group_jid))
    return self._send_xmpp_element(roster.GroupJoinRequest(group_hashtag, join_token, group_jid))
:param group_hashtag: The public hashtag of the group into which to join (like '#Music')
:param group_jid: The JID of the same group
:param join_token: a token that can be extracted in the callback on_group_search_response, after calling
search_group() |
def _get_association_classes(self, namespace):
    """Yield the classes in the repo for *namespace* that carry the
    'Association' qualifier.

    The yielded classes are NOT copies — they are the repository objects
    themselves, so callers MUST NOT modify them.
    """
    for klass in six.itervalues(self._get_class_repo(namespace)):
        if 'Association' in klass.qualifiers:
            yield klass
Returns the classes that have associations qualifier.
Does NOT copy so these are what is in repository. User functions
MUST NOT modify these classes.
Returns: Returns generator where each yield returns a single
association class |
def gui_repaint(self, drawDC=None):
    """Performs update of the displayed image on the GUI canvas, using the
    supplied device context. If drawDC is None, a ClientDC will be used to
    redraw the image.
    """
    DEBUG_MSG("gui_repaint()", 1, self)
    # Only draw while the window is actually visible; drawing to a hidden
    # window is skipped entirely.
    if self.IsShownOnScreen():
        if drawDC is None:
            drawDC=wx.ClientDC(self)
        drawDC.DrawBitmap(self.bitmap, 0, 0)
    else:
        pass
def create_atomic_wrapper(cls, wrapped_func):
    """Return *wrapped_func* wrapped so each call runs in a DB transaction.

    :param wrapped_func: callable to execute inside transaction.atomic().
    :return: the wrapping callable (metadata preserved via functools.wraps).
    """
    from functools import wraps

    # FIX: without @wraps the wrapper hid the wrapped function's
    # __name__/__doc__, breaking introspection and debugging.
    @wraps(wrapped_func)
    def _create_atomic_wrapper(*args, **kwargs):
        with transaction.atomic():
            return wrapped_func(*args, **kwargs)
    return _create_atomic_wrapper
def averaged_sgd_entropic_transport(a, b, M, reg, numItermax=300000, lr=None):
    """Compute the ASGD algorithm to solve the regularized semi-continuous
    measures optimal transport max problem ([Genevay et al., 2016], alg. 2).

    Parameters
    ----------
    a : np.ndarray(ns,)
        source measure
    b : np.ndarray(nt,)
        target measure
    M : np.ndarray(ns, nt)
        cost matrix
    reg : float
        regularization term > 0
    numItermax : int
        number of iterations
    lr : float, optional
        learning rate; defaults to 1 / max(a / reg)

    Returns
    -------
    ave_beta : np.ndarray(nt,)
        averaged dual variable

    References
    ----------
    [Genevay et al., 2016] Stochastic Optimization for Large-scale
    Optimal Transport, NIPS 2016, arXiv:1605.08527.
    """
    if lr is None:
        lr = 1. / max(a / reg)
    n_source = np.shape(M)[0]
    n_target = np.shape(M)[1]
    cur_beta = np.zeros(n_target)
    ave_beta = np.zeros(n_target)
    for cur_iter in range(numItermax):
        k = cur_iter + 1
        # Sample one source index and take a stochastic coordinate step
        # with a 1/sqrt(k) decaying step size.
        i = np.random.randint(n_source)
        cur_coord_grad = coordinate_grad_semi_dual(b, M, reg, cur_beta, i)
        cur_beta += (lr / np.sqrt(k)) * cur_coord_grad
        # Running uniform average of the iterates (the "A" in ASGD).
        ave_beta = (1. / k) * cur_beta + (1 - 1. / k) * ave_beta
    return ave_beta
The function solves the following optimization problem:
.. math::
\gamma = arg\min_\gamma <\gamma,M>_F + reg\cdot\Omega(\gamma)
s.t. \gamma 1 = a
\gamma^T 1= b
\gamma \geq 0
Where :
- M is the (ns,nt) metric cost matrix
- :math:`\Omega` is the entropic regularization term with :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- a and b are source and target weights (sum to 1)
The algorithm used for solving the problem is the ASGD algorithm
as proposed in [18]_ [alg.2]
Parameters
----------
b : np.ndarray(nt,)
target measure
M : np.ndarray(ns, nt)
cost matrix
reg : float number
Regularization term > 0
numItermax : int number
number of iteration
lr : float number
learning rate
Returns
-------
ave_v : np.ndarray(nt,)
dual variable
Examples
--------
>>> n_source = 7
>>> n_target = 4
>>> reg = 1
>>> numItermax = 300000
>>> a = ot.utils.unif(n_source)
>>> b = ot.utils.unif(n_target)
>>> rng = np.random.RandomState(0)
>>> X_source = rng.randn(n_source, 2)
>>> Y_target = rng.randn(n_target, 2)
>>> M = ot.dist(X_source, Y_target)
>>> method = "ASGD"
>>> asgd_pi = stochastic.solve_semi_dual_entropic(a, b, M, reg,
method, numItermax)
>>> print(asgd_pi)
References
----------
[Genevay et al., 2016] :
Stochastic Optimization for Large-scale Optimal Transport,
Advances in Neural Information Processing Systems (2016),
arXiv preprint arxiv:1605.08527. |
def recompile_all(path):
    """Recursively recompile all .py files under *path* (or the single
    file if *path* is not a directory).
    """
    import os
    if os.path.isdir(path):
        for root, dirs, files in os.walk(path):
            for name in files:
                if name.endswith('.py'):
                    filename = os.path.abspath(os.path.join(root, name))
                    # FIX: `print >> sys.stderr, filename` is Python-2-only
                    # syntax; write to stderr portably instead.
                    sys.stderr.write(filename + '\n')
                    recompile(filename)
    else:
        filename = os.path.abspath(path)
        recompile(filename)
def as_dict(self):
    """Dict representation.

    Returns:
        A dict of this object's items plus '@module' and '@class'
        metadata identifying the originating class.
    """
    out = dict(self)
    out['@module'] = self.__class__.__module__
    out['@class'] = self.__class__.__name__
    return out
Returns:
Dictionary of parameters from fefftags object |
def _get_content(self, url):
    """Fetch raw content from the DB at *url*, decoded as UTF-8.

    On an HTTPError, offers to request a fresh permanent session and
    retries once via recursion; returns None if the user declines.
    On an incomplete read, keeps the partial payload.
    """
    target_url = self._db_url + '/' + unquote(url)
    log.debug("Opening '{0}'".format(target_url))
    try:
        f = self.opener.open(target_url)
    except HTTPError as e:
        log.error("HTTP error, your session may be expired.")
        log.error(e)
        if input("Request new permanent session and retry? (y/n)") in 'yY':
            self.request_permanent_session()
            # Retry the same request with the new session.
            return self._get_content(url)
        else:
            return None
    log.debug("Accessing '{0}'".format(target_url))
    try:
        content = f.read()
    except IncompleteRead as icread:
        log.critical(
            "Incomplete data received from the DB, " +
            "the data could be corrupted."
        )
        # Keep whatever partial payload was received.
        content = icread.partial
    log.debug("Got {0} bytes of data.".format(len(content)))
    return content.decode('utf-8')
def log(self, branch, remote):
    """Call a log-command, if set by git-up.fetch.all.

    :param branch: local branch object (its .name is passed to the hook).
    :param remote: remote object (its .name is passed to the hook).
    """
    log_hook = self.settings['rebase.log-hook']
    if log_hook:
        if ON_WINDOWS:
            # cmd.exe has no shell positional parameters: rewrite the
            # hook into a temporary batch file, mapping $n -> %n,
            # escaping bare % signs, and turning ';' into newlines.
            log_hook = re.sub(r'\$(\d+)', r'%\1', log_hook)
            log_hook = re.sub(r'%(?!\d)', '%%', log_hook)
            log_hook = re.sub(r'; ?', r'\n', log_hook)
            with NamedTemporaryFile(
                    prefix='PyGitUp.', suffix='.bat', delete=False
            ) as bat_file:
                bat_file.file.write(b'@echo off\n')
                bat_file.file.write(log_hook.encode('utf-8'))
            state = subprocess.call(
                [bat_file.name, branch.name, remote.name]
            )
            os.remove(bat_file.name)
        else:
            # POSIX: run the hook through the shell, passing branch and
            # remote names as $1/$2 ('git-up' becomes $0).
            state = subprocess.call(
                [log_hook, 'git-up', branch.name, remote.name],
                shell=True
            )
        if self.testing:
            assert state == 0, 'log_hook returned != 0'
def on_channel_open(self, channel):
    """Invoked by pika when the channel has been opened.

    The channel object is passed in so we can make use of it. Since the
    channel is now open, we'll declare the exchange to use.

    :param pika.channel.Channel channel: The channel object
    """
    self._logger.debug('Channel opened')
    self._channel = channel
    # Let the channel find its owning client for callbacks.
    self._channel.parent_client = self
    self.add_on_channel_close_callback()
    self.setup_exchange(self._exchange)
The channel object is passed in so we can make use of it.
Since the channel is now open, we'll declare the exchange to use.
:param pika.channel.Channel channel: The channel object |
def update_col_from_series(self, column_name, series, cast=False):
    """Update existing values in a column from another series.

    Index values must match in both column and series. Optionally casts
    the series to the existing column's dtype.

    Parameters
    ----------
    column_name : str
    series : pandas.Series
    cast : bool, optional, default False

    Raises
    ------
    ValueError
        If dtypes differ and *cast* is False.
    """
    logger.debug('updating column {!r} in table {!r}'.format(
        column_name, self.name))
    existing_dtype = self.local[column_name].dtype
    if series.dtype != existing_dtype:
        if not cast:
            raise ValueError(
                "Data type mismatch, existing:{}, update:{}".format(
                    existing_dtype, series.dtype))
        series = series.astype(existing_dtype)
    # Aligned assignment: only rows present in series.index are touched.
    self.local.loc[series.index, column_name] = series
Index values must match in both column and series. Optionally
casts data type to match the existing column.
Parameters
---------------
column_name : str
series : panas.Series
cast: bool, optional, default False |
def read(self, size=None):
    """Read and return up to *size* bytes.

    Parameters:
        size - Optional number of bytes to read; if None or negative,
               all remaining bytes in the file are read.
    Returns:
        At most *size* bytes, or the rest of the data left in the file,
        whichever is smaller. At or past EOF, returns an empty bytestring.
    """
    if not self._open:
        raise pycdlibexception.PyCdlibInvalidInput('I/O operation on closed file.')
    if self._offset >= self._length:
        return b''
    if size is None or size < 0:
        return self.readall()
    # Never read past the logical end of this file's extent.
    to_read = min(self._length - self._offset, size)
    chunk = self._fp.read(to_read)
    self._offset += to_read
    return chunk
return data | A method to read and return up to size bytes.
Parameters:
size - Optional parameter to read size number of bytes; if None or
negative, all remaining bytes in the file will be read
Returns:
The number of bytes requested or the rest of the data left in the file,
whichever is smaller. If the file is at or past EOF, returns an empty
bytestring. |
def find_args(event, arg_type):
    """Return IDs of all arguments of a given type.

    :param event: event dict with an optional 'arguments' list.
    :param arg_type: the argument 'type' value to match.
    :return: list of '@id' values (empty when nothing matches).
    """
    return [arg['value']['@id']
            for arg in event.get('arguments', {})
            if arg['type'] == arg_type]
def _check_base_classes(base_classes, check_for_type):
    """Indicate whether ``check_for_type`` exists in ``base_classes``.

    Walks the inheritance tree depth-first, matching on class __name__.
    """
    for base in base_classes:
        if base.__name__ == check_for_type:
            return True
        if _check_base_classes(base.__bases__, check_for_type):
            return True
    return False
def _json_column(**kwargs):
    """Return a nullable JSON column.

    Uses the native PostgreSQL JSON type (storing Python None as SQL
    NULL) and falls back to the generic JSONType on other backends.

    :param kwargs: extra keyword arguments forwarded to ``db.Column``.
    """
    return db.Column(
        JSONType().with_variant(
            postgresql.JSON(none_as_null=True),
            'postgresql',
        ),
        nullable=True,
        **kwargs
    )
def get_pool_context(self):
    """Build the context dict for the WF pool.

    Maps each lane id to its role (lazily resolved), plus 'self' for the
    current role.

    Returns:
        Context dict.
    """
    context = {self.current.lane_id: self.current.role, 'self': self.current.role}
    for lane_id, role_id in self.current.pool.items():
        if role_id:
            # BUG FIX: bind role_id as a default argument. The previous
            # bare `lambda:` captured the loop variable late, so every
            # proxy resolved the LAST role_id in the pool.
            context[lane_id] = lazy_object_proxy.Proxy(
                lambda role_id=role_id: self.role_model(super_context).objects.get(role_id))
    return context
Returns:
Context dict. |
async def install_mediaroom_protocol(responses_callback, box_ip=None):
    """Install an asyncio protocol to process NOTIFY messages.

    :param responses_callback: callable invoked by the protocol for each
        received response.
    :param box_ip: optional IP of a specific set-top box to listen to.
    :return: the created MediaroomProtocol instance.
    """
    from . import version
    _LOGGER.debug(version)
    loop = asyncio.get_event_loop()
    mediaroom_protocol = MediaroomProtocol(responses_callback, box_ip)
    sock = create_socket()
    # Bind the (single) protocol instance to the datagram endpoint.
    await loop.create_datagram_endpoint(lambda: mediaroom_protocol, sock=sock)
    return mediaroom_protocol
def build(self, x, h, mask=None):
    """Build the GRU cell.

    :param x: input at the current step.
    :param h: previous hidden state.
    :param mask: optional mask tensor; where it is 0, the previous state
        is carried through unchanged.
    :return: the next hidden state.
    """
    # Project input and state once each, then split into the three
    # per-gate slices along axis 1.
    xw = tf.split(tf.matmul(x, self.w_matrix) + self.bias, 3, 1)
    hu = tf.split(tf.matmul(h, self.U), 3, 1)
    r = tf.sigmoid(xw[0] + hu[0])  # reset gate
    z = tf.sigmoid(xw[1] + hu[1])  # update gate
    h1 = tf.tanh(xw[2] + r * hu[2])  # candidate state
    next_h = h1 * (1 - z) + h * z
    if mask is not None:
        # Masked positions keep the old state.
        next_h = next_h * mask + h * (1 - mask)
    return next_h
def __create_header(self, command, command_string, session_id, reply_id):
    """Pack the parts that make up a packet together into a byte string.

    Builds the 8-byte little-endian header (command, checksum,
    session id, reply id), computes the checksum over a zero-checksum
    draft of the packet, then re-packs with the real checksum and the
    incremented reply id.
    """
    # First pass with checksum=0: the checksum is computed over this draft.
    buf = pack('<4H', command, 0, session_id, reply_id) + command_string
    buf = unpack('8B' + '%sB' % len(command_string), buf)
    checksum = unpack('H', self.__create_checksum(buf))[0]
    # Reply ids wrap around at USHRT_MAX.
    reply_id += 1
    if reply_id >= const.USHRT_MAX:
        reply_id -= const.USHRT_MAX
    buf = pack('<4H', command, checksum, session_id, reply_id)
    return buf + command_string
async def jsk_vc_pause(self, ctx: commands.Context):
    """Pauses a running audio source, if there is one."""
    voice = ctx.guild.voice_client
    # NOTE(review): assumes a voice client exists — `voice` is None when
    # the bot is not connected; confirm an upstream check guarantees it.
    if voice.is_paused():
        return await ctx.send("Audio is already paused.")
    voice.pause()
    await ctx.send(f"Paused audio in {voice.channel.name}.")
def transformer_librispeech_tpu_v2():
    """HParams for training ASR model on Librispeech on TPU v2."""
    hparams = transformer_librispeech_v2()
    update_hparams_for_tpu(hparams)
    # Override the batch size set by update_hparams_for_tpu.
    hparams.batch_size = 16
    librispeech.set_librispeech_length_hparams(hparams)
    return hparams
def set_description(self):
    """Set the node description.

    Routers get "<type> <model>"; all other device types use the
    device's own 'desc' field.
    """
    info = self.device_info
    if info['type'] == 'Router':
        self.node['description'] = '%s %s' % (info['type'], info['model'])
    else:
        self.node['description'] = info['desc']
def _create_service(self, parameters=None, **kwargs):
    """Create a Cloud Foundry service that has custom parameters.

    :param parameters: dict of service-specific parameters; defaults to
        an empty dict. (Previously a mutable default argument ``{}`` —
        shared between calls and mutable by the callee — was used; a
        None sentinel avoids that pitfall.)
    :param kwargs: forwarded to ``create_service``.
    :return: result of ``self.service.create_service``.
    """
    if parameters is None:
        parameters = {}
    logging.debug("_create_service()")
    logging.debug(str.join(',', [self.service_name, self.plan_name,
                                 self.name, str(parameters)]))
    return self.service.create_service(self.service_name, self.plan_name,
                                       self.name, parameters, **kwargs)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.