| code | docstring |
|---|---|
def check_if_ok_to_update(self):
    current_time = int(time.time())
    last_refresh = self.last_refresh
    if last_refresh is None:
        last_refresh = 0
    if current_time >= (last_refresh + self.refresh_rate):
        return True
    return False | Check whether it is OK to perform an HTTP request. |
def release(no_master, release_type):
    try:
        locale.setlocale(locale.LC_ALL, '')
    except locale.Error:
        print("Warning: Unable to set locale. Expect encoding problems.")
    git.is_repo_clean(master=(not no_master))
    config = utils.get_config()
    config.update(utils.get_dist_metadata())
    config['project... | Releases a new version |
def stratify_by_features(features, n_strata, **kwargs):
    n_items = features.shape[0]
    km = KMeans(n_clusters=n_strata, **kwargs)
    allocations = km.fit_predict(X=features)
    return Strata(allocations) | Stratify by clustering the items in feature space
Parameters
----------
features : array-like, shape=(n_items, n_features)
    feature matrix for the pool, where rows correspond to items and columns
    correspond to features.
n_strata : int
    number of strata to create.
**kwargs :
    ... |
def sub_working_days(self, day, delta,
                     extra_working_days=None, extra_holidays=None,
                     keep_datetime=False):
    delta = abs(delta)
    return self.add_working_days(
        day, -delta,
        extra_working_days, extra_holidays, keep_datetime=keep_dateti... | Subtract `delta` working days from the date.
This method is a shortcut / helper. Users may want to use either::
    cal.add_working_days(my_date, -7)
    cal.sub_working_days(my_date, 7)
The other parameters are to be used exactly as in the
``add_working_days`` method.
... |
def _set_suffix_links(self):
    self._suffix_links_set = True
    for current, parent in self.bfs():
        if parent is None:
            continue
        current.longest_prefix = parent.longest_prefix
        if parent.has_value:
            current.longest_prefix = parent
        if... | Sets all suffix links in all nodes in this trie. |
def compose(*fs):
    ensure_argcount(fs, min_=1)
    fs = list(imap(ensure_callable, fs))
    if len(fs) == 1:
        return fs[0]
    if len(fs) == 2:
        f1, f2 = fs
        return lambda *args, **kwargs: f1(f2(*args, **kwargs))
    if len(fs) == 3:
        f1, f2, f3 = fs
        return lambda *args, **kwargs:... | Creates composition of the functions passed in.
:param fs: One-argument functions, with the possible exception of last one
    that can accept arbitrary arguments
:return: Function returning a result of functions from ``fs``
    applied consecutively to the argument(s), in reverse order |
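A quick usage sketch of the contract described above, with plain functions (the library's ensure_argcount/ensure_callable/imap helpers are assumed to be importable and are not needed to see the behavior):

def double(x):
    return x * 2

def inc(x):
    return x + 1

# Functions apply right to left: inc runs first, then double.
assert compose(double, inc)(3) == 8  # double(inc(3))
assert compose(inc, double)(3) == 7  # inc(double(3))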
def set(self, instance, value, **kw):
    value = str(value).decode("base64")
    if "filename" not in kw:
        logger.debug("FileFieldManager::set: No Filename detected "
                     "-> using title or id")
        kw["filename"] = kw.get("id") or kw.get("title")
    self._set(insta... | Decodes the base64 value and sets the file object |
def ActiveDates(self):
    (earliest, latest) = self.GetDateRange()
    if earliest is None:
        return []
    dates = []
    date_it = util.DateStringToDateObject(earliest)
    date_end = util.DateStringToDateObject(latest)
    delta = datetime.timedelta(days=1)
    while date_it <= date_end:
        date_it_string = ... | Return dates this service period is active as a list of "YYYYMMDD". |
def sync(self, data):
    for k, v in data.get('billing', {}).items():
        setattr(self, k, v)
    self.card_number = data.get('credit_card', {}).get('card_number',
                                                    self.card_number)
    self.save(sync=False) | Overwrite local customer payment profile data with remote data |
def apply(self, arr):
    for t in self.cpu_transforms:
        arr = t.apply(arr)
    return arr | Apply all CPU transforms on an array. |
def log_likelihood(covariance, precision):
    assert covariance.shape == precision.shape
    dim, _ = precision.shape
    log_likelihood_ = (
        -np.sum(covariance * precision)
        + fast_logdet(precision)
        - dim * np.log(2 * np.pi)
    )
    log_likelihood_ /= 2.
    return log_likelihood_ | Computes the log-likelihood between the covariance and precision
estimate.
Parameters
----------
covariance : 2D ndarray (n_features, n_features)
    Maximum Likelihood Estimator of covariance
precision : 2D ndarray (n_features, n_features)
    The precision matrix of the covariance model ... |
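A quick numerical check of the formula above with identity matrices; this sketch assumes `np` is NumPy and `fast_logdet` is `sklearn.utils.extmath.fast_logdet` (the snippet does not show its import):

import numpy as np
from sklearn.utils.extmath import fast_logdet  # assumed origin of fast_logdet

cov = np.eye(2)
prec = np.eye(2)
# -sum(I * I) = -2, logdet(I) = 0, dim * log(2*pi) ~= 3.6758,
# so the result is (-2 + 0 - 3.6758) / 2 ~= -2.8379
assert np.isclose(log_likelihood(cov, prec),
                  (-2.0 - 2.0 * np.log(2.0 * np.pi)) / 2.0)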
def setup_default_permissions(session, instance):
    if instance not in session.new or not isinstance(instance, Entity):
        return
    if not current_app:
        return
    _setup_default_permissions(instance) | Setup default permissions on newly created entities according to
:attr:`Entity.__default_permissions__`. |
def RattributesBM(dataset, database, host=rbiomart_host):
    biomaRt = importr("biomaRt")
    ensemblMart = biomaRt.useMart(database, host=host)
    ensembl = biomaRt.useDataset(dataset, mart=ensemblMart)
    print(biomaRt.listAttributes(ensembl)) | Lists BioMart attributes through an RPY2 connection.
:param dataset: a dataset listed in RdatasetsBM()
:param database: a database listed in RdatabasesBM()
:param host: address of the host server, default='www.ensembl.org'
:returns: nothing |
def delete_file_or_tree(*args):
    for f in args:
        try:
            os.unlink(f)
        except OSError:
            shutil.rmtree(f, ignore_errors=True) | For every path in args, try to delete it as a file or a directory
tree. Ignores deletion errors. |
def _estimate_runner_memory(json_file):
    with open(json_file) as in_handle:
        sinfo = json.load(in_handle)
    num_parallel = 1
    for key in ["config__algorithm__variantcaller", "description"]:
        item_counts = []
        n = 0
        for val in (sinfo.get(key) or []):
            n += 1
            if... | Estimate Java memory requirements based on number of samples.
A rough approach to selecting correct allocated memory for Cromwell. |
def min_ems(self, value: float) -> 'Size':
    raise_not_number(value)
    self.minimum = '{}em'.format(value)
    return self | Set the minimum size in ems. |
def generate_all_aliases(fieldfile, include_global):
    all_options = aliases.all(fieldfile, include_global=include_global)
    if all_options:
        thumbnailer = get_thumbnailer(fieldfile)
        for key, options in six.iteritems(all_options):
            options['ALIAS'] = key
            thumbnailer.get_thumbna... | Generate all of a file's aliases.
:param fieldfile: A ``FieldFile`` instance.
:param include_global: A boolean which determines whether to generate
    thumbnails for project-wide aliases in addition to field, model, and
    app specific aliases. |
def add_bookmark(self, url, favorite=False, archive=False, allow_duplicates=True):
    rdb_url = self._generate_url('bookmarks')
    params = {
        "url": url,
        "favorite": int(favorite),
        "archive": int(archive),
        "allow_duplicates": int(allow_duplicates)
    }
    ... | Adds given bookmark to the authenticated user.
:param url: URL of the article to bookmark
:param favorite: whether or not the bookmark should be favorited
:param archive: whether or not the bookmark should be archived
:param allow_duplicates: whether or not to allow duplicate bookmarks ... |
def set(self, document_data, merge=False):
    batch = self._client.batch()
    batch.set(self, document_data, merge=merge)
    write_results = batch.commit()
    return _first_write_result(write_results) | Replace the current document in the Firestore database.
A write ``option`` can be specified to indicate preconditions of
the "set" operation. If no ``option`` is specified and this document
doesn't exist yet, this method will create it.
Overwrites all content for the document with the ... |
def set_block_name(self, index, name):
    if name is None:
        return
    self.GetMetaData(index).Set(vtk.vtkCompositeDataSet.NAME(), name)
    self.Modified() | Set a block's string name at the specified index |
def dimension(self):
    self.dim = 0
    self._slices = {}
    for stochastic in self.stochastics:
        if isinstance(stochastic.value, np.matrix):
            p_len = len(stochastic.value.A.ravel())
        elif isinstance(stochastic.value, np.ndarray):
            p_len = len(stochasti... | Compute the dimension of the sampling space and identify the slices
belonging to each stochastic. |
def write_values(self):
    return {k: v.value for k, v in self._inputs.items() if not v.is_secret and not v.is_empty(False)} | Return the dictionary with which to write values |
def push_tx(self, crypto, tx_hex):
    url = "%s/pushtx" % self.base_url
    return self.post_url(url, {'hex': tx_hex}).content | This method is untested. |
def GetConfiguredUsers(self):
    if os.path.exists(self.google_users_file):
        users = open(self.google_users_file).readlines()
    else:
        users = []
    return [user.strip() for user in users] | Retrieve the list of configured Google user accounts.
Returns:
    list, the username strings of users configured by Google. |
def extract_date(cls, date_str):
    if not date_str:
        raise DateTimeFormatterException('date_str must be a valid string {}.'.format(date_str))
    try:
        return cls._extract_timestamp(date_str, cls.DATE_FORMAT)
    except (TypeError, ValueError):
        raise DateTimeFormatterExcepti... | Tries to extract a `datetime` object from the given string, expecting
date information only.
Raises `DateTimeFormatterException` if the extraction fails. |
def _get_basin_response_term(self, C, z2pt5):
    f_sed = np.zeros(len(z2pt5))
    idx = z2pt5 < 1.0
    f_sed[idx] = (C["c14"] + C["c15"] * float(self.CONSTS["SJ"])) *\
        (z2pt5[idx] - 1.0)
    idx = z2pt5 > 3.0
    f_sed[idx] = C["c16"] * C["k3"] * np.exp(-0.75) *\
        (1.0 - np.exp... | Returns the basin response term defined in equation 20 |
def _get_case_file_paths(tmp_dir, case, training_fraction=0.95):
    paths = tf.gfile.Glob("%s/*.jpg" % tmp_dir)
    if not paths:
        raise ValueError("Search of tmp_dir (%s) " % tmp_dir,
                         "for subimage paths yielded an empty list, ",
                         "can't proceed with returning training/eval spl... | Obtain a list of image paths corresponding to training or eval case.
Args:
    tmp_dir: str, the root path to which raw images were written, at the
        top level having meta/ and raw/ subdirs.
    case: bool, whether obtaining file paths for training (true) or eval
        (false).
    training_fraction: float, the ... |
def debug(self, msg, *args, **kwargs) -> Task:
    return self._make_log_task(logging.DEBUG, msg, args, **kwargs) | Log msg with severity 'DEBUG'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
await logger.debug("Houston, we have a %s", "thorny problem", exc_info=1) |
def calculate_output(self, variable_name, period):
    variable = self.tax_benefit_system.get_variable(variable_name, check_existence=True)
    if variable.calculate_output is None:
        return self.calculate(variable_name, period)
    return variable.calculate_output(self, variable_name, period) | Calculate the value of a variable using the ``calculate_output`` attribute of the variable. |
def evaluate_dir(sample_dir):
    results = []
    if sample_dir[-1] == "/":
        sample_dir = sample_dir[:-1]
    for filename in glob.glob("%s/*.inkml" % sample_dir):
        results.append(evaluate_inkml(filename))
    return results | Evaluate all recordings in `sample_dir`.
Parameters
----------
sample_dir : string
    The path to a directory with *.inkml files.
Returns
-------
list of dictionaries
    Each dictionary contains the keys 'filename' and 'results', where
    'results' itself is a list of dictionari... |
def cleanup(logger, *args):
    for obj in args:
        if obj is not None and hasattr(obj, 'cleanup'):
            try:
                obj.cleanup()
            except NotImplementedError:
                pass
            except Exception:
                logger.exception("Unable to cleanup %s object", obj) | Environment's cleanup routine. |
def compress(data, compresslevel=9):
    buf = BytesIO()
    with open_fileobj(buf, 'wb', compresslevel) as ogz:
        if six.PY3 and not isinstance(data, bytes):
            data = data.encode(__salt_system_encoding__)
        ogz.write(data)
    compressed = buf.getvalue()
    return compressed | Return the data compressed at the given gzip compression level. |
def prepare(self, data_batch, sparse_row_id_fn=None):
    super(SVRGModule, self).prepare(data_batch, sparse_row_id_fn=sparse_row_id_fn)
    self._mod_aux.prepare(data_batch, sparse_row_id_fn=sparse_row_id_fn) | Prepares two modules for processing a data batch.
Usually involves switching bucket and reshaping.
For modules that contain `row_sparse` parameters in KVStore,
it prepares the `row_sparse` parameters based on the sparse_row_id_fn.
When KVStore is used to update parameters for multi-dev... |
def statistic_recommend(classes, P):
    if imbalance_check(P):
        return IMBALANCED_RECOMMEND
    if binary_check(classes):
        return BINARY_RECOMMEND
    return MULTICLASS_RECOMMEND | Return recommended parameters that are better suited to the input dataset characteristics.
:param classes: all class names
:type classes: list
:param P: condition positive
:type P: dict
:return: recommendation_list as list |
def _load_scalar_fit(self, fit_key=None, h5file=None, fit_data=None):
    if (fit_key is None) ^ (h5file is None):
        raise ValueError("Either specify both fit_key and h5file, or"
                         " neither")
    if not ((fit_key is None) ^ (fit_data is None)):
        raise ValueError("Specify exac... | Loads a single fit |
async def lock(self, container=None):
    "Wait for lock acquire"
    if container is None:
        container = RoutineContainer.get_container(self.scheduler)
    if self.locked:
        pass
    elif self.lockroutine:
        await LockedEvent.createMatcher(self)
    else:
        ... | Wait for lock acquire |
def restore(self):
    'Restore the file'
    if not self.deleted:
        raise JFSError('Tried to restore a not deleted file')
    raise NotImplementedError('Jottacloud has changed the restore API. Please use jottacloud.com in a browser, for now.')
    url = 'https://www.jottacloud.com/rest/webres... | Restore the file |
def iam_device_info(self, apdu):
    if _debug: DeviceInfoCache._debug("iam_device_info %r", apdu)
    if not isinstance(apdu, IAmRequest):
        raise ValueError("not an IAmRequest: %r" % (apdu,))
    device_instance = apdu.iAmDeviceIdentifier[1]
    device_info = self.cache.get(device_instance, ... | Create a device information record based on the contents of an
IAmRequest and put it in the cache. |
def get_point_cloud(self, pair):
    disparity = self.block_matcher.get_disparity(pair)
    points = self.block_matcher.get_3d(disparity,
                                      self.calibration.disp_to_depth_mat)
    colors = cv2.cvtColor(pair[0], cv2.COLOR_BGR2RGB)
    return PointCloud(points, col... | Get 3D point cloud from image pair. |
def send_text(self, text, **options):
    return self.bot.send_message(self.id, text, **options) | Send a text message to the chat.
:param str text: Text of the message to send
:param options: Additional sendMessage options (see
    https://core.telegram.org/bots/api#sendmessage) |
def generate_proto(source, require=True):
    if not require and not os.path.exists(source):
        return
    output = source.replace(".proto", "_pb2.py").replace("../src/", "")
    if (not os.path.exists(output) or
            (os.path.exists(source) and
             os.path.getmtime(source) > os.path.getmtime(output))):
        print("Gen... | Invokes the Protocol Compiler to generate a _pb2.py from the given
.proto file. Does nothing if the output already exists and is newer than
the input. |
def race(iterable, loop=None, timeout=None, *args, **kw):
    assert_iter(iterable=iterable)
    coros = []
    resolved = False
    result = None
    @asyncio.coroutine
    def resolver(index, coro):
        nonlocal result
        nonlocal resolved
        value = yield from coro
        if not resolved:
            ... | Runs coroutines from a given iterable concurrently without waiting until
the previous one has completed.
Once any of the tasks completes, the main coroutine
is immediately resolved, yielding the first resolved value.
All coroutines will be executed in the same loop.
This function is a coroutine.
... |
def stretch(image, mask=None):
    image = np.array(image, float)
    if np.product(image.shape) == 0:
        return image
    if mask is None:
        minval = np.min(image)
        maxval = np.max(image)
        if minval == maxval:
            if minval < 0:
                return np.zeros_like(image)
            e... | Normalize an image to make the minimum zero and maximum one
image - pixel data to be normalized
mask - optional mask of relevant pixels. None = don't mask
returns the stretched image |
def cn(shape, dtype=None, impl='numpy', **kwargs):
    cn_cls = tensor_space_impl(impl)
    if dtype is None:
        dtype = cn_cls.default_dtype(ComplexNumbers())
    cn = cn_cls(shape=shape, dtype=dtype, **kwargs)
    if not cn.is_complex:
        raise ValueError('data type {!r} not a complex floating-point type.'
                         ... | Return a space of complex tensors.
Parameters
----------
shape : positive int or sequence of positive ints
    Number of entries per axis for elements in this space. A
    single integer results in a space with 1 axis.
dtype : optional
    Data type of each element. Can be provided in any w... |
def ns(ns):
    def setup_ns(cls):
        setattr(cls, ENTITY_DEFAULT_NS_ATTR, ns)
        return cls
    return setup_ns | Class decorator that sets default tags namespace to use with its
instances. |
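Typical usage of such a decorator factory; a hypothetical sketch, since ENTITY_DEFAULT_NS_ATTR and the consuming code are not shown in the snippet:

ENTITY_DEFAULT_NS_ATTR = '_default_ns'  # assumed attribute name

@ns('http://example.com/schema')  # hypothetical namespace URI
class Entity:
    pass

# The decorator stamped the namespace onto the class, so every
# instance resolves its default tag namespace through it:
assert getattr(Entity, ENTITY_DEFAULT_NS_ATTR) == 'http://example.com/schema'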
def _maybe_apply_time_shift(da, time_offset=None, **DataAttrs):
    if time_offset is not None:
        time = times.apply_time_offset(da[TIME_STR], **time_offset)
        da[TIME_STR] = time
    else:
        if DataAttrs['dtype_in_time'] == 'inst':
            if DataAttrs['intvl_in'].endswith... | Correct off-by-one error in GFDL instantaneous model data.
Instantaneous data that is outputted by GFDL models is generally off by
one timestep. For example, a netCDF file that is supposed to
correspond to 6 hourly data for the month of January, will have its
last time value be in Febr... |
def parse_command(self, string):
    possible_command, _, rest = string.partition(" ")
    possible_command = possible_command.lower()
    if possible_command not in self.commands:
        return None, None
    event = self.commands[possible_command]["event"]
    args = shlex.split(rest.strip())... | Parse out any possible valid command from an input string. |
def _get_initial_request(self):
    if self._leaser is not None:
        lease_ids = list(self._leaser.ack_ids)
    else:
        lease_ids = []
    request = types.StreamingPullRequest(
        modify_deadline_ack_ids=list(lease_ids),
        modify_deadline_seconds=[self.ack_deadline] * le... | Return the initial request for the RPC.
This defines the initial request that must always be sent to Pub/Sub
immediately upon opening the subscription.
Returns:
    google.cloud.pubsub_v1.types.StreamingPullRequest: A request
        suitable for being the first request on the stre... |
def convert_dicts(d, to_class=AttrDictWrapper, from_class=dict):
    d_ = to_class()
    for key, value in d.iteritems():
        if isinstance(value, from_class):
            d_[key] = convert_dicts(value, to_class=to_class,
                                   from_class=from_class)
        else:
            d_[key] =... | Recursively convert dict and UserDict types.
Note that `d` is unchanged.
Args:
    to_class (type): Dict-like type to convert values to, usually UserDict
        subclass, or dict.
    from_class (type): Dict-like type to convert values from. If a tuple,
        multiple types are converted.
... |
def get_medium_attachment(self, name, controller_port, device):
    if not isinstance(name, basestring):
        raise TypeError("name can only be an instance of type basestring")
    if not isinstance(controller_port, baseinteger):
        raise TypeError("controller_port can only be an instance of typ... | Returns a medium attachment which corresponds to the controller with
the given name, on the given port and device slot.
in name of type str
in controller_port of type int
in device of type int
return attachment of type :class:`IMediumAttachment`
raises :class:`VBoxEr... |
async def body(self):
    if not isinstance(self._body, bytes):
        self._body = await self._body
    return self._body | A helper function which blocks until the body has been read
completely.
Returns the bytes of the body which the user should decode.
If the request does not have a body part (i.e. it is a GET
request) this function returns None. |
def _pprint(dic):
    for key, value in dic.items():
        print(" {0}: {1}".format(key, value)) | Prints a dictionary with one indentation level |
def friendly_load(parser, token):
    bits = token.contents.split()
    if len(bits) >= 4 and bits[-2] == "from":
        name = bits[-1]
        try:
            lib = find_library(parser, name)
            subset = load_from_library(lib, name, bits[1:-2])
            parser.add_library(subset)
        except Template... | Tries to load a custom template tag set. Non-existing tag libraries
are ignored.
This means that, if used in conjunction with ``if_has_tag``, you can try to
load the comments template tag library to enable comments even if the
comments framework is not installed.
For example::
    {% load fri... |
def fit(self, X, y, **kwargs):
    self.estimator.fit(X, y, **kwargs)
    self.draw()
    return self | A simple pass-through method; calls fit on the estimator and then
draws the alpha-error plot. |
def tarball_files(work_dir, tar_name, uuid=None, files=None):
    with tarfile.open(os.path.join(work_dir, tar_name), 'w:gz') as f_out:
        for fname in files:
            if uuid:
                f_out.add(os.path.join(work_dir, fname), arcname=uuid + '.' + fname)
            else:
                f_out.add(os.pat... | Tars a group of files together into a tarball
work_dir: str Current Working Directory
tar_name: str Name of tarball
uuid: str UUID to stamp files with
files: str(s) List of filenames to place in the tarball from working directory |
def safe_request(fct):
    def inner(*args, **kwargs):
        try:
            _data = fct(*args, **kwargs)
        except requests.exceptions.ConnectionError as error:
            return {'error': str(error), 'status': 404}
        if _data.ok:
            if _data.content:
                safe_data = _data.json()
                ... | Return json messages instead of raising errors |
def write_to_fitsfile(self, fitsfile, clobber=True):
    from fermipy.skymap import Map
    hpx_header = self._hpx.make_header()
    index_map = Map(self.ipixs, self.wcs)
    mult_map = Map(self.mult_val, self.wcs)
    prim_hdu = index_map.create_primary_hdu()
    mult_hdu = index_map.create_ima... | Write this mapping to a FITS file, to avoid having to recompute it |
def install_via_requirements(requirements_str, force=False):
    if requirements_str[0] == '@':
        path = requirements_str[1:]
        if os.path.isfile(path):
            yaml_data = load_yaml(path)
            if 'packages' not in yaml_data.keys():
                raise CommandException('Error in {filename}: mis... | Download multiple Quilt data packages via quilt.yml requirements file. |
def human_size(bytes, units=[' bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']):
    return str(bytes) + units[0] if bytes < 1024 else human_size(bytes >> 10, units[1:]) | Returns a human readable string representation of bytes |
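Worked example of the recursion above: each step shifts right by 10 bits (floor-division by 1024) and advances one unit in the sliced list:

# 123456789 -> human_size(120563, ['KB', ...]) -> human_size(117, ['MB', ...])
# 117 < 1024, so the recursion stops and yields '117MB'
assert human_size(123456789) == '117MB'
assert human_size(512) == '512 bytes'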
def category_count(self):
    category_dict = self.categories
    count_dict = {category: len(
        category_dict[category]) for category in category_dict}
    return count_dict | Returns a dict mapping each category in `categories` to its number of items. |
def _filter_filecommands(self, filecmd_iter):
    if self.includes is None and self.excludes is None:
        return list(filecmd_iter())
    result = []
    for fc in filecmd_iter():
        if (isinstance(fc, commands.FileModifyCommand) or
                isinstance(fc, commands.FileDeleteCommand)... | Return the filecommands filtered by includes & excludes.
:return: a list of FileCommand objects |
def notify_change(self, change):
    name = change['name']
    if self.comm is not None and self.comm.kernel is not None:
        if name in self.keys and self._should_send_property(name, getattr(self, name)):
            self.send_state(key=name)
    super(Widget, self).notify_change(change) | Called when a property has changed. |
def tanimoto_coefficient(a, b):
    # The Python 2 tuple-unpacking lambdas are a syntax error in Python 3;
    # rewritten with generator expressions, same formula:
    # T = a.b / (|a|^2 + |b|^2 - a.b)
    dot = sum(float(x) * float(y) for x, y in zip(a, b))
    return dot / (
        -dot
        + sum(float(x) ** 2 for x in a)
        + sum(float(y) ** 2 for y in b)) | Measured similarity between two points in a multi-dimensional space.
Returns:
    1.0 if the two points completely overlap,
    0.0 if the two points are infinitely far apart. |
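A quick sanity check of the two boundary cases named in the docstring:

a = [1.0, 2.0, 3.0]
print(tanimoto_coefficient(a, a))          # identical points -> 1.0
print(tanimoto_coefficient(a, [0, 0, 0]))  # zero overlap -> 0.0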
def dump_engines(target=sys.stderr):
    print("Available templating engines:", file=target)
    width = max(len(engine) for engine in engines.engines)
    for handle, engine in sorted(engines.engines.items()):
        # maxsplit=1 takes the first docstring line; 0 would return the whole string
        description = engine.__doc__.split('\n', 1)[0]
        print(" %-*s - %s" % (width, handle, descri... | Print successfully imported templating engines. |
def get_points(self, measurement=None, tags=None):
    if not isinstance(measurement,
                      (bytes, type(b''.decode()), type(None))):
        raise TypeError('measurement must be a str or None')
    for series in self._get_series():
        series_name = series.get('measurement',
                                 ... | Return a generator for all the points that match the given filters.
:param measurement: The measurement name
:type measurement: str
:param tags: Tags to look for
:type tags: dict
:return: Points generator |
def split_bits(value, *bits):
    result = []
    for b in reversed(bits):
        mask = (1 << b) - 1
        result.append(value & mask)
        value = value >> b
    assert value == 0
    result.reverse()
    return result | Split integer value into list of ints, according to `bits` list.
For example, split_bits(0x1234, 4, 8, 4) == [0x1, 0x23, 0x4] |
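The inverse operation is a left-shift-and-or fold, which makes for a handy round-trip check; join_bits below is a hypothetical helper, not part of the source:

def join_bits(parts, *bits):
    # Reassemble an integer from fields produced by split_bits.
    value = 0
    for part, b in zip(parts, bits):
        value = (value << b) | part
    return value

assert split_bits(0x1234, 4, 8, 4) == [0x1, 0x23, 0x4]
assert join_bits([0x1, 0x23, 0x4], 4, 8, 4) == 0x1234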
def is_excluded_path(args, filepath):
    for regexp_exclude_path in args.regexp:
        if re.match(regexp_exclude_path, filepath):
            return True
    abspath = os.path.abspath(filepath)
    if args.include:
        out_of_include_dirs = True
        for incl_path in args.include:
            absolute_includ... | Returns true if the filepath is under one of the exclude paths. |
def search(self, **kwargs):
    return super(ApiVlan, self).get(self.prepare_url('api/v3/vlan/',
                                                     kwargs)) | Method to search VLANs based on extends search.
:param search: Dict containing QuerySets to find VLANs.
:param include: Array containing fields to include on response.
:param exclude: Array containing fields to exclude on response.
:param fields: Array containing fields to override d... |
def expected_h(nvals, fit="RANSAC"):
    rsvals = [expected_rs(n) for n in nvals]
    poly = poly_fit(np.log(nvals), np.log(rsvals), 1, fit=fit)
    return poly[0] | Uses expected_rs to calculate the expected value for the Hurst exponent h
based on the values of n used for the calculation.
Args:
    nvals (iterable of int):
        the values of n used to calculate the individual (R/S)_n
KWargs:
    fit (str):
        the fitting method to use for the line fit, either 'poly' fo... |
def _process_string_token(self, token, start_row, start_col):
    for i, char in enumerate(token):
        if char in QUOTES:
            break
    norm_quote = token[i:]
    if len(norm_quote) >= 3 and norm_quote[:3] in TRIPLE_QUOTE_OPTS.values():
        self._tokenized_triple_quotes[start_row... | Internal method for identifying and checking string tokens
from the token stream.
Args:
    token: the token to check.
    start_row: the line on which the token was found.
    start_col: the column on which the token was found. |
def dataset_list_cli(self,
                     sort_by=None,
                     size=None,
                     file_type=None,
                     license_name=None,
                     tag_ids=None,
                     search=None,
                     user=None,
                     ... | a wrapper to datasets_list for the client. Additional parameters
are described here, see dataset_list for others.
Parameters
==========
sort_by: how to sort the result, see valid_sort_bys for options
size: the size of the dataset, see valid_sizes for string o... |
def close(self):
    if self.message_handler:
        self.message_handler.destroy()
        self.message_handler = None
    self._shutdown = True
    if self._keep_alive_thread:
        self._keep_alive_thread.join()
        self._keep_alive_thread = None
    if not self._session:
        ... | Close the client. This includes closing the Session
and CBS authentication layer as well as the Connection.
If the client was opened using an external Connection,
this will be left intact.
No further messages can be sent or received and the client
cannot be re-opened.
A... |
def monitor(app):
    heroku_app = HerokuApp(dallinger_uid=app)
    webbrowser.open(heroku_app.dashboard_url)
    webbrowser.open("https://requester.mturk.com/mturk/manageHITs")
    heroku_app.open_logs()
    check_call(["open", heroku_app.db_uri])
    while _keep_running():
        summary = get_summary(app)
        c... | Set up application monitoring. |
def multi_split(txt, delims):
    res = [txt]
    for delimChar in delims:
        txt, res = res, []
        for word in txt:
            if len(word) > 1:
                res += word.split(delimChar)
    return res | split by multiple delimiters |
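Worked example of the split above; note that the len(word) > 1 guard silently drops any intermediate token of a single character:

print(multi_split("foo,bar;baz", ",;"))  # ['foo', 'bar', 'baz']
print(multi_split("a,bb;cc", ",;"))      # ['bb', 'cc'] - the 1-char token 'a' is dropped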
def derive_key_block(self, master_secret, server_random,
                     client_random, req_len):
    seed = server_random + client_random
    if self.tls_version <= 0x0300:
        return self.prf(master_secret, seed, req_len)
    else:
        return self.prf(master_secret, b"key expansio... | Perform the derivation of master_secret into a key_block of req_len
requested length. See RFC 5246, section 6.3. |
def get_templates_per_page(self, per_page=1000, page=1, params=None):
    return self._get_resource_per_page(resource=TEMPLATES, per_page=per_page, page=page, params=params) | Get templates per page
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:param params: Search parameters. Default: {}
:return: list |
def list_inactive_vms(**kwargs):
    vms = []
    conn = __get_conn(**kwargs)
    for dom in _get_domain(conn, iterable=True, active=False):
        vms.append(dom.name())
    conn.close()
    return vms | Return a list of names for inactive virtual machines on the minion
:param connection: libvirt connection URI, overriding defaults
    .. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
    .. versionadded:: 2019.2.0
:param password: password to connect with, ... |
def join_group_with_token(self, group_hashtag, group_jid, join_token):
    log.info("[+] Trying to join the group '{}' with JID {}".format(group_hashtag, group_jid))
    return self._send_xmpp_element(roster.GroupJoinRequest(group_hashtag, join_token, group_jid)) | Tries to join a specific group, using a cryptographic token that was received earlier from a search
:param group_hashtag: The public hashtag of the group into which to join (like '#Music')
:param group_jid: The JID of the same group
:param join_token: a token that can be extracted in the c... |
def _get_association_classes(self, namespace):
    class_repo = self._get_class_repo(namespace)
    for cl in six.itervalues(class_repo):
        if 'Association' in cl.qualifiers:
            yield cl
    return | Return iterator of associator classes from the class repo
Returns the classes that have the Association qualifier.
Does NOT copy so these are what is in repository. User functions
MUST NOT modify these classes.
Returns: Returns generator where each yield returns a single
... |
def gui_repaint(self, drawDC=None):
    DEBUG_MSG("gui_repaint()", 1, self)
    if self.IsShownOnScreen():
        if drawDC is None:
            drawDC = wx.ClientDC(self)
        drawDC.DrawBitmap(self.bitmap, 0, 0)
    else:
        pass | Performs update of the displayed image on the GUI canvas, using the
supplied device context. If drawDC is None, a ClientDC will be used to
redraw the image. |
def create_atomic_wrapper(cls, wrapped_func):
    def _create_atomic_wrapper(*args, **kwargs):
        with transaction.atomic():
            return wrapped_func(*args, **kwargs)
    return _create_atomic_wrapper | Returns a wrapped function. |
def averaged_sgd_entropic_transport(a, b, M, reg, numItermax=300000, lr=None):
    if lr is None:
        lr = 1. / max(a / reg)
    n_source = np.shape(M)[0]
    n_target = np.shape(M)[1]
    cur_beta = np.zeros(n_target)
    ave_beta = np.zeros(n_target)
    for cur_iter in range(numItermax):
        k = cur_iter + 1... | Compute the ASGD algorithm to solve the regularized semi-continuous measures optimal transport max problem
The function solves the following optimization problem:
.. math::
    \gamma = \arg\min_\gamma \langle \gamma, M \rangle_F + reg \cdot \Omega(\gamma)
    s.t. \quad \gamma 1 = a
    \quad \gamma^T 1 = b
... |
def recompile_all(path):
    import os
    if os.path.isdir(path):
        for root, dirs, files in os.walk(path):
            for name in files:
                if name.endswith('.py'):
                    filename = os.path.abspath(os.path.join(root, name))
                    print(filename, file=sys.stderr)
                    ... | Recursively recompile all .py files in the directory |
def as_dict(self):
    tags_dict = dict(self)
    tags_dict['@module'] = self.__class__.__module__
    tags_dict['@class'] = self.__class__.__name__
    return tags_dict | Dict representation.
Returns:
    Dictionary of parameters from fefftags object |
def _get_content(self, url):
    "Get HTML content"
    target_url = self._db_url + '/' + unquote(url)
    log.debug("Opening '{0}'".format(target_url))
    try:
        f = self.opener.open(target_url)
    except HTTPError as e:
        log.error("HTTP error, your session may be expired.")
        ... | Get HTML content |
def log(self, branch, remote):
    log_hook = self.settings['rebase.log-hook']
    if log_hook:
        if ON_WINDOWS:
            log_hook = re.sub(r'\$(\d+)', r'%\1', log_hook)
            log_hook = re.sub(r'%(?!\d)', '%%', log_hook)
            log_hook = re.sub(r'; ?', r'\n', log_hook)... | Call a log-command, if set by git-up.rebase.log-hook. |
def on_channel_open(self, channel):
    self._logger.debug('Channel opened')
    self._channel = channel
    self._channel.parent_client = self
    self.add_on_channel_close_callback()
    self.setup_exchange(self._exchange) | Invoked by pika when the channel has been opened.
The channel object is passed in so we can make use of it.
Since the channel is now open, we'll declare the exchange to use.
:param pika.channel.Channel channel: The channel object |
def update_col_from_series(self, column_name, series, cast=False):
    logger.debug('updating column {!r} in table {!r}'.format(
        column_name, self.name))
    col_dtype = self.local[column_name].dtype
    if series.dtype != col_dtype:
        if cast:
            series = series.astype(co... | Update existing values in a column from another series.
Index values must match in both column and series. Optionally
casts data type to match the existing column.
Parameters
---------------
column_name : str
series : pandas.Series
cast : bool, optional, default Fa... |
def read(self, size=None):
    if not self._open:
        raise pycdlibexception.PyCdlibInvalidInput('I/O operation on closed file.')
    if self._offset >= self._length:
        return b''
    if size is None or size < 0:
        data = self.readall()
    else:
        readsize = min(se... | A method to read and return up to size bytes.
Parameters:
    size - Optional parameter to read size number of bytes; if None or
        negative, all remaining bytes in the file will be read
Returns:
    The number of bytes requested or the rest of the data left in the file,
    ... |
def find_args(event, arg_type):
    args = event.get('arguments', {})
    obj_tags = [arg for arg in args if arg['type'] == arg_type]
    if obj_tags:
        return [o['value']['@id'] for o in obj_tags]
    else:
        return [] | Return IDs of all arguments of a given type |
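The shape of the event dict the function expects can be read off its lookups; a hypothetical example (the argument types and '@id' values shown are assumptions, only the 'arguments'/'type'/'value'/'@id' keys come from the code):

event = {
    'arguments': [
        {'type': 'agent', 'value': {'@id': 'protein/AKT1'}},
        {'type': 'theme', 'value': {'@id': 'protein/MTOR'}},
        {'type': 'agent', 'value': {'@id': 'protein/EGFR'}},
    ]
}
print(find_args(event, 'agent'))  # ['protein/AKT1', 'protein/EGFR']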
def _check_base_classes(base_classes, check_for_type):
    return_value = False
    for base in base_classes:
        if base.__name__ == check_for_type:
            return_value = True
            break
        else:
            return_value = _check_base_classes(base.__bases__, check_for_type)
            if return_v... | Indicate whether ``check_for_type`` exists in ``base_classes``. |
def _json_column(**kwargs):
    return db.Column(
        JSONType().with_variant(
            postgresql.JSON(none_as_null=True),
            'postgresql',
        ),
        nullable=True,
        **kwargs
    ) | Return JSON column. |
def get_pool_context(self):
    context = {self.current.lane_id: self.current.role, 'self': self.current.role}
    for lane_id, role_id in self.current.pool.items():
        if role_id:
            context[lane_id] = lazy_object_proxy.Proxy(
                lambda: self.role_model(super_context).obj... | Builds context for the WF pool.
Returns:
    Context dict. |
async def install_mediaroom_protocol(responses_callback, box_ip=None):
    from . import version
    _LOGGER.debug(version)
    loop = asyncio.get_event_loop()
    mediaroom_protocol = MediaroomProtocol(responses_callback, box_ip)
    sock = create_socket()
    await loop.create_datagram_endpoint(lambda: mediaroom_prot... | Install an asyncio protocol to process NOTIFY messages. |
def build(self, x, h, mask=None):
    xw = tf.split(tf.matmul(x, self.w_matrix) + self.bias, 3, 1)
    hu = tf.split(tf.matmul(h, self.U), 3, 1)
    r = tf.sigmoid(xw[0] + hu[0])
    z = tf.sigmoid(xw[1] + hu[1])
    h1 = tf.tanh(xw[2] + r * hu[2])
    next_h = h1 * (1 - z) + h * z
    if ma... | Build the GRU cell. |
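The three-way splits above are the reset gate r, the update gate z, and the candidate state; written out as the standard GRU equations the code follows (a sketch, with time subscripts added and the single fused bias shown on the x-side term, as in the code):

\begin{aligned}
r_t &= \sigma(x_t W_r + b_r + h_{t-1} U_r) \\
z_t &= \sigma(x_t W_z + b_z + h_{t-1} U_z) \\
\tilde{h}_t &= \tanh(x_t W_h + b_h + r_t \odot (h_{t-1} U_h)) \\
h_t &= (1 - z_t) \odot \tilde{h}_t + z_t \odot h_{t-1}
\end{aligned}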
def __create_header(self, command, command_string, session_id, reply_id):
    buf = pack('<4H', command, 0, session_id, reply_id) + command_string
    buf = unpack('8B' + '%sB' % len(command_string), buf)
    checksum = unpack('H', self.__create_checksum(buf))[0]
    reply_id += 1
    if reply_id >=... | Puts the parts that make up a packet together and packs them into a byte string |
async def jsk_vc_pause(self, ctx: commands.Context):
    voice = ctx.guild.voice_client
    if voice.is_paused():
        return await ctx.send("Audio is already paused.")
    voice.pause()
    await ctx.send(f"Paused audio in {voice.channel.name}.") | Pauses a running audio source, if there is one. |
def transformer_librispeech_tpu_v2():
    hparams = transformer_librispeech_v2()
    update_hparams_for_tpu(hparams)
    hparams.batch_size = 16
    librispeech.set_librispeech_length_hparams(hparams)
    return hparams | HParams for training ASR model on Librispeech on TPU v2. |
def set_description(self):
    if self.device_info['type'] == 'Router':
        self.node['description'] = '%s %s' % (self.device_info['type'],
                                              self.device_info['model'])
    else:
        self.node['description'] = self.device_info['desc'] | Set the node description |
def _create_service(self, parameters={}, **kwargs):
    logging.debug("_create_service()")
    logging.debug(str.join(',', [self.service_name, self.plan_name,
                                 self.name, str(parameters)]))
    return self.service.create_service(self.service_name, self.plan_name,
                                       self.name, parame... | Create a Cloud Foundry service that has custom parameters. |