code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def _getFromDate(l, date):
try:
date = _toDate(date)
i = _insertDateIndex(date, l) - 1
if i == -1:
return l[0]
return l[i]
except (ValueError, TypeError):
return l[0] | returns the index of given or best fitting date |
def _fill_col_borders(self):
first = True
last = True
if self.col_indices[0] == self.hcol_indices[0]:
first = False
if self.col_indices[-1] == self.hcol_indices[-1]:
last = False
for num, data in enumerate(self.tie_data):
self.tie_data[num] = s... | Add the first and last column to the data by extrapolation. |
def from_gpx(gpx_track_point):
return Point(
lat=gpx_track_point.latitude,
lon=gpx_track_point.longitude,
time=gpx_track_point.time
) | Creates a point from GPX representation
Arguments:
gpx_track_point (:obj:`gpxpy.GPXTrackPoint`)
Returns:
:obj:`Point` |
def create_eager_metrics_for_problem(problem, model_hparams):
metric_fns = problem.eval_metric_fns(model_hparams)
problem_hparams = problem.get_hparams(model_hparams)
target_modality = problem_hparams.modality["targets"]
weights_fn = model_hparams.weights_fn.get(
"targets",
modalities.get_weights_fn... | See create_eager_metrics. |
def echo(msg, *args, **kwargs):
file = kwargs.pop('file', None)
nl = kwargs.pop('nl', True)
err = kwargs.pop('err', False)
color = kwargs.pop('color', None)
msg = safe_unicode(msg).format(*args, **kwargs)
click.echo(msg, file=file, nl=nl, err=err, color=color) | Wraps click.echo, handles formatting and check encoding |
def parallel_split_combine(args, split_fn, parallel_fn,
parallel_name, combiner,
file_key, combine_arg_keys, split_outfile_i=-1):
args = [x[0] for x in args]
split_args, combine_map, finished_out, extras = _get_split_tasks(args, split_fn, file_key,
... | Split, run split items in parallel then combine to output file.
split_fn: Split an input file into parts for processing. Returns
the name of the combined output file along with the individual
split output names and arguments for the parallel function.
parallel_fn: Reference to run_parallel function... |
async def _close(self):
try:
if self._hb_inbox_sid is not None:
await self._nc.unsubscribe(self._hb_inbox_sid)
self._hb_inbox = None
self._hb_inbox_sid = None
if self._ack_subject_sid is not None:
await self._nc.unsubscribe(... | Removes any present internal state from the client. |
async def unsubscribe(self, topic):
if self.socket_type not in {SUB, XSUB}:
raise AssertionError(
"A %s socket cannot unsubscribe." % self.socket_type.decode(),
)
self._subscriptions.remove(topic)
tasks = [
asyncio.ensure_future(
... | Unsubscribe the socket from the specified topic.
:param topic: The topic to unsubscribe from. |
def _deploy_and_remember(
self,
contract_name: str,
arguments: List,
deployed_contracts: 'DeployedContracts',
) -> Contract:
receipt = self.deploy(contract_name, arguments)
deployed_contracts['contracts'][contract_name] = _deployed_data_from_receipt(
... | Deploys contract_name with arguments and store the result in deployed_contracts. |
def get(self, telescope, band):
    """Instantiate, register and return a Bandpass object for telescope/band.

    :raises NotDefinedError: when no bandpass class is registered for telescope.
    """
    klass = self._bpass_classes.get(telescope)
    if klass is None:
        # NOTE(review): message and telescope are passed as separate args —
        # confirm NotDefinedError does %-interpolation itself.
        raise NotDefinedError('bandpass data for %s not defined', telescope)
    bp = klass()
    # Back-reference so the bandpass can reach its registry later.
    bp.registry = self
    bp.telescope = telescope
    bp.band = band
    return bp | Get a Bandpass object for a known telescope and filter. |
def _merge(self, a, b):
for k, v in a.items():
if isinstance(v, dict):
item = b.setdefault(k, {})
self._merge(v, item)
elif isinstance(v, list):
item = b.setdefault(k, [{}])
if len(v) == 1 and isinstance(v[0], dict):
... | Merges a into b. |
def integer_squareroot(value: int) -> int:
if not isinstance(value, int) or isinstance(value, bool):
raise ValueError(
"Value must be an integer: Got: {0}".format(
type(value),
)
)
if value < 0:
raise ValueError(
"Value cannot be negati... | Return the integer square root of ``value``.
Uses Python's decimal module to compute the square root of ``value`` with
a precision of 128-bits. The value 128 is chosen since the largest square
root of a 256-bit integer is a 128-bit integer. |
def order_transforms(transforms):
outputs = set().union(*[t.outputs for t in transforms])
out = []
remaining = [t for t in transforms]
while remaining:
leftover = []
for t in remaining:
if t.inputs.isdisjoint(outputs):
out.append(t)
outputs -= ... | Orders transforms to ensure proper chaining.
For example, if `transforms = [B, A, C]`, and `A` produces outputs needed
by `B`, the transforms will be re-rorderd to `[A, B, C]`.
Parameters
----------
transforms : list
List of transform instances to order.
Outputs
-------
list :... |
def add_job(self, id, func, **kwargs):
job_def = dict(kwargs)
job_def['id'] = id
job_def['func'] = func
job_def['name'] = job_def.get('name') or id
fix_job_def(job_def)
return self._scheduler.add_job(**job_def) | Add the given job to the job list and wakes up the scheduler if it's already running.
:param str id: explicit identifier for the job (for modifying it later)
:param func: callable (or a textual reference to one) to run at the given time |
def _find_geophysical_vars(self, ds, refresh=False):
if self._geophysical_vars.get(ds, None) and refresh is False:
return self._geophysical_vars[ds]
self._geophysical_vars[ds] = cfutil.get_geophysical_variables(ds)
return self._geophysical_vars[ds] | Returns a list of geophysical variables. Modifies
`self._geophysical_vars`
:param netCDF4.Dataset ds: An open netCDF dataset
:param bool refresh: if refresh is set to True, the cache is
invalidated.
:rtype: list
:return: A list containing strings wi... |
def get_template_loader(self, subdir='templates'):
if self.request is None:
raise ValueError("this method can only be called after the view middleware is run. Check that `django_mako_plus.middleware` is in MIDDLEWARE.")
dmp = apps.get_app_config('django_mako_plus')
return dmp.engine.... | App-specific function to get the current app's template loader |
def get_data(self):
data = []
ntobj = cx.namedtuple("NtGoCnt", "Depth_Level BP_D MF_D CC_D BP_L MF_L CC_L")
cnts = self.get_cnts_levels_depths_recs(set(self.obo.values()))
max_val = max(max(dep for dep in cnts['depth']), max(lev for lev in cnts['level']))
for i in range(max_val+1... | Collect counts of GO terms at all levels and depths. |
def bootstrap_auc(df, col, pred_col, n_bootstrap=1000):
scores = np.zeros(n_bootstrap)
old_len = len(df)
df.dropna(subset=[col], inplace=True)
new_len = len(df)
if new_len < old_len:
logger.info("Dropping NaN values in %s to go from %d to %d rows" % (col, old_len, new_len))
preds = df[pr... | Calculate the boostrapped AUC for a given col trying to predict a pred_col.
Parameters
----------
df : pandas.DataFrame
col : str
column to retrieve the values from
pred_col : str
the column we're trying to predict
n_boostrap : int
the number of bootstrap samples
Re... |
def decompile(f):
co = f.__code__
args, kwonly, varargs, varkwargs = paramnames(co)
annotations = f.__annotations__ or {}
defaults = list(f.__defaults__ or ())
kw_defaults = f.__kwdefaults__ or {}
if f.__name__ == '<lambda>':
node = ast.Lambda
body = pycode_to_body(co, Decompilat... | Decompile a function.
Parameters
----------
f : function
The function to decompile.
Returns
-------
ast : ast.FunctionDef
A FunctionDef node that compiles to f. |
def _get_prefixes(self):
prefixes = {
"@": "o",
"+": "v",
}
feature_prefixes = self.server.features.get('PREFIX')
if feature_prefixes:
modes = feature_prefixes[1:len(feature_prefixes)//2]
symbols = feature_prefixes[len(feature_prefixes)//2+... | Get the possible nick prefixes and associated modes for a client. |
def set_level_for_logger_and_its_handlers(log: logging.Logger,
level: int) -> None:
log.setLevel(level)
for h in log.handlers:
h.setLevel(level) | Set a log level for a log and all its handlers.
Args:
log: log to modify
level: log level to set |
def workspace(show_values: bool = True, show_types: bool = True):
r = _get_report()
data = {}
for key, value in r.project.shared.fetch(None).items():
if key.startswith('__cauldron_'):
continue
data[key] = value
r.append_body(render.status(data, values=show_values, types=show_... | Adds a list of the shared variables currently stored in the project
workspace.
:param show_values:
When true the values for each variable will be shown in addition to
their name.
:param show_types:
When true the data types for each shared variable will be shown in
addition t... |
def blurring_kernel(shape=None):
name = 'motionblur.mat'
url = URL_CAM + name
dct = get_data(name, subset=DATA_SUBSET, url=url)
return convert(255 - dct['im'], shape, normalize='sum') | Blurring kernel for convolution simulations.
The kernel is scaled to sum to one.
Returns
-------
An image with the following properties:
image type: gray scales
size: [100, 100] (if not specified by `size`)
scale: [0, 1]
type: float64 |
def get_element_coors(self, ig=None):
cc = self.coors
n_ep_max = self.n_e_ps.max()
coors = nm.empty((self.n_el, n_ep_max, self.dim), dtype=cc.dtype)
for ig, conn in enumerate(self.conns):
i1, i2 = self.el_offsets[ig], self.el_offsets[ig + 1]
coors[i1:i2, :conn.sha... | Get the coordinates of vertices elements in group `ig`.
Parameters
----------
ig : int, optional
The element group. If None, the coordinates for all groups
are returned, filled with zeros at places of missing
vertices, i.e. where elements having less then the... |
def verify_indices_all_unique(obj):
axis_names = [
('index',),
('index', 'columns'),
('items', 'major_axis', 'minor_axis')
][obj.ndim - 1]
for axis_name, index in zip(axis_names, obj.axes):
if index.is_unique:
continue
raise ValueError(
"Duplic... | Check that all axes of a pandas object are unique.
Parameters
----------
obj : pd.Series / pd.DataFrame / pd.Panel
The object to validate.
Returns
-------
obj : pd.Series / pd.DataFrame / pd.Panel
The validated object, unchanged.
Raises
------
ValueError
If... |
def delete_resource_scenario(scenario_id, resource_attr_id, quiet=False, **kwargs):
    """Remove the data associated with a resource attribute in a scenario.

    :param quiet: when True, suppress the error for a missing link.

    Requires edit permission; kwargs must carry the acting ``user_id``.
    """
    _check_can_edit_scenario(scenario_id, kwargs['user_id'])
    _delete_resourcescenario(scenario_id, resource_attr_id, suppress_error=quiet) | Remove the data associated with a resource in a scenario. |
def format(self):
if self._format:
return self._format
elif self.pil_image:
return self.pil_image.format | The format of the image file.
An uppercase string corresponding to the
:attr:`PIL.ImageFile.ImageFile.format` attribute. Valid values include
``"JPEG"`` and ``"PNG"``. |
def checkin_bundle(self, db_path, replace=True, cb=None):
from ambry.orm.exc import NotFoundError
db = Database('sqlite:///{}'.format(db_path))
db.open()
if len(db.datasets) == 0:
raise NotFoundError("Did not get a dataset in the {} bundle".format(db_path))
ds = db.da... | Add a bundle, as a Sqlite file, to this library |
def _get_target_from_package_name(self, target, package_name, file_path):
address_path = self.parse_file_path(file_path)
if not address_path:
return None
dep_spec_path = os.path.normpath(os.path.join(target.address.spec_path, address_path))
for dep in target.dependencies:
if dep.package_name... | Get a dependent target given the package name and relative file path.
This will only traverse direct dependencies of the passed target. It is not necessary
to traverse further than that because transitive dependencies will be resolved under the
direct dependencies and every direct dependencies is symlinked... |
def out(self, obj, formatter=None, out_file=None):
if not isinstance(obj, CommandResultItem):
raise TypeError('Expected {} got {}'.format(CommandResultItem.__name__, type(obj)))
import platform
import colorama
if platform.system() == 'Windows':
out_file = colorama... | Produces the output using the command result.
The method does not return a result as the output is written straight to the output file.
:param obj: The command result
:type obj: knack.util.CommandResultItem
:param formatter: The formatter we should use for the command result
... |
def weld_cast_array(array, weld_type, to_weld_type):
if not is_numeric(weld_type) or not is_numeric(to_weld_type):
raise TypeError('Cannot cast array of type={} to type={}'.format(weld_type, to_weld_type))
obj_id, weld_obj = create_weld_object(array)
weld_template =
weld_obj.weld_code = weld_tem... | Cast array to a different type.
Parameters
----------
array : numpy.ndarray or WeldObject
Input data.
weld_type : WeldType
Type of each element in the input array.
to_weld_type : WeldType
Desired type.
Returns
-------
WeldObject
Representation of this co... |
def _get_zipped_rows(self, soup):
table = soup.findChildren('table')[2]
rows = table.findChildren(['tr'])[:-2]
spacing = range(2, len(rows), 3)
rows = [row for (i, row) in enumerate(rows) if (i not in spacing)]
info = [row for (i, row) in enumerate(rows) if (i % 2 == 0)]
... | Returns all 'tr' tag rows as a list of tuples. Each tuple is for
a single story. |
def _get_fw(self, msg, updates, req_fw_type=None, req_fw_ver=None):
fw_type = None
fw_ver = None
if not isinstance(updates, tuple):
updates = (updates, )
for store in updates:
fw_id = store.pop(msg.node_id, None)
if fw_id is not None:
f... | Get firmware type, version and a dict holding binary data. |
def converts_values(self):
return self.convert_value is not Formatter.convert_value or \
self.convert_column is not Formatter.convert_column | Whether this Formatter also converts values. |
def reindex_axis(self, labels, axis=0, **kwargs):
if axis != 0:
raise ValueError("cannot reindex series on non-zero axis!")
msg = ("'.reindex_axis' is deprecated and will be removed in a future "
"version. Use '.reindex' instead.")
warnings.warn(msg, FutureWarning, sta... | Conform Series to new index with optional filling logic.
.. deprecated:: 0.21.0
Use ``Series.reindex`` instead. |
def colorize(occurence,maxoccurence,minoccurence):
if occurence == maxoccurence:
color = (255,0,0)
elif occurence == minoccurence:
color = (0,0,255)
else:
color = (int((float(occurence)/maxoccurence*255)),0,int(float(minoccurence)/occurence*255))
return color | A formula for determining colors. |
def _GetPkgResources(package_name, filepath):
requirement = pkg_resources.Requirement.parse(package_name)
try:
return pkg_resources.resource_filename(requirement, filepath)
except pkg_resources.DistributionNotFound:
pkg_resources.working_set = pkg_resources.WorkingSet()
try:
return pkg_resources... | A wrapper for the `pkg_resource.resource_filename` function. |
def from_inline(cls: Type[UnlockType], inline: str) -> UnlockType:
data = Unlock.re_inline.match(inline)
if data is None:
raise MalformedDocumentError("Inline input")
index = int(data.group(1))
parameters_str = data.group(2).split(' ')
parameters = []
for p in... | Return an Unlock instance from inline string format
:param inline: Inline string format
:return: |
def extract_match(self, list_title_matches):
list_title_matches_set = set(list_title_matches)
list_title_count = []
for match in list_title_matches_set:
list_title_count.append((list_title_matches.count(match), match))
if list_title_count and max(list_title_count)[0] != min(l... | Extract the title with the most matches from the list.
:param list_title_matches: A list, the extracted titles which match with others
:return: A string, the most frequently extracted title. |
def show_domain_record(self, domain_id, record_id):
json = self.request('/domains/%s/records/%s' % (domain_id, record_id),
method='GET')
status = json.get('status')
if status == 'OK':
domain_record_json = json.get('record')
domain_record = Reco... | This method returns the specified domain record.
Required parameters
domain_id:
Integer or Domain Name (e.g. domain.com), specifies the domain
for which to retrieve a record.
record_id:
Integer, specifies the record_id to retrieve. |
def random(self, namespace=0):
query = self.LIST.substitute(
WIKI=self.uri,
ENDPOINT=self.endpoint,
LIST='random')
query += "&rnlimit=1&rnnamespace=%d" % namespace
emoji = [
u'\U0001f32f',
u'\U0001f355',
u'\U0001f35c',
... | Returns query string for random page |
def message(self, msg):
for broker in self.message_brokers:
try:
broker(msg)
except Exception as exc:
utils.error(exc) | Send a message to third party applications |
def set_riskfree_rate(self, rf):
    """Set the annual risk-free rate and recompute performance stats.

    Affects only this instance.

    Args:
        rf (float): annual risk-free rate.
    """
    self.rf = rf
    # Re-run the stats pipeline so derived metrics pick up the new rate.
    self._update(self.prices) | Set annual risk-free rate property and calculate properly annualized
monthly and daily rates. Then performance stats are recalculated.
Affects only this instance of the PerformanceStats.
Args:
* rf (float): Annual `risk-free rate <https://www.investopedia.com/terms/r/risk-freerate.a... |
def attach_const_node(node, name, value):
    """Create a Const node for *value* and register it in *node*'s locals
    under *name* — unless that name is already a special attribute."""
    if name not in node.special_attributes:
        _attach_local_node(node, nodes.const_factory(value), name) | create a Const node and register it in the locals of the given
node with the specified name |
def _reconstruct(self, path_to_root):
item_pattern = re.compile('\d+\\]')
dot_pattern = re.compile('\\.|\\[')
path_segments = dot_pattern.split(path_to_root)
schema_endpoint = self.schema
if path_segments[1]:
for i in range(1,len(path_segments)):
if it... | a helper method for finding the schema endpoint from a path to root
:param path_to_root: string with dot path to root from
:return: list, dict, string, number, or boolean at path to root |
def max_frequency (sig,FS):
    """Return the frequency below which 95% of the spectral magnitude lies.

    Parameters
    ----------
    sig : ndarray
        input from which max frequency is computed.
    FS : int
        sampling frequency.

    Returns
    -------
    f_max : frequency at 0.95 of the cumulative spectrum.
    """
    f, fs = plotfft(sig, FS, doplot=False)
    # Cumulative spectral magnitude.
    t = cumsum(fs)
    # NOTE(review): `find` is presumably matplotlib.mlab.find (removed in
    # recent matplotlib) — confirm; np.nonzero(...)[0] is the equivalent.
    ind_mag = find (t>t[-1]*0.95)[0]
    f_max=f[ind_mag]
    return f_max | Compute max frequency along the specified axes.
Parameters
----------
sig: ndarray
input from which max frequency is computed.
FS: int
sampling frequency
Returns
-------
f_max: int
0.95 of max_frequency using cumsum. |
def get(self, key):
value = None
for store in self._stores:
value = store.get(key)
if value is not None:
break
if value is not None:
for store2 in self._stores:
if store == store2:
break
store2.put(key, value)
return value | Return the object named by key. Checks each datastore in order. |
def drop_udf(
self,
name,
input_types=None,
database=None,
force=False,
aggregate=False,
):
if not input_types:
if not database:
database = self.current_database
result = self.list_udfs(database=database, like=name)
... | Drops a UDF
If only name is given, this will search
for the relevant UDF and drop it.
To delete an overloaded UDF, give only a name and force=True
Parameters
----------
name : string
input_types : list of strings (optional)
force : boolean, default False ... |
def get_screen_settings(self, screen_id):
if not isinstance(screen_id, baseinteger):
raise TypeError("screen_id can only be an instance of type baseinteger")
record_screen_settings = self._call("getScreenSettings",
in_p=[screen_id])
record_screen_settings = IReco... | Returns the recording settings for a particular screen.
in screen_id of type int
Screen ID to retrieve recording screen settings for.
return record_screen_settings of type :class:`IRecordingScreenSettings`
Recording screen settings for the requested screen. |
def commit_format(self):
formatted_analyses = []
for analyze in self.analysis['messages']:
formatted_analyses.append({
'message': f"{analyze['source']}: {analyze['message']}. Code: {analyze['code']}",
'file': analyze['location']['path'],
'line'... | Formats the analysis into a simpler dictionary with the line, file and message values to
be commented on a commit.
Returns a list of dictionaries |
def true_false_returns(func):
@functools.wraps(func)
def _execute(*args, **kwargs):
try:
func(*args, **kwargs)
return True
except:
return False
return _execute | Executes function, if error returns False, else True
:param func: function to call
:return: True iff ok, else False |
def m_quadratic_sum(A, B, max_it=50):
r
gamma1 = solve_discrete_lyapunov(A, B, max_it)
return gamma1 | r"""
Computes the quadratic sum
.. math::
V = \sum_{j=0}^{\infty} A^j B A^{j'}
V is computed by solving the corresponding discrete lyapunov
equation using the doubling algorithm. See the documentation of
`util.solve_discrete_lyapunov` for more information.
Parameters
----------
... |
def dispatch_url(self, url_string):
url, url_adapter, query_args = self.parse_url(url_string)
try:
endpoint, kwargs = url_adapter.match()
except NotFound:
raise NotSupported(url_string)
except RequestRedirect as e:
new_url = "{0.new_url}?{1}".format(e,... | Dispatch the URL string to the target endpoint function.
:param url_string: the origin URL string.
:returns: the return value of calling dispatched function. |
def _build(self, inputs):
shape_inputs = inputs.get_shape().as_list()
rank = len(shape_inputs)
max_dim = np.max(self._dims) + 1
if rank < max_dim:
raise ValueError("Rank of inputs must be at least {}.".format(max_dim))
full_begin = [0] * rank
full_size = [-1] * rank
for dim, begin, siz... | Connects the SliceByDim module into the graph.
Args:
inputs: `Tensor` to slice. Its rank must be greater than the maximum
dimension specified in `dims` (plus one as python is 0 indexed).
Returns:
The sliced tensor.
Raises:
ValueError: If `inputs` tensor has insufficient rank. |
def clean_registration_ids(self, registration_ids=[]):
valid_registration_ids = []
for registration_id in registration_ids:
details = self.registration_info_request(registration_id)
if details.status_code == 200:
valid_registration_ids.append(registration_id)
... | Checks registration ids and excludes inactive ids
Args:
registration_ids (list, optional): list of ids to be cleaned
Returns:
list: cleaned registration ids |
def from_array(array):
if array is None or not array:
return None
assert_type_or_raise(array, dict, parameter_name="array")
data = {}
data['label'] = u(array.get('label'))
data['amount'] = int(array.get('amount'))
instance = LabeledPrice(**data)
instan... | Deserialize a new LabeledPrice from a given dictionary.
:return: new LabeledPrice instance.
:rtype: LabeledPrice |
def on_view_not_found(
self,
environ: Dict[str, Any],
start_response: Callable[[str, List[Tuple[str, str]]], None],
) -> Iterable[bytes]:
start_response('404 Not Found', [('Content-type', 'text/plain')])
return [b'Not found'] | called when views not found |
def findWCSExtn(filename):
rootname,extroot = fileutil.parseFilename(filename)
extnum = None
if extroot is None:
fimg = fits.open(rootname, memmap=False)
for i,extn in enumerate(fimg):
if 'crval1' in extn.header:
refwcs = wcsutil.HSTWCS('{}[{}]'.format(rootname,i)... | Return new filename with extension that points to an extension with a
valid WCS.
Returns
=======
extnum : str, None
Value of extension name as a string either as provided by the user
or based on the extension number for the first extension which
conta... |
def lockfile(path):
    """With-block helper that holds an exclusive fcntl lock on *path*.

    Args:
        path (str): A path to a lock file.

    NOTE(review): this generator is presumably wrapped by
    ``@contextlib.contextmanager`` at its definition site — confirm, as
    the decorator is not visible here.
    """
    with genfile(path) as fd:
        # LOCK_EX blocks until the exclusive lock is acquired; it is
        # released when genfile's with-block closes the descriptor.
        fcntl.lockf(fd, fcntl.LOCK_EX)
        yield None | A file lock with-block helper.
Args:
path (str): A path to a lock file.
Examples:
Get the lock on a file and dostuff while having the lock::
path = '/hehe/haha.lock'
with lockfile(path):
dostuff()
Notes:
This is curently based on fcntl.loc... |
def new_request(sender, request=None, notify=True, **kwargs):
    """Signal handler for a new community-inclusion request; e-mails the
    community when *notify* is set and mail is enabled in the app config."""
    if current_app.config['COMMUNITIES_MAIL_ENABLED'] and notify:
        send_community_request_email(request) | New request for inclusion. |
def observed(self, band, corrected=True):
if band not in 'ugriz':
raise ValueError("band='{0}' not recognized".format(band))
i = 'ugriz'.find(band)
t, y, dy = self.lcdata.get_lightcurve(self.lcid, return_1d=False)
if corrected:
ext = self.obsmeta['rExt'] * self.ex... | Return observed values in the given band
Parameters
----------
band : str
desired bandpass: should be one of ['u', 'g', 'r', 'i', 'z']
corrected : bool (optional)
If true, correct for extinction
Returns
-------
t, mag, dmag : ndarrays
... |
def _FormatOtherFileToken(self, token_data):
timestamp = token_data.microseconds + (
token_data.timestamp * definitions.MICROSECONDS_PER_SECOND)
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
date_time_string = date_time.CopyToDateTimeString()
return {
... | Formats an other file token as a dictionary of values.
Args:
token_data (bsm_token_data_other_file32): AUT_OTHER_FILE32 token data.
Returns:
dict[str, str]: token values. |
async def generate_wallet_key(config: Optional[str]) -> str:
logger = logging.getLogger(__name__)
logger.debug("generate_wallet_key: >>> config: %r",
config)
if not hasattr(generate_wallet_key, "cb"):
logger.debug("generate_wallet_key: Creating callback")
generate_wallet_key... | Generate wallet master key.
Returned key is compatible with "RAW" key derivation method.
It allows to avoid expensive key derivation for use cases when wallet keys can be stored in a secure enclave.
:param config: (optional) key configuration json.
{
"seed": string, (optional) Seed that allows... |
def info(self, message, domain=None):
    """Shortcut for `utils.loggable.info`, defaulting the domain to this
    extension's name.

    Args:
        message: see `utils.loggable.info`
        domain: see `utils.loggable.info`
    """
    if domain is None:
        domain = self.extension_name
    # Calls the module-level `info` (presumably imported from
    # utils.loggable), not this method — no recursion.
    info(message, domain) | Shortcut function for `utils.loggable.info`
Args:
message: see `utils.loggable.info`
domain: see `utils.loggable.info` |
def get_multipart_md5(self, filename, chunk_size=8 * 1024 * 1024):
md5s = []
with open(filename, 'rb') as fp:
while True:
data = fp.read(chunk_size)
if not data:
break
md5s.append(hashlib.md5(data))
digests = b"".joi... | Returns the md5 checksum of the provided file name after breaking it into chunks.
This is done to mirror the method used by Amazon S3 after a multipart upload. |
def update_logo_preview(self):
logo_path = self.organisation_logo_path_line_edit.text()
if os.path.exists(logo_path):
icon = QPixmap(logo_path)
label_size = self.organisation_logo_label.size()
label_size.setHeight(label_size.height() - 2)
label_size.setWid... | Update logo based on the current logo path. |
def get(ctx, key):
file = ctx.obj['FILE']
stored_value = get_key(file, key)
if stored_value:
click.echo('%s=%s' % (key, stored_value))
else:
exit(1) | Retrieve the value for the given key. |
def map_to_matype(self, matype):
try:
value = int(matype)
if abs(value) > len(AlphaVantage._ALPHA_VANTAGE_MATH_MAP):
raise ValueError("The value {} is not supported".format(value))
except ValueError:
value = AlphaVantage._ALPHA_VANTAGE_MATH_MAP.index(m... | Convert to the alpha vantage math type integer. It returns an
integer correspondent to the type of math to apply to a function. It
raises ValueError if an integer greater than the supported math types
is given.
Keyword Arguments:
matype: The math type of the alpha vantage a... |
def particles(category=None):
filepath = os.path.join(os.path.dirname(__file__), './particles.json')
with open(filepath) as f:
try:
particles = json.load(f)
except ValueError as e:
log.error('Bad json format in "{}"'.format(filepath))
else:
if category... | Returns a dict containing old greek particles grouped by category. |
def create_gp(self):
nb_bams = len(self.bams)
gp_parts = [
textwrap.dedent(
),
os.linesep.join([self._gp_style_func(i, nb_bams) for i in range(nb_bams)]),
textwrap.dedent(
),
os.linesep.join(self.gp_plots)
]
gp_src =... | Create GnuPlot file. |
def _fail(self, message, text, i):
raise ValueError("{}:\n{}".format(message, text[i : i + 79])) | Raise an exception with given message and text at i. |
def load(cls, fname, args):
if args.type == JSON:
if fname.endswith('.bz2'):
open_ = bz2.open
else:
open_ = open
if args.progress:
print('Loading JSON data...')
with open_(fname, 'rt') as fp:
storage = JsonStorage.load(fp)
else:
... | Load a generator.
Parameters
----------
cls : `type`
Generator class.
fname : `str`
Input file path.
args : `argparse.Namespace`
Command arguments.
Returns
-------
`cls` |
def get_column_list_prefixed(self):
return map(
lambda x: ".".join([self.name, x]),
self.columns
) | Returns a list of columns |
def est_covariance_mtx(self, corr=False):
cov = self.particle_covariance_mtx(self.particle_weights,
self.particle_locations)
if corr:
dstd = np.sqrt(np.diag(cov))
cov /= (np.outer(dstd, dstd))
return cov | Returns the full-rank covariance matrix of the current particle
distribution.
:param bool corr: If `True`, the covariance matrix is normalized
by the outer product of the square root diagonal of the covariance matrix,
i.e. the correlation matrix is returned instead.
:rt... |
def _check_and_assign_normalization_members(self, normalization_ctor,
normalization_kwargs):
if isinstance(normalization_ctor, six.string_types):
normalization_ctor = util.parse_string_to_constructor(normalization_ctor)
if normalization_ctor is not None and no... | Checks that the normalization constructor is callable. |
def reference_index(self):
if self._db_location:
ref_indices = glob.glob(os.path.join(self._db_location, "*", self._REF_INDEX))
if ref_indices:
return ref_indices[0] | Absolute path to the BWA index for EricScript reference data. |
def debug_dump(message, file_prefix="dump"):
global index
index += 1
with open("%s_%s.dump" % (file_prefix, index), 'w') as f:
f.write(message.SerializeToString())
f.close() | Utility while developing to dump message data to play with in the
interpreter |
def _validate_required(self, item, name):
if self.required is True and item is None:
raise ArgumentError(name, "This argument is required.") | Validate that the item is present if it's required. |
def _ifelse(expr, true_expr, false_expr):
tps = (SequenceExpr, Scalar)
if not isinstance(true_expr, tps):
true_expr = Scalar(_value=true_expr)
if not isinstance(false_expr, tps):
false_expr = Scalar(_value=false_expr)
output_type = utils.highest_precedence_data_type(
*[true_e... | Given a boolean sequence or scalar, if true will return the left, else return the right one.
:param expr: sequence or scalar
:param true_expr:
:param false_expr:
:return: sequence or scalar
:Example:
>>> (df.id == 3).ifelse(df.id, df.fid.astype('int'))
>>> df.isMale.ifelse(df.male_count, ... |
def _countWhereGreaterEqualInRows(sparseMatrix, rows, threshold):
return sum(sparseMatrix.countWhereGreaterOrEqual(row, row+1,
0, sparseMatrix.nCols(),
threshold)
for row in rows) | Like countWhereGreaterOrEqual, but for an arbitrary selection of rows, and
without any column filtering. |
def dot(a, b):
if hasattr(a, '__dot__'):
return a.__dot__(b)
if a is None:
return b
else:
raise ValueError(
'Dot is waiting for two TT-vectors or two TT- matrices') | Dot product of two TT-matrices or two TT-vectors |
def yesterday(date=None):
    """Return the day before *date* (or before the module's ``_date`` when
    no argument is given).

    :param date: optional date string, parsed with ``parse`` — presumably
        ``dateutil.parser.parse``; verify against the module's imports.
    """
    if not date:
        # NOTE(review): `_date` is a module-level value not visible here —
        # confirm it is kept current, otherwise this returns a stale day.
        return _date - datetime.timedelta(days=1)
    else:
        current_date = parse(date)
        return current_date - datetime.timedelta(days=1) | yesterday once more |
def _absolute_path(path, relative_to=None):
if path and os.path.isabs(path):
return path
if path and relative_to is not None:
_abspath = os.path.join(relative_to, path)
if os.path.isfile(_abspath):
log.debug(
'Relative path \'%s\' converted to existing absolut... | Return an absolute path. In case ``relative_to`` is passed and ``path`` is
not an absolute path, we try to prepend ``relative_to`` to ``path``and if
that path exists, return that one |
def get_func_name(func):
func_name = getattr(func, '__name__', func.__class__.__name__)
module_name = func.__module__
if module_name is not None:
module_name = func.__module__
return '{}.{}'.format(module_name, func_name)
return func_name | Return a name which includes the module name and function name. |
def set_sleep_on_power_button(enabled):
state = salt.utils.mac_utils.validate_enabled(enabled)
cmd = 'systemsetup -setallowpowerbuttontosleepcomputer {0}'.format(state)
salt.utils.mac_utils.execute_return_success(cmd)
return salt.utils.mac_utils.confirm_updated(
state,
get_sleep_on_power... | Set whether or not the power button can sleep the computer.
:param bool enabled: True to enable, False to disable. "On" and "Off" are
also acceptable values. Additionally you can pass 1 and 0 to represent
True and False respectively
:return: True if successful, False if not
:rtype: bool
... |
def run(self, messages):
statistics = {}
statistics['time'] = str(datetime.now())
statistics['time-utc'] = str(datetime.utcnow())
statistics['unlock'] = self.args.unlock
if self.args.question:
statistics['question'] = [t.name for t in self.assignment.specified_tests]
... | Returns some analytics about this autograder run. |
def setup(self):
    """Run base setup, record the simulation start time, then seed the
    initial population of simulants."""
    super().setup()
    # Capture the clock before any simulants exist so events are measured
    # from a consistent start time.
    self._start_time = self.clock.time
    self.initialize_simulants() | Setup the simulation and initialize its population. |
def write_pid(self, pid=None):
    """Write the current process's PID to the pidfile location.

    Args:
        pid: Optional explicit PID; any falsy value (including 0) falls
            back to ``os.getpid()``, matching the original `or` semantics.
    """
    if not pid:
        pid = os.getpid()
    self.write_metadata_by_name(self._name, 'pid', str(pid))
def start(track_file,
twitter_api_key,
twitter_api_secret,
twitter_access_token,
twitter_access_token_secret,
poll_interval=15,
unfiltered=False,
languages=None,
debug=False,
outfile=None):
listener = construct_listener(outfil... | Start the stream. |
def setOverlayTransformTrackedDeviceComponent(self, ulOverlayHandle, unDeviceIndex, pchComponentName):
    """Sets the transform to draw the overlay on a rendermodel component
    mesh instead of a quad. This will only draw when the system is drawing
    the device. Overlays with this transform type cannot receive mouse
    events.
    """
    # Thin pass-through to the native function table entry of the same name.
    return self.function_table.setOverlayTransformTrackedDeviceComponent(
        ulOverlayHandle, unDeviceIndex, pchComponentName)
def _process_deprecated(attrib, deprecated_attrib, kwargs):
if deprecated_attrib not in DEPRECATIONS:
raise ValueError('{0} not included in deprecations list'
.format(deprecated_attrib))
if deprecated_attrib in kwargs:
warnings.warn("'{0}' is DEPRECAT... | Processes optional deprecate arguments |
def Map(self, function):
    """Applies the function to every row in the table.

    Rows for which the function returns a falsy value are dropped.

    Args:
        function: A function applied to each row.

    Returns:
        A new TextTable() containing the header plus the truthy results.

    Raises:
        TableError: When transform is not a valid row entry. The transform
            must be compatible with Append().
    """
    mapped = self.__class__()
    mapped._table = [self.header]
    for row in self:
        transformed = function(row)
        if transformed:
            mapped.Append(transformed)
    return mapped
def details_for_given_date_in_gradebook_history_for_this_course(self, date, course_id):
path = {}
data = {}
params = {}
path["course_id"] = course_id
path["date"] = date
self.logger.debug("GET /api/v1/courses/{course_id}/gradebook_history/{date} with query params: {... | Details for a given date in gradebook history for this course.
Returns the graders who worked on this day, along with the assignments they worked on.
More details can be obtained by selecting a grader and assignment and calling the
'submissions' api endpoint for a given date. |
def _db_remove_prefix(self, spec, recursive = False):
if recursive:
prefix = spec['prefix']
del spec['prefix']
where, params = self._expand_prefix_spec(spec)
spec['prefix'] = prefix
params['prefix'] = prefix
where = 'prefix <<= %(prefix)s A... | Do the underlying database operations to delete a prefix |
def _store_object(self, obj_name, content, etag=None, chunked=False,
chunk_size=None, headers=None):
head_etag = headers.pop("ETag", "")
if chunked:
headers.pop("Content-Length", "")
headers["Transfer-Encoding"] = "chunked"
elif etag is None and content is not... | Handles the low-level creation of a storage object and the uploading of
the contents of that object. |
def remove_from_parent(self):
    """Removes this frame from its parent, and nulls the parent link."""
    # Guard clause: nothing to detach from (truthiness test preserved
    # from the original, not an `is None` check).
    if not self.parent:
        return
    self.parent._children.remove(self)
    self.parent._invalidate_time_caches()
    self.parent = None
def retrieve_order(self, order_id):
    """Retrieve details on a single order.

    Args:
        order_id: Identifier of the SSL certificate order to fetch.

    Returns:
        The API response mapped onto an ``SSLOrder`` model.
    """
    request = E.retrieveOrderSslCertRequest(E.id(order_id))
    response = self.request(request)
    return response.as_model(SSLOrder)
def fap_davies(Z, fmax, t, y, dy, normalization='standard'):
    """Davies upper-bound to the false alarm probability
    (Eqn 5 of Baluev 2008).
    """
    n_obs = len(t)
    # Single-frequency FAP plus the Davies tau correction term.
    single = fap_single(Z, n_obs, normalization=normalization)
    correction = tau_davies(Z, fmax, t, y, dy, normalization=normalization)
    return single + correction
def _error_dm(self, m, dm, s):
pred = self.fmodel.predict_given_context(np.hstack((m, dm)), s, range(len(s)))
err_v = pred - self.goal
error = sum(e*e for e in err_v)
return error | Error function.
Once self.goal has been defined, compute the error
of input using the generalized forward model. |
def issueCommand(self, command, *args):
    """Issue the given Assuan command and return a Deferred that will
    fire with the response.

    Args:
        command: Command name as ``bytes``.
        *args: Additional ``bytes`` arguments appended to the line.
    """
    deferred = Deferred()
    # Queue the Deferred so the response handler can fire it in order.
    self._dq.append(deferred)
    line = b" ".join([command] + list(args))
    self.sendLine(line)
    return deferred
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.