code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def format_to_http_prompt(context, excluded_options=None):
    """Format a Context object to HTTP Prompt commands.

    :param context: the Context to serialize
    :param excluded_options: option keys to omit from the output
    :return: newline-joined command string, terminated by a newline
    """
    commands = _extract_httpie_options(context, quote=True, join_key_value=True,
                                       excluded_keys=excluded_options)
    commands.append('cd ' + smart_quote(context.url))
    commands.extend(_extract_httpie_request_items(context, quote=True))
    return '\n'.join(commands) + '\n'
def set_type(self, type, header='Content-Type', requote=True):
if not type.count('/') == 1:
raise ValueError
if header.lower() == 'content-type':
del self['mime-version']
self['MIME-Version'] = '1.0'
if header not in self:
self[header] = type
return
params = self.get_params(header=header, unquote=requote)
del self[header]
self[header] = type
for p, v in params[1:]:
self.set_param(p, v, header, requote) | Set the main type and subtype for the Content-Type header.
type must be a string in the form "maintype/subtype", otherwise a
ValueError is raised.
This method replaces the Content-Type header, keeping all the
parameters in place. If requote is False, this leaves the existing
header's quoting as is. Otherwise, the parameters will be quoted (the
default).
An alternative header can be specified in the header argument. When
the Content-Type header is set, we'll always also add a MIME-Version
header. |
def cpp_app_builder(build_context, target):
yprint(build_context.conf, 'Build CppApp', target)
if target.props.executable and target.props.main:
raise KeyError(
'`main` and `executable` arguments are mutually exclusive')
if target.props.executable:
if target.props.executable not in target.artifacts.get(AT.app):
target.artifacts.add(AT.app, target.props.executable)
entrypoint = [target.props.executable]
elif target.props.main:
prog = build_context.targets[target.props.main]
binary = list(prog.artifacts.get(AT.binary).keys())[0]
entrypoint = ['/usr/src/bin/' + binary]
else:
raise KeyError('Must specify either `main` or `executable` argument')
build_app_docker_and_bin(
build_context, target, entrypoint=entrypoint) | Pack a C++ binary as a Docker image with its runtime dependencies.
TODO(itamar): Dynamically analyze the binary and copy shared objects
from its buildenv image to the runtime image, unless they're installed. |
def do_keys(self, keys):
for inst in keys:
typ = inst["kty"]
try:
_usage = harmonize_usage(inst['use'])
except KeyError:
_usage = ['']
else:
del inst['use']
flag = 0
for _use in _usage:
for _typ in [typ, typ.lower(), typ.upper()]:
try:
_key = K2C[_typ](use=_use, **inst)
except KeyError:
continue
except JWKException as err:
logger.warning('While loading keys: {}'.format(err))
else:
if _key not in self._keys:
self._keys.append(_key)
flag = 1
break
if not flag:
logger.warning(
'While loading keys, UnknownKeyType: {}'.format(typ)) | Go from JWK description to binary keys
:param keys:
:return: |
def create_event_permission(self, lambda_name, principal, source_arn):
logger.debug('Adding new permission to invoke Lambda function: {}'.format(lambda_name))
permission_response = self.lambda_client.add_permission(
FunctionName=lambda_name,
StatementId=''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8)),
Action='lambda:InvokeFunction',
Principal=principal,
SourceArn=source_arn,
)
if permission_response['ResponseMetadata']['HTTPStatusCode'] != 201:
print('Problem creating permission to invoke Lambda function')
return None
return permission_response | Create permissions to link to an event.
Related: http://docs.aws.amazon.com/lambda/latest/dg/with-s3-example-configure-event-source.html |
def construct(path, name=None):
"Selects an appropriate CGroup subclass for the given CGroup path."
name = name if name else path.split("/")[4]
classes = {"memory": Memory,
"cpu": CPU,
"cpuacct": CPUAcct}
constructor = classes.get(name, CGroup)
log.debug("Chose %s for: %s", constructor.__name__, path)
return constructor(path, name) | Selects an appropriate CGroup subclass for the given CGroup path. |
def memory_used(self):
    """Memory allocated between function start and termination.

    Useful for spotting memory leaks or RAM-greedy code.
    Returns None if the function is still running (no end snapshot yet).

    ..versionadded:: 4.1
    """
    # No truthy end snapshot means the measurement is not finished.
    if not self._end_memory:
        return None
    return self._end_memory - self._start_memory
def samples(self, anystring, limit=None, offset=None, sortby=None):
uri = self._uris['samples'].format(anystring)
params = {'limit': limit, 'offset': offset, 'sortby': sortby}
return self.get_parse(uri, params) | Return an object representing the samples identified by the input domain, IP, or URL |
def get_waiter(self, waiter_name):
config = self._get_waiter_config()
if not config:
raise ValueError("Waiter does not exist: %s" % waiter_name)
model = waiter.WaiterModel(config)
mapping = {}
for name in model.waiter_names:
mapping[xform_name(name)] = name
if waiter_name not in mapping:
raise ValueError("Waiter does not exist: %s" % waiter_name)
return waiter.create_waiter_with_client(
mapping[waiter_name], model, self, loop=self._loop) | Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters
section of the service docs for a list of available waiters.
:returns: The specified waiter object.
:rtype: botocore.waiter.Waiter |
def _buckets_nearly_equal(a_dist, b_dist):
a_type, a_buckets = _detect_bucket_option(a_dist)
b_type, b_buckets = _detect_bucket_option(b_dist)
if a_type != b_type:
return False
elif a_type == u'linearBuckets':
return _linear_buckets_nearly_equal(a_buckets, b_buckets)
elif a_type == u'exponentialBuckets':
return _exponential_buckets_nearly_equal(a_buckets, b_buckets)
elif a_type == u'explicitBuckets':
return _explicit_buckets_nearly_equal(a_buckets, b_buckets)
else:
return False | Determines whether two `Distributions` are nearly equal.
Args:
a_dist (:class:`Distribution`): an instance
b_dist (:class:`Distribution`): another instance
Return:
boolean: `True` if the two instances are approximately equal, otherwise
False |
def to_24bit_gray(mat: np.ndarray):
return np.repeat(np.expand_dims(_normalize(mat), axis=2), 3, axis=2) | returns a matrix that contains RGB channels, and colors scaled
from 0 to 255 |
def checkpoint_filepath(checkpoint, pm):
if isinstance(checkpoint, str):
if os.path.isabs(checkpoint):
if is_in_file_tree(checkpoint, pm.outfolder):
return checkpoint
else:
raise ValueError(
"Absolute checkpoint path '{}' is not in pipeline output "
"folder '{}'".format(checkpoint, pm.outfolder))
_, ext = os.path.splitext(checkpoint)
if ext == CHECKPOINT_EXTENSION:
return pipeline_filepath(pm, filename=checkpoint)
try:
pm = pm.manager
except AttributeError:
pass
chkpt_name = checkpoint_filename(checkpoint, pipeline_name=pm.name)
return pipeline_filepath(pm, filename=chkpt_name) | Create filepath for indicated checkpoint.
:param str | pypiper.Stage checkpoint: Pipeline phase/stage or one's name
:param pypiper.PipelineManager | pypiper.Pipeline pm: manager of a pipeline
instance, relevant for output folder path.
:return str: standardized checkpoint name for file, plus extension
:raise ValueError: if the checkpoint is given as absolute path that does
not point within pipeline output folder |
def _get_coordinatenames(self):
validnames = ("direction", "spectral", "linear", "stokes", "tabular")
self._names = [""] * len(validnames)
n = 0
for key in self._csys.keys():
for name in validnames:
if key.startswith(name):
idx = int(key[len(name):])
self._names[idx] = name
n += 1
self._names = self._names[:n][::-1]
if len(self._names) == 0:
raise LookupError("Coordinate record doesn't contain valid coordinates") | Create ordered list of coordinate names |
def set(self, path, value):
_log.debug(
"ZK: Setting {path} to {value}".format(path=path, value=value)
)
return self.zk.set(path, value) | Sets and returns new data for the specified node. |
def get_obj(path):
    """Return the object for the given dotted path.

    Typical inputs are 'os' or 'os.path' (a module) or 'os.path.exists'
    (an attribute of a module).  Non-string input is returned unchanged.
    Relative imports are not supported.

    :raises TypeError: for a relative (leading-dot) path
    :raises ImportError: if the root module cannot be imported
    :raises AttributeError: if an intermediate attribute is missing
    """
    if not isinstance(path, str):
        return path
    if path.startswith('.'):
        raise TypeError('relative imports are not supported')
    head, *tail = path.split('.')
    obj = importlib.import_module(head)
    for i, name in enumerate(tail):
        try:
            obj = getattr(obj, name)
        except AttributeError:
            # Distinguish "prefix is not importable" from "module exists but
            # lacks the attribute" to give a precise error message.
            module = '.'.join([head] + tail[:i])
            try:
                importlib.import_module(module)
            except ImportError:
                raise AttributeError(
                    "object '%s' has no attribute '%s'" % (module, name))
            else:
                raise AttributeError(
                    "module '%s' has no attribute '%s'" % (module, name))
    return obj
def get_stats(self, request, context):
_log_request(request, context)
m = self.listener.memory
return clearly_pb2.StatsMessage(
task_count=m.task_count,
event_count=m.event_count,
len_tasks=len(m.tasks),
len_workers=len(m.workers)
) | Returns the server statistics. |
def retrieve_data(self):
url = self.config.get('url')
timeout = float(self.config.get('timeout', 10))
self.data = requests.get(url, verify=self.verify_ssl, timeout=timeout).content | retrieve data from an HTTP URL |
def pretty_xml(data):
    """Return a pretty-formatted (tab-indented) XML byte string.

    :param data: UTF-8 encoded XML bytes
    :return: UTF-8 encoded, pretty-printed XML bytes
    """
    document = minidom.parseString(data.decode('utf-8'))
    return document.toprettyxml(indent='\t', encoding='utf-8')
def get_urls(self):
urlpatterns = super(TranslatableAdmin, self).get_urls()
if not self._has_translatable_model():
return urlpatterns
else:
opts = self.model._meta
info = opts.app_label, opts.model_name
return [url(
r'^(.+)/change/delete-translation/(.+)/$',
self.admin_site.admin_view(self.delete_translation),
name='{0}_{1}_delete_translation'.format(*info)
)] + urlpatterns | Add a delete-translation view. |
def hash_tree(filepath: str) -> str:
if isfile(filepath):
return hash_file(filepath)
if isdir(filepath):
base_dir = filepath
md5 = hashlib.md5()
for root, dirs, files in walk(base_dir):
dirs.sort()
for fname in sorted(files):
filepath = join(root, fname)
md5.update(relpath(filepath, base_dir)
.replace('\\', '/').encode('utf8'))
acc_hash(filepath, md5)
return md5.hexdigest()
return None | Return the hexdigest MD5 hash of file or directory at `filepath`.
If file - just hash file content.
If directory - walk the directory, and accumulate hashes of all the
relative paths + contents of files under the directory. |
def process_event(self, event):
new_event = event
if self._absolute:
new_event.y -= self._canvas.start_line
if self.is_mouse_over(new_event) and event.buttons != 0:
self._set_pos((new_event.y - self._y) / (self._height - 1))
return True
return False | Handle input on the scroll bar.
:param event: the event to be processed.
:returns: True if the scroll bar handled the event. |
def findentry(self, item):
    """Caseless lookup: return the stored entry matching *item*, or None.

    :param item: string to search for (case-insensitive)
    :raises TypeError: if item is not a string
    """
    if not isinstance(item, str):
        raise TypeError(
            'Members of this object must be strings. '
            'You supplied \"%s\"' % type(item))
    needle = item.lower()
    for candidate in self:
        if candidate.lower() == needle:
            return candidate
    return None
def _try_import(module_name):
try:
mod = importlib.import_module(module_name)
return mod
except ImportError:
err_msg = ("Tried importing %s but failed. See setup.py extras_require. "
"The dataset you are trying to use may have additional "
"dependencies.")
utils.reraise(err_msg) | Try importing a module, with an informative error message on failure. |
def get_data(conn_objs, providers):
cld_svc_map = {"aws": nodes_aws,
"azure": nodes_az,
"gcp": nodes_gcp,
"alicloud": nodes_ali}
sys.stdout.write("\rCollecting Info: ")
sys.stdout.flush()
busy_obj = busy_disp_on()
collec_fn = [[cld_svc_map[x.rstrip('1234567890')], conn_objs[x]]
for x in providers]
ngroup = Group()
node_list = []
node_list = ngroup.map(get_nodes, collec_fn)
ngroup.join()
busy_disp_off(dobj=busy_obj)
sys.stdout.write("\r \r")
sys.stdout.write("\033[?25h")
sys.stdout.flush()
return node_list | Refresh node data using existing connection-objects. |
def linkChunk(key, chunk):
    """Parse a LINK chunk into the appropriate link object.

    :param key: chunk key (kept for the chunk-parser dispatch signature)
    :param chunk: list of card strings; chunk[1] holds the link type token
    :return: result of the DX / STRUCTURE / RESERVOIR / LAKE link parser
    :raises ValueError: if the link type token is not recognized
    """
    # The link type is the first whitespace-delimited token on the second card.
    linkType = chunk[1].strip().split()[0]
    if linkType == 'DX':
        result = xSectionLink(chunk)
    elif linkType == 'STRUCTURE':
        result = structureLink(chunk)
    elif linkType in ('RESERVOIR', 'LAKE'):
        result = reservoirLink(chunk)
    else:
        # Previously fell through with `result` unbound, raising a confusing
        # UnboundLocalError; fail explicitly instead.
        raise ValueError('Unknown link type: {0}'.format(linkType))
    return result
def DeleteNotifications(self, session_ids, start=None, end=None):
if not session_ids:
return
for session_id in session_ids:
if not isinstance(session_id, rdfvalue.SessionID):
raise RuntimeError(
"Can only delete notifications for rdfvalue.SessionIDs.")
if start is None:
start = 0
else:
start = int(start)
if end is None:
end = self.frozen_timestamp or rdfvalue.RDFDatetime.Now()
for queue, ids in iteritems(
collection.Group(session_ids, lambda session_id: session_id.Queue())):
queue_shards = self.GetAllNotificationShards(queue)
self.data_store.DeleteNotifications(queue_shards, ids, start, end) | This deletes the notification when all messages have been processed. |
def dominant_sharp_ninth(note):
    """Build a dominant sharp ninth chord on note.

    Example:
    >>> dominant_sharp_ninth('C')
    ['C', 'E', 'G', 'Bb', 'D#']
    """
    # Start from the dominant ninth and replace the natural ninth (index 4)
    # with an augmented (sharp) ninth.
    res = dominant_ninth(note)
    res[4] = notes.augment(intervals.major_second(note))
    return res
def _make_sampling_sequence(n):
seq = list(range(5))
i = 50
while len(seq) < n:
seq.append(i)
i += 50
return seq | Return a list containing the proposed call event sampling sequence.
Return events are paired with call events and not counted separately.
This is 0, 1, 2, ..., 4 plus 50, 100, 150, 200, etc.
The total list size is n. |
def fractional_from_cartesian(coordinate, lattice_array):
    """Return a fractional coordinate converted from a cartesian one.

    :param coordinate: cartesian coordinate, numpy array of 3 values
    :param lattice_array: 3x3 lattice matrix; its inverse deorthogonalises
    :return: 1x3 numpy array of fractional coordinates
    """
    # The deorthogonalisation matrix is the inverse of the lattice matrix.
    deortho = np.matrix(np.linalg.inv(lattice_array))
    column = coordinate.reshape(-1, 1)
    fractional = deortho * column
    return np.array(fractional.reshape(1, -1))
def spellcheck(contents, technical_terms=None, spellcheck_cache=None):
contents = spelling.filter_nonspellcheckable_tokens(contents)
contents = _filter_disabled_regions(contents)
lines = contents.splitlines(True)
user_words, valid_words = valid_words_dictionary.create(spellcheck_cache)
technical_words = technical_words_dictionary.create(technical_terms,
spellcheck_cache)
return sorted([e for e in spellcheck_region(lines,
valid_words,
technical_words,
user_words)]) | Run spellcheck on the contents of a file.
:technical_terms: is a path to a file containing a list of "technical"
terms. These may be symbols as collected from files by using
the generic linter or other such symbols. If a symbol-like term is
used within contents and it does not appear in :technical_terms: then
an error will result.
:spellcheck_cache: is a path to a directory where graph files generated
by the spellchecking engine should be stored. It is used for caching
purposes between invocations, since generating the spellchecking
graph is an expensive operation which can take a few seconds to complete. |
def setting(self, setting_name, default=None):
    """Retrieve a setting value by dotted key, e.g. 'db.host'.

    :param setting_name: dot-separated path into the config mapping
    :param default: returned when any key on the path is missing
    """
    node = self._content
    for part in setting_name.split("."):
        if part not in node:
            return default
        node = node[part]
    return node
def validate_value(self, string_value):
    """Validate that a raw string value matches the Property specs.

    Applies the optional 'type' conversion, then checks the optional
    'min', 'max' and 'valid' constraints from self.specs.

    :return: the converted (or raw) value
    :raises ValueError: when any constraint is violated
    """
    specs = self.specs
    if 'type' in specs:
        value = specs['type'](string_value)
    else:
        value = string_value
    out_of_range = (('min' in specs and value < specs['min']) or
                    ('max' in specs and value > specs['max']))
    if out_of_range:
        raise ValueError
    if 'valid' in specs and value not in specs['valid']:
        raise ValueError
    return value
def redirectLoggerStreamHandlers(oldStream, newStream):
for handler in list(logger.handlers):
if handler.stream == oldStream:
handler.close()
logger.removeHandler(handler)
for handler in logger.handlers:
if handler.stream == newStream:
return
logger.addHandler(logging.StreamHandler(newStream)) | Redirect the stream of a stream handler to a different stream |
def summary(args):
from jcvi.graphics.histogram import loghistogram
p = OptionParser(summary.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
clstrfile, = args
cf = ClstrFile(clstrfile)
data = list(cf.iter_sizes())
loghistogram(data, summary=True) | %prog summary cdhit.clstr
Parse cdhit.clstr file to get distribution of cluster sizes. |
def _set_launcher_property(self, driver_arg_key, spark_property_key):
value = self._spark_launcher_args.get(driver_arg_key, self.conf._conf_dict.get(spark_property_key))
if value:
self._spark_launcher_args[driver_arg_key] = value
self.conf[spark_property_key] = value | Handler for a special property that exists in both the launcher arguments and the spark conf dictionary.
This will use the launcher argument if set falling back to the spark conf argument. If neither are set this is
a noop (which means that the standard spark defaults will be used).
Since `spark.driver.memory` (eg) can be set erroneously by a user on the standard spark conf, we want to be able
to use that value if present. If we do not have this fall-back behavior then these settings are IGNORED when
starting up the spark driver JVM under client mode (standalone, local, yarn-client or mesos-client).
Parameters
----------
driver_arg_key : string
Eg: "driver-memory"
spark_property_key : string
Eg: "spark.driver.memory" |
def order_by_header(table, headers):
    """Convert a list of dicts to a list of OrderedDicts ordered by headers.

    Keys not in *headers* are dropped; header keys missing from a row are
    filled with the empty string.
    """
    ordered_table = []
    for raw_row in table:
        filtered = {k: v for k, v in raw_row.items() if k in headers}
        for header in headers:
            filtered.setdefault(header, '')
        ordered_row = OrderedDict(
            sorted(filtered.items(), key=lambda kv: headers.index(kv[0])))
        ordered_table.append(ordered_row)
    return ordered_table
def releaseNetToMs():
a = TpPd(pd=0x3)
b = MessageType(mesType=0x2d)
c = CauseHdr(ieiC=0x08, eightBitC=0x0)
d = CauseHdr(ieiC=0x08, eightBitC=0x0)
e = FacilityHdr(ieiF=0x1C, eightBitF=0x0)
f = UserUserHdr(ieiUU=0x7E, eightBitUU=0x0)
packet = a / b / c / d / e / f
return packet | RELEASE Section 9.3.18.1 |
def make_chain(fns):
    """Take a list of chainable validators and return a chained validator.

    The functions should be decorated with the ``chainable`` decorator.
    Any exceptions raised by the validators propagate, except ``ReturnEarly``:
    when it is trapped, the original value passed to the chained validator
    is returned as-is.
    """
    chained = lambda value: value
    # Wrap from the last validator outwards so fns[0] runs first.
    for wrap in reversed(fns):
        chained = wrap(chained)

    def validator(value):
        try:
            return chained(value)
        except ReturnEarly:
            return value

    return validator
def _run_bcbio_variation(vrn_file, rm_file, rm_interval_file, base_dir, sample, caller, data):
    """Run validation of a caller against the truth set using bcbio.variation.

    Builds the validation config, runs the comparison if outputs are missing,
    and returns a dict with summary/grading/discordant/concordant paths.
    """
    val_config_file = _create_validate_config_file(vrn_file, rm_file, rm_interval_file,
                                                   base_dir, data)
    work_dir = os.path.join(base_dir, "work")
    out = {"summary": os.path.join(work_dir, "validate-summary.csv"),
           "grading": os.path.join(work_dir, "validate-grading.yaml"),
           "discordant": os.path.join(work_dir, "%s-eval-ref-discordance-annotate.vcf" % sample)}
    if not utils.file_exists(out["discordant"]) or not utils.file_exists(out["grading"]):
        bcbio_variation_comparison(val_config_file, base_dir, data)
    # Py2-era `filter(...)[0]` fails on Python 3 (filter returns an iterator,
    # which is not subscriptable); materialize and take the first existing file.
    concordant_candidates = [os.path.join(work_dir, "%s-%s-concordance.vcf" % (sample, x))
                             for x in ["eval-ref", "ref-eval"]]
    out["concordant"] = [f for f in concordant_candidates if os.path.exists(f)][0]
    return out
def rotate(self, shift):
self.child_corners.values[:] = np.roll(self.child_corners
.values, shift, axis=0)
self.update_transform() | Rotate 90 degrees clockwise `shift` times. If `shift` is negative,
rotate counter-clockwise. |
def _get_kind_name(param_type, is_list):
if issubclass(param_type, bool):
typename = 'bool'
elif issubclass(param_type, six.integer_types):
typename = 'int64'
elif issubclass(param_type, (six.string_types, six.binary_type)):
typename = 'bytes'
elif issubclass(param_type, float):
typename = 'float'
else:
raise ValueError('Unsupported parameter type: %s' % str(param_type))
suffix = 'list' if is_list else 'value'
return '_'.join([typename, suffix]) | Returns the field name given parameter type and is_list.
Args:
param_type: Data type of the hparam.
is_list: Whether this is a list.
Returns:
A string representation of the field name.
Raises:
ValueError: If parameter type is not recognized. |
def _GetSerializedPartitionList(self):
partition_list = list()
for part in self.partitions:
partition_list.append((part.node, unpack("<L", part.hash_value)[0]))
return partition_list | Gets the serialized version of the ConsistentRing.
Added this helper for the test code. |
def get_line_number(line_map, offset):
    """Find a 1-based line number, given a line map and a character offset.

    Returns the number of the first line whose start offset exceeds
    *offset*, or -1 if the offset lies beyond the map.
    """
    return next(
        (number for number, start in enumerate(line_map, start=1)
         if start > offset),
        -1,
    )
def create_xml_path(path, **kwargs):
try:
with salt.utils.files.fopen(path, 'r') as fp_:
return create_xml_str(
salt.utils.stringutils.to_unicode(fp_.read()),
**kwargs
)
except (OSError, IOError):
return False | Start a transient domain based on the XML-file path passed to the function
:param path: path to a file containing the libvirt XML definition of the domain
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.create_xml_path <path to XML file on the node> |
def encode_safely(self, data):
    """Pickle *data* and encode it with the configured base encoder.

    Best-effort: returns ``settings.null`` (with a RuntimeWarning) if the
    data cannot be serialized.
    """
    encoder = self.base_encoder
    result = settings.null
    try:
        result = encoder(pickle.dumps(data))
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed; serialization failures stay best-effort.
        warnings.warn("Data could not be serialized.", RuntimeWarning)
    return result
def get_response_attribute_filter(self, template_filter, template_model=None):
if template_filter is None:
return None
if 'Prestans-Response-Attribute-List' not in self.headers:
return None
attribute_list_str = self.headers['Prestans-Response-Attribute-List']
json_deserializer = deserializer.JSON()
attribute_list_dictionary = json_deserializer.loads(attribute_list_str)
attribute_filter = AttributeFilter(
from_dictionary=attribute_list_dictionary,
template_model=template_model
)
evaluated_filter = attribute_filter.conforms_to_template_filter(template_filter)
return evaluated_filter | Prestans-Response-Attribute-List can contain a client's requested
definition for attributes required in the response. This should match
the response_attribute_filter_template?
:param template_filter:
:param template_model: the expected model that this filter corresponds to
:return:
:rtype: None | AttributeFilter |
def bake(self):
options = self.options
default_exclude_list = options.pop('default_exclude')
options_exclude_list = options.pop('exclude')
excludes = default_exclude_list + options_exclude_list
x_list = options.pop('x')
exclude_args = ['--exclude={}'.format(exclude) for exclude in excludes]
x_args = tuple(('-x', x) for x in x_list)
self._ansible_lint_command = sh.ansible_lint.bake(
options,
exclude_args,
sum(x_args, ()),
self._playbook,
_env=self.env,
_out=LOG.out,
_err=LOG.error) | Bake an `ansible-lint` command so it's ready to execute and returns
None.
:return: None |
def reopen(args):
if not args.isadmin:
return "Nope, not gonna do it."
msg = args.msg.split()
if not msg:
return "Syntax: !poll reopen <pollnum>"
if not msg[0].isdigit():
return "Not a valid positve integer."
pid = int(msg[0])
poll = get_open_poll(args.session, pid)
if poll is None:
return "That poll doesn't exist or has been deleted!"
poll.active = 1
return "Poll %d reopened!" % pid | reopens a closed poll. |
def run_deferred(self, deferred):
    """Run the callables in *deferred* using their associated scope stacks.

    Each entry is a (handler, scope, offset) triple; the scope stack and
    offset are installed on self before the handler is invoked.
    """
    for entry in deferred:
        handler, self.scope_stack, self.offset = entry
        handler()
def get_version(self):
_, version = misc.get_version(self.confdir)
if version is None:
return "Can't get the version."
else:
return "cslbot - %s" % version | Get the version. |
def state_args(id_, state, high):
    """Return a set of the argument names passed to the named state.

    :param id_: state declaration ID within the high data
    :param state: state name under that ID
    :param high: the high data structure
    """
    found = set()
    for item in high.get(id_, {}).get(state, []):
        # Only single-key dicts represent keyword arguments.
        if isinstance(item, dict) and len(item) == 1:
            found.add(next(iter(item)))
    return found
def set(self) -> None:
    """Set the internal flag to ``True`` and awaken all waiters.

    Calling `.wait` once the flag is set will not block.
    """
    if self._value:
        # Already set: nothing to do, waiters were already awakened.
        return
    self._value = True
    for fut in self._waiters:
        if not fut.done():
            fut.set_result(None)
def build_select_fields(self):
field_sql = []
for table in self.tables:
field_sql += table.get_field_sql()
for join_item in self.joins:
field_sql += join_item.right_table.get_field_sql()
sql = 'SELECT {0}{1} '.format(self.get_distinct_sql(), ', '.join(field_sql))
return sql | Generates the sql for the SELECT portion of the query
:return: the SELECT portion of the query
:rtype: str |
def patch(self, patch):
data = apply_patch(dict(self), patch)
return self.__class__(data, model=self.model) | Patch record metadata.
:params patch: Dictionary of record metadata.
:returns: A new :class:`Record` instance. |
def set(self, value = True):
value = value in (True,1) or ( (isinstance(value, str) or (sys.version < '3' and isinstance(value, unicode))) and (value.lower() in ("1","yes","true","enabled")))
return super(BooleanParameter,self).set(value) | Set the boolean parameter |
def predict(self, sequences, y=None):
predictions = []
check_iter_of_sequences(sequences, allow_trajectory=self._allow_trajectory)
for X in sequences:
predictions.append(self.partial_predict(X))
return predictions | Predict the closest cluster each sample in each sequence in
sequences belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
sequences : list of array-like, each of shape [sequence_length, n_features]
A list of multivariate timeseries. Each sequence may have
a different length, but they all must have the same number
of features.
Returns
-------
Y : list of arrays, each of shape [sequence_length,]
Index of the closest center each sample belongs to. |
def _strip_value(value, lookup='exact'):
    """Remove branch and version information from the given value.

    :param value: a single object, or (for an 'in' lookup) a list of objects
    :param lookup: lookup type; 'in' means *value* is a list
    :return: stripped object, or list of stripped objects for 'in'
    """
    if lookup == 'in':
        return [_strip_object(element) for element in value]
    return _strip_object(value)
def set_dtreat_indt(self, t=None, indt=None):
lC = [indt is not None, t is not None]
if all(lC):
msg = "Please provide either t or indt (or none)!"
raise Exception(msg)
if lC[1]:
ind = self.select_t(t=t, out=bool)
else:
ind = _format_ind(indt, n=self._ddataRef['nt'])
self._dtreat['indt'] = ind
self._ddata['uptodate'] = False | Store the desired index array for the time vector
If an array of indices (refering to self.ddataRef['t'] is not provided,
uses self.select_t(t=t) to produce it |
def close(self):
if self._device is not None:
ibsta = self._lib.ibonl(self._device, 0)
self._check_status(ibsta)
self._device = None | Closes the gpib transport. |
def _string_width(string, *, _IS_ASCII=_IS_ASCII):
match = _IS_ASCII.match(string)
if match:
return match.endpos
UNICODE_WIDE_CHAR_TYPE = 'WFA'
width = 0
func = unicodedata.east_asian_width
for char in string:
width += 2 if func(char) in UNICODE_WIDE_CHAR_TYPE else 1
return width | Returns string's width. |
def get_matchers():
from . import matchers
def is_matcher_func(member):
return inspect.isfunction(member) and member.__name__.endswith("_matcher")
members = inspect.getmembers(matchers, is_matcher_func)
for name, func in members:
yield func | Get matcher functions from treeherder.autoclassify.matchers
We classify matchers as any function treeherder.autoclassify.matchers with
a name ending in _matcher. This is currently overkill but protects against
the unwarey engineer adding new functions to the matchers module that
shouldn't be treated as matchers. |
def show_data_file(fname):
txt = '<H2>' + fname + '</H2>'
print (fname)
txt += web.read_csv_to_html_table(fname, 'Y')
txt += '</div>\n'
return txt | shows a data file in CSV format - all files live in CORE folder |
def create_toolbutton(entries, parent=None):
btn = QtGui.QToolButton(parent)
menu = QtGui.QMenu()
actions = []
for label, slot in entries:
action = add_menu_action(menu, label, slot)
actions.append(action)
btn.setPopupMode(QtGui.QToolButton.MenuButtonPopup)
btn.setDefaultAction(actions[0])
btn.setMenu(menu)
return btn, actions | Create a toolbutton.
Args:
entries: List of (label, slot) tuples.
Returns:
`QtGui.QToolBar`. |
def redirect_to_unlocalized(*args, **kwargs):
endpoint = request.endpoint.replace('_redirect', '')
kwargs = multi_to_dict(request.args)
kwargs.update(request.view_args)
kwargs.pop('lang_code', None)
return redirect(url_for(endpoint, **kwargs)) | Redirect lang-prefixed urls to no prefixed URL. |
def polygon(self):
    """Return a polygon for the fence as a list of (lat, lng) tuples.

    The first stored point is skipped, matching the fence-point layout.
    """
    return [(fp.lat, fp.lng) for fp in self.points[1:]]
def register_event(self, event):
self.log('Registering event hook:', event.cmd, event.thing,
pretty=True, lvl=verbose)
self.hooks[event.cmd] = event.thing | Registers a new command line interface event hook as command |
def find(basedir, string):
    """Walk *basedir* and return all file paths matching pattern *string*.

    :param basedir: root directory to walk
    :param string: fnmatch-style glob pattern, e.g. '*.txt'
    """
    matches = []
    for root, _dirs, filenames in os.walk(basedir):
        matches.extend(os.path.join(root, filename)
                       for filename in fnmatch.filter(filenames, string))
    return matches
def _copy_chunk(src, dst, length):
"Copy length bytes from file src to file dst."
BUFSIZE = 128 * 1024
while length > 0:
l = min(BUFSIZE, length)
buf = src.read(l)
assert len(buf) == l
dst.write(buf)
length -= l | Copy length bytes from file src to file dst. |
def parse_rest_response(self, records, rowcount, row_type=list):
if self.is_plain_count:
assert list(records) == []
yield rowcount
else:
while True:
for row_deep in records:
assert self.is_aggregation == (row_deep['attributes']['type'] == 'AggregateResult')
row_flat = self._make_flat(row_deep, path=(), subroots=self.subroots)
assert all(not isinstance(x, dict) or x['done'] for x in row_flat)
if issubclass(row_type, dict):
yield {k: fix_data_type(row_flat[k.lower()]) for k in self.aliases}
else:
yield [fix_data_type(row_flat[k.lower()]) for k in self.aliases]
break | Parse the REST API response to DB API cursor flat response |
def _layout(self):
if not self.hist:
self.ax
return
if make_axes_locatable is None:
raise YellowbrickValueError((
"joint plot histograms requires matplotlib 2.0.2 or greater "
"please upgrade matplotlib or set hist=False on the visualizer"
))
divider = make_axes_locatable(self.ax)
self._xhax = divider.append_axes("top", size=1, pad=0.1, sharex=self.ax)
self._yhax = divider.append_axes("right", size=1, pad=0.1, sharey=self.ax)
self._xhax.xaxis.tick_top()
self._yhax.yaxis.tick_right()
self._xhax.grid(False, axis='y')
self._yhax.grid(False, axis='x') | Creates the grid layout for the joint plot, adding new axes for the histograms
if necessary and modifying the aspect ratio. Does not modify the axes or the
layout if self.hist is False or None. |
def cross(self, vector):
    """Return a Vector instance as the cross product of self and *vector*."""
    cx = self.y * vector.z - self.z * vector.y
    cy = self.z * vector.x - self.x * vector.z
    cz = self.x * vector.y - self.y * vector.x
    return Vector(cx, cy, cz)
def validate_property_directives(directives):
for directive_name in six.iterkeys(directives):
if directive_name in VERTEX_ONLY_DIRECTIVES:
raise GraphQLCompilationError(
u'Found vertex-only directive {} set on property.'.format(directive_name)) | Validate the directives that appear at a property field. |
def create_user(self, username, password, admin=False):
    """Create a new user in InfluxDB.

    :param username: the new username to create
    :type username: str
    :param password: the password for the new user
    :type password: str
    :param admin: whether the user should have cluster administration
        privileges or not
    :type admin: boolean
    """
    stmt = "CREATE USER {0} WITH PASSWORD {1}".format(
        quote_ident(username), quote_literal(password))
    if admin:
        stmt += ' WITH ALL PRIVILEGES'
    self.query(stmt, method="POST")
def _string_to_byte_list(self, data):
    """Hash ``data`` and return the first 16 digest bytes as integers.

    Returns:
        Length-16 list of integers in 0-255, one per byte of the
        hex digest produced by ``self.digest()``.
    """
    hasher = self.digest()
    hasher.update(str.encode(data))
    hex_digest = hasher.hexdigest()
    # Each pair of hex characters encodes one byte of the digest.
    return [int(hex_digest[i:i + 2], 16) for i in range(0, 32, 2)]
def setup(self,
          artifacts, use_tsk,
          reason, grr_server_url, grr_username, grr_password, approvers=None,
          verify=True):
    """Initializes a GRR Hunt artifact collector.

    Args:
      artifacts: str, comma-separated list of GRR-defined artifacts.
      use_tsk: toggle for use_tsk flag.
      reason: justification for GRR access.
      grr_server_url: GRR server URL.
      grr_username: GRR username.
      grr_password: GRR password.
      approvers: str, comma-separated list of GRR approval recipients.
      verify: boolean, whether to verify the GRR server's x509 certificate.
    """
    super(GRRHuntArtifactCollector, self).setup(
        reason, grr_server_url, grr_username, grr_password,
        approvers=approvers, verify=verify)
    # Validate before splitting: the original split first, which raised
    # AttributeError on a None `artifacts` instead of recording the
    # intended critical error.
    if not artifacts:
        self.state.add_error('No artifacts were specified.', critical=True)
        self.artifacts = []
    else:
        self.artifacts = [item.strip() for item in artifacts.strip().split(',')]
    self.use_tsk = use_tsk
def match_grade_system_id(self, grade_system_id, match):
    """Sets the grade system ``Id`` for this query.

    arg: grade_system_id (osid.id.Id): a grade system ``Id``
    arg: match (boolean): ``true`` for a positive match, ``false``
        for a negative match
    raise: NullArgument - ``grade_system_id`` is ``null``
    *compliance: mandatory -- This method must be implemented.*
    """
    id_text = str(grade_system_id)
    self._add_match('gradeSystemId', id_text, bool(match))
def is_equivalent(self, other, ignore=False):
    """Return ``True`` if the IPA string is equivalent to the ``other`` object.

    The ``other`` object can be:

    1. a Unicode string,
    2. a list of IPAChar objects, and
    3. another IPAString.

    :param variant other: the object to be compared against
    :param bool ignore: if other is a Unicode string, ignore Unicode
        characters not IPA valid
    :rtype: bool
    """
    def is_equivalent_to_list_of_ipachars(others):
        # Pairwise compare this string's canonical chars against `others`.
        my_ipa_chars = self.canonical_representation.ipa_chars
        if len(my_ipa_chars) != len(others):
            return False
        for mine, theirs in zip(my_ipa_chars, others):
            if not mine.is_equivalent(theirs):
                return False
        return True

    if is_unicode_string(other):
        # Narrowed from a bare `except:` which also swallowed
        # SystemExit/KeyboardInterrupt.
        try:
            parsed = IPAString(unicode_string=other, ignore=ignore)
            return is_equivalent_to_list_of_ipachars(parsed.ipa_chars)
        except Exception:
            return False
    if is_list_of_ipachars(other):
        try:
            return is_equivalent_to_list_of_ipachars(other)
        except Exception:
            return False
    if isinstance(other, IPAString):
        return is_equivalent_to_list_of_ipachars(other.canonical_representation.ipa_chars)
    return False
def cart2pol(x, y):
    """Cartesian to polar coordinates conversion.

    Returns:
        (theta, rho): angle in radians from the positive x-axis and the
        radial distance from the origin.
    """
    rho = np.hypot(x, y)
    theta = np.arctan2(y, x)
    return theta, rho
def decode_base64(data: str) -> bytes:
    """Decode base64, padding being optional.

    :param data: Base64 data as an ASCII byte string
    :returns: The decoded byte string.
    """
    # Re-append any '=' padding that optional-padding producers dropped;
    # (-len % 4) is 0..3, matching (4 - len % 4) % 4.
    padded = data + "=" * (-len(data) % 4)
    return base64.decodebytes(padded.encode("utf-8"))
def approx_eq(val: Any, other: Any, *, atol: Union[int, float] = 1e-8) -> bool:
    """Approximately compares two objects.

    If `val` implements the SupportsApproxEquality protocol then it is
    invoked and takes precedence over all other checks:

    - For primitive numeric types `int` and `float` approximate equality
      is delegated to math.isclose().
    - For the complex primitive type the real and imaginary parts are
      treated independently and compared using math.isclose().
    - For `val` and `other` both iterable of the same length, consecutive
      elements are compared recursively.

    Args:
        val: Source object for approximate comparison.
        other: Target object for approximate comparison.
        atol: The minimum absolute tolerance. Defaults to 1e-8, matching
            np.isclose()'s default absolute tolerance.

    Returns:
        True if objects are approximately equal, False otherwise.
    """
    # Protocol dispatch: `val`'s _approx_eq_ gets first shot, then `other`'s.
    for lhs, rhs in ((val, other), (other, val)):
        getter = getattr(lhs, '_approx_eq_', None)
        if getter is not None:
            outcome = getter(rhs, atol)
            if outcome is not NotImplemented:
                return outcome

    # Primitive numerics: types must match in kind (real vs complex).
    if isinstance(val, (int, float)):
        return isinstance(other, (int, float)) and _isclose(val, other, atol=atol)
    if isinstance(val, complex):
        return isinstance(other, complex) and _isclose(val, other, atol=atol)

    # Fall back to element-wise iterable comparison, then plain equality.
    outcome = _approx_eq_iterables(val, other, atol=atol)
    if outcome is NotImplemented:
        return val == other
    return outcome
def inclusion_tag(self, name, context_class=Context, takes_context=False):
    """Replacement for Django's ``inclusion_tag`` which looks up device
    specific templates at render time.
    """
    def tag_decorator(tag_func):
        @wraps(tag_func)
        def tag_wrapper(parser, token):
            class InclusionTagNode(template.Node):
                def render(self, context):
                    # Resolve the template lazily at render time so the
                    # request (and thus the device) is available.
                    # NOTE(review): the guard checks `nodelist` but only
                    # `self.template` is ever assigned, so this cache test
                    # may never hit — confirm intent.
                    if not getattr(self, "nodelist", False):
                        try:
                            request = context["request"]
                        except KeyError:
                            # No request in context: fall back to the
                            # plain, non-device-specific template.
                            t = get_template(name)
                        else:
                            ts = templates_for_device(request, name)
                            t = select_template(ts)
                        self.template = t
                    # Resolve the tag's arguments against the current context.
                    parts = [template.Variable(part).resolve(context)
                             for part in token.split_contents()[1:]]
                    if takes_context:
                        parts.insert(0, context)
                    result = tag_func(*parts)
                    # Render with a fresh context built from the tag result,
                    # preserving the caller's autoescape setting.
                    autoescape = context.autoescape
                    context = context_class(result, autoescape=autoescape)
                    return self.template.render(context)
            return InclusionTagNode()
        return self.tag(tag_wrapper)
    return tag_decorator
def run_checks(number_samples, k_choices):
    """Runs checks on `k_choices`.

    Args:
        number_samples: total number of available sample trajectories.
        k_choices: requested number of optimal trajectories (int).

    Raises:
        AssertionError: if `k_choices` is not an integer.
        ValueError: if `k_choices` is below 2 or not strictly less than
            `number_samples`.
    """
    assert isinstance(k_choices, int), \
        "Number of optimal trajectories should be an integer"
    if k_choices < 2:
        raise ValueError(
            "The number of optimal trajectories must be set to 2 or more.")
    if k_choices >= number_samples:
        # Implicit string concatenation instead of a backslash-continued
        # literal, which embedded a run of source-indentation spaces into
        # the user-facing message.
        raise ValueError(
            "The number of optimal trajectories should be less than the "
            "number of samples")
def remove_unused_links(self, used):
    """Removes all saved links except the ones that are used.

    Args:
        used (list): list of used links that should not be removed.
    """
    unused = []
    self._execute("SELECT * FROM {}".format(self.LINK_STATE_TABLE))
    for row in self.cursor:
        relpath, inode, mtime = row
        inode = self._from_sqlite(inode)
        path = os.path.join(self.root_dir, relpath)
        # Keep links that are still in use.
        if path in used:
            continue
        # A vanished path has nothing to remove; its record is kept as-is.
        if not os.path.exists(path):
            continue
        actual_inode = get_inode(path)
        actual_mtime, _ = get_mtime_and_size(path)
        # Only remove the file if it is unchanged since the link was
        # recorded (same inode and mtime) — otherwise it may hold new data.
        if inode == actual_inode and mtime == actual_mtime:
            logger.debug("Removing '{}' as unused link.".format(path))
            remove(path)
            unused.append(relpath)
    # Purge the removed links' records.
    # NOTE(review): relpath is interpolated directly into the DELETE
    # statement; fine for trusted repo-relative paths, but a
    # parameterized query would be safer — confirm `_execute` supports
    # bound parameters.
    for relpath in unused:
        cmd = 'DELETE FROM {} WHERE path = "{}"'
        self._execute(cmd.format(self.LINK_STATE_TABLE, relpath))
def show(self, baseAppInstance):
    """Allows to show the widget as root window.

    Populates the dialog fields from ``self.configDict`` before
    delegating to the superclass ``show``.
    """
    self.from_dict_to_fields(self.configDict)
    super(ProjectConfigurationDialog, self).show(baseAppInstance)
def getDataAtOffset(self, offset, size):
    """Gets binary data at a given offset.

    @type offset: int
    @param offset: The offset to get the data from.
    @type size: int
    @param size: The size of the data to be obtained.
    @rtype: str
    @return: The data obtained at the given offset.
    """
    return str(self)[offset:offset + size]
def _parse_jing_output(output):
    """Parse the jing output into a tuple of (line, column, type, message) records."""
    nonempty = (ln for ln in output.strip().split('\n') if ln)
    return tuple(_parse_jing_line(ln) for ln in nonempty)
def getExpressionLevels(
        self, threshold=0.0, names=None, startIndex=0, maxResults=0):
    """Returns the list of ExpressionLevels in this RNA Quantification.

    Args:
        threshold: minimum expression value to include.
        names: optional list of expression names to filter on.
        startIndex: offset of the first result to return.
        maxResults: maximum number of results to return.
    """
    # A mutable default argument ([]) is shared across calls; use None
    # as the sentinel and normalize it here instead.
    if names is None:
        names = []
    rnaQuantificationId = self.getLocalId()
    with self._db as dataSource:
        expressionsReturned = dataSource.searchExpressionLevelsInDb(
            rnaQuantificationId,
            names=names,
            threshold=threshold,
            startIndex=startIndex,
            maxResults=maxResults)
    expressionLevels = [
        SqliteExpressionLevel(self, expressionEntry) for
        expressionEntry in expressionsReturned]
    return expressionLevels
def detect(byte_str):
    """Detect the encoding of the given byte string.

    :param byte_str: The byte sequence to examine.
    :type byte_str: ``bytes`` or ``bytearray``
    """
    if isinstance(byte_str, bytearray):
        buf = byte_str
    elif isinstance(byte_str, bytes):
        buf = bytearray(byte_str)
    else:
        raise TypeError('Expected object of type bytes or bytearray, got: '
                        '{0}'.format(type(byte_str)))
    detector = UniversalDetector()
    detector.feed(buf)
    return detector.close()
def security_label_pivot(self, security_label_resource):
    """Pivot point on security labels for this resource.

    Returns a copy of this resource whose request URI is nested under
    the given security label's URI, so requests return all resources
    (groups, indicators, tasks, victims, etc.) carrying that label.

    Example endpoints::

        GET /v2/securityLabels/{resourceId}/groups/{resourceType}
        GET /v2/securityLabels/{resourceId}/indicators/{resourceType}

    Args:
        security_label_resource: the security label resource (pivot id is
            the security label name).
    """
    pivoted = self.copy()
    pivoted._request_uri = '{}/{}'.format(
        security_label_resource.request_uri, pivoted._request_uri)
    return pivoted
def _get_sorted_cond_keys(self, keys_list):
    """Return the elements of ``keys_list`` starting with 'analysisservice-'.

    The returned list is sorted lexicographically, i.e. by the index
    appended to the end of each element.
    """
    return sorted(key for key in keys_list
                  if key.startswith('analysisservice-'))
def get_writer_position(self, name):
    """Get the current writer position (timestamp).

    Returns 0 when the named writer has no recorded history.
    """
    cursor = self.cursor
    cursor.execute('SELECT timestamp FROM gauged_writer_history '
                   'WHERE id = %s', (name,))
    row = cursor.fetchone()
    if row:
        return row[0]
    return 0
def cat(dataset, query, bounds, indent, compact, dst_crs, pagesize, sortby):
    """Write DataBC features to stdout as GeoJSON feature objects."""
    # Assemble json.dumps keyword options from the CLI flags.
    dump_kwds = {"sort_keys": True}
    if indent:
        dump_kwds["indent"] = indent
    if compact:
        dump_kwds["separators"] = (",", ":")
    table = bcdata.validate_name(dataset)
    features = bcdata.get_features(
        table, query=query, bounds=bounds, sortby=sortby, crs=dst_crs)
    for feat in features:
        click.echo(json.dumps(feat, **dump_kwds))
def create_vip(self):
    """Get an instance of the VIP services facade.

    Returns a ``Vip`` bound to this client's endpoint and credentials.
    """
    return Vip(self.networkapi_url, self.user, self.password, self.user_ldap)
def authorization_link(self, redirect_uri):
    """Construct OAuth2 authorization link.

    Params: redirect_uri -> URI for receiving callback with token
    Returns authorization URL as string
    """
    query = '?client_id=%s&redirect_uri=%s' % (self.app_key, redirect_uri)
    return '/'.join([BASE_URL, API_VERSION, OAUTH, query])
def append(self, objects):
    """Append the specified child based on whether it is an element or an
    attribute.

    @param objects: A (single|collection) of attribute(s) or element(s)
        to be added as children.
    @type objects: (L{Element}|L{Attribute})
    @return: self
    @rtype: L{Element}
    """
    # Normalize a single child into a one-element tuple.
    if not isinstance(objects, (list, tuple)):
        objects = (objects,)
    for child in objects:
        if isinstance(child, Element):
            self.children.append(child)
            child.parent = self
        elif isinstance(child, Attribute):
            self.attributes.append(child)
            child.parent = self
        else:
            raise Exception("append %s not-valid" %
                            (child.__class__.__name__,))
    return self
def clear_annotation_data(self):
    """Clear annotation data, resetting every container to empty."""
    self.gene_annotations = {}
    self.term_annotations = {}
    self.annotations = []
    self.genes = set()
def uptime():
    """Uptime of the host machine, announced via ``bob``."""
    from datetime import timedelta
    # /proc/uptime's first field is the uptime in seconds (Linux only).
    with open('/proc/uptime', 'r') as proc_file:
        seconds = float(proc_file.readline().split()[0])
    bob.says(str(timedelta(seconds=seconds)))
def _from_dict(cls, _dict):
    """Initialize a Expansions object from a json dictionary.

    Raises:
        ValueError: if the required 'expansions' property is missing.
    """
    if 'expansions' not in _dict:
        raise ValueError(
            'Required property \'expansions\' not present in Expansions JSON'
        )
    expansions = [Expansion._from_dict(x) for x in _dict.get('expansions')]
    return cls(expansions=expansions)
def replay(journal_entry, function, *args, **kwargs):
    """Calls method in replay context so that no journal entries are
    created, expected side-effects are checked, and no asynchronous task
    is started.

    The journal entry is only used to fetch side-effects results.
    """
    section = fiber.WovenSection()
    section.enter()
    # Only the outermost replay call initializes replay mode and the
    # entry to read side-effect results from; nested calls inherit it.
    journal_mode = section.state.get(RECMODE_TAG, None)
    is_first = journal_mode is None
    if is_first:
        section.state[RECMODE_TAG] = JournalMode.replay
        section.state[JOURNAL_ENTRY_TAG] = IJournalReplayEntry(journal_entry)
    result = function(*args, **kwargs)
    # NOTE(review): abort() presumably unwinds the section without
    # committing side effects, propagating `result` — confirm against
    # the fiber section semantics.
    section.abort(result)
    return result
def resolve(checks, lazy, quiet):
    """Resolve transient dependencies for any number of checks.

    If you want to do this en masse, put `all`.
    """
    root = get_root()
    if 'all' in checks:
        checks = os.listdir(root)

    for check_name in sorted(checks):
        pinned_reqs_file = os.path.join(root, check_name, 'requirements.in')
        resolved_reqs_file = os.path.join(root, check_name, 'requirements.txt')

        # Skip checks that have no pinned requirements.
        if not os.path.isfile(pinned_reqs_file):
            continue

        if not quiet:
            echo_info('Check `{}`:'.format(check_name))
            echo_waiting(' Resolving dependencies...')

        pre_packages = read_packages(resolved_reqs_file)
        result = resolve_requirements(pinned_reqs_file, resolved_reqs_file, lazy=lazy)
        if result.code:
            abort(result.stdout + result.stderr)

        if not quiet:
            post_packages = read_packages(resolved_reqs_file)
            display_package_changes(pre_packages, post_packages, indent=' ')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.