code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def change(self, inpt, hashfun=DEFAULT_HASHFUN):
    """Change the avatar by providing a new input.

    Uses the standard hash function if none is given.
    """
    new_image = self.__create_image(inpt, hashfun)
    self.img = new_image
def enable_step_on_branch_mode(cls):
    """Enable step-on-branch mode by writing the DebugCtl MSR.

    When tracing, call this on every single step event.

    @raise WindowsError:
        Raises C{ERROR_DEBUGGER_INACTIVE} if the debugger is not attached
        to at least one process.
    @raise NotImplementedError:
        Current architecture is not C{i386} or C{amd64}.
    @warning:
        This method uses the processor's machine specific registers (MSR).
        It could potentially brick your machine.
    @note:
        It doesn't seem to work in VMWare or VirtualBox machines, and may
        fail in other virtualization/emulation environments.
    """
    flags = DebugRegister.BranchTrapFlag | DebugRegister.LastBranchRecord
    cls.write_msr(DebugRegister.DebugCtlMSR, flags)
def _assign(self, values):
    """Assign the values passed as either a dict or list to the object if
    the key for each value matches an available attribute on the object.

    :param dict values: The values to assign
    """
    LOGGER.debug('Assigning values: %r', values)
    if not values:
        return
    # list() is required: in Python 3 dict.keys() returns a view object
    # that does not support append(), so the original code raised
    # AttributeError whenever self._ref was falsy.
    keys = list(self.keys())
    if not self._ref:
        keys.append('_ref')
    if isinstance(values, dict):
        for key in keys:
            # NOTE: truthiness check intentionally skips falsy values.
            if values.get(key):
                if isinstance(values.get(key), list):
                    items = list()
                    for item in values[key]:
                        if isinstance(item, dict):
                            if '_ref' in item:
                                # Wrap referenced objects in their class.
                                obj_class = get_class(item['_ref'])
                                if obj_class:
                                    items.append(obj_class(self._session,
                                                           **item))
                        else:
                            items.append(item)
                    setattr(self, key, items)
                else:
                    setattr(self, key, values[key])
    elif isinstance(values, list):
        # A list response wraps the actual payload; use the first element.
        self._assign(values[0])
    else:
        LOGGER.critical('Unhandled return type: %r', values)
def detect_extracellular_compartment(model):
    """Detect the identifier for equations with extracellular compartments.

    The compartment that most often appears in single-compound (exchange)
    equations is chosen.

    Args:
        model: :class:`NativeModel`.
    """
    compartment_counts = Counter()
    for reaction in model.reactions:
        equation = reaction.equation
        if equation is None:
            continue
        if len(equation.compounds) != 1:
            continue
        compound, _ = equation.compounds[0]
        compartment_counts[compound.compartment] += 1
    if not compartment_counts:
        return None
    best_key, _ = compartment_counts.most_common(1)[0]
    logger.info('{} is extracellular compartment'.format(best_key))
    return best_key
def get_rnn_cells(self) -> List[mx.rnn.BaseRNNCell]:
    """Return the RNNCells used by this bidirectional encoder
    (forward cells first, then reverse cells)."""
    forward_cells = self.forward_rnn.get_rnn_cells()
    reverse_cells = self.reverse_rnn.get_rnn_cells()
    return forward_cells + reverse_cells
def global_permission_set(self):
    """All users must be authenticated. Only admins can create other
    admin users."""
    # Allowed when the target record is not an admin user.
    not_creating_admin = ObjAttrTrue(
        lambda r, _: r.data.get('admin') is not True)
    only_admins_create_admins = Or(
        AllowAdmin,
        And(
            not_creating_admin,
            Or(AllowPermission('org:admin'))
        )
    )
    return And(
        AllowOnlyAuthenticated,
        Or(
            Not(AllowCreate),
            only_admins_create_admins
        )
    )
def get_record_collections(record, matcher):
    """Return list of collections to which record belongs.

    :param record: Record instance.
    :param matcher: Function used to check if a record belongs to a collection.
    :return: list of collection names.
    """
    # Lazily build and cache the collections lookup table.
    cache = current_collections.collections
    if cache is None:
        cache = current_collections.collections = dict(_build_cache())
    output = set()
    for matched in matcher(cache, record):
        output |= matched
    return list(output)
def _build_netengine_arguments(self):
arguments = {
"host": self.host
}
if self.config is not None:
for key, value in self.config.iteritems():
arguments[key] = value
if self.port:
arguments["port"] = self.port
return arguments | returns a python dictionary representing arguments
that will be passed to a netengine backend
for internal use only |
def file_match_any(self, filename):
    """Return True if ``filename`` matches any of the file selectors.

    A leading './' is stripped and OS-specific separators are normalized
    to '/' first. Patterns ending in '/' match by prefix; all other
    patterns are matched with fnmatch.
    """
    if filename.startswith('.' + os.sep):
        filename = filename[len(os.sep) + 1:]
    if os.sep != '/':
        filename = filename.replace(os.sep, '/')
    for selector in self.file_selectors:
        pattern = selector.pattern
        if pattern.endswith('/') and filename.startswith(pattern):
            return True
        if fnmatch.fnmatch(filename, pattern):
            return True
    return False
def _convert_url_to_downloadable(url):
if 'drive.google.com' in url:
file_id = url.split('d/')[1].split('/')[0]
base_url = 'https://drive.google.com/uc?export=download&id='
out = '{}{}'.format(base_url, file_id)
elif 'dropbox.com' in url:
if url.endswith('.png'):
out = url + '?dl=1'
else:
out = url.replace('dl=0', 'dl=1')
elif 'github.com' in url:
out = url.replace('github.com', 'raw.githubusercontent.com')
out = out.replace('blob/', '')
else:
out = url
return out | Convert a url to the proper style depending on its website. |
def resolve_pname(self, pname: PrefName,
                  mid: ModuleId) -> Tuple[YangIdentifier, ModuleId]:
    """Return the name and the identifier of the module defining it.

    Args:
        pname: Name with an optional prefix.
        mid: Identifier of the module in which `pname` appears.

    Raises:
        ModuleNotRegistered: If `mid` is not registered in the data model.
        UnknownPrefix: If the prefix specified in `pname` is not declared.
    """
    prefix, sep, local = pname.partition(":")
    try:
        mdata = self.modules[mid]
    except KeyError:
        raise ModuleNotRegistered(*mid) from None
    try:
        if sep:
            return (local, mdata.prefix_map[prefix])
        return (prefix, mdata.main_module)
    except KeyError:
        raise UnknownPrefix(prefix, mid) from None
def retry(func, exception_type, quit_event):
    """Run ``func`` repeatedly until it succeeds, retrying whenever
    ``exception_type`` is raised.

    ``quit_event`` is polled on each iteration, to be responsive to an
    external exit request; when it is set, StopIteration is raised.
    """
    while not quit_event.is_set():
        try:
            return func()
        except exception_type:
            continue
    raise StopIteration
def execute(self, view, include=None):
    """Execute a view.

    :param view: View or view id
    :param include: list of objects to sideload. `Side-loading API Docs
        <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__.
    """
    url = self._build_url(self.endpoint.execute(id=view, include=include))
    return self._get(url)
def setup_logging(handler, exclude=("gunicorn", "south", "elasticapm.errors")):
    """Configure root logging to pipe to Elastic APM.

    - ``exclude`` is a list of loggers that shouldn't go to Elastic APM.

    Returns True if the handler was installed, False if a handler of the
    same class was already present on the root logger.
    """
    root = logging.getLogger()
    installed_types = [type(h) for h in root.handlers]
    if handler.__class__ in installed_types:
        return False
    root.addHandler(handler)
    return True
def get_checks(self, argument_name):
    """Get all the checks for this category.

    Finds all globally visible functions where the first argument name
    starts with argument_name and which contain selected tests.
    """
    selected = [
        (check.__name__, check, args)
        for check, (codes, args) in _checks[argument_name].items()
        if any(not (code and self.ignore_code(code)) for code in codes)
    ]
    return sorted(selected)
def new_station(self, _id, callSign, name, affiliate, fccChannelNumber):
    """Callback run for each new station; prints it when station
    verbosity is enabled."""
    if not self.__v_station:
        return
    print("[Station: %s, %s, %s, %s, %s]" %
          (_id, callSign, name, affiliate, fccChannelNumber))
def resize(self, newsize, zeros=True):
    """Resize the data arrays.

    This can only be performed on the container (base) segment; child
    segments must adjust their rawdata to point to the correct place.
    Since segments don't keep references to other segments, it is the
    user's responsibility to update any child segments that point to this
    segment's data — that has to happen outside this method.

    Returns a (origsize, newsize) tuple.
    """
    if not self.can_resize:
        raise ValueError("Segment %s can't be resized" % str(self))
    if not self.rawdata.is_base:
        raise ValueError("Only container segments can be resized")
    origsize = len(self)
    # Numpy can't resize in place when views exist, so the raw data is
    # replaced and re-bound here.
    self.rawdata.resize(newsize)
    self.set_raw(self.rawdata)
    newsize = len(self)
    # Zero-fill any newly grown region of data and style.
    if zeros and newsize > origsize:
        self.data[origsize:] = 0
        self.style[origsize:] = 0
    return origsize, newsize
def delete(name, dry_run, verbose):
    """Delete a collection by its exact name."""
    # NOTE(review): dry_run is accepted but never consulted — confirm intent.
    collection = Collection.query.filter_by(name=name).one()
    if verbose:
        tree = LeftAligned(traverse=AttributeTraversal())
        click.secho(tree(collection), fg='red')
    db.session.delete(collection)
def _stop_processes(paths):
    """Scan the process list, terminating processes matching the paths
    specified. Checksums identify processes that are duplicate copies of
    the executables given.

    `paths`
        List of full paths to executables for processes to terminate.
    """
    def cache_checksum(path):
        # Memoize checksums so each executable is hashed at most once.
        if not path:
            return None
        if path not in _process_checksums:
            _process_checksums[path] = _get_checksum(path)
        return _process_checksums[path]

    if not paths:
        return
    target_checksums = {cache_checksum(p): 1 for p in paths}
    if not target_checksums:
        return
    for proc, path in _get_user_processes():
        if cache_checksum(path) in target_checksums:
            try:
                proc.terminate()
            except (psutil.AccessDenied, psutil.NoSuchProcess):
                # Best-effort: skip processes we can't touch.
                pass
def update(xCqNck7t, **kwargs):
    """Update the Dict with the given values.

    Nested dicts become Dict instances, lists are converted element-wise
    (decoding bytes to str) and all other values are stored unchanged.
    """
    def dict_list_val(inlist):
        converted = []
        for item in inlist:
            # type() comparisons are kept deliberately: subclasses are
            # intentionally not converted.
            if type(item) == dict:
                converted.append(Dict(**item))
            elif type(item) == list:
                converted.append(make_list(item))
            elif type(item) == bytes:
                converted.append(item.decode('UTF-8'))
            else:
                converted.append(item)
        return converted

    for key in list(kwargs.keys()):
        value = kwargs[key]
        if type(value) == dict:
            xCqNck7t[key] = Dict(**value)
        elif type(value) == list:
            xCqNck7t[key] = dict_list_val(value)
        else:
            xCqNck7t[key] = value
def underlying_likelihood(self, binary_outcomes, modelparams, expparams):
    """Given outcomes hypothesized for the underlying model, return the
    likelihood with which those outcomes occur."""
    orig_params = modelparams[..., self._orig_mps_slice]
    return self.underlying_model.likelihood(
        binary_outcomes, orig_params, expparams)
def _login(self, environ, start_response):
    """Send a 401 Basic-auth login challenge back to the client."""
    challenge = HTTPUnauthorized()
    challenge.www_authenticate = ('Basic', {'realm': self._realm})
    return challenge(environ, start_response)
def get_assessment_taken(self, assessment_taken_id):
    """Get the ``AssessmentTaken`` specified by its ``Id``.

    In plenary mode, the exact ``Id`` is found or ``NotFound`` results.
    Otherwise the returned ``AssessmentTaken`` may have a different ``Id``
    than requested (e.g. a retained duplicate ``Id``).

    arg: assessment_taken_id (osid.id.Id): ``Id`` of the ``AssessmentTaken``
    return: (osid.assessment.AssessmentTaken) - the assessment taken
    raise: NotFound - ``assessment_taken_id`` not found
    raise: NullArgument - ``assessment_taken_id`` is ``null``
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure occurred
    *compliance: mandatory -- This method must be implemented.*
    """
    collection = JSONClientValidated('assessment',
                                     collection='AssessmentTaken',
                                     runtime=self._runtime)
    identifier = self._get_id(assessment_taken_id, 'assessment').get_identifier()
    result = collection.find_one(
        dict({'_id': ObjectId(identifier)}, **self._view_filter()))
    return objects.AssessmentTaken(osid_object_map=result,
                                   runtime=self._runtime,
                                   proxy=self._proxy)
def run_subprocess(command: str, verbose: bool = True, blocking: bool = True) \
        -> Optional[subprocess.Popen]:
    """Execute the given command in a new process.

    When `blocking` is True the call waits for completion and returns
    None; stdout/stderr are printed only when `verbose` is also True.
    When `blocking` is False a `subprocess.Popen` object is returned and
    output is suppressed unless `verbose` is True.
    """
    if not blocking:
        sink = None if verbose else subprocess.DEVNULL
        return subprocess.Popen(
            command,
            stdout=sink,
            stderr=sink,
            encoding='utf-8',
            shell=True)
    completed = subprocess.run(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        encoding='utf-8',
        shell=True)
    if verbose:
        for text in (completed.stdout, completed.stderr):
            text = text.strip()
            if text:
                print(text)
    return None
def integral_scale(u, t, tau1=0.0, tau2=1.0):
    """Calculate the integral scale of a time series by integrating the
    autocorrelation up to its first zero crossing."""
    tau, rho = autocorr_coeff(u, t, tau1, tau2)
    first_zero = np.where(np.diff(np.sign(rho)))[0][0]
    return np.trapz(rho[:first_zero], tau[:first_zero])
def bbox(self):
    """Bounding box as concatenated minimum and maximum coordinates."""
    lower = amin(self.coordinates, axis=0)
    upper = amax(self.coordinates, axis=0)
    return concatenate((lower, upper))
def _trigger_params_changed(self, trigger_parent=True):
[p._trigger_params_changed(trigger_parent=False) for p in self.parameters if not p.is_fixed]
self.notify_observers(None, None if trigger_parent else -np.inf) | First tell all children to update,
then update yourself.
If trigger_parent is True, we will tell the parent, otherwise not. |
def tags(norm):
    """Divide a normalized dotted tag string into hierarchical layers.

    >>> tags('a.b.c')
    ['a', 'a.b', 'a.b.c']
    """
    parts = norm.split('.')
    return ['.'.join(parts[:depth]) for depth in range(1, len(parts) + 1)]
def solve_semi_dual_entropic(a, b, M, reg, method, numItermax=10000, lr=None,
log=False):
if method.lower() == "sag":
opt_beta = sag_entropic_transport(a, b, M, reg, numItermax, lr)
elif method.lower() == "asgd":
opt_beta = averaged_sgd_entropic_transport(a, b, M, reg, numItermax, lr)
else:
print("Please, select your method between SAG and ASGD")
return None
opt_alpha = c_transform_entropic(b, M, reg, opt_beta)
pi = (np.exp((opt_alpha[:, None] + opt_beta[None, :] - M[:, :]) / reg) *
a[:, None] * b[None, :])
if log:
log = {}
log['alpha'] = opt_alpha
log['beta'] = opt_beta
return pi, log
else:
return pi | Compute the transportation matrix to solve the regularized discrete
measures optimal transport max problem
The function solves the following optimization problem:
.. math::
\gamma = arg\min_\gamma <\gamma,M>_F + reg\cdot\Omega(\gamma)
s.t. \gamma 1 = a
\gamma^T 1= b
\gamma \geq 0
Where :
- M is the (ns,nt) metric cost matrix
- :math:`\Omega` is the entropic regularization term with :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- a and b are source and target weights (sum to 1)
The algorithm used for solving the problem is the SAG or ASGD algorithms
as proposed in [18]_
Parameters
----------
a : np.ndarray(ns,)
source measure
b : np.ndarray(nt,)
target measure
M : np.ndarray(ns, nt)
cost matrix
reg : float number
Regularization term > 0
methode : str
used method (SAG or ASGD)
numItermax : int number
number of iteration
lr : float number
learning rate
n_source : int number
size of the source measure
n_target : int number
size of the target measure
log : bool, optional
record log if True
Returns
-------
pi : np.ndarray(ns, nt)
transportation matrix
log : dict
log dictionary return only if log==True in parameters
Examples
--------
>>> n_source = 7
>>> n_target = 4
>>> reg = 1
>>> numItermax = 300000
>>> a = ot.utils.unif(n_source)
>>> b = ot.utils.unif(n_target)
>>> rng = np.random.RandomState(0)
>>> X_source = rng.randn(n_source, 2)
>>> Y_target = rng.randn(n_target, 2)
>>> M = ot.dist(X_source, Y_target)
>>> method = "ASGD"
>>> asgd_pi = stochastic.solve_semi_dual_entropic(a, b, M, reg,
method, numItermax)
>>> print(asgd_pi)
References
----------
[Genevay et al., 2016] :
Stochastic Optimization for Large-scale Optimal Transport,
Advances in Neural Information Processing Systems (2016),
arXiv preprint arxiv:1605.08527. |
def boolean(
        element_name,
        attribute=None,
        required=True,
        alias=None,
        default=False,
        omit_empty=False,
        hooks=None
):
    """Create a processor for boolean values.

    :param element_name: Name of the XML element containing the value.
        Supported XPath syntax may also be used.
    :param attribute: If given, the value is read from this attribute of
        the element; otherwise from the element's contents.
    :param required: Whether the value is required when parsing and
        serializing.
    :param alias: Name of the value when read from XML; defaults to
        element_name.
    :param default: Default value when the element is absent (only valid
        with required=False).
    :param omit_empty: If True, standalone Falsey values are omitted when
        serializing to XML (array elements are never omitted).
    :param hooks: A Hooks object.
    :return: A declxml processor object.
    """
    return _PrimitiveValue(
        element_name,
        _parse_boolean,
        attribute,
        required,
        alias,
        default,
        omit_empty,
        hooks
    )
def to_match(self):
    """Return a unicode object with the MATCH representation of this
    UnaryTransformation."""
    self.validate()
    operator_map = {
        u'size': u'size()',
    }
    match_operator = operator_map.get(self.operator)
    if not match_operator:
        raise AssertionError(u'Unrecognized operator used: '
                             u'{} {}'.format(self.operator, self))
    return u'%(inner)s.%(operator)s' % {
        'inner': self.inner_expression.to_match(),
        'operator': match_operator,
    }
async def iter_lines(
        self,
        *cmds: str,
        stream: str='both') -> AsyncGenerator[str, None]:
    """Coroutine to spawn commands and yield decoded, right-stripped text
    lines from their output.

    ``stream`` selects 'stdout', 'stderr' or 'both'.
    """
    sps = self.spawn(*cmds)
    if stream == 'stdout':
        agen = amerge(*[sp.stdout for sp in sps])
    elif stream == 'stderr':
        agen = amerge(*[sp.stderr for sp in sps])
    elif stream == 'both':
        agen = amerge(
            amerge(*[sp.stdout for sp in sps]),
            amerge(*[sp.stderr for sp in sps]))
    else:
        raise SublemonRuntimeError(
            'Invalid `stream` kwarg received: `' + str(stream) + '`')
    async for raw_line in agen:
        yield raw_line.decode('utf-8').rstrip()
def as_requirement(self):
    """Return a ``Requirement`` that matches this distribution exactly."""
    if isinstance(self.parsed_version, packaging.version.Version):
        op = "=="
    else:
        # Non-PEP-440 versions need the arbitrary-equality operator.
        op = "==="
    return Requirement.parse(
        "%s%s%s" % (self.project_name, op, self.parsed_version))
def sys_check_for_event(
    mask: int, k: Optional[Key], m: Optional[Mouse]
) -> int:
    """Check for and return an event.

    Args:
        mask (int): :any:`Event types` to wait for.
        k (Optional[Key]): A tcod.Key instance which might be updated with
            an event. Can be None.
        m (Optional[Mouse]): A tcod.Mouse instance which might be updated
            with an event. Can be None.

    .. deprecated:: 9.3
        Use the :any:`tcod.event.get` function to check for events.
    """
    key_p = k.key_p if k else ffi.NULL
    mouse_p = m.mouse_p if m else ffi.NULL
    return int(lib.TCOD_sys_check_for_event(mask, key_p, mouse_p))
def commit_config(self, label=None, comment=None, confirmed=None):
    """Commit the candidate config.

    :param label: Commit label, displayed instead of the commit ID on the
        device.
    :param comment: Commit comment, displayed in the commit entry on the
        device. (Max 60 characters.)
    :param confirmed: Commit with auto-rollback if no new commit is made
        within 30 to 300 seconds.
    """
    parts = ['<Commit']
    if label:
        parts.append(' Label="%s"' % label)
    if comment:
        parts.append(' Comment="%s"' % comment[:60])
    if confirmed:
        seconds = int(confirmed)
        if not 30 <= seconds <= 300:
            raise InvalidInputError('confirmed needs to be between 30 and 300 seconds', self)
        parts.append(' Confirmed="%d"' % seconds)
    parts.append('/>')
    self._execute_rpc(''.join(parts))
def get_active_clients():
    """Get a JSON response of all active clients and their status."""
    global drivers
    if not drivers:
        return jsonify([])
    return jsonify({client: get_client_info(client) for client in drivers})
def performAction(self, action):
    """Execute one action, advancing the time step and sample counter."""
    # Order matters: time is advanced before the action is executed.
    self.t += 1
    Task.performAction(self, action)
    self.samples += 1
def clampings_iter(self, cues=None):
    """Iterate over all possible clampings of this experimental setup.

    Parameters
    ----------
    cues : Optional[iterable]
        If given, restricts clampings over given species names

    Yields
    ------
    caspo.core.clamping.Clamping
        The next clamping with respect to the experimental setup
    """
    species = cues or list(self.stimuli + self.inhibitors)
    # All subsets of the species, from the empty set upwards.
    # range() replaces Python 2's xrange(), which no longer exists.
    clampings = it.chain.from_iterable(
        it.combinations(species, r) for r in range(len(species) + 1))
    literals_tpl = {stimulus: -1 for stimulus in self.stimuli}
    for clamping in clampings:
        literals = literals_tpl.copy()
        for cue in clamping:
            # Stimuli are clamped on (1); everything else off (-1).
            literals[cue] = 1 if cue in self.stimuli else -1
        # dict.iteritems() was removed in Python 3; items() works on both.
        yield Clamping(literals.items())
def _perturbation(self):
    """Return a Gaussian perturbation of the scales."""
    if self.P <= 1:
        return SP.randn(self.vd.getNumberScales())
    scales = []
    for term_i in range(self.n_terms):
        term_scales = SP.randn(self.diag[term_i].shape[0])
        if self.offset[term_i] > 0:
            # Offset terms get an extra zero entry.
            term_scales = SP.concatenate((term_scales, SP.zeros(1)))
        scales.append(term_scales)
    return SP.concatenate(scales)
def date_range(start_date, end_date, increment, period):
    """Generate `date` objects between `start_date` and `end_date`
    (inclusive) in `increment` `period` intervals."""
    step = relativedelta.relativedelta(**{period: increment})
    current = start_date
    while current <= end_date:
        yield current
        current += step
def complete(self):
    """Get (and lazily cache) the list of completion objects for the
    symbol under the cursor."""
    if self._possible is None:
        prefix_len = len(self.context.symbol)
        self._possible = [
            Completion(self.context, self.names[name], prefix_len)
            for name in self.names
        ]
    return self._possible
def ulogin_response(self, token, host):
    """Make a token-validation request to ULOGIN and return the parsed
    JSON response."""
    response = requests.get(
        settings.TOKEN_URL,
        params={
            'token': token,
            'host': host
        })
    content = response.content
    # On Python 3 the raw bytes must be decoded before json.loads.
    if sys.version_info >= (3, 0):
        content = content.decode('utf8')
    return json.loads(content)
def _get_cifar(directory, url):
    """Download and extract CIFAR to directory unless it is there."""
    filename = os.path.basename(url)
    path = generator_utils.maybe_download(directory, filename, url)
    # Use a context manager so the archive's file handle is closed
    # deterministically instead of being leaked.
    with tarfile.open(path, "r:gz") as archive:
        archive.extractall(directory)
def _load_config(self):
    """Load the project's ``config`` module and return its public
    attributes as a dict.

    TODO: Convert the original dotted representation to hierarchical.
    """
    config = import_module('config')
    public_names = (name for name in dir(config) if not name.startswith('_'))
    return {name: getattr(config, name) for name in public_names}
def ordered_load(self, stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):
    """Load YAML from ``stream`` preserving mapping order, falling back to
    JSON when the stream is not scannable as YAML.

    On any other failure, ``self.error`` is called and {} is returned.
    Adapted from https://stackoverflow.com/a/21912744/1927102
    """
    class OrderedLoader(Loader):
        pass

    def construct_mapping(loader, node):
        loader.flatten_mapping(node)
        return object_pairs_hook(loader.construct_pairs(node))

    OrderedLoader.add_constructor(
        yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
        construct_mapping)
    try:
        try:
            return yaml.load(stream, OrderedLoader)
        except yaml.scanner.ScannerError:
            # JSON fallback: strings parse directly, file-likes are rewound.
            if type(stream) == str:
                return json.loads(stream, object_pairs_hook=object_pairs_hook)
            stream.seek(0)
            return json.load(stream, object_pairs_hook=object_pairs_hook)
    except Exception as e:
        self.error(e)
        return {}
def get_help(self, prefix='', include_special_flags=True):
    """Returns a help string for all known flags.

    Args:
      prefix: str, per-line output prefix.
      include_special_flags: bool, whether to include description of
        SPECIAL_FLAGS, i.e. --flagfile and --undefok.

    Returns:
      str, formatted help message.
    """
    flags_by_module = self.flags_by_module_dict()
    if flags_by_module:
        modules = sorted(flags_by_module)
        # Show the main module's flags first.
        main_module = sys.argv[0]
        if main_module in modules:
            modules.remove(main_module)
            modules.insert(0, main_module)
        return self._get_help_for_modules(modules, prefix,
                                          include_special_flags)
    output_lines = []
    values = six.itervalues(self._flags())
    if include_special_flags:
        values = itertools.chain(
            values, six.itervalues(_helpers.SPECIAL_FLAGS._flags()))
    self._render_flag_list(values, output_lines, prefix)
    return '\n'.join(output_lines)
def zoom_pinch_cb(self, fitsimage, event):
    """Pinch event in the pan window: just zoom the focused channel
    viewer."""
    chviewer = self.fv.getfocus_viewer()
    bindings = chviewer.get_bindings()
    if not hasattr(bindings, 'pi_zoom'):
        return False
    return bindings.pi_zoom(chviewer, event)
def _metaclass_lookup_attribute(self, name, context):
    """Search the given name in the implicit and the explicit metaclass."""
    attrs = set()
    # A set de-duplicates when both metaclasses are the same object.
    for cls in {self.implicit_metaclass(), self.metaclass()}:
        if cls and cls != self and isinstance(cls, ClassDef):
            attrs.update(
                self._get_attribute_from_metaclass(cls, name, context))
    return attrs
def neighbors(self, n, t=None):
    """Return a list of the nodes connected to the node n at time t.

    Parameters
    ----------
    n : node
        A node in the graph
    t : snapshot id (default=None)
        If None, the neighbors of the node on the flattened graph are
        returned.

    Returns
    -------
    nlist : list
        A list of nodes that are adjacent to n.

    Raises
    ------
    NetworkXError
        If the node n is not in the graph.

    Examples
    --------
    >>> G = dn.DynGraph()
    >>> G.add_path([0,1,2,3], t=0)
    >>> G.neighbors(0, t=0)
    [1]
    >>> G.neighbors(0, t=1)
    []
    """
    try:
        if t is None:
            return list(self._adj[n])
        return [other for other in self._adj[n]
                if self.__presence_test(n, other, t)]
    except KeyError:
        raise nx.NetworkXError("The node %s is not in the graph." % (n,))
def fillna(self, value):
    """Return a new Index with missing values replaced by ``value``.

    Parameters
    ----------
    value : {int, float, bytes, bool}
        Scalar value to replace missing values with.

    Returns
    -------
    Index
        With missing values replaced.
    """
    if not is_scalar(value):
        raise TypeError('Value to replace with is not a valid scalar')
    replaced = weld_replace(self.weld_expr,
                            self.weld_type,
                            default_missing_data_literal(self.weld_type),
                            value)
    return Index(replaced, self.dtype, self.name)
def find_service_by_id(self, service_id):
    """Get service for a given service_id.

    :param service_id: Service id, str
    :return: Service, or None when a numeric id matches nothing;
        non-numeric ids fall back to a lookup by service type.
    """
    key = 'serviceDefinitionId'
    service_id = str(service_id)
    for service in self._services:
        if key in service.values and str(service.values[key]) == service_id:
            return service
    try:
        int(service_id)
    except ValueError:
        # Not a numeric id: treat it as a service type instead.
        return self.find_service_by_type(service_id)
    return None
def reads(args):
    """%prog reads frgscffile

    Report read counts per scaffold (based on frgscf).
    """
    p = OptionParser(reads.__doc__)
    p.add_option("-p", dest="prefix_length", default=4, type="int",
                 help="group the reads based on the first N chars [default: %default]")
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(p.print_help())
    frgscffile, = args
    prefix_length = opts.prefix_length
    # Nested counters: scaffold id -> read-prefix -> count.
    counts = defaultdict(lambda: defaultdict(int))
    # Use a context manager so the input file handle is not leaked.
    with open(frgscffile) as fp:
        for row in fp:
            f = FrgScfLine(row)
            fi = f.fragmentID[:prefix_length]
            counts[f.scaffoldID][fi] += 1
    for scf, count in sorted(counts.items()):
        print("{0}\t{1}".format(scf,
              ", ".join("{0}:{1}".format(*x) for x in sorted(count.items()))))
def using(self, alias):
    """Select which database this QuerySet should execute its query
    against."""
    new_qs = self._clone()
    new_qs._index = alias
    return new_qs
def spectrum_loglike(self, specType, params, scale=1E3):
    """Return the log-likelihood for a particular spectrum.

    Parameters
    ----------
    specType : str
        The type of spectrum to try
    params : array-like
        The spectral parameters
    scale : float
        The energy scale or 'pivot' energy
    """
    spectral_fn = self.create_functor(specType, scale)[0]
    evaluate = self.__call__
    return evaluate(spectral_fn(params))
def _prev_month(self):
self._canvas.place_forget()
self._date = self._date - self.timedelta(days=1)
self._date = self.datetime(self._date.year, self._date.month, 1)
self._build_calendar() | Updated calendar to show the previous month. |
def pept_diff(p1, p2):
    """Return the number of differences between two peptides.

    :param str p1: Peptide 1
    :param str p2: Peptide 2
    :return: Number of differing positions, or -1 if lengths differ
    :rtype: int

    >>> pept_diff('ABCDE', 'ABCDF')
    1
    >>> pept_diff('ABCDE', 'ABCDE')
    0
    """
    if len(p1) != len(p2):
        return -1
    return sum(a != b for a, b in zip(p1, p2))
def set_log_type_name(self, logType, name):
    """Set a logtype name.

    :Parameters:
        #. logType (string): A defined logging type.
        #. name (string): The logtype new name.
    """
    assert logType in self.__logTypeStdoutFlags, "logType '%s' not defined" %logType
    # basestring does not exist in Python 3; str is the correct check there
    # (Python 3 str already covers the former unicode case).
    assert isinstance(name, str), "name must be a string"
    name = str(name)
    self.__logTypeNames[logType] = name
def create_binary_descriptor(streamer):
    """Create a packed binary descriptor of a DataStreamer object.

    Args:
        streamer (DataStreamer): The streamer to create a packed
            descriptor for

    Returns:
        bytes: A packed 14-byte streamer descriptor.
    """
    if streamer.automatic:
        trigger = 1
    elif streamer.with_other is not None:
        # High bit marks the with-other trigger mode.
        trigger = (1 << 7) | streamer.with_other
    else:
        trigger = 0
    return struct.pack("<8sHBBBx",
                       streamer.dest.encode(),
                       streamer.selector.encode(),
                       trigger,
                       streamer.KnownFormats[streamer.format],
                       streamer.KnownTypes[streamer.report_type])
def fmap(self, f: 'WrappedFunction') -> 'WrappedFunction':
    """Function map for WrappedFunction: compose self after ``f``.

    ``f`` is coerced to a WrappedFunction if it is not one already.
    """
    wrapped = f if isinstance(f, WrappedFunction) else WrappedFunction(f)
    return WrappedFunction(
        lambda *args, **kwargs: self(wrapped(*args, **kwargs)),
        nargs=wrapped.nargs, nouts=self.nouts)
def dashes(phone):
    """Return the phone number formatted with dashes.

    Handles "+1"-prefixed and bare 10-digit strings; anything else is
    returned unchanged.
    """
    if not isinstance(phone, str):
        return phone
    if phone.startswith("+1"):
        return "1-" + "-".join((phone[2:5], phone[5:8], phone[8:]))
    if len(phone) == 10:
        return "-".join((phone[:3], phone[3:6], phone[6:]))
    return phone
def _get_movie_raw_metadata():
    """Get the raw lines of the genre file, downloading the MovieLens
    archive first if necessary."""
    path = _get_movielens_path()
    if not os.path.isfile(path):
        _download_movielens(path)
    with zipfile.ZipFile(path) as datafile:
        raw = datafile.read('ml-100k/u.item')
    return raw.decode(errors='ignore').split('\n')
def get_level_methods(self, level):
    """Return the methods to call for the given level of report.

    :param level: The level of report
    :return: The set of methods to call to fill the report
    :raise KeyError: Unknown level or alias
    """
    try:
        return set(self.__levels[level])
    except KeyError:
        # Not a direct level: resolve it as an alias of sub-levels.
        methods = set()
        for sub_level in self.__aliases[level]:
            methods |= self.get_level_methods(sub_level)
        return methods
def _log_board_ports(self, ports):
ports = sorted(ports, key=lambda port: (port.tile_id, port.direction))
self._logln('ports: {0}'.format(' '.join('{}({} {})'.format(p.type.value, p.tile_id, p.direction)
for p in ports))) | A board with no ports is allowed.
In the logfile, ports must be sorted
- ascending by tile identifier (primary)
- alphabetical by edge direction (secondary)
:param ports: list of catan.board.Port objects |
def flip_motion(self, value):
    """Toggle motion detection on (truthy value) or off (falsy value)."""
    action = (self.cam.enable_motion_detection if value
              else self.cam.disable_motion_detection)
    action()
def slice(self, items):
    """Slice the sequence of all items to obtain those for the current
    page. With no limit, a copy of the whole sequence is returned."""
    if not self.limit:
        return items[:]
    if self.page > self.pages_count:
        return []
    start = self.limit * (self.page - 1)
    if self.page == self.pages_count:
        # Last page takes everything that remains.
        return items[start:]
    return items[start:start + self.limit]
def by_name(self, name, archived=False, limit=None, page=None):
    """Return a project by its name.

    This only works with the exact name of the project.
    """
    return super(Projects, self).by_name(
        name, archived=archived, limit=limit, page=page)
def with_optimizer_tensor(self, tensor: Union[tf.Tensor, tf.Operation]) -> 'Optimization':
    """Replace the optimizer tensor and return self (fluent interface).

    :param tensor: TensorFlow tensor or operation to use as the optimizer.
    :return: this Optimization instance.
    """
    self._optimizer_tensor = tensor
    return self
def _update_phi(self):
    """Recompute ``phi`` from the current ``eta`` values."""
    running = 1.0
    for w in range(N_NT - 1):
        # Portion of the running product not carried forward to later slots.
        self.phi[w] = running * (1 - self.eta[w])
        running *= self.eta[w]
    # Whatever remains of the product goes into the final slot.
    self.phi[N_NT - 1] = running
def view_links(obj):
    """Link to performance data and duplicate overview.

    Returns safe HTML with three links for the given assignment:
    its duplicates page, its grading page, and its archive download.

    Bug fix: the original interpolated URLs with ``%`` *inside* the
    ``format_html`` template, which bypasses Django's auto-escaping.
    Passing the values as arguments lets ``format_html`` escape them.
    """
    return format_html(
        '<a href="{}" style="white-space: nowrap">Show duplicates</a><br/>'
        '<a href="{}" style="white-space: nowrap">Show submissions</a><br/>'
        '<a href="{}" style="white-space: nowrap">Download submissions</a>',
        reverse('duplicates', args=(obj.pk,)),
        obj.grading_url(),
        reverse('assarchive', args=(obj.pk,)),
    )
def put(self, fn):
    """Enqueue a task function for processing.

    ``fn`` must be a callable taking one argument: the interface
    associated with each worker thread, e.g.::

        def download(api):
            results.append(api.download())
        self.put(download)

    Blocks while the queue is full.

    :returns: self, to allow chaining.
    """
    self._inserted += 1
    work_queue = self._queue
    work_queue.put(fn, block=True)
    return self
def clone(self, into=None):
    """Clone this chroot.

    :keyword into: (optional) Destination directory for the clone.  When
      omitted, a temporary directory is created (and garbage collected
      on interpreter exit).
    """
    destination = into or safe_mkdtemp()
    duplicate = Chroot(destination)
    for label, fileset in self.filesets.items():
        for fn in fileset:
            duplicate.link(os.path.join(self.chroot, fn), fn, label=label)
    return duplicate
def detag_string(self, string):
    """Extract tags from ``string``.

    Returns ``(new, tags)`` where ``new`` has every tag replaced by an
    index placeholder (``<BR>...`` becomes ``<0>``, ``<1>``, ...) and
    ``tags`` is the list of removed tags (e.g. ``['<BR>', '<I>', '</I>']``).
    """
    tags = [''.join(groups) for groups in self.tag_pattern.findall(string)]
    indices = itertools.count(0)
    replaced, hits = self.tag_pattern.subn(
        lambda match: '<%s>' % next(indices), string)
    if hits != len(tags):
        raise Exception('tags dont match:' + string)
    return (replaced, tags)
def seen_tasks(self):
    """Print the list of seen task types, one per line."""
    response = self._stub.seen_tasks(clearly_pb2.Empty())
    print('\n'.join(response.task_types))
def changeThreadTitle(self, title, thread_id=None, thread_type=ThreadType.USER):
    """Change the title of a thread.

    Executed on a user thread, this changes the nickname of that user
    instead, effectively changing the title.

    :param title: New group thread title
    :param thread_id: Group ID to change title of. See :ref:`intro_threads`
    :param thread_type: See :ref:`intro_threads`
    :type thread_type: models.ThreadType
    :raises: FBchatException if request failed
    """
    thread_id, thread_type = self._getThread(thread_id, thread_type)
    if thread_type == ThreadType.USER:
        # A one-to-one thread has no title; set a nickname instead.
        return self.changeNickname(
            title, thread_id, thread_id=thread_id, thread_type=thread_type
        )
    payload = {"thread_name": title, "thread_id": thread_id}
    self._post(self.req_url.THREAD_NAME, payload, fix_request=True, as_json=True)
def update(self, n=1):
    """Increment the current value by ``n`` and refresh the display."""
    lock = self._lock
    with lock:
        self._pbar.update(n)
        self.refresh()
def make_unique_name(name, existing_names, name_format="{name}_{index}", start=2):
    """Return a unique name based on ``name_format`` and ``name``.

    ``name`` itself is returned when it is not already taken; otherwise
    candidates are generated from ``name_format`` with increasing
    ``index`` values (beginning at ``start``) until a free one is found.
    """
    if name not in existing_names:
        return name
    for index in itertools.count(start):
        candidate = name_format.format(name=name, index=index)
        if candidate not in existing_names:
            return candidate
def explode_line(argument_line: str) -> typing.Tuple[str, str]:
    """Return the ``(name, description)`` parsed from an argument line.

    The text before the first space is discarded; the remainder is
    split on the first colon.  A missing description yields ``''``.
    """
    remainder = argument_line.split(' ', 1)[-1]
    name, _, description = remainder.partition(':')
    return (name, description)
def _attach_to_model(self, model):
    """Record this relation on the database once attached to a model.

    The stored relations are later used to create RelatedCollection
    objects in the related model.  Abstract models are skipped.
    """
    super(RelatedFieldMixin, self)._attach_to_model(model)
    if model.abstract:
        return
    self.related_name = self._get_related_name()
    self.related_to = self._get_related_model_name()
    # Lazily create the relation registry on the database object.
    if not hasattr(self.database, '_relations'):
        self.database._relations = {}
    registry = self.database._relations
    registry.setdefault(self.related_to, [])
    self._assert_relation_does_not_exists()
    registry[self.related_to].append(
        (self._model._name, self.name, self.related_name)
    )
def __destroyLockedView(self):
    """Tear down the locked view, if any, and drop the reference."""
    view = self._lockedView
    if view:
        view.close()
        view.deleteLater()
        self._lockedView = None
def parse_config(self):
    """Parse the XML file listing remote servers and discover the
    resources available on each server found.
    """
    root = ElementTree.parse(self.file_xml).getroot()
    for server in root.findall('server'):
        # Element text is the destination; the "name" attribute labels it.
        self.discover_remote(server.text, server.get("name"))
def chunker(l, n):
    """Yield successive n-sized chunks from the list ``l``.

    :param l: sequence to split (anything sliceable with a length)
    :param n: chunk size; the final chunk may be shorter
    """
    # Bug fix: the original called the undefined name ``ranger``, which
    # raised NameError on first iteration; ``range`` is what's intended.
    for i in range(0, len(l), n):
        yield l[i:i + n]
def extract_rows(data, *rows):
    """Extract the rows of ``data`` at the given indices.

    >>> extract_rows([[10, 20], [30, 40], [50, 60]], 1, 2)
    [[30, 40], [50, 60]]

    :raises IndexError: with a diagnostic message naming the data and
        requested rows when any index is out of range.
    """
    # The original had an unreachable ``return out`` after the
    # try/except (both branches already leave the function); the
    # append-loop is also clearer as a comprehension.
    try:
        return [data[r] for r in rows]
    except IndexError:
        # Re-raise with context so the caller sees what was requested.
        raise IndexError("data=%s rows=%s" % (data, rows))
def uniqued(iterable):
    """Return unique list of ``iterable`` items preserving order.

    >>> uniqued('spameggs')
    ['s', 'p', 'a', 'm', 'e', 'g']
    """
    # dict preserves insertion order (3.7+) and de-duplicates keys,
    # which is exactly first-seen-order uniqueness.
    return list(dict.fromkeys(iterable))
def get_checksum(self):
    """Return a checksum based on the IDL that ignores comments and
    ordering, but detects changes to types, parameter order, and enum
    values.
    """
    # Sorting makes the checksum independent of declaration order;
    # falsy element checksums are skipped.
    sums = sorted(s for s in map(elem_checksum, self.parsed) if s)
    return md5(json.dumps(sums))
def matrix(self, angle):
    """Return the rotation matrix in homogeneous coordinates for
    ``angle`` about this object's axis ('x', 'y' or 'z').
    """
    builders = {'x': self._x_rot, 'y': self._y_rot, 'z': self._z_rot}
    return builders[self.axis](angle)
def get_room_member_ids(self, room_id, start=None, timeout=None):
    """Call the get-room-member-IDs API.

    https://devdocs.line.me/en/#get-group-room-member-ids

    Gets the user IDs of the members of a room the bot is in, including
    users who have not added the bot as a friend or have blocked it.

    :param str room_id: Room ID
    :param str start: continuationToken
    :param timeout: (optional) How long to wait for the server to send
        data before giving up, as a float or a (connect timeout, read
        timeout) float tuple.  Default is self.http_client.timeout.
    :type timeout: float | tuple(float, float)
    :rtype: :py:class:`linebot.models.responses.MemberIds`
    :return: MemberIds instance
    """
    params = {'start': start} if start is not None else None
    endpoint = '/v2/bot/room/{room_id}/members/ids'.format(room_id=room_id)
    response = self._get(endpoint, params=params, timeout=timeout)
    return MemberIds.new_from_json_dict(response.json)
def sampling(self, ufunc, **kwargs):
    """Sample a continuous function and assign the result to this element.

    Parameters
    ----------
    ufunc : ``self.space.fspace`` element
        The continuous function to be sampled.
    kwargs :
        Additional arguments for the sampling operator implementation.

    Examples
    --------
    >>> space = odl.uniform_discr(0, 1, 5)
    >>> x = space.element()
    >>> x.sampling(lambda t: t)
    >>> x
    uniform_discr(0.0, 1.0, 5).element([ 0.1, 0.3, 0.5, 0.7, 0.9])

    See Also
    --------
    DiscretizedSpace.sampling : For full description
    """
    self.space.sampling(ufunc, out=self.tensor, **kwargs)
def get_field(self, offset, length, format):
    """Unpack and return a single value from ``self.data``.

    Args:
        offset (int): byte offset into the underlying buffer
        length (int): number of bytes to unpack
        format (str): Python struct format string for unpacking

    See Also:
        https://docs.python.org/2/library/struct.html#format-characters
    """
    raw = self.data[offset:offset + length]
    return struct.unpack(format, raw)[0]
def get_version(svn=False, limit=3):
    """Return the version as a human-format string.

    :param svn: append the SVN revision when available (needs limit >= 3)
    :param limit: how many components of VERSION to include
    """
    version = '.'.join(str(part) for part in VERSION[:limit])
    if svn and limit >= 3:
        from django.utils.version import get_svn_revision
        import os
        revision = get_svn_revision(os.path.dirname(__file__))
        if revision:
            version = '%s.%s' % (version, revision)
    return version
def tuple_to_datetime(self, date_tuple):
    """Convert a 6-tuple to a date, time or datetime object.

    If the Y, M and D parts are all 0, a ``time`` is returned.
    If the h, m and s parts are all 0, a ``date`` is returned.
    Otherwise a ``datetime`` is returned.
    """
    year, month, day = date_tuple[:3]
    hour, minute, second = date_tuple[3:]
    if year == month == day == 0:
        return time(hour, minute, second)
    if hour == minute == second == 0:
        return date(year, month, day)
    return datetime(year, month, day, hour, minute, second)
def contains(self, key):
    """Return whether the object named by ``key`` exists.

    First checks ``cache_datastore``, then ``child_datastore``.
    """
    if self.cache_datastore.contains(key):
        return True
    return self.child_datastore.contains(key)
def run(self, user_kw=None, build_kw=None):
    """Run the builder.

    :param user_kw: keywords from the user
    :type user_kw: dict
    :param build_kw: internal settings
    :type build_kw: dict
    :return: Number of items processed
    :rtype: int
    """
    if user_kw is None:
        user_kw = {}
    if build_kw is None:
        build_kw = {}
    items = self.get_items(**user_kw)
    count = self._build(items, **build_kw)
    if not self.finalize(self._status.has_failures()):
        _log.error("Finalization failed")
    return count
def get_subgraph(self, name):
    """Retrieve a subgraph from the graph by name.

    Given a subgraph's name the corresponding Subgraph instance(s) are
    returned as a list (several subgraphs may share a name); an empty
    list is returned when there is no match.
    """
    matches = []
    if name in self.obj_dict['subgraphs']:
        for entry in self.obj_dict['subgraphs'][name]:
            matches.append(Subgraph(obj_dict=entry))
    return matches
def closeEvent(self, event):
    """Handle window close; if configs were edited, ask the user first.

    :param event: the close event
    :type event: QCloseEvent
    :returns: None
    :rtype: None
    """
    if not self.inimodel.get_edited():
        event.accept()
        return
    # Unsaved changes: let the user confirm before closing.
    if self.doc_modified_prompt() == QtGui.QMessageBox.Yes:
        event.accept()
    else:
        event.ignore()
def _tokenize(cls, sentence):
    """Split a sentence into tokens while preserving tags.

    Plain text between tags is split via ``cls._split``; each tag
    matched by ``cls._regex_tag`` is yielded as a single token.
    """
    remaining = sentence
    while True:
        match = cls._regex_tag.search(remaining)
        if match is None:
            # No more tags: emit the rest as plain text and stop.
            yield from cls._split(remaining)
            return
        leading = remaining[:match.start()]
        yield from cls._split(leading)
        tag = match.group(0)
        yield tag
        remaining = remaining[len(leading) + len(tag):]
def _GetFlowArgsHelpAsString(self, flow_cls):
    """Return a string describing the calling prototype and arguments
    of ``flow_cls``.
    """
    output = [
        " Call Spec:",
        " %s" % self._GetCallingPrototypeAsString(flow_cls), ""
    ]
    described = sorted(
        iteritems(self._GetArgsDescription(flow_cls.args_type)),
        key=lambda item: item[0])
    if described:
        output.append(" Args:")
        for arg_name, info in described:
            output.append(" %s" % arg_name)
            output.append(" description: %s" % info["description"])
            output.append(" type: %s" % info["type"])
            output.append(" default: %s" % info["default"])
    else:
        output.append(" Args: None")
    output.append("")
    return "\n".join(output)
def get_forced_variation(self, experiment, user_id):
    """Return the variation the user is forced into for ``experiment``.

    Args:
        experiment: Object representing the experiment for which the
            user is to be bucketed.
        user_id: ID for the user.

    Returns:
        Variation the user is forced into, or None if there is none.
    """
    whitelist = experiment.forcedVariations
    if not whitelist or user_id not in whitelist:
        return None
    variation_key = whitelist[user_id]
    variation = self.config.get_variation_from_key(experiment.key, variation_key)
    if not variation:
        return None
    self.logger.info('User "%s" is forced in variation "%s".' % (user_id, variation_key))
    return variation
def metadata(abbr, __metadata=__metadata):
    """Grab the metadata for the given two-letter abbreviation.

    NOTE: the mutable default argument is the deliberate per-process
    cache; database results (including misses) are memoized in it.
    """
    key = abbr.lower()
    try:
        return __metadata[key]
    except KeyError:
        record = db.metadata.find_one({'_id': key})
        __metadata[key] = record
        return record
def owner(self, owner):
    """Set the owner of this OauthTokenReference.

    User name of the owner of the OAuth token within data.world.

    :param owner: The owner of this OauthTokenReference.
    :type: str
    :raises ValueError: when ``owner`` is None, shorter than 3 or
        longer than 31 characters, or does not match the allowed
        pattern.
    """
    if owner is None:
        raise ValueError("Invalid value for `owner`, must not be `None`")
    # The `is not None` guards of the original were redundant after the
    # check above; behavior is unchanged.
    if len(owner) > 31:
        raise ValueError("Invalid value for `owner`, length must be less than or equal to `31`")
    if len(owner) < 3:
        raise ValueError("Invalid value for `owner`, length must be greater than or equal to `3`")
    if not re.search('[a-z0-9](?:-(?!-)|[a-z0-9])+[a-z0-9]', owner):
        raise ValueError("Invalid value for `owner`, must be a follow pattern or equal to `/[a-z0-9](?:-(?!-)|[a-z0-9])+[a-z0-9]/`")
    self._owner = owner
def grant_bonus(self, assignment_id, amount, reason):
    """Grant a bonus to the MTurk Worker who completed an assignment.

    Issues a payment of money from your account to the Worker, separate
    from the reward paid when you approve the assignment.  To be
    eligible, the Worker must have submitted results for one of your
    HITs that were approved or rejected.

    :raises MTurkServiceException: when the payment request fails.
    """
    worker_id = self.get_assignment(assignment_id)["worker_id"]
    formatted_amount = "{:.2f}".format(amount)
    try:
        return self._is_ok(
            self.mturk.send_bonus(
                WorkerId=worker_id,
                BonusAmount=formatted_amount,
                AssignmentId=assignment_id,
                Reason=reason,
                UniqueRequestToken=self._request_token(),
            )
        )
    except ClientError as ex:
        message = "Failed to pay assignment {} bonus of {}: {}".format(
            assignment_id, formatted_amount, str(ex)
        )
        raise MTurkServiceException(message)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.