code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def clearScreen(cls):
    """Clear the terminal screen.

    Uses the platform-appropriate shell command: ``cls`` on Windows,
    ``clear`` on Linux and macOS.  Logs an error for unknown platforms.
    """
    if "win32" in sys.platform:
        os.system('cls')
    elif "linux" in sys.platform or 'darwin' in sys.platform:
        # Linux and macOS share the same POSIX `clear` command; the
        # original duplicated this branch.
        os.system('clear')
    else:
        cit.err("No clearScreen for " + sys.platform)
def set_socket_address(self):
    """Pick a random local TCP port and build the zmq socket addresses.

    Chooses a random port in [5001, 5998] on 127.0.0.1; the subscriber
    and publisher share the same port.  Stores the resulting addresses
    on ``self.subscriber_socket_address`` / ``self.publisher_socket_address``.
    """
    Global.LOGGER.debug('defining socket addresses for zmq')
    random.seed()
    default_port = random.randrange(5001, 5999)
    internal_0mq_address = "tcp://127.0.0.1"
    internal_0mq_port_subscriber = str(default_port)
    internal_0mq_port_publisher = str(default_port)
    # f-strings are already formatted; the old str.format() wrapper was a
    # no-op and would raise on literal braces in the message.
    Global.LOGGER.info(
        f"zmq subsystem subscriber on {internal_0mq_port_subscriber} port")
    Global.LOGGER.info(
        f"zmq subsystem publisher on {internal_0mq_port_publisher} port")
    self.subscriber_socket_address = f"{internal_0mq_address}:{internal_0mq_port_subscriber}"
    self.publisher_socket_address = f"{internal_0mq_address}:{internal_0mq_port_publisher}"
def redirect_stdout(self, enabled=True, log_level=logging.INFO):
    """Redirect sys.stdout to a file-like logging wrapper.

    When enabled, stdout writes are forwarded to this logger at
    *log_level* (the wrapper is created once and reused, its level
    updated on later calls).  When disabled, the original stdout is
    restored.

    :param enabled: True to redirect, False to restore original stdout
    :param log_level: logging level used by the stdout wrapper
    """
    if enabled:
        if self.__stdout_wrapper:
            # Reuse the existing wrapper; just retarget its level.
            self.__stdout_wrapper.update_log_level(log_level=log_level)
        else:
            self.__stdout_wrapper = StdOutWrapper(logger=self, log_level=log_level)
        self.__stdout_stream = self.__stdout_wrapper
    else:
        # _original_stdout is a module-level backup of the real stdout.
        self.__stdout_stream = _original_stdout
    sys.stdout = self.__stdout_stream
def _validate_handler(column_name, value, predicate_refs):
    """Run each predicate against *value* and interpret its return value.

    A predicate may return:
      * ``dict(value=new_value)`` -- replaces *value* for later predicates,
      * ``True``  -- validation passed,
      * ``False`` -- validation failed (raises ModelInvalid).
    Any other return type raises an Exception.  A ``None`` value skips
    validation entirely.

    :param column_name: name of the column being validated (for messages)
    :param value: the value to validate; may be rewritten by predicates
    :param predicate_refs: iterable of encoded predicate references
    :return: the (possibly rewritten) value
    """
    if value is not None:
        for predicate_ref in predicate_refs:
            predicate, predicate_name, predicate_args = _decode_predicate_ref(predicate_ref)
            validate_result = predicate(value, *predicate_args)
            if isinstance(validate_result, dict) and 'value' in validate_result:
                value = validate_result['value']
            elif not isinstance(validate_result, bool):
                # isinstance is the idiomatic (and subclass-safe) type check,
                # replacing the original `type(x) != bool`.
                raise Exception(
                    'predicate (name={}) can only return bool or dict(value=new_value) value'.format(predicate_name))
            elif not validate_result:
                raise ModelInvalid(u'db model validate failed: column={}, value={}, predicate={}, arguments={}'.format(
                    column_name, value, predicate_name, ','.join(map(str, predicate_args))
                ))
    return value
def attach(self, attachments):
    """Add an attachment, or a list of attachments, to this item.

    If the item has already been saved, the attachments are created on
    the server immediately; otherwise they are created when the item is
    saved.  Adding attachments to an existing item updates the item's
    changekey.
    """
    if not is_iterable(attachments, generators_allowed=True):
        # Allow a single attachment to be passed bare.
        attachments = [attachments]
    for a in attachments:
        if not a.parent_item:
            a.parent_item = self
        if self.id and not a.attachment_id:
            # Item exists server-side: push the attachment right away.
            a.attach()
        if a not in self.attachments:
            self.attachments.append(a)
attachments will be created on the server immediately. If the item has not yet been saved, the attachments will
be created on the server the item is saved.
Adding attachments to an existing item will update the changekey of the item. |
def add_edge(edges, edge_points, coords, i, j):
    """Add a line between the i-th and j-th points, if not already recorded.

    The edge is treated as undirected: if (i, j) or (j, i) is present the
    call is a no-op.  Otherwise (i, j) is added to *edges* and the pair of
    coordinate rows is appended to *edge_points*.

    :param edges: set of (i, j) index tuples, mutated in place
    :param edge_points: list of coordinate pairs, mutated in place
    :param coords: array of point coordinates (supports fancy indexing)
    """
    if (i, j) in edges or (j, i) in edges:
        # Edge already recorded (in either direction); the original
        # mistakenly performed the add/append in this branch only.
        return
    edges.add((i, j))
    edge_points.append(coords[[i, j]])
if not in the list already |
def list_tags():
    """Return a sorted list of all tags on local docker images.

    Images without RepoTags are skipped.

    CLI Example:

    .. code-block:: bash

        salt myminion docker.list_tags
    """
    ret = set()
    for item in six.itervalues(images()):
        if not item.get('RepoTags'):
            continue
        ret.update(set(item['RepoTags']))
    return sorted(ret)
CLI Example:
.. code-block:: bash
salt myminion docker.list_tags |
def get_device_state(self, device, id_override=None, type_override=None):
    """Get device state via the online API.

    Args:
        device (WinkDevice): The device the change is being requested for.
        id_override (String, optional): Device ID overriding the passed-in
            device's ID, e.g. a sub-device such as an outlet in a powerstrip.
        type_override (String, optional): Overrides the device type when a
            device inherits from a device other than WinkDevice.

    Returns:
        dict: the API's JSON response.
    """
    _LOGGER.info("Getting state via online API")
    object_id = id_override or device.object_id()
    object_type = type_override or device.object_type()
    # URL shape: <base>/<type>s/<id>  (the type is pluralized).
    url_string = "{}/{}s/{}".format(self.BASE_URL,
                                    object_type, object_id)
    arequest = requests.get(url_string, headers=API_HEADERS)
    response_json = arequest.json()
    _LOGGER.debug('%s', response_json)
    return response_json
Args:
device (WinkDevice): The device the change is being requested for.
id_override (String, optional): A device ID used to override the
passed in device's ID. Used to make changes on sub-devices.
i.e. Outlet in a Powerstrip. The Parent device's ID.
type_override (String, optional): Used to override the device type
when a device inherits from a device other than WinkDevice.
Returns:
response_json (Dict): The API's response in dictionary format |
def is_dir(self):
    """Return True if this path refers to an existing directory."""
    try:
        mode = self.stat().st_mode
    except OSError as exc:
        # Missing path components simply mean "not a directory";
        # any other OS error is a real failure and must propagate.
        if exc.errno in (ENOENT, ENOTDIR):
            return False
        raise
    return S_ISDIR(mode)
def get(self, sid):
    """Construct a DomainContext for the given resource.

    :param sid: The unique string that identifies the resource
    :returns: twilio.rest.api.v2010.account.sip.domain.DomainContext
    :rtype: twilio.rest.api.v2010.account.sip.domain.DomainContext
    """
    account_sid = self._solution['account_sid']
    return DomainContext(self._version, account_sid=account_sid, sid=sid)
:param sid: The unique string that identifies the resource
:returns: twilio.rest.api.v2010.account.sip.domain.DomainContext
:rtype: twilio.rest.api.v2010.account.sip.domain.DomainContext |
def _merge_dict(self, global_dict, local_dict):
global_dict = global_dict.copy()
for key in local_dict.keys():
if key in global_dict:
global_dict[key] = self._do_merge(global_dict[key], local_dict[key])
else:
global_dict[key] = local_dict[key]
return global_dict | Merges the two dictionaries together
:param global_dict: Global dictionary to be merged
:param local_dict: Local dictionary to be merged
:return: New merged dictionary with values shallow copied |
def getFilename(name):
    """Return a filename derived from *name* without dangerous or
    incompatible characters.

    Unsafe characters become underscores, runs of dots/underscores are
    collapsed, and a single leading '.' or '-' is stripped.
    """
    name = re.sub(r"[^0-9a-zA-Z_\-\.]", "_", name)
    # Loop until fixpoint: replacements can create new adjacent pairs.
    while ".." in name:
        name = name.replace("..", ".")
    while "__" in name:
        name = name.replace("__", "_")
    return name[1:] if name[:1] in (".", "-") else name
def der_cert(der_data):
    """Load a DER encoded certificate.

    :param der_data: DER-encoded certificate (bytes, or str to be
        UTF-8 encoded first)
    :return: a cryptography.x509 certificate instance
    """
    data = der_data.encode('utf-8') if isinstance(der_data, str) else der_data
    return x509.load_der_x509_certificate(data, default_backend())
:param der_data: DER-encoded certificate
:return: A cryptography.x509.certificate instance |
def _make_patterns(patterns):
    """Create a ScreenPatternList from the given pattern texts.

    Args:
        patterns (list of str): the pattern texts

    Returns:
        mpdlcd.display_pattern.ScreenPatternList: a list of patterns from
        the given entries.
    """
    registry = display_fields.FieldRegistry()
    screen_patterns = display_pattern.ScreenPatternList(
        field_registry=registry,
    )
    for pattern_text in patterns:
        screen_patterns.add(pattern_text.split('\n'))
    return screen_patterns
Args:
pattern_txt (str list): the patterns
Returns:
mpdlcd.display_pattern.ScreenPatternList: a list of patterns from the
given entries. |
def standalone_from_launchable(cls, launch):
    """Create a standalone EC2Instance definition from a launchable resource.

    The returned definition does not depend on, or reference, other
    template elements: scheduling-only attributes (DependsOn, SpotPrice,
    InstanceMonitoring, SecurityGroups) are stripped.

    :param launch: launchable resource whose attributes are copied
    :raises RuntimeError: if the launchable carries an InstanceId property
    :return: a new EC2Instance with the launchable's iscm attached
    """
    attrs = copy.copy(launch.el_attrs)
    del attrs["Type"]
    # dict.has_key() was removed in Python 3; use the `in` operator.
    if "DependsOn" in attrs:
        del attrs["DependsOn"]
    properties = attrs["Properties"]
    for prop in ("SpotPrice", "InstanceMonitoring", "SecurityGroups"):
        if prop in properties:
            del properties[prop]
    if "InstanceId" in properties:
        raise RuntimeError("Can't make instance from launchable containing InstanceId property")
    inst = EC2Instance(**attrs)
    inst.iscm = launch.iscm
    return inst
instance, which doesn't depend on or contain references to other
elements. |
def to_json(self):
    """Return the JSON representation of the content type field."""
    data = {
        'name': self.name,
        'id': self._real_id(),
        'type': self.type,
        'localized': self.localized,
        'omitted': self.omitted,
        'required': self.required,
        'disabled': self.disabled,
        'validations': [validation.to_json() for validation in self.validations],
    }
    # Type-specific extras (a field is never both Array and Link).
    if self.type == 'Array':
        data['items'] = self.items
    elif self.type == 'Link':
        data['linkType'] = self.link_type
    return data
def copy(self):
    """Return a copy of this database option set.

    Values are shallow-copied except those listed in UnhashableOptions,
    which are shared by reference.

    :return <orb.Context>
    """
    properties = {
        key: value if key in self.UnhashableOptions else copy.copy(value)
        for key, value in self.raw_values.items()
    }
    return Context(**properties)
:return <orb.Context> |
def dict_to_literal(dict_container: dict):
    """Transform a JSON-LD PyLD dictionary into values for an RDFLib object.

    :param dict_container: dict holding at least an ``"@value"`` entry
    :return: a 1-tuple ``(value,)`` for ints, otherwise the 2-tuple
        ``(value, language_or_None)``
    """
    if isinstance(dict_container["@value"], int):
        # NOTE(review): returns a 1-tuple here but a 2-tuple below --
        # looks inconsistent; confirm callers expect this asymmetry.
        return dict_container["@value"],
    else:
        return dict_container["@value"], dict_container.get("@language", None)
an RDFLib object |
def set_energy_range(self, logemin, logemax):
    """Set the energy bounds of the analysis.

    Restricts likelihood evaluation to data within [logemin, logemax].
    Input values are rounded to the closest bin edge; a None bound falls
    back to the instance's lower/upper limit.

    Parameters
    ----------
    logemin : float
        Lower energy bound in log10(E/MeV).
    logemax : float
        Upper energy bound in log10(E/MeV).

    Returns
    -------
    eminmax : array
        Minimum and maximum energy in log10(E/MeV).
    """
    if logemin is None:
        logemin = self.log_energies[0]
    else:
        # Snap the requested bound to the nearest bin edge.
        imin = int(utils.val_to_edge(self.log_energies, logemin)[0])
        logemin = self.log_energies[imin]
    if logemax is None:
        logemax = self.log_energies[-1]
    else:
        imax = int(utils.val_to_edge(self.log_energies, logemax)[0])
        logemax = self.log_energies[imax]
    self._loge_bounds = np.array([logemin, logemax])
    self._roi_data['loge_bounds'] = np.copy(self.loge_bounds)
    # Propagate the new bounds to every analysis component.
    for c in self.components:
        c.set_energy_range(logemin, logemax)
    return self._loge_bounds
evaluation of the likelihood to the data that falls in this
range. Input values will be rounded to the closest bin edge
value. If either argument is None then the lower or upper
bound of the analysis instance will be used.
Parameters
----------
logemin : float
Lower energy bound in log10(E/MeV).
logemax : float
Upper energy bound in log10(E/MeV).
Returns
-------
eminmax : array
Minimum and maximum energy in log10(E/MeV). |
def config_hook(self, func):
    """Decorator to add a config hook to this ingredient.

    Config hooks must be functions taking exactly
    ``(config, command_name, logger)`` and returning a dictionary used to
    update the config updates.  They run after this ingredient's
    configuration but before further ingredient configurations, and are
    not restricted to the ingredient's local namespace.

    :param func: the hook function to register
    :raises ValueError: if ``func`` has the wrong signature
    :return: the registered hook (so the decorator is transparent)
    """
    # inspect.getargspec was removed in Python 3.11; getfullargspec is
    # the supported replacement (`varkw` replaces the old `keywords`).
    argspec = inspect.getfullargspec(func)
    expected = ['config', 'command_name', 'logger']
    if not (argspec.args == expected and argspec.varargs is None and
            argspec.varkw is None and argspec.defaults is None):
        raise ValueError('Wrong signature for config_hook. Expected: '
                         '(config, command_name, logger)')
    self.config_hooks.append(func)
    return self.config_hooks[-1]
Config hooks need to be a function that takes 3 parameters and returns
a dictionary:
(config, command_name, logger) --> dict
Config hooks are run after the configuration of this Ingredient, but
before any further ingredient-configurations are run.
The dictionary returned by a config hook is used to update the
config updates.
Note that they are not restricted to the local namespace of the
ingredient. |
def last_job_statuses(self) -> List[str]:
    """Return the last status of each job in this experiment, skipping
    jobs that have no status yet."""
    return [
        status
        for status in self.jobs.values_list('status__status', flat=True)
        if status is not None
    ]
async def create(self, **kwargs):
    """Insert a new document (POST without a resource identifier).

    Merges *kwargs* into the request data, deserializes it into a fresh
    model object, inserts the object into the database and returns its
    serialized form.

    :raises BadRequest: wrapping any underlying failure
    """
    try:
        obj = self._meta.object_class()
        self.data.update(kwargs)
        await obj.deserialize(self.data)
        await obj.insert(db=self.db)
        return await obj.serialize()
    except Exception as ex:
        # Log the original failure, then surface it as a 400 to the client.
        logger.exception(ex)
        raise BadRequest(ex)
def insert(self, thread):
    """Process a thread and insert its comments in the DB.

    Creates the thread row, then inserts its comments in ascending id
    order, updating the running comment count.

    :param thread: dict with 'id', 'title' and 'comments' entries
    """
    thread_id = thread['id']
    title = thread['title']
    self.db.threads.new(thread_id, title)
    comments = list(map(self._build_comment, thread['comments']))
    # Insert in deterministic (id) order.
    comments.sort(key=lambda comment: comment['id'])
    self.count += len(comments)
    for comment in comments:
        self.db.comments.add(thread_id, comment)
def build_joblist(jobgraph):
    """Return a list of jobs collected from the passed jobgraph.

    Each root job seeds populate_jobset(), which accumulates reachable
    jobs into one set.
    """
    collected = set()
    for root_job in jobgraph:
        collected = populate_jobset(root_job, collected, depth=1)
    return list(collected)
def html_for_modules_method(method_name, *args, **kwargs):
    """Return an HTML snippet for a Modules API method.

    Args:
        method_name: A string containing a Modules API method.
        args: Positional arguments to be passed to the method.
        kwargs: Keyword arguments to be passed to the method.

    Returns:
        String HTML representing the Modules API method and value.
    """
    value = getattr(modules, method_name)(*args, **kwargs)
    return KEY_VALUE_TEMPLATE.format(method_name, value)
Args:
method_name: A string containing a Modules API method.
args: Positional arguments to be passed to the method.
kwargs: Keyword arguments to be passed to the method.
Returns:
String HTML representing the Modules API method and value. |
def filter(self, source_file, encoding):
    """Parse *source_file* and return its filtered text.

    :param source_file: path of the file to read
    :param encoding: text encoding used to decode the file
    :return: a single-element list holding the filtered SourceText
        (context category 'context')
    """
    with codecs.open(source_file, 'r', encoding=encoding) as f:
        text = f.read()
    return [filters.SourceText(self._filter(text), source_file, encoding, 'context')]
def is_running(config, container, *args, **kwargs):
    """Return the container's info dict if it is running, else None.

    container
        Container id

    Any error while fetching the container info (missing container,
    daemon unreachable, ...) is deliberately treated as "not running".
    """
    try:
        infos = _get_container_infos(config, container)
        return (infos if infos.get('State', {}).get('Running') else None)
    except Exception:
        # Best-effort check: any failure means we cannot call it running.
        return None
container
Container id
Return container |
def error(name=None, message=''):
    """Raise an error built from *name*/*message*, or return an empty
    dict when name is None.

    CLI Example:

    .. code-block:: bash

        salt-wheel error
        salt-wheel error.error name="Exception" message="This is an error."
    """
    if name is None:
        return {}
    # raise_error raises an exception named *name* with *message*.
    salt.utils.error.raise_error(name=name, message=message)
    return {}
Otherwise raise an exception with __name__ from name, message from message
CLI Example:
.. code-block:: bash
salt-wheel error
salt-wheel error.error name="Exception" message="This is an error." |
def minmax(arrays, masks=None, dtype=None, out=None, zeros=None,
           scales=None, weights=None, nmin=1, nmax=1):
    """Combine arrays using min-max rejection, with masks.

    Inputs and masks are lists of same-shaped array objects.  The result
    has one more dimension than the inputs, sized (3, shape): out[0] is
    the mean, out[1] the variance and out[2] the number of points used.

    :param arrays: a list of arrays
    :param masks: a list of mask arrays, True values are masked
    :param dtype: data type of the output
    :param out: optional output, with one more axis than the input arrays
    :param nmin: number of low values to reject
    :param nmax: number of high values to reject
    :return: mean, variance of the mean and number of points stored
    """
    return generic_combine(intl_combine.minmax_method(nmin, nmax), arrays,
                           masks=masks, dtype=dtype, out=out,
                           zeros=zeros, scales=scales, weights=weights)
Inputs and masks are a list of array objects. All input arrays
have the same shape. If present, the masks have the same shape
also.
The function returns an array with one more dimension than the
inputs and with size (3, shape). out[0] contains the mean,
out[1] the variance and out[2] the number of points used.
:param arrays: a list of arrays
:param masks: a list of mask arrays, True values are masked
:param dtype: data type of the output
:param out: optional output, with one more axis than the input arrays
:param nmin:
:param nmax:
:return: mean, variance of the mean and number of points stored |
def reset_internal_states(self, record=None):
    """Reset the internal state of the recorder.

    Args:
        record: records.TestResultRecord, the test record for a test.
    """
    # The original assigned self._record twice (None, then `record`);
    # a single assignment suffices.
    self._count = 0
    self._record = record
Args:
record: records.TestResultRecord, the test record for a test. |
def _search_env(keys):
matches = (os.environ[key] for key in keys if key in os.environ)
return next(matches, None) | Search the environment for the supplied keys, returning the first
one found or None if none was found. |
def validate_email(email, partial_match=False):
    """Perform email address validation.

    Args:
        email (str): string to match
        partial_match (bool): If False (default), the entire string must
            be a valid email address.  If True, any valid email address
            anywhere in the string triggers a valid response.

    Returns:
        True if the value contains an email address, else False.
    """
    pattern = re.compile(RGX_EMAIL_VALIDATION_PATTERN, re.I)
    matcher = pattern.search if partial_match else pattern.match
    return matcher(email) is not None
>>> validate_email('akjaer@riotgames.com')
True
>>> validate_email('Asbjorn Kjaer <akjaer@riotgames.com')
False
>>> validate_email('Asbjorn Kjaer <akjaer@riotgames.com', partial_match=True)
True
Args:
email (str): Email address to match
partial_match (bool): If False (default), the entire string must be a valid email address. If true, any valid
email address in the string will trigger a valid response
Returns:
True if the value contains an email address, else False |
def data_nodes(self):
    """Return all data nodes of the dispatcher.

    :return: mapping of node id -> node attributes, restricted to nodes
        whose ``'type'`` is ``'data'``.
    :rtype: dict[str, dict]
    """
    result = {}
    for node_id, attrs in self.nodes.items():
        if attrs['type'] == 'data':
            result[node_id] = attrs
    return result
:return:
All data nodes of the dispatcher.
:rtype: dict[str, dict] |
def get_date_info(value):
    """Return the datetime object and the detected format of the input date.

    :type value: str
    :return: (datetime, format) tuple
    """
    detected_format = _get_date_format(value)
    return _datetime_obj_factory(value, detected_format), detected_format
:type value: `str` |
def _add_condition(self, operator, operand, types):
    """Append a validated condition to the query being built.

    :param operator: operator (str)
    :param operand: operand value
    :param types: allowed operand types
    :raise QueryMissingField: if no field has been selected yet
    :raise QueryTypeError: if the operand has an unexpected type
    :raise QueryMultipleExpressions: if a logical operator is missing
        between two consecutive expressions
    :return: self (for chaining)
    """
    if not self.current_field:
        raise QueryMissingField("Conditions requires a field()")
    elif not type(operand) in types:
        # Report the *calling* method's name in the error message.
        caller = inspect.currentframe().f_back.f_code.co_name
        raise QueryTypeError("Invalid type passed to %s() , expected: %s" % (caller, types))
    elif self.c_oper:
        raise QueryMultipleExpressions("Expected logical operator after expression")
    # Remember which public operation produced this condition.
    self.c_oper = inspect.currentframe().f_back.f_code.co_name
    self._query.append("%(current_field)s%(operator)s%(operand)s" % {
        'current_field': self.current_field,
        'operator': operator,
        'operand': operand
    })
    return self
:param operator: operator (str)
:param operand: operand
:param types: allowed types
:raise:
- QueryMissingField: if a field hasn't been set
- QueryMultipleExpressions: if a condition already has been set
- QueryTypeError: if the value is of an unexpected type |
def make_key(*criteria):
    """Make a string key out of many criteria.

    Stringifies each criterion, drops None results, and joins the rest
    with ':'.  Returns None when nothing survives.
    """
    parts = [part for part in (stringify(c) for c in criteria)
             if part is not None]
    if parts:
        return ':'.join(parts)
def remove_remote_subnet(self, context_id, subnet_id):
    """Remove a remote subnet from a tunnel context.

    :param int context_id: The id-value representing the context instance.
    :param int subnet_id: The id-value representing the remote subnet.
    :return bool: True if remote subnet removal was successful.
    """
    return self.context.removeCustomerSubnetFromNetworkTunnel(
        subnet_id, id=context_id)
:param int context_id: The id-value representing the context instance.
:param int subnet_id: The id-value representing the remote subnet.
:return bool: True if remote subnet removal was successful. |
def normalized_rgb(self):
    r"""Return the linearized (gamma-expanded) r, g, b channels.

    Each 0-255 channel is scaled to [0, 1] and then linearized per the
    WCAG 2.0 relative-luminance formula:

        c <= 0.03928  ->  c / 12.92
        otherwise     ->  ((c + 0.055) / 1.055) ** 2.4

    Returns:
        tuple: the rgb values of the colour, each in [0.0, 1.0]

    Source: http://www.w3.org/TR/2008/REC-WCAG20-20081211/#relativeluminancedef
    """
    # The original body began with a stray bare `r` -- the severed prefix
    # of this raw docstring -- which raised NameError; removed.
    def linearize(channel):
        # WCAG sRGB linearization of a single 0-255 channel.
        scaled = channel / 255
        if scaled <= 0.03928:
            return scaled / 12.92
        return math.pow((scaled + 0.055) / 1.055, 2.4)

    return (linearize(self._r), linearize(self._g), linearize(self._b))
Returns a tuples of the normalized values of the red, green, and blue
channels of the Colour.
Returns:
tuple: the rgb values of the colour (with values normalized between
0.0 and 1.0)
.. note::
Uses the formula:
\\[ r_{norm} = \\begin{cases}
\\frac{r_{255}}{12.92}\\ \\qquad &\\text{if $r_{255}$ $\\le$ 0.03928}
\\\\
\\left(\\frac{r_{255} + 0.055}{1.055}\\right)^{2.4}
\\quad &\\text{otherwise}
\\end{cases} \\]
`Source <http://www.w3.org/TR/2008/REC-WCAG20-20081211/#relativeluminancedef>`_ |
def add_relation(self, source, destination):
    """Add a new source->destination relation to the bijection.

    Raises ValueError when either endpoint is already mapped -- including
    when the exact same pair is re-added (dedicated message).
    """
    if self.in_sources(source):
        if self.forward[source] == destination:
            raise ValueError("Source-Destination relation already exists.")
        raise ValueError("Source is already in use. Destination does "
                         "not match.")
    if self.in_destinations(destination):
        raise ValueError("Destination is already in use. Source does not "
                         "match.")
    # Maintain both directions of the bijection together.
    self.forward[source] = destination
    self.reverse[destination] = source
def transitingPlanets(self):
    """Return the list of planet objects currently transiting.

    Planets whose transit lookup raises KeyError are skipped.
    """
    result = []
    for candidate in self.planets:
        try:
            is_transiting = candidate.isTransiting
        except KeyError:
            # Missing transit data: treat as not transiting.
            continue
        if is_transiting:
            result.append(candidate)
    return result
def normalize_node(node, headers=None):
    """Normalize a node given as str or dict (with optional headers).

    Returns ``{'endpoint': url, 'headers': merged}`` where node-level
    headers override the shared *headers*.
    """
    base_headers = {} if headers is None else headers
    if isinstance(node, str):
        return {'endpoint': normalize_url(node), 'headers': base_headers}
    merged = {**base_headers, **node.get('headers', {})}
    return {'endpoint': normalize_url(node['endpoint']), 'headers': merged}
def interrupt (aggregate):
    """Interrupt execution and shut down, ignoring any subsequent
    interrupts.

    Keeps retrying abort() until it completes without being cut short by
    another KeyboardInterrupt.
    """
    while True:
        try:
            log.warn(LOG_CHECK,
                _("interrupt; waiting for active threads to finish"))
            log.warn(LOG_CHECK,
                _("another interrupt will exit immediately"))
            abort(aggregate)
            break
        except KeyboardInterrupt:
            # Swallow further Ctrl-C presses while shutting down.
            pass
interrupts. |
def parse_qs(s, rx, parsef=None, length=2, quote=False):
    """Parse a string that can match either regex *rx* or the custom
    parser *parsef* (ostensibly the parser for rx).

    Tuple-looking inputs "(a, b)" are validated with parse_utuple (or
    handed to parsef); a bare value matching rx becomes a one-element
    list.  Use parse colors for color tuples -- this won't work with
    those.

    :raises ValueError: when s matches neither form
    """
    if type(rx) != str:
        # Accept compiled regex patterns as well as strings.
        rx = rx.pattern;
    if re.match(" *\(.*\)", s):
        if not parsef:
            if parse_utuple(s,rx,length=length):
                if quote:
                    s=quote_subs(s);
                # NOTE(review): evalt/eval on input strings -- only safe
                # for trusted input; confirm at call sites.
                return evalt(s);
            else:
                raise ValueError("{} did is not a valid tuple of {}".format(
                    s, rx));
        else:
            return parsef(s,length=length);
    elif re.match('^ *{} *$'.format(rx), s):
        if quote:
            return eval('["{}"]'.format(s));
        return eval('[{}]'.format(s));
    else:
        raise ValueError("{} does not match '{}' or the passed parsef".format(
            s,rx));
which is obstensibly the parsef for rx.
Use parse colors for color tuples. This won't work with
those. |
def output_reference(self, name):
    """Return a Reference to the given output, for use in an input of a
    next Step.

    For a Step named `echo` with an output `echoed`, the reference
    `echo/echoed` is returned.

    Args:
        name (str): the name of the Step output

    Raises:
        ValueError: the name is not a valid output name for this Step.
    """
    if name in self.output_names:
        return Reference(step_name=self.name_in_workflow, output_name=name)
    raise ValueError('Invalid output "{}"'.format(name))
of a next Step.
For a Step named `echo` that has an output called `echoed`, the
reference `echo/echoed` is returned.
Args:
name (str): the name of the Step output
Raises:
ValueError: The name provided is not a valid output name for this
Step. |
def get_value(self, property_name):
    """Return the value associated with the passed property, or None.

    Looks the property up via self.get_property(); returns None when the
    argument is not a string or the property is not found.

    :param property_name: (str) The name of the property
    :return: (str) value for the passed property, or None.
    """
    log = logging.getLogger(self.cls_logger + '.get_value')
    # `basestring` only exists on Python 2; `str` is the Python 3 check.
    if not isinstance(property_name, str):
        log.error('property_name arg is not a string, found type: {t}'.format(t=property_name.__class__.__name__))
        return None
    prop = self.get_property(property_name)
    if not prop:
        log.debug('Property name not found matching: {n}'.format(n=property_name))
        return None
    value = self.properties[prop]
    log.debug('Found value for property {n}: {v}'.format(n=property_name, v=value))
    return value
This public method is passed a specific property as a string
and returns the value of that property. If the property is not
found, None will be returned.
:param property_name (str) The name of the property
:return: (str) value for the passed property, or None. |
def redis_version(self):
    """Return the server's redis version as a tuple of ints (cached
    after the first lookup)."""
    if not hasattr(self, '_redis_version'):
        info = self.connection.info()
        parts = info.get('redis_version').split('.')[:3]
        self._redis_version = tuple(int(part) for part in parts)
    return self._redis_version
def create_tool(self, task):
    """Create a new GPTool source body for the toolbox from *task*.

    Builds the gp_tool substitution dict (name, display name, description,
    uri, parameter info and the pre/update/post hooks) and renders it
    through the tool template.

    :param task: task object describing the tool
    :return: the rendered tool source (str)
    """
    gp_tool = dict(taskName=task.name,
                   taskDisplayName=task.display_name,
                   taskDescription=task.description,
                   canRunInBackground=True,
                   taskUri=task.uri)
    gp_tool['execute'] = self._execute_template.substitute(gp_tool)
    # Parameter-related sections are generated by param_builder.
    gp_tool['parameterInfo'] = param_builder.create_param_info(task.parameters,
                                                              self.parameter_map)
    gp_tool['updateParameter'] = param_builder.create_update_parameter(task.parameters,
                                                                      self.parameter_map)
    gp_tool['preExecute'] = param_builder.create_pre_execute(task.parameters,
                                                             self.parameter_map)
    gp_tool['postExecute'] = param_builder.create_post_execute(task.parameters,
                                                               self.parameter_map)
    return self._tool_template.substitute(gp_tool)
def _defaults():
d = {}
d['url'] = os.environ.get('BUGZSCOUT_URL')
d['user'] = os.environ.get('BUGZSCOUT_USER')
d['project'] = os.environ.get('BUGZSCOUT_PROJECT')
d['area'] = os.environ.get('BUGZSCOUT_AREA')
return d | Returns a dict of default args from the environment, which can be
overridden by command line args. |
def _jseq(self, cols, converter=None):
    """Return a JVM Seq of Columns from a list of Column or names.

    :param cols: list of Column objects or column-name strings
    :param converter: optional per-element converter applied before boxing
    """
    return _to_seq(self.sql_ctx._sc, cols, converter)
def lazy_property(func):
    """Wrap *func* as a cached, lazily-evaluated property.

    The wrapped function runs once per instance; the result is stored on
    the instance under '_lazy_<name>' and reused on later accesses.
    Setting the property overrides the cached value; deleting it clears
    the cache so the next access recomputes.
    """
    cache_attr = '_lazy_' + func.__name__

    def getter(self):
        # Compute and cache on first access only.
        if not hasattr(self, cache_attr):
            setattr(self, cache_attr, func(self))
        return getattr(self, cache_attr)

    def setter(self, value):
        setattr(self, cache_attr, value)

    def deleter(self):
        if hasattr(self, cache_attr):
            delattr(self, cache_attr)

    return property(getter, setter, deleter)
Also provides for setting and deleting the property.
Use as you would use the @property decorator::
# OLD:
class MyClass():
def __init__():
self._compute = None
@property
def compute(self):
if self._compute is None:
# computationally intense stuff
# ...
# ...
self._compute = result
return self._compute
@compute.setter
def compute(self, value):
self._compute = value
# NEW:
class MyClass():
def __init__():
pass
@lazy_property
def compute(self):
# computationally intense stuff
# ...
# ...
return result
.. note:
Properties wrapped with ``lazy_property`` are only evaluated once.
If the instance state changes, lazy properties will not be automatically
re-evaulated and the update must be explicitly called for::
c = MyClass(data)
prop = c.lazy_property
# If you update some data that affects c.lazy_property
c.data = new_data
# c.lazy_property won't change
prop == c.lazy_property # TRUE
# If you want to update c.lazy_property, you can delete it, which will
# force it to be recomputed (with the new data) the next time you use it
del c.lazy_property
new_prop = c.lazy_property
new_prop == prop # FALSE |
def update_matches(self, other):
    """Update this instance's Matches to point at the given other's.

    For each of our matches: if *other* has no match for the same
    TextLogError, repoint ours at *other*; otherwise raise the other
    match's score to ours when ours is higher, and yield our match id
    (marking it for deletion by the caller).

    Note: this is a generator -- nothing happens until it is iterated.
    """
    for match in self.error_matches.all():
        other_matches = TextLogErrorMatch.objects.filter(
            classified_failure=other,
            text_log_error=match.text_log_error,
        )
        if not other_matches:
            # No conflict: simply repoint our match at the other failure.
            match.classified_failure = other
            match.save(update_fields=['classified_failure'])
            continue
        # Keep the best score on the surviving match.
        other_matches.filter(score__lt=match.score).update(score=match.score)
        yield match.id
Find Matches with the same TextLogError as our Matches, updating their
score if less than ours and mark our matches for deletion.
If there are no other matches, update ours to point to the other
ClassifiedFailure. |
def get_method_analysis_by_name(self, class_name, method_name, method_descriptor):
    """Return the cross-referencing object for a method looked up by name.

    Similar to get_method_analysis, but the method is resolved from its
    class name, method name and descriptor first.

    :param class_name: e.g. `'Ljava/lang/Object;'`
    :param method_name: e.g. `'onCreate'`
    :param method_descriptor: e.g. `'(I I)V'`
    :return: MethodClassAnalysis, or None when the method is unknown
    """
    method = self.get_method_by_name(class_name, method_name, method_descriptor)
    return self.get_method_analysis(method) if method else None
This function is similar to :meth:`~get_method_analysis`, with the difference
that you can look up the Method by name
:param class_name: name of the class, for example `'Ljava/lang/Object;'`
:param method_name: name of the method, for example `'onCreate'`
:param method_descriptor: method descriptor, for example `'(I I)V'`
:return: :class:`MethodClassAnalysis` |
def bfloat16_activations_var_getter(getter, *args, **kwargs):
    """Custom variable getter: float32 parameters, bfloat16 activations.

    Variables requested as bfloat16 are created in float32 (stable
    parameter storage) and cast back to the requested dtype on return.

    Args:
        getter: the wrapped getter.
        *args: positional arguments forwarded to it.
        **kwargs: keyword arguments; must include "dtype".

    Returns:
        the variable, cast to the requested dtype.

    Raises:
        KeyError: if "dtype" is not provided as a kwarg.
    """
    requested_dtype = kwargs["dtype"]
    if requested_dtype == tf.bfloat16:
        # Store parameters in float32 for numeric stability.
        kwargs["dtype"] = tf.float32
    var = getter(*args, **kwargs)
    if var.dtype.base_dtype != requested_dtype:
        var = tf.cast(var, requested_dtype)
    return var
Args:
getter: custom getter
*args: arguments
**kwargs: keyword arguments
Returns:
variables with the correct dtype.
Raises:
KeyError: if "dtype" is not provided as a kwarg. |
def is_convertible_with(self, other):
    """Return True if `other` is convertible with this Dimension.

    Two known Dimensions are convertible iff they have the same value;
    an unknown Dimension (value None) is convertible with everything.

    Args:
        other: Another Dimension.

    Returns:
        True if this Dimension and `other` are convertible.
    """
    other_value = as_dimension(other).value
    return self._value is None or other_value is None or self._value == other_value
Two known Dimensions are convertible if they have the same value.
An unknown Dimension is convertible with all other Dimensions.
Args:
other: Another Dimension.
Returns:
True if this Dimension and `other` are convertible. |
def save(self):
    """Save locally cached changes to the remote database.

    Creates the document remotely if it does not exist yet; otherwise
    PUTs the current JSON body and refreshes the local '_rev' from the
    server's response.
    """
    headers = {}
    headers.setdefault('Content-Type', 'application/json')
    if not self.exists():
        # First save: create instead of update.
        self.create()
        return
    put_resp = self.r_session.put(
        self.document_url,
        data=self.json(),
        headers=headers
    )
    put_resp.raise_for_status()
    data = response_to_json_dict(put_resp)
    # Keep the local revision in sync with the server.
    super(Document, self).__setitem__('_rev', data['rev'])
    return
structures to the remote database. If the document does not exist
remotely then it is created in the remote database. If the object
does exist remotely then the document is updated remotely. In either
case the locally cached Document object is also updated accordingly
based on the successful response of the operation. |
def prepare(self, session, event):
    """Prepare phase for a session: stage its event in redis.

    Each event value set is pickled (mapped objects are reduced to their
    primary key first) and stored in a redis hash; the session's unique
    id is added to a companion set.  Empty events are logged and skipped.

    :param session: sqlalchemy session
    :param event: mapping of event name -> iterable of objects
    """
    if not event:
        self.logger.warn("event empty!")
        return
    sp_key, sp_hkey = self._keygen(session)
    def _pk(obj):
        # Primary key of a mapped object; scalar when single-column.
        pk_values = tuple(getattr(obj, c.name)
                          for c in obj.__mapper__.primary_key)
        if len(pk_values) == 1:
            return pk_values[0]
        return pk_values
    def _get_dump_value(value):
        # Mapped objects dump as their primary key; plain values as-is.
        if hasattr(value, '__mapper__'):
            return _pk(value)
        return value
    pickled_event = {
        k: pickle.dumps({_get_dump_value(obj) for obj in objs})
        for k, objs in event.items()}
    with self.r.pipeline(transaction=False) as p:
        p.sadd(sp_key, session.meepo_unique_id)
        p.hmset(sp_hkey, pickled_event)
        p.execute()
:param session: sqlalchemy session |
async def unlock(self, key, value, *, flags=None, session):
    """Unlock the Key with the given Session.

    Parameters:
        key (str): Key to set
        value (Payload): Value to set; encoded according to *flags*
        session (ObjectID): Session ID currently holding the lock
        flags (int): Flags to set with value
    Returns:
        bool: True on success.  The key is only released when the
        session is valid and currently holds the lock.
    """
    value = encode_value(value, flags)
    session_id = extract_attr(session, keys=["ID"])
    response = await self._write(key, value,
                                 flags=flags,
                                 release=session_id)
    return response.body is True
Parameters:
key (str): Key to set
value (Payload): Value to set, It will be encoded by flags
session (ObjectID): Session ID
flags (int): Flags to set with value
Response:
bool: ``True`` on success
The Key will only release the lock if the Session is valid and
currently has it locked. |
def _get_default_field_kwargs(model, field):
    """Derive REST Framework serializer-field kwargs from the model field.

    Resolves the index field's backing model field on *model*, converts
    its attributes via get_field_kwargs(), and drops keys a serializer
    field constructor does not accept.  Returns {} when the model has no
    such field.
    """
    kwargs = {}
    try:
        field_name = field.model_attr or field.index_fieldname
        model_field = model._meta.get_field(field_name)
        kwargs.update(get_field_kwargs(field_name, model_field))
        # These keys are not valid serializer-field constructor kwargs.
        delete_attrs = [
            "allow_blank",
            "choices",
            "model_field",
            "allow_unicode",
        ]
        for attr in delete_attrs:
            if attr in kwargs:
                del kwargs[attr]
    except FieldDoesNotExist:
        pass
    return kwargs
to instantiate a REST Framework serializer field. |
def find_compilation_database(path):
    """Walk up from the current directory until *path* (the compilation
    database file) is found; return that directory's real path.

    Exits the process with an error message when the filesystem root is
    reached without finding it.
    """
    result = './'
    while not os.path.isfile(os.path.join(result, path)):
        # NOTE(review): the '/' root check assumes a POSIX filesystem;
        # confirm this tool never runs on Windows.
        if os.path.realpath(result) == '/':
            print('Error: could not find compilation database.')
            sys.exit(1)
        result += '../'
    return os.path.realpath(result)
def add(self, entities):
    """Add the given entities to the cache, if they weren't saved before.

    Accepts a single entity, a list-like of entities, or an object
    exposing .chats/.users (and optionally .user).  Entities for which a
    peer id or input peer cannot be produced are skipped silently.
    """
    if not utils.is_list_like(entities):
        # Flatten the common response shapes into one iterable.
        entities = itertools.chain(
            getattr(entities, 'chats', []),
            getattr(entities, 'users', []),
            (hasattr(entities, 'user') and [entities.user]) or []
        )
    for entity in entities:
        try:
            pid = utils.get_peer_id(entity)
            if pid not in self.__dict__:
                # Cache keyed by marked peer id -> input peer.
                self.__dict__[pid] = utils.get_input_peer(entity)
        except TypeError:
            pass
def info_1(*tokens: Token, **kwargs: Any) -> None:
    """Print an important informative message, prefixed with a bold
    blue '::'."""
    info(bold, blue, "::", reset, *tokens, **kwargs)
def greedy_merge_helper(
        variant_sequences,
        min_overlap_size=MIN_VARIANT_SEQUENCE_ASSEMBLY_OVERLAP_SIZE):
    """Attempt one greedy round of pairwise VariantSequence merging.

    Tries to combine every pair; combined results colliding on the same
    sequence string pool their reads.  Sequences that took part in any
    merge are removed from the unmerged set.

    Returns:
        (list of VariantSequence, bool): merged + leftover sequences, and
        True if any pair was successfully merged.
    """
    merged_variant_sequences = {}
    merged_any = False
    unmerged_variant_sequences = set(variant_sequences)
    # O(n^2) pairwise pass; each round is expected to shrink the input.
    for i in range(len(variant_sequences)):
        sequence1 = variant_sequences[i]
        for j in range(i + 1, len(variant_sequences)):
            sequence2 = variant_sequences[j]
            combined = sequence1.combine(sequence2)
            if combined is None:
                continue
            if combined.sequence in merged_variant_sequences:
                # Same resulting sequence seen before: pool the reads.
                existing = merged_variant_sequences[combined.sequence]
                combined = combined.add_reads(existing.reads)
            merged_variant_sequences[combined.sequence] = combined
            unmerged_variant_sequences.discard(sequence1)
            unmerged_variant_sequences.discard(sequence2)
            merged_any = True
    result = list(merged_variant_sequences.values()) + list(unmerged_variant_sequences)
    return result, merged_any
were successfully merged. |
def find_nodes(self, query_dict=None, exact=False, verbose=False, **kwargs):
    """Query on node properties (v1 API only).

    See documentation for the _OTIWrapper class.
    """
    assert self.use_v1
    return self._do_query('{p}/singlePropertySearchForTreeNodes'.format(p=self.query_prefix),
                          query_dict=query_dict,
                          exact=exact,
                          verbose=verbose,
                          valid_keys=self.node_search_term_set,
                          kwargs=kwargs)
def include_version(global_root: str, version_obj: models.Version, hardlink: bool = True):
    """Include a version's files in the existing bundle tree.

    Creates the version's directory under *global_root* and hard-links
    (or symlinks) every file into it, then rewrites each file's path to
    be relative to the global root.

    :param global_root: root directory of the global bundle store
    :param version_obj: the version whose files are linked in
    :param hardlink: hard-link when True, symlink otherwise
    :raises VersionIncludedError: if the version was already included
    """
    global_root_dir = Path(global_root)
    if version_obj.included_at:
        raise VersionIncludedError(f"version included on {version_obj.included_at}")
    version_root_dir = global_root_dir / version_obj.relative_root_dir
    version_root_dir.mkdir(parents=True, exist_ok=True)
    log.info(f"created new bundle version dir: {version_root_dir}")
    for file_obj in version_obj.files:
        file_obj_path = Path(file_obj.path)
        new_path = version_root_dir / file_obj_path.name
        if hardlink:
            os.link(file_obj_path.resolve(), new_path)
        else:
            os.symlink(file_obj_path.resolve(), new_path)
        log.info(f"linked file: {file_obj.path} -> {new_path}")
        # Store the path relative to the global root from now on.
        file_obj.path = str(new_path).replace(f"{global_root_dir}/", '', 1)
def disable_availability_zones(self, load_balancer_name, zones_to_remove):
    """Remove availability zones from an existing Load Balancer.

    All zones must be in the same region as the Load Balancer; removing
    unregistered zones has no effect, and you cannot remove all zones.

    :type load_balancer_name: string
    :param load_balancer_name: The name of the Load Balancer
    :type zones_to_remove: List of strings
    :param zones_to_remove: The name of the zone(s) to remove.
    :rtype: List of strings
    :return: An updated list of zones for this Load Balancer.
    """
    params = {'LoadBalancerName' : load_balancer_name}
    self.build_list_params(params, zones_to_remove,
                           'AvailabilityZones.member.%d')
    return self.get_list('DisableAvailabilityZonesForLoadBalancer',
                         params, None)
All zones must be in the same region as the Load Balancer.
Removing zones that are not registered with the Load Balancer
has no effect.
You cannot remove all zones from an Load Balancer.
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type zones: List of strings
:param zones: The name of the zone(s) to remove.
:rtype: List of strings
:return: An updated list of zones for this Load Balancer. |
def load(cls, cache_file, backend=None):
    """Instantiate AsyncResult from dumped `cache_file`.

    This is the inverse of :meth:`dump`.

    Parameters
    ----------
    cache_file : str
        Name of file from which the run should be read.
    backend : clusterjob.backends.ClusterjobBackend or None
        The backend instance for the job. If None, the backend will be
        determined by the *name* of the dumped job's backend.
    """
    with open(cache_file, 'rb') as pickle_fh:
        (remote, backend_name, max_sleep_interval, job_id, status,
         epilogue, ssh, scp) = pickle.load(pickle_fh)
    if backend is None:
        # Resolve the backend instance from the registry by its dumped name.
        backend = JobScript._backends[backend_name]
    result = cls(backend)
    result.remote = remote
    result.max_sleep_interval = max_sleep_interval
    result.job_id = job_id
    result._status = status
    result.epilogue = epilogue
    result.ssh = ssh
    result.scp = scp
    result.cache_file = cache_file
    return result
This is the inverse of :meth:`dump`.
Parameters
----------
cache_file: str
Name of file from which the run should be read.
backend: clusterjob.backends.ClusterjobBackend or None
The backend instance for the job. If None, the backend will be
determined by the *name* of the dumped job's backend. |
def get_pidfile(pidfile):
    """Return the pid from a pidfile as an integer, or -1 on any failure
    (missing/unreadable file, or contents that are not an integer)."""
    try:
        with salt.utils.files.fopen(pidfile) as handle:
            return int(handle.read().strip())
    except (OSError, IOError, TypeError, ValueError):
        return -1
def save(self):
    """Saves the updated model to the current entity db.

    Returns self to allow call chaining.
    """
    session = self.session
    session.add(self)
    session.flush()
    return self
def _check_if_tag_already_exists(self):
version = self.data['new_version']
if self.vcs.tag_exists(version):
return True
else:
return False | Check if tag already exists and show the difference if so |
def _cast_to_type(self, value):
    """Convert the value to a date and raise error on failures.

    datetime instances are truncated to their date; date instances pass
    through; anything else is run through date_parser. On a ValueError
    the field's 'invalid' failure is raised via self.fail.
    """
    # Check datetime first: datetime is a subclass of date, so the order
    # of these two isinstance checks matters.
    if isinstance(value, datetime.datetime):
        return value.date()
    if isinstance(value, datetime.date):
        return value
    try:
        return date_parser(value).date()
    except ValueError:
        self.fail('invalid', value=value)
def nic_v1(msg, NICs):
    """Calculate NIC, navigation integrity category, for ADS-B version 1.

    Args:
        msg (string): 28 bytes hexadecimal message string
        NICs (int or string): NIC supplement

    Returns:
        int or string: Horizontal Radius of Containment
        int or string: Vertical Protection Limit

    Raises:
        RuntimeError: if the typecode is outside the position-message range.
    """
    # Compute the typecode once instead of three redundant calls.
    tc = typecode(msg)
    if tc < 5 or tc > 22:
        raise RuntimeError(
            "%s: Not a surface position message (5<TC<8), \
        airborne position message (8<TC<19), \
        or airborne position with GNSS height (20<TC<22)" % msg
        )
    NIC = uncertainty.TC_NICv1_lookup[tc]
    # Some typecodes map to a dict keyed by the NIC supplement.
    if isinstance(NIC, dict):
        NIC = NIC[NICs]
    try:
        Rc = uncertainty.NICv1[NIC][NICs]['Rc']
        VPL = uncertainty.NICv1[NIC][NICs]['VPL']
    except KeyError:
        # Unknown NIC / supplement combination: report Not Available.
        Rc, VPL = uncertainty.NA, uncertainty.NA
    return Rc, VPL
Args:
msg (string): 28 bytes hexadecimal message string
NICs (int or string): NIC supplement
Returns:
int or string: Horizontal Radius of Containment
int or string: Vertical Protection Limit |
def order_market_buy(self, **params):
    """Send in a new market buy order.

    :param symbol: required
    :param quantity: required
    :param newClientOrderId: A unique id for the order. Automatically generated if not sent.
    :param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
    :param recvWindow: the number of milliseconds the request is valid for
    :returns: API response (see order endpoint for full response options)
    :raises: BinanceRequestException, BinanceAPIException, BinanceOrderException
    """
    # Force the side to BUY and delegate to the generic market-order call.
    params['side'] = self.SIDE_BUY
    return self.order_market(**params)
:param symbol: required
:type symbol: str
:param quantity: required
:type quantity: decimal
:param newClientOrderId: A unique id for the order. Automatically generated if not sent.
:type newClientOrderId: str
:param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
:type newOrderRespType: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
See order endpoint for full response options
:raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException |
def sizeHint(self, option, index):
    """Size based on component duration and a fixed height.

    Width is the duration (seconds) converted to pixels via the
    pixels-per-millisecond scale; height is fixed at 50 px.
    """
    # NOTE(review): the original fetched index.internalPointer() into an
    # unused local named `component`; it may have been intended to replace
    # self.component below. Behavior kept as-is — TODO confirm with callers.
    width = self.component.duration() * self.pixelsPerms * 1000
    return QtCore.QSize(width, 50)
def device_state(device_id):
    """Get device state via HTTP GET.

    Returns JSON ``{"state": ...}`` for a known device, or
    ``{"success": false}`` with HTTP 404 for an unknown ``device_id``.
    """
    if device_id not in devices:
        # Unknown device: signal failure with a proper 404 status code
        # instead of an implicit 200.
        return jsonify(success=False), 404
    return jsonify(state=devices[device_id].state)
def _parse_metadata(self, meta):
formatted_fields = self.settings['FORMATTED_FIELDS']
output = collections.OrderedDict()
for name, value in meta.items():
name = name.lower()
if name in formatted_fields:
rendered = self._render(value).strip()
output[name] = self.process_metadata(name, rendered)
else:
output[name] = self.process_metadata(name, value)
return output | Return the dict containing document metadata |
def search_external_subtitles(path, directory=None):
    """Search for external subtitles from a video `path` and their associated language.

    Unless `directory` is provided, search will be made in the same directory as
    the video file.

    :param str path: path to the video.
    :param str directory: directory to search for subtitles.
    :return: found subtitles with their languages.
    :rtype: dict
    """
    video_dir, video_name = os.path.split(path)
    video_dir = video_dir or '.'
    video_root, video_ext = os.path.splitext(video_name)
    found = {}
    for candidate in os.listdir(directory or video_dir):
        # Only consider files sharing the video's stem with a subtitle extension.
        if not (candidate.startswith(video_root) and candidate.endswith(SUBTITLE_EXTENSIONS)):
            continue
        # Default to the undetermined language.
        lang = Language('und')
        # Slice out the part between the video stem and the subtitle
        # extension, e.g. 'movie.en.srt' -> 'en'.
        code = candidate[len(video_root):-len(os.path.splitext(candidate)[1])].replace(video_ext, '').replace('_', '-')[1:]
        if code:
            try:
                lang = Language.fromietf(code)
            except (ValueError, LanguageReverseError):
                logger.error('Cannot parse language code %r', code)
        found[candidate] = lang
    logger.debug('Found subtitles %r', found)
    return found
def bridge_create(br, may_exist=True, parent=None, vlan=None):
    """Creates a new bridge.

    Args:
        br: A string - bridge name
        may_exist: Bool, if False - attempting to create a bridge that exists returns False.
        parent: String, the name of the parent bridge (if the bridge shall be
            created as a fake bridge). If specified, vlan must also be specified.
        vlan: Int, the VLAN ID of the bridge (if the bridge shall be created as
            a fake bridge). If specified, parent must also be specified.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.bridge_create br0
    """
    param_may_exist = _param_may_exist(may_exist)
    # parent and vlan come as a pair: reject one without the other.
    if parent is not None and vlan is None:
        raise ArgumentValueError(
            'If parent is specified, vlan must also be specified.')
    if vlan is not None and parent is None:
        raise ArgumentValueError(
            'If vlan is specified, parent must also be specified.')
    fake_bridge_args = ''
    if parent is not None:
        fake_bridge_args = ' {0} {1}'.format(parent, vlan)
    cmd = 'ovs-vsctl {0}add-br {1}{2}'.format(param_may_exist, br, fake_bridge_args)
    result = __salt__['cmd.run_all'](cmd)
    return _retcode_to_bool(result['retcode'])
Args:
br: A string - bridge name
may_exist: Bool, if False - attempting to create a bridge that exists returns False.
parent: String, the name of the parent bridge (if the bridge shall be
created as a fake bridge). If specified, vlan must also be
specified.
vlan: Int, the VLAN ID of the bridge (if the bridge shall be created as
a fake bridge). If specified, parent must also be specified.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_create br0 |
def is_valid_timestamp(date, unit='millis'):
    """Checks that a number that represents a date as milliseconds (or
    seconds) is correct.

    :param date: integer timestamp to validate
    :param unit: 'millis' expects 13 digits, 'seconds' expects 10
    :returns: True if the timestamp is positive with the expected digit count
    :raises ValueError: on an unknown unit
    """
    assert isinstance(date, int), "Input is not instance of int"
    # Compare with ==, not `is`: identity on string literals only works
    # by accident of CPython small-string interning.
    if unit == 'millis':
        return is_positive(date) and len(str(date)) == 13
    elif unit == 'seconds':
        return is_positive(date) and len(str(date)) == 10
    else:
        raise ValueError('Unknown unit "%s"' % unit)
def write_csv(self, path=None):
    """Write CSV file. Sorts the sections before calling the superclass write_csv.

    :param path: destination path; converted to str before delegation.
    :return: whatever the superclass write_csv returns.
    """
    self.sort_sections(['Root', 'Contacts', 'Documentation', 'References', 'Resources', 'Citations', 'Schema'])
    if self.description:
        # NOTE(review): self-assignment looks like a no-op, but if
        # `description` is a property this could trigger setter side
        # effects — confirm before removing.
        self.description = self.description
    if self.abstract:
        # The abstract, when present, overwrites the description.
        self.description = self.abstract
    # Refresh the Root.Modified term with the current timestamp.
    t = self['Root'].get_or_new_term('Root.Modified')
    t.value = datetime_now()
    self.sort_by_term()
    return super().write_csv(str(path))
def overlaps(self, canvas, exclude=()):
    """Returns True if sprite is touching any other sprite.

    :param canvas: canvas whose pixels are tested against this sprite's.
    :param exclude: a sprite or an iterable of sprites to ignore; this
        sprite itself is always excluded from the canvas test.
    """
    # Default changed from a mutable [] to an immutable (); list() below
    # makes a fresh copy either way, so behavior is unchanged.
    try:
        excluded = list(exclude)
    except TypeError:
        # A single, non-iterable sprite was passed.
        excluded = [exclude]
    excluded.append(self)
    for row_idx, row in enumerate(self.image.image()):
        for col_idx, pixel in enumerate(row):
            canvas_pixel_on = canvas.testPixel(
                (col_idx + self.position[0], row_idx + self.position[1]),
                excludedSprites=excluded
            )
            # Touching means both this sprite's pixel and the canvas
            # pixel (from some other sprite) are on at the same spot.
            if pixel and canvas_pixel_on:
                return True
    return False
def choices(self):
    """Returns the filter list as a list of (name, title) pairs.
    Useful for model choices."""
    filters = getattr(settings, 'MARKUP_CHOICES', DEFAULT_MARKUP_CHOICES)
    return [(name, self._get_filter_title(name)) for name in filters]
def iterate_analogy_datasets(args):
    """Generator over all analogy evaluation datasets.

    Iterates over dataset names, keyword arguments for their creation and
    the created dataset.
    """
    for name in args.analogy_datasets:
        parameters = nlp.data.list_datasets(name)
        keys = list(parameters.keys())
        # Yield one dataset per combination of parameter values.
        for values in itertools.product(*parameters.values()):
            kwargs = dict(zip(keys, values))
            yield name, kwargs, nlp.data.create(name, **kwargs)
Iterates over dataset names, keyword arguments for their creation and the
created dataset. |
def fork(self):
    """Fork this gist.

    :returns: :class:`Gist <Gist>` if successful, ``None`` otherwise
    """
    url = self._build_url('forks', base_url=self._api)
    json = self._json(self._post(url), 201)
    if not json:
        return None
    return Gist(json, self)
:returns: :class:`Gist <Gist>` if successful, ``None`` otherwise |
def load_snps(
    self,
    raw_data,
    discrepant_snp_positions_threshold=100,
    discrepant_genotypes_threshold=500,
    save_output=False,
):
    """Load raw genotype data.

    Parameters
    ----------
    raw_data : list or str
        path(s) to file(s) with raw genotype data
    discrepant_snp_positions_threshold : int
        threshold for discrepant SNP positions between existing data and data
        to be loaded; a large value could indicate mismatched genome assemblies
    discrepant_genotypes_threshold : int
        threshold for discrepant genotype data between existing data and data
        to be loaded; a large value could indicate mismatched individuals
    save_output : bool
        specifies whether to save discrepant SNP output to CSV files in the
        output directory

    Raises
    ------
    TypeError
        if ``raw_data`` is neither a str nor a list
    """
    # isinstance instead of `type(...) is` so subclasses of str/list are
    # accepted too; a single path is normalized to a one-element list so
    # both cases share one loading loop.
    if isinstance(raw_data, str):
        raw_data = [raw_data]
    elif not isinstance(raw_data, list):
        raise TypeError("invalid filetype")
    for file in raw_data:
        self._load_snps_helper(
            file,
            discrepant_snp_positions_threshold,
            discrepant_genotypes_threshold,
            save_output,
        )
Parameters
----------
raw_data : list or str
path(s) to file(s) with raw genotype data
discrepant_snp_positions_threshold : int
threshold for discrepant SNP positions between existing data and data to be loaded,
a large value could indicate mismatched genome assemblies
discrepant_genotypes_threshold : int
threshold for discrepant genotype data between existing data and data to be loaded,
a large value could indicated mismatched individuals
save_output : bool
specifies whether to save discrepant SNP output to CSV files in the output directory |
def space(self):
    """Combined Hilbert space of all matrix elements.

    Elements without a ``space`` attribute are ignored; with no spatial
    elements at all, the TrivialSpace is returned.
    """
    spaces = [entry.space for entry in self.matrix.ravel()
              if hasattr(entry, 'space')]
    if not spaces:
        return TrivialSpace
    return ProductSpace.create(*spaces)
def get_community_badge_progress(self, steamID, badgeID, format=None):
    """Gets all the quests needed to get the specified badge, and which are completed.

    steamID: The users ID
    badgeID: The badge we're asking about
    format: Return format. None defaults to json. (json, xml, vdf)
    """
    parameters = {'steamid': steamID, 'badgeid': badgeID}
    # Only pass the format through when the caller asked for one.
    if format is not None:
        parameters['format'] = format
    url = self.create_request_url(
        self.interface, 'GetCommunityBadgeProgress', 1, parameters)
    return self.return_data(self.retrieve_request(url), format=format)
steamID: The users ID
badgeID: The badge we're asking about
format: Return format. None defaults to json. (json, xml, vdf) |
def doc(self):
    """Returns a PyQuery object of the response's content.

    The object is built lazily on first access and cached on self._doc.
    """
    if hasattr(self, '_doc'):
        return self._doc
    self._doc = PyQuery(self.etree)
    # Resolve relative links against the response URL.
    self._doc.make_links_absolute(utils.text(self.url))
    return self._doc
def clear(self):
    """Remove all nodes and edges from the graph.

    Unlike the regular networkx implementation, this does *not*
    remove the graph's name. But all the other graph, node, and
    edge attributes go away.
    """
    for mapping in (self.adj, self.node, self.graph):
        mapping.clear()
Unlike the regular networkx implementation, this does *not*
remove the graph's name. But all the other graph, node, and
edge attributes go away. |
def start_daemon():
    """Start a thread to continuously read log files and append lines in DB.

    Work in progress. Currently the thread doesn't append anything,
    it only prints the information parsed from each line read.

    Returns:
        thread: the started (or already-running) daemon thread.
    """
    # Create and start the singleton daemon thread only once.
    if RequestLog.daemon is None:
        RequestLog.daemon = RequestLog.ParseToDBThread(
            get_nginx_parser(), daemon=True)
        RequestLog.daemon.start()
    return RequestLog.daemon
Work in progress. Currently the thread doesn't append anything,
it only print the information parsed from each line read.
Returns:
thread: the started thread. |
def chi_squared(source_frequency, target_frequency):
    """Calculate the Chi Squared statistic by comparing ``source_frequency``
    with ``target_frequency``.

    Example:
        >>> chi_squared({'a': 2, 'b': 3}, {'a': 1, 'b': 2})
        0.1

    Args:
        source_frequency (dict): Frequency map of the text you are analyzing
        target_frequency (dict): Frequency map of the target language to compare with

    Returns:
        Decimal value of the chi-squared statistic
    """
    target_prob = frequency_to_probability(target_frequency)
    # Total source count, restricted to symbols present in the target map.
    source_len = sum(
        count for symbol, count in source_frequency.items()
        if symbol in target_frequency
    )
    return sum(
        _calculate_chi_squared(source_frequency.get(symbol, 0), prob, source_len)
        for symbol, prob in target_prob.items()
    )
return result | Calculate the Chi Squared statistic by comparing ``source_frequency`` with ``target_frequency``.
Example:
>>> chi_squared({'a': 2, 'b': 3}, {'a': 1, 'b': 2})
0.1
Args:
source_frequency (dict): Frequency map of the text you are analyzing
target_frequency (dict): Frequency map of the target language to compare with
Returns:
Decimal value of the chi-squared statistic |
def record_result(self, res, prg=''):
    """Record the output of the command. Records the result; can have
    multiple results, so a consistent aggregation scheme is still TODO.
    """
    text = force_to_string(res)
    self._log(self.logFileResult, text, prg)
multiple results, so will need to work out a consistent way to aggregate this |
def camel_to_underscore(name):
    """Convert CamelCase style to under_score_case.

    An uppercase letter gets an underscore prefix unless it is the first
    or the last character of the name; all letters are lower-cased.
    """
    last = len(name) - 1
    pieces = []
    for pos, ch in enumerate(name):
        if ch.isupper() and 0 < pos < last:
            pieces.append('_' + ch.lower())
        else:
            pieces.append(ch.lower())
    return ''.join(pieces)
def parser_available(fpath):
    """Test if a parser plugin is available for fpath.

    Accepts a path string or a file-like object exposing a ``name``
    attribute; raises ValueError otherwise.

    Examples
    --------
    >>> load_builtin_plugins('parsers')
    []
    >>> test_file = StringIO('{"a":[1,2,3.4]}')
    >>> test_file.name = 'test.json'
    >>> parser_available(test_file)
    True
    >>> test_file.name = 'test.other'
    >>> parser_available(test_file)
    False
    >>> unload_all_plugins()
    """
    if isinstance(fpath, basestring):
        fname = fpath
    elif hasattr(fpath, 'name') and (hasattr(fpath, 'open') or hasattr(fpath, 'readline')):
        fname = fpath.name
    else:
        raise ValueError(
            'fpath should be a str or file_like object: {}'.format(fpath))
    # Available if any registered parser's filename pattern matches.
    return any(
        fnmatch(fname, parser.file_regex)
        for parser in get_plugins('parsers').values()
    )
return False | test if parser plugin available for fpath
Examples
--------
>>> load_builtin_plugins('parsers')
[]
>>> test_file = StringIO('{"a":[1,2,3.4]}')
>>> test_file.name = 'test.json'
>>> parser_available(test_file)
True
>>> test_file.name = 'test.other'
>>> parser_available(test_file)
False
>>> unload_all_plugins() |
def read_stat():
    """Mocks read_stat as this is a Linux-specific operation.

    Returns a single-entry list with random CPU time counters.
    """
    fields = ("user", "nice", "sys", "idle", "irq")
    return [{"times": {name: random.randint(0, 999999999) for name in fields}}]
def normrelpath(base, target):
    """This function takes the base and target arguments as paths, and
    returns an equivalent relative path from base to the target, if both
    provided paths are absolute; otherwise target is returned unchanged.
    """
    if not (isabs(base) and isabs(target)):
        return target
    return relpath(normpath(target), dirname(normpath(base)))
return relpath(normpath(target), dirname(normpath(base))) | This function takes the base and target arguments as paths, and
returns an equivalent relative path from base to the target, if both
provided paths are absolute. |
def maxdiff_dtu_configurations(list_of_objects):
    """Return DtuConfiguration instance with maximum differences.

    Parameters
    ----------
    list_of_objects : python list
        List of DtuConfiguration instances to be compared.

    Returns
    -------
    result : DtuConfiguration instance
        Object whose members hold the (max - min) spread of each member
        across the input list; a default instance if the list is empty.
    """
    result = DtuConfiguration()
    if not list_of_objects:
        return result
    for member in result.__dict__.keys():
        values = np.array([obj.__dict__[member] for obj in list_of_objects])
        result.__dict__[member] = values.max() - values.min()
    return result
Parameters
----------
list_of_objects : python list
List of DtuConfiguration instances to be averaged.
Returns
-------
result : DtuConfiguration instance
Object with averaged values. |
def close(self):
    """Close and potentially disconnect from a device.

    Disconnects first only when the underlying stream is still connected,
    then closes the hardware manager and marks this object closed.
    """
    if self.hwman.stream.connected:
        self.hwman.disconnect()
    self.hwman.close()
    self.opened = False
def add_qualified_edge(
    self,
    u,
    v,
    *,
    relation: str,
    evidence: str,
    citation: Union[str, Mapping[str, str]],
    annotations: Optional[AnnotationsHint] = None,
    subject_modifier: Optional[Mapping] = None,
    object_modifier: Optional[Mapping] = None,
    **attr
) -> str:
    """Add a qualified edge.

    Qualified edges have a relation, evidence, citation, and optional
    annotations, subject modifications, and object modifications.

    :param u: The source node
    :param v: The target node
    :param relation: The type of relation this edge represents
    :param evidence: The evidence string from an article
    :param citation: The citation data dictionary for this evidence. If a
        string is given, assumes it's a PubMed identifier and auto-fills
        the citation type.
    :param annotations: The annotations data dictionary
    :param subject_modifier: The modifiers (like activity) on the subject node.
    :param object_modifier: The modifiers (like activity) on the object node.
    :return: The hash of the edge
    :raises TypeError: if citation is neither a str nor a dict
    """
    attr[RELATION] = relation
    attr[EVIDENCE] = evidence
    if isinstance(citation, str):
        # A bare string is interpreted as a PubMed identifier.
        attr[CITATION] = {
            CITATION_TYPE: CITATION_TYPE_PUBMED,
            CITATION_REFERENCE: citation,
        }
    elif isinstance(citation, dict):
        attr[CITATION] = citation
    else:
        raise TypeError
    if annotations:
        attr[ANNOTATIONS] = _clean_annotations(annotations)
    if subject_modifier:
        attr[SUBJECT] = subject_modifier
    if object_modifier:
        attr[OBJECT] = object_modifier
    return self._help_add_edge(u, v, attr)
def web(host, port):
    """Start web application on the given host and port."""
    from .webserver.web import get_app
    app = get_app()
    app.run(host=host, port=port)
def render_form_template():
    """Render template for user.

    Decide whether the user is registered or not, pull remote info and so on.
    """
    error = ""
    remote_info = {}
    registered_user_id = request.query.get("url_id", False)
    if registered_user_id:
        try:
            remote_info = seeder.get_remote_info(registered_user_id)
        except AssertionError:
            # Seeder returned malformed data; treat the user as unregistered.
            error = "Seeder neposlal očekávaná data.\n"
            registered_user_id = False
    if registered_user_id and remote_info:
        return render_registered(registered_user_id, remote_info)
    if not remote_info:
        # Seeder unreachable (or no data) — report it on the blank form.
        error += "Seeder je nedostupný!\n"
    return render_unregistered(error)
Decide whether the user is registered or not, pull remote info and so on. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.