code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def save_reg(data):
    '''
    Save the register to msgpack files.

    Serializes ``data`` and appends it to the ``register`` file inside the
    register directory (creating the directory first if needed).

    :param data: object to serialize and append to the register file
    :raises Exception: re-raises any error from the msgpack dump after logging
    '''
    reg_dir = _reg_dir()
    regfile = os.path.join(reg_dir, 'register')
    try:
        os.makedirs(reg_dir)
    except OSError as exc:
        # The directory already existing is fine; anything else is fatal.
        if exc.errno != errno.EEXIST:
            raise
    try:
        with salt.utils.files.fopen(regfile, 'a') as fh_:
            salt.utils.msgpack.dump(data, fh_)
    except Exception:
        # BUG FIX: log the file we actually failed to write (regfile), not
        # the unrelated __opts__['outdir'] value.
        log.error('Could not write to msgpack file %s', regfile)
        raise
def _get_value_from_ast(self, obj):
if isinstance(obj, ast.Num):
return obj.n
elif isinstance(obj, ast.Str):
return obj.s
elif isinstance(obj, ast.List):
return [self._get_value_from_ast(e) for e in obj.elts]
elif isinstance(obj, ast.Tuple):
return tuple([self._get_value_from_ast(e) for e in obj.elts])
elif sys.version_info.major >= 3 and isinstance(obj, ast.NameConstant):
return obj.value
elif isinstance(obj, ast.Name) and (obj.id in ["True", "False", "None"]):
return string_to_constant[obj.id]
raise NameError("name '%s' is not defined" % obj.id) | Return the value of the ast object. |
def find_encodings(enc=None, system=False):
    """Find functions for encoding translations for a specific codec.

    :param str enc: The codec to find translation functions for. It will be
                    normalized by converting to lowercase, excluding
                    everything which is not ascii, and hyphens will be
                    converted to underscores.
    :param bool system: If True, find encodings based on the system's stdin
                        encoding, otherwise assume utf-8.
    :raises: :exc:`LookupError` if the normalized codec, ``enc``, cannot be
             found in Python's encoding translation map.
    """
    if not enc:
        enc = 'utf-8'
    if system:
        # BUG FIX: the original condition was inverted -- it read
        # sys.stdin.encoding only when it was None, so 'enc' always became
        # None or 'ascii'. Use stdin's encoding when it is available.
        stdin_enc = getattr(sys.stdin, 'encoding', None)
        if stdin_enc is not None:
            enc = stdin_enc
            log.debug("Obtained encoding from stdin: %s" % enc)
        else:
            enc = 'ascii'
    enc = enc.lower()
    codec_alias = encodings.normalize_encoding(enc)
    codecs.register(encodings.search_function)
    coder = codecs.lookup(codec_alias)
    return coder
def join_path(path):
    """If given a string, return it, otherwise combine a list into a string
    using ``os.path.join``."""
    return path if isinstance(path, str) else os.path.join(*path)
def is_allowed(self, name_or_class, mask):
if isinstance(name_or_class, type):
name = name_or_class.type
else:
name = name_or_class
info = self.connections[name]
limit = self.config[name + '_limit']
if limit and info['total'] >= limit:
msg = (
"Sorry, there is too much DCC %s active. Please try again "
"later.") % name.upper()
self.bot.notice(mask, msg)
return False
if mask not in info['masks']:
return True
limit = self.config[name + '_user_limit']
if limit and info['masks'][mask] >= limit:
msg = (
"Sorry, you have too many DCC %s active. Close the other "
"connection(s) or wait a few seconds and try again."
) % name.upper()
self.bot.notice(mask, msg)
return False
return True | Return True is a new connection is allowed |
def init(self, conn):
base = self.read_scripts()[0]['fname']
logging.info('Creating the initial schema from %s', base)
apply_sql_script(conn, os.path.join(self.upgrade_dir, base))
self.install_versioning(conn) | Create the version table and run the base script on an empty database.
:param conn: a DB API 2 connection |
def decode_obj_table(table_entries, plugin):
entries = []
for entry in table_entries:
if isinstance(entry, Container):
assert not hasattr(entry, '__recursion_lock__')
user_obj_def = plugin.user_objects[entry.classID]
assert entry.version == user_obj_def.version
entry = Container(class_name=entry.classID,
**dict(zip(user_obj_def.defaults.keys(),
entry.values)))
entries.append(entry)
return decode_network(entries) | Return root of obj table. Converts user-class objects |
def class_from_string(name):
    """Get a python class object from its fully qualified name.

    :param name: dotted path such as ``"package.module.ClassName"``.
    :returns: the attribute (usually a class) named by the last component.
    """
    import importlib
    module_name, class_name = name.rsplit('.', 1)
    # importlib.import_module is the supported API and returns the leaf
    # module directly, avoiding the __import__/sys.modules dance.
    module = importlib.import_module(module_name)
    return getattr(module, class_name)
def reply(self, user, msg, errors_as_replies=True):
return self._brain.reply(user, msg, errors_as_replies) | Fetch a reply from the RiveScript brain.
Arguments:
user (str): A unique user ID for the person requesting a reply.
This could be e.g. a screen name or nickname. It's used internally
to store user variables (including topic and history), so if your
bot has multiple users each one should have a unique ID.
msg (str): The user's message. This is allowed to contain
punctuation and such, but any extraneous data such as HTML tags
should be removed in advance.
errors_as_replies (bool): When errors are encountered (such as a
deep recursion error, no reply matched, etc.) this will make the
reply be a text representation of the error message. If you set
this to ``False``, errors will instead raise an exception, such as
a ``DeepRecursionError`` or ``NoReplyError``. By default, no
exceptions are raised and errors are set in the reply instead.
Returns:
str: The reply output. |
def diagonal_basis_commutes(pauli_a, pauli_b):
    """Test if `pauli_a` and `pauli_b` share a diagonal basis.

    For every qubit both terms act on, the one-qubit operators must either
    match or at least one of them must be the identity.

    :param pauli_a: Pauli term to check commutation against `pauli_b`
    :param pauli_b: Pauli term to check commutation against `pauli_a`
    :return: Boolean of commutation result
    :rtype: Bool
    """
    shared_qubits = set(pauli_a.get_qubits()) & set(pauli_b.get_qubits())
    return all(pauli_a[q] == 'I' or pauli_b[q] == 'I' or
               pauli_a[q] == pauli_b[q]
               for q in shared_qubits)
def iskip(value, iterable):
    """Skips all values in 'iterable' matching the given 'value'.

    ``None`` elements are matched by identity as well as by equality
    against ``value``.
    """
    for item in iterable:
        matches = (value is None and item is None) or item == value
        if not matches:
            yield item
def bin(self, size, name, value=None):
self._add_field(Binary(size, name, value)) | Add new binary field to template.
This keyword has to be called within a binary container. See `New Binary
Container`. |
def save_configuration(self):
self.check_credentials()
c = self._get_pypirc_command()
c._store_pypirc(self.username, self.password) | Save the PyPI access configuration. You must have set ``username`` and
``password`` attributes before calling this method.
Again, distutils is used to do the actual work. |
def _check_timers(self):
if self._timer_queue:
timer = self._timer_queue[0]
if timer['timeout_abs'] < _current_time_millis():
self._timer_queue.pop(0)
self._logger.debug('Timer {} expired for stm {}, adding it to event queue.'.format(timer['id'], timer['stm'].id))
self._add_event(timer['id'], [], {}, timer['stm'], front=True)
else:
self._next_timeout = (
timer['timeout_abs'] - _current_time_millis()) / 1000
if self._next_timeout < 0:
self._next_timeout = 0
else:
self._next_timeout = None | Check for expired timers.
If there are any timers that expired, place them in the event
queue. |
def relocate(self):
    """Move to the position of self.SearchVar.

    Looks the name up first among the KBOs (via orbit prediction) and then
    among the MPC objects; does nothing if the name is unknown or the
    orbit prediction fails.
    """
    name = self.SearchVar.get()
    if name in kbos:  # BUG FIX: dict.has_key() was removed in Python 3
        import orbfit, ephem, math
        jdate = ephem.julian_date(w.date.get())
        try:
            (ra, dec, a, b, ang) = orbfit.predict(kbos[name], jdate, 568)
        except Exception:  # prediction failed; keep the current view
            return
        ra = math.radians(ra)
        dec = math.radians(dec)
    elif name in mpc_objs:
        ra = mpc_objs[name].ra
        dec = mpc_objs[name].dec
    else:
        # BUG FIX: unknown names used to fall through to recenter() with
        # ra/dec unbound, raising NameError.
        return
    self.recenter(ra, dec)
    self.create_point(ra, dec, color='blue', size=4)
def exit_on_exception(self, raised_exception, message='', exit_code=99):
self.exit_on_error(message=message, exit_code=None)
logger.critical("-----\nException: %s\nBack trace of the error:\n%s",
str(raised_exception), traceback.format_exc())
exit(exit_code) | Log generic message when getting an unrecoverable error
:param raised_exception: raised Exception
:type raised_exception: Exception
:param message: message for the exit reason
:type message: str
:param exit_code: exit with the provided value as exit code
:type exit_code: int
:return: None |
def nato(sentence, pad=' ', format='telephony'):
    """Transform a sentence using the NATO spelling alphabet.

    :param sentence: input sentence
    :param pad: default ``' '``
    :param format: default ``telephony``, options ``telephony`` or
                   ``phonetic``
    :raises TypeError: if ``format`` names an unknown NATO alphabet

    >>> print(nato('Python'))
    papa yankee tango hotel oscar november
    >>> print(nato('Python', format='phonetic'))
    pah-pah yang-key tang-go hoh-tel oss-cah no-vem-ber
    """
    try:
        result = ALPHABET['nato'][format](sentence, pad)
    except KeyError:
        raise TypeError('Unsupported NATO alphabet "%s"' % (format,))
    return '' + result
def get_id(self, name, recurse=True):
self._dlog("getting id '{}'".format(name))
var = self._search("vars", name, recurse)
return var | Get the first id matching ``name``. Will either be a local
or a var.
:name: TODO
:returns: TODO |
def disconnect(self, func):
    """No longer call the function when something changes here.

    Silently ignores receivers that were never connected; drops this
    object's entry entirely once its last receiver is removed.
    """
    key = id(self)
    receivers = _alleged_receivers.get(key)
    if receivers is None:
        return
    try:
        receivers.remove(func)
    except ValueError:
        return
    if not receivers:
        del _alleged_receivers[key]
def _file_changed_nilrt(full_filepath):
rs_state_dir = "/var/lib/salt/restartcheck_state"
base_filename = os.path.basename(full_filepath)
timestamp_file = os.path.join(rs_state_dir, '{0}.timestamp'.format(base_filename))
md5sum_file = os.path.join(rs_state_dir, '{0}.md5sum'.format(base_filename))
if not os.path.exists(timestamp_file) or not os.path.exists(md5sum_file):
return True
prev_timestamp = __salt__['file.read'](timestamp_file).rstrip()
cur_timestamp = str(int(os.path.getmtime(full_filepath)))
if prev_timestamp != cur_timestamp:
return True
return bool(__salt__['cmd.retcode']('md5sum -cs {0}'.format(md5sum_file), output_loglevel="quiet")) | Detect whether a file changed in an NILinuxRT system using md5sum and timestamp
files from a state directory.
Returns:
- False if md5sum/timestamp state files don't exist
- True/False depending if ``base_filename`` got modified/touched |
def delete_feature(self, dataset, fid):
uri = URITemplate(
self.baseuri + '/{owner}/{did}/features/{fid}').expand(
owner=self.username, did=dataset, fid=fid)
return self.session.delete(uri) | Removes a feature from a dataset.
Parameters
----------
dataset : str
The dataset id.
fid : str
The feature id.
Returns
-------
HTTP status code. |
def equally_spaced_points(self, point, distance):
    """Compute the set of points equally spaced between this point
    and the given point.

    :param point:
        Destination point.
    :type point:
        Instance of :class:`Point`
    :param distance:
        Distance between points (in km).
    :type distance:
        float
    :returns:
        The list of equally spaced points.
    :rtype:
        list of :class:`Point` instances
    """
    lons, lats, depths = geodetic.intervals_between(
        self.longitude, self.latitude, self.depth,
        point.longitude, point.latitude, point.depth,
        distance)
    return [Point(lon, lat, depth)
            for lon, lat, depth in zip(lons, lats, depths)]
def _parse_authorization(cls, response, uri=None):
links = _parse_header_links(response)
try:
new_cert_uri = links[u'next'][u'url']
except KeyError:
raise errors.ClientError('"next" link missing')
return (
response.json()
.addCallback(
lambda body: messages.AuthorizationResource(
body=messages.Authorization.from_json(body),
uri=cls._maybe_location(response, uri=uri),
new_cert_uri=new_cert_uri))
) | Parse an authorization resource. |
def make_key(self, value):
    """Make a camelCase variant of *value*, unique across this instance.

    The value is lowered, split with ``self.key_split``, stripped of
    characters matching ``self.key_filter``, camelCased, and suffixed with
    a counter if the same key was produced before.
    """
    if not value:
        key = ''
    else:
        words = [self.key_filter.sub('', word)
                 for word in self.key_split.split(value.lower())]
        key = words[0] + ''.join(word.capitalize() for word in words[1:])
    if key in self.seen_keys:
        suffix = 1
        while '%s%d' % (key, suffix) in self.seen_keys:
            suffix += 1
        key = '%s%d' % (key, suffix)
    self.seen_keys.add(key)
    return key
def use_isolated_bin_view(self):
self._bin_view = ISOLATED
for session in self._get_provider_sessions():
try:
session.use_isolated_bin_view()
except AttributeError:
pass | Pass through to provider ResourceLookupSession.use_isolated_bin_view |
def join_here(*paths, **kwargs):
    """Join any path or paths as a sub directory of the current working
    directory.

    Each segment is stripped of leading slashes/backslashes and surrounding
    whitespace unless 'strict' is set.

    :param paths: paths to join together
    :param kwargs: 'strict', do not strip the path segments
    :param kwargs: 'safe', run the result through safe_path() if True
    :return: abspath as string
    """
    strict = kwargs.get('strict')
    path = os.path.abspath(".")
    for segment in paths:
        if not strict:
            segment = segment.lstrip("\\").lstrip("/").strip()
        path = os.path.abspath(os.path.join(path, segment))
    if kwargs.get('safe'):
        return safe_path(path)
    return path
def orthorhombic(a: float, b: float, c: float):
return Lattice.from_parameters(a, b, c, 90, 90, 90) | Convenience constructor for an orthorhombic lattice.
Args:
a (float): *a* lattice parameter of the orthorhombic cell.
b (float): *b* lattice parameter of the orthorhombic cell.
c (float): *c* lattice parameter of the orthorhombic cell.
Returns:
Orthorhombic lattice of dimensions a x b x c. |
def set_active(self, username, active_state):
if active_state not in (True, False):
raise ValueError("active_state must be True or False")
user = self.get_user(username)
if user is None:
return None
if user['active'] is active_state:
return True
user['active'] = active_state
response = self._put(self.rest_url + "/user",
params={"username": username},
data=json.dumps(user))
if response.status_code == 204:
return True
return None | Set the active state of a user
Args:
username: The account username
active_state: True or False
Returns:
True: If successful
None: If no user or failure occurred |
def _store_oauth_access_token(self, oauth_access_token):
c = Cookie(version=0, name='oauth_access_token', value=oauth_access_token,
port=None, port_specified=False,
domain='steamwebbrowser.tld', domain_specified=True, domain_initial_dot=False,
path='/', path_specified=True,
secure=False, expires=None, discard=False, comment=None, comment_url=None, rest={},
)
self.session.cookies.set_cookie(c)
self._save_cookies() | Called when login is complete to store the oauth access token
This implementation stores the oauth_access_token in a seperate cookie for domain steamwebbrowser.tld |
def analyze(fqdn, result, argl, argd):
package = fqdn.split('.')[0]
if package not in _methods:
_load_methods(package)
if _methods[package] is not None and fqdn in _methods[package]:
return _methods[package][fqdn](fqdn, result, *argl, **argd) | Analyzes the result from calling the method with the specified FQDN.
Args:
fqdn (str): full-qualified name of the method that was called.
result: result of calling the method with `fqdn`.
argl (tuple): positional arguments passed to the method call.
argd (dict): keyword arguments passed to the method call. |
def get_pattern_link_topattern(self, patternnumber):
_checkPatternNumber(patternnumber)
address = _calculateRegisterAddress('linkpattern', patternnumber)
return self.read_register(address) | Get the 'linked pattern' value for a given pattern.
Args:
patternnumber (integer): From 0-7
Returns:
The 'linked pattern' value (int). |
def write(self, b):
from . import mavutil
self.debug("sending '%s' (0x%02x) of len %u\n" % (b, ord(b[0]), len(b)), 2)
while len(b) > 0:
n = len(b)
if n > 70:
n = 70
buf = [ord(x) for x in b[:n]]
buf.extend([0]*(70-len(buf)))
self.mav.mav.serial_control_send(self.port,
mavutil.mavlink.SERIAL_CONTROL_FLAG_EXCLUSIVE |
mavutil.mavlink.SERIAL_CONTROL_FLAG_RESPOND,
0,
0,
n,
buf)
b = b[n:] | write some bytes |
def load(self, entity_class, entity):
if self.__needs_flushing:
self.flush()
if entity.id is None:
raise ValueError('Can not load entity without an ID.')
cache = self.__get_cache(entity_class)
sess_ent = cache.get_by_id(entity.id)
if sess_ent is None:
if self.__clone_on_load:
sess_ent = self.__clone(entity, cache)
else:
cache.add(entity)
sess_ent = entity
self.__unit_of_work.register_clean(entity_class, sess_ent)
return sess_ent | Load the given repository entity into the session and return a
clone. If it was already loaded before, look up the loaded entity
and return it.
All entities referenced by the loaded entity will also be loaded
(and cloned) recursively.
:raises ValueError: When an attempt is made to load an entity that
has no ID |
def get_library_state_copy_instance(self, lib_os_path):
if lib_os_path in self._loaded_libraries:
state_machine = self._loaded_libraries[lib_os_path]
state_copy = copy.deepcopy(state_machine.root_state)
return state_machine.version, state_copy
else:
state_machine = storage.load_state_machine_from_path(lib_os_path)
self._loaded_libraries[lib_os_path] = state_machine
if config.global_config.get_config_value("NO_PROGRAMMATIC_CHANGE_OF_LIBRARY_STATES_PERFORMED", False):
return state_machine.version, state_machine.root_state
else:
state_copy = copy.deepcopy(state_machine.root_state)
return state_machine.version, state_copy | A method to get a state copy of the library specified via the lib_os_path.
:param lib_os_path: the location of the library to get a copy for
:return: |
def process(self, processor:PreProcessors=None):
"Apply `processor` or `self.processor` to `self`."
if processor is not None: self.processor = processor
self.processor = listify(self.processor)
for p in self.processor: p.process(self)
return self | Apply `processor` or `self.processor` to `self`. |
def get_grade_systems_by_gradebooks(self, gradebook_ids):
grade_system_list = []
for gradebook_id in gradebook_ids:
grade_system_list += list(
self.get_grade_systems_by_gradebook(gradebook_id))
return objects.GradeSystemList(grade_system_list) | Gets the list of grade systems corresponding to a list of ``Gradebooks``.
arg: gradebook_ids (osid.id.IdList): list of gradebook
``Ids``
return: (osid.grading.GradeSystemList) - list of grade systems
raise: NullArgument - ``gradebook_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |
def slugify(text):
    """Returns a slug of given text, normalizing unicode data for file-safe
    strings. Used for deciding where to write images to disk.

    Parameters
    ----------
    text : string
        The string to slugify

    Returns
    -------
    slug : string
        A normalized slug representation of the text
    """
    words = re.sub(r'[^\w]+', ' ', text).lower().strip().split()
    return "-".join(words)
def checkout_branch(self, branch):
_, stdout, stderr = self.git_exec(
['checkout', branch],
with_extended_output=True)
return '\n'.join([stderr, stdout]) | Checks out given branch. |
def get_substructure(data, path):
    """Tries to retrieve a sub-structure within some data. If the path does
    not match any sub-structure, returns None.

    >>> data = {'a': 5, 'b': {'c': [1, 2, [{'f': [57]}], 4], 'd': 'test'}}
    >>> get_substructure(data, ['b', 'c', 2, 0, 'f', 0])
    57

    @param data: a container
    @type data: str|dict|list|(an indexable container)
    @param path: location of the data
    @type path: list|str
    @rtype: *
    """
    # Walk the path iteratively; any failed lookup means "no match".
    for step in path:
        try:
            data = data[step]
        except (TypeError, IndexError, KeyError):
            return None
    return data
def to_dict(self, save_data=True):
model_dict = super(SparseGPClassification,self).to_dict(save_data)
model_dict["class"] = "GPy.models.SparseGPClassification"
return model_dict | Store the object into a json serializable dictionary
:param boolean save_data: if true, it adds the data self.X and self.Y to the dictionary
:return dict: json serializable dictionary containing the needed information to instantiate the object |
def _get_section(self, name, type):
for section in self.sections:
if section['name'] == name and section['type'] == type:
return section
return None | Find and return a section with `name` and `type` |
def load(cls, path, base=None):
obj = cls()
obj.read(path, base)
return obj | Either load a path and return a shovel object or return None |
def transition_complete(self, pipeline_key):
def txn():
pipeline_record = db.get(pipeline_key)
if pipeline_record is None:
logging.warning(
'Tried to mark pipeline ID "%s" as complete but it does not exist.',
pipeline_key.name())
raise db.Rollback()
if pipeline_record.status not in (
_PipelineRecord.WAITING, _PipelineRecord.RUN):
logging.warning(
'Tried to mark pipeline ID "%s" as complete, found bad state: %s',
pipeline_key.name(), pipeline_record.status)
raise db.Rollback()
pipeline_record.status = _PipelineRecord.DONE
pipeline_record.finalized_time = self._gettime()
pipeline_record.put()
db.run_in_transaction(txn) | Marks the given pipeline as complete.
Does nothing if the pipeline is no longer in a state that can be completed.
Args:
pipeline_key: db.Key of the _PipelineRecord that has completed. |
def get_bool_raw(s: str) -> Optional[bool]:
    """Maps ``"Y"``, ``"y"`` to ``True`` and ``"N"``, ``"n"`` to ``False``;
    any other value maps to ``None``."""
    if s in ("Y", "y"):
        return True
    if s in ("N", "n"):
        return False
    return None
def ungroupslice(groups, gslice):
    """This is a helper for contigsub.

    Coordinate transform: takes a match from seqingroups() and transforms
    it to ungrouped coordinates.
    """
    # Count elements contributed by the groups preceding the matched one
    # (each group contributes len(group) - 1 elements).
    elts_before = sum(len(group) - 1 for group in groups[:gslice[0]])
    start = elts_before + gslice[1]
    return [start - 1, start + gslice[2] - 1]
def clear(self):
    """Clears state so it can be used for generating entirely new
    instructions.

    Subsequent calls are no-ops until the state is used again.
    """
    if self._clear:
        return
    self.lib._jit_clear_state(self.state)
    self._clear = True
def as_vartype(vartype):
if isinstance(vartype, Vartype):
return vartype
try:
if isinstance(vartype, str):
vartype = Vartype[vartype]
elif isinstance(vartype, frozenset):
vartype = Vartype(vartype)
else:
vartype = Vartype(frozenset(vartype))
except (ValueError, KeyError):
raise TypeError(("expected input vartype to be one of: "
"Vartype.SPIN, 'SPIN', {-1, 1}, "
"Vartype.BINARY, 'BINARY', or {0, 1}."))
return vartype | Cast various inputs to a valid vartype object.
Args:
vartype (:class:`.Vartype`/str/set):
Variable type. Accepted input values:
* :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
* :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``
Returns:
:class:`.Vartype`: Either :class:`.Vartype.SPIN` or
:class:`.Vartype.BINARY`.
See also:
:func:`~dimod.decorators.vartype_argument` |
def sle(self, other):
self._check_match(other)
return self.to_sint() <= other.to_sint() | Compares two equal-sized BinWords, treating them as signed
integers, and returning True if the first is smaller or equal. |
def parse(cls, filename, root=None):
if root is not None:
if os.path.isabs(filename):
raise ValueError("filename must be a relative path if root is specified")
full_filename = os.path.join(root, filename)
else:
full_filename = filename
with io.open(full_filename, 'rb') as fp:
blob = fp.read()
tree = cls._parse(blob, filename)
return cls(blob=blob, tree=tree, root=root, filename=filename) | Parses the file at filename and returns a PythonFile.
If root is specified, it will open the file with root prepended to the path. The idea is to
allow for errors to contain a friendlier file path than the full absolute path. |
def is_ssh_available(host, port=22):
    """Checks whether a TCP connection can be made to ``host:port``.

    :param host: hostname or IP address to probe.
    :param int port: TCP port, default 22 (ssh).
    :returns: True if the connection succeeded, False otherwise.
    """
    try:
        # BUG FIX: close the socket in all cases (the original leaked it)
        # and catch only OSError instead of a bare ``except:`` that also
        # swallowed KeyboardInterrupt/SystemExit.
        with socket.socket() as s:
            s.connect((host, port))
        return True
    except OSError:
        return False
def to_json(fn, obj):
    """Convenience method to save pyquil.operator_estimation objects as a
    JSON file. See :py:func:`read_json`.

    :param fn: destination file name.
    :param obj: object to serialize with ``OperatorEncoder``.
    :return: the file name written, for convenient chaining.
    """
    with open(fn, 'w') as out:
        json.dump(obj, out, cls=OperatorEncoder, indent=2,
                  ensure_ascii=False)
    return fn
def download_go_basic_obo(obo="go-basic.obo", prt=sys.stdout, loading_bar=True):
if not os.path.isfile(obo):
http = "http://purl.obolibrary.org/obo/go"
if "slim" in obo:
http = "http://www.geneontology.org/ontology/subsets"
obo_remote = "{HTTP}/{OBO}".format(HTTP=http, OBO=os.path.basename(obo))
dnld_file(obo_remote, obo, prt, loading_bar)
else:
if prt is not None:
prt.write(" EXISTS: {FILE}\n".format(FILE=obo))
return obo | Download Ontologies, if necessary. |
def _checkJobGraphAcylicDFS(self, stack, visited, extraEdges):
if self not in visited:
visited.add(self)
stack.append(self)
for successor in self._children + self._followOns + extraEdges[self]:
successor._checkJobGraphAcylicDFS(stack, visited, extraEdges)
assert stack.pop() == self
if self in stack:
stack.append(self)
raise JobGraphDeadlockException("A cycle of job dependencies has been detected '%s'" % stack) | DFS traversal to detect cycles in augmented job graph. |
def to_bb(YY, y="deprecated"):
    """Convert mask YY to a bounding box, assumes 0 as background nonzero
    object.

    Returns ``[left_col, top_row, right_col, bottom_row]`` as float32,
    or all zeros for an empty mask.

    NOTE(review): ``np.nonzero`` yields (axis-0, axis-1) indices; the
    original names axis 0 "cols" and axis 1 "rows". That convention is
    preserved here byte-for-byte in behavior.
    """
    cols, rows = np.nonzero(YY)
    if cols.size == 0:
        return np.zeros(4, dtype=np.float32)
    return np.array([cols.min(), rows.min(), cols.max(), rows.max()],
                    dtype=np.float32)
def append_config_item(self, key, value):
return _lxc.Container.set_config_item(self, key, value) | Append 'value' to 'key', assuming 'key' is a list.
If 'key' isn't a list, 'value' will be set as the value of 'key'. |
def type_consumer():
while True:
item = _task_queue.get()
if isinstance(item, KeyAndTypes):
if item.key in collected_args:
_flush_signature(item.key, UnknownType)
collected_args[item.key] = ArgTypes(item.types)
else:
assert isinstance(item, KeyAndReturn)
if item.key in collected_args:
_flush_signature(item.key, item.return_type)
_task_queue.task_done() | Infinite loop of the type consumer thread.
It gets types to process from the task query. |
def dbus_readBytesFD(self, fd, byte_count):
    """Reads up to ``byte_count`` bytes from file descriptor ``fd`` and
    returns them as a bytearray.

    :param fd: an open, readable file descriptor; it is consumed and
        closed by this call.
    :param byte_count: maximum number of bytes to read.
    """
    # BUG FIX: use a context manager so the descriptor is closed even if
    # the read raises (the original leaked it on error).
    with os.fdopen(fd, 'rb') as f:
        return bytearray(f.read(byte_count))
def parse_config_list(config_list):
    """Parse a list of configuration properties separated by
    ``constants.CONFIG_SEPARATOR``.

    :param config_list: iterable of ``"key<sep>value"`` strings, or None.
    :returns: dict mapping each config key to its value ({} for None).
    :raises ValueError: if an entry does not contain exactly one separator.
    """
    if config_list is None:
        return {}
    mapping = {}
    for pair in config_list:
        # "not in" is implied by count != 1, so one check suffices.
        if pair.count(constants.CONFIG_SEPARATOR) != 1:
            # BUG FIX: the original passed the separator as a second
            # positional argument to ValueError instead of formatting it
            # into the message (and misspelled "separated").
            raise ValueError(
                "configs must be passed as two strings separated by a %s"
                % constants.CONFIG_SEPARATOR)
        (config, value) = pair.split(constants.CONFIG_SEPARATOR)
        mapping[config] = value
    return mapping
def standardize(table, with_std=True):
if isinstance(table, pandas.DataFrame):
cat_columns = table.select_dtypes(include=['category']).columns
else:
cat_columns = []
new_frame = _apply_along_column(table, standardize_column, with_std=with_std)
for col in cat_columns:
new_frame[col] = table[col].copy()
return new_frame | Perform Z-Normalization on each numeric column of the given table.
Parameters
----------
table : pandas.DataFrame or numpy.ndarray
Data to standardize.
with_std : bool, optional, default: True
If ``False`` data is only centered and not converted to unit variance.
Returns
-------
normalized : pandas.DataFrame
Table with numeric columns normalized.
Categorical columns in the input table remain unchanged. |
def dump_to_pyc(co, python_version, output_dir):
    """Save the given code object as a .pyc file in ``output_dir``.

    The output name is derived from ``co.co_filename``; names listed in
    ``IGNORE`` are skipped.

    :param co: code object to marshal.
    :param python_version: version used to build the .pyc header.
    :param output_dir: directory the .pyc file is written into.
    """
    pyc_basename = ntpath.basename(co.co_filename)
    pyc_name = pyc_basename + '.pyc'
    if pyc_name in IGNORE:
        logging.info("Skipping %s", pyc_name)
        return
    logging.info("Extracting %s", pyc_name)
    pyc_header = _generate_pyc_header(python_version, len(co.co_code))
    destination = os.path.join(output_dir, pyc_name)
    # BUG FIX: use a context manager so the file is closed even if
    # marshal.dumps or a write fails (the original leaked the handle).
    with open(destination, 'wb') as pyc:
        pyc.write(pyc_header)
        pyc.write(marshal.dumps(co))
def parse(self, element):
result = []
if element.text is not None and element.tag == self.identifier:
l, k = (0, 0)
raw = element.text.split()
while k < len(self.values):
dtype = self.dtype[k]
if isinstance(self.values[k], int):
for i in range(self.values[k]):
result.append(self._caster[dtype](raw[i + l]))
l += self.values[k]
k += 1
else:
rest = [ self._caster[dtype](val) for val in raw[l::] ]
result.extend(rest)
break
else:
msg.warn("no results for parsing {} using line {}".format(element.tag, self.identifier))
return result | Parses the contents of the specified XML element using template info.
:arg element: the XML element from the input file being converted. |
def replace_aliases(cut_dict, aliases):
    """Substitute aliases in a cut dictionary, in place.

    Every occurrence of an alias name inside each cut expression is
    replaced by its parenthesized definition. Plain substring replacement
    is used, exactly as in the original implementation.
    """
    for cut_name in cut_dict:
        expression = cut_dict[cut_name]
        for alias, definition in aliases.items():
            expression = expression.replace(alias, '(%s)' % definition)
        cut_dict[cut_name] = expression
def parse(cls, fptr, offset, length):
num_bytes = offset + length - fptr.tell()
read_buffer = fptr.read(num_bytes)
ndr, = struct.unpack_from('>H', read_buffer, offset=0)
box_offset = 2
data_entry_url_box_list = []
for j in range(ndr):
box_fptr = io.BytesIO(read_buffer[box_offset:])
box_buffer = box_fptr.read(8)
(box_length, box_id) = struct.unpack_from('>I4s', box_buffer,
offset=0)
box = DataEntryURLBox.parse(box_fptr, 0, box_length)
box.offset = offset + 8 + box_offset
data_entry_url_box_list.append(box)
box_offset += box_length
return cls(data_entry_url_box_list, length=length, offset=offset) | Parse data reference box.
Parameters
----------
fptr : file
Open file object.
offset : int
Start position of box in bytes.
length : int
Length of the box in bytes.
Returns
-------
DataReferenceBox
Instance of the current data reference box. |
def Boolean(v):
    """Convert human-readable boolean values to a bool.

    Accepted values are 1, true, yes, on, enable, and their negatives.
    Non-string values are cast to bool.

    :raises ValueError: if a string value is not a recognized boolean word.

    >>> Boolean(True)
    True
    >>> Boolean("1")
    True
    >>> Boolean("0")
    False
    """
    # BUG FIX: ``basestring`` does not exist on Python 3 and raised
    # NameError for every string input; check ``str`` instead.
    if isinstance(v, str):
        v = v.lower()
        if v in ('1', 'true', 'yes', 'on', 'enable'):
            return True
        if v in ('0', 'false', 'no', 'off', 'disable'):
            return False
        raise ValueError
    return bool(v)
def inserir(self, name, read, write, edit, remove):
ugroup_map = dict()
ugroup_map['nome'] = name
ugroup_map['leitura'] = read
ugroup_map['escrita'] = write
ugroup_map['edicao'] = edit
ugroup_map['exclusao'] = remove
code, xml = self.submit({'user_group': ugroup_map}, 'POST', 'ugroup/')
return self.response(code, xml) | Insert new user group and returns its identifier.
:param name: User group's name.
:param read: If user group has read permission ('S' ou 'N').
:param write: If user group has write permission ('S' ou 'N').
:param edit: If user group has edit permission ('S' ou 'N').
:param remove: If user group has remove permission ('S' ou 'N').
:return: Dictionary with structure: {'user_group': {'id': < id >}}
:raise InvalidParameterError: At least one of the parameters is invalid or none..
:raise NomeGrupoUsuarioDuplicadoError: User group name already exists.
:raise ValorIndicacaoPermissaoInvalidoError: Read, write, edit or remove value is invalid.
:raise DataBaseError: Networkapi failed to access database.
:raise XMLError: Networkapi fails generating response XML. |
def node_created_handler(sender, **kwargs):
if kwargs['created']:
obj = kwargs['instance']
queryset = exclude_owner_of_node(obj)
create_notifications.delay(**{
"users": queryset,
"notification_model": Notification,
"notification_type": "node_created",
"related_object": obj
}) | send notification when a new node is created according to users's settings |
def children_bp(self, feature, child_featuretype='exon', merge=False,
ignore_strand=False):
children = self.children(feature, featuretype=child_featuretype,
order_by='start')
if merge:
children = self.merge(children, ignore_strand=ignore_strand)
total = 0
for child in children:
total += len(child)
return total | Total bp of all children of a featuretype.
Useful for getting the exonic bp of an mRNA.
Parameters
----------
feature : str or Feature instance
child_featuretype : str
Which featuretype to consider. For example, to get exonic bp of an
mRNA, use `child_featuretype='exon'`.
merge : bool
Whether or not to merge child features together before summing
them.
ignore_strand : bool
If True, then overlapping features on different strands will be
merged together; otherwise, merging features with different strands
will result in a ValueError.
Returns
-------
Integer representing the total number of bp. |
def _check_pretrained_file_names(cls, pretrained_file_name):
embedding_name = cls.__name__.lower()
if pretrained_file_name not in cls.pretrained_file_name_sha1:
raise KeyError('Cannot find pretrained file %s for token embedding %s. Valid '
'pretrained files for embedding %s: %s' %
(pretrained_file_name, embedding_name, embedding_name,
', '.join(cls.pretrained_file_name_sha1.keys()))) | Checks if a pre-trained token embedding file name is valid.
Parameters
----------
pretrained_file_name : str
The pre-trained token embedding file. |
def _deshuffle_field(self, *args):
ip = self._invpermutation
fields = []
for arg in args:
fields.append( arg[ip] )
if len(fields) == 1:
return fields[0]
else:
return fields | Return to original ordering |
def getenv_int(key, default=0):
    """Get an integer-valued environment variable `key`, if it exists and
    parses as an integer; otherwise return `default`.
    """
    raw = os.environ.get(key)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        return default
def _validate_lattice_spacing(self, lattice_spacing):
    """Validate lattice spacing and store it on ``self.lattice_spacing``.

    Raises ValueError when the spacing is missing, wrongly sized, contains
    NaN, or contains negative values.
    """
    dataType = np.float64
    if lattice_spacing is not None:
        lattice_spacing = np.asarray(lattice_spacing, dtype=dataType)
        # NOTE(review): reshape is hard-coded to (3,), yet the shape check
        # below compares against self.dimension; for dimension != 3 the
        # reshape would raise a generic numpy error before the custom
        # message -- confirm dimension is always 3 here.
        lattice_spacing = lattice_spacing.reshape((3,))
        if np.shape(lattice_spacing) != (self.dimension,):
            raise ValueError('Lattice spacing should be a vector of '
                             'size:({},). Please include lattice spacing '
                             'of size >= 0 depending on desired '
                             'dimensionality.'
                             .format(self.dimension))
    else:
        raise ValueError('No lattice_spacing provided. Please provide '
                         'lattice spacing\'s that are >= 0. with size ({},)'
                         .format((self.dimension)))
    # Reject NaN before the sign check (NaN comparisons are always False).
    if np.any(np.isnan(lattice_spacing)):
        raise ValueError('None type or NaN type values present in '
                         'lattice_spacing: {}.'.format(lattice_spacing))
    elif np.any(lattice_spacing < 0.0):
        raise ValueError('Negative lattice spacing value. One of '
                         'the spacing: {} is negative.'
                         .format(lattice_spacing))
    self.lattice_spacing = lattice_spacing | Ensure that lattice spacing is provided and correct.
_validate_lattice_spacing will ensure that the lattice spacing
provided are acceptable values. Additional Numpy errors can also occur
due to the conversion to a Numpy array.
Exceptions Raised
-----------------
ValueError : Incorrect lattice_spacing input |
def _decode(self, obj, context):
cls = self._get_class(obj.classID)
return cls.from_construct(obj, context) | Initialises a new Python class from a construct using the mapping
passed to the adapter. |
def close_project(self):
    """Close the current project and return to a window without an
    active project.
    """
    if self.current_active_project:
        # Bring the Projects plugin to front before tearing down state.
        self.switch_to_plugin()
        if self.main.editor is not None:
            # Remember which files were open so they can be restored the
            # next time this project is opened.
            self.set_project_filenames(
                self.main.editor.get_open_filenames())
        path = self.current_active_project.root_path
        self.current_active_project = None
        self.set_option('current_project_path', None)
        self.setup_menu_actions()
        # Notify listeners that the project closed and sys.path changed.
        self.sig_project_closed.emit(path)
        self.sig_pythonpath_changed.emit()
        if self.dockwidget is not None:
            # Persist dock visibility so it can be restored later.
            self.set_option('visible_if_project_open',
                            self.dockwidget.isVisible())
            self.dockwidget.close()
        self.explorer.clear()
        self.restart_consoles() | Close current project and return to a window without an active
project |
def run_forever(self):
    """Run the worker either forever or until ``Worker.max_tasks``
    completed tasks are reached.  Returns 0 on exit.
    """
    self.starting()
    self.keep_running = True
    def handle(signum, frame):
        # SIGINT handler: request a graceful stop instead of raising
        # KeyboardInterrupt mid-task.
        self.interrupt()
        self.keep_running = False
    signal.signal(signal.SIGINT, handle)
    while self.keep_running:
        if self.max_tasks and self.tasks_complete >= self.max_tasks:
            # NOTE(review): stopping() is presumably responsible for
            # clearing self.keep_running; otherwise the loop would keep
            # spinning -- confirm against the Worker implementation.
            self.stopping()
        if self.gator.len():
            result = self.gator.pop()
            self.tasks_complete += 1
            self.result(result)
        if self.nap_time >= 0:
            # Sleep between polls to avoid a busy loop.
            time.sleep(self.nap_time)
    return 0 | Causes the worker to run either forever or until the
``Worker.max_tasks`` are reached. |
def refactor_rename_current_module(self, new_name):
    """Rename the current module to `new_name` via a Rename refactoring."""
    renamer = Rename(self.project, self.resource, None)
    return self._get_changes(renamer, new_name)
async def send(self, sender, **kwargs):
    """Send a signal from `sender` to all connected receivers and return
    the list of their responses (empty when nothing is connected).
    """
    if not self.receivers:
        return []
    calls = []
    for receiver in self._get_receivers(sender):
        bound = receiver()
        # Skip receivers whose dereference did not yield a callable.
        if callable(bound):
            calls.append(bound(sender=sender, **kwargs))
    if not calls:
        return []
    return await asyncio.gather(*calls)
def get_hostname(self):
    """Get the hostname used by the BMC in various contexts.

    Interpretation varies somewhat, but generally this is the name that
    shows up on UIs and in DHCP requests and DNS registration requests,
    as applicable.

    :return: current hostname
    """
    self.oem_init()
    try:
        hostname = self._oem.get_hostname()
    except exc.UnsupportedFunctionality:
        # OEM layer has no hostname support; fall back to the MCI name.
        hostname = self.get_mci()
    return hostname
def _handle_authentication_error(self):
    """Build a 401 'Access Denied' response carrying the auth scheme's
    ``WWW-Authenticate`` challenge header.
    """
    denied = make_response('Access Denied')
    denied.status_code = 401
    denied.headers['WWW-Authenticate'] = self.auth.get_authenticate_header()
    return denied
def T_sigma(self, sigma):
    """Given a policy `sigma`, return the T_sigma operator.

    Parameters
    ----------
    sigma : array_like(int, ndim=1)
        Policy vector, of length n.

    Returns
    -------
    callable
        The T_sigma operator, mapping v to R_sigma + beta * Q_sigma v.
    """
    R_sigma, Q_sigma = self.RQ_sigma(sigma)

    def operator(v):
        return R_sigma + self.beta * Q_sigma.dot(v)

    return operator
def Lewis(D=None, alpha=None, Cp=None, k=None, rho=None):
    r'''Calculates Lewis number or `Le` for a fluid with the given parameters.

    .. math::
        Le = \frac{k}{\rho C_p D} = \frac{\alpha}{D}

    Inputs can be either of the following sets:

    * Diffusivity and thermal diffusivity
    * Diffusivity, heat capacity, thermal conductivity, and density

    Parameters
    ----------
    D : float
        Diffusivity of a species, [m^2/s]
    alpha : float, optional
        Thermal diffusivity, [m^2/s]
    Cp : float, optional
        Heat capacity, [J/kg/K]
    k : float, optional
        Thermal conductivity, [W/m/K]
    rho : float, optional
        Density, [kg/m^3]

    Returns
    -------
    Le : float
        Lewis number []

    Raises
    ------
    ValueError
        If neither `alpha` nor the complete (`k`, `Cp`, `rho`) set is given.

    Notes
    -----
    .. math::
        Le=\frac{\text{Thermal diffusivity}}{\text{Mass diffusivity}} =
        \frac{Sc}{Pr}

    Examples
    --------
    >>> Lewis(D=22.6E-6, alpha=19.1E-6)
    0.8451327433628318
    >>> Lewis(D=22.6E-6, rho=800., k=.2, Cp=2200)
    0.00502815768302494

    References
    ----------
    .. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
       Eighth Edition. McGraw-Hill Professional, 2007.
    .. [2] Cengel, Yunus, and John Cimbala. Fluid Mechanics: Fundamentals and
       Applications. Boston: McGraw Hill Higher Education, 2006.
    .. [3] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd edition.
       Berlin; New York:: Springer, 2010.
    '''
    # Explicit None checks instead of truthiness: a legitimate zero value
    # would otherwise be treated as "not provided".
    if k is not None and Cp is not None and rho is not None:
        alpha = k/(rho*Cp)
    elif alpha is None:
        # ValueError is more precise than a bare Exception and remains
        # backward-compatible for callers catching Exception.
        raise ValueError('Insufficient information provided for Le calculation')
    return alpha/D
def linear_chirp(npts=2000):
    """Generate a simple linear chirp.

    :param npts: Number of samples.
    :type npts: int
    :returns: Generated signal
    :rtype: numpy.ndarray
    """
    t = np.linspace(0, 20, npts)
    # Instantaneous frequency grows linearly with time.
    return np.sin(0.2 * np.pi * (0.1 + 12.0 * t) * t)
def extract_translations(self, string):
    """Extract translation messages from a Django template string.

    Tokenizes the template and collects every block tag that starts with
    the configured tranz/tranzchoice tags, returning Translation objects.
    """
    trans = []
    for t in Lexer(string.decode("utf-8"), None).tokenize():
        if t.token_type == TOKEN_BLOCK:
            # Only {% tranz ... %} / {% tranzchoice ... %} blocks matter.
            if not t.contents.startswith(
                    (self.tranz_tag, self.tranzchoice_tag)):
                continue
            # Trailing space distinguishes the tag from a prefix match.
            is_tranzchoice = t.contents.startswith(
                self.tranzchoice_tag +
                " ")
            kwargs = {
                # Each attribute is pulled from the tag body by its own
                # regex; helper converts matches into transvars.
                "id": self._match_to_transvar(id_re, t.contents),
                "number": self._match_to_transvar(number_re, t.contents),
                "domain": self._match_to_transvar(domain_re, t.contents),
                "locale": self._match_to_transvar(locale_re, t.contents),
                "is_transchoice": is_tranzchoice, "parameters": TransVar(
                    # Keep only the names left of '=' in key=value pairs.
                    [x.split("=")[0].strip() for x in properties_re.findall(t.contents) if x],
                    TransVar.LITERAL
                ),
                "lineno": t.lineno,
            }
            trans.append(Translation(**kwargs))
    return trans | Extract messages from Django template string.
def merged_pex(cls, path, pex_info, interpreter, pexes, interpeter_constraints=None):
    """Yield a pex builder at `path` with the given pexes already merged.

    NOTE(review): `interpeter_constraints` is misspelled, but renaming it
    would break keyword callers; kept for interface compatibility.

    :rtype: :class:`pex.pex_builder.PEXBuilder`
    """
    pex_paths = [pex.path() for pex in pexes if pex]
    if pex_paths:
        # Merge the other pexes via PEX_PATH rather than copying them in;
        # copy pex_info first so the caller's object is not mutated.
        pex_info = pex_info.copy()
        pex_info.merge_pex_path(':'.join(pex_paths))
    # safe_concurrent_creation makes the build atomic at `path`.
    with safe_concurrent_creation(path) as safe_path:
        builder = PEXBuilder(safe_path, interpreter, pex_info=pex_info)
        if interpeter_constraints:
            for constraint in interpeter_constraints:
                builder.add_interpreter_constraint(constraint)
        yield builder | Yields a pex builder at path with the given pexes already merged.
:rtype: :class:`pex.pex_builder.PEXBuilder` |
def _set_extremum_session_metrics(session_group, aggregation_metric,
                                  extremum_fn):
    """Set session_group's metrics to those of its "extremum session".

    The extremum session is the session in the group with the extremum
    value (per `extremum_fn`) of `aggregation_metric`.

    Args:
      session_group: A SessionGroup protobuffer.
      aggregation_metric: A MetricName protobuffer.
      extremum_fn: callable. Must be either 'min' or 'max'.
    """
    measurements = _measurements(session_group, aggregation_metric)
    ext_session = extremum_fn(
        measurements,
        key=operator.attrgetter('metric_value.value')).session_index
    # Protobuf repeated-field idiom: clear in place, then merge the
    # extremum session's values wholesale.
    del session_group.metric_values[:]
    session_group.metric_values.MergeFrom(
        session_group.sessions[ext_session].metric_values) | Sets the metrics for session_group to those of its "extremum session".
The extremum session is the session in session_group with the extremum value
of the metric given by 'aggregation_metric'. The extremum is taken over the
subset of sessions in the group whose 'aggregation_metric' was measured
at the largest training step among the sessions in the group.
Args:
session_group: A SessionGroup protobuffer.
aggregation_metric: A MetricName protobuffer.
extremum_fn: callable. Must be either 'min' or 'max'. Determines the type of
extremum to compute. |
def migrator(state):
    """Rebuild a Cleverbot instance from saved state (a pair of
    constructor kwargs and per-conversation kwargs).

    Nameless conversations will be lost.
    """
    cleverbot_kwargs, convos_kwargs = state
    cb = Cleverbot(**cleverbot_kwargs)
    for convo in convos_kwargs:
        cb.conversation(**convo)
    return cb
def devices(self):
    """Compute the names of the disk devices that are suitable
    installation targets by subtracting CDROM- and USB devices
    from the list of total mounts.

    An explicit ``bootstrap-system-devices`` config entry overrides the
    sysctl-derived device list.
    """
    install_devices = self.install_devices
    if 'bootstrap-system-devices' in env.instance.config:
        devices = set(env.instance.config['bootstrap-system-devices'].split())
    else:
        devices = set(self.sysctl_devices)
    for sysctl_device in self.sysctl_devices:
        for install_device in install_devices:
            if install_device.startswith(sysctl_device):
                # discard() instead of remove(): the device may already be
                # absent (several install devices can share one prefix, and
                # a config-supplied list need not contain it at all), which
                # previously raised KeyError.
                devices.discard(sysctl_device)
                break
    return devices
def _records_commit(record_ids):
for record_id in record_ids:
record = Record.get_record(record_id)
record.commit() | Commit all records. |
def version(self):
    """Get the Kubernetes API version as a (major, minor) tuple."""
    response = self.get(version="", base="/version")
    response.raise_for_status()
    payload = response.json()
    return payload["major"], payload["minor"]
def intersperse(x, ys):
    """Yield the elements of ``ys`` with ``x`` inserted between each
    consecutive pair.

    :type ys: Iterable
    """
    for i, y in enumerate(ys):
        if i:
            yield x
        yield y
def gen_rsd_cdf(K, delta, c):
    """The CDF of the RSD on block degree, precomputed for sampling speed.

    Uses a running prefix sum (O(K)) instead of re-summing each prefix
    (O(K^2)).  Assumes gen_mu() returns at least K entries -- TODO confirm.
    """
    from itertools import accumulate
    mu = gen_mu(K, delta, c)
    return list(accumulate(mu[:K]))
def call(self, tokens, *args, **kwargs):
    """Append a deferred `evaluate` step carrying `args`/`kwargs` to the
    token list and return the list.
    """
    tokens.append([evaluate, [args, kwargs], {}])
    return tokens | Add args and kwargs to the tokens.
def sync(ui, repo, **opts):
    """synchronize with remote repository

    Incorporates recent changes from the remote repository
    into the local repository.
    """
    if codereview_disabled:
        raise hg_util.Abort(codereview_disabled)
    if not opts["local"]:
        # Pull (with working-copy update) only when the remote actually
        # has new changesets; otherwise just update the working copy.
        if hg_incoming(ui, repo):
            err = hg_pull(ui, repo, update=True)
        else:
            err = hg_update(ui, repo)
        if err:
            return err
    sync_changes(ui, repo) | synchronize with remote repository
Incorporates recent changes from the remote repository
into the local repository. |
def normalize_events_list(old_list):
    """Return a normalized copy of an events list for the API.

    Internally the `event_type` key is prefixed with underscore but the
    API returns objects without that prefix; byte values are re-encoded,
    internal queue identifiers are dropped, and addresses checksummed.
    """
    new_list = []
    for _event in old_list:
        # Work on shallow copies so the caller's events are untouched.
        new_event = dict(_event)
        if new_event.get('args'):
            new_event['args'] = dict(new_event['args'])
            encode_byte_values(new_event['args'])
        # The queue identifier is internal-only; hide it from the API.
        if new_event.get('queue_identifier'):
            del new_event['queue_identifier']
        hexbytes_to_str(new_event)
        name = new_event['event']
        # Addresses are normalized to their checksummed form.
        if name == 'EventPaymentReceivedSuccess':
            new_event['initiator'] = to_checksum_address(new_event['initiator'])
        if name in ('EventPaymentSentSuccess', 'EventPaymentSentFailed'):
            new_event['target'] = to_checksum_address(new_event['target'])
        encode_byte_values(new_event)
        encode_object_to_str(new_event)
        new_list.append(new_event)
    return new_list | Internally the `event_type` key is prefixed with underscore but the API
returns an object without that prefix |
def cmd_dist(self, nm=None, ch=None):
    """dist nm=dist_name ch=chname

    Set a color distribution for the given channel.  Possible values are
    linear, log, power, sqrt, squared, asinh, sinh, and histeq.  With no
    value, reports the current color distribution algorithm.
    """
    viewer = self.get_viewer(ch)
    if viewer is None:
        self.log("No current viewer/channel.")
        return
    if nm is None:
        # Report-only mode: show the current distribution.
        rgbmap = viewer.get_rgbmap()
        dist = rgbmap.get_dist()
        self.log(str(dist))
    else:
        # NOTE(review): the setter calls set_color_algorithm() while the
        # getter reads the rgbmap distribution -- confirm both address
        # the same setting in the viewer API.
        viewer.set_color_algorithm(nm) | dist nm=dist_name ch=chname
Set a color distribution for the given channel.
Possible values are linear, log, power, sqrt, squared, asinh, sinh,
and histeq.
If no value is given, reports the current color distribution
algorithm. |
def trade_history(
    self, from_=None, count=None, from_id=None, end_id=None,
    order=None, since=None, end=None, pair=None
):
    """Return trade history.

    To use this method you need a privilege of the info key.

    :param int or None from_: trade ID, from which the display starts (default 0)
    :param int or None count: the number of trades for display (default 1000)
    :param int or None from_id: trade ID, from which the display starts (default 0)
    :param int or None end_id: trade ID on which the display ends (default inf.)
    :param str or None order: sorting (default 'DESC')
    :param int or None since: the time to start the display (default 0)
    :param int or None end: the time to end the display (default inf.)
    :param str or None pair: pair to be displayed (ex. 'btc_usd')
    """
    params = dict(
        from_=from_, count=count, from_id=from_id, end_id=end_id,
        order=order, since=since, end=end, pair=pair,
    )
    return self._trade_api_call('TradeHistory', **params)
def main(**kwargs):
    """The Pipeline tool: build options, configure event logging, and
    run the application against the given definition.
    """
    options = ApplicationOptions(**kwargs)
    Event.configure(is_logging_enabled=options.event_logging)
    Application(options).run(options.definition)
def decorator(func):
    """A function timer decorator.

    Wraps `func`, printing its qualified name and elapsed wall time
    (seconds, or minutes for runs over a minute) after each call, and
    returns the wrapped callable with metadata preserved.
    """
    import functools

    @functools.wraps(func)  # preserve __name__/__qualname__/__doc__
    def function_timer(*args, **kwargs):
        start = time.time()
        value = func(*args, **kwargs)
        runtime = time.time() - start
        if runtime < 60:
            label = 'sec: {:f}'.format(runtime)
        else:
            label = 'min: {:f}'.format(runtime / 60)
        print('{func:50} --> {time}'.format(func=func.__qualname__, time=label))
        return value
    return function_timer
def launcher():
    """Launch the snmposter daemon (Python 2 entry point).

    Parses -f/--file for the agents CSV, refuses to start while the
    system snmpd service is running, then starts the factory.
    """
    parser = OptionParser()
    parser.add_option(
        '-f',
        '--file',
        dest='filename',
        default='agents.csv',
        help='snmposter configuration file'
    )
    options, args = parser.parse_args()
    factory = SNMPosterFactory()
    # Ask the init system whether snmpd is up; it would hold the port.
    snmpd_status = subprocess.Popen(
        ["service", "snmpd", "status"],
        stdout=subprocess.PIPE
    ).communicate()[0]
    if "is running" in snmpd_status:
        # NOTE(review): message says "snmd" (sic); left unchanged because
        # it is runtime output.
        message = "snmd service is running. Please stop it and try again."
        print >> sys.stderr, message  # Python 2 print statement
        sys.exit(1)
    try:
        factory.configure(options.filename)
    except IOError:
        print >> sys.stderr, "Error opening %s." % options.filename
        sys.exit(1)
    factory.start() | Launch it.
def extract_version(exepath, version_arg, word_index=-1, version_rank=3):
    """Run an executable and parse the program version from its output.

    Args:
        exepath: Filepath to executable.
        version_arg: Arg to pass to program, eg "-V". Can also be a list.
        word_index: Expect the Nth word of output to be the version.
        version_rank: Cap the version to this many tokens.

    Returns:
        `Version` object.

    Raises:
        RezBindError: If the executable fails or its output cannot be
            parsed as a version.
    """
    # Accept a single flag or a list of args (basestring: Python 2 code).
    if isinstance(version_arg, basestring):
        version_arg = [version_arg]
    args = [exepath] + version_arg
    stdout, stderr, returncode = _run_command(args)
    if returncode:
        raise RezBindError("failed to execute %s: %s\n(error code %d)"
                           % (exepath, stderr, returncode))
    # Only the first line of output is considered.
    stdout = stdout.strip().split('\n')[0].strip()
    log("extracting version from output: '%s'" % stdout)
    try:
        strver = stdout.split()[word_index]
        # Normalize '.'/'-' separators and truncate to version_rank tokens.
        toks = strver.replace('.', ' ').replace('-', ' ').split()
        strver = '.'.join(toks[:version_rank])
        version = Version(strver)
    except Exception as e:
        raise RezBindError("failed to parse version from output '%s': %s"
                           % (stdout, str(e)))
    log("extracted version: '%s'" % str(version))
    return version | Run an executable and get the program version.
Args:
exepath: Filepath to executable.
version_arg: Arg to pass to program, eg "-V". Can also be a list.
word_index: Expect the Nth word of output to be the version.
version_rank: Cap the version to this many tokens.
Returns:
`Version` object. |
def use(self, id):
    """Switch to a particular Virtual Server instance.

    @param id: Virtual Server ID
    @type id: int
    """
    if not self._connected or id <= 0:
        return
    self.send_command('use', keys={'sid': id})
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.