code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def worker_stopped(name, workers=None, profile='default'):
if workers is None:
workers = []
return _bulk_state(
'modjk.bulk_stop', name, workers, profile
) | Stop all the workers in the modjk load balancer
Example:
.. code-block:: yaml
loadbalancer:
modjk.worker_stopped:
- workers:
- app1
- app2 |
def get_dbcollection_with_es(self, **kwargs):
es_objects = self.get_collection_es()
db_objects = self.Model.filter_objects(es_objects)
return db_objects | Get DB objects collection by first querying ES. |
def retrieve(url):
try:
pem_data = urlopen(url).read()
except (ValueError, HTTPError):
warnings.warn('Certificate URL is invalid.')
return False
if sys.version >= '3':
try:
pem_data = pem_data.decode()
except(UnicodeDecodeError):
warnings.warn(... | Retrieve and parse PEM-encoded X.509 certificate chain.
See `validate.request` for additional info.
Args:
url: str. SignatureCertChainUrl header value sent by request.
Returns:
list or bool: If url is valid, returns the certificate chain as a list
of cryptography.hazmat.backen... |
def get_player_summaries(players, **kwargs):
if (isinstance(players, list)):
params = {'steamids': ','.join(str(p) for p in players)}
elif (isinstance(players, int)):
params = {'steamids': players}
else:
raise ValueError("The players input needs to be a list or int")
return make_... | Get players steam profile from their steam ids |
def compare_config(self):
diff = self.device.cu.diff()
if diff is None:
return ''
else:
return diff.strip() | Compare candidate config with running. |
def clean_videos(self):
if self.videos:
self.videos = [int(v) for v in self.videos if v is not None and is_valid_digit(v)] | Validates that all values in the video list are integer ids and removes all None values. |
def has_permission(self, request, view):
user_filter = self._get_user_filter(request)
if not user_filter:
return True
username_param = get_username_param(request)
allowed = user_filter == username_param
if not allowed:
log.warning(
u"Permis... | If the JWT has a user filter, verify that the filtered
user value matches the user in the URL. |
def models_max_input_output_length(models: List[InferenceModel],
num_stds: int,
forced_max_input_len: Optional[int] = None,
forced_max_output_len: Optional[int] = None) -> Tuple[int, Callable]:
max_mean = max(mo... | Returns a function to compute maximum output length given a fixed number of standard deviations as a
safety margin, and the current input length.
Mean and std are taken from the model with the largest values to allow proper ensembling of models
trained on different data sets.
:param models: List of mod... |
def load_manifest_file(client, bucket, schema, versioned, ifilters, key_info):
yield None
with tempfile.NamedTemporaryFile() as fh:
client.download_fileobj(Bucket=bucket, Key=key_info['key'], Fileobj=fh)
fh.seek(0)
reader = csv.reader(gzip.GzipFile(fileobj=fh, mode='r'))
for key_... | Given an inventory csv file, return an iterator over keys |
def load_private_key(pem_path, passphrase_bytes=None):
with open(pem_path, "rb") as f:
return cryptography.hazmat.primitives.serialization.load_pem_private_key(
data=f.read(),
password=passphrase_bytes,
backend=cryptography.hazmat.backends.default_backend(),
) | Load private key from PEM encoded file |
def outputtemplate(self, template_id):
for profile in self.profiles:
for outputtemplate in profile.outputtemplates():
if outputtemplate.id == template_id:
return outputtemplate
return KeyError("Outputtemplate " + template_id + " not found") | Get an output template by ID |
def point_stokes(self, context):
(ls, us), (lt, ut), (l, u) = context.array_extents(context.name)
data = np.empty(context.shape, context.dtype)
data[ls:us,:,l:u] = np.asarray(lm_stokes)[ls:us,None,:]
return data | Supply point source stokes parameters to montblanc |
def is_binary(self):
with open(self.path, 'rb') as fin:
CHUNKSIZE = 1024
while 1:
chunk = fin.read(CHUNKSIZE)
if b'\0' in chunk:
return True
if len(chunk) < CHUNKSIZE:
break
return False | Return true if this is a binary file. |
def steepest_descent(f, x, line_search=1.0, maxiter=1000, tol=1e-16,
projection=None, callback=None):
r
grad = f.gradient
if x not in grad.domain:
raise TypeError('`x` {!r} is not in the domain of `grad` {!r}'
''.format(x, grad.domain))
if not callabl... | r"""Steepest descent method to minimize an objective function.
General implementation of steepest decent (also known as gradient
decent) for solving
.. math::
\min f(x)
The algorithm is intended for unconstrained problems. It needs line
search in order guarantee convergence. With appropri... |
def modify(self, *, sort=None, purge=False, done=None):
return self._modifyInternal(sort=sort, purge=purge, done=done) | Calls Model._modifyInternal after loading the database. |
def ParseUserEngagedRow(
self, parser_mediator, query, row, **unused_kwargs):
query_hash = hash(query)
event_data = WindowsTimelineUserEngagedEventData()
event_data.package_identifier = self._GetRowValue(
query_hash, row, 'PackageName')
payload_json_bytes = bytes(self._GetRowValue(query_ha... | Parses a timeline row that describes a user interacting with an app.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row. |
def find_executable(executable_name):
if six.PY3:
executable_abs = shutil.which(executable_name)
else:
import distutils.spawn
executable_abs = distutils.spawn.find_executable(executable_name)
return executable_abs | Tries to find executable in PATH environment
It uses ``shutil.which`` method in Python3 and
``distutils.spawn.find_executable`` method in Python2.7 to find the
absolute path to the 'name' executable.
:param executable_name: name of the executable
:returns: Returns the absolute path to the executabl... |
def creating_schema_and_index(self, models, func):
waiting_models = []
self.base_thread.do_with_submit(func, models, waiting_models, threads=self.threads)
if waiting_models:
print("WAITING MODELS ARE CHECKING...")
self.creating_schema_and_index(waiting_models, func) | Executes given functions with given models.
Args:
models: models to execute
func: function name to execute
Returns: |
def put_scancode(self, scancode):
if not isinstance(scancode, baseinteger):
raise TypeError("scancode can only be an instance of type baseinteger")
self._call("putScancode",
in_p=[scancode]) | Sends a scancode to the keyboard.
in scancode of type int
raises :class:`VBoxErrorIprtError`
Could not send scan code to virtual keyboard. |
def to_string(self, verbose=0, title=None, **kwargs):
from pprint import pformat
s = pformat(self, **kwargs)
if title is not None:
return "\n".join([marquee(title, mark="="), s])
return s | String representation. kwargs are passed to `pprint.pformat`.
Args:
verbose: Verbosity level
title: Title string. |
def apply(
self, doc_loader, pdf_path=None, clear=True, parallelism=None, progress_bar=True
):
super(Parser, self).apply(
doc_loader,
pdf_path=pdf_path,
clear=clear,
parallelism=parallelism,
progress_bar=progress_bar,
) | Run the Parser.
:param doc_loader: An iteratable of ``Documents`` to parse. Typically,
one of Fonduer's document preprocessors.
:param pdf_path: The path to the PDF documents, if any. This path will
override the one used in initialization, if provided.
:param clear: Whet... |
def check_rates(self, rates, base):
if "rates" not in rates:
raise RuntimeError("%s: 'rates' not found in results" % self.name)
if "base" not in rates or rates["base"] != base or base not in rates["rates"]:
self.log(logging.WARNING, "%s: 'base' not found in results", self.name)
... | Local helper function for validating rates response |
def clear_breakpoint(self, filename, lineno):
clear_breakpoint(filename, lineno)
self.breakpoints_saved.emit()
editorstack = self.get_current_editorstack()
if editorstack is not None:
index = self.is_file_opened(filename)
if index is not None:
... | Remove a single breakpoint |
def get_items_by_banks(self, bank_ids):
item_list = []
for bank_id in bank_ids:
item_list += list(
self.get_items_by_bank(bank_id))
return objects.ItemList(item_list) | Gets the list of ``Items`` corresponding to a list of ``Banks``.
arg: bank_ids (osid.id.IdList): list of bank ``Ids``
return: (osid.assessment.ItemList) - list of items
raise: NullArgument - ``bank_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise:... |
def pyxb_to_dict(rp_pyxb):
return {
'allowed': bool(_get_attr_or_list(rp_pyxb, 'allowed')),
'num': _get_as_int(rp_pyxb),
'block': _get_as_set(rp_pyxb, 'block'),
'pref': _get_as_set(rp_pyxb, 'pref'),
} | Convert ReplicationPolicy PyXB object to a normalized dict.
Args:
rp_pyxb: ReplicationPolicy to convert.
Returns:
dict : Replication Policy as normalized dict.
Example::
{
'allowed': True,
'num': 3,
'blockedMemberNode': {'urn:node:NODE1', 'urn:node:NODE2', 'ur... |
def get_encoder_from_vocab(vocab_filepath):
if not tf.gfile.Exists(vocab_filepath):
raise ValueError("Vocab file does not exist: {}.".format(vocab_filepath))
tf.logging.info("Found vocab file: %s", vocab_filepath)
encoder = text_encoder.SubwordTextEncoder(vocab_filepath)
return encoder | Get encoder from vocab file.
If vocab is not found in output dir, it will be copied there by
copy_vocab_to_output_dir to clarify the vocab used to generate the data.
Args:
vocab_filepath: path to vocab, either local or cns
Returns:
A SubwordTextEncoder vocabulary object. None if the output_parallel_t... |
def set_entry_points(self, names):
names = util.return_set(names)
self.entry_point_names = names | sets the internal collection of entry points to be
equal to `names`
`names` can be a single object or an iterable but
must be a string or iterable of strings. |
def freq_from_final_mass_spin(final_mass, final_spin, l=2, m=2, nmodes=1):
return get_lm_f0tau(final_mass, final_spin, l, m, nmodes)[0] | Returns QNM frequency for the given mass and spin and mode.
Parameters
----------
final_mass : float or array
Mass of the black hole (in solar masses).
final_spin : float or array
Dimensionless spin of the final black hole.
l : int or array, optional
l-index of the harmonic.... |
def _parse_default(self, target):
if not isinstance(target, (list, tuple)):
k, v, t = target, None, lambda x: x
elif len(target) == 1:
k, v, t = target[0], None, lambda x: x
elif len(target) == 2:
k, v, t = target[0], target[1], lambda x: x
elif len(ta... | Helper function to parse default values. |
def _BuildKeyHierarchy(self, subkeys, values):
if subkeys:
for registry_key in subkeys:
name = registry_key.name.upper()
if name in self._subkeys:
continue
self._subkeys[name] = registry_key
registry_key._key_path = key_paths.JoinKeyPath([
self._key_path, ... | Builds the Windows Registry key hierarchy.
Args:
subkeys (list[FakeWinRegistryKey]): list of subkeys.
values (list[FakeWinRegistryValue]): list of values. |
def truncate(self, path, length, fh=None):
"Download existing path, truncate and reupload"
try:
f = self._getpath(path)
except JFS.JFSError:
raise OSError(errno.ENOENT, '')
if isinstance(f, (JFS.JFSFile, JFS.JFSFolder)) and f.is_deleted():
raise OSErro... | Download existing path, truncate and reupload |
def get_all_delivery_notes(self, params=None):
if not params:
params = {}
return self._iterate_through_pages(
self.get_delivery_notes_per_page,
resource=DELIVERY_NOTES,
**{'params': params}
) | Get all delivery notes
This will iterate over all pages until it gets all elements.
So if the rate limit exceeded it will throw an Exception and you will get nothing
:param params: search params
:return: list |
def todo_results_changed(self):
editorstack = self.get_current_editorstack()
results = editorstack.get_todo_results()
index = editorstack.get_stack_index()
if index != -1:
filename = editorstack.data[index].filename
for other_editorstack in self.editorstacks... | Synchronize todo results between editorstacks
Refresh todo list navigation buttons |
def send_response(self, msgid, error=None, result=None):
msg = self._encoder.create_response(msgid, error, result)
self._send_message(msg) | Send a response |
def get_next_file_path(self, service, operation):
base_name = '{0}.{1}'.format(service, operation)
if self.prefix:
base_name = '{0}.{1}'.format(self.prefix, base_name)
LOG.debug('get_next_file_path: %s', base_name)
next_file = None
serializer_format = None
ind... | Returns a tuple with the next file to read and the serializer
format used |
def build_absolute_uri(self, uri):
request = self.context.get('request', None)
return (
request.build_absolute_uri(uri) if request is not None else uri
) | Return a fully qualified absolute url for the given uri. |
def list_subnets(conn=None, call=None, kwargs=None):
if call == 'action':
raise SaltCloudSystemExit(
'The list_subnets function must be called with '
'-f or --function.'
)
if conn is None:
conn = get_conn()
if kwargs is None or (isinstance(kwargs, dict) and 'n... | List subnets in a virtual network
network
network to list subnets of
.. code-block:: bash
salt-cloud -f list_subnets myopenstack network=salt-net |
def validate_examples(example_file):
def test_example(raw):
example = tf.train.Example()
example.ParseFromString(raw)
pi = np.frombuffer(example.features.feature['pi'].bytes_list.value[0], np.float32)
value = example.features.feature['outcome'].float_list.value[0]
assert abs(... | Validate that examples are well formed.
Pi should sum to 1.0
value should be {-1,1}
Usage:
validate_examples("../data/300.tfrecord.zz") |
def states(self, states):
if not isinstance(states, dict):
raise TypeError("states must be of type dict")
if [state_id for state_id, state in states.items() if not isinstance(state, State)]:
raise TypeError("element of container_state.states must be of type State")
if [st... | Setter for _states field
See property
:param states: Dictionary of States
:raises exceptions.TypeError: if the states parameter is of wrong type
:raises exceptions.AttributeError: if the keys of the dictionary and the state_ids in the dictionary do not match |
def parse_error(output_dir):
sys.stderr.seek(0)
std_err = sys.stderr.read().decode('utf-8')
err_file = os.path.join(output_dir, "eplusout.err")
if os.path.isfile(err_file):
with open(err_file, "r") as f:
ep_err = f.read()
else:
ep_err = "<File not found>"
message = "\... | Add contents of stderr and eplusout.err and put it in the exception message.
:param output_dir: str
:return: str |
def from_string(cls, s):
for num, text in cls._STATUS2STR.items():
if text == s:
return cls(num)
else:
raise ValueError("Wrong string %s" % s) | Return a `Status` instance from its string representation. |
def handle_event(self, message):
needs_update = 0
for zone in self.zones:
if zone in message:
_LOGGER.debug("Received message for zone: %s", zone)
self.zones[zone].update_status(message[zone])
if 'netusb' in message:
needs_update += self.ha... | Dispatch all event messages |
def safe_cd(path):
starting_directory = os.getcwd()
try:
os.chdir(path)
yield
finally:
os.chdir(starting_directory) | Changes to a directory, yields, and changes back.
Additionally any error will also change the directory back.
Usage:
>>> with safe_cd('some/repo'):
... call('git status') |
def read_serialized_rsa_pub_key(serialized):
n = None
e = None
rsa = from_hex(serialized)
pos = 0
ln = len(rsa)
while pos < ln:
tag = bytes_to_byte(rsa, pos)
pos += 1
length = bytes_to_short(rsa, pos)
pos += 2
if... | Reads serialized RSA pub key
TAG|len-2B|value. 81 = exponent, 82 = modulus
:param serialized:
:return: n, e |
def AppendContent(self, src_fd):
while 1:
blob = src_fd.read(self.chunksize)
if not blob:
break
blob_id = data_store.BLOBS.WriteBlobWithUnknownHash(blob)
self.AddBlob(blob_id, len(blob))
self.Flush() | Create new blob hashes and append to BlobImage.
We don't support writing at arbitrary file offsets, but this method provides
a convenient way to add blobs for a new file, or append content to an
existing one.
Args:
src_fd: source file handle open for read
Raises:
IOError: if blob has ... |
def _autocomplete(client, url_part, input_text, session_token=None,
offset=None, location=None, radius=None, language=None,
types=None, components=None, strict_bounds=False):
params = {"input": input_text}
if session_token:
params["sessiontoken"] = session_token
i... | Internal handler for ``autocomplete`` and ``autocomplete_query``.
See each method's docs for arg details. |
def copydb(self, sourcedb, destslab, destdbname=None, progresscb=None):
destdb = destslab.initdb(destdbname, sourcedb.dupsort)
statdict = destslab.stat(db=destdb)
if statdict['entries'] > 0:
raise s_exc.DataAlreadyExists()
rowcount = 0
for chunk in s_common.chunks(sel... | Copy an entire database in this slab to a new database in potentially another slab.
Args:
sourcedb (LmdbDatabase): which database in this slab to copy rows from
destslab (LmdbSlab): which slab to copy rows to
destdbname (str): the name of the database to copy rows to in dest... |
def files(self):
tag_name = self.release['tag_name']
repo_name = self.repository['full_name']
zipball_url = self.release['zipball_url']
filename = u'{name}-{tag}.zip'.format(name=repo_name, tag=tag_name)
response = self.gh.api.session.head(zipball_url)
assert response.sta... | Extract files to download from GitHub payload. |
def _get_hangul_syllable_type(hangul_syllable):
if not _is_hangul_syllable(hangul_syllable):
raise ValueError("Value 0x%0.4x does not represent a Hangul syllable!" % hangul_syllable)
if not _hangul_syllable_types:
_load_hangul_syllable_types()
return _hangul_syllable_types[hangul_syllable] | Function for taking a Unicode scalar value representing a Hangul syllable and determining the correct value for its
Hangul_Syllable_Type property. For more information on the Hangul_Syllable_Type property see the Unicode Standard,
ch. 03, section 3.12, Conjoining Jamo Behavior.
https://www.unicode.org/ver... |
def load_http_response(cls, http_response):
if not http_response.ok:
raise APIResponseError(http_response.text)
c = cls(http_response)
c.response = http_response
RateLimits.getRateLimits(cls.__name__).set(c.response.headers)
return c | This method should return an instantiated class and set its response
to the requests.Response object. |
def param_defs(self, method):
pts = self.bodypart_types(method)
if not method.soap.input.body.wrapped:
return pts
pt = pts[0][1].resolve()
return [(c.name, c, a) for c, a in pt if not c.isattr()] | Get parameter definitions for document literal. |
def metadata():
with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
return yaml.safe_load(md) | Get the current charm metadata.yaml contents as a python object |
def usearch61_chimera_check_ref(abundance_fp,
uchime_ref_fp,
reference_seqs_fp,
minlen=64,
output_dir=".",
remove_usearch_logs=False,
... | Does reference based chimera checking with usearch61
abundance_fp: input consensus fasta file with abundance information for
each cluster.
uchime_ref_fp: output uchime filepath for reference results
reference_seqs_fp: reference fasta database for chimera checking.
minlen: minimum sequence length f... |
def set_trace(*args, **kwargs):
out = sys.stdout.stream if hasattr(sys.stdout, 'stream') else None
kwargs['stdout'] = out
debugger = pdb.Pdb(*args, **kwargs)
debugger.use_rawinput = True
debugger.set_trace(sys._getframe().f_back) | Call pdb.set_trace, making sure it receives the unwrapped stdout.
This is so we don't keep drawing progress bars over debugger output. |
def to_text(self, relative=False, indent_level=0, clean_empty_block=False):
if relative:
fwd = self.rel_path_fwd
bwd = self.rel_path_bwd
else:
fwd = self.full_path_fwd
bwd = self.full_path_bwd
indent = 4*indent_level*' '
pre = '%s%s' % (ind... | This method returns the object model in text format. You should be able to copy&paste this text into any
device running a supported version of FortiOS.
Args:
- **relative** (bool):
* If ``True`` the text returned will assume that you are one block away
* If ... |
def _findSingleMemberGroups(classDictionaries):
toRemove = {}
for classDictionaryGroup in classDictionaries:
for classDictionary in classDictionaryGroup:
for name, members in list(classDictionary.items()):
if len(members) == 1:
toRemove[name] = list(member... | Find all classes that have only one member. |
def checksum_creation_action(target, source, env):
import crcmod
crc32_func = crcmod.mkCrcFun(0x104C11DB7, initCrc=0xFFFFFFFF, rev=False, xorOut=0)
with open(str(source[0]), 'rb') as f:
data = f.read()
data = data[:-4]
magicbin = data[-4:]
magic, = struct.unpack('<L', magicbi... | Create a linker command file for patching an application checksum into a firmware image |
def full(self):
if not self.size: return False
return len(self.pq) == (self.size + self.removed_count) | Return True if the queue is full |
def calc_padding(fmt, align):
remain = struct.calcsize(fmt) % align
if remain == 0:
return ""
return 'x' * (align - remain) | Calculate how many padding bytes needed for ``fmt`` to be aligned to
``align``.
Args:
fmt (str): :mod:`struct` format.
align (int): alignment (2, 4, 8, etc.)
Returns:
str: padding format (e.g., various number of 'x').
>>> calc_padding('b', 2)
'x'
>>> calc_padding('b',... |
def _ensure_tuple(item):
if isinstance(item, tuple):
return item
elif isinstance(item, list):
return tuple(item)
elif isinstance(item, np.ndarray):
return tuple(item.tolist())
else:
raise NotImplementedError | Simply ensure that the passed item is a tuple. If it is not, then
convert it if possible, or raise a NotImplementedError
Args:
item: the item that needs to become a tuple
Returns:
the item casted as a tuple
Raises:
NotImplementedError: if converting the given item to a tuple
... |
def process_tags(self, tag=None):
if self.downloaded is False:
raise serror("Track not downloaded, can't process tags..")
filetype = magic.from_file(self.filepath, mime=True)
if filetype != "audio/mpeg":
raise serror("Cannot process tags for file type %s." % filetype)
... | Process ID3 Tags for mp3 files. |
def _repair_column(self):
check_for_title = True
for column_index in range(self.start[1], self.end[1]):
table_column = TableTranspose(self.table)[column_index]
column_start = table_column[self.start[0]]
if check_for_title and is_empty_cell(column_start):
... | Same as _repair_row but for columns. |
def mark_backward(output_tensor, used_node_names):
op = output_tensor.op
if op.name in used_node_names:
return
used_node_names.add(op.name)
for input_tensor in op.inputs:
mark_backward(input_tensor, used_node_names)
for control_input_op in op.control_inputs:
used_node_names.add(control_input_op.na... | Function to propagate backwards in the graph and mark nodes as used.
Traverses recursively through the graph from the end tensor, through the op
that generates the tensor, and then to the input tensors that feed the op.
Nodes encountered are stored in used_node_names.
Args:
output_tensor: A Tensor which w... |
def _compute_distance_term(self, C, mag, rrup):
term1 = C['b'] * rrup
term2 = - np.log(rrup + C['c'] * np.exp(C['d'] * mag))
return term1 + term2 | Compute second and third terms in equation 1, p. 901. |
def prepare_args(self, args, transform=True):
updated_args = list(args)
if transform:
updated_args[-1] = self.transform_value(updated_args[-1])
if self.key:
updated_args.insert(-1, self.key)
return updated_args | Prepare args to be used by a sub-index
Parameters
----------
args: list
The while list of arguments passed to add, check_uniqueness, get_filtered_keys...
transform: bool
If ``True``, the last entry in `args`, ie the value, will be transformed.
Else it... |
def patched_packing_env(env):
old_env = pkg_resources.packaging.markers.default_environment
new_env = lambda: env
pkg_resources._vendor.packaging.markers.default_environment = new_env
try:
yield
finally:
pkg_resources._vendor.packaging.markers.default_environment = old_env | Monkey patch packaging.markers.default_environment |
def convert_complex_output(out_in):
out = {}
for key, val in out_in.iteritems():
if val.data.dtype in complex_types:
rval = copy(val)
rval.data = val.data.real
out['real(%s)' % key] = rval
ival = copy(val)
ival.data = val.data.imag
... | Convert complex values in the output dictionary `out_in` to pairs of
real and imaginary parts. |
def _read_bks_key(cls, data, pos, store_type):
key_type = b1.unpack_from(data, pos)[0]; pos += 1
key_format, pos = BksKeyStore._read_utf(data, pos, kind="key format")
key_algorithm, pos = BksKeyStore._read_utf(data, pos, kind="key algorithm")
key_enc, pos = BksKeyStore._read_data(data, p... | Given a data stream, attempt to parse a stored BKS key entry at the given position, and return it as a BksKeyEntry. |
def load_nicknames(self, file):
with open(os.path.join(main_dir, file + '.dat'), 'r') as f:
self.nicknames = json.load(f) | Load dict from file for random nicknames.
:param str file: filename |
def cluster_seqs(seqs,
neighbor_join=False,
params={},
add_seq_names=True,
WorkingDir=tempfile.gettempdir(),
SuppressStderr=None,
SuppressStdout=None,
max_chars=1000000,
max_hours=1.0,... | Muscle cluster list of sequences.
seqs: either file name or list of sequence objects or list of strings or
single multiline string containing sequences.
Addl docs coming soon |
def chi_p_from_xi1_xi2(xi1, xi2):
xi1, xi2, input_is_array = ensurearray(xi1, xi2)
chi_p = copy.copy(xi1)
mask = xi1 < xi2
chi_p[mask] = xi2[mask]
return formatreturn(chi_p, input_is_array) | Returns effective precession spin from xi1 and xi2. |
def remove_from_postmortem_exclusion_list(cls, pathname, bits = None):
if bits is None:
bits = cls.bits
elif bits not in (32, 64):
raise NotImplementedError("Unknown architecture (%r bits)" % bits)
if bits == 32 and cls.bits == 64:
keyname = 'HKLM\\SOFTWARE\\W... | Removes the given filename to the exclusion list for postmortem
debugging from the Registry.
@warning: This method requires administrative rights.
@warning: Don't ever delete entries you haven't created yourself!
Some entries are set by default for your version of Windows.
... |
def get_field_values(self, fldnames, rpt_fmt=True, itemid2name=None):
row = []
for fld in fldnames:
val = getattr(self, fld, None)
if val is not None:
if rpt_fmt:
val = self._get_rpt_fmt(fld, val, itemid2name)
row.append(val)
... | Get flat namedtuple fields for one GOEnrichmentRecord. |
def put_replication(Bucket, Role, Rules,
region=None, key=None, keyid=None, profile=None):
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
Role = _get_role_arn(name=Role,
region=region, key=key, keyid=keyid, profile=profile)
... | Given a valid config, update the replication configuration for a bucket.
Returns {updated: true} if replication configuration was updated and returns
{updated: False} if replication configuration was not updated.
CLI Example:
.. code-block:: bash
salt myminion boto_s3_bucket.put_replication ... |
def update_data(self):
url = ('https://www.openhumans.org/api/direct-sharing/project/'
'members/?access_token={}'.format(self.master_access_token))
results = get_all_results(url)
self.project_data = dict()
for result in results:
self.project_data[result['projec... | Returns data for all users including shared data files. |
def _exec_command(self, command: str):
stdin, stdout, stderr = self._ssh.exec_command(command)
stdout.read()
stderr.read()
stdin.close() | Executes the command and closes the handles
afterwards. |
def add_configuration_file(self, file_name):
logger.info('adding %s to configuration files', file_name)
if file_name not in self.configuration_files and self._inotify:
self._watch_manager.add_watch(file_name, pyinotify.IN_MODIFY)
if os.access(file_name, os.R_OK):
self.con... | Register a file path from which to read parameter values.
This method can be called multiple times to register multiple files for
querying. Files are expected to be ``ini`` formatted.
No assumptions should be made about the order that the registered files
are read and values defined i... |
def decrypt_stream(mode, in_stream, out_stream, block_size = BLOCK_SIZE, padding = PADDING_DEFAULT):
'Decrypts a stream of bytes from in_stream to out_stream using mode.'
decrypter = Decrypter(mode, padding = padding)
_feed_stream(decrypter, in_stream, out_stream, block_size) | Decrypts a stream of bytes from in_stream to out_stream using mode. |
def _parse_line_entry(self, line, type):
name = None
key_values = {}
if type == 'vars':
key_values = self._parse_line_vars(line)
else:
tokens = shlex.split(line.strip())
name = tokens.pop(0)
try:
key_values = self._parse_var... | Parse a section entry line into its components. In case of a 'vars'
section, the first field will be None. Otherwise, the first field will
be the unexpanded host or group name the variables apply to.
For example:
[production:children]
frontend purpose="web" # The lin... |
def create_package_level_rst_index_file(
package_name, max_depth, modules, inner_packages=None):
if inner_packages is None:
inner_packages = []
return_text = 'Package::' + package_name
dash = '=' * len(return_text)
return_text += '\n' + dash + '\n\n'
return_text += '.. toctree::' + '... | Function for creating text for index for a package.
:param package_name: name of the package
:type package_name: str
:param max_depth: Value for max_depth in the index file.
:type max_depth: int
:param modules: list of module in the package.
:type modules: list
:return: A text for the co... |
def _wva(values, weights):
assert len(values) == len(weights) and len(weights) > 0
return sum([mul(*x) for x in zip(values, weights)]) / sum(weights) | Calculates a weighted average |
def _get_more(collection_name, num_to_return, cursor_id):
return b"".join([
_ZERO_32,
_make_c_string(collection_name),
_pack_int(num_to_return),
_pack_long_long(cursor_id)]) | Get an OP_GET_MORE message. |
def redo(self):
if self._undoing or self._redoing:
raise RuntimeError
if not self._redo:
return
group = self._redo.pop()
self._redoing = True
self.begin_grouping()
group.perform()
self.set_action_name(group.name)
self.end_grouping()... | Performs the top group on the redo stack, if present. Creates an undo
group with the same name. Raises RuntimeError if called while undoing. |
def findBinomialNsWithExpectedSampleMinimum(desiredValuesSorted, p, numSamples, nMax):
actualValues = [
getExpectedValue(
SampleMinimumDistribution(numSamples,
BinomialDistribution(n, p, cache=True)))
for n in xrange(nMax + 1)]
results = []
n = 0
for desiredValue in... | For each desired value, find an approximate n for which the sample minimum
has a expected value equal to this value.
For each value, find an adjacent pair of n values whose expected sample minima
are below and above the desired value, respectively, and return a
linearly-interpolated n between these two values.... |
def _get_bank_redis_key(bank):
opts = _get_redis_keys_opts()
return '{prefix}{separator}{bank}'.format(
prefix=opts['bank_prefix'],
separator=opts['separator'],
bank=bank
) | Return the Redis key for the bank given the name. |
def fetch_friend_ids(self, user):
friends = self.fetch_friends(user)
friend_ids = []
for friend in friends:
friend_ids.append(friend.id)
return friend_ids | fethces friend id's from twitter
Return:
collection of friend ids |
def get_package_info(self, name):
if self._disable_cache:
return self._get_package_info(name)
return self._cache.store("packages").remember_forever(
name, lambda: self._get_package_info(name)
) | Return the package information given its name.
The information is returned from the cache if it exists
or retrieved from the remote server. |
def verify_and_extract_time(self, log_file, division, result_name):
expected_level = constants.DIVISION_COMPLIANCE_CHECK_LEVEL.get(
division, None)
print(result_name)
if expected_level is None:
raise Exception('Unknown division: {}'.format(division))
start_time, level, dt, _, success = sel... | Verifies and result and returns timing.
Uses submodule mlp_compliance (https://github.com/bitfort/mlp_compliance)
Args:
log_file: Absolute path to result file.
division: open, closed
result_name: name of the benchmark, ncf, ssd, etc
Returns:
Time for the result or `INFINITE_TIME` ... |
def ledger_effects(self, ledger_id, cursor=None, order='asc', limit=10):
    """This endpoint represents all effects that occurred in the given
    ledger.

    `GET /ledgers/{id}/effects{?cursor,limit,order}
    <https://www.stellar.org/developers/horizon/reference/endpoints/effects-for-ledger.html>`_

    :param int ledger_id: The id of the ledger to look up.
    :param cursor: A paging token, specifying where to start returning
        records from.
    :param str order: Sort order of the returned rows, ``asc`` or ``desc``.
    :param int limit: Maximum number of records to return.
    """
    path = '/ledgers/{ledger_id}/effects'.format(ledger_id=ledger_id)
    query = self.__query_params(cursor=cursor, order=order, limit=limit)
    return self.query(path, query)
def __render(self, context, **kwargs):
kwargs["namespaces"] = [context, ] + kwargs.get("namespaces", []) \
+ kwargs.get("searchList", [])
kwargs["searchList"] = None
kwargs = self.filter_options(kwargs, self.engine_valid_options())
self.engine_o... | Render template.
:param context: A dict or dict-like object to instantiate given
template file
:param kwargs: Keyword arguments passed to the template engine to
render templates with specific features enabled.
:return: Rendered string |
def update(self, ipv6s):
    """Method to update ipv6's

    :param ipv6s: List containing ipv6's desired to be updated
    :return: None
    """
    # The target URL carries every ipv6 id being updated, ';'-separated.
    joined_ids = ';'.join(str(entry.get('id')) for entry in ipv6s)
    payload = {'ips': ipv6s}
    return super(ApiIPv6, self).put('api/v3/ipv6/%s/' % joined_ids, payload)
def get_help_usage(command):
if not command:
doc = get_primary_command_usage()
elif command in ('-a', '--all'):
subcommands = [k for k in settings.subcommands if k is not None]
available_commands = subcommands + ['help']
command_doc = '\nAvailable commands:\n{}\n'.format(
... | Print out a help message and exit the program.
Args:
command: If a command value is supplied then print the help message for
the command module if available. If the command is '-a' or '--all',
then print the standard help message but with a full list of
available command... |
def avroize_type(field_type, name_prefix=""):
if isinstance(field_type, MutableSequence):
for field in field_type:
avroize_type(field, name_prefix)
elif isinstance(field_type, MutableMapping):
if field_type["type"] in ("enum", "record"):
if "name" not in field_type:
... | adds missing information to a type so that CWL types are valid in schema_salad. |
def load_from_dict(dct=None, **kwargs):
    """Load configuration from a dictionary.

    :param dct: optional base configuration mapping; it is copied, never
        mutated, so the caller's dictionary is left untouched.
    :param kwargs: extra entries that override those in ``dct``.
    :return: a loader callable ``loader(metadata) -> dict`` that returns a
        fresh copy of the merged configuration on every call.
    """
    # Merge into a NEW dict: the original implementation called
    # dct.update(kwargs), silently mutating the caller's dictionary.
    merged = dict(dct or {})
    merged.update(kwargs)

    def _load_from_dict(metadata):
        # Return a copy so callers cannot corrupt the captured config.
        return dict(merged)

    return _load_from_dict
def _setTaskParsObj(self, theTask):
    """Overridden version for ConfigObj.  theTask can be either
    a .cfg file name or a ConfigObjPars object.
    """
    # Resolve the task argument into a parameter object first.
    self._taskParsObj = cfgpars.getObjectFromTaskArg(theTask,
                                                     self._strict, False)
    pars = self._taskParsObj
    pars.setDebugLogger(self)  # route the object's debug output through us
    # NOTE(review): _lastSavedState presumably backs unsaved-change
    # detection — confirm against the rest of the class.
    self._lastSavedState = pars.dict()
def set_source_filter(self, source):
    """Only search for tweets entered via the given source.

    :param source: String. Name of the source to search for. An example \
    would be ``source=twitterfeed`` for tweets submitted via TwitterFeed
    :raises: TwitterSearchException
    """
    string_types = str if py3k else basestring
    # Reject anything that is not a string of at least two characters.
    if not isinstance(source, string_types) or len(source) < 2:
        raise TwitterSearchException(1009)
    self.source_filter = source
def hash_file(path, digest=None):
    """Hashes the contents of the file at the given path and returns the
    hash digest in hex form.

    If a hashlib message digest is not supplied a new sha1 message digest
    is used.
    """
    digest = digest or hashlib.sha1()
    with open(path, 'rb') as fd:
        # Feed the file through in fixed-size chunks; iter() with a
        # sentinel stops at the first empty read (EOF).
        for chunk in iter(lambda: fd.read(8192), b''):
            digest.update(chunk)
    hexed = digest.hexdigest()
    # On Python 2 hexdigest() yields a byte string; decode for parity.
    return hexed if PY3 else hexed.decode('utf-8')
def get_branch_container_tag(self):
    """Returns the branch container tag.

    The tag is ``<prefix>-<branch>`` when a prefix is configured,
    otherwise just ``<branch>``.
    """
    parts = ["{0}".format(self.__branch)]
    if self.__prefix:
        parts.insert(0, "{0}".format(self.__prefix))
    return "-".join(parts)
async def query_handler(service, action_type, payload, props, **kwds):
if action_type == query_action_type():
print('encountered query event {!r} '.format(payload))
result = await parse_string(payload,
service.object_resolver,
service.connection_resolver,
service.... | This action handler interprets the payload as a query to be executed
by the api gateway service. |
def error_response(response):
if response.status_code >= 500:
raise exceptions.GeocodioServerError
elif response.status_code == 403:
raise exceptions.GeocodioAuthError
elif response.status_code == 422:
raise exceptions.GeocodioDataError(response.json()["error"])
else:
rai... | Raises errors matching the response code |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.