code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def _violinplot(val, shade, bw, ax, **kwargs_shade):
density, low_b, up_b = _fast_kde(val, bw=bw)
x = np.linspace(low_b, up_b, len(density))
x = np.concatenate([x, x[::-1]])
density = np.concatenate([-density, density[::-1]])
ax.fill_betweenx(x, density, alpha=shade, lw=0, **kwargs_shade) | Auxiliary function to plot violinplots. |
def assert_sympy_expressions_equal(expr1, expr2):
if not sympy_expressions_equal(expr1, expr2):
raise AssertionError("{0!r} != {1!r}".format(expr1, expr2)) | Raises `AssertionError` if `expr1` is not equal to `expr2`.
:param expr1: first expression
:param expr2: second expression
:return: None |
def record(self):
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Logical Volume Descriptor not initialized')
rec = struct.pack(self.FMT, b'\x00' * 16,
self.vol_desc_seqnum, self.desc_char_set,
self.logical_vol_ident, 2048,
self.domain_ident.record(),
self.logical_volume_contents_use.record(), 6, 1,
self.impl_ident.record(), self.implementation_use,
self.integrity_sequence_length,
self.integrity_sequence_extent,
self.partition_map.record(), b'\x00' * 66)[16:]
return self.desc_tag.record(rec) + rec | A method to generate the string representing this UDF Logical Volume Descriptor.
Parameters:
None.
Returns:
A string representing this UDF Logical Volume Descriptor. |
def initialize(self):
if not self._initialized:
logger.info("initializing %r", self)
if not os.path.exists(self.path):
if self.mode is not None:
os.makedirs(self.path, mode=self.mode)
else:
os.makedirs(self.path)
self._set_mode()
self._add_facl_rules()
self._set_selinux_context()
self._set_ownership()
self._initialized = True
logger.info("initialized")
return
logger.info("%r was already initialized", self) | create the directory if needed and configure it
:return: None |
def check_address(address):
if isinstance(address, tuple):
check_host(address[0])
check_port(address[1])
elif isinstance(address, string_types):
if os.name != 'posix':
raise ValueError('Platform does not support UNIX domain sockets')
if not (os.path.exists(address) or
os.access(os.path.dirname(address), os.W_OK)):
raise ValueError('ADDRESS not a valid socket domain socket ({0})'
.format(address))
else:
raise ValueError('ADDRESS is not a tuple, string, or character buffer '
'({0})'.format(type(address).__name__)) | Check if the format of the address is correct
Arguments:
address (tuple):
(``str``, ``int``) representing an IP address and port,
respectively
.. note::
alternatively a local ``address`` can be a ``str`` when working
with UNIX domain sockets, if supported by the platform
Raises:
ValueError:
raised when address has an incorrect format
Example:
>>> check_address(('127.0.0.1', 22)) |
def argsort_indices(a, axis=-1):
a = np.asarray(a)
ind = list(np.ix_(*[np.arange(d) for d in a.shape]))
ind[axis] = a.argsort(axis)
return tuple(ind) | Like argsort, but returns an index suitable for sorting the
original array even if that array is multidimensional |
def pick_frequency_line(self, filename, frequency, cumulativefield='cumulative_frequency'):
if resource_exists('censusname', filename):
with closing(resource_stream('censusname', filename)) as b:
g = codecs.iterdecode(b, 'ascii')
return self._pick_frequency_line(g, frequency, cumulativefield)
else:
with open(filename, encoding='ascii') as g:
return self._pick_frequency_line(g, frequency, cumulativefield) | Given a numeric frequency, pick a line from a csv with a cumulative frequency field |
def chat_delete(self, *, channel: str, ts: str, **kwargs) -> SlackResponse:
kwargs.update({"channel": channel, "ts": ts})
return self.api_call("chat.delete", json=kwargs) | Deletes a message.
Args:
channel (str): Channel containing the message to be deleted. e.g. 'C1234567890'
ts (str): Timestamp of the message to be deleted. e.g. '1234567890.123456' |
def match_rule_patterns(fixed_text, cur=0):
pattern = exact_find_in_pattern(fixed_text, cur, RULE_PATTERNS)
if len(pattern) > 0:
return {"matched": True, "found": pattern[0]['find'],
"replaced": pattern[0]['replace'], "rules": pattern[0]['rules']}
else:
return {"matched": False, "found": None,
"replaced": fixed_text[cur], "rules": None} | Matches given text at cursor position with rule patterns
Returns a dictionary of four elements:
- "matched" - Bool: depending on if match found
- "found" - string/None: Value of matched pattern's 'find' key or none
- "replaced": string Replaced string if match found else input string at
cursor
- "rules": dict/None: A dict of rules or None if no match found |
def _get_all_filtered_channels(self, topics_without_signature):
mpe_address = self.get_mpe_address()
event_signature = self.ident.w3.sha3(text="ChannelOpen(uint256,uint256,address,address,address,bytes32,uint256,uint256)").hex()
topics = [event_signature] + topics_without_signature
logs = self.ident.w3.eth.getLogs({"fromBlock" : self.args.from_block, "address" : mpe_address, "topics" : topics})
abi = get_contract_def("MultiPartyEscrow")
event_abi = abi_get_element_by_name(abi, "ChannelOpen")
channels_ids = [get_event_data(event_abi, l)["args"]["channelId"] for l in logs]
return channels_ids | get all filtered channels from blockchain logs |
def convert_mask_to_pil(mask, real=True):
from PIL import Image
header = mask._layer._psd._record.header
channel_ids = [ci.id for ci in mask._layer._record.channel_info]
if real and mask._has_real():
width = mask._data.real_right - mask._data.real_left
height = mask._data.real_bottom - mask._data.real_top
channel = mask._layer._channels[
channel_ids.index(ChannelID.REAL_USER_LAYER_MASK)
]
else:
width = mask._data.right - mask._data.left
height = mask._data.bottom - mask._data.top
channel = mask._layer._channels[
channel_ids.index(ChannelID.USER_LAYER_MASK)
]
data = channel.get_data(width, height, header.depth, header.version)
return _create_channel((width, height), data, header.depth) | Convert Mask to PIL Image. |
def initialize():
from zsl.interface.web.performers.default import create_not_found_mapping
from zsl.interface.web.performers.resource import create_resource_mapping
create_not_found_mapping()
create_resource_mapping() | Import in this form is necessary so that we avoid the unwanted behavior and immediate initialization of the
application objects. This makes the initialization procedure run at the time when it is necessary and has every
required resource available. |
def roman2int(s):
val = 0
pos10 = 1000
beg = 0
for pos in range(3, -1, -1):
for digit in range(9,-1,-1):
r = roman[pos][digit]
if s.startswith(r, beg):
beg += len(r)
val += digit * pos10
break
pos10 //= 10
return val | Decode roman number
:param s: string representing a roman number between 1 and 9999
:returns: the decoded roman number
:complexity: linear (if that makes sense for constant bounded input size) |
def list_accounts(self, id, max_id=None, min_id=None, since_id=None, limit=None):
id = self.__unpack_id(id)
if max_id != None:
max_id = self.__unpack_id(max_id)
if min_id != None:
min_id = self.__unpack_id(min_id)
if since_id != None:
since_id = self.__unpack_id(since_id)
params = self.__generate_params(locals(), ['id'])
return self.__api_request('GET', '/api/v1/lists/{0}/accounts'.format(id)) | Get the accounts that are on the given list. A `limit` of 0 can
be specified to get all accounts without pagination.
Returns a list of `user dicts`_. |
def diff_safe(cls, value):
if isinstance(value, Frame):
return {'_str': str(value), '_id': value._id}
elif isinstance(value, (list, tuple)):
return [cls.diff_safe(v) for v in value]
return value | Return a value that can be safely stored as a diff |
def calculated_intervals(self, value):
if not value:
self._calculated_intervals = TimeIntervals()
return
if isinstance(value, TimeInterval):
value = TimeIntervals([value])
elif isinstance(value, TimeIntervals):
pass
elif isinstance(value, list):
value = TimeIntervals(value)
else:
raise TypeError("Expected list/TimeInterval/TimeIntervals, got {}".format(type(value)))
for interval in value:
if interval.end > utcnow():
raise ValueError("Calculated intervals should not be in the future")
self._calculated_intervals = value | Set the calculated intervals
This will be written to the stream_status collection if it's in the database channel
:param value: The calculated intervals
:type value: TimeIntervals, TimeInterval, list[TimeInterval] |
def export_json(self, filename):
json_graph = self.to_json()
with open(filename, 'wb') as f:
f.write(json_graph.encode('utf-8')) | Export graph in JSON form to the given file. |
def get_banks_by_assessment_part(self, assessment_part_id):
mgr = self._get_provider_manager('ASSESSMENT', local=True)
lookup_session = mgr.get_bank_lookup_session(proxy=self._proxy)
return lookup_session.get_banks_by_ids(
self.get_bank_ids_by_assessment_part(assessment_part_id)) | Gets the ``Banks`` mapped to an ``AssessmentPart``.
arg: assessment_part_id (osid.id.Id): ``Id`` of an
``AssessmentPart``
return: (osid.assessment.BankList) - list of banks
raise: NotFound - ``assessment_part_id`` is not found
raise: NullArgument - ``assessment_part_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |
def adjust_opts(in_opts, config):
memory_adjust = config["algorithm"].get("memory_adjust", {})
out_opts = []
for opt in in_opts:
if opt.startswith("-Xmx") or (opt.startswith("-Xms") and memory_adjust.get("direction") == "decrease"):
arg = opt[:4]
opt = "{arg}{val}".format(arg=arg,
val=adjust_memory(opt[4:],
memory_adjust.get("magnitude", 1),
memory_adjust.get("direction"),
maximum=memory_adjust.get("maximum")))
out_opts.append(opt)
return out_opts | Establish JVM opts, adjusting memory for the context if needed.
This allows using less or more memory for highly parallel or multicore
supporting processes, respectively. |
def _handle_sigusr1(signum: int, frame: Any) -> None:
print('=' * 70)
print(''.join(traceback.format_stack()))
print('-' * 70) | Print stacktrace. |
def do_fish_complete(cli, prog_name):
commandline = os.environ['COMMANDLINE']
args = split_args(commandline)[1:]
if args and not commandline.endswith(' '):
incomplete = args[-1]
args = args[:-1]
else:
incomplete = ''
for item, help in get_choices(cli, prog_name, args, incomplete):
if help:
echo("%s\t%s" % (item, re.sub('\s', ' ', help)))
else:
echo(item)
return True | Do the fish completion
Parameters
----------
cli : click.Command
The main click Command of the program
prog_name : str
The program name on the command line
Returns
-------
bool
True if the completion was successful, False otherwise |
def opath_from_ext(self, ext):
return os.path.join(self.workdir, self.prefix.odata + "_" + ext) | Returns the path of the output file with extension ext.
Use it when the file does not exist yet. |
def removeTags(dom):
try:
string_type = basestring
except NameError:
string_type = str
element_stack = None
if type(dom) in [list, tuple]:
element_stack = dom
elif isinstance(dom, HTMLElement):
element_stack = dom.childs if dom.isTag() else [dom]
elif isinstance(dom, string_type):
element_stack = parseString(dom).childs
else:
element_stack = dom
output = ""
while element_stack:
el = element_stack.pop(0)
if not (el.isTag() or el.isComment() or not el.getTagName()):
output += el.__str__()
if el.childs:
element_stack = el.childs + element_stack
return output | Remove all tags from `dom` and obtain plaintext representation.
Args:
dom (str, obj, array): str, HTMLElement instance or array of elements.
Returns:
str: Plain string without tags. |
def upload(ctx):
settings.add_cli_options(ctx.cli_options, settings.TransferAction.Upload)
ctx.initialize(settings.TransferAction.Upload)
specs = settings.create_upload_specifications(
ctx.cli_options, ctx.config)
del ctx.cli_options
for spec in specs:
blobxfer.api.Uploader(
ctx.general_options, ctx.credentials, spec
).start() | Upload files to Azure Storage |
def get_sub_dim(src_ds, scale=None, maxdim=1024):
ns = src_ds.RasterXSize
nl = src_ds.RasterYSize
maxdim = float(maxdim)
if scale is None:
scale_ns = ns/maxdim
scale_nl = nl/maxdim
scale = max(scale_ns, scale_nl)
if scale > 1:
ns = int(round(ns/scale))
nl = int(round(nl/scale))
return ns, nl, scale | Compute dimensions of subsampled dataset
Parameters
----------
ds : gdal.Dataset
Input GDAL Dataset
scale : int, optional
Scaling factor
maxdim : int, optional
Maximum dimension along either axis, in pixels
Returns
-------
ns
Number of samples in subsampled output
nl
Number of lines in subsampled output
scale
Final scaling factor |
def create_inline(project, resource, offset):
pyname = _get_pyname(project, resource, offset)
message = 'Inline refactoring should be performed on ' \
'a method, local variable or parameter.'
if pyname is None:
raise rope.base.exceptions.RefactoringError(message)
if isinstance(pyname, pynames.ImportedName):
pyname = pyname._get_imported_pyname()
if isinstance(pyname, pynames.AssignedName):
return InlineVariable(project, resource, offset)
if isinstance(pyname, pynames.ParameterName):
return InlineParameter(project, resource, offset)
if isinstance(pyname.get_object(), pyobjects.PyFunction):
return InlineMethod(project, resource, offset)
else:
raise rope.base.exceptions.RefactoringError(message) | Create a refactoring object for inlining
Based on `resource` and `offset` it returns an instance of
`InlineMethod`, `InlineVariable` or `InlineParameter`. |
def cli_run():
options = CLI.parse_args()
run(options.CONFIGURATION, options.log_level, options.log_target, options.log_journal) | Run the daemon from a command line interface |
def main(arguments=None):
if arguments is None:
arguments = sys.argv[1:]
server_parameters = get_server_parameters(arguments)
config = get_config(server_parameters.config_path, server_parameters.use_environment)
configure_log(config, server_parameters.log_level.upper())
validate_config(config, server_parameters)
importer = get_importer(config)
with get_context(server_parameters, config, importer) as context:
application = get_application(context)
server = run_server(application, context)
setup_signal_handler(server, config)
logging.debug('thumbor running at %s:%d' % (context.server.ip, context.server.port))
tornado.ioloop.IOLoop.instance().start() | Runs thumbor server with the specified arguments. |
def beginning_of_line(event):
" Move to the start of the current line. "
buff = event.current_buffer
buff.cursor_position += buff.document.get_start_of_line_position(after_whitespace=False) | Move to the start of the current line. |
async def pong(self, data: bytes = b"") -> None:
await self.ensure_open()
data = encode_data(data)
await self.write_frame(True, OP_PONG, data) | This coroutine sends a pong.
An unsolicited pong may serve as a unidirectional heartbeat.
The content may be overridden with the optional ``data`` argument
which must be a string (which will be encoded to UTF-8) or a
bytes-like object. |
def update(self, cur_value, mesg=None):
self.cur_value = cur_value
progress = float(self.cur_value) / self.max_value
num_chars = int(progress * self.max_chars)
num_left = self.max_chars - num_chars
if mesg is not None:
self.mesg = mesg
bar = self.template.format(self.progress_character * num_chars,
' ' * num_left,
progress * 100,
self.spinner_symbols[self.spinner_index],
self.mesg)
sys.stdout.write(bar)
if self.spinner:
self.spinner_index = (self.spinner_index + 1) % self.n_spinner
sys.stdout.flush() | Update progressbar with current value of process
Parameters
----------
cur_value : number
Current value of process. Should be <= max_value (but this is not
enforced). The percent of the progressbar will be computed as
(cur_value / max_value) * 100
mesg : str
Message to display to the right of the progressbar. If None, the
last message provided will be used. To clear the current message,
pass a null string, ''. |
def _create_row_labels(self):
labels = {}
for c in self._columns:
labels[c] = c
if self._alt_labels:
for k in self._alt_labels.keys():
labels[k] = self._alt_labels[k]
if self._label_suffix:
for k in labels.keys():
if k in self._nonnormal:
labels[k] = "{}, {}".format(labels[k],"median [Q1,Q3]")
elif k in self._categorical:
labels[k] = "{}, {}".format(labels[k],"n (%)")
else:
labels[k] = "{}, {}".format(labels[k],"mean (SD)")
return labels | Take the original labels for rows. Rename if alternative labels are
provided. Append label suffix if label_suffix is True.
Returns
----------
labels : dictionary
Dictionary, keys are original column name, values are final label. |
async def select(self, db):
res = True
async with self._cond:
for i in range(self.freesize):
res = res and (await self._pool[i].select(db))
else:
self._db = db
return res | Changes db index for all free connections.
All previously acquired connections will be closed when released. |
def matrix(fasta_path: 'path to tictax annotated fasta input',
scafstats_path: 'path to BBMap scaftstats file'):
records = SeqIO.parse(fasta_path, 'fasta')
df = tictax.matrix(records, scafstats_path)
df.to_csv(sys.stdout) | Generate taxonomic count matrix from tictax classified contigs |
def randdomain(self):
return '.'.join(
rand_readable(3, 6, use=self.random, density=3)
for _ in range(self.random.randint(1, 2))
).lower() | -> a randomized domain-like name |
def allocate_ip_for_subnet(self, subnet_id, mac, port_id):
subnet = self.get_subnet(subnet_id)
ip, mask, port_id = self.a10_allocate_ip_from_dhcp_range(subnet, "vlan", mac, port_id)
return ip, mask, port_id | Allocates an IP from the specified subnet and creates a port |
def transformer(self):
ttype = self.embedding.lower()
if ttype == 'mds':
return MDS(n_components=2, random_state=self.random_state)
if ttype == 'tsne':
return TSNE(n_components=2, random_state=self.random_state)
raise YellowbrickValueError("unknown embedding '{}'".format(ttype)) | Creates the internal transformer that maps the cluster center's high
dimensional space to its two dimensional space. |
def add_resource_types(resource_i, types):
if types is None:
return []
existing_type_ids = []
if resource_i.types:
for t in resource_i.types:
existing_type_ids.append(t.type_id)
new_type_ids = []
for templatetype in types:
if templatetype.id in existing_type_ids:
continue
rt_i = ResourceType()
rt_i.type_id = templatetype.id
rt_i.ref_key = resource_i.ref_key
if resource_i.ref_key == 'NODE':
rt_i.node_id = resource_i.id
elif resource_i.ref_key == 'LINK':
rt_i.link_id = resource_i.id
elif resource_i.ref_key == 'GROUP':
rt_i.group_id = resource_i.id
resource_i.types.append(rt_i)
new_type_ids.append(templatetype.id)
return new_type_ids | Save a reference to the types used for this resource.
@returns a list of type_ids representing the type ids
on the resource. |
def get_descriptor_output(descriptor, key, handler=None):
line = 'stub'
lines = ''
while line != '':
try:
line = descriptor.readline()
lines += line
except UnicodeDecodeError:
error_msg = "Error while decoding output of process {}".format(key)
if handler:
handler.logger.error("{} with command {}".format(
error_msg, handler.queue[key]['command']))
lines += error_msg + '\n'
return lines.replace('\n', '\n ') | Get the descriptor output and handle incorrect UTF-8 encoding of subprocess logs.
In case a process contains valid UTF-8 lines as well as invalid lines, we want to preserve
the valid and remove the invalid ones.
To do this we need to get each line and check for an UnicodeDecodeError. |
def abstracts(self, key, value):
result = []
source = force_single_element(value.get('9'))
for a_value in force_list(value.get('a')):
result.append({
'source': source,
'value': a_value,
})
return result | Populate the ``abstracts`` key. |
def base_url(self, space_id, content_type_id, environment_id=None, **kwargs):
return "spaces/{0}{1}/content_types/{2}/editor_interface".format(
space_id,
'/environments/{0}'.format(environment_id) if environment_id is not None else '',
content_type_id
) | Returns the URI for the editor interface. |
def get_all(self, name, default=None):
if default is None:
default = []
return self._headers.get_list(name) or default | make cookie python 3 version use this instead of getheaders |
def columnCount(self, parent):
if parent.isValid():
return parent.internalPointer().columnCount()
else:
return self.root.columnCount() | Returns the number of columns for the children of the given parent. |
def _work_path_to_rel_final_path(path, upload_path_mapping, upload_base_dir):
if not path or not isinstance(path, str):
return path
upload_path = None
if upload_path_mapping.get(path) is not None and os.path.isfile(path):
upload_path = upload_path_mapping[path]
else:
paths_to_check = [key for key in upload_path_mapping
if path.startswith(key)]
if paths_to_check:
for work_path in paths_to_check:
if os.path.isdir(work_path):
final_path = upload_path_mapping[work_path]
upload_path = path.replace(work_path, final_path)
break
if upload_path is not None:
return os.path.relpath(upload_path, upload_base_dir)
else:
return None | Check if `path` is a work-rooted path, and convert to a relative final-rooted path |
def delete_all_thumbnails(path, recursive=True):
total = 0
for thumbs in all_thumbnails(path, recursive=recursive).values():
total += _delete_using_thumbs_list(thumbs)
return total | Delete all files within a path which match the thumbnails pattern.
By default, matching files from all sub-directories are also removed. To
only remove from the path directory, set recursive=False. |
def encode_csv(data_dict, column_names):
import csv
import six
values = [str(data_dict[x]) for x in column_names]
str_buff = six.StringIO()
writer = csv.writer(str_buff, lineterminator='')
writer.writerow(values)
return str_buff.getvalue() | Builds a csv string.
Args:
data_dict: dict of {column_name: 1 value}
column_names: list of column names
Returns:
A csv string version of data_dict |
def e2dnde_deriv(self, x, params=None):
params = self.params if params is None else params
return np.squeeze(self.eval_e2dnde_deriv(x, params, self.scale,
self.extra_params)) | Evaluate derivative of E^2 times differential flux with
respect to E. |
def local_attr(self, name, context=None):
result = []
if name in self.locals:
result = self.locals[name]
else:
class_node = next(self.local_attr_ancestors(name, context), None)
if class_node:
result = class_node.locals[name]
result = [n for n in result if not isinstance(n, node_classes.DelAttr)]
if result:
return result
raise exceptions.AttributeInferenceError(
target=self, attribute=name, context=context
) | Get the list of assign nodes associated to the given name.
Assignments are looked for in both this class and in parents.
:returns: The list of assignments to the given name.
:rtype: list(NodeNG)
:raises AttributeInferenceError: If no attribute with this name
can be found in this class or parent classes. |
def encoded_content(self, path):
if path in self.__class__.asset_contents:
return self.__class__.asset_contents[path]
data = self.read_bytes(path)
self.__class__.asset_contents[path] = force_text(base64.b64encode(data))
return self.__class__.asset_contents[path] | Return the base64 encoded contents |
def path_join(*args):
return SEP.join((x for x in args if x not in (None, ''))).strip(SEP) | Join path parts to single path. |
def copy(self, version=None, tx_ins=None, tx_outs=None, lock_time=None,
tx_joinsplits=None, joinsplit_pubkey=None, joinsplit_sig=None):
return SproutTx(
version=version if version is not None else self.version,
tx_ins=tx_ins if tx_ins is not None else self.tx_ins,
tx_outs=tx_outs if tx_outs is not None else self.tx_outs,
lock_time=(lock_time if lock_time is not None
else self.lock_time),
tx_joinsplits=(tx_joinsplits if tx_joinsplits is not None
else self.tx_joinsplits),
joinsplit_pubkey=(joinsplit_pubkey if joinsplit_pubkey is not None
else self.joinsplit_pubkey),
joinsplit_sig=(joinsplit_sig if joinsplit_sig is not None
else self.joinsplit_sig)) | SproutTx, ... -> Tx
Makes a copy. Allows over-writing specific pieces. |
def controldata(self):
result = {}
if self._version_file_exists() and self.state != 'creating replica':
try:
env = {'LANG': 'C', 'LC_ALL': 'C', 'PATH': os.getenv('PATH')}
if os.getenv('SYSTEMROOT') is not None:
env['SYSTEMROOT'] = os.getenv('SYSTEMROOT')
data = subprocess.check_output([self._pgcommand('pg_controldata'), self._data_dir], env=env)
if data:
data = data.decode('utf-8').splitlines()
result = {l.split(':')[0].replace('Current ', '', 1): l.split(':', 1)[1].strip() for l in data
if l and ':' in l}
except subprocess.CalledProcessError:
logger.exception("Error when calling pg_controldata")
return result | return the contents of pg_controldata, or non-True value if pg_controldata call failed |
def segment_snrs(filters, stilde, psd, low_frequency_cutoff):
snrs = []
norms = []
for bank_template in filters:
snr, _, norm = matched_filter_core(
bank_template, stilde, h_norm=bank_template.sigmasq(psd),
psd=None, low_frequency_cutoff=low_frequency_cutoff)
snrs.append(snr)
norms.append(norm)
return snrs, norms | This functions calculates the snr of each bank veto template against
the segment
Parameters
----------
filters: list of FrequencySeries
The list of bank veto templates filters.
stilde: FrequencySeries
The current segment of data.
psd: FrequencySeries
low_frequency_cutoff: float
Returns
-------
snr (list): List of snr time series.
norm (list): List of normalizations factors for the snr time series. |
def _calculate_average(self, points):
assert len(self.theta) == len(points), \
"points has length %i, but should have length %i" % \
(len(points), len(self.theta))
new_point = {'x': 0, 'y': 0, 'time': 0}
for key in new_point:
new_point[key] = self.theta[0] * points[0][key] + \
self.theta[1] * points[1][key] + \
self.theta[2] * points[2][key]
return new_point | Calculate the arithmetic mean of the points x and y coordinates
separately. |
def fit(self, blocks, y=None):
self.kmeans.fit(make_weninger_features(blocks))
self.kmeans.cluster_centers_.sort(axis=0)
self.kmeans.cluster_centers_[0, :] = np.zeros(2)
return self | Fit a k-means clustering model using an ordered sequence of blocks. |
def parse_boolean(value):
if value is None:
return None
if isinstance(value, bool):
return value
if isinstance(value, string_types):
value = value.lower()
if value == 'false':
return False
if value == 'true':
return True
raise ValueError("Could not convert value to boolean: {}".format(value)) | Coerce a value to boolean.
:param value: the value, could be a string, boolean, or None
:return: the value as coerced to a boolean |
def file_length(file_obj):
file_obj.seek(0, 2)
length = file_obj.tell()
file_obj.seek(0)
return length | Returns the length in bytes of a given file object.
Necessary because os.fstat only works on real files and not file-like
objects. This works on more types of streams, primarily StringIO. |
def start(self, level="WARN"):
if self.active:
return
handler = StreamHandler()
handler.setFormatter(Formatter(self.LOGFMT))
self.addHandler(handler)
self.setLevel(level.upper())
self.active = True
return | Start logging with this logger.
Until the logger is started, no messages will be emitted. This applies
to all loggers with the same name and any child loggers.
Messages less than the given priority level will be ignored. The
default level is 'WARN', which conforms to the *nix convention that a
successful run should produce no diagnostic output. Available levels
and their suggested meanings:
DEBUG - output useful for developers
INFO - trace normal program flow, especially external interactions
WARN - an abnormal condition was detected that might need attention
ERROR - an error was detected but execution continued
CRITICAL - an error was detected and execution was halted |
def get(self, key, default=None, type=None):
try:
value = self[key]
if type is not None:
return type(value)
return value
except (KeyError, ValueError):
return default | Returns the first value for a key.
If `type` is not None, the value will be converted by calling
`type` with the value as argument. If type() raises `ValueError`, it
will be treated as if the value didn't exist, and `default` will be
returned instead. |
def from_file(cls, filename):
with open(filename) as f:
molecule, origin, axes, nrep, subtitle, nuclear_charges = \
read_cube_header(f)
data = np.zeros(tuple(nrep), float)
tmp = data.ravel()
counter = 0
while True:
line = f.readline()
if len(line) == 0:
break
words = line.split()
for word in words:
tmp[counter] = float(word)
counter += 1
return cls(molecule, origin, axes, nrep, data, subtitle, nuclear_charges) | Create a cube object by loading data from a file.
*Arguemnts:*
filename
The file to load. It must contain the header with the
description of the grid and the molecule. |
def _loh_to_vcf(cur):
cn = int(float(cur["C"]))
minor_cn = int(float(cur["M"]))
if cur["type"].find("LOH"):
svtype = "LOH"
elif cn > 2:
svtype = "DUP"
elif cn < 1:
svtype = "DEL"
else:
svtype = None
if svtype:
info = ["SVTYPE=%s" % svtype, "END=%s" % cur["end"],
"SVLEN=%s" % (int(cur["end"]) - int(cur["start"])),
"CN=%s" % cn, "MajorCN=%s" % (cn - minor_cn), "MinorCN=%s" % minor_cn]
return [cur["chr"], cur["start"], ".", "N", "<%s>" % svtype, ".", ".",
";".join(info), "GT", "0/1"] | Convert LOH output into standardized VCF. |
def _parse_rule(self, rule):
values = rule.strip().split(self.RULE_DELIM, 4)
if len(values) >= 4:
codes = values[3].split(',')
for i in range(0, len(codes)):
try:
codes[i] = int(codes[i], 0)
except ValueError as e:
binwalk.core.common.warning("The specified return code '%s' for extractor '%s' is not a valid number!" % (codes[i], values[0]))
values[3] = codes
if len(values) >= 5:
values[4] = (values[4].lower() == 'true')
return values | Parses an extraction rule.
@rule - Rule string.
Returns an array of ['<case insensitive matching string>', '<file extension>', '<command to run>', '<comma separated return codes>', <recurse into extracted directories: True|False>]. |
def db_for_write(self, model, **hints):
try:
if model.sf_access == READ_ONLY:
raise WriteNotSupportedError("%r is a read-only model." % model)
except AttributeError:
pass
return None | Prevent write actions on read-only tables.
Raises:
WriteNotSupportedError: If models.sf_access is ``read_only``. |
def basic_filter_languages(languages, ranges):
if LanguageRange.WILDCARD in ranges:
yield from languages
return
found = set()
for language_range in ranges:
range_str = language_range.match_str
for language in languages:
if language in found:
continue
match_str = language.match_str
if match_str == range_str:
yield language
found.add(language)
continue
if len(range_str) < len(match_str):
if (match_str[:len(range_str)] == range_str and
match_str[len(range_str)] == "-"):
yield language
found.add(language)
continue | Filter languages using the string-based basic filter algorithm described in
RFC4647.
`languages` must be a sequence of :class:`LanguageTag` instances which are
to be filtered.
`ranges` must be an iterable which represent the basic language ranges to
filter with, in priority order. The language ranges must be given as
:class:`LanguageRange` objects.
Return an iterator of languages which matched any of the `ranges`. The
sequence produced by the iterator is in match order and duplicate-free. The
first range to match a language yields the language into the iterator, no
other range can yield that language afterwards. |
def _calculateCrcString(inputstring):
_checkString(inputstring, description='input CRC string')
register = 0xFFFF
for char in inputstring:
register = (register >> 8) ^ _CRC16TABLE[(register ^ ord(char)) & 0xFF]
return _numToTwoByteString(register, LsbFirst=True) | Calculate CRC-16 for Modbus.
Args:
inputstring (str): An arbitrary-length message (without the CRC).
Returns:
A two-byte CRC string, where the least significant byte is first. |
def train_agent(real_env, learner, world_model_dir, hparams, epoch):
    """Train the PPO agent in the simulated environment."""
    initial_frame_chooser = rl_utils.make_initial_frame_chooser(
        real_env, hparams.frame_stack_size, hparams.simulation_random_starts,
        hparams.simulation_flip_first_random_for_beginning
    )
    # Simulated env rolls out the learned world model from world_model_dir;
    # rollout videos are dumped per-epoch under the agent model dir.
    env_fn = rl.make_simulated_env_fn_from_hparams(
        real_env, hparams, batch_size=hparams.simulated_batch_size,
        initial_frame_chooser=initial_frame_chooser, model_dir=world_model_dir,
        sim_video_dir=os.path.join(
            learner.agent_model_dir, "sim_videos_{}".format(epoch)
        )
    )
    base_algo_str = hparams.base_algo
    train_hparams = trainer_lib.create_hparams(hparams.base_algo_params)
    if hparams.wm_policy_param_sharing:
        train_hparams.optimizer_zero_grads = True
    # Copy hparams prefixed with "<base_algo>_" into the training hparams.
    rl_utils.update_hparams_from_hparams(
        train_hparams, hparams, base_algo_str + "_"
    )
    final_epoch = hparams.epochs - 1
    # NOTE(review): epochs that are 3 or 7 before the final one get doubled
    # env steps, and the final epoch tripled — presumably a hand-tuned
    # schedule; confirm against the training recipe before changing.
    is_special_epoch = (epoch + 3) == final_epoch or (epoch + 7) == final_epoch
    is_final_epoch = epoch == final_epoch
    env_step_multiplier = 3 if is_final_epoch else 2 if is_special_epoch else 1
    learner.train(
        env_fn, train_hparams, simulated=True, save_continuously=True,
        epoch=epoch, env_step_multiplier=env_step_multiplier
    )
def _handle_usecols(self, columns, usecols_key):
    """Filter ``columns`` down to ``self.usecols`` and set ``self._col_indices``.

    ``usecols_key`` is used if there are string usecols: it supplies the
    names against which string entries are resolved to positions.
    """
    if self.usecols is not None:
        if callable(self.usecols):
            # usecols may be a predicate over column names.
            col_indices = _evaluate_usecols(self.usecols, usecols_key)
        elif any(isinstance(u, str) for u in self.usecols):
            if len(columns) > 1:
                raise ValueError("If using multiple headers, usecols must "
                                 "be integers.")
            col_indices = []
            for col in self.usecols:
                if isinstance(col, str):
                    try:
                        col_indices.append(usecols_key.index(col))
                    except ValueError:
                        # Name not found: delegate to produce a descriptive error.
                        _validate_usecols_names(self.usecols, usecols_key)
                else:
                    col_indices.append(col)
        else:
            # Already integer positions.
            col_indices = self.usecols
        columns = [[n for i, n in enumerate(column) if i in col_indices]
                   for column in columns]
        self._col_indices = col_indices
    return columns
def resolve_push_to(push_to, default_url, default_namespace):
    """Given a push-to value, return the registry and namespace.

    :param push_to: string: User supplied --push-to value.
    :param default_url: string: Container engine's default_index value
        (e.g. docker.io).
    :param default_namespace: string: Namespace used when none is given.
    :return: tuple: registry_url, namespace
    """
    protocol = 'http://' if push_to.startswith('http://') else 'https://'
    bare = REMOVE_HTTP.sub('', push_to)
    pieces = bare.split('/', 1)
    host = pieces[0]
    if len(pieces) == 2:
        # Explicit "<registry>/<namespace>" form.
        return protocol + host, pieces[1]
    # A lone token is a registry only if it looks like a hostname
    # (contains '.' or ':') or is literally "localhost"; otherwise it is
    # a namespace on the default registry.
    looks_like_registry = bool({'.', ':'} & set(host)) or host == 'localhost'
    if looks_like_registry:
        return protocol + host, default_namespace
    return default_url, bare
def MessageEncoder(field_number, is_repeated, is_packed):
    """Returns an encoder for an embedded-message field.

    The returned callable takes ``(write, value)`` and emits the tag,
    the varint-encoded byte size, and the serialized message payload.
    """
    # Message fields are always length-delimited on the wire.
    tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
    local_EncodeVarint = _EncodeVarint  # local binding for speed in the closure
    assert not is_packed  # message fields can never be packed
    if is_repeated:
        def EncodeRepeatedField(write, value):
            # tag + length + payload once per element
            for element in value:
                write(tag)
                local_EncodeVarint(write, element.ByteSize())
                element._InternalSerialize(write)
        return EncodeRepeatedField
    else:
        def EncodeField(write, value):
            write(tag)
            local_EncodeVarint(write, value.ByteSize())
            return value._InternalSerialize(write)
        return EncodeField
def load_path(self, path):
    """Load and return a given import path to a module or class.

    A final dotted segment starting with an uppercase letter is treated as
    a class name (``package.module.ClassName``); the containing module is
    imported and the attribute fetched from it. Otherwise the whole path
    is imported as a module.

    :param path: dotted import path
    :raises ConfigurationError: if the class attribute cannot be found
    """
    containing_module, _, last_item = path.rpartition('.')
    # Guard against an empty final segment (e.g. a trailing dot), which
    # previously raised IndexError on last_item[0]; compute the class check
    # once instead of twice.
    is_class = bool(last_item) and last_item[0].isupper()
    module_path = containing_module if is_class else path
    imported_obj = importlib.import_module(module_path)
    if is_class:
        try:
            imported_obj = getattr(imported_obj, last_item)
        except AttributeError:
            msg = 'Cannot import "%s". ' \
                  '(Hint: CamelCase is only for classes)' % last_item
            raise ConfigurationError(msg)
    return imported_obj
def get_id(self):
    """Returns the term identifier.

    @rtype: string
    @return: the term identifier (``id`` for NAF, ``mid`` for KAF)
    """
    attribute = {'NAF': 'id', 'KAF': 'mid'}.get(self.type)
    if attribute is None:
        return None
    return self.node.get(attribute)
def nunique(expr):
    """The distinct count.

    :param expr: a sequence, grouped sequence, collection, or group-by
        expression
    :return: an aggregation expression producing an int64 distinct count
    """
    output_type = types.int64
    if isinstance(expr, SequenceExpr):
        return NUnique(_value_type=output_type, _inputs=[expr])
    elif isinstance(expr, SequenceGroupBy):
        return GroupedNUnique(_data_type=output_type, _inputs=[expr.to_column()], _grouped=expr.input)
    elif isinstance(expr, CollectionExpr):
        # Whole-collection distinct count: prefer the input of a unique()
        # call when one is present, else count over all projected fields.
        unique_input = _extract_unique_input(expr)
        if unique_input:
            return nunique(unique_input)
        else:
            return NUnique(_value_type=types.int64, _inputs=expr._project_fields)
    elif isinstance(expr, GroupBy):
        # Grouped distinct count over either the to-agg columns or all fields.
        if expr._to_agg:
            inputs = expr.input[expr._to_agg.names]._project_fields
        else:
            inputs = expr.input._project_fields
        return GroupedNUnique(_data_type=types.int64, _inputs=inputs,
                              _grouped=expr)
def buscar_por_ip_ambiente(self, ip, id_environment):
    """Get an IP with an associated environment.

    :param ip: IP address in the format x1.x2.x3.x4.
    :param id_environment: Identifier of the environment. Integer value
        and greater than zero.

    :return: Dictionary with the following structure::

        {'ip': {'id': < id >,
                'id_vlan': < id_vlan >,
                'oct4': < oct4 >,
                'oct3': < oct3 >,
                'oct2': < oct2 >,
                'oct1': < oct1 >,
                'descricao': < descricao > }}

    :raise IpNaoExisteError: IP is not registered or not associated with
        the environment.
    :raise InvalidParameterError: The environment identifier and/or IP
        is/are null or invalid.
    :raise DataBaseError: Networkapi failed to access the database.
    """
    if not is_valid_int_param(id_environment):
        raise InvalidParameterError(
            u'Environment identifier is invalid or was not informed.')
    if not is_valid_ip(ip):
        raise InvalidParameterError(u'IP is invalid or was not informed.')
    url = 'ip/' + str(ip) + '/ambiente/' + str(id_environment) + '/'
    code, xml = self.submit(None, 'GET', url)
    return self.response(code, xml)
def swipe(self):
    """Mirror the current array value in reverse: bits that had a greater
    index get a lesser index, and vice-versa. This array is not modified;
    a new array is created and returned.

    :return: WBinArray
    """
    length = len(self)
    mirrored = WBinArray(0, length)
    for index in range(length):
        mirrored[index] = self[length - index - 1]
    return mirrored
def interpolate(self, other, t):
    """Create a new vertex between this vertex and `other` by linearly
    interpolating all properties using a parameter of `t`. Subclasses
    should override this to interpolate additional properties.
    """
    blended_pos = self.pos.lerp(other.pos, t)
    blended_normal = self.normal.lerp(other.normal, t)
    return Vertex(blended_pos, blended_normal)
def visit_with(self, node):
    """Return an astroid.With node as string."""
    # Each item is (context-expr, optional-vars); emit "expr as vars" when
    # vars is set (the `and ... or ""` form is the legacy conditional idiom).
    items = ", ".join(
        ("%s" % expr.accept(self)) + (vars and " as %s" % (vars.accept(self)) or "")
        for expr, vars in node.items
    )
    return "with %s:\n%s" % (items, self._stmt_list(node.body))
def upload_module(self, local_path=None, remote_path="/tmp/lime.ko"):
    """Upload LiME kernel module to remote host.

    :type local_path: str
    :param local_path: local path to lime kernel module (required)
    :type remote_path: str
    :param remote_path: remote path to upload lime kernel module
    :raises FileNotFoundError: if ``local_path`` is not provided
    """
    if local_path is None:
        # Bug fix: the original raised the misspelled name
        # ``FileNotFoundFoundError``, which itself crashed with NameError;
        # raise the builtin with a useful message instead.
        raise FileNotFoundError(
            "local_path to the LiME kernel module is required")
    self.shell.upload_file(local_path, remote_path)
def _check_and_uninstall_python(ret, python, user=None):
    """Verify that python is uninstalled, removing it if it is present.

    :param ret: salt state return dict, updated in place and returned
    :param python: python version string to remove
    :param user: optional user to run pyenv as
    """
    ret = _python_installed(ret, python, user=user)
    if ret['result']:
        if ret['default']:
            # Don't leave the version being removed as the pyenv default.
            __salt__['pyenv.default']('system', runas=user)
        if __salt__['pyenv.uninstall_python'](python, runas=user):
            ret['result'] = True
            ret['changes'][python] = 'Uninstalled'
            ret['comment'] = 'Successfully removed python'
            return ret
        else:
            ret['result'] = False
            ret['comment'] = 'Failed to uninstall python'
            return ret
    else:
        # Nothing installed: already in the desired state.
        ret['result'] = True
        ret['comment'] = 'python {0} is already absent'.format(python)
        return ret
def list(self, environment_vip=None):
    """List IPv6 networks.

    :param environment_vip: environment vip to filter by (optional)
    :return: IPv6 Networks
    """
    query = ''
    if environment_vip:
        query = 'environment_vip=%s' % environment_vip
    return super(ApiNetworkIPv6, self).get('api/networkv6/?' + query)
def flattenPorts(root: LNode):
    """Flatten ports of all children to simplify layout generation.

    :attention: the ports' ``children`` property is destroyed by the
        flattening; the ``parent`` property stays the same.
    """
    for u in root.children:
        # Flatten each side's port tree independently.
        u.west = _flattenPortsSide(u.west)
        u.east = _flattenPortsSide(u.east)
        u.north = _flattenPortsSide(u.north)
        u.south = _flattenPortsSide(u.south)
def get_hkr_state(self):
    """Get the thermostat state.

    Refreshes the device data, then maps the target temperature onto a
    named state. Returns one of 'off', 'on', 'eco', 'comfort', or
    'manual' (for any other target temperature).
    """
    self.update()
    known_states = {
        126.5: 'off',   # device sentinel value for "off"
        127.0: 'on',    # device sentinel value for "on"
        self.eco_temperature: 'eco',
        self.comfort_temperature: 'comfort',
    }
    # Idiomatic replacement for try/except KeyError: unknown targets are
    # treated as a manually chosen temperature.
    return known_states.get(self.target_temperature, 'manual')
def exists(self, filename):
    """Report whether a file exists on all distribution points.

    Determines file type by extension.

    Args:
        filename: Filename you wish to check. (No path! e.g.:
            "AdobeFlashPlayer-14.0.0.176.pkg")

    Returns:
        Boolean
    """
    # Query every child repo (no short-circuit, matching the original
    # behavior of asking each one), then combine.
    statuses = [repo.exists(filename) for repo in self._children]
    return all(statuses)
def do_uninstall(ctx, verbose, fake):
    """Uninstalls legit git aliases, including deprecated legit sub-commands.

    :param ctx: click context used to enumerate current commands
    :param verbose: echo each git command before running it
    :param fake: dry-run; echo but do not execute or print the summary
    """
    aliases = cli.list_commands(ctx)
    # Also remove aliases for sub-commands that no longer exist.
    aliases.extend(['graft', 'harvest', 'sprout', 'resync', 'settings', 'install', 'uninstall'])
    for alias in aliases:
        system_command = 'git config --global --unset-all alias.{0}'.format(alias)
        verbose_echo(system_command, verbose, fake)
        if not fake:
            os.system(system_command)
    if not fake:
        click.echo('\nThe following git aliases are uninstalled:\n')
        output_aliases(aliases)
def cart2dir(self, cart):
    """Converts cartesian coordinates to a direction.

    :param cart: a single [x, y, z] triple or an array of such triples
    :returns: numpy array of [declination, inclination, radius] rows
        (``numpy.zeros(3)`` if the inclination computation fails)
    """
    cart = numpy.array(cart)
    rad = old_div(numpy.pi, 180.)  # degrees/radians conversion factor
    if len(cart.shape) > 1:
        # Array of points: take columns.
        Xs, Ys, Zs = cart[:, 0], cart[:, 1], cart[:, 2]
    else:
        # Single point.
        Xs, Ys, Zs = cart[0], cart[1], cart[2]
    Rs = numpy.sqrt(Xs**2 + Ys**2 + Zs**2)
    Decs = (old_div(numpy.arctan2(Ys, Xs), rad)) % 360.
    try:
        Incs = old_div(numpy.arcsin(old_div(Zs, Rs)), rad)
    except:  # NOTE(review): bare except hides the real error; consider narrowing
        print('trouble in cart2dir')
        return numpy.zeros(3)
    return numpy.array([Decs, Incs, Rs]).transpose()
def on_switch_page(self, notebook, page_pointer, page_num, user_param1=None):
    """Update state selection when the active tab was changed."""
    page = notebook.get_nth_page(page_num)
    # Find the tab bookkeeping entry for the newly active page.
    for tab_info in list(self.tabs.values()):
        if tab_info['page'] is page:
            state_m = tab_info['state_m']
            sm_id = state_m.state.get_state_machine().state_machine_id
            selected_state_m = self.current_state_machine_m.selection.get_selected_state()
            # Only change the selection if it actually differs and the state
            # machine is still registered with the manager.
            if selected_state_m is not state_m and sm_id in self.model.state_machine_manager.state_machines:
                self.model.selected_state_machine_id = sm_id
                self.current_state_machine_m.selection.set(state_m)
            return
def draw_heading(self, writer):
    """Conditionally redraw screen when ``dirty`` attribute is valued REFRESH.

    When Pager attribute ``dirty`` is ``STATE_REFRESH``, the cursor is moved
    to (0,0), the screen is cleared, and the heading is displayed.

    :param writer: callable writes to output stream, receiving unicode.
    :returns: True if class attribute ``dirty`` is ``STATE_REFRESH``.
    """
    if self.dirty != self.STATE_REFRESH:
        return None
    heading_parts = (self.term.home, self.term.clear,
                     self.screen.msg_intro, '\n',
                     self.screen.header, '\n')
    writer(u''.join(heading_parts))
    return True
def include(gset, elem, value=True):
    """Do whatever it takes to make ``elem in gset`` true.

    Works for sets (using ``add``), lists (using ``append``) and dicts
    (using ``__setitem__``).

    ``value``
        if ``gset`` is a dict, does ``gset[elem] = value``.

    Returns ``elem``, or raises an Error if none of these operations are
    supported.
    """
    # Prefer a set-style insert, then a list-style one.
    for method_name in ('add', 'append'):
        inserter = getattr(gset, method_name, None)
        if inserter is not None:
            inserter(elem)
            return elem
    # Fall back to mapping-style assignment.
    if not hasattr(gset, '__setitem__'):
        raise Error("gset is not a supported container.")
    gset[elem] = value
    return elem
def _visit_handlers(handlers, visitor, prefix, suffixes):
results = []
for handler in handlers:
for suffix in suffixes:
func = getattr(handler, '{}_{}'.format(prefix, suffix).lower(), None)
if func:
results.append(visitor(suffix, func))
return results | Use visitor partern to collect information from handlers |
def receive(self, path, diff, showProgress=True):
    """Return a context manager for a stream that will store a diff via
    ``btrfs receive``.

    Returns None in dry-run mode.
    """
    directory = os.path.dirname(path)
    cmd = ["btrfs", "receive", "-e", directory]
    # In dry-run mode only log the command; no process is started.
    if Store.skipDryRun(logger, self.dryrun)("Command: %s", cmd):
        return None
    if not os.path.exists(directory):
        os.makedirs(directory)
    process = subprocess.Popen(
        cmd,
        stdin=subprocess.PIPE,
        stderr=subprocess.PIPE,
        stdout=DEVNULL,
    )
    _makeNice(process)  # lower the process priority
    return _Writer(process, process.stdin, path, diff, showProgress)
def update_models(ctx, f=False):
    """Updates the local django db project models using the salic database
    from MinC.

    :param f: when true, pass ``--force True`` to the management command.
    """
    force_flag = ' --force True' if f else ''
    manage(ctx, 'create_models_from_sql' + force_flag, env={})
def shutdown(self, exitcode=0):
    """Stops system stats, streaming handlers, and uploads files without
    output; used by ``wandb.monitor``.

    :param exitcode: exit code forwarded to the file-syncing shutdown
    """
    logger.info("shutting down system stats and metadata service")
    self._system_stats.shutdown()
    self._meta.shutdown()
    if self._cloud:
        # Only cloud runs stream files; stop the observer before flushing.
        logger.info("stopping streaming files and file change observer")
        self._stop_file_observer()
        self._end_file_syncing(exitcode)
    self._run.history.close()
def configuration_to_dict(handlers):
    """Returns configuration data gathered by given handlers as a dict.

    :param list[ConfigHandler] handlers: Handlers list,
        usually from parse_configuration()
    :rtype: dict
    """
    gathered = defaultdict(dict)
    for handler in handlers:
        prefix = handler.section_prefix
        for option in handler.set_options:
            # Sections are only created for handlers that set options.
            gathered[prefix][option] = _get_option(handler.target_obj, option)
    return gathered
def serialize(self, subject, *objects_or_combinators):
    """Yield RDF triples linking ``subject`` via this predicate.

    ``objects_or_combinators`` may be Combinators, URIRefs or Literals.
    """
    ec_s = rdflib.BNode()
    if self.operator is not None:
        # Operator form: attach a blank node and serialize the operand list.
        if subject is not None:
            yield subject, self.predicate, ec_s
        # NOTE(review): ``oc`` is not defined in this block — presumably a
        # module-level helper (or a latent bug); confirm before relying on
        # this branch.
        yield from oc(ec_s)
        yield from self._list.serialize(ec_s, self.operator, *objects_or_combinators)
    else:
        for thing in objects_or_combinators:
            if isinstance(thing, Combinator):
                # Combinators expand into triples about a fresh blank node.
                object = rdflib.BNode()
                hasType = False
                for t in thing(object):
                    if t[1] == rdf.type:
                        hasType = True
                    yield t
                if not hasType:
                    # Ensure the blank node is at least typed as owl:Class.
                    yield object, rdf.type, owl.Class
            else:
                object = thing
            yield subject, self.predicate, object
def plotDutyCycles(dutyCycle, filePath):
    """Create plot showing histogram of duty cycles.

    :param dutyCycle: (torch tensor) the duty cycle of each unit
    :param filePath: (str) Full filename of image file
    """
    _, entropy = binaryEntropy(dutyCycle)
    # Fixed-range bins: duty cycles are expected in [0, 0.3].
    bins = np.linspace(0.0, 0.3, 200)
    plt.hist(dutyCycle, bins, alpha=0.5, label='All cols')
    plt.title("Histogram of duty cycles, entropy=" + str(float(entropy)))
    plt.xlabel("Duty cycle")
    plt.ylabel("Number of units")
    plt.savefig(filePath)
    plt.close()  # release the figure so repeated calls don't accumulate
def resizeColumnsToContents(self, startCol=None, stopCol=None):
    """Resizes the columns in ``[startCol, stopCol)`` to their contents.

    Defaults to all columns; the range is clamped to the model's
    column count.
    """
    numCols = self.model().columnCount()
    startCol = 0 if startCol is None else max(startCol, 0)
    stopCol = numCols if stopCol is None else min(stopCol, numCols)
    row = 0  # widths are measured from the first row's index widgets only
    for col in range(startCol, stopCol):
        indexWidget = self.indexWidget(self.model().index(row, col))
        if indexWidget:
            contentsWidth = indexWidget.sizeHint().width()
        else:
            # No widget installed for this cell: use the header's hint.
            contentsWidth = self.header().sectionSizeHint(col)
        self.header().resizeSection(col, contentsWidth)
def query_term(self, term, verbose=False):
    """Given a GO ID, return GO object (or None with a warning on stderr
    if the term is unknown)."""
    if term not in self:
        sys.stderr.write("Term %s not found!\n" % term)
        return
    record = self[term]
    if verbose:
        print(record)
        sys.stderr.write("all parents: {}\n".format(
            repr(record.get_all_parents())))
        sys.stderr.write("all children: {}\n".format(
            repr(record.get_all_children())))
    return record
def new(self, path, desc=None, bare=True):
    """Create a new bare repo.Local instance.

    :param path: Path to new repo.
    :param desc: Repo description.
    :param bare: Create as bare repo.
    :returns: New repo.Local instance.
    :raises RepoError: if ``path`` already exists or initialization fails.
    """
    if os.path.exists(path):
        raise RepoError('Path already exists: %s' % path)
    try:
        os.mkdir(path)
        if bare:
            Repo.init_bare(path)
        else:
            Repo.init(path)
        repo = Local(path)
        if desc:
            repo.setDescription(desc)
        # Record an initial version so the repo is never empty.
        version = repo.addVersion()
        version.save('Repo Initialization')
        return repo
    except Exception, e:  # Python 2 syntax; wrap any failure in RepoError
        traceback.print_exc()
        raise RepoError('Error creating repo')
def use_google_symbol(fct):
    """Decorator: removes ".PA" or another market indicator from the Yahoo
    symbol convention to suit the Google convention, then restores the
    original symbol names on the returned columns.
    """
    def decorator(symbols):
        if isinstance(symbols, str):
            symbols = [symbols]
        symbols = sorted(symbols)
        # Strip everything from the first '.' onward, but only when the
        # dot is not the leading character.
        stripped = []
        for symbol in symbols:
            dot_pos = symbol.find('.')
            stripped.append(symbol[:dot_pos] if dot_pos > 0 else symbol)
        data = fct(stripped)
        # Map the returned (google-style) columns back to the originals.
        data.columns = [s for s in symbols if s.split('.')[0] in data.columns]
        return data
    return decorator
def get_currencies_info() -> Element:
    """Get META information about currencies.

    url: http://www.cbr.ru/scripts/XML_val.asp

    :return: :class:`Element <Element 'Valuta'>` object
    :rtype: ElementTree.Element
    """
    response = requests.get(const.CBRF_API_URLS['info'])
    # The endpoint returns an XML document; parse the body into an Element.
    return XML(response.text)
def _write(self, str_buf):
self._filehandle.write(str_buf)
self._buf_size += len(str_buf) | Uses the filehandle to the file in GCS to write to it. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.