code (string, lengths 51–2.38k) | docstring (string, lengths 4–15.2k) |
|---|---|
def corr(sim=None, obs=None, node=None, skip_nan=False):
    """Calculate the product-moment correlation coefficient after Pearson.

    >>> from hydpy import round_
    >>> from hydpy import corr
    >>> round_(corr(sim=[0.5, 1.0, 1.5], obs=[1.0, 2.0, 3.0]))
    1.0
    >>> round_(corr(sim=[4.0, 2.0, 0.0], obs=[1.0, 2.0, 3.0]))
    -1.0
    >>> round_(corr(sim=[1.0, 2.0, 1.0], obs=[1.0, 2.0, 3.0]))
    0.0

    See the documentation on function |prepare_arrays| for additional
    instructions for use of function |corr|.
    """
    sim_arr, obs_arr = prepare_arrays(sim, obs, node, skip_nan)
    return numpy.corrcoef(sim_arr, obs_arr)[0, 1]
def set_state(self, updater=None, **kwargs):
    """Update the datastore.

    :param func|dict updater: (state) => state_change or dict state_change
    :rtype: Iterable[tornado.concurrent.Future]
    """
    if callable(updater):
        changes = updater(self)
    else:
        changes = kwargs if updater is None else updater
    results = []
    for key, value in changes.items():
        results.extend(self.set(key, value))
    return results
def validateURL(self, url):
    """Validate a URL against this trust root.

    @param url: The URL to check
    @type url: C{str}
    @return: Whether the given URL is within this trust root.
    @rtype: C{bool}
    """
    parts = _parseURL(url)
    if parts is None:
        return False
    proto, host, port, path = parts
    if proto != self.proto:
        return False
    if port != self.port:
        return False
    if '*' in host:
        # wildcards in the candidate host are never acceptable
        return False
    if not self.wildcard:
        if host != self.host:
            return False
    elif (not host.endswith(self.host)) and ('.' + host) != self.host:
        return False
    if path != self.path:
        prefix_len = len(self.path)
        # candidate path must start with the trusted path...
        if self.path[:prefix_len] != path[:prefix_len]:
            return False
        # ...and continue with a query/path separator, not an arbitrary char
        allowed = '&' if '?' in self.path else '?/'
        return self.path[-1] in allowed or path[prefix_len] in allowed
    return True
def _set_tk_config(self, keys, value):
    """Set (or reset) config values on the widget's tk object.

    :param string/List keys:
        A single tk config key or a list of tk keys.
    :param variable value:
        The value to set. If `None`, the config value is reset to its
        recorded default.
    """
    key_list = [keys] if isinstance(keys, str) else keys
    for key in key_list:
        if key not in self.tk.keys():
            continue
        self.tk[key] = self._tk_defaults[key] if value is None else value
def get_deep_features(audio_data, verbose=True):
    """Calculate the deep features used by the Sound Classifier.

    The Sound Classifier computes deep features for both model creation
    and prediction; if the same data is used multiple times, computing
    them once up front gives a significant speed-up.

    Parameters
    ----------
    audio_data : SArray
        Audio data represented as dicts with keys 'data' and 'sample_rate',
        see `turicreate.load_audio(...)`.

    Examples
    --------
    >>> my_audio_data['deep_features'] = get_deep_features(my_audio_data['audio'])
    >>> train, test = my_audio_data.random_split(.8)
    >>> model = tc.sound_classifier.create(train, 'label', 'deep_features')
    >>> predictions = model.predict(test)
    """
    from ._audio_feature_extractor import _get_feature_extractor
    if not _is_audio_data_sarray(audio_data):
        raise TypeError("Input must be audio data")
    extractor = _get_feature_extractor('VGGish')
    return extractor.get_deep_features(audio_data, verbose=verbose)
def enable_vt_mode(filehandle=None):
    """Enable virtual terminal processing mode for the given console handle.

    Defaults to the OS handle backing ``sys.__stdout__``.
    """
    if filehandle is None:
        filehandle = msvcrt.get_osfhandle(sys.__stdout__.fileno())
    mode = wintypes.DWORD()
    KERNEL32.GetConsoleMode(filehandle, ctypes.byref(mode))
    # 0x0004 == ENABLE_VIRTUAL_TERMINAL_PROCESSING
    KERNEL32.SetConsoleMode(filehandle, mode.value | 0x0004)
def check_function_semantics(self, line: str, position: int, tokens: ParseResults) -> ParseResults:
    """Raise an exception if the function used on the tokens is wrong.

    :raises: InvalidEntity, InvalidFunctionSemantic
    """
    if not self._namespace_dict or NAMESPACE not in tokens:
        return tokens
    namespace, name = tokens[NAMESPACE], tokens[NAME]
    if namespace in self.identifier_parser.namespace_to_pattern:
        # pattern namespaces are not checked against encodings
        return tokens
    if self._allow_naked_names and namespace == DIRTY:
        return tokens
    valid_functions = set(itt.chain.from_iterable(
        belns_encodings.get(encoding, set())
        for encoding in self._namespace_dict[namespace][name]
    ))
    if not valid_functions:
        raise InvalidEntity(self.get_line_number(), line, position, namespace, name)
    if tokens[FUNCTION] not in valid_functions:
        raise InvalidFunctionSemantic(self.get_line_number(), line, position,
                                      tokens[FUNCTION], namespace, name, valid_functions)
    return tokens
def nodes_with_recipe(recipename):
    """Configure all nodes that have the given recipe in their run list."""
    names = [n['name'] for n in
             lib.get_nodes_with_recipe(recipename, env.chef_environment)]
    if not names:
        print("No nodes found with recipe '{0}'".format(recipename))
        sys.exit(0)
    return node(*names)
def content_id(self) -> Optional[UnstructuredHeader]:
    """The ``Content-Id`` header, or None when absent or empty."""
    try:
        value = self[b'content-id'][0]
    except (KeyError, IndexError):
        return None
    return cast(UnstructuredHeader, value)
def ToResponse(self, columns_order=None, order_by=(), tqx=""):
    """Write the response matching the "tqx" request string.

    Parses the tqx request string (format defined by the Google
    Visualization data-source protocol) and dispatches on its "out"
    parameter: ToJSonResponse() for "json" (the default), ToHtml() for
    "html", ToCsv() for "csv" and ToTsvExcel() for "tsv-excel", passing
    along the remaining relevant request keys.

    Args:
      columns_order: Optional. Passed as is to the response function.
      order_by: Optional. Passed as is to the response function.
      tqx: Optional. The request string as received by HTTP GET, in the
          format "key1:value1;key2:value2...". All keys have defaults, so
          an empty string calls ToJSonResponse() with no extra parameters.

    Returns:
      A response string, as returned by the relevant response function.

    Raises:
      DataTableException: One of the parameters passed in tqx is not
          supported.
    """
    options = {}
    if tqx:
        options = dict(item.split(":") for item in tqx.split(";"))
    if options.get("version", "0.6") != "0.6":
        raise DataTableException(
            "Version (%s) passed by request is not supported."
            % options["version"])
    out = options.get("out", "json")
    if out == "json":
        handler = options.get("responseHandler",
                              "google.visualization.Query.setResponse")
        return self.ToJSonResponse(columns_order, order_by,
                                   req_id=options.get("reqId", 0),
                                   response_handler=handler)
    if out == "html":
        return self.ToHtml(columns_order, order_by)
    if out == "csv":
        return self.ToCsv(columns_order, order_by)
    if out == "tsv-excel":
        return self.ToTsvExcel(columns_order, order_by)
    raise DataTableException("'out' parameter: '%s' is not supported" % out)
def dst_to_src(self, dst_file):
    """Return the src URI from the dst filepath.

    Relies purely on pattern matching against ``self.dst_path``; the
    destination filepath need not exist on the local filesystem.
    Returns the source URI on success, None on failure.
    """
    match = re.match(self.dst_path + "/(.*)$", dst_file)
    if match is None:
        return None
    return self.src_uri + '/' + match.group(1)
def height_max(self, height_max):
    """Set the maximum height of the widget.

    Parameters
    ----------
    height_max : None | float
        The maximum height of the widget; if None, the maximum height
        is unbounded.
    """
    if height_max is None:
        self._height_limits[1] = None
        return
    limit = float(height_max)
    assert(0 <= self.height_min <= limit)
    self._height_limits[1] = limit
    self._update_layout()
def build_conflict_dict(key_list, val_list):
    """Build a dict mapping each key to the list of values paired with it,
    so keys occurring multiple times collect all of their values.

    Args:
        key_list (list):
        val_list (list):

    Returns:
        dict: key_to_vals

    CommandLine:
        python -m utool.util_dict --test-build_conflict_dict

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_dict import *  # NOQA
        >>> import utool as ut
        >>> key_list = [ 1, 2, 2, 3, 1]
        >>> val_list = ['a', 'b', 'c', 'd', 'e']
        >>> key_to_vals = build_conflict_dict(key_list, val_list)
        >>> result = ut.repr4(key_to_vals)
        >>> print(result)
        {
            1: ['a', 'e'],
            2: ['b', 'c'],
            3: ['d'],
        }
    """
    grouped = defaultdict(list)
    for key, value in zip(key_list, val_list):
        grouped[key].append(value)
    return grouped
def get_instance_type(self, port):
    """Determine the resource type of a port from its device owner and
    vnic type; returns None when the owner is unrecognised."""
    if port[portbindings.VNIC_TYPE] == portbindings.VNIC_BAREMETAL:
        return a_const.BAREMETAL_RESOURCE
    owner = port['device_owner']
    owner_to_type = {
        n_const.DEVICE_OWNER_DHCP: a_const.DHCP_RESOURCE,
        n_const.DEVICE_OWNER_DVR_INTERFACE: a_const.ROUTER_RESOURCE,
        trunk_consts.TRUNK_SUBPORT_OWNER: a_const.VM_RESOURCE}
    if owner in owner_to_type:
        return owner_to_type[owner]
    if owner.startswith(n_const.DEVICE_OWNER_COMPUTE_PREFIX):
        return a_const.VM_RESOURCE
    return None
def add_param(self, param_name, layer_index, blob_index):
    """Record a named parameter (converted from the given caffe blob)
    into the dict destined for the .params file."""
    blob = self.layers[layer_index].blobs[blob_index]
    self.dict_param[param_name] = mx.nd.array(caffe.io.blobproto_to_array(blob))
def get_taf_remarks(txt: str) -> (str, str):
    """Return the report and its remarks separated, if remarks are found;
    otherwise the full text and an empty remarks string."""
    start = find_first_in_list(txt, TAF_RMK)
    if start == -1:
        return txt, ''
    return txt[:start].strip(), txt[start:]
def raw_connection_from(engine_or_conn):
    """Extract a raw DB-API connection and whether it should be auto-closed.

    Only connections opened by this package (i.e. ones created here from
    an engine) are flagged for automatic closing.
    """
    if hasattr(engine_or_conn, 'cursor'):
        # already a raw DB-API connection
        return engine_or_conn, False
    if hasattr(engine_or_conn, 'connection'):
        # a SQLAlchemy Connection wrapping a raw one
        return engine_or_conn.connection, False
    # an Engine: open a fresh raw connection that we own
    return engine_or_conn.raw_connection(), True
def _update_log_record(self, record):
    """Massage a log record before emitting it: guarantee the ``hostname``
    and ``job_id`` attributes exist. Intended to be used by the custom log
    handlers defined in this module."""
    if not hasattr(record, 'hostname'):
        record.hostname = '-'
    if not hasattr(record, 'job_id'):
        record.job_id = self.job_id
def __add_recent_file(self, fname):
    """Move *fname* to the front of the recent-files list, trimming the
    list to the 'max_recent_files' option."""
    if fname is None:
        return
    if fname in self.recent_files:
        self.recent_files.remove(fname)
    self.recent_files.insert(0, fname)
    # use a loop rather than a single pop: if the max_recent_files option
    # shrank since the last call, the list may exceed the limit by more
    # than one entry
    while len(self.recent_files) > self.get_option('max_recent_files'):
        self.recent_files.pop(-1)
def download_safe_format(product_id=None, tile=None, folder='.', redownload=False, entire_product=False, bands=None,
                         data_source=DataSource.SENTINEL2_L1C):
    """Download data in .SAFE format (nested dictionaries). Either
    ``product_id`` or ``tile`` must be specified.

    :param product_id: original ESA product identification string. Default is ``None``
    :type product_id: str
    :param tile: tuple containing tile name and sensing time/date. Default is ``None``
    :type tile: (str, str)
    :param folder: directory where the fetched data will be saved. Default is ``'.'``
    :type folder: str
    :param redownload: if ``True``, download again even when the data is already on disk. Default is ``False``
    :type redownload: bool
    :param entire_product: when a tile is specified, whether to place it inside the
        .SAFE structure of its product. Default is ``False``
    :type entire_product: bool
    :param bands: list of bands to download; ``None`` means all bands. Default is ``None``
    :type bands: list(str) or None
    :param data_source: source of satellite data for tile requests. Default is
        Sentinel-2 L1C data.
    :type data_source: constants.DataSource
    :return: Nested dictionaries representing the .SAFE structure.
    :rtype: dict
    """
    # entire_product only makes sense when the product id must be derived from the tile
    entire_product = entire_product and product_id is None
    if tile is not None:
        safe_request = AwsTileRequest(tile=tile[0], time=tile[1], data_folder=folder, bands=bands,
                                      safe_format=True, data_source=data_source)
        if entire_product:
            product_id = safe_request.get_aws_service().get_product_id()
    if product_id is not None:
        if entire_product:
            safe_request = AwsProductRequest(product_id, tile_list=[tile[0]], data_folder=folder,
                                             bands=bands, safe_format=True)
        else:
            safe_request = AwsProductRequest(product_id, data_folder=folder, bands=bands,
                                             safe_format=True)
    safe_request.save_data(redownload=redownload)
def _check_choices_attribute(self):
    """Check that 'choices' only contains valid pytz timezone values.

    Returns a list with a single checks.Warning for the first invalid
    value found, or an empty list when everything validates."""
    if not self.choices:
        return []

    def make_warning(value):
        # build the warning lazily so the message carries the bad value
        return [checks.Warning(
            msg=("'choices' contains an invalid time zone value '{value}' "
                 "which was not found as a supported time zone by pytz "
                 "{version}.").format(value=value, version=pytz.VERSION),
            hint="Values must be found in pytz.all_timezones.",
            obj=self,
        )]

    for option_key, option_value in self.choices:
        if isinstance(option_value, (list, tuple)):
            # optgroup: validate the key of each grouped choice
            for group_key in (choice[0] for choice in option_value):
                if group_key not in pytz.all_timezones and group_key not in self.empty_values:
                    return make_warning(group_key)
        elif option_key not in pytz.all_timezones and option_key not in self.empty_values:
            return make_warning(option_key)
    return []
def cookie(
        url,
        name,
        value,
        expires=None):
    u"""Return a new Cookie using a slightly friendlier API than the one
    provided by six.moves.http_cookiejar.

    @param name The cookie name {str}
    @param value The cookie value {str}
    @param url The URL path of the cookie {str}
    @param expires The expiry time of the cookie {datetime}. If provided,
                   it must be a naive timestamp in UTC.
    """
    parts = urlparse(url)
    domain = parts.hostname
    if '.' not in domain and not _is_ip_addr(domain):
        # bare hostnames need a dot for cookiejar domain matching
        domain += ".local"
    port = None if parts.port is None else str(parts.port)
    if expires is not None:
        if expires.tzinfo is not None:
            raise ValueError('Cookie expiration must be a naive datetime')
        # seconds since the UTC epoch
        expires = (expires - datetime(1970, 1, 1)).total_seconds()
    return http_cookiejar.Cookie(
        version=0,
        name=name,
        value=value,
        port=port,
        port_specified=port is not None,
        domain=domain,
        domain_specified=True,
        domain_initial_dot=False,
        path=parts.path,
        path_specified=True,
        secure=parts.scheme == 'https',
        expires=expires,
        discard=False,
        comment=None,
        comment_url=None,
        rest=None,
        rfc2109=False,
    )
def convert_tensor_float_to_float16(tensor):
    """Convert a float32 TensorProto to float16 in place.

    :param tensor: TensorProto object
    :return tensor_float16: converted TensorProto object

    Example:
    ::
        from onnxmltools.utils.float16_converter import convert_tensor_float_to_float16
        new_tensor = convert_tensor_float_to_float16(tensor)
    """
    if not isinstance(tensor, onnx_proto.TensorProto):
        raise ValueError('Expected input type is an ONNX TensorProto but got %s' % type(tensor))
    if tensor.data_type == onnx_proto.TensorProto.FLOAT:
        tensor.data_type = onnx_proto.TensorProto.FLOAT16
        # float16 values are stored bit-packed in the int32_data field
        if tensor.float_data:
            int_list = _npfloat16_to_int(np.float16(tensor.float_data))
            tensor.int32_data[:] = int_list
            tensor.float_data[:] = []
        if tensor.raw_data:
            # np.fromstring/ndarray.tostring are deprecated and removed in
            # modern numpy; frombuffer/tobytes are the supported equivalents
            float32_list = np.frombuffer(tensor.raw_data, dtype='float32')
            tensor.raw_data = np.float16(float32_list).tobytes()
    return tensor
def get_distance(self, node):
    """Return the Euclidean distance between this node and another.

    Args:
        node (object): The other node.
    """
    dx = node.pos[0] - self.pos[0]
    dy = node.pos[1] - self.pos[1]
    return sqrt(dx ** 2 + dy ** 2)
def portfolio(self) -> List[PortfolioItem]:
    """List of portfolio items of the default (first) account."""
    default_account = self.wrapper.accounts[0]
    return list(self.wrapper.portfolio[default_account].values())
def _is_homogeneous_type(self):
    """Whether all the columns in a DataFrame have the same type.

    Returns
    -------
    bool

    Examples
    --------
    >>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type
    True
    >>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type
    False

    Items with the same type but different sizes are considered
    different types.

    >>> DataFrame({
    ...     "A": np.array([1, 2], dtype=np.int32),
    ...     "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type
    False
    """
    data = self._data
    if data.any_extension_types:
        # extension blocks must all share one dtype
        return len({block.dtype for block in data.blocks}) == 1
    return not data.is_mixed_type
async def jsk_retain(self, ctx: commands.Context, *, toggle: bool = None):
    """Turn variable retention for REPL on or off.

    Provide no argument for current status.
    """
    if toggle is None:
        # report-only path
        if self.retain:
            return await ctx.send("Variable retention is set to ON.")
        return await ctx.send("Variable retention is set to OFF.")
    if toggle:
        if self.retain:
            return await ctx.send("Variable retention is already set to ON.")
        self.retain = True
        self._scope = Scope()
        return await ctx.send("Variable retention is ON. Future REPL sessions will retain their scope.")
    if not self.retain:
        return await ctx.send("Variable retention is already set to OFF.")
    self.retain = False
    return await ctx.send("Variable retention is OFF. Future REPL sessions will dispose their scope when done.")
def load_json(file, new_root_dir=None, decompression=False):
    """Load an object from a JSON file using json_tricks, optionally
    rebasing its ``root_dir`` attribute."""
    mode = 'rb' if decompression else 'r'
    with open(file, mode) as handle:
        obj = load(handle, decompression=decompression)
    if new_root_dir:
        obj.root_dir = new_root_dir
    return obj
def _os_dispatch(func, *args, **kwargs):
    """Internal: dispatch to the per-OS implementation '_<kernel>_<func>'."""
    if __grains__['kernel'] in SUPPORTED_BSD_LIKE:
        kernel = 'bsd'
    else:
        kernel = __grains__['kernel'].lower()
    impl = getattr(sys.modules[__name__], '_{0}_{1}'.format(kernel, func))
    if callable(impl):
        return impl(*args, **kwargs)
def unpack(d):
    """Unpack and return a framed binary SBP message."""
    parsed = SBP._parser.parse(d)
    assert parsed.preamble == SBP_PREAMBLE, "Invalid preamble 0x%x." % parsed.preamble
    return SBP(parsed.msg_type, parsed.sender, parsed.length, parsed.payload, parsed.crc)
def create_permissao_administrativa(self):
    """Get an instance of the permissao_administrativa services facade."""
    return PermissaoAdministrativa(self.networkapi_url,
                                   self.user,
                                   self.password,
                                   self.user_ldap)
def dependencies_order_of_build(target_contract, dependencies_map):
    """Return an ordered list of contracts that is sufficient to
    successfully deploy the target contract.

    Note:
        This function assumes that `dependencies_map` is an acyclic graph.
    """
    if not dependencies_map:
        return [target_contract]
    if target_contract not in dependencies_map:
        raise ValueError('no dependencies defined for {}'.format(target_contract))
    order = [target_contract]
    pending = list(dependencies_map[target_contract])
    while pending:
        contract = pending.pop(0)
        # insert the contract just before its earliest dependency already
        # placed in the order (or at the end when none are placed yet)
        insert_at = len(order)
        for dependency in dependencies_map[contract]:
            if dependency in order:
                insert_at = order.index(dependency)
            else:
                pending.append(dependency)
        order.insert(insert_at, contract)
    order.reverse()
    return order
def get_nexusport_binding(port_id, vlan_id, switch_ip, instance_id):
    """List nexusport bindings matching all of the given identifiers."""
    LOG.debug("get_nexusport_binding() called")
    return _lookup_all_nexus_bindings(port_id=port_id,
                                      vlan_id=vlan_id,
                                      switch_ip=switch_ip,
                                      instance_id=instance_id)
def pubsub_pub(self, topic, payload, **kwargs):
    """Publish a message to a given pubsub topic.

    The given payload (string) is delivered to everyone currently
    subscribed to the topic. All data (including the id of the
    publisher) is automatically base64 encoded when published.

    .. code-block:: python

        # publishes the message 'message' to the topic 'hello'
        >>> c.pubsub_pub('hello', 'message')
        []

    Parameters
    ----------
    topic : str
        Topic to publish to
    payload : Data to be published to the given topic

    Returns
    -------
        list : empty list
    """
    return self._client.request('/pubsub/pub', (topic, payload),
                                decoder='json', **kwargs)
def add_config_paths(**kwargs):
    """Add to the pool of available configuration files for BIDSLayout.

    Args:
        kwargs: dictionary specifying where to find additional config files.
            Keys are names, values are paths to the corresponding .json file.

    Example:
        > add_config_paths(my_config='/path/to/config')
        > layout = BIDSLayout('/path/to/bids', config=['bids', 'my_config'])
    """
    for name, path in kwargs.items():
        if not os.path.exists(path):
            # report the offending path as well, not just the config name
            raise ValueError(
                'Configuration file "{}" does not exist ({})'.format(name, path))
        if name in cf.get_option('config_paths'):
            raise ValueError('Configuration {!r} already exists'.format(name))
    kwargs.update(**cf.get_option('config_paths'))
    cf.set_option('config_paths', kwargs)
def _write_to_command_buffer(self, to_write):
    """Write input to the command buffer, reformatted appropriately.

    Args:
        to_write (str): The string to write to the command buffer.
    """
    np.copyto(self._command_bool_ptr, True)
    # commands are terminated with a trailing '0' sentinel character
    payload = str.encode(to_write + '0')
    for offset, byte in enumerate(payload):
        self._command_buffer_ptr[offset] = byte
def _is_replacement_allowed(self, s):
    """Return True when replacement is allowed on the given piece of HTML text."""
    if any(tag in s.parent_tags for tag in self.skipped_tags):
        return False
    # every involved tag must be a textflow tag
    return all(tag in self.textflow_tags for tag in s.involved_tags)
def clean_global_runtime_state(reset_subsystem=False):
    """Reset the global runtime state of a pants runtime for cleaner forking.

    :param bool reset_subsystem: Whether or not to clean Subsystem global state.
    """
    if reset_subsystem:
        Subsystem.reset()
    Goal.clear()
    BuildConfigInitializer.reset()
def register_logger(self, logger):
    """Register a new logger, syncing its level with the output verbosity."""
    handler = CommandHandler(self)
    handler.setFormatter(CommandFormatter())
    logger.handlers = [handler]
    logger.propagate = False
    out = self.output
    if out.is_debug():
        level = logging.DEBUG
    elif out.is_very_verbose() or out.is_verbose():
        level = logging.INFO
    else:
        level = logging.WARNING
    logger.setLevel(level)
def handle_routine(self, que, opts, host, target, mine=False):
    """Run the routine in a "Thread", put a result dict on the queue."""
    opts = copy.deepcopy(opts)
    single = Single(
        opts,
        opts['argv'],
        host,
        mods=self.mods,
        fsclient=self.fsclient,
        thin=self.thin,
        mine=mine,
        **target)
    ret = {'id': single.id}
    stdout, stderr, retcode = single.run()
    raw = {
        'stdout': stdout,
        'stderr': stderr,
        'retcode': retcode,
    }
    try:
        data = salt.utils.json.find_json(stdout)
        if len(data) < 2 and 'local' in data:
            ret['ret'] = data['local']
        else:
            ret['ret'] = raw
    except Exception:
        # stdout was not parseable JSON; fall back to the raw streams
        ret['ret'] = raw
    que.put(ret)
def main():
    """Input asteroid family, filter type, and image type to query SSOIS."""
    parser = argparse.ArgumentParser(
        description='Run SSOIS and return the available images in a particular filter.')
    parser.add_argument("--filter",
                        action="store",
                        default='r',
                        dest="filter",
                        choices=['r', 'u'],
                        help="Passband: default is r.")
    parser.add_argument("--family", '-f',
                        action="store",
                        default=None,
                        help='List of objects to query.')
    parser.add_argument("--member", '-m',
                        action="store",
                        default=None,
                        help='Member object of family to query.')
    args = parser.parse_args()
    # prefer identity comparisons with None over ==/!=
    if args.family is not None and args.member is None:
        get_family_info(str(args.family), args.filter)
    elif args.family is None and args.member is not None:
        get_member_info(str(args.member), args.filter)
    else:
        # original used a Python 2 print statement, a SyntaxError on Python 3
        print("Please input either a family or single member name")
def parse(self, rrstr):
    """Parse a Rock Ridge Child Link record out of a string.

    Parameters:
     rrstr - The string to parse the record out of.
    Returns:
     Nothing.
    """
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('CL record already initialized!')
    # skip the 2-byte signature, then: SU length, SU entry version, LE/BE block numbers
    (su_len, su_entry_version_unused,
     block_num_le, block_num_be) = struct.unpack_from('=BBLL', rrstr[:12], 2)
    if su_len != RRCLRecord.length():
        raise pycdlibexception.PyCdlibInvalidISO('Invalid length on rock ridge extension')
    # the block number is stored both little- and big-endian; they must agree
    if block_num_le != utils.swab_32bit(block_num_be):
        raise pycdlibexception.PyCdlibInvalidISO('Little endian block num does not equal big endian; corrupt ISO')
    self.child_log_block_num = block_num_le
    self._initialized = True
def list_ptr_records(self, device):
    """Return a list of all PTR records configured for this device."""
    device_type = self._resolve_device_type(device)
    href, svc_name = self._get_ptr_details(device, device_type)
    uri = "/rdns/%s?href=%s" % (svc_name, href)
    try:
        resp, resp_body = self._retry_get(uri)
    except exc.NotFound:
        # no reverse-DNS entries for this device
        return []
    return [CloudDNSPTRRecord(rec, device)
            for rec in resp_body.get("records", [])]
def serialize(self, sw):
    """Serialize this fault object through the given serializer."""
    detail = None
    if self.detail is not None:
        detail = Detail()
        detail.any = self.detail
    fault = FaultType(self.code, self.string, self.actor, detail)
    sw.serialize(fault, typed=False)
def kwargs(self):
    """The keyword arguments that unpack something (i.e. ``**kwargs``).

    :type: list(Keyword)
    """
    return [kw for kw in (self.keywords or []) if kw.arg is None]
def _apply_base_theme(app):
    """Apply the base style and stylesheet to the application.

    Args:
        app (QApplication): QApplication instance.
    """
    # Qt4 ships 'plastique'; Qt5+ uses 'Fusion'
    app.setStyle('plastique' if QT_VERSION < (5,) else 'Fusion')
    with open(_STYLESHEET) as stylesheet:
        app.setStyleSheet(stylesheet.read())
def add_dataset(self, dataset, datasets_to_check=None):
    """Add a dataset to the showcase unless it is already present.

    Args:
        dataset (Union[Dataset,Dict,str]): Either a dataset id or dataset metadata
            either from a Dataset object or a dictionary
        datasets_to_check (List[Dataset]): List of datasets against which to check
            existence of dataset. Defaults to datasets in showcase.

    Returns:
        bool: True if the dataset was added, False if already present
    """
    showcase_dataset = self._get_showcase_dataset_dict(dataset)
    if datasets_to_check is None:
        datasets_to_check = self.get_datasets()
    # use a distinct loop name so the parameter is not shadowed
    if any(showcase_dataset['package_id'] == existing['id']
           for existing in datasets_to_check):
        return False
    self._write_to_hdx('associate', showcase_dataset, 'package_id')
    return True
def requiredUnless(col_name, arg, dm, df, *args):
    """Check that col_name is present in df unless every column named in
    *arg* is present.

    Arg is a string in the format '"str1", "str2", ...'; each string is a
    column name. Dotted names refer to other tables and are skipped.
    Returns an error message string, or None when the constraint holds.
    """
    if col_name in df.columns:
        return None
    # strip surrounding whitespace BEFORE quotes so ' "b"' parses as 'b'
    required_cols = [token.strip().strip('"') for token in arg.split(",")]
    msg = ""
    for col in required_cols:
        if "." in col:
            # dotted names reference other sheets/tables; not checked here
            continue
        if col not in df.columns:
            msg += "{} column is required unless {} is present. ".format(col_name, col)
    # the original ended with an unreachable 'return None' after if/else
    return msg if msg else None
def mold_id_to_path(self, mold_id, default=_marker):
    """Lookup the filesystem path of a mold identifier."""
    def missing(debug_msg=None):
        # centralises the "not found" policy: raise unless a default was given
        if debug_msg:
            logger.debug('mold_id_to_path:' + debug_msg, mold_id)
        if default is _marker:
            raise KeyError(
                'Failed to lookup mold_id %s to a path' % mold_id)
        return default

    path = self.molds.get(mold_id)
    if path:
        return path
    if not self.tracked_entry_points:
        return missing()
    try:
        prefix, mold_basename = mold_id.split('/')
    except ValueError:
        return missing('mold_id %s not found and not in standard format')
    entry_point = self.tracked_entry_points.get(prefix)
    if entry_point is None:
        return missing()
    return join(self._entry_point_to_path(entry_point), mold_basename)
def convert_coord_object(coord):
    """Convert a ModestMaps.Core.Coordinate into a raw_tiles.tile.Tile."""
    assert isinstance(coord, Coordinate)
    container = coord.container()
    return Tile(int(container.zoom), int(container.column), int(container.row))
def add_loghandler(handler):
    """Attach *handler* to the root and LOG_ROOT loggers with standard formatting."""
    fmt = "%(levelname)s %(name)s %(asctime)s %(threadName)s %(message)s"
    handler.setFormatter(logging.Formatter(fmt))
    logging.getLogger(LOG_ROOT).addHandler(handler)
    logging.getLogger().addHandler(handler)
def outputs_of(self, idx, create=False):
    """Return the set of outputs for the node at *idx*.

    :param idx: node index to look up.
    :param create: when True, create (and store) an empty set for a node
        with no entry instead of raising.
    :raises KeyError: when *idx* has no entry and ``create`` is False.
    """
    if create:
        # setdefault replaces the manual membership-check-then-insert
        return self.edges.setdefault(idx, set())
    return self.edges[idx]
def localortho(lon, lat):
    """Create an osr SRS for a local orthographic projection centered at (lat, lon)."""
    srs = osr.SpatialReference()
    proj4 = '+proj=ortho +lat_0=%0.7f +lon_0=%0.7f +datum=WGS84 +units=m +no_defs ' % (lat, lon)
    srs.ImportFromProj4(proj4)
    return srs
def isSuperTypeOf(self, other, matchTags=True, matchConstraints=True):
    """Examine |ASN.1| type for a supertype relationship with another ASN.1 type.

    ASN.1 tags (:py:mod:`~pyasn1.type.tag`) and constraints
    (:py:mod:`~pyasn1.type.constraint`) are examined when carrying out the
    comparison; Python class inheritance is NOT considered.

    Parameters
    ----------
    other: a pyasn1 type object
        Class instance representing ASN.1 type.

    Returns
    -------
    : :class:`bool`
        :class:`True` if *other* is a subtype of |ASN.1| type,
        :class:`False` otherwise.
    """
    # NOTE(review): precedence makes this read as
    # `not matchTags or (tag-check and constraint-check)`; preserved as-is.
    return (not matchTags or
            (self.tagSet.isSuperTagSetOf(other.tagSet)) and
            (not matchConstraints or self.subtypeSpec.isSuperTypeOf(other.subtypeSpec)))
def group_theta(node_length, node_idx):
    """Return the angle (in radians) corresponding to a node of interest.

    Intended to be used for placing node group labels at the correct spot.

    :param float node_length: total number of nodes in the graph.
    :param int node_idx: the index of the node of interest.
    :returns: theta -- the angle of the node of interest in radians.
    """
    return -np.pi + node_idx * 2 * np.pi / node_length
def copy_path_flat(self):
    """Return a flattened copy of the current path.

    Like :meth:`copy_path`, except that any curves are approximated with
    piecewise-linear segments (accurate to within the current tolerance
    value, see :meth:`set_tolerance`), so no :obj:`CURVE_TO
    <PATH_CURVE_TO>` elements appear — they are replaced by series of
    :obj:`LINE_TO <PATH_LINE_TO>` elements.

    :returns:
        A list of ``(path_operation, coordinates)`` tuples.
        See :meth:`copy_path` for the data structure.
    """
    path = cairo.cairo_copy_path_flat(self._pointer)
    try:
        return list(_iter_path(path))
    finally:
        # destroy the C path even if iteration raises, avoiding a leak
        cairo.cairo_path_destroy(path)
def end_coordsys(self):
    """Coordinate system at the end of this effect.

    All axes stay parallel to the original vector evaluation location;
    only the origin is moved to this effect's end point.

    :return: coordinate system at end of effect
    :rtype: :class:`CoordSys`
    """
    result = copy(self.location)
    result.origin = self.end_point
    return result
def is_model_admin_subclass(node):
    """Return True if *node* is a top-level class named '*Admin' deriving
    from django's ModelAdmin."""
    if not node.name.endswith('Admin') or isinstance(node.parent, ClassDef):
        return False
    return node_is_subclass(node, 'django.contrib.admin.options.ModelAdmin')
def cache_as_field(cache_name):
    """Decorator factory: cache a function's return value on the instance
    attribute *cache_name*."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            cached = getattr(self, cache_name, UndefToken)
            if cached != UndefToken:
                return cached
            result = func(self, *args, **kwargs)
            setattr(self, cache_name, result)
            return result
        return wrapper
    return decorator
async def stop(self, wait_for_completion=True):
    """Stop the window.

    Parameters:
        * wait_for_completion: If set, function will return after the
          device has reached its target position.
    """
    await self.set_position(
        position=CurrentPosition(),
        wait_for_completion=wait_for_completion)
def range(self, axis=None):
    """Return the (min, max) tuple along the specified axis."""
    return self.min(axis=axis), self.max(axis=axis)
def single_download_photos(photos):
    """Download photos sequentially in the current (single) process.

    :param photos: The photos to be downloaded
    :type photos: list of dicts
    """
    global counter
    counter = len(photos)
    for photo in photos:
        download_photo(photo)
def execute(self):
    """Generate the local DB, pulling metadata and data from RWSConnection."""
    logging.info('Requesting view metadata for project %s' % self.project_name)
    meta = self.rws_connection.send_request(ProjectMetaDataRequest(self.project_name))
    self.db_adapter.processMetaData(meta)
    for dataset_name in self.db_adapter.datasets.keys():
        logging.info('Requesting data from dataset %s' % dataset_name)
        form_name, _type = self.name_type_from_viewname(dataset_name)
        form_data = self.rws_connection.send_request(
            FormDataRequest(self.project_name, self.environment, _type, form_name))
        logging.info('Populating dataset %s' % dataset_name)
        self.db_adapter.processFormData(form_data, dataset_name)
    logging.info('Process complete')
# Commodity channel index over a rolling `window`.
def cci(series, window=14):
    price = typical_price(series)
    typical_mean = rolling_mean(price, window)
    # NOTE(review): classic CCI divides by 0.015 * mean absolute deviation
    # of the typical price within the window; this uses np.std of the whole
    # rolling-mean series instead — confirm this is intentional.
    res = (price - typical_mean) / (.015 * np.std(typical_mean))
    return pd.Series(index=series.index, data=res) | compute commodity channel index |
# List functions registered in `dbName` (defaults to the current database)
# by draining the JVM-side iterator into Python Function records.
def listFunctions(self, dbName=None):
    if dbName is None:
        dbName = self.currentDatabase()
    # NOTE: local name shadows the builtin `iter`; it is a Java iterator
    # proxy (hasNext/next), not a Python iterator.
    iter = self._jcatalog.listFunctions(dbName).toLocalIterator()
    functions = []
    while iter.hasNext():
        jfunction = iter.next()
        functions.append(Function(
            name=jfunction.name(),
            description=jfunction.description(),
            className=jfunction.className(),
            isTemporary=jfunction.isTemporary()))
    return functions | Returns a list of functions registered in the specified database.
If no database is specified, the current database is used.
This includes all temporary functions. |
# Install an input hook that keeps matplotlib figures responsive on macOS
# by calling pyplot.pause() whenever the hook fires.
def enable_mac(self, app=None):
    def inputhook_mac(app=None):
        if self.pyplot_imported:
            pyplot = sys.modules['matplotlib.pyplot']
            try:
                pyplot.pause(0.01)
            # NOTE(review): bare except silently swallows everything,
            # including KeyboardInterrupt — deliberate best-effort here.
            except:
                pass
        else:
            # Defer touching pyplot until the user has imported it, so the
            # backend choice is not forced prematurely.
            if 'matplotlib.pyplot' in sys.modules:
                self.pyplot_imported = True
    self.set_inputhook(inputhook_mac)
    self._current_gui = GUI_OSX | Enable event loop integration with MacOSX.
We call function pyplot.pause, which updates and displays active
figure during pause. It's not MacOSX-specific, but it enables to
avoid inputhooks in native MacOSX backend.
Also we shouldn't import pyplot until the user does it, because it's
possible to choose backend before importing pyplot for the first
time only. |
# Delete the course-wide TranscriptPreference row, if any; absence is not
# an error (idempotent delete).
def remove_transcript_preferences(course_id):
    try:
        transcript_preference = TranscriptPreference.objects.get(course_id=course_id)
        transcript_preference.delete()
    except TranscriptPreference.DoesNotExist:
        pass | Deletes course-wide transcript preferences.
Arguments:
course_id(str): course id |
# Subset of self.terms whose `.random` flag is False (fixed effects only).
def fixed_terms(self):
    return {k: v for (k, v) in self.terms.items() if not v.random} | Return dict of all and only fixed effects in model. |
# Collect, for every <tag> child of `elem`, the requested attribute values
# plus the element text, flattened into one list with Nones dropped.
# NOTE(review): mutable default `attributes=["name"]` — benign here since
# it is only read, but a shared list across calls.
def get_list_from_xml(elem, tag="option", attributes=["name"]):
    return flatten(([option.get(attr) for attr in attributes] + [option.text] for option in elem.findall(tag)), exclude=[None]) | This function searches for all "option"-tags and returns a list with all attributes and texts. |
# View decorator: redirect anonymous users to the login page, preserving
# the originally requested URL in the `next` query parameter.
def require_openid(f):
    @wraps(f)
    def decorator(*args, **kwargs):
        if g.user is None:
            # NOTE(review): request.url is appended without URL-encoding;
            # confirm the login handler tolerates unescaped query strings.
            next_url = url_for("login") + "?next=" + request.url
            return redirect(next_url)
        else:
            return f(*args, **kwargs)
    return decorator | Require user to be logged in. |
# Serialize `entity` field-by-field per self.spec, collecting per-field
# validation errors and raising them together at the end.
def serialize(self, entity, request=None):
    # Skip values flagged as "missing" unless the field is required.
    def should_we_insert(value, field_spec):
        return value not in self.missing or field_spec.required
    errors = {}
    ret = {}
    for field_name, field_spec in self.spec.fields.items():
        value = self._get_value_for_serialization(entity, field_name, field_spec)
        func = self._get_serialize_func(field_name, self.spec)
        try:
            value = func(value, entity, request)
            if should_we_insert(value, field_spec):
                ret[field_name] = value
        # NOTE: Python 2 except syntax; Python 3 spells this
        # "except ValidationError as e".
        except ValidationError, e:
            if hasattr(e, 'message_dict'):
                # Nested errors are flattened to "field.subkey" entries.
                errors.update(dict(zip(
                    [field_name + '.' + key for key in e.message_dict.keys()],
                    e.message_dict.values())))
            else:
                errors[field_name] = e.messages
    if errors:
        raise ValidationError(errors)
    # Empty result is normalized to None rather than {}.
    return None if ret == {} else ret | Serialize entity into dictionary.
The spec can affect how individual fields will be serialized by
implementing ``serialize()`` for the fields needing customization.
:returns: dictionary |
# Walk up the AST parents until a ClassDef is found.
# NOTE: returns None implicitly when no enclosing class exists; the
# topmost (parentless) node is never tested by the loop condition.
def get_node_parent_class(node):
    while node.parent:
        if isinstance(node, ClassDef):
            return node
        node = node.parent | Supposes that node is a mongoengine field in a class and tries to
get its parent class |
# Callback hook: reset the wait counter and capture the learner's
# optimizer before delegating to the parent implementation.
def on_train_begin(self, **kwargs:Any)->None:
    "Initialize inner arguments."
    self.wait, self.opt = 0, self.learn.opt
    super().on_train_begin(**kwargs) | Initialize inner arguments. |
# Remove this directory if it is blank; otherwise delegate to
# remove_empty_dir to prune empty subdirectories.
def remove_blank_dirs(self):
    if self.is_blank():
        try:
            os.rmdir(self.path)
        # rmdir failure (e.g. not actually empty) is reported, not raised.
        except OSError as e:
            print(e)
    else:
        remove_empty_dir(self.path) | Remove blank dir and all blank subdirectories |
# Run MyPy over `paths` (and optionally inline `code` via -c), printing
# each diagnostic once and registering an error for the session.
def run_mypy(self, paths=(), code=None):
    if self.mypy:
        set_mypy_path(stub_dir)
        from coconut.command.mypy import mypy_run
        args = list(paths) + self.mypy_args
        if code is not None:
            args += ["-c", code]
        for line, is_err in mypy_run(args):
            # Deduplicate output when compiling inline code.
            if code is None or line not in self.mypy_errs:
                printerr(line)
            if line not in self.mypy_errs:
                self.mypy_errs.append(line)
            self.register_error(errmsg="MyPy error") | Run MyPy with arguments. |
# Long-running task: start the websocket server, report the bound port (or
# the startup failure) through `started_signal`, then idle until cancelled
# and shut the server down cleanly.
async def _run_server_task(self, started_signal):
    try:
        server = await websockets.serve(self._manage_connection, self.host, self.port)
        # Port 0 may have been requested; report the actually-bound port.
        port = server.sockets[0].getsockname()[1]
        started_signal.set_result(port)
    except Exception as err:
        self._logger.exception("Error starting server on host %s, port %s", self.host, self.port)
        started_signal.set_exception(err)
        return
    try:
        # Sleep loop keeps the task alive; cancellation is the stop signal.
        while True:
            await asyncio.sleep(1)
    except asyncio.CancelledError:
        self._logger.info("Stopping server due to stop() command")
    finally:
        server.close()
        await server.wait_closed()
        self._logger.debug("Server stopped, exiting task") | Create a BackgroundTask to manage the server.
This allows subclasses to attach their server-related tasks as
subtasks that are properly cleaned up when this parent task is
stopped and not require them all to overload start() and stop()
to perform this action. |
# Deprecated shim: parse a serialized Location URL into a usage key,
# forcing this course's run onto the result.
def make_usage_key_from_deprecated_string(self, location_url):
    warnings.warn(
        "make_usage_key_from_deprecated_string is deprecated! Please use make_usage_key",
        DeprecationWarning,
        stacklevel=2
    )
    return BlockUsageLocator.from_string(location_url).replace(run=self.run) | Deprecated mechanism for creating a UsageKey given a CourseKey and a serialized Location.
NOTE: this prejudicially takes the tag, org, and course from the url not self.
Raises:
InvalidKeyError: if the url does not parse |
# Evaluate the EOS function at `volume` using the fitted parameters.
def func(self, volume):
    return self._func(np.array(volume), self.eos_params) | The equation of state function with the paramters other than volume set
to the ones obtained from fitting.
Args:
volume (list/numpy.array)
Returns:
numpy.array |
# Load the bundled, subsampled 68k PBMC dataset shipped next to this module.
def pbmc68k_reduced() -> AnnData:
    filename = os.path.dirname(__file__) + '/10x_pbmc68k_reduced.h5ad'
    return sc.read(filename) | Subsampled and processed 68k PBMCs.
10x PBMC 68k dataset from
https://support.10xgenomics.com/single-cell-gene-expression/datasets
The original PBMC 68k dataset was preprocessed using scanpy and was saved
keeping only 724 cells and 221 highly variable genes.
The saved file contains the annotation of cell types (key: 'bulk_labels'), UMAP coordinates,
louvain clustering and gene rankings based on the bulk_labels.
Returns
-------
Annotated data matrix. |
# Find the named permission on the object and return its granted roles,
# restricted to roles valid for that object; unknown names raise.
def get_roles_for_permission(permission, brain_or_object):
    obj = api.get_object(brain_or_object)
    valid_roles = get_valid_roles_for(obj)
    for item in obj.ac_inherited_permissions(1):
        name, value = item[:2]
        if name == permission:
            # NOTE: the `permission` parameter (a string) is rebound here
            # to a Permission object before extracting its roles.
            permission = Permission(name, value, obj)
            roles = permission.getRoles()
            return filter(lambda r: r in valid_roles, roles)
    raise ValueError("The permission {} is invalid.".format(permission)) | Return the roles of the permission that is granted on the object
Code extracted from `IRoleManager.rolesOfPermission`
:param permission: The permission to get the roles
:param brain_or_object: Catalog brain or object
:returns: List of roles having the permission |
# Thread target: drain `stream` line by line into `outbuf` until EOF,
# logging each line, then close the stream.
def _reader(self, name, stream, outbuf):
    while True:
        s = stream.readline()
        # Empty bytes means EOF on the pipe.
        if not s:
            break
        s = s.decode('utf-8').rstrip()
        outbuf.append(s)
        logger.debug('%s: %s' % (name, s))
    stream.close() | Thread runner for reading lines of from a subprocess into a buffer.
:param name: The logical name of the stream (used for logging only).
:param stream: The stream to read from. This will typically be a pipe
connected to the output stream of a subprocess.
:param outbuf: The list to append the read lines to. |
# Deep-copy the underlying HDF5 data and wrap it in a new QPImage.
def copy(self, h5file=None):
    h5 = copyh5(self.h5, h5file)
    return QPImage(h5file=h5, h5dtype=self.h5dtype) | Create a copy of the current instance
This is done by recursively copying the underlying hdf5 data.
Parameters
----------
h5file: str, h5py.File, h5py.Group, or None
see `QPImage.__init__` |
# Convert a JSON string (or an already-parsed dict) to an XML byte string
# using the given Element factory.
def json2xml(json_data, factory=ET.Element):
    if not isinstance(json_data, dict):
        json_data = json.loads(json_data)
    elem = internal_to_elem(json_data, factory)
    return ET.tostring(elem) | Convert a JSON string into an XML string.
Whatever Element implementation we could import will be used by
default; if you want to use something else, pass the Element class
as the factory parameter. |
# Ensure the default sections exist in `config`, adding any that are
# missing. NOTE(review): despite the row's docstring, this mutates an
# existing parser in memory rather than creating a file.
def check_sections(config):
    default_sections = ['global', 'auth', 'napps', 'kytos']
    for section in default_sections:
        if not config.has_section(section):
            config.add_section(section) | Create a empty config file. |
# Colorize every occurrence of each cmap key inside `text`; a dict value
# passes full colored() kwargs, anything else is just a color name.
def colored_map(text, cmap):
    # __ISON acts as a global "colors enabled" switch.
    if not __ISON: return text
    for key, v in cmap.items():
        if isinstance(v, dict):
            text = text.replace(key, colored(key, **v))
        else:
            text = text.replace(key, colored(key, color=v))
    return text | Return colorized text. cmap is a dict mapping tokens to color options.
.. Example:
colored_key("foo bar", {bar: "green"})
colored_key("foo bar", {bar: {"color": "green", "on_color": "on_red"}}) |
# Render this object's HTML repr inline in the IPython notebook.
def display(self):
    # Imported lazily so the class does not require IPython at import time.
    from IPython.core.display import display, HTML
    display(HTML(self._repr_html_())) | Display the visualization inline in the IPython notebook.
This is deprecated, use the following instead::
from IPython.display import display
display(viz) |
# Generator-based coroutine: normalize a falsy name to '', update local
# client config, then push the name to the server.
def set_name(self, name):
    if not name:
        name = ''
    self._client['config']['name'] = name
    yield from self._server.client_name(self.identifier, name) | Set a client name. |
# Per-bin means of wavelength and throughput (NaNs ignored), stacked as a
# 2-row array [wavelength_centers, flux_centers].
def centers(self):
    w_cen = np.nanmean(self.wave.value, axis=1)
    f_cen = np.nanmean(self.throughput, axis=1)
    return np.asarray([w_cen, f_cen]) | A getter for the wavelength bin centers and average fluxes |
# Block until one worker event arrives, then dispatch it to the matching
# worker_<callname> method; shuts down when no workers remain or when the
# scheduler reports all tests finished.
def loop_once(self):
    while 1:
        if not self._active_nodes:
            self.triggershutdown()
            raise RuntimeError("Unexpectedly no active workers available")
        try:
            # Short timeout so worker loss is re-checked every 2 seconds.
            eventcall = self.queue.get(timeout=2.0)
            break
        except Empty:
            continue
    callname, kwargs = eventcall
    # kwargs doubles as the assertion message if callname is falsy.
    assert callname, kwargs
    method = "worker_" + callname
    call = getattr(self, method)
    self.log("calling method", method, kwargs)
    call(**kwargs)
    if self.sched.tests_finished:
        self.triggershutdown() | Process one callback from one of the workers. |
# Kick off this module in background mode (time_limit stored on
# self.background) and return (results, poller) for later polling.
def run_async(self, time_limit):
    self.background = time_limit
    results = self.run()
    return results, poller.AsyncPoller(results, self) | Run this module asynchronously and return a poller. |
# PUT a subscription document for this short name; `buffer` shadows the
# builtin but is part of the public API.
def create(self, uri, buffer="queue", interval=10):
    return self._http_client.put_json("subscriptions/{}".format(self.short_name), {
        "subscription": {
            "uri": uri,
            "buffer": buffer,
            "interval": interval,
        }
    }) | Create a subscription with this short name and the provided parameters
For more information on what the parameters required here mean, please
refer to the `WVA Documentation <http://goo.gl/DRcOQf>`_.
:raises WVAError: If there is a problem creating the new subscription |
# Recompute value[slice] for a subscript node and memoize the result.
def visit_Subscript(self, node: ast.Subscript) -> Any:
    value = self.visit(node=node.value)
    a_slice = self.visit(node=node.slice)
    result = value[a_slice]
    # Cache the recomputed value keyed by the AST node itself.
    self.recomputed_values[node] = result
    return result | Visit the ``slice`` and a ``value`` and get the element. |
# Checkpoint either when the trial just finished (and checkpoint_at_end is
# set) or when the training iteration hits a checkpoint_freq multiple.
def should_checkpoint(self):
    result = self.last_result or {}
    if result.get(DONE) and self.checkpoint_at_end:
        return True
    if self.checkpoint_freq:
        return result.get(TRAINING_ITERATION,
                          0) % self.checkpoint_freq == 0
    else:
        return False | Whether this trial is due for checkpointing. |
# Accessor for the metric eigenvectors; raises if they were never set.
def evecs(self):
    if self._evecs is None:
        errMsg = "The metric eigenvectors have not been set in the "
        errMsg += "metricParameters instance."
        raise ValueError(errMsg)
    return self._evecs | The eigenvectors of the parameter space.
This is a Dictionary of numpy.matrix
Each entry in the dictionary is as described under evals.
Each numpy.matrix contains the eigenvectors which, with the eigenvalues
in evals, are needed to rotate the
coordinate system to one in which the metric is the identity matrix. |
# Fetch a task batch from the server, compute every task, and report the
# accumulated results back under the same task id.
def run(self):
    (task_id, tasks) = self.server.get_task()
    self.task_store.from_dict(tasks)
    for (index, task) in self.task_store:
        result = self.compute(index, task)
        self.results.append(result)
    self.server.task_done((task_id, self.results)) | This function needs to be called to start the computation. |
# Two intersections match when their curve indices are identical and their
# (s, t) parameters agree to relative tolerance `wiggle` (no absolute slack).
def same_intersection(intersection1, intersection2, wiggle=0.5 ** 40):
    if intersection1.index_first != intersection2.index_first:
        return False
    if intersection1.index_second != intersection2.index_second:
        return False
    return np.allclose(
        [intersection1.s, intersection1.t],
        [intersection2.s, intersection2.t],
        atol=0.0,
        rtol=wiggle,
    ) | Check if two intersections are close to machine precision.
.. note::
This is a helper used only by :func:`verify_duplicates`, which in turn
is only used by :func:`generic_intersect`.
Args:
intersection1 (.Intersection): The first intersection.
intersection2 (.Intersection): The second intersection.
wiggle (Optional[float]): The amount of relative error allowed
in parameter values.
Returns:
bool: Indicates if the two intersections are the same to
machine precision. |
# Pipeline of in-place fixups on the MODS DOM produced by the XSLT
# template, then return the prettified XML string.
def postprocess_monograph(marc_xml, mods, uuid, counter, url):
    dom = double_linked_dom(mods)
    # Accept either a raw Aleph string or an already-parsed record.
    if not isinstance(marc_xml, MARCXMLRecord):
        marc_xml = MARCXMLRecord(marc_xml)
    add_missing_xml_attributes(dom, counter)
    fix_invalid_type_parameter(dom)
    if uuid:
        add_uuid(dom, uuid)
    add_marccountry_tag(dom)
    add_genre(dom)
    remove_hairs_from_tags(dom)
    fix_issuance(dom)
    fix_location_tag(dom)
    fix_related_item_tag(dom)
    fix_missing_electronic_locator_tag(dom, url)
    fix_missing_lang_tags(marc_xml, dom)
    return dom.prettify() | Fix bugs in `mods` produced by XSLT template.
Args:
marc_xml (str): Original Aleph record.
mods (str): XML string generated by XSLT template.
uuid (str): UUID of the package.
counter (int): Number of record, is added to XML headers.
url (str): URL of the publication (public or not).
Returns:
str: Updated XML. |
# Fabric task: provision the base server — system packages, log dir,
# pip/virtualenvwrapper, and shell configuration for the deploy user.
def install():
    sudo("apt-get update -y -q")
    apt("nginx libjpeg-dev python-dev python-setuptools git-core "
        "postgresql libpq-dev memcached supervisor python-pip")
    run("mkdir -p /home/%s/logs" % env.user)
    sudo("pip install -U pip virtualenv virtualenvwrapper mercurial")
    run("mkdir -p %s" % env.venv_home)
    run("echo 'export WORKON_HOME=%s' >> /home/%s/.bashrc" % (env.venv_home,
                                                              env.user))
    run("echo 'source /usr/local/bin/virtualenvwrapper.sh' >> "
        "/home/%s/.bashrc" % env.user)
    print(green("Successfully set up git, mercurial, pip, virtualenv, "
                "supervisor, memcached.", bold=True)) | Installs the base system and Python requirements for the entire server. |
# Offset the Julian date in `direction` and find the preceding weekday.
def search_weekday(weekday, jd, direction, offset):
    return weekday_before(weekday, jd + (direction * offset)) | Determine the Julian date for the next or previous weekday |
# Create the dataset if it does not already exist; validate the API
# response, then return self for chaining.
def create(self, friendly_name=None, description=None):
    if not self.exists():
        try:
            response = self._api.datasets_insert(self._name_parts,
                                                 friendly_name=friendly_name,
                                                 description=description)
        # NOTE(review): `raise e` simply re-raises unchanged — this
        # try/except adds nothing and could be removed.
        except Exception as e:
            raise e
        if 'selfLink' not in response:
            raise Exception("Could not create dataset %s" % self._full_name)
    return self | Creates the Dataset with the specified friendly name and description.
Args:
friendly_name: (optional) the friendly name for the dataset if it is being created.
description: (optional) a description for the dataset if it is being created.
Returns:
The Dataset.
Raises:
Exception if the Dataset could not be created. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.