code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def nb_ll(data, P, R):
    """Return the negative binomial log-likelihood of the data.

    Args:
        data (array): genes x cells
        P (array): NB success probability param - genes x clusters
        R (array): NB stopping param - genes x clusters

    Returns:
        cells x clusters array of log-likelihoods
    """
    n_genes, n_cells = data.shape
    n_clusters = P.shape[1]
    result = np.zeros((n_cells, n_clusters))
    for k in range(n_clusters):
        p_k = P[:, k].reshape((n_genes, 1))
        r_k = R[:, k].reshape((n_genes, 1))
        # Per-gene NB log-pmf terms; xlog1py handles log(1-p) stably.
        per_gene = gammaln(r_k + data) - gammaln(r_k)
        per_gene += data * np.log(p_k) + xlog1py(r_k, -p_k)
        # Sum over genes to get one log-likelihood per cell.
        result[:, k] = per_gene.sum(axis=0)
    return result
def transform_df(self, df):
    """Transform values in a dataframe.

    Applies each child transformer to ``df`` in order.

    Returns dataframe
    """
    if not len(df) or not len(self):
        return df
    for child in self:
        df = child.transform_df(df)
    return df
def read_yaml(filename, add_constructor=None):
    """Reads YAML files.

    :param filename:
        The full path to the YAML file
    :param add_constructor:
        A list of yaml constructors (loaders); a single (tag, callable)
        tuple is also accepted and wrapped into a list.
    :returns:
        Loaded YAML content as represented data structure, or ``None`` when
        the file is empty or unreadable.

    .. seealso::
        :func:`util.structures.yaml_str_join`,
        :func:`util.structures.yaml_loc_join`
    """
    y = read_file(filename)
    if add_constructor:
        # Accept a bare (tag, constructor) tuple as well as a list of them.
        if not isinstance(add_constructor, list):
            add_constructor = [add_constructor]
        for a in add_constructor:
            _yaml.add_constructor(*a)
    if y:
        # NOTE(review): _yaml.load() without an explicit Loader is deprecated
        # in PyYAML >= 5.1 and unsafe on untrusted input -- confirm intended
        # loader before hardening.
        return _yaml.load(y)
def multi_platform_open(cmd):
    """Take the given command and use the OS to automatically open the
    appropriate resource. For instance, if a URL is provided, this will have
    the OS automatically open the URL in the default web browser.
    """
    if platform == "linux" or platform == "linux2":
        cmd = ['xdg-open', cmd]
    elif platform == "darwin":
        cmd = ['open', cmd]
    elif platform == "win32":
        # 'start' is a cmd.exe builtin, not an executable, so it cannot be
        # spawned directly; it must be run through the shell interpreter.
        # The empty '' argument is the window title 'start' expects when the
        # target is quoted.
        cmd = ['cmd', '/c', 'start', '', cmd]
    subprocess.check_call(cmd)
def RtlGetVersion(os_version_info_struct):
    """Wraps the lowlevel RtlGetVersion routine.

    Args:
      os_version_info_struct: instance of either a RTL_OSVERSIONINFOW structure
                              or a RTL_OSVERSIONINFOEXW structure,
                              ctypes.Structure-wrapped, with the
                              dwOSVersionInfoSize field preset to
                              ctypes.sizeof(self).

    Raises:
      OSError: if the underlaying routine fails.

    See: https://msdn.microsoft.com/en-us/library/
    windows/hardware/ff561910(v=vs.85).aspx .
    """
    # RtlGetVersion returns an NTSTATUS; 0 (STATUS_SUCCESS) means success.
    rc = ctypes.windll.Ntdll.RtlGetVersion(ctypes.byref(os_version_info_struct))
    if rc != 0:
        raise OSError("Getting Windows version failed.")
def get_datasource(self, source_id, datasource_id):
    """Get a Datasource object

    :rtype: Datasource
    """
    params = {'source_id': source_id, 'datasource_id': datasource_id}
    url = self.client.get_url('DATASOURCE', 'GET', 'single', params)
    return self.client.get_manager(Datasource)._get(url)
def get_config(self, config_name, require_ready=True):
    """Return the config with the given case-insensitive config_name.

    Returns an empty dict when no config exists with this name.
    """
    # Pick the appropriate readiness check before touching the registry.
    check = (self.bots.check_configs_ready if require_ready
             else self.bots.check_bots_ready)
    check()
    return self.configs.get(config_name.lower(), {})
def prod(x, axis=None, keepdims=False):
    """Reduction along axes with product operation.

    Args:
        x (Variable): An input variable.
        axis (None, int or tuple of ints): Axis or axes along which product is
            calculated. Passing the default value `None` will reduce all
            dimensions.
        keepdims (bool): Flag whether the reduced axes are kept as a dimension
            with 1 element.

    Returns:
        ~nnabla.Variable: N-D array.

    Note:
        Backward computation is not accurate in a zero value input.
    """
    from .function_bases import prod as prod_base
    # Normalize `axis` into an iterable of axis indices.
    if axis is None:
        axes = range(x.ndim)
    elif hasattr(axis, '__iter__'):
        axes = axis
    else:
        axes = [axis]
    return prod_base(x, axes, keepdims)
def reassign_label(cls, destination_cluster, label):
    """Reassign a label from one cluster to another.

    Args:
        `destination_cluster`: id/label of the cluster to move the label to
        `label`: label to be moved from the source cluster
    """
    payload = {
        "destination_cluster": destination_cluster,
        "label": label
    }
    conn = Qubole.agent(version=Cluster.api_version)
    return conn.put(cls.rest_entity_path + "/reassign-label", payload)
def _add_parameter(self_, param_name, param_obj):
    """Add a new Parameter object into this object's class.

    Supposed to result in a Parameter equivalent to one declared
    in the class's source code.
    """
    cls = self_.cls
    # Bypass the metaclass' __setattr__ so the Parameter object itself is
    # stored on the class rather than being treated as a value assignment.
    type.__setattr__(cls, param_name, param_obj)
    ParameterizedMetaclass._initialize_parameter(cls, param_name, param_obj)
    # Drop the cached (name-mangled) params dict, if present, so it is
    # lazily regenerated with the new parameter included.
    try:
        delattr(cls, '_%s__params' % cls.__name__)
    except AttributeError:
        pass
def logged_user(request):
    """Returns a command that retrieves the current logged user based on the
    secure cookie.

    If there is no logged user, the result from command is None.
    """
    cookie_cmd = cookie_facade.retrive_cookie_data(request, USER_COOKIE_NAME)
    data = cookie_cmd.execute().result
    if data is None:
        return Command()
    return NodeSearch(data['id'])
def get_slugignores(root, fname='.slugignore'):
    """Given a root path, read any .slugignore file inside and return a list
    of patterns that should be removed prior to slug compilation.

    Return empty list if file does not exist.
    """
    path = os.path.join(root, fname)
    try:
        with open(path) as fh:
            # Keep blank lines but strip the trailing newline of each entry.
            return [line.rstrip('\n') for line in fh]
    except IOError:
        return []
async def _go_through_packets_from_fd(self, fd, packet_callback, packet_count=None):
    """A coroutine which goes through a stream and calls a given callback for
    each XML packet seen in it.

    Stops on EOF, after ``packet_count`` packets (when given), or when the
    callback raises StopCapture.
    """
    packets_captured = 0
    self._log.debug('Starting to go through packets')
    # The PSML structure (summary column layout) precedes the packet data.
    psml_struct, data = await self._get_psml_struct(fd)
    while True:
        try:
            packet, data = await self._get_packet_from_stream(fd, data, got_first_packet=packets_captured > 0,
                                                              psml_structure=psml_struct)
        except EOFError:
            self._log.debug('EOF reached')
            break
        if packet:
            packets_captured += 1
            try:
                packet_callback(packet)
            except StopCapture:
                self._log.debug('User-initiated capture stop in callback')
                break
        if packet_count and packets_captured >= packet_count:
            break
def write_object(ctx, pin, management_key, object_id, data):
    """Write an arbitrary PIV object.

    Write a PIV object by providing the object id.
    Yubico writable PIV objects are available in
    the range 5f0000 - 5fffff.

    \b
    OBJECT-ID       Id of PIV object in HEX.
    DATA            File containing the data to be written. Use '-' to use stdin.
    """
    controller = ctx.obj['controller']
    _ensure_authenticated(ctx, controller, pin, management_key)

    def do_write_object(retry=True):
        # NOTE(review): `retry` is never used -- presumably left over from a
        # retry-on-auth-failure flow; confirm before removing.
        try:
            controller.put_data(object_id, data.read())
        except APDUError as e:
            logger.debug('Failed writing object', exc_info=e)
            if e.sw == SW.INCORRECT_PARAMETERS:
                ctx.fail('Something went wrong, is the object id valid?')
            raise

    do_write_object()
def prune_builds(self):
    """Delete the builder cache

    Returns:
        (dict): A dictionary containing information about the operation's
                result. The ``SpaceReclaimed`` key indicates the amount of
                bytes of disk space reclaimed.

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    return self._result(self._post(self._url("/build/prune")), True)
def to_binary_string(self):
    """Pack the feedback to binary form and return it as string."""
    timestamp = datetime_to_timestamp(self.when)
    raw_token = binascii.unhexlify(self.token)
    # Layout: fixed prefix fields followed by the variable-length token.
    fmt = self.FORMAT_PREFIX + '{0}s'.format(len(raw_token))
    return struct.pack(fmt, timestamp, len(raw_token), raw_token)
def EnumerateInterfacesFromClient(args):
    """Enumerate all MAC addresses of all NICs.

    Args:
      args: Unused.

    Yields:
      `rdf_client_network.Interface` instances.
    """
    del args  # Unused.
    # COM must be initialized on this thread before WMI can be queried.
    pythoncom.CoInitialize()
    for interface in (wmi.WMI().Win32_NetworkAdapterConfiguration() or []):
        addresses = []
        for ip_address in interface.IPAddress or []:
            addresses.append(
                rdf_client_network.NetworkAddress(human_readable_address=ip_address))
        response = rdf_client_network.Interface(ifname=interface.Description)
        if interface.MACAddress:
            # WMI reports colon-separated hex; store the raw bytes.
            response.mac_address = binascii.unhexlify(
                interface.MACAddress.replace(":", ""))
        if addresses:
            response.addresses = addresses
        yield response
def remove(self, email):
    """Remove a Collaborator.

    Args:
        str : Collaborator email address.
    """
    if email not in self._collaborators:
        return
    if self._collaborators[email] == ShareRequestValue.Add:
        # A still-pending "add" request can simply be dropped.
        del self._collaborators[email]
    else:
        self._collaborators[email] = ShareRequestValue.Remove
    self._dirty = True
def ticks(
    cls, request,
    length: (Ptypes.path, Integer('Duration of the stream, in seconds.')),
    style: (Ptypes.path, String('Tick style.', enum=['compact', 'extended']))
) -> [
    (200, 'Ok', TickStream),
    (400, 'Invalid parameters')
]:
    """A streaming Lord Vetinari clock...

    Streams a formatted timestamp at irregular intervals for ``length``
    seconds.
    """
    try:
        length = int(length)
        style = cls._styles[style]
    except (ValueError, KeyError):
        # NOTE(review): Respond(400) is neither returned nor raised here, so
        # execution appears to fall through -- presumably Respond raises
        # internally; confirm against the framework.
        Respond(400)

    def vetinari_clock():
        start = time()
        while time() - start <= length:
            # Irregular tick between 0.25 and 4.0 seconds.
            sleep(randint(25, 400) / 100)
            yield strftime(style, localtime())

    Respond(200, vetinari_clock())
def _runUnrealBuildTool(self, target, platform, configuration, args, capture=False):
    """Invokes UnrealBuildTool with the specified parameters.

    When `capture` is truthy, returns the captured output of the invocation;
    otherwise the tool's output goes to the console and None is returned.
    """
    platform = self._transformBuildToolPlatform(platform)
    arguments = [self.getBuildScript(), target, platform, configuration] + args
    # `capture` is a boolean flag; test truthiness rather than `== True`.
    if capture:
        return Utility.capture(arguments, cwd=self.getEngineRoot(), raiseOnError=True)
    Utility.run(arguments, cwd=self.getEngineRoot(), raiseOnError=True)
def filter(self, endpoint, params):
    """Makes a GET request by constructing the path from an endpoint and a
    dict with filter query params.

    e.g.
        params = {'category__in': [1,2]}
        response = self.client.filter('/experiences/', params)
    """
    query = urlencode(self.parse_params(params))
    return self.get('{0}?{1}'.format(endpoint, query))
def reply(self, text, markup=None, parse_mode=None):
    """Reply to the message this `Chat` object is based on.

    :param str text: Text of the message to send
    :param dict markup: Markup options
    :param str parse_mode: Text parsing mode (``"Markdown"``, ``"HTML"`` or
        ``None``)
    """
    markup = {} if markup is None else markup
    return self.send_text(
        text,
        reply_to_message_id=self.message["message_id"],
        disable_web_page_preview="true",
        reply_markup=self.bot.json_serialize(markup),
        parse_mode=parse_mode,
    )
def get_kafka_producer(acks='all',
                       value_serializer=lambda v: json.dumps(v).encode('utf-8')):
    """Return a KafkaProducer that uses the SSLContext created with
    create_ssl_context.
    """
    return KafkaProducer(
        bootstrap_servers=get_kafka_brokers(),
        security_protocol='SSL',
        ssl_context=get_kafka_ssl_context(),
        value_serializer=value_serializer,
        acks=acks,
    )
def __dynamic_expected_value(self, y):
    """This computes the expected value conditioned on the given label value."""
    # Predict with every background sample assigned label y, then average
    # the predictions over samples.
    return self.model.predict(self.data, np.ones(self.data.shape[0]) * y, output=self.model_output).mean(0)
def _set_focused_item(self, item):
    """Sets the focus to the passed item.

    A falsy item clears the focus instead.
    """
    if not item:
        return self._del_focused_item()
    # Only redraw and notify when the focus actually changes.
    if item.model is not self._selection.focus:
        self.queue_draw_item(self._focused_item, item)
        self._selection.focus = item.model
        self.emit('focus-changed', item)
def _mm(n_items, data, initial_params, alpha, max_iter, tol, mm_fun):
    """Iteratively refine MM estimates until convergence.

    Raises
    ------
    RuntimeError
        If the algorithm does not converge after `max_iter` iterations.
    """
    if initial_params is None:
        params = np.zeros(n_items)
    else:
        params = initial_params
    # Convergence test on the L1 norm of successive parameter differences.
    converged = NormOfDifferenceTest(tol=tol, order=1)
    for _ in range(max_iter):
        nums, denoms = mm_fun(n_items, data, params)
        # `alpha` acts as a smoothing pseudo-count on both numerator and
        # denominator before the log transform.
        params = log_transform((nums + alpha) / (denoms + alpha))
        if converged(params):
            return params
    raise RuntimeError("Did not converge after {} iterations".format(max_iter))
def sanitize_release_group(string):
    """Sanitize a `release_group` string to remove content in square brackets.

    :param str string: the release group to sanitize.
    :return: the sanitized release group.
    :rtype: str
    """
    if string is None:
        return None
    # Drop bracketed tags, then normalize whitespace and case.
    without_tags = re.sub(r'\[\w+\]', '', string)
    return without_tags.strip().upper()
def available_metadata(self):
    """List all scenario metadata indicators available in the connected
    data source.
    """
    url = self.base_url + 'metadata/types'
    headers = {'Authorization': 'Bearer {}'.format(self.auth())}
    response = requests.get(url, headers=headers)
    return pd.read_json(response.content, orient='records')['name']
def zobrist_hash(board: chess.Board, *, _hasher: Callable[[chess.Board], int] = ZobristHasher(POLYGLOT_RANDOM_ARRAY)) -> int:
    """Calculates the Polyglot Zobrist hash of the position.

    A Zobrist hash is an XOR of pseudo-random values picked from
    an array. Which values are picked is decided by features of the
    position, such as piece positions, castling rights and en passant
    squares.
    """
    # The default hasher is constructed once at import time and reused.
    return _hasher(board)
def search_channels(self, query, limit=25, offset=0):
    """Search for channels and return them

    :param query: the query string
    :type query: :class:`str`
    :param limit: maximum number of results
    :type limit: :class:`int`
    :param offset: offset for pagination
    :type offset: :class:`int`
    :returns: A list of channels
    :rtype: :class:`list` of :class:`models.Channel` instances
    :raises: None
    """
    params = {'query': query, 'limit': limit, 'offset': offset}
    response = self.kraken_request('GET', 'search/channels', params=params)
    return models.Channel.wrap_search(response)
def _dispatch_rpc(self, address, rpc_id, arg_payload):
    """Background work queue handler to dispatch RPCs.

    Records every outcome (busy, sent, started, exception) with the change
    tracker before returning or re-raising.
    """
    if self.emulator.is_tile_busy(address):
        self._track_change('device.rpc_busy_response', (address, rpc_id, arg_payload, None, None), formatter=format_rpc)
        raise BusyRPCResponse()
    try:
        resp = super(EmulatedDevice, self).call_rpc(address, rpc_id, arg_payload)
        self._track_change('device.rpc_sent', (address, rpc_id, arg_payload, resp, None), formatter=format_rpc)
        return resp
    except AsynchronousRPCResponse:
        # The RPC will complete later; record the start and let the caller
        # handle the async flow.
        self._track_change('device.rpc_started', (address, rpc_id, arg_payload, None, None), formatter=format_rpc)
        raise
    except Exception as exc:
        self._track_change('device.rpc_exception', (address, rpc_id, arg_payload, None, exc), formatter=format_rpc)
        raise
def format_check(settings):
    """Check the format of an osmnet_config object.

    Parameters
    ----------
    settings : dict
        osmnet_config as a dictionary

    Returns
    -------
    Nothing
    """
    valid_keys = ['logs_folder', 'log_file', 'log_console', 'log_name',
                  'log_filename', 'keep_osm_tags']

    for key in list(settings.keys()):
        assert key in valid_keys, \
            ('{} not found in list of valid configuation keys').format(key)
        assert isinstance(key, str), ('{} must be a string').format(key)
        if key == 'keep_osm_tags':
            assert isinstance(settings[key], list), \
                ('{} must be a list').format(key)
            # Validate each tag directly. The previous implementation iterated
            # over the *characters* of each tag, which trivially passed for
            # any string and raised TypeError (not AssertionError) for
            # non-iterable elements.
            assert all(isinstance(element, str) for element in settings[key]), \
                'all elements must be a string'
        if key == 'log_file' or key == 'log_console':
            assert isinstance(settings[key], bool), \
                ('{} must be boolean').format(key)
def activate(self, experiment_key, user_id, attributes=None):
    """Buckets visitor and sends impression event to Optimizely.

    Args:
      experiment_key: Experiment which needs to be activated.
      user_id: ID for user.
      attributes: Dict representing user attributes and values which need to be recorded.

    Returns:
      Variation key representing the variation the user will be bucketed in.
      None if user is not in experiment or if experiment is not Running.
    """
    # Validate the SDK state and inputs before doing any bucketing work.
    if not self.is_valid:
        self.logger.error(enums.Errors.INVALID_DATAFILE.format('activate'))
        return None
    if not validator.is_non_empty_string(experiment_key):
        self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('experiment_key'))
        return None
    if not isinstance(user_id, string_types):
        self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))
        return None
    variation_key = self.get_variation(experiment_key, user_id, attributes)
    if not variation_key:
        self.logger.info('Not activating user "%s".' % user_id)
        return None
    experiment = self.config.get_experiment_from_key(experiment_key)
    variation = self.config.get_variation_from_key(experiment_key, variation_key)
    self.logger.info('Activating user "%s" in experiment "%s".' % (user_id, experiment.key))
    # Impression is only sent once the user is confirmed to be in a variation.
    self._send_impression_event(experiment, variation, user_id, attributes)
    return variation.key
def request(method, url, **kwargs):
    """A wrapper for ``requests.request``.

    JSON-encodes ``data`` when the content type is JSON, and logs both the
    outgoing request and the response.
    """
    _set_content_type(kwargs)
    payload = kwargs.get('data')
    if _content_type_is_json(kwargs) and payload is not None:
        kwargs['data'] = dumps(payload)
    _log_request(method, url, kwargs)
    response = requests.request(method, url, **kwargs)
    _log_response(response)
    return response
def _compute_value(self, pkt):
    """Computes the value of this field based on the provided packet and
    the length_of field and the adjust callback.

    @param packet.Packet pkt: the packet from which is computed this field value.  # noqa: E501
    @return int: the computed value for this field.
    @raise KeyError: the packet nor its payload do not contain an attribute
        with the length_of name.
    @raise AssertionError
    @raise KeyError if _length_of is not one of pkt fields
    """
    fld, fval = pkt.getfield_and_val(self._length_of)
    # i2len converts the internal value into its encoded length in bytes.
    val = fld.i2len(pkt, fval)
    ret = self._adjust(val)
    # A negative length would corrupt the encoded packet.
    assert(ret >= 0)
    return ret
def list_():
    """List the installed packages.

    :return: A list of installed packages
    :rtype: list

    CLI Example:

    .. code-block:: bash

        salt '*' pkgutil.list
    """
    output = salt.utils.mac_utils.execute_return_result('pkgutil --pkgs')
    return output.splitlines()
def upload_file(self, file_name, file_path):
    """Upload a file to a server.

    Attempts to upload a local file with path filepath, to the server, where it
    will be named filename.

    Args:
        :param file_name: The name that the uploaded file will be called on the server.
        :param file_path: The path of the local file to upload.

    Returns:
        :return: A GPFile object that wraps the URI of the uploaded file, or None if the upload fails.
    """
    request = urllib.request.Request(self.url + '/rest/v1/data/upload/job_input?name=' + file_name)
    if self.authorization_header() is not None:
        request.add_header('Authorization', self.authorization_header())
    request.add_header('User-Agent', 'GenePatternRest')
    with open(file_path, 'rb') as f:
        data = f.read()
    try:
        response = urllib.request.urlopen(request, data)
    except IOError:
        # NOTE(review): any IOError (network failure, non-2xx HTTP status)
        # lands here, not only auth failures -- this message may be misleading.
        print("authentication failed")
        return None
    if response.getcode() != 201:
        # NOTE(review): urlopen raises HTTPError for non-2xx responses, so
        # this branch is only reachable for unexpected 2xx codes.
        print("file upload failed, status code = %i" % response.getcode())
        return None
    return GPFile(self, response.info().get('Location'))
def get_cpu_info():
    """Returns the CPU info by using the best sources of information for your OS.

    Returns the result in a dict.
    """
    import json
    raw = get_cpu_info_json()
    return json.loads(raw, object_hook=_utf_to_str)
def send(self):
    """Send an XML string version of content through the connection.

    Returns:
        Response object.
    """
    xml_request = self.get_xml_request()
    # Verbose dump of the request pieces when connection-level debug is on.
    if(self.connection._debug == 1):
        print(xml_request)
        Debug.warn('-' * 25)
        Debug.warn(self._command)
        Debug.dump("doc: \n", self._documents)
        Debug.dump("cont: \n", self._content)
        Debug.dump("nest cont \n", self._nested_content)
        Debug.dump("Request: \n", xml_request)
    response = _handle_response(self.connection._send_request(xml_request),
                                self._command, self.connection.document_id_xpath)
    return response
def get_decode_format(flags):
    """Returns a tuple of (format, recognized)."""
    common = flags & FMT_COMMON_MASK
    legacy = flags & FMT_LEGACY_MASK
    # Common-format bits take precedence over legacy bits.
    if common:
        if common in COMMON_FORMATS:
            return COMMON2UNIFIED[common], True
        return FMT_BYTES, False
    if legacy in LEGACY_FORMATS:
        return LEGACY2UNIFIED[legacy], True
    return FMT_BYTES, False
async def _setcolor(self, *, color: discord.Colour):
    """Sets the default color of embeds."""
    meta = self.bot.config.get("meta", {})
    meta['default_color'] = str(color)
    await self.bot.config.put('meta', meta)
    await self.bot.responses.basic(message="The default color has been updated.")
def refresh(self, *args, **kwargs):
    """Fetch the result SYNCHRONOUSLY and populate the cache."""
    result = self.fetch(*args, **kwargs)
    cache_key = self.key(*args, **kwargs)
    ttl = self.expiry(*args, **kwargs)
    self.store(cache_key, ttl, result)
    return result
def _find_new_additions(self):
    """Find any nodes in the graph that need to be added to the internal
    queue and add them.

    Callers must hold the lock.
    """
    # Nodes with no incoming edges (in-degree 0) are roots, ready to queue.
    # NOTE(review): in_degree_iter() is networkx 1.x API (removed in 2.x) --
    # confirm the pinned networkx version.
    for node, in_degree in self.graph.in_degree_iter():
        if not self._already_known(node) and in_degree == 0:
            # Priority queue is ordered by precomputed score.
            self.inner.put((self._scores[node], node))
            self.queued.add(node)
def start_at(self, start_at):
    """Sets the start_at of this Shift.

    RFC 3339; shifted to location timezone + offset. Precision up to the
    minute is respected; seconds are truncated.

    :param start_at: The start_at of this Shift.
    :type: str
    """
    if start_at is None:
        raise ValueError("Invalid value for `start_at`, must not be `None`")
    if not start_at:
        raise ValueError("Invalid value for `start_at`, length must be greater than or equal to `1`")

    self._start_at = start_at
def _num_taylor_coefficients(n):
    """Return number of taylor coefficients

    Parameters
    ----------
    n : scalar integer
        Wanted number of taylor coefficients

    Returns
    -------
    m : scalar integer
        Number of taylor coefficients calculated
           8 if n <= 6
          16 if 6 < n <= 12
          32 if 12 < n <= 25
          64 if 25 < n <= 51
         128 if 51 < n <= 103
         256 if 103 < n <= 192
    """
    _assert(n < 193, 'Number of derivatives too large. Must be less than 193')
    # The correction shifts n so m lands on the documented power-of-two bins.
    correction = np.array([0, 0, 1, 3, 4, 7])[_get_logn(n)]
    log2n = _get_logn(n - correction)
    m = 2 ** (log2n + 3)
    return m
def delete(self, handle):
    """Delete the specified object.

    Arguments:
    handle -- Handle of object to delete.
    """
    self._check_session()
    self._rest.delete_request('objects', str(handle))
def amplify_gmfs(imts, vs30s, gmfs):
    """Amplify the ground shaking depending on the vs30s."""
    n = len(vs30s)
    amplified = []
    # gmfs is laid out IMT-major: row m*n + i holds IMT m at site i.
    for m, im in enumerate(imts):
        for i in range(n):
            amplified.append(
                amplify_ground_shaking(im.period, vs30s[i], gmfs[m * n + i]))
    return numpy.array(amplified)
def flavor_create(self,
                  name,
                  flavor_id=0,
                  ram=0,
                  disk=0,
                  vcpus=1,
                  is_public=True):
    """Create a flavor and return a dict describing it."""
    self.compute_conn.flavors.create(
        name=name, flavorid=flavor_id, ram=ram, disk=disk, vcpus=vcpus,
        is_public=is_public
    )
    return {
        'name': name,
        'id': flavor_id,
        'ram': ram,
        'disk': disk,
        'vcpus': vcpus,
        'is_public': is_public,
    }
def getDarkCurrentFunction(exposuretimes, imgs, **kwargs):
    """Get dark current function from given images and exposure times."""
    avg_times, avg_imgs = getDarkCurrentAverages(exposuretimes, imgs)
    offs, ascent, rmse = getLinearityFunction(avg_times, avg_imgs, **kwargs)
    return offs, ascent, rmse
def _maybe_download_corpora(tmp_dir):
    """Download corpora for multinli.

    Args:
        tmp_dir: a string
    Returns:
        a string: path of the extracted MNLI directory.
    """
    mnli_filename = "MNLI.zip"
    mnli_finalpath = os.path.join(tmp_dir, "MNLI")
    if not tf.gfile.Exists(mnli_finalpath):
        zip_filepath = generator_utils.maybe_download(
            tmp_dir, mnli_filename, _MNLI_URL)
        # Context manager guarantees the archive handle is closed even if
        # extraction raises (the original leaked the handle on error).
        with zipfile.ZipFile(zip_filepath, "r") as zip_ref:
            zip_ref.extractall(tmp_dir)
    return mnli_finalpath
def deprecated(func):
    """A decorator which can be used to mark functions as deprecated.

    It emits a warning when the function is called.
    """
    def wrapper(*args, **kwargs):
        warnings.warn("Call to deprecated function %s." % func.__name__,
                      category=DeprecationWarning)
        return func(*args, **kwargs)
    # Copy name/doc/etc. from the wrapped function onto the wrapper.
    return update_func_meta(wrapper, func)
def btc_make_p2sh_p2wsh_redeem_script( witness_script_hex ):
    """Make the redeem script for a p2sh-p2wsh witness script.

    The redeem script is the segwit v0 witness program:
    OP_0 PUSH32 <sha256(witness script)>.
    """
    import binascii
    # binascii works on Python 2 and 3; str.decode('hex')/.encode('hex')
    # were Python 2 only and crash on Python 3.
    witness_script_hash = binascii.hexlify(
        hashing.bin_sha256(binascii.unhexlify(witness_script_hex))).decode('ascii')
    redeem_script = btc_script_serialize(['0020' + witness_script_hash])
    return redeem_script
def get_type(t):
    """Convert a type to a str.

    If the instance is a Contract, return 'address' instead.
    """
    if isinstance(t, UserDefinedType) and isinstance(t.type, Contract):
        return 'address'
    return str(t)
def load_file_to_base64_str(f_path):
    """Loads the content of a file into a base64 string.

    Args:
        f_path: full path to the file including the file name.

    Returns:
        A base64 string representing the content of the file in utf-8 encoding.
    """
    full_path = abs_path(f_path)
    with io.open(full_path, 'rb') as fh:
        return base64.b64encode(fh.read()).decode("utf-8")
def on_remove(self, widget, data=None):
    """Removes respective selected core elements and select the next one."""
    path_list = None
    if self.view is not None:
        model, path_list = self.tree_view.get_selection().get_selected_rows()
    old_path = self.get_path()
    # Map selected tree paths back to their core-element models.
    models = [self.list_store[path][self.MODEL_STORAGE_ID] for path in path_list] if path_list else []
    if models:
        try:
            self.remove_core_elements(models)
        except AttributeError as e:
            self._logger.warning("The respective core element of {1}.list_store couldn't be removed. -> {0}"
                                 "".format(e, self.__class__.__name__))
        # Keep the cursor on a valid row after removal.
        if len(self.list_store) > 0:
            self.tree_view.set_cursor(min(old_path[0], len(self.list_store) - 1))
        return True
    else:
        self._logger.warning("Please select an element to be removed.")
def encompass(self, x):
    """Newly set a time range that encompasses
    the input and the current time range.

    :param DateTimeRange x:
        Value to compute encompass with the current time range.

    :Sample Code:
        .. code:: python

            from datetimerange import DateTimeRange
            dtr0 = DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900")
            dtr1 = DateTimeRange("2015-03-22T10:05:00+0900", "2015-03-22T10:15:00+0900")
            dtr0.encompass(dtr1)
    :Output:
        .. parsed-literal::

            2015-03-22T10:00:00+0900 - 2015-03-22T10:15:00+0900
    """
    # Both ranges must be well-formed (start <= end) before combining.
    self.validate_time_inversion()
    x.validate_time_inversion()
    return DateTimeRange(
        start_datetime=min(self.start_datetime, x.start_datetime),
        end_datetime=max(self.end_datetime, x.end_datetime),
        start_time_format=self.start_time_format,
        end_time_format=self.end_time_format,
    )
def complete_abstract_value(
    self,
    return_type: GraphQLAbstractType,
    field_nodes: List[FieldNode],
    info: GraphQLResolveInfo,
    path: ResponsePath,
    result: Any,
) -> AwaitableOrValue[Any]:
    """Complete an abstract value.

    Complete a value of an abstract type by determining the runtime object type
    of that value, then complete the value for that type.
    """
    # Fall back to the executor's default type resolver when the abstract
    # type does not carry its own.
    resolve_type_fn = return_type.resolve_type or self.type_resolver
    runtime_type = resolve_type_fn(result, info, return_type)

    if isawaitable(runtime_type):

        async def await_complete_object_value():
            value = self.complete_object_value(
                self.ensure_valid_runtime_type(
                    await runtime_type, return_type, field_nodes, info, result
                ),
                field_nodes,
                info,
                path,
                result,
            )
            # The completed value may itself be awaitable.
            if isawaitable(value):
                return await value
            return value

        return await_complete_object_value()

    runtime_type = cast(Optional[Union[GraphQLObjectType, str]], runtime_type)
    return self.complete_object_value(
        self.ensure_valid_runtime_type(
            runtime_type, return_type, field_nodes, info, result
        ),
        field_nodes,
        info,
        path,
        result,
    )
def role(self):
    """The iperf3 instance role

    valid roles are 'c'=client and 's'=server

    :rtype: 'c' or 's'
    """
    try:
        self._role = c_char(
            self.lib.iperf_get_test_role(self._test)
        ).value.decode('utf-8')
    except TypeError:
        # Fallback when iperf_get_test_role returns an int rather than
        # bytes (c_char needs a one-char bytes/str) -- presumably differs
        # across ctypes/library versions; confirm.
        self._role = c_char(
            chr(self.lib.iperf_get_test_role(self._test))
        ).value.decode('utf-8')
    return self._role
def do_list(self, args):
    """List all connected resources."""
    try:
        resources = self.resource_manager.list_resources_info()
    except Exception as e:
        print(e)
    else:
        # Cache (name, alias) pairs so later commands can refer to resources
        # by index.
        self.resources = []
        for ndx, (resource_name, value) in enumerate(resources.items()):
            if not args:
                print('({0:2d}) {1}'.format(ndx, resource_name))
                if value.alias:
                    print(' alias: {}'.format(value.alias))
            self.resources.append((resource_name, value.alias or None))
def resolve_node_modules(self):
    """Import the modules specified in init, caching the result."""
    if self.resolved_node_modules:
        return self.resolved_node_modules
    try:
        modules = [
            importlib.import_module(name, self.node_package)
            for name in self.node_modules
        ]
    except ImportError:
        # Reset so a later call retries instead of caching a partial list.
        self.resolved_node_modules = []
        raise
    self.resolved_node_modules = modules
    return self.resolved_node_modules
def unlock(self):
    """Unlocks the device connected to the J-Link.

    Unlocking a device allows for access to read/writing memory, as well as
    flash programming.

    Note:
      Unlock is not supported on all devices.

    Supported Devices:
      Kinetis

    Returns:
      ``True``.

    Raises:
      JLinkException: if the device fails to unlock.
    """
    # Delegate to the manufacturer-specific unlock routine.
    if not unlockers.unlock(self, self._device.manufacturer):
        raise errors.JLinkException('Failed to unlock device.')
    return True
def get_email_confirmation_redirect_url(self, request):
    """The URL to return to after successful e-mail confirmation."""
    if not request.user.is_authenticated:
        return app_settings.EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL
    # Authenticated users get the configured URL, falling back to the
    # regular post-login redirect.
    if app_settings.EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL:
        return app_settings.EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL
    return self.get_login_redirect_url(request)
def memo_Y(f):
    """Memoized Y combinator.

    .. testsetup::

        from proso.func import memo_Y

    .. testcode::

        @memo_Y
        def fib(f):
            def inner_fib(n):
                if n > 1:
                    return f(n - 1) + f(n - 2)
                else:
                    return n
            return inner_fib

        print(fib(100))

    .. testoutput::

        354224848179261915075
    """
    sub = {}

    def Yf(*args):
        # Memoize on the repr of the arguments so unhashable args still work.
        hashable_args = tuple([repr(x) for x in args])
        if args:
            if hashable_args not in sub:
                ret = sub[hashable_args] = f(Yf)(*args)
            else:
                ret = sub[hashable_args]
            return ret
        # Zero-argument calls bypass the memo table entirely.
        return f(Yf)()

    return f(Yf)
def _seqcluster_stats(data, out_dir):
    """Parse seqcluster output, writing this sample's stats to a file.

    Returns the path of the written file, or None when no stat file exists.
    """
    name = dd.get_sample_name(data)
    stat_file = data.get("seqcluster", {}).get("stat_file", None)
    if not stat_file:
        return None
    out_file = os.path.join(out_dir, "%s.txt" % name)
    stats = pd.read_csv(stat_file, sep="\t", names=["reads", "sample", "type"])
    stats[stats["sample"] == name].to_csv(out_file, sep="\t")
    return out_file
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
    """Load the scheduler's state.

    Parameters
    ----------
    state_dict : ``Dict[str, Any]``
        Scheduler state. Should be an object returned from a call to
        ``state_dict``.
    """
    # vars(self) is self.__dict__, so this overwrites instance state in bulk.
    vars(self).update(state_dict)
def to_string(self):
    """Return string representation of node in DOT language."""
    node = quote_if_necessary(self.obj_dict['name'])
    node_attr = list()
    for attr in sorted(self.obj_dict['attributes']):
        value = self.obj_dict['attributes'][attr]
        # Empty string values must still be emitted as an explicit "".
        if value == '':
            value = '""'
        if value is not None:
            node_attr.append(
                '%s=%s' % (attr, quote_if_necessary(value)))
        else:
            # None-valued attributes are emitted bare, with no '='.
            node_attr.append(attr)
    # Attribute-less default statements (graph/node/edge) are omitted.
    if node in ('graph', 'node', 'edge') and len(node_attr) == 0:
        return ''
    node_attr = ', '.join(node_attr)
    if node_attr:
        node += ' [' + node_attr + ']'
    return node + ';'
def extract_endpoint_arguments(endpoint):
    """Extract the argument documentation from the endpoint.

    Returns None when the endpoint declares no arguments, otherwise a dict
    mapping each argument name to its formatted documentation.
    """
    ep_args = endpoint._arguments
    if ep_args is None:
        return None
    # dict.items() replaces the Python 2-only iteritems(), which raises
    # AttributeError on Python 3.
    return {name: format_endpoint_argument_doc(arg)
            for name, arg in ep_args.items()}
def remove_attribute(self, attribute: str) -> None:
    """Remove an attribute from the node.

    Use only if is_mapping() returns True.

    Args:
        attribute: The name of the attribute to remove.
    """
    index = self.__attr_index(attribute)
    if index is not None:
        self.yaml_node.value.pop(index)
def relabel(self, qubits: Qubits) -> 'Gate':
    """Return a copy of this Gate with new qubits."""
    clone = copy(self)
    clone.vec = clone.vec.relabel(qubits)
    return clone
def run_restore(self, snapshot: Dict[Union[str, Key], Any]) -> 'BaseItemCollection':
    """Restores the state of a collection from a snapshot.

    Returns self so calls can be chained.
    """
    try:
        for name, snap in snapshot.items():
            if isinstance(name, Key):
                # Grouped entries are addressed via their Key's group name.
                self._nested_items[name.group].run_restore(snap)
            else:
                self._nested_items[name].run_restore(snap)
        return self
    except Exception as e:
        raise SnapshotError('Error while restoring snapshot: {}'.format(self._snapshot)) from e
def window(preceding=None, following=None, group_by=None, order_by=None):
    """Create a window clause for use with window functions.

    This ROW window clause aggregates adjacent rows based on differences in
    row number.

    All window frames / ranges are inclusive.

    Parameters
    ----------
    preceding : int, tuple, or None, default None
        Specify None for unbounded, 0 to include current row tuple for
        off-center window
    following : int, tuple, or None, default None
        Specify None for unbounded, 0 to include current row tuple for
        off-center window
    group_by : expressions, default None
        Either specify here or with TableExpr.group_by
    order_by : expressions, default None
        For analytic functions requiring an ordering, specify here, or let
        Ibis determine the default ordering (for functions like rank)

    Returns
    -------
    Window
    """
    return Window(preceding=preceding, following=following,
                  group_by=group_by, order_by=order_by, how='rows')
def from_file(self, vasprun_file):
    """Get a vasprun.xml file and return a VasprunLoader."""
    return VasprunLoader(Vasprun(vasprun_file, parse_projected_eigen=True))
def tmp_expr(self, tmp):
    """Return the Claripy expression of a VEX temp value.

    :param tmp: the number of the tmp
    :returns: a Claripy expression of the tmp
    :raises SimValueError: if the tmp was never written or is out of range
    """
    # Fire the inspection breakpoint before and after the read.
    self.state._inspect('tmp_read', BP_BEFORE, tmp_read_num=tmp)
    try:
        v = self.temps[tmp]
        if v is None:
            # Slot exists but was never assigned — usually a bad slice.
            raise SimValueError('VEX temp variable %d does not exist. This is usually the result of an incorrect '
                                'slicing.' % tmp)
    except IndexError:
        raise SimValueError("Accessing a temp that is illegal in this tyenv")
    self.state._inspect('tmp_read', BP_AFTER, tmp_read_expr=v)
    return v | Returns the Claripy expression of a VEX temp value.
:param tmp: the number of the tmp
:returns: a Claripy expression of the tmp
def set(self, language: str, value: str):
    """Set the value for the given language and return self for chaining."""
    self[language] = value
    # Mirror mapping entries as instance attributes for attribute-style access.
    self.__dict__.update(self)
    return self | Sets the value in the specified language.
Arguments:
language:
The language to set the value in.
value:
The value to set. |
def check_conflicts(self):
    """Check for conflicts between modules configured to be built.

    Returns:
        list of single-element error tuples; empty when no conflicts exist.
    """
    shutit_global.shutit_global_object.yield_to_draw()
    cfg = self.cfg
    self.log('PHASE: conflicts', level=logging.DEBUG)
    errs = []
    self.pause_point('\nNow checking for conflicts between modules', print_input=False, level=3)
    for module_id in self.module_ids():
        # Only modules selected for building can conflict.
        if not cfg[module_id]['shutit.core.module.build']:
            continue
        conflicter = self.shutit_map[module_id]
        for conflictee in conflicter.conflicts_with:
            # Skip conflictees that are not present in the module map at all.
            conflictee_obj = self.shutit_map.get(conflictee)
            if conflictee_obj is None:
                continue
            # A conflict matters only when both sides are to be built or
            # already installed.
            if ((cfg[conflicter.module_id]['shutit.core.module.build'] or
                 self.is_to_be_built_or_is_installed(conflicter)) and
                (cfg[conflictee_obj.module_id]['shutit.core.module.build'] or
                 self.is_to_be_built_or_is_installed(conflictee_obj))):
                errs.append(('conflicter module id: ' + conflicter.module_id + ' is configured to be built or is already built but conflicts with module_id: ' + conflictee_obj.module_id,))
    return errs | Checks for any conflicts between modules configured to be built.
def create_user_profile(sender, instance, created, **kwargs):
    """post_save signal handler: create or refresh the User's UserProfile.

    On creation, get_or_create the profile; otherwise update the stored
    htpasswd hash from the user's hash_pass.
    """
    if created:
        profile = UserProfile.objects.get_or_create(user=instance)[0]
        profile.hash_pass = create_htpasswd(instance.hash_pass)
        profile.save()
    else:
        try:
            up = UserProfile.objects.get(user=instance.id)
            up.hash_pass = create_htpasswd(instance.hash_pass)
            up.save()
        except AttributeError:
            # NOTE(review): AttributeError presumably covers a missing
            # hash_pass on the instance; a missing profile would raise
            # UserProfile.DoesNotExist, which is NOT caught — confirm.
            pass | Create the UserProfile when a new User is saved
def user(self):
    """Lazily construct and return the CMPUser for this connection."""
    if self._user is None:
        url = "%s/users/%s" % (self.root, self._username)
        # initialize=False defers fetching the user's properties until needed.
        self._user = CMPUser(url=url,
                             securityHandler=self._securityHandler,
                             proxy_port=self._proxy_port,
                             proxy_url=self._proxy_url,
                             initialize=False)
    return self._user | gets the user properties
def adapt_timefield_value(self, value):
    """Transform a time value into what the backend driver expects for
    time columns: a datetime anchored at 1900-01-01 (or None).
    """
    if value is None:
        return None
    if isinstance(value, string_types):
        # Parse 'HH:MM:SS'; strptime defaults the date fields to 1900-01-01.
        return datetime.datetime(*(time.strptime(value, '%H:%M:%S')[:6]))
    return datetime.datetime(1900, 1, 1, value.hour, value.minute, value.second) | Transform a time value to an object compatible with what is expected
by the backend driver for time columns. |
def run_transaction(transactor, callback):
if isinstance(transactor, sqlalchemy.engine.Connection):
return _txn_retry_loop(transactor, callback)
elif isinstance(transactor, sqlalchemy.engine.Engine):
with transactor.connect() as connection:
return _txn_retry_loop(connection, callback)
elif isinstance(transactor, sqlalchemy.orm.sessionmaker):
session = transactor(autocommit=True)
return _txn_retry_loop(session, callback)
else:
raise TypeError("don't know how to run a transaction on %s", type(transactor)) | Run a transaction with retries.
``callback()`` will be called with one argument to execute the
transaction. ``callback`` may be called more than once; it should have
no side effects other than writes to the database on the given
connection. ``callback`` should not call ``commit()` or ``rollback()``;
these will be called automatically.
The ``transactor`` argument may be one of the following types:
* `sqlalchemy.engine.Connection`: the same connection is passed to the callback.
* `sqlalchemy.engine.Engine`: a connection is created and passed to the callback.
* `sqlalchemy.orm.sessionmaker`: a session is created and passed to the callback. |
def expectRegion(self, filename, x, y, maxrms=0):
    """Wait until the screen region anchored at (x, y) matches the target
    image in *filename*, within an RMS tolerance of *maxrms*.
    """
    log.debug('expectRegion %s (%s, %s)', filename, x, y)
    return self._expectFramebuffer(filename, x, y, maxrms) | Wait until a portion of the screen matches the target image
The region compared is defined by the box
(x, y), (x + image.width, y + image.height) |
def casefold_parts(self, parts):
    """Return parts lower-cased on a case-insensitive (Windows) filesystem,
    otherwise unchanged.
    """
    if self.filesystem.is_windows_fs:
        return [p.lower() for p in parts]
    return parts | Return the lower-case version of parts for a Windows filesystem.
def map_sites(self, stmts):
    """Partition statements by modification-site validity.

    Returns:
        tuple: (valid_statements, mapped_statements) — statements whose
        sites needed no correction, and the MappedStatement results from
        map_stmt_sites for those that did.
    """
    valid_statements = []
    mapped_statements = []
    for stmt in stmts:
        mapped_stmt = self.map_stmt_sites(stmt)
        # map_stmt_sites returns None when no site mapping was needed.
        if mapped_stmt is not None:
            mapped_statements.append(mapped_stmt)
        else:
            valid_statements.append(stmt)
    return valid_statements, mapped_statements | Check a set of statements for invalid modification sites.
Statements are checked against Uniprot reference sequences to determine
if residues referred to by post-translational modifications exist at
the given positions.
If there is nothing amiss with a statement (modifications on any of the
agents, modifications made in the statement, etc.), then the statement
goes into the list of valid statements. If there is a problem with the
statement, the offending modifications are looked up in the site map
(:py:attr:`site_map`), and an instance of :py:class:`MappedStatement`
is added to the list of mapped statements.
Parameters
----------
stmts : list of :py:class:`indra.statement.Statement`
The statements to check for site errors.
Returns
-------
tuple
2-tuple containing (valid_statements, mapped_statements). The first
element of the tuple is a list of valid statements
(:py:class:`indra.statement.Statement`) that were not found to
contain any site errors. The second element of the tuple is a list
of mapped statements (:py:class:`MappedStatement`) with information
on the incorrect sites and corresponding statements with correctly
mapped sites. |
def filter_grounded_only(stmts_in, **kwargs):
    """Filter to statements whose agents are all grounded.

    Keyword args: score_threshold (float) minimum grounding score;
    save (str) pickle path to dump the results into; remove_bound (bool)
    strip ungrounded bound conditions instead of filtering the statement.
    Returns the filtered list of statements.
    """
    remove_bound = kwargs.get('remove_bound', False)
    logger.info('Filtering %d statements for grounded agents...' %
                len(stmts_in))
    stmts_out = []
    score_threshold = kwargs.get('score_threshold')
    for st in stmts_in:
        grounded = True
        for agent in st.agent_list():
            if agent is not None:
                criterion = lambda x: _agent_is_grounded(x, score_threshold)
                if not criterion(agent):
                    grounded = False
                    break
                # NOTE(review): this isinstance guard runs AFTER the
                # criterion check — confirm non-Agent entries should still
                # be scored above but skip the bound-condition handling.
                if not isinstance(agent, Agent):
                    continue
                if remove_bound:
                    _remove_bound_conditions(agent, criterion)
                elif _any_bound_condition_fails_criterion(agent, criterion):
                    grounded = False
                    break
        if grounded:
            stmts_out.append(st)
    logger.info('%d statements after filter...' % len(stmts_out))
    dump_pkl = kwargs.get('save')
    if dump_pkl:
        dump_statements(stmts_out, dump_pkl)
    return stmts_out | Filter to statements that have grounded agents.
Parameters
----------
stmts_in : list[indra.statements.Statement]
A list of statements to filter.
score_threshold : Optional[float]
If scored groundings are available in a list and the highest score
if below this threshold, the Statement is filtered out.
save : Optional[str]
The name of a pickle file to save the results (stmts_out) into.
remove_bound: Optional[bool]
If true, removes ungrounded bound conditions from a statement.
If false (default), filters out statements with ungrounded bound
conditions.
Returns
-------
stmts_out : list[indra.statements.Statement]
A list of filtered statements. |
def batch_row_ids(data_batch):
    """Generate user/item row ids (as int64) for the current mini-batch."""
    item = data_batch.data[0]
    user = data_batch.data[1]
    return {'user_weight': user.astype(np.int64),
            'item_weight': item.astype(np.int64)} | Generate row ids based on the current mini-batch
def create_api_error_from_http_exception(e):
    """Raise a suitable APIError subclass for an HTTPError.

    404s whose message mentions a missing image raise ImageNotFound,
    other 404s raise NotFound, everything else raises APIError.
    """
    response = e.response
    try:
        explanation = response.json()['message']
    except ValueError:
        # Body was not JSON; fall back to the raw response content.
        explanation = (response.content or '').strip()
    cls = APIError
    if response.status_code == 404:
        if explanation and ('No such image' in str(explanation) or
                            'not found: does not exist or no pull access'
                            in str(explanation) or
                            'repository does not exist' in str(explanation)):
            cls = ImageNotFound
        else:
            cls = NotFound
    raise cls(e, response=response, explanation=explanation) | Create a suitable APIError from requests.exceptions.HTTPError.
def _check_for_degenerate_interesting_groups(items):
    """Validate that each configured interesting_group exists in the sample
    metadata and is not identical across all samples.

    Raises:
        ValueError: when a group is missing or degenerate.
    """
    igkey = ("algorithm", "bcbiornaseq", "interesting_groups")
    # The config is read from the first item; a bare string is normalized
    # to a one-element list.
    interesting_groups = tz.get_in(igkey, items[0], [])
    if isinstance(interesting_groups, str):
        interesting_groups = [interesting_groups]
    for group in interesting_groups:
        values = [tz.get_in(("metadata", group), x, None) for x in items]
        if all(x is None for x in values):
            raise ValueError("group %s is labelled as an interesting group, "
                             "but does not appear in the metadata." % group)
        if len(list(tz.unique(values))) == 1:
            raise ValueError("group %s is marked as an interesting group, "
                             "but all samples have the same value." % group) | Make sure interesting_groups specify existing metadata and that
the interesting_group is not all of the same for all of the samples |
def new_symlink(self, vd, name, parent, rr_target, seqnum, rock_ridge,
                rr_name, xa):
    """Create a new symlink Directory Record (implies Rock Ridge).

    Raises:
        PyCdlibInternalError: if this record is already initialized.
    """
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('Directory Record already initialized')
    self._new(vd, name, parent, seqnum, False, 0, xa)
    if rock_ridge:
        # 0o0120555: symlink file type bits with r-xr-xr-x permissions.
        self._rr_new(rock_ridge, rr_name, rr_target, False, False, False,
                     0o0120555) | Create a new symlink Directory Record. This implies that the new
record will be Rock Ridge.
Parameters:
vd - The Volume Descriptor this record is part of.
name - The name for this directory record.
parent - The parent of this directory record.
rr_target - The symlink target for this directory record.
seqnum - The sequence number for this directory record.
rock_ridge - The version of Rock Ridge to use for this directory record.
rr_name - The Rock Ridge name for this directory record.
xa - True if this is an Extended Attribute record.
Returns:
Nothing. |
def translate_docs(self, ds, **kwargs):
    """Translate a set of solr result docs.

    Each doc is first mapped in place with invert_subject_object, then
    translated; returns the list of translated docs.
    """
    for d in ds:
        self.map_doc(d, {}, self.invert_subject_object)
    return [self.translate_doc(d, **kwargs) for d in ds] | Translate a set of solr results
def projectSphereFilter(actor):
    """Project a spherical-like actor onto a plane and return a new Actor."""
    poly = actor.polydata()
    psf = vtk.vtkProjectSphereFilter()
    psf.SetInputData(poly)
    psf.Update()
    a = Actor(psf.GetOutput())
    return a | Project a spherical-like object onto a plane.
.. hint:: |projectsphere| |projectsphere.py|_ |
def separate_words(text, acronyms=None):
    """Return *text* converted to "separate words" style, joining the
    parsed case components with spaces (original casing preserved).
    """
    words, _case, _sep = case_parse.parse_case(text, acronyms, preserve_case=True)
    return ' '.join(words) | Return text in "seperate words" style.
Args:
text: input string to convert case
acronyms: a list of acronyms to detect
>>> separate_words("HELLO_WORLD")
'HELLO WORLD'
>>> separate_words("helloHTMLWorld", ["HTML"])
'hello HTML World' |
def handle_scd(self, conn, args):
    """Minimal smart-card (SCD) protocol handler: only GETINFO version
    is supported; anything else gets a 'No such device' error.
    """
    reply = {
        (b'GETINFO', b'version'): self.version,
    }.get(args)
    if reply is None:
        raise AgentError(b'ERR 100696144 No such device <SCD>')
    keyring.sendline(conn, b'D ' + reply) | No support for smart-card device protocol.
def idxmax(self, **kwargs):
    """Return a new QueryCompiler with the label of the first occurrence
    of the maximum over the requested axis.
    """
    if self._is_transposed:
        # Flip the axis and delegate to the non-transposed frame.
        kwargs["axis"] = kwargs.get("axis", 0) ^ 1
        return self.transpose().idxmax(**kwargs)
    axis = kwargs.get("axis", 0)
    index = self.index if axis == 0 else self.columns
    def idxmax_builder(df, **kwargs):
        # Restore the real labels so idxmax reports them, not positions.
        if axis == 0:
            df.index = index
        else:
            df.columns = index
        return df.idxmax(**kwargs)
    func = self._build_mapreduce_func(idxmax_builder, **kwargs)
    return self._full_axis_reduce(axis, func) | Returns the first occurrence of the maximum over requested axis.
Returns:
A new QueryCompiler object containing the maximum of each column or axis. |
def section_exists(self, section):
    """Return True if the given section exists in the parsed content,
    logging the outcome at debug level either way.
    """
    if section in self.__sections:
        LOGGER.debug("> '{0}' section exists in '{1}'.".format(section, self))
        return True
    else:
        LOGGER.debug("> '{0}' section doesn't exists in '{1}'.".format(section, self))
        return False | Checks if given section exists.
Usage::
>>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \
"[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
>>> sections_file_parser = SectionsFileParser()
>>> sections_file_parser.content = content
>>> sections_file_parser.parse()
<foundations.parsers.SectionsFileParser object at 0x845683844>
>>> sections_file_parser.section_exists("Section A")
True
>>> sections_file_parser.section_exists("Section C")
False
:param section: Section to check existence.
:type section: unicode
:return: Section existence.
:rtype: bool |
def create_avg_psf(skydir, ltc, event_class, event_types, dtheta,
                   egy, cth_bins, npts=None):
    """Generate an exposure-weighted PSF model averaged over incidence
    angle; thin wrapper around create_avg_rsp with create_psf.

    egy: energies in MeV. cth_bins: bin edges in cos(incidence angle).
    """
    return create_avg_rsp(create_psf, skydir, ltc,
                          event_class, event_types,
                          dtheta, egy, cth_bins, npts) | Generate model for exposure-weighted PSF averaged over incidence
angle.
Parameters
----------
egy : `~numpy.ndarray`
Energies in MeV.
cth_bins : `~numpy.ndarray`
Bin edges in cosine of the incidence angle. |
def nameValue(name, value, valueType=str, quotes=False):
    """Build a '--name value' command-line fragment.

    bool valueType renders a bare flag ('--name' when truthy, '' otherwise);
    a None value renders ''; quotes wraps the value in single quotes.
    """
    if valueType == bool:
        if value:
            return "--%s" % name
        return ""
    if value is None:
        return ""
    if quotes:
        return "--%s '%s'" % (name, valueType(value))
    return "--%s %s" % (name, valueType(value)) | Little function to make it easier to make name value strings for commands.
def set_iscsi_volume(self, port_id,
                     initiator_iqn, initiator_dhcp=False,
                     initiator_ip=None, initiator_netmask=None,
                     target_dhcp=False, target_iqn=None, target_ip=None,
                     target_port=3260, target_lun=0, boot_prio=1,
                     chap_user=None, chap_secret=None,
                     mutual_chap_secret=None):
    """Set iSCSI volume information on a physical port's configuration.

    Initiator/target parameters may be None when DHCP supplies them;
    boot_prio=1 is the highest boot priority. The iSCSI boot config is
    applied to the existing port, or a new port entry is created.
    """
    # Netmask is stored as a prefix-length integer.
    initiator_netmask = (_convert_netmask(initiator_netmask)
                         if initiator_netmask else None)
    port_handler = _parse_physical_port_id(port_id)
    iscsi_boot = _create_iscsi_boot(
        initiator_iqn,
        initiator_dhcp=initiator_dhcp,
        initiator_ip=initiator_ip,
        initiator_netmask=initiator_netmask,
        target_dhcp=target_dhcp,
        target_iqn=target_iqn,
        target_ip=target_ip,
        target_port=target_port,
        target_lun=target_lun,
        boot_prio=boot_prio,
        chap_user=chap_user,
        chap_secret=chap_secret,
        mutual_chap_secret=mutual_chap_secret)
    # Update the port in place if it already exists, else create and add it.
    port = self._find_port(port_handler)
    if port:
        port_handler.set_iscsi_port(port, iscsi_boot)
    else:
        port = port_handler.create_iscsi_port(iscsi_boot)
        self._add_port(port_handler, port) | Set iSCSI volume information to configuration.
:param port_id: Physical port ID.
:param initiator_iqn: IQN of initiator.
:param initiator_dhcp: True if DHCP is used in the iSCSI network.
:param initiator_ip: IP address of initiator. None if DHCP is used.
:param initiator_netmask: Netmask of initiator as integer. None if
DHCP is used.
:param target_dhcp: True if DHCP is used for iSCSI target.
:param target_iqn: IQN of target. None if DHCP is used.
:param target_ip: IP address of target. None if DHCP is used.
:param target_port: Port number of target. None if DHCP is used.
:param target_lun: LUN number of target. None if DHCP is used,
:param boot_prio: Boot priority of the volume. 1 indicates the highest
priority. |
def _set_zfcp_config_files(self, fcp, target_wwpn, target_lun):
    """Return shell commands that record the WWPN/LUN in /etc/zfcp.conf
    and trigger a udev 'add' event for the ccw device (RHEL6 style).
    """
    device = '0.0.%s' % fcp
    set_zfcp_conf = 'echo "%(device)s %(wwpn)s %(lun)s" >> /etc/zfcp.conf'\
        % {'device': device, 'wwpn': target_wwpn,
           'lun': target_lun}
    trigger_uevent = 'echo "add" >> /sys/bus/ccw/devices/%s/uevent\n'\
        % device
    return '\n'.join((set_zfcp_conf,
                      trigger_uevent)) | rhel6 set WWPN and LUN in configuration files
def prompt_config(sch, defaults=None, path=None):
    """Recursively prompt for config values against a schema.

    Arguments:
        defaults: dict of fallback values used for empty inputs.
        path: dotted prefix prepended to prompted key names (e.g. "a.b").
    Returns the schema-validated dict of entered values.
    """
    out = {}
    for name, attr in sch.attributes():
        fullpath = name
        if path:
            fullpath = '{}.{}'.format(path, name)
        if defaults is None:
            defaults = {}
        default = defaults.get(name)
        if isinstance(attr, _schema.Schema):
            # Nested schema: recurse with the nested defaults and path.
            value = prompt_config(attr, defaults=default, path=fullpath)
        else:
            # Fall back to the attribute's own default, then empty string.
            if default is None:
                default = attr.default
            if default is None:
                default = ''
            value = prompt(fullpath, default)
        out[name] = value
    return sch.validate(out) | Utility function to recursively prompt for config values
Arguments:
- defaults<dict>: default values used for empty inputs
- path<str>: path to prepend to config keys (eg. "path.keyname") |
def _type_single(self, value, _type):
    """Apply the given type to a single value.

    datetimes/dates are converted to timestamps, strings are re-encoded,
    and a failed cast is logged and re-raised.
    """
    if value is None or _type in (None, NoneType):
        pass
    elif isinstance(value, _type):
        # Already the right type; datetimes are still timestamped.
        value = dt2ts(value) if _type in [datetime, date] else value
    else:
        if _type in (datetime, date):
            value = dt2ts(value)
        elif _type in (unicode, str):
            value = to_encoding(value)
        else:
            try:
                value = _type(value)
            except Exception:
                # NOTE(review): value is re-encoded before being logged,
                # so the log shows the encoded form, not the original.
                value = to_encoding(value)
                logger.error("typecast failed: %s(value=%s)" % (
                    _type.__name__, value))
                raise
    return value | apply type to the single value
def get_assets(cls, lat, lon, begin=None, end=None):
    """Return dates and ids of flyovers for the given lat/lon between the
    optional begin/end dates (JSON from the 'planetary/earth/assets' API).
    """
    instance = cls('planetary/earth/assets')
    filters = {
        'lat': lat,
        'lon': lon,
        'begin': begin,
        'end': end,
    }
    return instance.get_resource(**filters) | Returns date and ids of flyovers
Args:
lat: latitude float
lon: longitude float
begin: date instance
end: date instance
Returns:
json |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.