code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def _OpenCollectionPath(coll_path):
    """Try to open a collection at the given path, probing known types in order.

    A HuntResultCollection is preferred when one exists there and its first
    result carries a payload; otherwise a GeneralIndexedCollection is returned
    when one can be opened. Falls through (returning None) when neither probe
    succeeds.
    """
    candidate = results.HuntResultCollection(coll_path)
    if candidate and candidate[0].payload:
        return candidate
    fallback = sequential_collection.GeneralIndexedCollection(coll_path)
    if fallback:
        return fallback
|
Tries to open various types of collections at the given path.
|
def unpack_rsp(cls, rsp_pb):
    """Unpack the init connect response.

    Returns a ``(ret_code, msg, data)`` triple: on success ``data`` maps the
    connection fields of the ``s2c`` payload; on any failure ``data`` is None.
    """
    if rsp_pb.retType != RET_OK:
        return RET_ERROR, rsp_pb.retMsg, None
    if not rsp_pb.HasField('s2c'):
        return RET_ERROR, "rsp_pb error", None
    s2c = rsp_pb.s2c
    res = {
        'server_version': s2c.serverVer,
        'login_user_id': s2c.loginUserID,
        'conn_id': s2c.connID,
        'conn_key': s2c.connAESKey,
        'keep_alive_interval': s2c.keepAliveInterval,
    }
    return RET_OK, "", res
|
Unpack the init connect response
|
def setup(applicationName,
          applicationType=None,
          style='plastique',
          splash='',
          splashType=None,
          splashTextColor='white',
          splashTextAlign=None,
          theme=''):
    """
    Wrapper system for the QApplication creation process to handle all proper
    pre-application setup.  This method will verify that there is no application
    running, creating one if necessary.  If an application already exists, no
    'app' key is placed in the returned dictionary - signaling that there is
    already an app running.  If you need to specify your own QApplication
    subclass, you can do so through the applicationType parameter.

    :note       This method should always be used with the exec_ method to
                handle the post setup process.

    :param      applicationName | <str>
                applicationType | <subclass of QApplication> || None
                style    | <str> || <QStyle> | style to use for the new app
                splash   | <str> | filepath to use for a splash screen
                splashType   | <subclass of QSplashScreen> || None
                splashTextColor   | <str> || <QColor>
                splashTextAlign   | <Qt.Alignment>

    :usage      |import projexui
                |
                |def main(argv):
                |   # initialize the application
                |   data = projexui.setup()
                |
                |   # do some initialization code
                |   window = MyWindow()
                |   window.show()
                |
                |   # execute the application
                |   projexui.exec_(window, data)

    :return     { <str> key: <variant> value, .. }
    """
    import_qt(globals())
    output = {}
    # check to see if there is a qapplication running
    if not QtGui.QApplication.instance():
        # make sure we have a valid QApplication type
        if applicationType is None:
            applicationType = QtGui.QApplication
        app = applicationType([applicationName])
        app.setApplicationName(applicationName)
        app.setQuitOnLastWindowClosed(True)
        stylize(app, style=style, theme=theme)
        # utilized with the projexui.config.xschemeconfig
        app.setProperty('useScheme', wrapVariant(True))
        output['app'] = app
    # create a new splash screen if desired
    if splash:
        if not splashType:
            splashType = XLoggerSplashScreen
        pixmap = QtGui.QPixmap(splash)
        screen = splashType(pixmap)
        # default to bottom-left text placement when no alignment was given
        if splashTextAlign is None:
            splashTextAlign = QtCore.Qt.AlignLeft | QtCore.Qt.AlignBottom
        screen.setTextColor(QtGui.QColor(splashTextColor))
        screen.setTextAlignment(splashTextAlign)
        screen.show()
        # let the splash paint before the caller continues its setup work
        QtGui.QApplication.instance().processEvents()
        output['splash'] = screen
    return output
|
Wrapper system for the QApplication creation process to handle all proper
pre-application setup. This method will verify that there is no application
running, creating one if necessary. If no application is created, a None
value is returned - signaling that there is already an app running. If you
need to specify your own QApplication subclass, you can do so through the
applicationType parameter.
:note This method should always be used with the exec_ method to
handle the post setup process.
:param applicationName | <str>
applicationType | <subclass of QApplication> || None
style | <str> || <QStyle> | style to use for the new app
splash | <str> | filepath to use for a splash screen
splashType | <subclass of QSplashScreen> || None
splashTextColor | <str> || <QColor>
splashTextAlign | <Qt.Alignment>
:usage |import projexui
|
|def main(argv):
| # initialize the application
| data = projexui.setup()
|
| # do some initialization code
| window = MyWindow()
| window.show()
|
| # execute the application
| projexui.exec_(window, data)
:return { <str> key: <variant> value, .. }
|
def _call(self, method, params):
    """Call a Telegram bot API method and return the raw response body.

    :param method: name of the Telegram bot API method to invoke
    :param params: dict with the HTTP parameters needed by the given method
    :returns: the HTTP response body as text
    """
    # base_url is a format template expecting the bot token and method name
    url = self.base_url % {'token': self.bot_token, 'method': method}
    logger.debug("Telegram bot calls method: %s params: %s",
                 method, str(params))
    r = self.fetch(url, payload=params)
    return r.text
|
Retrieve the given resource.
:param resource: resource to retrieve
:param params: dict with the HTTP parameters needed to retrieve
the given resource
|
def _export_to2marc(self, key, value):
"""Populate the ``595`` MARC field."""
def _is_for_cds(value):
return 'CDS' in value
def _is_for_hal(value):
return 'HAL' in value and value['HAL']
def _is_not_for_hal(value):
return 'HAL' in value and not value['HAL']
result = []
if _is_for_cds(value):
result.append({'c': 'CDS'})
if _is_for_hal(value):
result.append({'c': 'HAL'})
elif _is_not_for_hal(value):
result.append({'c': 'not HAL'})
return result
|
Populate the ``595`` MARC field.
|
def pubkey(self, identity, ecdh=False):
    """Return the public key bytes for *identity* (ECDH curve when requested)."""
    curve = identity.get_curve_name(ecdh=ecdh)
    log.debug('"%s" getting public key (%s) from %s',
              identity.to_string(), curve, self)
    bip32_path = identity.get_bip32_address(ecdh=ecdh)
    node_reply = self._defs.get_public_node(
        self.conn,
        n=bip32_path,
        ecdsa_curve_name=curve)
    log.debug('result: %s', node_reply)
    return bytes(node_reply.node.public_key)
|
Return public key.
|
def set_seed(seed: int):
    """Seed the python, numpy and pytorch random number generators."""
    for seeder in (random.seed, np.random.seed, torch.random.manual_seed):
        seeder(seed)
|
Set random seed for python, numpy and pytorch RNGs
|
def loadFromCheckpoint(savedModelDir, newSerialization=False):
    """Load a previously saved model.

    :param savedModelDir: (string)
        Directory of where the experiment is to be or was saved
    :param newSerialization: if True, read via the new checkpoint
        serialization; otherwise use the legacy loader
    :returns: (:class:`nupic.frameworks.opf.model.Model`) The loaded model
        instance.
    """
    loader = (HTMPredictionModel.readFromCheckpoint if newSerialization
              else Model.load)
    return loader(savedModelDir)
|
Load saved model.
:param savedModelDir: (string)
Directory of where the experiment is to be or was saved
:returns: (:class:`nupic.frameworks.opf.model.Model`) The loaded model
instance.
|
def baseline(y_true, y_score=None):
    """Fraction of labels that are positive, or zero when there are no labels.

    ``y_score`` is accepted but unused, matching the metric-function signature.
    """
    if not len(y_true):
        return 0.0
    return np.nansum(y_true) / count(y_true, countna=False)
|
Number of positive labels divided by number of labels,
or zero if there are no labels
|
def create_entity(self):
    """Create a new entity.

    The entity is assigned a UID higher than any previously issued by this
    world, and is registered with the world before being returned.

    :return: the new entity
    :rtype: :class:`essence.Entity`
    """
    self._highest_id_seen += 1
    fresh = Entity(self._highest_id_seen, self)
    self._entities.append(fresh)
    return fresh
|
Create a new entity.
The entity will have a higher UID than any previously associated
with this world.
:return: the new entity
:rtype: :class:`essence.Entity`
|
async def _connect_and_read(self):
    """Retrieves and connects to Slack's RTM API.

    Makes an authenticated call to Slack's RTM API to retrieve
    a websocket URL. Then connects to the message server and
    reads event messages as they come in.

    If 'auto_reconnect' is specified we
    retrieve a new url and reconnect any time the connection
    is lost unintentionally or an exception is thrown.

    Raises:
        SlackApiError: Unable to retrieve RTM URL from Slack.
        websockets.exceptions: Errors thrown by the 'websockets' library.
    """
    # Loop so that (when auto_reconnect is on) a failed connection is retried
    # until stop is requested.
    while not self._stopped:
        try:
            self._connection_attempts += 1
            async with aiohttp.ClientSession(
                loop=self._event_loop,
                timeout=aiohttp.ClientTimeout(total=self.timeout),
            ) as session:
                self._session = session
                url, data = await self._retreive_websocket_info()
                async with session.ws_connect(
                    url,
                    heartbeat=self.ping_interval,
                    ssl=self.ssl,
                    proxy=self.proxy,
                ) as websocket:
                    self._logger.debug("The Websocket connection has been opened.")
                    self._websocket = websocket
                    self._dispatch_event(event="open", data=data)
                    # Blocks here reading events until the connection drops.
                    await self._read_messages()
        except (
            client_err.SlackClientNotConnectedError,
            client_err.SlackApiError,
            # TODO: Catch websocket exceptions thrown by aiohttp.
        ) as exception:
            self._logger.debug(str(exception))
            self._dispatch_event(event="error", data=exception)
            if self.auto_reconnect and not self._stopped:
                # Back off before the next attempt, then retry the loop.
                await self._wait_exponentially(exception)
                continue
            # Not reconnecting: tear down the websocket and propagate.
            self._logger.exception(
                "The Websocket encountered an error. Closing the connection..."
            )
            self._close_websocket()
            raise
|
Retrieves and connects to Slack's RTM API.
Makes an authenticated call to Slack's RTM API to retrieve
a websocket URL. Then connects to the message server and
reads event messages as they come in.
If 'auto_reconnect' is specified we
retrieve a new url and reconnect any time the connection
is lost unintentionally or an exception is thrown.
Raises:
SlackApiError: Unable to retrieve RTM URL from Slack.
websockets.exceptions: Errors thrown by the 'websockets' library.
|
def compile_compiler_bridge(self, context):
    """Compile the compiler bridge to be used by zinc, using our scala bootstrapper.

    It will compile and cache the jar, and materialize it if not already there.

    :param context: The context of the task trying to compile the bridge.
                    This is mostly needed to use its scheduler to create digests
                    of the relevant jars.
    :return: The absolute path to the compiled scala-compiler-bridge jar.
    """
    bridge_jar_name = 'scala-compiler-bridge.jar'
    # Per-workdir cache location of the bridge jar.
    bridge_jar = os.path.join(self._compiler_bridge_cache_dir, bridge_jar_name)
    # Mirror of the same relative path under the global bootstrapdir cache.
    global_bridge_cache_dir = os.path.join(self._zinc_factory.get_options().pants_bootstrapdir, fast_relpath(self._compiler_bridge_cache_dir, self._workdir()))
    globally_cached_bridge_jar = os.path.join(global_bridge_cache_dir, bridge_jar_name)
    # Workaround to avoid recompiling the bridge for every integration test
    # We check the bootstrapdir (.cache) for the bridge.
    # If it exists, we make a copy to the buildroot.
    #
    # TODO Remove when action caches are implemented.
    if os.path.exists(globally_cached_bridge_jar):
        # Cache the bridge jar under buildroot, to allow snapshotting
        safe_mkdir(self._relative_to_buildroot(self._compiler_bridge_cache_dir))
        safe_hardlink_or_copy(globally_cached_bridge_jar, bridge_jar)
    if not os.path.exists(bridge_jar):
        # Not cached anywhere: run the bootstrapper and materialize its output
        # into the buildroot.
        res = self._run_bootstrapper(bridge_jar, context)
        context._scheduler.materialize_directories((
            DirectoryToMaterialize(get_buildroot(), res.output_directory_digest),
        ))
        # For the workaround above to work, we need to store a copy of the bridge in
        # the bootstrapdir cache (.cache).
        safe_mkdir(global_bridge_cache_dir)
        safe_hardlink_or_copy(bridge_jar, globally_cached_bridge_jar)
        return ClasspathEntry(bridge_jar, res.output_directory_digest)
    else:
        # Jar already on disk: snapshot it to obtain a digest for the
        # classpath entry.
        bridge_jar_snapshot = context._scheduler.capture_snapshots((PathGlobsAndRoot(
            PathGlobs((self._relative_to_buildroot(bridge_jar),)),
            text_type(get_buildroot())
        ),))[0]
        bridge_jar_digest = bridge_jar_snapshot.directory_digest
        return ClasspathEntry(bridge_jar, bridge_jar_digest)
|
Compile the compiler bridge to be used by zinc, using our scala bootstrapper.
It will compile and cache the jar, and materialize it if not already there.
:param context: The context of the task trying to compile the bridge.
This is mostly needed to use its scheduler to create digests of the relevant jars.
:return: The absolute path to the compiled scala-compiler-bridge jar.
|
def from_int(cls, integer):
    """
    Constructs a `Deleted` using the `tinyint` value of the `rev_deleted`
    column of the `revision` MariaDB table.

    * DELETED_TEXT = 1
    * DELETED_COMMENT = 2
    * DELETED_USER = 4
    * DELETED_RESTRICTED = 8

    :param integer: non-negative bit field combining the DELETED_* flags above
    :return: a `Deleted` with one boolean per flag
    """
    # Test each flag with a bitwise AND instead of parsing bin(integer)'s
    # string form: the old string-based checks only worked by accident
    # (the "0b" prefix made the length guards dead code) and misread
    # negative values.
    return cls(
        text=bool(integer & 1),
        comment=bool(integer & 2),
        user=bool(integer & 4),
        restricted=bool(integer & 8)
    )
|
Constructs a `Deleted` using the `tinyint` value of the `rev_deleted`
column of the `revision` MariaDB table.
* DELETED_TEXT = 1
* DELETED_COMMENT = 2
* DELETED_USER = 4
* DELETED_RESTRICTED = 8
|
def normalize(pw):
    """Lower-case *pw* and map leet symbols to their closest letters."""
    return ''.join(helper.L33T.get(ch, ch) for ch in pw.lower())
|
Lower case, and change the symbols to closest characters
|
def _MergeTaskStorage(self, storage_writer):
    """Merges a task storage with the session storage.

    This function checks all task stores that are ready to merge and updates
    the scheduled tasks. Note that to prevent this function holding up
    the task scheduling loop only the first available task storage is merged.

    Args:
      storage_writer (StorageWriter): storage writer for a session storage used
          to merge task storage.
    """
    if self._processing_profiler:
        self._processing_profiler.StartTiming('merge_check')
    # Pass 1: classify every processed task as either removable or pending
    # merge.
    for task_identifier in storage_writer.GetProcessedTaskIdentifiers():
        try:
            task = self._task_manager.GetProcessedTaskByIdentifier(task_identifier)
            self._task_manager.SampleTaskStatus(task, 'processed')
            to_merge = self._task_manager.CheckTaskToMerge(task)
            if not to_merge:
                # Nothing to merge: drop the task storage and the task itself.
                storage_writer.RemoveProcessedTaskStorage(task)
                self._task_manager.RemoveTask(task)
                self._task_manager.SampleTaskStatus(task, 'removed_processed')
            else:
                storage_writer.PrepareMergeTaskStorage(task)
                self._task_manager.UpdateTaskAsPendingMerge(task)
        except KeyError:
            logger.error(
                'Unable to retrieve task: {0:s} to prepare it to be merged.'.format(
                    task_identifier))
            continue
    if self._processing_profiler:
        self._processing_profiler.StopTiming('merge_check')
    # Pass 2: merge at most one task storage; an on-hold reader blocks picking
    # up a new one.
    task = None
    if not self._storage_merge_reader_on_hold:
        task = self._task_manager.GetTaskPendingMerge(self._merge_task)
    # Limit the number of attribute containers from a single task-based
    # storage file that are merged per loop to keep tasks flowing.
    if task or self._storage_merge_reader:
        self._status = definitions.STATUS_INDICATOR_MERGING
        if self._processing_profiler:
            self._processing_profiler.StartTiming('merge')
        if task:
            if self._storage_merge_reader:
                # A merge is already in progress; park it while the new task
                # is merged.
                self._merge_task_on_hold = self._merge_task
                self._storage_merge_reader_on_hold = self._storage_merge_reader
                self._task_manager.SampleTaskStatus(
                    self._merge_task_on_hold, 'merge_on_hold')
            self._merge_task = task
            try:
                self._storage_merge_reader = storage_writer.StartMergeTaskStorage(
                    task)
                self._task_manager.SampleTaskStatus(task, 'merge_started')
            except IOError as exception:
                logger.error((
                    'Unable to merge results of task: {0:s} '
                    'with error: {1!s}').format(task.identifier, exception))
                self._storage_merge_reader = None
        if self._storage_merge_reader:
            fully_merged = self._storage_merge_reader.MergeAttributeContainers(
                maximum_number_of_containers=self._MAXIMUM_NUMBER_OF_CONTAINERS)
        else:
            # TODO: Do something more sensible when this happens, perhaps
            # retrying the task once that is implemented. For now, we mark the task
            # as fully merged because we can't continue with it.
            fully_merged = True
        if self._processing_profiler:
            self._processing_profiler.StopTiming('merge')
        if fully_merged:
            try:
                self._task_manager.CompleteTask(self._merge_task)
            except KeyError as exception:
                logger.error(
                    'Unable to complete task: {0:s} with error: {1!s}'.format(
                        self._merge_task.identifier, exception))
            if not self._storage_merge_reader_on_hold:
                self._merge_task = None
                self._storage_merge_reader = None
            else:
                # Resume the merge that was parked above.
                self._merge_task = self._merge_task_on_hold
                self._storage_merge_reader = self._storage_merge_reader_on_hold
                self._merge_task_on_hold = None
                self._storage_merge_reader_on_hold = None
                self._task_manager.SampleTaskStatus(
                    self._merge_task, 'merge_resumed')
        self._status = definitions.STATUS_INDICATOR_RUNNING
        self._number_of_produced_events = storage_writer.number_of_events
        self._number_of_produced_sources = storage_writer.number_of_event_sources
        self._number_of_produced_warnings = storage_writer.number_of_warnings
|
Merges a task storage with the session storage.
This function checks all task stores that are ready to merge and updates
the scheduled tasks. Note that to prevent this function holding up
the task scheduling loop only the first available task storage is merged.
Args:
storage_writer (StorageWriter): storage writer for a session storage used
to merge task storage.
|
def run_task_class(self, class_path, **options):
    """Run a CumulusCI task class, passing task options via kwargs.

    Use this keyword to run logic from CumulusCI tasks which have not
    been configured in the project's cumulusci.yml file. This is
    most useful in cases where a test needs to use task logic for
    logic unique to the test and thus not worth making into a named
    task for the project.

    Examples:
    | =Keyword= | =task_class= | =task_options= |
    | Run Task Class | cumulusci.task.utils.DownloadZip | url=http://test.com/test.zip dir=test_zip |
    """
    logger.console("\n")
    initialized = self._init_task(class_path, options, TaskConfig())
    task_class, task_config = initialized
    return self._run_task(task_class, task_config)
|
Runs a CumulusCI task class with task options via kwargs.
Use this keyword to run logic from CumulusCI tasks which have not
been configured in the project's cumulusci.yml file. This is
most useful in cases where a test needs to use task logic for
logic unique to the test and thus not worth making into a named
task for the project
Examples:
| =Keyword= | =task_class= | =task_options= |
| Run Task Class | cumulusci.task.utils.DownloadZip | url=http://test.com/test.zip dir=test_zip |
|
def pyquil_to_tk(prog: Program) -> Circuit:
    """
    Convert a :py:class:`pyquil.Program` to a :math:`\\mathrm{t|ket}\\rangle` :py:class:`Circuit` .
    Note that not all pyQuil operations are currently supported by pytket.

    :param prog: A circuit to be converted
    :return: The converted circuit
    :raises NotImplementedError: for an unsupported gate, a measurement
        without a classical register, multiple classical registers, or a
        non-gate instruction other than Declare/Pragma/Halt
    """
    reg_name = None
    qubits = prog.get_qubits()
    # Size the circuit by the highest qubit index used (indices may be sparse).
    n_qubits = max(qubits) + 1
    tkc = Circuit(n_qubits)
    for i in prog.instructions:
        if isinstance(i, Gate):
            name = i.name
            try:
                optype = _known_quil_gate[name]
            except KeyError as error:
                raise NotImplementedError("Operation not supported by tket: " + str(i)) from error
            if len(i.params) == 0:
                tkc.add_operation(optype, [q.index for q in i.qubits])
            else:
                # Convert radian parameters to multiples of pi.
                params = [p/PI for p in i.params]
                op = tkc._get_op(optype,len(i.qubits),len(i.qubits),params)
                tkc._add_operation(op, [q.index for q in i.qubits])
        elif isinstance(i, Measurement):
            if not i.classical_reg:
                raise NotImplementedError("Program has no defined classical register for measurement on qubit: ", i.qubits[0])
            reg = i.classical_reg
            # Only one classical register per program is supported.
            if reg_name and reg_name != reg.name:
                raise NotImplementedError("Program has multiple classical registers: ", reg_name, reg.name)
            reg_name = reg.name
            op = tkc._get_op(OpType.Measure,1,1,str(reg.offset))
            tkc._add_operation(op, [i.qubit.index])
        elif isinstance(i, Declare):
            # Declarations carry no circuit operations.
            continue
        elif isinstance(i, Pragma):
            continue
        elif isinstance(i, Halt):
            # Halt terminates the program; return the circuit built so far.
            return tkc
        else:
            raise NotImplementedError("Pyquil instruction is not a gate: " + str(i))
    return tkc
|
Convert a :py:class:`pyquil.Program` to a :math:`\\mathrm{t|ket}\\rangle` :py:class:`Circuit` .
Note that not all pyQuil operations are currently supported by pytket.
:param prog: A circuit to be converted
:return: The converted circuit
|
def disconnect_node(node, target_obj_result, graph, debug):
    """ Disconnects `node` from `target_obj_result`.

    If the disconnection leaves a cable distributor with exactly two
    remaining neighbors, the distributor is removed and its neighbors are
    joined directly with a new branch that inherits the removed branch's
    kind, type and ring.

    Args
    ----
    node: LVLoadAreaCentreDing0, i.e.
        Origin node - Ding0 graph object (e.g. LVLoadAreaCentreDing0)
    target_obj_result: LVLoadAreaCentreDing0, i.e.
        Origin node - Ding0 graph object (e.g. LVLoadAreaCentreDing0)
    graph: :networkx:`NetworkX Graph Obj< >`
        NetworkX graph object with nodes and newly created branches
    debug: bool
        If True, information is printed during process
    """
    # backup kind and type of branch before the edge is removed
    branch_kind = graph.adj[node][target_obj_result]['branch'].kind
    branch_type = graph.adj[node][target_obj_result]['branch'].type
    branch_ring = graph.adj[node][target_obj_result]['branch'].ring
    graph.remove_edge(node, target_obj_result)
    if isinstance(target_obj_result, MVCableDistributorDing0):
        neighbor_nodes = list(graph.neighbors(target_obj_result))
        if len(neighbor_nodes) == 2:
            # The distributor only bridged two nodes; replace it with a
            # direct branch between them.
            node.grid.remove_cable_distributor(target_obj_result)
            branch_length = calc_geo_dist_vincenty(neighbor_nodes[0], neighbor_nodes[1])
            graph.add_edge(neighbor_nodes[0], neighbor_nodes[1], branch=BranchDing0(length=branch_length,
                                                                                    kind=branch_kind,
                                                                                    type=branch_type,
                                                                                    ring=branch_ring))
    if debug:
        logger.debug('disconnect edge {0}-{1}'.format(node, target_obj_result))
|
Disconnects `node` from `target_obj`
Args
----
node: LVLoadAreaCentreDing0, i.e.
Origin node - Ding0 graph object (e.g. LVLoadAreaCentreDing0)
target_obj_result: LVLoadAreaCentreDing0, i.e.
Origin node - Ding0 graph object (e.g. LVLoadAreaCentreDing0)
graph: :networkx:`NetworkX Graph Obj< >`
NetworkX graph object with nodes and newly created branches
debug: bool
If True, information is printed during process
|
def set_card_standard(self, title, text, smallImageUrl=None,
                      largeImageUrl=None):
    """Set response card as standard type.

    title, text, and image cannot exceed 8,000 characters.

    Args:
        title: str. Title of Simple or Standard type card.
        text: str. Content of Standard type card.
        smallImageUrl: str. URL of small image. Cannot exceed 2,000
            characters. Recommended pixel size: 720w x 480h.
        largeImageUrl: str. URL of large image. Cannot exceed 2,000
            characters. Recommended pixel size: 1200w x 800h.
    """
    card = self.response.card
    card.type = 'Standard'
    card.title = title
    card.text = text
    # Image URLs are only attached when provided.
    if smallImageUrl:
        card.image.smallImageUrl = smallImageUrl
    if largeImageUrl:
        card.image.largeImageUrl = largeImageUrl
|
Set response card as standard type.
title, text, and image cannot exceed 8,000 characters.
Args:
title: str. Title of Simple or Standard type card.
text: str. Content of Standard type card.
smallImageUrl: str. URL of small image. Cannot exceed 2,000
characters. Recommended pixel size: 720w x 480h.
largeImageUrl: str. URL of large image. Cannot exceed 2,000
characters. Recommended pixel size: 1200w x 800h.
|
def posthoc_mackwolfe(a, val_col, group_col, p=None, n_perm=100, sort=False, p_adjust=None):
    '''Mack-Wolfe Test for Umbrella Alternatives.

    In dose-finding studies one may assume an increasing treatment effect with
    increasing dose level. However, the test subject may actually succumb to
    toxic effects at high doses, which leads to decreasing treatment
    effects [1]_, [2]_.

    The scope of the Mack-Wolfe Test is to test for umbrella alternatives for
    either a known or unknown point P (i.e. dose-level), where the peak
    (umbrella point) is present.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.

    p : int, optional
        The a-priori known peak as an ordinal number of the treatment group
        including the zero dose level, i.e. p = {1, ..., k}. Defaults to None.

    n_perm : int, optional
        Number of permutations used to approximate the p value when the
        peak is unknown. Defaults to 100.

    sort : bool, optional
        If True, sort data by block and group columns.

    p_adjust : optional
        Unused; kept for signature compatibility.

    Returns
    -------
    p : float
        P value.
    stat : float
        Statistic.

    References
    ----------
    .. [1] Chen, I.Y. (1991) Notes on the Mack-Wolfe and Chen-Wolfe Tests for
        Umbrella Alternatives. Biom. J., 33, 281-290.
    .. [2] Mack, G.A., Wolfe, D. A. (1981) K-sample rank tests for umbrella
        alternatives. J. Amer. Statist. Assoc., 76, 175-181.

    Examples
    --------
    >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
    >>> sp.posthoc_mackwolfe(x, val_col = 0, group_col = 1)
    '''
    x = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Preserve first-appearance order of the groups as the dose ordering.
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
    x.sort_values(by=[group_col], ascending=True, inplace=True)

    k = x[group_col].unique().size

    if p:
        # Validate the a-priori peak against the number of groups.
        if p > k:
            print("Selected 'p' > number of groups:", str(p), " > ", str(k))
            return False
        elif p < 1:
            print("Selected 'p' < 1: ", str(p))
            return False

    Rij = x[val_col].rank()
    n = x.groupby(group_col)[val_col].count()

    def _fn(Ri, Rj):
        # Mann-Whitney-type count of pairs where an element of Rj exceeds an
        # element of Ri.
        return np.sum(Ri.apply(lambda x: Rj[Rj > x].size))

    def _ustat(Rij, g, k):
        # Matrix of pairwise U statistics between all group pairs.
        levels = np.unique(g)
        U = np.identity(k)
        for i in range(k):
            for j in range(i):
                U[i,j] = _fn(Rij[x[group_col] == levels[i]], Rij[x[group_col] == levels[j]])
                U[j,i] = _fn(Rij[x[group_col] == levels[j]], Rij[x[group_col] == levels[i]])
        return U

    def _ap(p, U):
        # Mack-Wolfe A(p): sum of U statistics increasing up to the peak p and
        # decreasing after it.
        tmp1 = 0
        if p > 0:
            for i in range(p):
                for j in range(i+1, p+1):
                    tmp1 += U[i,j]
        tmp2 = 0
        if p < k:
            for i in range(p, k):
                for j in range(i+1, k):
                    tmp2 += U[j,i]
        return tmp1 + tmp2

    def _n1(p, n):
        return np.sum(n[:p+1])

    def _n2(p, n):
        return np.sum(n[p:k])

    def _mean_at(p, n):
        # Null mean of A(p).
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        return (N1**2 + N2**2 - np.sum(n**2) - n.iloc[p]**2)/4

    def _var_at(p, n):
        # Null variance of A(p).
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        N = np.sum(n)
        var = (2 * (N1**3 + N2**3) + 3 * (N1**2 + N2**2) -\
               np.sum(n**2 * (2*n + 3)) - n.iloc[p]**2 * (2 * n.iloc[p] + 3) +\
               12. * n.iloc[p] * N1 * N2 - 12. * n.iloc[p] ** 2 * N) / 72.
        return var

    if p:
        # Peak known a priori: standardize A(p) and use the normal
        # approximation.
        if (x.groupby(val_col).count() > 1).any().any():
            print("Ties are present")
        U = _ustat(Rij, x[group_col], k)
        est = _ap(p, U)
        mean = _mean_at(p, n)
        sd = np.sqrt(_var_at(p, n))
        stat = (est - mean)/sd
        p_value = ss.norm.sf(stat)
    else:
        # Peak unknown: take the maximum standardized A(p) over all candidate
        # peaks and approximate its null distribution by permuting the ranks.
        U = _ustat(Rij, x[group_col], k)
        Ap = np.array([_ap(i, U) for i in range(k)]).ravel()
        mean = np.array([_mean_at(i, n) for i in range(k)]).ravel()
        var = np.array([_var_at(i, n) for i in range(k)]).ravel()
        A = (Ap - mean) / np.sqrt(var)
        stat = np.max(A)
        p = A == stat
        est = None

        mt = []
        for i in range(n_perm):
            ix = Series(np.random.permutation(Rij))
            Uix = _ustat(ix, x[group_col], k)
            Apix = np.array([_ap(i, Uix) for i in range(k)])
            Astarix = (Apix - mean) / np.sqrt(var)
            mt.append(np.max(Astarix))

        mt = np.array(mt)
        # BUG FIX: the permutation p value is the *fraction* of permuted
        # maxima exceeding the observed statistic; the previous code divided
        # the array of exceeding maxima by n_perm, returning an array instead
        # of a scalar p value.
        p_value = mt[mt > stat].size / n_perm

    return p_value, stat
|
Mack-Wolfe Test for Umbrella Alternatives.
In dose-finding studies one may assume an increasing treatment effect with
increasing dose level. However, the test subject may actually succumb to
toxic effects at high doses, which leads to decreasing treatment
effects [1]_, [2]_.
The scope of the Mack-Wolfe Test is to test for umbrella alternatives for
either a known or unknown point P (i.e. dose-level), where the peak
(umbrella point) is present.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
p : int, optional
The a-priori known peak as an ordinal number of the treatment group
including the zero dose level, i.e. p = {1, ..., k}. Defaults to None.
sort : bool, optional
If True, sort data by block and group columns.
Returns
-------
p : float
P value.
stat : float
Statistic.
References
----------
.. [1] Chen, I.Y. (1991) Notes on the Mack-Wolfe and Chen-Wolfe Tests for
Umbrella Alternatives. Biom. J., 33, 281-290.
.. [2] Mack, G.A., Wolfe, D. A. (1981) K-sample rank tests for umbrella
alternatives. J. Amer. Statist. Assoc., 76, 175-181.
Examples
--------
>>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
>>> sp.posthoc_mackwolfe(x, val_col = 0, group_col = 1)
|
def _read_dataset_metadata(self):
    """Reads dataset metadata from cloud storage.

    Returns:
        instance of DatasetMetadata
    """
    blob_name = 'dataset/{0}_dataset.csv'.format(self.dataset_name)
    blob = self.storage_client.get_blob(blob_name)
    buf = BytesIO()
    blob.download_to_file(buf)
    # Rewind so the metadata parser reads from the start of the download.
    buf.seek(0)
    return eval_lib.DatasetMetadata(buf)
|
Reads dataset metadata.
Returns:
instance of DatasetMetadata
|
def _create_key_manager(self, get_match_fuzzy, set_match_fuzzy,
                        get_enable_vi_bindings, set_enable_vi_bindings,
                        get_show_completion_columns,
                        set_show_completion_columns,
                        get_show_help, set_show_help,
                        stop_input_and_refresh_cli):
    """Create and initialize the keybinding manager.

    The created :class:`prompt_toolkit.KeyBindingManager` is stored on
    ``self.manager``; this method returns None.

    :type get_match_fuzzy: callable
    :param get_match_fuzzy: Gets the fuzzy matching config.

    :type set_match_fuzzy: callable
    :param set_match_fuzzy: Sets the fuzzy matching config.

    :type get_enable_vi_bindings: callable
    :param get_enable_vi_bindings: Gets the vi (or emacs) key bindings
        config.

    :type set_enable_vi_bindings: callable
    :param set_enable_vi_bindings: Sets the vi (or emacs) key bindings
        config.

    :type get_show_completion_columns: callable
    :param get_show_completion_columns: Gets the show completions in
        multiple or single columns config.

    :type set_show_completion_columns: callable
    :param set_show_completion_columns: Sets the show completions in
        multiple or single columns config.

    :type get_show_help: callable
    :param get_show_help: Gets the show help pane config.

    :type set_show_help: callable
    :param set_show_help: Sets the show help pane config.

    :type stop_input_and_refresh_cli: callable
    :param stop_input_and_refresh_cli: Stops input by raising an
        `InputInterrupt`, forces a cli refresh to ensure certain
        options take effect within the current session.
    """
    # Fail fast if any injected callback is not callable.
    assert callable(get_match_fuzzy)
    assert callable(set_match_fuzzy)
    assert callable(get_enable_vi_bindings)
    assert callable(set_enable_vi_bindings)
    assert callable(get_show_completion_columns)
    assert callable(set_show_completion_columns)
    assert callable(get_show_help)
    assert callable(set_show_help)
    assert callable(stop_input_and_refresh_cli)
    self.manager = KeyBindingManager(
        enable_search=True,
        enable_abort_and_exit_bindings=True,
        enable_system_bindings=True,
        enable_auto_suggest_bindings=True,
        enable_open_in_editor=False)

    @self.manager.registry.add_binding(Keys.F2)
    def handle_f2(_):
        """Toggle fuzzy matching.

        :type _: :class:`prompt_toolkit.Event`
        :param _: (Unused)
        """
        set_match_fuzzy(not get_match_fuzzy())

    @self.manager.registry.add_binding(Keys.F3)
    def handle_f3(_):
        """Toggle Vi mode keybindings matching.

        Disabling Vi keybindings will enable Emacs keybindings.

        :type _: :class:`prompt_toolkit.Event`
        :param _: (Unused)
        """
        set_enable_vi_bindings(not get_enable_vi_bindings())
        # Binding changes only take effect after the cli refreshes.
        stop_input_and_refresh_cli()

    @self.manager.registry.add_binding(Keys.F4)
    def handle_f4(_):
        """Toggle multiple column completions.

        :type _: :class:`prompt_toolkit.Event`
        :param _: (Unused)
        """
        set_show_completion_columns(not get_show_completion_columns())
        stop_input_and_refresh_cli()

    @self.manager.registry.add_binding(Keys.F5)
    def handle_f5(_):
        """Toggle the help container.

        :type _: :class:`prompt_toolkit.Event`
        :param _: (Unused)
        """
        set_show_help(not get_show_help())
        stop_input_and_refresh_cli()

    @self.manager.registry.add_binding(Keys.F9)
    def handle_f9(event):
        """Switch between the default and docs buffers.

        :type event: :class:`prompt_toolkit.Event`
        :param event: Contains info about the event, namely the cli
            which is used to changing which buffer is focused.
        """
        if event.cli.current_buffer_name == u'clidocs':
            event.cli.focus(u'DEFAULT_BUFFER')
        else:
            event.cli.focus(u'clidocs')

    @self.manager.registry.add_binding(Keys.F10)
    def handle_f10(event):
        """Quit when the `F10` key is pressed.

        :type event: :class:`prompt_toolkit.Event`
        :param event: Contains info about the event, namely the cli
            which is used for exiting the app.
        """
        event.cli.set_exit()
|
Create and initialize the keybinding manager.
:type get_fuzzy_match: callable
:param get_fuzzy_match: Gets the fuzzy matching config.
:type set_fuzzy_match: callable
:param set_fuzzy_match: Sets the fuzzy matching config.
:type get_enable_vi_bindings: callable
:param get_enable_vi_bindings: Gets the vi (or emacs) key bindings
config.
:type set_enable_vi_bindings: callable
:param set_enable_vi_bindings: Sets the vi (or emacs) key bindings
config.
:type get_show_completion_columns: callable
:param get_show_completion_columns: Gets the show completions in
multiple or single columns config.
:type set_show_completion_columns: callable
:param set_show_completion_columns: Sets the show completions in
multiple or single columns config.
:type get_show_help: callable
:param get_show_help: Gets the show help pane config.
:type set_show_help: callable
:param set_show_help: Sets the show help pane config.
:type stop_input_and_refresh_cli: callable
:param stop_input_and_refresh_cli: Stops input by raising an
`InputInterrupt`, forces a cli refresh to ensure certain
options take effect within the current session.
:rtype: :class:`prompt_toolkit.KeyBindingManager`
:return: A custom `KeyBindingManager`.
|
def install_config_kibana(self):
    """
    Interactively download, install, configure and autostart kibana.

    Each step is performed only after the user confirms the prompt.
    :return: None
    """
    steps = (
        ("Download and install kibana", self.kibana_install),
        ("Configure and autostart kibana", self.kibana_config),
    )
    for question, action in steps:
        if self.prompt_check(question):
            action()
|
install and config kibana
:return:
|
def sudoku(G):
    """Solve a Sudoku grid by reduction to exact cover (dancing links).

    :param G: integer matrix with 0 at empty cells
    :returns bool: True if grid could be solved
    :modifies: G will contain the solution
    :complexity: huge, but linear for usual published 9x9 grids
    """
    # N = block side, N2 = grid side, N4 = cell count; presumably the
    # module-level defaults are 3/9/81 for a 9x9 grid and the helper
    # functions (rc, rv, cv, bv, row, col, val) read them -- confirm.
    global N, N2, N4
    if len(G) == 16: # for a 16 x 16 sudoku grid
        N, N2, N4 = 4, 16, 256
    # 4 constraint families (cell used, value-in-row, value-in-column,
    # value-in-block), plus one artificial element e for the givens set.
    e = 4 * N4
    universe = e + 1
    # One set per possible assignment; it covers the 4 constraints that
    # placing that value in that cell would satisfy.
    S = [[rc(a), rv(a), cv(a), bv(a)] for a in range(N4 * N2)]
    # A collects element e plus all constraints satisfied by the clues,
    # so a valid cover includes every given exactly once.
    A = [e]
    for r in range(N2):
        for c in range(N2):
            if G[r][c] != 0:
                a = assignation(r, c, G[r][c] - 1)
                A += S[a]
    sol = dancing_links(universe, S + [A])
    if sol:
        # Decode chosen assignment sets back into grid values; indices
        # >= len(S) correspond to the artificial set A.
        for a in sol:
            if a < len(S):
                G[row(a)][col(a)] = val(a) + 1
        return True
    else:
        return False
|
Solving Sudoku
:param G: integer matrix with 0 at empty cells
:returns bool: True if grid could be solved
:modifies: G will contain the solution
:complexity: huge, but linear for usual published 9x9 grids
|
def result(self):
    """
    Return the job's result, decoding it according to the program type.

    :raises ValueError: if the job has not completed yet
    :raises CancellationError: if the job was cancelled
    :raises QVMError/QPUError/QUILCError/UnknownApiError: if the server
        reported a failed execution (chosen by the machine type)
    """
    if not self.is_done():
        raise ValueError("Cannot get a result for a program that isn't completed.")
    status = self._raw['status']
    if status == 'CANCELLED':
        raise CancellationError(self._raw['result'])
    if status == 'ERROR':
        # Map the machine type onto its error class; anything
        # unrecognised falls back to UnknownApiError.
        error_types = {'QVM': QVMError, 'QPU': QPUError, 'QUILC': QUILCError}
        raise error_types.get(self._machine, UnknownApiError)(self._raw['result'])
    program_type = self._raw['program']['type']
    if program_type == 'wavefunction':
        return Wavefunction.from_bit_packed_string(
            base64.b64decode(self._raw['result']), self._raw['program']['addresses'])
    if program_type in ('multishot', 'multishot-measure', 'expectation'):
        return np.asarray(self._raw['result'])
    return self._raw['result']
|
The result of the job if available
throws ValueError if result is not available yet
throws ApiError if server returned an error indicating program execution was not successful
or if the job was cancelled
|
def skip(self, regex):
    """
    Like :meth:`scan`, but return the number of characters matched.

    >>> s = Scanner("test string")
    >>> s.skip('test ')
    5
    """
    matched_length = self.scan_full(regex, return_string=False, advance_pointer=True)
    return matched_length
|
Like :meth:`scan`, but return the number of characters matched.
>>> s = Scanner("test string")
>>> s.skip('test ')
5
|
def write_csvs(self, dirname: PathLike, skip_data: bool = True, sep: str = ','):
    """Write annotation to ``.csv`` files.

    It is not possible to recover the full :class:`~anndata.AnnData` from the
    output of this function. Use :meth:`~anndata.AnnData.write` for this.

    Parameters
    ----------
    dirname
        Name of directory to which to export.
    skip_data
        Skip the data matrix :attr:`X`.
    sep
        Separator for the data.
    """
    # Imported lazily to avoid a circular import at module load time.
    from .readwrite.write import write_csvs as _write_csvs
    _write_csvs(dirname, self, skip_data=skip_data, sep=sep)
|
Write annotation to ``.csv`` files.
It is not possible to recover the full :class:`~anndata.AnnData` from the
output of this function. Use :meth:`~anndata.AnnData.write` for this.
Parameters
----------
dirname
Name of directory to which to export.
skip_data
Skip the data matrix :attr:`X`.
sep
Separator for the data.
|
def detx(self, det_id, t0set=None, calibration=None):
    """Retrieve the detector file for given detector id.

    If t0set and/or calibration are given, they are appended as query
    parameters so the server includes the calibration data.

    :param det_id: detector identifier
    :param t0set: optional t0set identifier
    :param calibration: optional calibration identifier
    :returns: the detector file content from the server
    """
    # Build the query string properly instead of the previous
    # 'detx/{id}?&t0set=...' form, which produced a malformed URL
    # (empty first parameter) and a dangling '?' when no args were given.
    params = []
    if t0set is not None:
        params.append('t0set=' + t0set)
    if calibration is not None:
        params.append('calibrid=' + calibration)
    url = 'detx/{0}'.format(det_id)
    if params:
        url += '?' + '&'.join(params)
    detx = self._get_content(url)
    return detx
|
Retrieve the detector file for given detector id
If t0set is given, append the calibration data.
|
def flag(self, key, env=None):
    """Feature flagging system

    write flags to redis
    $ dynaconf write redis -s DASHBOARD=1 -e premiumuser
    meaning: Any premium user has DASHBOARD feature enabled

    In your program do::

        # premium user has access to dashboard?
        >>> if settings.flag('dashboard', 'premiumuser'):
        ...     activate_dashboard()

    The value is ensured to be loaded fresh from redis server.
    It also works with file settings but the recommended is redis
    as the data can be loaded once it is updated.

    :param key: The flag name
    :param env: The env to look for
    """
    target_env = env or self.ENVVAR_PREFIX_FOR_DYNACONF or "DYNACONF"
    with self.using_env(target_env):
        fresh_value = self.get_fresh(key)
        return fresh_value is True or fresh_value in true_values
|
Feature flagging system
write flags to redis
$ dynaconf write redis -s DASHBOARD=1 -e premiumuser
meaning: Any premium user has DASHBOARD feature enabled
In your program do::
# premium user has access to dashboard?
>>> if settings.flag('dashboard', 'premiumuser'):
... activate_dashboard()
The value is ensured to be loaded fresh from redis server
It also works with file settings but the recommended is redis
as the data can be loaded once it is updated.
:param key: The flag name
:param env: The env to look for
|
def __create_dir_property(self, dir_name, docstring):
    """
    Generate getter and setter for a directory property.

    Installs a ``<dir_name>_dir`` property on the instance's class whose
    setter validates the path with ``verify_dir`` before storing it in a
    private ``_<dir_name>_dir`` attribute (initialised to None).
    NOTE(review): the property lives on the class, so it is shared by all
    instances of that class.
    """
    public_attr = "{}_dir".format(dir_name)
    backing_attr = "_" + public_attr
    setattr(self, backing_attr, None)

    def read_dir(instance):
        return getattr(instance, backing_attr)

    def write_dir(instance, path):
        verify_dir(path, dir_name)
        setattr(instance, backing_attr, path)

    setattr(self.__class__, public_attr,
            property(fget=read_dir, fset=write_dir, doc=docstring))
|
Generate getter and setter for a directory property.
|
def get_resource_search_session_for_bin(self, bin_id):
    """Gets a resource search session for the given bin.

    arg:    bin_id (osid.id.Id): the ``Id`` of the bin
    return: (osid.resource.ResourceSearchSession) - ``a
            ResourceSearchSession``
    raise:  NotFound - ``bin_id`` not found
    raise:  NullArgument - ``bin_id`` is ``null``
    raise:  OperationFailed - ``unable to complete request``
    raise:  Unimplemented - ``supports_resource_search()`` or
            ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_resource_search()`` and
    ``supports_visible_federation()`` are ``true``.*
    """
    if self.supports_resource_search():
        # NOTE(review): bin_id existence is not validated here even though
        # the docstring promises NotFound -- confirm intended.
        # pylint: disable=no-member
        return sessions.ResourceSearchSession(bin_id, runtime=self._runtime)
    raise errors.Unimplemented()
|
Gets a resource search session for the given bin.
arg: bin_id (osid.id.Id): the ``Id`` of the bin
return: (osid.resource.ResourceSearchSession) - ``a
ResourceSearchSession``
raise: NotFound - ``bin_id`` not found
raise: NullArgument - ``bin_id`` is ``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented - ``supports_resource_search()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_resource_search()`` and
``supports_visible_federation()`` are ``true``.*
|
def select_inputs(self, address: str, amount: int) -> dict:
    '''Find appropriate UTXOs to include in a raw transaction, while being
    careful to never spend old transactions with a lot of coin age.

    :param address: address whose unspent outputs are considered
    :param amount: target amount the selected inputs must cover
    :returns: dict with 'utxos' (list of MutableTxIn) and 'total' (Decimal)
    :raises InsufficientFunds: when the address cannot cover ``amount``
    '''
    utxos = []
    utxo_sum = Decimal(0)
    # Ascending confirmations puts the youngest outputs (least coin age) first.
    for tx in sorted(self.listunspent(address=address), key=itemgetter('confirmations')):
        # Skip outputs on the P2TH tag addresses -- presumably protocol
        # markers rather than spendable funds; confirm against pa_parameters.
        if tx["address"] not in (self.pa_parameters.P2TH_addr,
                                 self.pa_parameters.test_P2TH_addr):
            utxos.append(
                MutableTxIn(txid=tx['txid'],
                            txout=tx['vout'],
                            sequence=Sequence.max(),
                            script_sig=ScriptSig.empty())
                )
            utxo_sum += Decimal(tx["amount"])
            # Stop as soon as the accumulated total covers the request.
            if utxo_sum >= amount:
                return {'utxos': utxos, 'total': utxo_sum}
    if utxo_sum < amount:
        raise InsufficientFunds("Insufficient funds.")
    # Only reachable when amount <= utxo_sum yet the early return never
    # fired (e.g. a non-positive amount with no eligible outputs).
    raise Exception("undefined behavior :.(")
|
finds appropriate UTXOs to include in rawtx, while being careful
to never spend old transactions with a lot of coin age.
Argument is an integer; returns a list of appropriate UTXOs.
|
def lookup_users(self, user_ids=None, screen_names=None, include_entities=None, tweet_mode=None):
    """ Perform bulk look up of users from user ID or screen_name """
    params = {}
    if include_entities is not None:
        params['include_entities'] = 'true' if include_entities else 'false'
    if user_ids:
        params['user_id'] = list_to_csv(user_ids)
    if screen_names:
        params['screen_name'] = list_to_csv(screen_names)
    if tweet_mode:
        params['tweet_mode'] = tweet_mode
    return self._lookup_users(post_data=params)
|
Perform bulk look up of users from user ID or screen_name
|
def register(self, name, obj):
    """Register a unique type description, rejecting duplicate names."""
    if name not in self.all:
        log.debug('register: %s ', name)
        self.all[name] = obj
        return obj
    log.debug('register: %s already existed: %s', name, obj.name)
    raise DuplicateDefinitionException(
        'register: %s already existed: %s' % (name, obj.name))
|
Registers an unique type description
|
def replace_cluster_role(self, name, body, **kwargs):
    """
    replace the specified ClusterRole
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_cluster_role(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the ClusterRole (required)
    :param V1ClusterRole body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :return: V1ClusterRole
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths simply forward to the _with_http_info
    # variant; with async_req it returns the request thread instead of data.
    return self.replace_cluster_role_with_http_info(name, body, **kwargs)
|
replace the specified ClusterRole
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_cluster_role(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ClusterRole (required)
:param V1ClusterRole body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1ClusterRole
If the method is called asynchronously,
returns the request thread.
|
def create(
        name,
        attributes=None,
        region=None,
        key=None,
        keyid=None,
        profile=None,
):
    '''
    Create an SQS queue.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_sqs.create myqueue region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    queue_attributes = _preprocess_attributes(attributes or {})
    try:
        conn.create_queue(QueueName=name, Attributes=queue_attributes)
    except botocore.exceptions.ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
    return {'result': True}
|
Create an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.create myqueue region=us-east-1
|
def run(self, *, delay=None):
    """Run this pipeline by enqueueing its first message.

    Parameters:
      delay(int): The minimum amount of time, in milliseconds, the
        pipeline should be delayed by.

    Returns:
      pipeline: Itself.
    """
    first_message = self.messages[0]
    self.broker.enqueue(first_message, delay=delay)
    return self
|
Run this pipeline.
Parameters:
delay(int): The minimum amount of time, in milliseconds, the
pipeline should be delayed by.
Returns:
pipeline: Itself.
|
def find_max_label_length(labels):
    """Return the length of the longest label.

    :param labels: sequence of strings (or other sized objects)
    :returns: the maximum length, or 0 for an empty sequence
    """
    # max() with a default replaces the manual index loop and makes the
    # empty-sequence result (0) explicit instead of incidental.
    return max(map(len, labels), default=0)
|
Return the maximum length for the labels.
|
def index_of_reports(self, report, account_id):
    """
    Index of Reports.

    Shows all reports that have been run for the account of a specific type.

    :param report: report type ID (required path parameter)
    :param account_id: account ID (required path parameter)
    """
    params = {}
    data = {}
    path = {"account_id": account_id, "report": report}
    self.logger.debug("GET /api/v1/accounts/{account_id}/reports/{report} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/accounts/{account_id}/reports/{report}".format(**path), data=data, params=params, all_pages=True)
|
Index of Reports.
Shows all reports that have been run for the account of a specific type.
|
def __get_translation(self, surah, ayah, lang):
    """
    Perform http request to get translation for a given surah, ayah and
    language.

    Parameter:
    :surah -- Surah index from API pages.
    :ayah -- Ayah key used to index the ``verse`` mapping of the response.
    :lang -- Language code.

    Return:
    :string -- Translation for the given surah and ayah, or None when the
               request fails with an ODOAException.
    """
    # Construct url to fetch translation data.
    url = '{base}/translations/{lang}/{lang}_translation_{surah}.json'.format(
        base=self.BASE_API, lang=lang, surah=int(surah)
    )
    try:
        response = urlopen(url) # Fetch data from given url.
        data = json.loads(response.read().decode('utf-8')) # Get response and convert to dict.
        translation = data['verse'][ayah]
    except ODOAException:
        # NOTE(review): only ODOAException is swallowed; errors raised by
        # urlopen itself (e.g. URLError) will propagate -- confirm intended.
        return None
    else:
        return translation
|
Perform http request to get translation from given surah, ayah and
language.
Parameter:
:surah -- Surah index from API pages.
:ayat -- Ayat key.
:lang -- Language code.
Return:
:string -- Translation from given surah and ayat.
|
def convertPixelXYToLngLat(self, pixelX, pixelY, level):
    '''
    Convert pixel x/y coordinates at the given zoom level into a
    (longitude, latitude) pair (spherical Mercator inverse projection).
    '''
    mapSize = self.getMapDimensionsByZoomLevel(level)
    # Normalise the clipped pixel coordinates into [-0.5, 0.5].
    normX = (self.clipValue(pixelX, 0, mapSize - 1) / mapSize) - 0.5
    normY = 0.5 - (self.clipValue(pixelY, 0, mapSize - 1) / mapSize)
    longitude = 360 * normX
    latitude = 90 - 360 * math.atan(math.exp(-normY * 2 * math.pi)) / math.pi
    return (longitude, latitude)
|
converts a pixel x, y to a latitude and longitude.
|
def onRefreshPluginData(self, plugin_name, data):
    """
    Frontend requests a data refresh

    :param plugin_name: Name of plugin that changed
    :type plugin_name: str
    :param data: Additional data
    :type data: None
    :rtype: None
    """
    logger.info(u"onRefreshPluginData: {}".format(plugin_name))
    if not plugin_name:
        logger.error("Missing plugin name")
        return
    request_payload = {
        'msg': "plugin_data_get",
        'plugin_name': plugin_name
    }
    # Hop onto the reactor thread before touching the websocket.
    reactor.callFromThread(self._sendJSON, request_payload)
|
Frontend requests a data refresh
:param plugin_name: Name of plugin that changed
:type plugin_name: str
:param data: Additional data
:type data: None
:rtype: None
|
def roundness(im):
    """Return the roundness statistic 4*pi*area / perimeter**2 of a 2D cutout.

    ``area`` is the total number of pixels and ``perimeter`` the number of
    border pixels of the rectangular array, so the value depends only on the
    cutout's shape, not on the pixel data.

    Examples (with ``astropy.io.fits``)::

        data = pyfits.getdata('j94f05bgq_flt.fits', ext=1)
        star0 = data[403:412, 423:432]
        star = data[396:432, 3522:3558]
        roundness(star0)  # 0.99401955054989544
        roundness(star)   # 0.83091919980660645

    :param im: 2D numpy array (only ``shape`` and ``size`` are used)
    :returns: float roundness value
    """
    # Border pixel count: two full edges per axis, minus the 4 corners
    # that would otherwise be counted twice.
    perimeter = im.shape[0] * 2 + im.shape[1] * 2 - 4
    area = im.size
    return 4 * np.pi * area / perimeter ** 2
|
from astropy.io import fits as pyfits
data=pyfits.getdata('j94f05bgq_flt.fits',ext=1)
star0=data[403:412,423:432]
star=data[396:432,3522:3558]
In [53]: findobj.roundness(star0)
Out[53]: 0.99401955054989544
In [54]: findobj.roundness(star)
Out[54]: 0.83091919980660645
|
def _namify_arguments(mapping):
    """
    Ensure that a mapping of names to parameters has the parameters set to the
    correct name.

    :param mapping: dict mapping name -> parameter object; mutated in place
        (each parameter's ``name`` attribute is set to its key)
    :returns: list of the (renamed) parameter objects
    """
    result = []
    # dict.iteritems() was Python-2-only and raised AttributeError on
    # Python 3; items() behaves identically here.
    for name, parameter in mapping.items():
        parameter.name = name
        result.append(parameter)
    return result
|
Ensure that a mapping of names to parameters has the parameters set to the
correct name.
|
def copy_path_to_clipboard(i):
    """
    Copy the current working directory path to the clipboard.

    Input:  {
              (add_quotes) - if 'yes', add quotes
            }

    Output: {
              return       - return code =  0, if successful
                                         >  0, if error
              (error)      - error text if return > 0
            }
    """
    import os

    path = os.getcwd()
    if i.get('add_quotes', '') == 'yes':
        path = '"' + path + '"'

    # Best effort: any error from the clipboard helper is ignored.
    copy_to_clipboard({'string': path})

    return {'return': 0}
|
Input: {
(add_quotes) - if 'yes', add quotes
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
|
def has_delete_permission(self, request, obj=None):
    """
    Implement a lookup for object level permissions. Basically the same as
    ModelAdmin.has_delete_permission, but also passes the obj parameter in.
    """
    if settings.TREE_EDITOR_OBJECT_PERMISSIONS:
        opts = self.opts
        perm = '{}.{}'.format(opts.app_label, opts.get_delete_permission())
        allowed = request.user.has_perm(perm, obj)
    else:
        allowed = True
    # Short-circuits: the superclass check only runs if the object-level
    # check passed (same as the original `r and super(...)`).
    return allowed and super(TreeEditor, self).has_delete_permission(request, obj)
|
Implement a lookup for object level permissions. Basically the same as
ModelAdmin.has_delete_permission, but also passes the obj parameter in.
|
def get_graphviz_dirtree(self, engine="automatic", **kwargs):
    """
    Generate directory graph in the DOT language. The graph shows the files
    and directories in the node workdir.

    Returns: graphviz.Digraph <https://graphviz.readthedocs.io/en/stable/api.html#digraph>
    """
    chosen_engine = "fdp" if engine == "automatic" else engine
    return Dirviz(self.workdir).get_cluster_graph(engine=chosen_engine, **kwargs)
|
Generate directory graph in the DOT language. The graph show the files and directories
in the node workdir.
Returns: graphviz.Digraph <https://graphviz.readthedocs.io/en/stable/api.html#digraph>
|
def list_vm_images_sub(access_token, subscription_id):
    '''List VM images in a subscription.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.

    Returns:
        HTTP response. JSON body of a list of VM images.
    '''
    endpoint = '{}/subscriptions/{}/providers/Microsoft.Compute/images?api-version={}'.format(
        get_rm_endpoint(), subscription_id, COMP_API)
    return do_get_next(endpoint, access_token)
|
List VM images in a subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. JSON body of a list of VM images.
|
def build_and_start(query, directory):
    """Create and start a new Async task (with the default callbacks
    argument defined in the decorator) running grep over *directory*."""
    async_task = Async(target=grep, args=[query, directory])
    async_task.start()
|
This function will create and then start a new Async task with the
default callbacks argument defined in the decorator.
|
def _get_pos(self):
    """
    Get current position for scroll bar.

    Returns 0 when the whole content fits inside the canvas; otherwise the
    fraction of the scrollable range above the current start line.
    """
    if self._canvas.height >= self._max_height:
        return 0
    scrollable = self._max_height - self._canvas.height + 1
    return self._canvas.start_line / scrollable
|
Get current position for scroll bar.
|
def proto_01_13_steps025dual(abf=exampleABF):
    """IC steps. See how hyperpol. step affects things.

    Detects APs, groups sweeps by the current-injection protocol, saves
    per-AP feature plots, then compares the gain function (command current
    vs. average instantaneous frequency) of the two steps of each sweep.
    """
    swhlab.ap.detect(abf)
    # 200 is the grouping parameter -- presumably the pA step size of the
    # injection protocol; confirm against standard_groupingForInj.
    standard_groupingForInj(abf,200)
    for feature in ['freq','downslope']:
        swhlab.ap.plot_values(abf,feature,continuous=False) #plot AP info
        swhlab.plot.save(abf,tag='A_'+feature)
    # Average instantaneous frequency per sweep, split at epoch 1:
    # f1 = first step (before epoch 1), f2 = second step (after).
    f1=swhlab.ap.getAvgBySweep(abf,'freq',None,1)
    f2=swhlab.ap.getAvgBySweep(abf,'freq',1,None)
    # Sweeps without APs yield NaN; treat them as 0 Hz for plotting.
    f1=np.nan_to_num(f1)
    f2=np.nan_to_num(f2)
    # Command current of each sweep, sampled just after the step onset.
    Xs=abf.clampValues(abf.dataX[int(abf.protoSeqX[1]+.01)])
    swhlab.plot.new(abf,title="gain function",xlabel="command current (pA)",
                    ylabel="average inst. freq. (Hz)")
    pylab.plot(Xs,f1,'.-',ms=20,alpha=.5,label="step 1",color='b')
    pylab.plot(Xs,f2,'.-',ms=20,alpha=.5,label="step 2",color='r')
    pylab.legend(loc='upper left')
    pylab.axis([Xs[0],Xs[-1],None,None])
    swhlab.plot.save(abf,tag='gain')
|
IC steps. See how hyperpol. step affects things.
|
def discard_between(
    self,
    min_rank=None,
    max_rank=None,
    min_score=None,
    max_score=None,
):
    """
    Remove members whose ranking is between *min_rank* and *max_rank*
    OR whose score is between *min_score* and *max_score* (both ranges
    inclusive). If no bounds are specified, no members will be removed.
    """
    have_ranks = (min_rank is not None) or (max_rank is not None)
    have_scores = (min_score is not None) or (max_score is not None)
    # No bounds at all: nothing to do.
    if not have_ranks and not have_scores:
        return
    # Only one kind of bound: delegate directly.
    if have_scores and not have_ranks:
        return self.discard_by_score(min_score, max_score)
    if have_ranks and not have_scores:
        return self.discard_by_rank(min_rank, max_rank)
    # Both kinds: batch the two removals in a single pipeline round-trip.
    with self.redis.pipeline() as pipe:
        self.discard_by_score(min_score, max_score, pipe)
        self.discard_by_rank(min_rank, max_rank, pipe)
        pipe.execute()
|
Remove members whose ranking is between *min_rank* and *max_rank*
OR whose score is between *min_score* and *max_score* (both ranges
inclusive). If no bounds are specified, no members will be removed.
|
def handle_register_or_upload(post_data, files, user, repository):
    """Process a `register` or `upload` command issued via distutils.

    This method is called with the authenticated user.

    :param post_data: form-encoded package metadata from distutils
    :param files: uploaded files (falsy for a plain ``register``)
    :param user: the authenticated user (kept for interface compatibility)
    :param repository: the repository the package belongs to
    :returns: an ``HttpResponse`` (200 on success, 400 on validation errors)
    """
    name = post_data.get('name')
    version = post_data.get('version')

    if settings.LOCALSHOP_VERSIONING_TYPE:
        scheme = get_versio_versioning_scheme(settings.LOCALSHOP_VERSIONING_TYPE)
        try:
            Version(version, scheme=scheme)
        except AttributeError:
            response = HttpResponseBadRequest(
                reason="Invalid version supplied '{!s}' for '{!s}' scheme.".format(
                    version, settings.LOCALSHOP_VERSIONING_TYPE))
            return response

    if not name or not version:
        logger.info("Missing name or version for package")
        return HttpResponseBadRequest('No name or version given')

    try:
        condition = Q()
        for search_name in get_search_names(name):
            condition |= Q(name__iexact=search_name)

        package = repository.packages.get(condition)

        # Error out when we try to override a mirror'ed package for now
        # not sure what the best thing is
        if not package.is_local:
            return HttpResponseBadRequest(
                '%s is a pypi package!' % package.name)

        try:
            release = package.releases.get(version=version)
        except ObjectDoesNotExist:
            release = None
    except ObjectDoesNotExist:
        package = None
        release = None

    # Validate the data
    form = forms.ReleaseForm(post_data, instance=release)
    if not form.is_valid():
        # BUGFIX: dict.values() is not indexable on Python 3; use the same
        # six-based access pattern as the package-form handling below.
        return HttpResponseBadRequest(
            reason=six.next(six.itervalues(form.errors))[0])

    if not package:
        pkg_form = forms.PackageForm(post_data, repository=repository)
        if not pkg_form.is_valid():
            return HttpResponseBadRequest(
                reason=six.next(six.itervalues(pkg_form.errors))[0])
        package = pkg_form.save()

    release = form.save(commit=False)
    release.package = package
    release.save()

    # If this is an upload action then process the uploaded file
    if files:
        files = {
            'distribution': files['content']
        }
        filename = files['distribution']._name
        try:
            release_file = release.files.get(filename=filename)
            if settings.LOCALSHOP_RELEASE_OVERWRITE is False:
                message = 'That it already released, please bump version.'
                return HttpResponseBadRequest(message)
        except ObjectDoesNotExist:
            release_file = models.ReleaseFile(
                release=release, filename=filename)
        form_file = forms.ReleaseFileForm(
            post_data, files, instance=release_file)
        if not form_file.is_valid():
            return HttpResponseBadRequest('ERRORS %s' % form_file.errors)
        release_file = form_file.save(commit=False)
        release_file.save()

    return HttpResponse()
|
Process a `register` or `upload` comment issued via distutils.
This method is called with the authenticated user.
|
def _hexify(data, chunksize=None):
    """Convert a binary string into its hex encoding, broken up into chunks
    of I{chunksize} characters separated by a space.

    @param data: the binary string
    @type data: bytes
    @param chunksize: the chunk size. Default is L{dns.rdata._hex_chunksize}
    @rtype: string
    """
    import binascii

    if chunksize is None:
        chunksize = _hex_chunksize
    # str.encode('hex_codec') was Python-2-only; binascii.hexlify works on
    # both Python 2 and 3.
    hex = binascii.hexlify(data).decode('ascii')
    l = len(hex)
    if l > chunksize:
        # Re-join the hex text in fixed-size chunks separated by spaces.
        hex = ' '.join(hex[i:i + chunksize] for i in range(0, l, chunksize))
    return hex
|
Convert a binary string into its hex encoding, broken up into chunks
of I{chunksize} characters separated by a space.
@param data: the binary string
@type data: string
@param chunksize: the chunk size. Default is L{dns.rdata._hex_chunksize}
@rtype: string
|
def _update_scsi_devices(scsis_old_new, current_disks):
    '''
    Returns a list of vim.vm.device.VirtualDeviceSpec specifying the scsi
    properties as input the old and new configs are defined in a dictionary.

    scsis_old_new
        List of dicts with 'old' and 'new' scsi controller properties
    current_disks
        List of dicts describing the currently attached disks; consulted
        when a controller must be re-created so its disks can be re-attached
    '''
    device_config_specs = []
    if scsis_old_new:
        devs = [scsi['old']['adapter'] for scsi in scsis_old_new]
        log.trace('Updating SCSI controllers %s', devs)
        for item in scsis_old_new:
            next_scsi = item['new']
            current_scsi = item['old']
            # Compare old vs new config; unset values are significant here.
            difference = recursive_diff(current_scsi, next_scsi)
            difference.ignore_unset_values = False
            if difference.changed():
                log.trace('Virtual machine scsi device will be updated '
                          'key=%s bus_number=%s type=%s bus_sharing=%s',
                          current_scsi['key'],
                          current_scsi['bus_number'],
                          next_scsi['type'],
                          next_scsi['bus_sharing'])
                # The sharedBus property is not optional
                # The type can only be updated if we delete the original
                # controller, create a new one with the properties and then
                # attach the disk object to the newly created controller, even
                # though the controller key stays the same the last step is
                # mandatory
                if next_scsi['type'] != current_scsi['type']:
                    # Delete + re-add the controller, then re-attach (edit)
                    # every disk that was hanging off the old controller.
                    device_config_specs.append(
                        _delete_device(current_scsi['object']))
                    device_config_specs.append(_apply_scsi_controller(
                        current_scsi['adapter'],
                        next_scsi['type'],
                        next_scsi['bus_sharing'],
                        current_scsi['key'],
                        current_scsi['bus_number'], 'add'))
                    disks_to_update = []
                    for disk_key in current_scsi['device']:
                        disk_objects = \
                            [disk['object'] for disk in current_disks]
                        disks_to_update.append(
                            _get_device_by_key(disk_objects, disk_key))
                    for current_disk in disks_to_update:
                        disk_spec = vim.vm.device.VirtualDeviceSpec()
                        disk_spec.device = current_disk
                        disk_spec.operation = 'edit'
                        device_config_specs.append(disk_spec)
                else:
                    # Same controller type: a plain in-place edit suffices.
                    device_config_specs.append(_apply_scsi_controller(
                        current_scsi['adapter'],
                        current_scsi['type'],
                        next_scsi['bus_sharing'],
                        current_scsi['key'],
                        current_scsi['bus_number'], 'edit'))
    return device_config_specs
|
Returns a list of vim.vm.device.VirtualDeviceSpec specifying the scsi
properties as input the old and new configs are defined in a dictionary.
scsi_diffs
List of old and new scsi properties
|
def _get_subcats(self, recurse=False):
    """ Get the subcategories of this category

    recurse -- whether to include their subcategories as well

    Returns a sorted list of Category objects; without recursion only the
    immediate child directories are returned (deduplicated).
    """
    if recurse:
        # No need to filter
        return sorted([Category(e) for e in self._subcats_recursive],
                      key=lambda c: c.sort_breadcrumb)
    # get all the subcategories, with only the first subdir added
    # number of path components to ingest (own depth + 1; the root
    # category has an empty path, hence the special case of 1)
    parts = len(self.path.split('/')) + 1 if self.path else 1
    # convert the subcategories into separated pathlists with only 'parts'
    # parts
    subcats = [c.split('/')[:parts] for c in self._subcats_recursive]
    # join them back into a path, and make unique
    subcats = {'/'.join(c) for c in subcats}
    # convert to a bunch of Category objects
    return sorted([Category(c) for c in subcats], key=lambda c: c.sort_name or c.name)
|
Get the subcategories of this category
recurse -- whether to include their subcategories as well
|
def reindex_model_on_save(sender, document, **kwargs):
    '''(Re/Un)Index Mongo document on post_save'''
    if not current_app.config.get('AUTO_INDEX'):
        return
    reindex.delay(document)
|
(Re/Un)Index Mongo document on post_save
|
def dataframe_to_smp(dataframe, smp_filename, name_col="name",
                     datetime_col="datetime", value_col="value",
                     datetime_format="dd/mm/yyyy",
                     value_format="{0:15.6E}",
                     max_name_len=12):
    """Write a dataframe as an smp file.

    Parameters
    ----------
    dataframe : pandas.DataFrame
    smp_filename : str or file-like
        smp file to write
    name_col : str
        the column in the dataframe that marks the site name
    datetime_col : str
        the column in the dataframe that is a datetime instance
    value_col : str
        the column in the dataframe that is the values
    datetime_format : str
        either 'dd/mm/yyyy' or 'mm/dd/yyyy'
    value_format : str
        a python float-compatible format
    max_name_len : int
        maximum number of characters kept from each site name

    Notes
    -----
    Temporarily adds (and then removes) a 'datetime_str' column to
    ``dataframe``.
    """
    # BUGFIX: key the formatters by the caller-supplied column names; the
    # previous hard-coded "name"/"value" keys silently skipped formatting
    # whenever non-default column names were used.
    formatters = {name_col: lambda x: "{0:<20s}".format(str(x)[:max_name_len]),
                  value_col: lambda x: value_format.format(x)}
    if datetime_format.lower().startswith("d"):
        dt_fmt = "%d/%m/%Y %H:%M:%S"
    elif datetime_format.lower().startswith("m"):
        dt_fmt = "%m/%d/%Y %H:%M:%S"
    else:
        raise Exception("unrecognized datetime_format: " +
                        "{0}".format(str(datetime_format)))
    for col in [name_col, datetime_col, value_col]:
        assert col in dataframe.columns
    # BUGFIX: use datetime_col, not a hard-coded "datetime" column.
    dataframe.loc[:, "datetime_str"] = dataframe.loc[:, datetime_col].\
        apply(lambda x: x.strftime(dt_fmt))
    opened_here = False
    if isinstance(smp_filename, str):
        smp_filename = open(smp_filename, 'w')
        opened_here = True  # close what we open, to avoid leaking the handle
    try:
        # need this to remove the leading space that pandas puts in front
        s = dataframe.loc[:, [name_col, "datetime_str", value_col]].\
            to_string(col_space=0,
                      formatters=formatters,
                      justify=None,
                      header=False,
                      index=False)
        for ss in s.split('\n'):
            smp_filename.write("{0:<s}\n".format(ss.strip()))
    finally:
        if opened_here:
            smp_filename.close()
        # Always restore the caller's dataframe to its original columns.
        dataframe.pop("datetime_str")
|
write a dataframe as an smp file
Parameters
----------
dataframe : pandas.DataFrame
smp_filename : str
smp file to write
name_col: str
the column in the dataframe that marks the site name
datetime_col: str
the column in the dataframe that is a datetime instance
value_col: str
the column in the dataframe that is the values
datetime_format: str
either 'dd/mm/yyyy' or 'mm/dd/yyyy'
value_format: str
a python float-compatible format
|
def copyKeyMultipart(srcBucketName, srcKeyName, srcKeyVersion, dstBucketName, dstKeyName, sseAlgorithm=None, sseKey=None,
                     copySourceSseAlgorithm=None, copySourceSseKey=None):
    """
    Copies a key from a source key to a destination key in multiple parts. Note that if the
    destination key exists it will be overwritten implicitly, and if it does not exist a new
    key will be created. If the destination bucket does not exist an error will be raised.

    :param str srcBucketName: The name of the bucket to be copied from.
    :param str srcKeyName: The name of the key to be copied from.
    :param str srcKeyVersion: The version of the key to be copied from.
    :param str dstBucketName: The name of the destination bucket for the copy.
    :param str dstKeyName: The name of the destination key that will be created or overwritten.
    :param str sseAlgorithm: Server-side encryption algorithm for the destination.
    :param str sseKey: Server-side encryption key for the destination.
    :param str copySourceSseAlgorithm: Server-side encryption algorithm for the source.
    :param str copySourceSseKey: Server-side encryption key for the source.

    :rtype: str
    :return: The version of the copied file (or None if versioning is not enabled for dstBucket).
    """
    resource = boto3.resource('s3')
    destination = resource.Bucket(oldstr(dstBucketName)).Object(oldstr(dstKeyName))

    source = {'Bucket': oldstr(srcBucketName), 'Key': oldstr(srcKeyName)}
    if srcKeyVersion is not None:
        source['VersionId'] = oldstr(srcKeyVersion)

    # boto3 rejects explicit None values for these parameters, so the
    # argument dicts are built only from options that were actually given.
    destEncryption = {}
    if sseKey is not None:
        destEncryption['SSECustomerAlgorithm'] = sseAlgorithm
        destEncryption['SSECustomerKey'] = sseKey

    copyEncryption = {}
    if copySourceSseKey is not None:
        copyEncryption['CopySourceSSECustomerAlgorithm'] = copySourceSseAlgorithm
        copyEncryption['CopySourceSSECustomerKey'] = copySourceSseKey
    copyEncryption.update(destEncryption)

    destination.copy(source, ExtraArgs=copyEncryption)

    # boto3's managed copy does not report the destination version it wrote,
    # so it is queried immediately afterwards; a concurrent writer could
    # still sneak in between the copy and this HEAD request.
    info = boto3.client('s3').head_object(Bucket=destination.bucket_name,
                                          Key=destination.key,
                                          **destEncryption)
    return info.get('VersionId', None)
|
Copies a key from a source key to a destination key in multiple parts. Note that if the
destination key exists it will be overwritten implicitly, and if it does not exist a new
key will be created. If the destination bucket does not exist an error will be raised.
:param str srcBucketName: The name of the bucket to be copied from.
:param str srcKeyName: The name of the key to be copied from.
:param str srcKeyVersion: The version of the key to be copied from.
:param str dstBucketName: The name of the destination bucket for the copy.
:param str dstKeyName: The name of the destination key that will be created or overwritten.
:param str sseAlgorithm: Server-side encryption algorithm for the destination.
:param str sseKey: Server-side encryption key for the destination.
:param str copySourceSseAlgorithm: Server-side encryption algorithm for the source.
:param str copySourceSseKey: Server-side encryption key for the source.
:rtype: str
:return: The version of the copied file (or None if versioning is not enabled for dstBucket).
|
def _set_tieBreaking(self, v, load=False):
  """
  Setter method for tieBreaking, mapped from YANG variable /brocade_mpls_rpc/show_mpls_te_path/input/tieBreaking (tie-breaking)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_tieBreaking is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_tieBreaking() directly.

  YANG Description: Tie breaking mode for CSPF when multiple paths to destination exists
  """
  # Values that carry their own YANG union type are unwrapped first so the
  # validation below sees the underlying native value.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # YANGDynClass validates the value against the YANG enumeration
    # (most-fill / random / least-fill); any other value raises below.
    t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'most-fill': {'value': 2}, u'random': {'value': 0}, u'least-fill': {'value': 1}},), is_leaf=True, yang_name="tieBreaking", rest_name="tieBreaking", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='tie-breaking', is_config=True)
  except (TypeError, ValueError):
    # Re-raise as a structured ValueError carrying the YANG type metadata.
    raise ValueError({
      'error-string': """tieBreaking must be of a type compatible with tie-breaking""",
      'defined-type': "brocade-mpls:tie-breaking",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'most-fill': {'value': 2}, u'random': {'value': 0}, u'least-fill': {'value': 1}},), is_leaf=True, yang_name="tieBreaking", rest_name="tieBreaking", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='tie-breaking', is_config=True)""",
    })

  self.__tieBreaking = t
  # Notify the parent object (when present) that a child value changed.
  if hasattr(self, '_set'):
    self._set()
|
Setter method for tieBreaking, mapped from YANG variable /brocade_mpls_rpc/show_mpls_te_path/input/tieBreaking (tie-breaking)
If this variable is read-only (config: false) in the
source YANG file, then _set_tieBreaking is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tieBreaking() directly.
YANG Description: Tie breaking mode for CSPF when multiple paths to destination exists
|
def fit(self, Xs=None, ys=None, Xt=None, yt=None):
    """Build a coupling matrix from source and target sets of samples
    (Xs, ys) and (Xt, yt)

    Parameters
    ----------
    Xs : array-like, shape (n_source_samples, n_features)
        The training input samples.
    ys : array-like, shape (n_source_samples,)
        The class labels
    Xt : array-like, shape (n_target_samples, n_features)
        The training input samples.
    yt : array-like, shape (n_target_samples,)
        The class labels. If some target samples are unlabeled, fill the
        yt's elements with -1.

        Warning: Note that, due to this convention -1 cannot be used as a
        class label

    Returns
    -------
    self : object
        Returns self.
    """
    # check the necessary inputs parameters are here
    if check_params(Xs=Xs, Xt=Xt):
        # pairwise distance
        self.cost_ = dist(Xs, Xt, metric=self.metric)
        self.cost_ = cost_normalization(self.cost_, self.norm)

        if (ys is not None) and (yt is not None):

            # np.inf instead of np.infty: the latter alias was removed in
            # NumPy 2.0; behaviour is identical on older NumPy versions.
            if self.limit_max != np.inf:
                self.limit_max = self.limit_max * np.max(self.cost_)

            # assumes labeled source samples occupy the first rows
            # and labeled target samples occupy the first columns
            classes = [c for c in np.unique(ys) if c != -1]

            for c in classes:
                idx_s = np.where((ys != c) & (ys != -1))
                idx_t = np.where(yt == c)

                # all the coefficients corresponding to a source sample
                # and a target sample :
                # with different labels get a infinite
                for j in idx_t[0]:
                    self.cost_[idx_s[0], j] = self.limit_max

        # distribution estimation
        self.mu_s = self.distribution_estimation(Xs)
        self.mu_t = self.distribution_estimation(Xt)

        # store arrays of samples
        self.xs_ = Xs
        self.xt_ = Xt

    return self
|
Build a coupling matrix from source and target sets of samples
(Xs, ys) and (Xt, yt)
Parameters
----------
Xs : array-like, shape (n_source_samples, n_features)
The training input samples.
ys : array-like, shape (n_source_samples,)
The class labels
Xt : array-like, shape (n_target_samples, n_features)
The training input samples.
yt : array-like, shape (n_target_samples,)
The class labels. If some target samples are unlabeled, fill the
yt's elements with -1.
Warning: Note that, due to this convention -1 cannot be used as a
class label
Returns
-------
self : object
Returns self.
|
def process_json(filename):
    """
    Converts a DOE CODE .json file into DOE CODE projects

    Yields DOE CODE records from a DOE CODE .json file

    :param filename: path to a DOE CODE .json export containing a top-level
        "records" list
    """
    logger.debug('Processing DOE CODE json: %s', filename)

    # Use a context manager so the file handle is closed promptly; the
    # original passed an open() result straight to json.load and leaked the
    # descriptor until garbage collection.
    with open(filename) as json_file:
        doecode_json = json.load(json_file)

    for record in doecode_json['records']:
        yield record
|
Converts a DOE CODE .json file into DOE CODE projects
Yields DOE CODE records from a DOE CODE .json file
|
def verify(password_hash, password):
    """
    Compare a stored scryptsalsa208sha256 modular-crypt hash against a
    candidate password and report whether they match.

    :param password_hash: bytes
    :param password: bytes
    :rtype: boolean

    .. versionadded:: 1.2
    """
    # Reject hashes of the wrong length up front with a library ValueError.
    ensure(len(password_hash) == PWHASH_SIZE,
           "The password hash must be exactly %s bytes long" %
           nacl.bindings.crypto_pwhash_scryptsalsa208sha256_STRBYTES,
           raising=exc.ValueError)

    # Delegate the constant-time comparison to libsodium.
    checker = nacl.bindings.crypto_pwhash_scryptsalsa208sha256_str_verify
    return checker(password_hash, password)
|
Takes the output of scryptsalsa208sha256 and compares it against
a user provided password to see if they are the same
:param password_hash: bytes
:param password: bytes
:rtype: boolean
.. versionadded:: 1.2
|
def ogr2ogr(src, dst, options):
    """
    a simple wrapper for gdal.VectorTranslate aka `ogr2ogr <https://www.gdal.org/ogr2ogr.html>`_

    Parameters
    ----------
    src: str or :osgeo:class:`ogr.DataSource`
        the input data set
    dst: str
        the output data set
    options: dict
        additional parameters passed to gdal.VectorTranslate;
        see `gdal.VectorTranslateOptions <http://gdal.org/python/osgeo.gdal-module.html#VectorTranslateOptions>`_

    Returns
    -------
    None
    """
    out = gdal.VectorTranslate(dst, src, options=gdal.VectorTranslateOptions(**options))
    # Rebinding to None drops the last reference to the dataset, which is how
    # GDAL flushes and closes the output file; do not remove this line.
    out = None
|
a simple wrapper for gdal.VectorTranslate aka `ogr2ogr <https://www.gdal.org/ogr2ogr.html>`_
Parameters
----------
src: str or :osgeo:class:`ogr.DataSource`
the input data set
dst: str
the output data set
options: dict
additional parameters passed to gdal.VectorTranslate;
see `gdal.VectorTranslateOptions <http://gdal.org/python/osgeo.gdal-module.html#VectorTranslateOptions>`_
Returns
-------
|
def create_waveform_generator(variable_params, data,
                              recalibration=None, gates=None,
                              **static_params):
    """Creates a waveform generator for use with a model.

    Parameters
    ----------
    variable_params : list of str
        The names of the parameters varied.
    data : dict
        Dictionary mapping detector names to either a
        :py:class:`<pycbc.types.TimeSeries TimeSeries>` or
        :py:class:`<pycbc.types.FrequencySeries FrequencySeries>`.
    recalibration : dict, optional
        Dictionary mapping detector names to
        :py:class:`<pycbc.calibration.Recalibrate>` instances for
        recalibrating data.
    gates : dict of tuples, optional
        Dictionary of detectors -> tuples of specifying gate times. The
        sort of thing returned by :py:func:`pycbc.gate.gates_from_cli`.

    Returns
    -------
    pycbc.waveform.FDomainDetFrameGenerator
        A waveform generator for frequency domain generation.
    """
    # the approximant determines which generator class to wrap
    try:
        approximant = static_params['approximant']
    except KeyError:
        raise ValueError("no approximant provided in the static args")
    generator_function = generator.select_waveform_generator(approximant)

    # pull the sampling parameters off the first detector's data, then insist
    # that every other detector agrees with it
    delta_f = None
    for strain in data.values():
        if delta_f is None:
            delta_f = strain.delta_f
            delta_t = strain.delta_t
            start_time = strain.start_time
        elif not (strain.delta_f == delta_f and
                  strain.delta_t == delta_t and
                  strain.start_time == start_time):
            raise ValueError("data must all have the same delta_t, "
                             "delta_f, and start_time")

    return generator.FDomainDetFrameGenerator(
        generator_function, epoch=start_time,
        variable_args=variable_params, detectors=list(data.keys()),
        delta_f=delta_f, delta_t=delta_t,
        recalib=recalibration, gates=gates,
        **static_params)
|
Creates a waveform generator for use with a model.
Parameters
----------
variable_params : list of str
The names of the parameters varied.
data : dict
Dictionary mapping detector names to either a
:py:class:`<pycbc.types.TimeSeries TimeSeries>` or
:py:class:`<pycbc.types.FrequencySeries FrequencySeries>`.
recalibration : dict, optional
Dictionary mapping detector names to
:py:class:`<pycbc.calibration.Recalibrate>` instances for
recalibrating data.
gates : dict of tuples, optional
Dictionary of detectors -> tuples of specifying gate times. The
sort of thing returned by :py:func:`pycbc.gate.gates_from_cli`.
Returns
-------
pycbc.waveform.FDomainDetFrameGenerator
A waveform generator for frequency domain generation.
|
def network_profile_name_list(self, obj):
    """Get AP profile names.

    Queries the native WLAN API for every profile stored for the interface
    identified by ``obj['guid']``.

    :param obj: dict whose 'guid' entry is the ctypes GUID of the interface.
    :return: list of profile name strings.
    """
    profile_list = pointer(WLAN_PROFILE_INFO_LIST())
    self._wlan_get_profile_list(self._handle,
                                byref(obj['guid']),
                                byref(profile_list))
    profiles = cast(profile_list.contents.ProfileInfo,
                    POINTER(WLAN_PROFILE_INFO))
    profile_name_list = []
    for i in range(profile_list.contents.dwNumberOfItems):
        # ''.join replaces the original quadratic char-by-char concatenation;
        # it yields the same string whether strProfileName behaves as a str
        # or as an indexable wchar array.
        profile_name = ''.join(profiles[i].strProfileName[j]
                               for j in range(len(profiles[i].strProfileName)))
        profile_name_list.append(profile_name)
    return profile_name_list
|
Get AP profile names.
|
def _all_recall_native_type(self, data, ptitem, prefix):
    """Checks if loaded data has the type it was stored in. If not converts it.

    :param data: Data item to be checked and converted
    :param ptitem: HDf5 Node or Leaf from where data was loaded
    :param prefix: Prefix for recalling the data type from the hdf5 node attributes
    :return:
        Tuple, first item is the (converted) `data` item, second boolean whether
        item was converted or not.
    """
    typestr = self._all_get_from_attrs(ptitem, prefix + HDF5StorageService.SCALAR_TYPE)
    colltype = self._all_get_from_attrs(ptitem, prefix + HDF5StorageService.COLL_TYPE)
    type_changed = False

    # Check what the original data type was from the hdf5 node attributes
    if colltype == HDF5StorageService.COLL_SCALAR:
        # Here data item was a scalar
        if isinstance(data, np.ndarray):
            # If we recall a numpy scalar, pytables loads a 1d array :-/
            # So we have to change it to a real scalar value
            data = np.array([data])[0]
            type_changed = True

        if typestr is not None:
            # Check if current type and stored type match
            # if not convert the data
            if typestr != type(data).__name__:
                if typestr == str.__name__:
                    data = data.decode(self._encoding)
                else:
                    try:
                        data = pypetconstants.PARAMETERTYPEDICT[typestr](data)
                    except KeyError:
                        # For compatibility with files from older pypet versions
                        data = pypetconstants.COMPATPARAMETERTYPEDICT[typestr](data)
                type_changed = True

    elif (colltype == HDF5StorageService.COLL_TUPLE or
          colltype == HDF5StorageService.COLL_LIST):
        # Here data item was originally a tuple or a list
        # BUGFIX: the original tested `type is not tuple`, which compares the
        # *builtin* `type` against `tuple` (always True), so tuples were
        # needlessly copied to a list and flagged as converted even when
        # nothing changed.
        if type(data) is not list and type(data) is not tuple:
            # If the original type cannot be recalled, first convert it to a list
            type_changed = True
            data = list(data)

        if len(data) > 0:
            first_item = data[0]
            # Check if the type of the first item was conserved
            if not typestr == type(first_item).__name__:
                if not isinstance(data, list):
                    data = list(data)
                # If type was not conserved we need to convert all items
                # in the list or tuple
                for idx, item in enumerate(data):
                    if typestr == str.__name__:
                        data[idx] = data[idx].decode(self._encoding)
                    else:
                        try:
                            data[idx] = pypetconstants.PARAMETERTYPEDICT[typestr](item)
                        except KeyError:
                            # For compatibility with files from older pypet versions:
                            data[idx] = pypetconstants.COMPATPARAMETERTYPEDICT[typestr](item)
                type_changed = True

        if colltype == HDF5StorageService.COLL_TUPLE:
            # If it was originally a tuple we need to convert it back to tuple
            if type(data) is not tuple:
                data = tuple(data)
                type_changed = True

    elif colltype == HDF5StorageService.COLL_EMPTY_DICT:
        data = {}
        type_changed = True

    elif isinstance(data, np.ndarray):
        if typestr == str.__name__:
            data = np.core.defchararray.decode(data, self._encoding)
            type_changed = True

        if colltype == HDF5StorageService.COLL_MATRIX:
            # Here data item was originally a matrix
            data = np.matrix(data)
            type_changed = True

    return data, type_changed
|
Checks if loaded data has the type it was stored in. If not converts it.
:param data: Data item to be checked and converted
:param ptitem: HDf5 Node or Leaf from where data was loaded
:param prefix: Prefix for recalling the data type from the hdf5 node attributes
:return:
Tuple, first item is the (converted) `data` item, second boolean whether
item was converted or not.
|
def DictReader(ltsvfile, labels=None, dict_type=dict):
    """Make LTSV Reader for reading selected labels.

    :param ltsvfile: iterable of lines.
    :param labels: sequence of labels.
    :return: generator of record in {label: value, ...} form.
    """
    # Lazily wrap each parsed record in the requested mapping type.
    yield from map(dict_type, reader(ltsvfile, labels))
|
Make LTSV Reader for reading selected labels.
:param ltsvfile: iterable of lines.
:param labels: sequence of labels.
:return: generator of record in {label: value, ...} form.
|
def get_feature_variable_boolean(self, feature_key, variable_key, user_id, attributes=None):
    """ Returns value for a certain boolean variable attached to a feature flag.

    Args:
      feature_key: Key of the feature whose variable's value is being accessed.
      variable_key: Key of the variable whose value is to be accessed.
      user_id: ID for user.
      attributes: Dict representing user attributes.

    Returns:
      Boolean value of the variable. None if:
      - Feature key is invalid.
      - Variable key is invalid.
      - Mismatch with type of variable.
    """
    # Thin wrapper: delegate to the typed accessor with the boolean type tag.
    return self._get_feature_variable_for_type(
        feature_key, variable_key, entities.Variable.Type.BOOLEAN, user_id, attributes)
|
Returns value for a certain boolean variable attached to a feature flag.
Args:
feature_key: Key of the feature whose variable's value is being accessed.
variable_key: Key of the variable whose value is to be accessed.
user_id: ID for user.
attributes: Dict representing user attributes.
Returns:
Boolean value of the variable. None if:
- Feature key is invalid.
- Variable key is invalid.
- Mismatch with type of variable.
|
def __try_parse_number(self, string):
    """Try to parse a string to a number, else return False."""
    # Try the stricter conversion first so "42" comes back as an int.
    for convert in (int, float):
        try:
            return convert(string)
        except ValueError:
            continue
    return False
|
Try to parse a string to a number, else return False.
|
def block_compute(x_start, x_stop,
                  y_start, y_stop,
                  z_start, z_stop,
                  origin=(0, 0, 0),
                  block_size=(512, 512, 16)):
    """
    Get bounding box coordinates (in 3D) of small cutouts to request in
    order to reconstitute a larger cutout.

    Arguments:
        x_start (int): The lower bound of dimension x
        x_stop (int): The upper bound of dimension x
        y_start (int): The lower bound of dimension y
        y_stop (int): The upper bound of dimension y
        z_start (int): The lower bound of dimension z
        z_stop (int): The upper bound of dimension z
        origin (tuple): Optional (x, y, z) point the blocks are aligned to
        block_size (tuple): Optional (x, y, z) extent of a single block

    Returns:
        [((x_start, x_stop), (y_start, y_stop), (z_start, z_stop)), ... ]
    """
    # The same boundary-splitting logic applied once per axis.
    x_slices = _axis_block_slices(x_start, x_stop, origin[0], block_size[0])
    y_slices = _axis_block_slices(y_start, y_stop, origin[1], block_size[1])
    z_slices = _axis_block_slices(z_start, z_stop, origin[2], block_size[2])

    # Cartesian product of the per-axis slices: one bounding box per chunk.
    chunks = []
    for x in x_slices:
        for y in y_slices:
            for z in z_slices:
                chunks.append((x, y, z))
    return chunks


def _axis_block_slices(start, stop, origin, size):
    """Split [start, stop) along one axis at block-aligned boundaries.

    Returns (start, stop) pairs in the same order as the historical
    implementation: interior block-aligned slices first, then the leading
    partial slice, then the trailing partial slice.
    """
    # Block boundaries that fall strictly inside (start, stop).
    bounds = [b for b in range(origin, stop + size, size) if start < b < stop]
    # `== 0` rather than the CPython-specific identity test `is 0` the
    # original used (SyntaxWarning on modern Python).
    if len(bounds) == 0:
        return [(start, stop)]
    slices = [(b, b + size) for b in bounds[:-1]]
    slices.append((start, bounds[0]))
    slices.append((bounds[-1], stop))
    return slices
|
Get bounding box coordinates (in 3D) of small cutouts to request in
order to reconstitute a larger cutout.
Arguments:
x_start (int): The lower bound of dimension x
x_stop (int): The upper bound of dimension x
y_start (int): The lower bound of dimension y
y_stop (int): The upper bound of dimension y
z_start (int): The lower bound of dimension z
z_stop (int): The upper bound of dimension z
Returns:
[((x_start, x_stop), (y_start, y_stop), (z_start, z_stop)), ... ]
|
def d3logpdf_dlink3(self, link_f, y, Y_metadata=None):
    """
    Third order derivative log-likelihood function at y given link(f) w.r.t link(f)

    .. math::
        \\frac{d^{3} \\ln p(y_{i}|\\lambda(f_{i}))}{d^{3}\\lambda(f)} = 0

    :param link_f: latent variables link(f)
    :type link_f: Nx1 array
    :param y: data
    :type y: Nx1 array
    :param Y_metadata: Y_metadata not used in gaussian
    :returns: third derivative of log likelihood evaluated at points link(f)
    :rtype: Nx1 array
    """
    # For the Gaussian likelihood the third derivative vanishes everywhere,
    # so simply return zeros of the appropriate (N, D) shape.
    return np.zeros((y.shape[0], link_f.shape[1]))
|
Third order derivative log-likelihood function at y given link(f) w.r.t link(f)
.. math::
\\frac{d^{3} \\ln p(y_{i}|\\lambda(f_{i}))}{d^{3}\\lambda(f)} = 0
:param link_f: latent variables link(f)
:type link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata not used in gaussian
:returns: third derivative of log likelihood evaluated at points link(f)
:rtype: Nx1 array
|
def loc(lexer: Lexer, start_token: Token) -> Optional[Location]:
    """Return a location object.

    Used to identify the place in the source that created a given parsed object.
    """
    # Guard clause: location tracking can be disabled on the lexer.
    if lexer.no_location:
        return None
    end_token = lexer.last_token
    return Location(
        start_token.start, end_token.end, start_token, end_token, lexer.source
    )
|
Return a location object.
Used to identify the place in the source that created a given parsed object.
|
def union(self, sig: Scope) -> Scope:
    """Return a new Scope holding the union of this scope and *sig*."""
    # Copy our signatures into a fresh Scope, then merge in the other set.
    merged = Scope(sig=self._hsig.values(), state=self.state)
    merged |= sig
    return merged
|
Create a new Set produce by the union of 2 Set
|
def filer(filelist):
    """
    Helper script that creates a dictionary of the strain name: /sequencepath/strain_name.extension
    :param filelist: list of files to parse
    :return filedict: dictionary of strain name: /sequencepath/strain_name.extension
    """
    # Strip the path and the final extension from each file name to get the
    # strain name; later duplicates overwrite earlier ones, as before.
    return {os.path.splitext(os.path.basename(seqfile))[0]: seqfile
            for seqfile in filelist}
|
Helper script that creates a dictionary of the strain name: /sequencepath/strain_name.extension
:param filelist: list of files to parse
:return filedict: dictionary of strain name: /sequencepath/strain_name.extension
|
def initialize_tracer(self, io_loop=None):
    """
    Initialize Jaeger Tracer based on the passed `jaeger_client.Config`.
    Save it to `opentracing.tracer` global variable.
    Only the first call to this method has any effect.

    :param io_loop: optional IO loop handed to the new tracer.
    :return: the created tracer, or None when one was already initialized.
    """
    # The class-level lock/flag pair makes initialization happen at most once
    # process-wide; the whole construction stays under the lock, as before.
    with Config._initialized_lock:
        if Config._initialized:
            # logging.Logger.warn is a deprecated alias of .warning
            logger.warning('Jaeger tracer already initialized, skipping')
            return
        Config._initialized = True

        tracer = self.new_tracer(io_loop)
        self._initialize_global_tracer(tracer=tracer)
        return tracer
|
Initialize Jaeger Tracer based on the passed `jaeger_client.Config`.
Save it to `opentracing.tracer` global variable.
Only the first call to this method has any effect.
|
def runDia(diagram):
    """Generate the diagrams using Dia."""
    # Input/output file names are derived from the diagram base name.
    source = '{}.dia'.format(diagram)
    target = '{}.png'.format(diagram)
    cmd = 'dia -t png-libart -e {} {}'.format(target, source)
    print(' {}'.format(cmd))
    # Exit status is deliberately ignored; callers only check for True.
    subprocess.call(cmd, shell=True)
    return True
|
Generate the diagrams using Dia.
|
def edit(self, obj_id, parameters, create_if_not_exists=False):
    """
    Edit an item with option to create if it doesn't exist

    :param obj_id: int
    :param create_if_not_exists: bool
    :param parameters: dict
    :return: dict|str
    """
    endpoint = '{url}/{id}/edit'.format(url=self.endpoint_url, id=obj_id)
    # PUT creates-or-replaces; PATCH only updates an existing item.
    send = (self._client.session.put if create_if_not_exists
            else self._client.session.patch)
    response = send(endpoint, data=parameters)
    return self.process_response(response)
|
Edit an item with option to create if it doesn't exist
:param obj_id: int
:param create_if_not_exists: bool
:param parameters: dict
:return: dict|str
|
def ns(self):
    """Get the namespace of a node """
    # Wrap the raw libxml2 namespace pointer, or report its absence as None.
    raw = libxml2mod.xmlNodeGetNs(self._o)
    if raw is None:
        return None
    return xmlNs(_obj=raw)
|
Get the namespace of a node
|
def minOpar(self,dangle,tdisrupt=None,_return_raw=False):
    """
    NAME:
       minOpar
    PURPOSE:
       return the approximate minimum parallel frequency at a given angle
    INPUT:
       dangle - parallel angle
       tdisrupt= (None) disruption time; defaults to self._tdisrupt
       _return_raw= (False) if True, return the tuple (lowbindx, lowx[lowbindx])
                    instead of the frequency itself
    OUTPUT:
       minimum frequency that gets to this parallel angle
    HISTORY:
       2015-12-28 - Written - Bovy (UofT)
    """
    if tdisrupt is None: tdisrupt= self._tdisrupt
    # First construct the breakpoints for this dangle
    Oparb= (dangle-self._kick_interpdOpar_poly.x[:-1])/self._timpact
    # Find the lower limit of the integration in the pw-linear-kick approx.
    # NOTE(review): this expression assumes the stored kick representation
    # (self._kick_interpdOpar_poly) is piecewise *linear*, i.e. only the
    # c[-2] (slope) and c[-1] (offset) coefficient rows matter -- confirm
    # against where that polynomial is constructed.
    lowx= ((Oparb-self._kick_interpdOpar_poly.c[-1])\
               *(tdisrupt-self._timpact)+Oparb*self._timpact-dangle)\
               /((tdisrupt-self._timpact)\
                     *(1.+self._kick_interpdOpar_poly.c[-2]*self._timpact)\
                     +self._timpact)
    # Negative solutions are unphysical; push them out of the minimum search.
    lowx[lowx < 0.]= numpy.inf
    lowbindx= numpy.argmin(lowx)
    if _return_raw:
        return (lowbindx,lowx[lowbindx])
    else:
        return Oparb[lowbindx]-lowx[lowbindx]
|
NAME:
minOpar
PURPOSE:
return the approximate minimum parallel frequency at a given angle
INPUT:
dangle - parallel angle
OUTPUT:
minimum frequency that gets to this parallel angle
HISTORY:
2015-12-28 - Written - Bovy (UofT)
|
def request(self, method, url, erc, **kwargs):
    """Abstract base method for making requests to the Webex Teams APIs.

    This base method:
        * Expands the API endpoint URL to an absolute URL
        * Makes the actual HTTP request to the API endpoint
        * Provides support for Webex Teams rate-limiting
        * Inspects response codes and raises exceptions as appropriate

    Args:
        method(basestring): The request-method type ('GET', 'POST', etc.).
        url(basestring): The URL of the API endpoint to be called.
        erc(int): The expected response code that should be returned by the
            Webex Teams API endpoint to indicate success.
        **kwargs: Passed on to the requests package.

    Returns:
        The response object for the successful request.

    Raises:
        ApiError: If anything other than the expected response code is
            returned by the Webex Teams API endpoint.

    """
    # Ensure the url is an absolute URL
    abs_url = self.abs_url(url)

    # Update request kwargs with session defaults
    kwargs.setdefault('timeout', self.single_request_timeout)

    # Retry loop: only a RateLimitError with automatic handling enabled
    # re-enters the loop; every other outcome returns or raises.
    while True:
        # Make the HTTP request to the API endpoint
        response = self._req_session.request(method, abs_url, **kwargs)

        try:
            # Check the response code for error conditions
            check_response_code(response, erc)
        except RateLimitError as e:
            # Catch rate-limit errors
            # Wait and retry if automatic rate-limit handling is enabled
            if self.wait_on_rate_limit:
                warnings.warn(RateLimitWarning(response))
                # The server's Retry-After interval drives the back-off.
                time.sleep(e.retry_after)
                continue
            else:
                # Re-raise the RateLimitError
                raise
        else:
            return response
|
Abstract base method for making requests to the Webex Teams APIs.
This base method:
* Expands the API endpoint URL to an absolute URL
* Makes the actual HTTP request to the API endpoint
* Provides support for Webex Teams rate-limiting
* Inspects response codes and raises exceptions as appropriate
Args:
method(basestring): The request-method type ('GET', 'POST', etc.).
url(basestring): The URL of the API endpoint to be called.
erc(int): The expected response code that should be returned by the
Webex Teams API endpoint to indicate success.
**kwargs: Passed on to the requests package.
Raises:
ApiError: If anything other than the expected response code is
returned by the Webex Teams API endpoint.
|
def compose(*funcs):
    """Compose `funcs` to a single function.

    >>> compose(operator.abs, operator.add)(-2,-3)
    5
    >>> compose()('nada')
    'nada'
    >>> compose(sorted, set, partial(filter, None))(range(3)[::-1]*2)
    [1, 2]
    """
    # slightly optimized for most common cases and hence verbose
    if len(funcs) == 2: f0,f1=funcs; return lambda *a,**kw: f0(f1(*a,**kw))
    elif len(funcs) == 3: f0,f1,f2=funcs; return lambda *a,**kw: f0(f1(f2(*a,**kw)))
    elif len(funcs) == 0: return lambda x:x # XXX single kwarg
    elif len(funcs) == 1: return funcs[0]
    else:
        def composed(*args,**kwargs):
            y = funcs[-1](*args,**kwargs)
            # BUGFIX: the original iterated funcs[:0:-1], which applied the
            # innermost function twice and never applied funcs[0]; the slice
            # below walks the remaining functions from innermost to outermost.
            for f in funcs[-2::-1]: y = f(y)
            return y
        return composed
|
Compose `funcs` to a single function.
>>> compose(operator.abs, operator.add)(-2,-3)
5
>>> compose()('nada')
'nada'
>>> compose(sorted, set, partial(filter, None))(range(3)[::-1]*2)
[1, 2]
|
def _finalize(self):
    """Dump all traces plus the sampler state to ``self.filename`` via pickle.

    Deliberately best-effort: if the traces/state attributes are missing
    (e.g. sampling never ran), the AttributeError is swallowed and nothing
    is written -- callers rely on this not raising.
    """
    container = {}
    try:
        for name in self._traces:
            container[name] = self._traces[name]._trace
        container['_state_'] = self._state_
        # `with` guarantees the handle is closed even when pickling fails,
        # fixing the descriptor leak in the original open()/close() pair.
        with open(self.filename, 'w+b') as file:
            std_pickle.dump(container, file)
    except AttributeError:
        pass
|
Dump traces using cPickle.
|
def generate(ast_tree: ast.Tree, model_name: str):
    """
    :param ast_tree: AST to generate from
    :param model_name: class to generate
    :return: sympy source code for model
    """
    # Flatten a deep copy of the tree so the caller's AST stays untouched.
    target = ast.ComponentRef.from_string(model_name)
    flat_tree = flatten(copy.deepcopy(ast_tree), target)

    # Walk the flattened tree with the sympy code generator and return the
    # source it produced for the root.
    sympy_gen = SympyGenerator()
    TreeWalker().walk(sympy_gen, flat_tree)
    return sympy_gen.src[flat_tree]
|
:param ast_tree: AST to generate from
:param model_name: class to generate
:return: sympy source code for model
|
def pop_object(self, element):
    '''
    Redact the object element when it carries a higher TLP than allowed.

    :param element: dict describing the object; mutated in place.
    :return: the redacted element.
    '''
    redacted_text = "Redacted. Object contained TLP value higher than allowed."
    element['id'] = ''
    element['url'] = ''
    element['type'] = ''
    element['tags'] = []
    element['etlp'] = None
    element['title'] = redacted_text
    # tlpColor is intentionally preserved so consumers can still render the
    # classification colour (the original had a no-op self-assignment here).
    element['uploaded_on'] = ''
    element['uploaded_by'] = ''
    element['description'] = redacted_text
    element['children_types'] = []
    element['summary']['type'] = ''
    element['summary']['value'] = ''
    element['summary']['title'] = redacted_text
    element['summary']['description'] = redacted_text
    return element
|
Pop the object element if the object contains a higher TLP than allowed.
|
async def find_deleted(self, seq_set: SequenceSet,
                       selected: SelectedMailbox) -> Sequence[int]:
    """Return all the active message UIDs that have the ``\\Deleted`` flag.

    Args:
        seq_set: The sequence set of the possible messages.
        selected: The selected mailbox session.

    """
    flags = selected.session_flags
    uids = []
    async for _, msg in self.find(seq_set, selected):
        if Deleted in msg.get_flags(flags):
            uids.append(msg.uid)
    return uids
|
Return all the active message UIDs that have the ``\\Deleted`` flag.
Args:
seq_set: The sequence set of the possible messages.
selected: The selected mailbox session.
|
def summary(self):
    """
    Gets summary (e.g. residuals, deviance, pValues) of model on
    training set. An exception is thrown if
    `trainingSummary is None`.
    """
    # Guard clause: fail loudly when no summary was produced during fit.
    if not self.hasSummary:
        raise RuntimeError("No training summary available for this %s" %
                           self.__class__.__name__)
    java_summary = super(GeneralizedLinearRegressionModel, self).summary
    return GeneralizedLinearRegressionTrainingSummary(java_summary)
|
Gets summary (e.g. residuals, deviance, pValues) of model on
training set. An exception is thrown if
`trainingSummary is None`.
|
def load_commodities(self):
    """
    Load the commodities for Amounts in this object.
    """
    def as_amount(value):
        # Normalise either an Amount or a plain number into an Amount
        # string tagged with this object's currency.
        if isinstance(value, Amount):
            value = value.to_double()
        return Amount("{0:.8f} {1}".format(value, self.currency))

    self.fee = as_amount(self.fee)
    self.amount = as_amount(self.amount)
|
Load the commodities for Amounts in this object.
|
def messageRemote(self, cmdObj, consequence=None, **args):
    """
    Send a message to the peer identified by the target, via the
    given L{Command} object and arguments.

    @param cmdObj: a L{twisted.protocols.amp.Command}, whose serialized
    form will be the message.

    @param consequence: an L{IDeliveryConsequence} provider which will
    handle the result of this message (or None, if no response processing
    is desired).

    @param args: keyword arguments which match the C{cmdObj}'s arguments
    list.

    @return: L{None}
    """
    # Serialize the command name and its arguments into a single message box.
    box = cmdObj.makeArguments(args, self)
    box[COMMAND] = cmdObj.commandName
    payload = Value(AMP_MESSAGE_TYPE, box.serialize())
    self.queue.queueMessage(self.sender, self.target, payload, consequence)
|
Send a message to the peer identified by the target, via the
given L{Command} object and arguments.
@param cmdObj: a L{twisted.protocols.amp.Command}, whose serialized
form will be the message.
@param consequence: an L{IDeliveryConsequence} provider which will
handle the result of this message (or None, if no response processing
is desired).
@param args: keyword arguments which match the C{cmdObj}'s arguments
list.
@return: L{None}
|
def get_compression_filter(byte_counts):
    """Determine whether or not to use a compression on the array stored in
    a hierarchical data format, and which compression library to use to that purpose.

    Compression reduces the HDF5 file size and also helps improving I/O efficiency
    for large datasets.

    Parameters
    ----------
    byte_counts : int
        Size of the array in bytes; must be a positive integer.

    Returns
    -------
    FILTERS : instance of the tables.Filters class, or None
        Compression settings to hand to PyTables, or None when the array is
        deemed small enough relative to free memory to skip compression.
    """
    assert isinstance(byte_counts, numbers.Integral) and byte_counts > 0
    # Heuristic: compress when twice the payload exceeds the free memory
    # budget (memory()['free'] is presumably in KB given the x1000 — TODO confirm).
    if 2 * byte_counts > 1000 * memory()['free']:
        # Bug fix: 'tables.filters' is the *module*; the filter-spec class
        # is 'tables.Filters' (capitalized) — calling the module raised
        # TypeError instead of building a filter.
        try:
            FILTERS = tables.Filters(complevel=5, complib='blosc',
                                     shuffle=True, least_significant_digit=6)
        except tables.FiltersWarning:
            # Fall back to LZO when the Blosc library is unavailable.
            FILTERS = tables.Filters(complevel=5, complib='lzo',
                                     shuffle=True, least_significant_digit=6)
    else:
        FILTERS = None
    return FILTERS
|
Determine whether or not to use a compression on the array stored in
a hierarchical data format, and which compression library to use to that purpose.
Compression reduces the HDF5 file size and also helps improving I/O efficiency
for large datasets.
Parameters
----------
byte_counts : int
Returns
-------
FILTERS : instance of the tables.Filters class
|
def to_filespec(cls, args, root='', exclude=None):
    """Return a dict representation of this glob list, relative to the buildroot.

    The format of the dict is {'globs': [ 'list', 'of' , 'strings' ]
    (optional) 'exclude' : [{'globs' : ... }, ...] }

    The globs are in zglobs format.
    """
    result = {'globs': [os.path.join(root, arg) for arg in args]}
    if exclude:
        excludes = []
        # Each exclude entry is either an object exposing a 'filespec'
        # attribute or a plain iterable of glob strings.
        for item in exclude:
            if hasattr(item, 'filespec'):
                excludes.append(item.filespec)
            else:
                excludes.append(
                    {'globs': [os.path.join(root, g) for g in item]})
        result['exclude'] = excludes
    return result
|
Return a dict representation of this glob list, relative to the buildroot.
The format of the dict is {'globs': [ 'list', 'of' , 'strings' ]
(optional) 'exclude' : [{'globs' : ... }, ...] }
The globs are in zglobs format.
|
def split_iter(src, sep=None, maxsplit=None):
    """Splits an iterable based on a separator, *sep*, a max of
    *maxsplit* times (no max by default). *sep* can be:

    * a single value
    * an iterable of separators
    * a single-argument callable that returns True when a separator is
      encountered

    ``split_iter()`` yields lists of non-separator values. A separator will
    never appear in the output.

    >>> list(split_iter(['hi', 'hello', None, None, 'sup', None, 'soap', None]))
    [['hi', 'hello'], ['sup'], ['soap']]

    Note that ``split_iter`` is based on :func:`str.split`, so if
    *sep* is ``None``, ``split()`` **groups** separators. If empty lists
    are desired between two contiguous ``None`` values, simply use
    ``sep=[None]``:

    >>> list(split_iter(['hi', 'hello', None, None, 'sup', None]))
    [['hi', 'hello'], ['sup']]
    >>> list(split_iter(['hi', 'hello', None, None, 'sup', None], sep=[None]))
    [['hi', 'hello'], [], ['sup'], []]

    Using a callable separator:

    >>> falsy_sep = lambda x: not x
    >>> list(split_iter(['hi', 'hello', None, '', 'sup', False], falsy_sep))
    [['hi', 'hello'], [], ['sup'], []]

    See :func:`split` for a list-returning version.
    """
    if not is_iterable(src):
        raise TypeError('expected an iterable')
    if maxsplit is not None:
        maxsplit = int(maxsplit)
        if maxsplit == 0:
            # maxsplit=0 means "never split": the whole source is one group.
            yield [src]
            return
    # Normalize *sep* into a single-element predicate, sep_func.
    if callable(sep):
        sep_func = sep
    elif not is_scalar(sep):
        # An iterable of separators: frozenset gives O(1) membership tests.
        sep = frozenset(sep)
        sep_func = lambda x: x in sep
    else:
        sep_func = lambda x: x == sep
    cur_group = []
    split_count = 0
    for s in src:
        if maxsplit is not None and split_count >= maxsplit:
            # Max splits reached: stop treating anything as a separator.
            sep_func = lambda x: False
        if sep_func(s):
            if sep is None and not cur_group:
                # If sep is none, str.split() "groups" separators
                # check the str.split() docs for more info
                continue
            split_count += 1
            yield cur_group
            cur_group = []
        else:
            cur_group.append(s)
    # Mirror str.split: an explicit sep yields a trailing (possibly empty)
    # group, while whitespace-mode (sep=None) drops a trailing empty one.
    if cur_group or sep is not None:
        yield cur_group
    return
|
Splits an iterable based on a separator, *sep*, a max of
*maxsplit* times (no max by default). *sep* can be:
* a single value
* an iterable of separators
* a single-argument callable that returns True when a separator is
encountered
``split_iter()`` yields lists of non-separator values. A separator will
never appear in the output.
>>> list(split_iter(['hi', 'hello', None, None, 'sup', None, 'soap', None]))
[['hi', 'hello'], ['sup'], ['soap']]
Note that ``split_iter`` is based on :func:`str.split`, so if
*sep* is ``None``, ``split()`` **groups** separators. If empty lists
are desired between two contiguous ``None`` values, simply use
``sep=[None]``:
>>> list(split_iter(['hi', 'hello', None, None, 'sup', None]))
[['hi', 'hello'], ['sup']]
>>> list(split_iter(['hi', 'hello', None, None, 'sup', None], sep=[None]))
[['hi', 'hello'], [], ['sup'], []]
Using a callable separator:
>>> falsy_sep = lambda x: not x
>>> list(split_iter(['hi', 'hello', None, '', 'sup', False], falsy_sep))
[['hi', 'hello'], [], ['sup'], []]
See :func:`split` for a list-returning version.
|
def _try_redeem_disposable_app(file, client):
    """
    Attempt to redeem a one-time code registered on the client.

    Returns None when no code could be redeemed; otherwise a
    _BlotreDisposableApp built from the redeemed client.
    """
    redeemed = client.redeem_onetime_code(None)
    if redeemed is None:
        return None
    return _BlotreDisposableApp(file,
        redeemed.client,
        creds = redeemed.creds,
        config = redeemed.config)
|
Attempt to redeem a one-time code registered on the client.
|
def daily_pr_intensity(pr, thresh='1 mm/day', freq='YS'):
    r"""Average daily precipitation intensity

    Return the average precipitation over wet days.

    Parameters
    ----------
    pr : xarray.DataArray
      Daily precipitation [mm/d or kg/m²/s]
    thresh : str
      precipitation value over which a day is considered wet. Default : '1 mm/day'
    freq : str, optional
      Resampling frequency defining the periods
      defined in http://pandas.pydata.org/pandas-docs/stable/timeseries.html#resampling. Default : 'YS'

    Returns
    -------
    xarray.DataArray
      The average precipitation over wet days for each period

    Notes
    -----
    Let :math:`\mathbf{p} = p_0, p_1, \ldots, p_n` be the daily precipitation and :math:`thresh` be the precipitation
    threshold defining wet days. Then the daily precipitation intensity is defined as

    .. math::

       \frac{\sum_{i=0}^n p_i [p_i \geq thresh]}{\sum_{i=0}^n [p_i \geq thresh]}

    where :math:`[P]` is 1 if :math:`P` is true, and 0 if false.

    Examples
    --------
    The following would compute for each grid cell of file `pr.day.nc` the average
    precipitation fallen over days with precipitation >= 5 mm at seasonal
    frequency, ie DJF, MAM, JJA, SON, DJF, etc.:

    >>> pr = xr.open_dataset('pr.day.nc')
    >>> daily_int = daily_pr_intensity(pr, thresh='5 mm/day', freq="QS-DEC")
    """
    t = utils.convert_units_to(thresh, pr, 'hydro')
    # put pr=0 for non wet-days
    pr_wd = xr.where(pr >= t, pr, 0)
    pr_wd.attrs['units'] = pr.units
    # sum over wanted period
    s = pr_wd.resample(time=freq).sum(dim='time', keep_attrs=True)
    sd = utils.pint_multiply(s, 1 * units.day, 'mm')
    # get number of wetdays over period
    wd = wetdays(pr, thresh=thresh, freq=freq)
    return sd / wd
|
r"""Average daily precipitation intensity
Return the average precipitation over wet days.
Parameters
----------
pr : xarray.DataArray
Daily precipitation [mm/d or kg/m²/s]
thresh : str
precipitation value over which a day is considered wet. Default : '1 mm/day'
freq : str, optional
  Resampling frequency defining the periods
  defined in http://pandas.pydata.org/pandas-docs/stable/timeseries.html#resampling. Default : 'YS'
Returns
-------
xarray.DataArray
The average precipitation over wet days for each period
Notes
-----
Let :math:`\mathbf{p} = p_0, p_1, \ldots, p_n` be the daily precipitation and :math:`thresh` be the precipitation
threshold defining wet days. Then the daily precipitation intensity is defined as
.. math::
\frac{\sum_{i=0}^n p_i [p_i \geq thresh]}{\sum_{i=0}^n [p_i \geq thresh]}
where :math:`[P]` is 1 if :math:`P` is true, and 0 if false.
Examples
--------
The following would compute for each grid cell of file `pr.day.nc` the average
precipitation fallen over days with precipitation >= 5 mm at seasonal
frequency, ie DJF, MAM, JJA, SON, DJF, etc.:
>>> pr = xr.open_dataset('pr.day.nc')
>>> daily_int = daily_pr_intensity(pr, thresh='5 mm/day', freq="QS-DEC")
|
def path_exists(value,
                allow_empty = False,
                **kwargs):
    """Validate that ``value`` is a path-like object that exists on the local
    filesystem.

    :param value: The value to validate.

    :param allow_empty: If ``True``, returns :obj:`None <python:None>` if
      ``value`` is empty. If ``False``, raises a
      :class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
      if ``value`` is empty. Defaults to ``False``.
    :type allow_empty: :class:`bool <python:bool>`

    :returns: The file name represented by ``value``.
    :rtype: Path-like object / :obj:`None <python:None>`

    :raises EmptyValueError: if ``allow_empty`` is ``False`` and ``value``
      is empty
    :raises NotPathlikeError: if ``value`` is not a path-like object
    :raises PathExistsError: if ``value`` does not exist
    """
    if not value:
        if not allow_empty:
            raise errors.EmptyValueError('value (%s) was empty' % value)
        return None

    # Normalize/validate as a path first, then check for existence.
    value = path(value, force_run = True)                                       # pylint: disable=E1123

    if os.path.exists(value):
        return value

    raise errors.PathExistsError('value (%s) not found' % value)
|
Validate that ``value`` is a path-like object that exists on the local
filesystem.
:param value: The value to validate.
:param allow_empty: If ``True``, returns :obj:`None <python:None>` if
``value`` is empty. If ``False``, raises a
:class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
if ``value`` is empty. Defaults to ``False``.
:type allow_empty: :class:`bool <python:bool>`
:returns: The file name represented by ``value``.
:rtype: Path-like object / :obj:`None <python:None>`
:raises EmptyValueError: if ``allow_empty`` is ``False`` and ``value``
is empty
:raises NotPathlikeError: if ``value`` is not a path-like object
:raises PathExistsError: if ``value`` does not exist
|
async def _init_writer(self):
    """
    Open the current base file with the (original) mode and encoding,
    storing the resulting stream on ``self.stream``.

    No-op when the writer has already been initialized.
    """
    async with self._initialization_lock:
        if self.initialized:
            return
        self.stream = await aiofiles.open(
            file=self.absolute_file_path,
            mode=self.mode,
            encoding=self.encoding,
            loop=self.loop,
        )
|
Open the current base file with the (original) mode and encoding.
Return the resulting stream.
|
def aligned_covariance(fit, type='noise'):
    """
    Covariance rescaled so that eigenvectors sum to 1
    and rotated into data coordinates from PCA space
    """
    cov_matrix = fit._covariance_matrix(type)
    # Normalize in place by the Frobenius norm, then rotate out of PCA
    # space using the fit's axes.
    cov_matrix /= N.linalg.norm(cov_matrix)
    return dot(fit.axes, cov_matrix)
|
Covariance rescaled so that eigenvectors sum to 1
and rotated into data coordinates from PCA space
|
def getRemoteObject(self, busName, objectPath, interfaces=None,
                    replaceKnownInterfaces=False):
    """
    Creates a L{RemoteDBusObject} instance to represent the
    specified DBus object. If explicit interfaces are not
    supplied, DBus object introspection will be used to obtain
    them automatically.

    @type busName: C{string}
    @param busName: Name of the bus exporting the desired object

    @type objectPath: C{string}
    @param objectPath: DBus path of the desired object

    @type interfaces: None, C{string} or L{interface.DBusInterface} or a
                      list of C{string}/L{interface.DBusInterface}
    @param interfaces: May be None, a single value, or a list of string
                      interface names and/or instances of
                      L{interface.DBusInterface}. If None or any of the
                      specified interface names are unknown, full
                      introspection will be attempted. If interfaces
                      consists of solely of L{interface.DBusInterface}
                      instances and/or known interface names, no
                      introspection will be performed.

    @type replaceKnownInterfaces: C{bool}
    @param replaceKnownInterfaces: If True (defaults to False), any
                                  interfaces discovered during the
                                  introspection process will override any
                                  previous, cached values.

    @rtype: L{twisted.internet.defer.Deferred}
    @returns: A Deferred to the L{RemoteDBusObject} instance
    """
    # Key used to cache the resulting proxy in self._weakProxies below.
    weak_id = (busName, objectPath, interfaces)
    need_introspection = False
    required_interfaces = set()
    if interfaces is not None:
        ifl = []
        if not isinstance(interfaces, list):
            interfaces = [interfaces]
        for i in interfaces:
            if isinstance(i, interface.DBusInterface):
                # Already a resolved interface object: use it as-is.
                ifl.append(i)
                required_interfaces.add(i.name)
            else:
                required_interfaces.add(i)
                if i in interface.DBusInterface.knownInterfaces:
                    ifl.append(interface.DBusInterface.knownInterfaces[i])
                else:
                    # Unknown interface name: must fall back to introspection.
                    need_introspection = True
        if not need_introspection:
            # All interfaces resolved locally; no round-trip required.
            return defer.succeed(
                RemoteDBusObject(self, busName, objectPath, ifl)
            )
    d = self.conn.introspectRemoteObject(
        busName,
        objectPath,
        replaceKnownInterfaces,
    )
    def ok(ifaces):
        # Verify introspection actually produced every requested interface.
        missing = required_interfaces - {q.name for q in ifaces}
        if missing:
            raise error.IntrospectionFailed(
                'Introspection failed to find interfaces: '
                + ','.join(missing)
            )
        prox = RemoteDBusObject(self, busName, objectPath, ifaces)
        # Cache the proxy keyed by (busName, objectPath, interfaces).
        self._weakProxies[weak_id] = prox
        return prox
    d.addCallback(ok)
    return d
|
Creates a L{RemoteDBusObject} instance to represent the
specified DBus object. If explicit interfaces are not
supplied, DBus object introspection will be used to obtain
them automatically.
@type busName: C{string}
@param busName: Name of the bus exporting the desired object
@type objectPath: C{string}
@param objectPath: DBus path of the desired object
@type interfaces: None, C{string} or L{interface.DBusInterface} or a
list of C{string}/L{interface.DBusInterface}
@param interfaces: May be None, a single value, or a list of string
interface names and/or instances of
L{interface.DBusInterface}. If None or any of the
specified interface names are unknown, full
introspection will be attempted. If interfaces
consists of solely of L{interface.DBusInterface}
instances and/or known interfacep names, no
introspection will be preformed.
@type replaceKnownInterfaces: C{bool}
@param replaceKnownInterfaces: If True (defaults to False), any
interfaces discovered during the
introspection process will override any
previous, cached values.
@rtype: L{twisted.internet.defer.Deferred}
@returns: A Deferred to the L{RemoteDBusObject} instance
|
def generate_readme(catalog, export_path=None):
    """Generate a Markdown text description of a catalog's general
    metadata (title, publisher, publication date, etc.), together with:

    - metadata status at the catalog level,
    - global metadata status,
    - number of federated and non-federated datasets,
    - details of the non-federated datasets,
    - number of datasets and distributions included

    Used by the daily routine of `libreria-catalogos` to generate a
    README with basic information about the maintained catalogs.

    Args:
        catalog (str or dict): Path to a catalog in any format
            (JSON, XLSX) or a python dict.
        export_path (str): Path where the generated text is exported (in
            Markdown format). If given, the method returns nothing.

    Returns:
        str: Text of the generated description.
    """
    # If a path/URL was passed in, remember it for the README template.
    if isinstance(catalog, string_types):
        catalog_path_or_url = catalog
    else:
        catalog_path_or_url = None
    catalog = read_catalog(catalog)
    validation = validate_catalog(catalog)
    # Indicators are only needed for a single catalog.
    indicators = generate_catalogs_indicators(
        catalog, CENTRAL_CATALOG)[0][0]
    with io.open(os.path.join(TEMPLATES_PATH, 'catalog_readme.txt'), 'r',
                 encoding='utf-8') as template_file:
        readme_template = template_file.read()
    not_federated_datasets_list = "\n".join([
        "- [{}]({})".format(dataset[0], dataset[1])
        for dataset in indicators["datasets_no_federados"]
    ])
    federated_removed_datasets_list = "\n".join([
        "- [{}]({})".format(dataset[0], dataset[1])
        for dataset in indicators["datasets_federados_eliminados"]
    ])
    federated_datasets_list = "\n".join([
        "- [{}]({})".format(dataset[0], dataset[1])
        for dataset in indicators["datasets_federados"]
    ])
    # Derive the non-federated share, preserving None when unknown.
    non_federated_pct = 1.0 - indicators["datasets_federados_pct"] if \
        indicators["datasets_federados_pct"] is not None else \
        indicators["datasets_federados_pct"]
    content = {
        "title": catalog.get("title"),
        "publisher_name": traverse_dict(
            catalog, ["publisher", "name"]),
        "publisher_mbox": traverse_dict(
            catalog, ["publisher", "mbox"]),
        "catalog_path_or_url": catalog_path_or_url,
        "description": catalog.get("description"),
        "global_status": validation["status"],
        "catalog_status": validation["error"]["catalog"]["status"],
        "no_of_datasets": len(catalog["dataset"]),
        "no_of_distributions": sum([len(dataset["distribution"]) for
                                    dataset in catalog["dataset"]]),
        "federated_datasets": indicators["datasets_federados_cant"],
        "not_federated_datasets": indicators["datasets_no_federados_cant"],
        "not_federated_datasets_pct": non_federated_pct,
        "not_federated_datasets_list": not_federated_datasets_list,
        "federated_removed_datasets_list": federated_removed_datasets_list,
        "federated_datasets_list": federated_datasets_list,
    }
    catalog_readme = readme_template.format(**content)
    if export_path:
        with io.open(export_path, 'w+', encoding='utf-8') as target:
            target.write(catalog_readme)
    else:
        return catalog_readme
|
Genera una descripción textual en formato Markdown sobre los
metadatos generales de un catálogo (título, editor, fecha de
publicación, et cetera), junto con:
- estado de los metadatos a nivel catálogo,
- estado global de los metadatos,
- cantidad de datasets federados y no federados,
- detalles de los datasets no federados
- cantidad de datasets y distribuciones incluidas
Es utilizada por la rutina diaria de `libreria-catalogos` para generar
un README con información básica sobre los catálogos mantenidos.
Args:
catalog (str o dict): Path a un catálogo en cualquier formato,
JSON, XLSX, o diccionario de python.
export_path (str): Path donde exportar el texto generado (en
formato Markdown). Si se especifica, el método no devolverá
nada.
Returns:
str: Texto de la descripción generada.
|
def identity_factor(self):
    """
    Returns the identity factor.

    Def: The identity factor of a factor has the same scope and cardinality as the original factor,
    but the values for all the assignments is 1. When the identity factor is multiplied with
    the factor it returns the factor itself.

    Returns
    -------
    DiscreteFactor: The identity factor.

    Examples
    --------
    >>> from pgmpy.factors.discrete import DiscreteFactor
    >>> phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
    >>> phi_identity = phi.identity_factor()
    >>> phi_identity.variables
    ['x1', 'x2', 'x3']
    >>> phi_identity.values
    array([[[ 1.,  1.],
            [ 1.,  1.],
            [ 1.,  1.]],
           [[ 1.,  1.],
            [ 1.,  1.],
            [ 1.,  1.]]])
    """
    # Same scope and cardinality, but every assignment's value is 1.
    all_ones = np.ones(self.values.size)
    return DiscreteFactor(self.variables, self.cardinality, all_ones)
|
Returns the identity factor.
Def: The identity factor of a factor has the same scope and cardinality as the original factor,
but the values for all the assignments is 1. When the identity factor is multiplied with
the factor it returns the factor itself.
Returns
-------
DiscreteFactor: The identity factor.
Examples
--------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi_identity = phi.identity_factor()
>>> phi_identity.variables
['x1', 'x2', 'x3']
>>> phi_identity.values
array([[[ 1., 1.],
[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.],
[ 1., 1.]]])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.