code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def get_line_numbers(self, buffer):
    """
    Return a (start_line, end_line) pair.
    """
    # Translate the text-object range into absolute document indexes.
    start, end = self.operator_range(buffer.document)
    start += buffer.cursor_position
    end += buffer.cursor_position
    # Map both absolute indexes onto row numbers; columns are discarded.
    start_row, _ = buffer.document.translate_index_to_position(start)
    end_row, _ = buffer.document.translate_index_to_position(end)
    return start_row, end_row
|
Return a (start_line, end_line) pair.
|
def solve_gfl(data, edges=None, weights=None,
              minlam=0.2, maxlam=1000.0, numlam=30,
              alpha=0.2, inflate=2., converge=1e-6,
              maxsteps=1000000, lam=None, verbose=0,
              missing_val=None, full_path=False,
              loss='normal'):
    '''A very easy-to-use version of GFL solver that just requires the data and
    the edges.

    Fixes over the previous revision: the ``edges is None`` default is now
    handled *before* ``edges`` is dereferenced (previously ``edges.shape``
    raised AttributeError when no edges were given), and the initial edge
    set used by the disconnected-node fixup is coerced to an ndarray so the
    default-edges path works too.
    '''
    # Flatten the data. The binomial loss carries a (trials, successes)
    # pair of arrays; every other loss is a single array.
    if loss == 'binomial':
        flat_data = data[0].flatten()
        nonmissing_flat_data = flat_data, data[1].flatten()
    else:
        flat_data = data.flatten()
        nonmissing_flat_data = flat_data
    # Default edge set: a grid graph of the same shape as the data.
    if edges is None:
        if loss == 'binomial':
            if verbose:
                print('Using default edge set of a grid of same shape as the data: {0}'.format(data[0].shape))
            edges = hypercube_edges(data[0].shape)
        else:
            if verbose:
                print('Using default edge set of a grid of same shape as the data: {0}'.format(data.shape))
            edges = hypercube_edges(data.shape)
    # Fix no-edge case: nothing to smooth, the solution is the data itself.
    # len() works for both a list of pairs and an (m, 2) ndarray.
    if len(edges) < 1:
        return data
    # Keep the initial edges (before missing-value filtering) for the
    # disconnected-node fixup at the end; ndarray needed for column indexing.
    init_edges = np.asarray(edges)
    if verbose:
        print('Decomposing graph into trails')
    if missing_val is not None:
        if verbose:
            print('Removing all data points whose data value is {0}'.format(missing_val))
        edges = [(e1, e2) for (e1, e2) in edges
                 if flat_data[e1] != missing_val and flat_data[e2] != missing_val]
        if loss == 'binomial':
            nonmissing_flat_data = (flat_data[flat_data != missing_val],
                                    nonmissing_flat_data[1][flat_data != missing_val])
        else:
            nonmissing_flat_data = flat_data[flat_data != missing_val]
    ########### Setup the graph
    g = Graph()
    g.add_edges_from(edges)
    chains = decompose_graph(g, heuristic='greedy')
    ntrails, trails, breakpoints, edges = chains_to_trails(chains)
    if verbose:
        print('Setting up trail solver')
    ########### Setup the solver
    if loss == 'normal':
        solver = TrailSolver(alpha, inflate, maxsteps, converge)
    elif loss == 'logistic':
        solver = LogisticTrailSolver(alpha, inflate, maxsteps, converge)
    elif loss == 'binomial':
        solver = BinomialTrailSolver(alpha, inflate, maxsteps, converge)
    else:
        raise NotImplementedError('Loss must be normal, logistic, or binomial')
    # Set the data and pre-cache any necessary structures
    solver.set_data(nonmissing_flat_data, edges, ntrails, trails, breakpoints, weights=weights)
    if verbose:
        print('Solving')
    ########### Run the solver
    if lam:
        # Fixed lambda
        beta = solver.solve(lam)
    else:
        # Grid search to find the best lambda
        beta = solver.solution_path(minlam, maxlam, numlam, verbose=max(0, verbose - 1))
        if not full_path:
            beta = beta['best']
    ########### Fix disconnected nodes: any node that touches no edge keeps
    # its original data value (the solver never updates it).
    # NOTE(review): for loss == 'binomial' `data` is a pair, so this indexed
    # assignment would fail -- unchanged from the original; confirm intent.
    mask = np.ones_like(beta)
    mask[init_edges[:, 0]] = 0
    mask[init_edges[:, 1]] = 0
    beta[mask > 0] = data[mask > 0]
    return beta
|
A very easy-to-use version of GFL solver that just requires the data and
the edges.
|
def __find_block_neighbors(self, block, level_blocks, unhandled_block_indexes):
    """!
    @brief Search block neighbors that are parts of new clusters (density is greater than threshold and that are
            not cluster members yet), other neighbors are ignored.

    @param[in] block (bang_block): BANG-block for which neighbors should be found (which can be part of cluster).
    @param[in] level_blocks (list): BANG-blocks on specific level.
    @param[in] unhandled_block_indexes (set): Blocks that have not been processed yet.

    @return (list) Block neighbors that can become part of cluster.
    """
    neighbors = []
    matched_indexes = []
    for candidate_index in unhandled_block_indexes:
        candidate = level_blocks[candidate_index]
        if not block.is_neighbor(candidate):
            continue
        matched_indexes.append(candidate_index)
        neighbors.append(candidate)
        # A block can have at most eight neighbors, so stop searching early.
        if len(neighbors) == 8:
            break
    # Matched blocks are now considered handled.
    for matched_index in matched_indexes:
        unhandled_block_indexes.remove(matched_index)
    return neighbors
|
!
@brief Search block neighbors that are parts of new clusters (density is greater than threshold and that are
not cluster members yet), other neighbors are ignored.
@param[in] block (bang_block): BANG-block for which neighbors should be found (which can be part of cluster).
@param[in] level_blocks (list): BANG-blocks on specific level.
@param[in] unhandled_block_indexes (set): Blocks that have not been processed yet.
@return (list) Block neighbors that can become part of cluster.
|
def call_function(self, command, response_length=0, params=[], timeout_sec=1):
    """Send specified command to the PN532 and expect up to response_length
    bytes back in a response. Note that less than the expected bytes might
    be returned! Params can optionally specify an array of bytes to send as
    parameters to the function call. Will wait up to timeout_secs seconds
    for a response and return a bytearray of response bytes, or None if no
    response is available within the timeout.
    """
    # NOTE(review): `params=[]` is a mutable default; it is only read here,
    # never mutated, so behavior is unaffected -- but `params=None` would be
    # the safer idiom.
    # Build frame data with command and parameters.
    data = bytearray(2+len(params))
    data[0] = PN532_HOSTTOPN532   # direction byte: host -> PN532
    data[1] = command & 0xFF      # command code clamped to a single byte
    data[2:] = params
    # Send frame and wait for response.
    self._write_frame(data)
    if not self._wait_ready(timeout_sec):
        # Chip never became ready: treat as timeout, no response.
        return None
    # Verify ACK response and wait to be ready for function response.
    response = self._read_data(len(PN532_ACK))
    if response != PN532_ACK:
        raise RuntimeError('Did not receive expected ACK from PN532!')
    if not self._wait_ready(timeout_sec):
        return None
    # Read response bytes; +2 covers the direction byte and command echo
    # that are stripped off before returning.
    response = self._read_frame(response_length+2)
    # Check that response is for the called function: the chip answers with
    # the PN532->host direction byte and the command code plus one.
    if not (response[0] == PN532_PN532TOHOST and response[1] == (command+1)):
        raise RuntimeError('Received unexpected command response!')
    # Return response data (payload only, header bytes removed).
    return response[2:]
|
Send specified command to the PN532 and expect up to response_length
bytes back in a response. Note that less than the expected bytes might
be returned! Params can optionally specify an array of bytes to send as
parameters to the function call. Will wait up to timeout_secs seconds
for a response and return a bytearray of response bytes, or None if no
response is available within the timeout.
|
def read_altitude(self, sealevel_pa=101325.0):
    """Calculate the altitude in meters.

    :param sealevel_pa: Reference sea-level pressure in pascals.
    :returns: Altitude in meters derived from the current pressure reading.
    """
    # Calculation taken straight from section 3.6 of the datasheet.
    pressure = float(self.read_pressure())
    altitude = 44330.0 * (1.0 - pow(pressure / sealevel_pa, (1.0 / 5.255)))
    # Bug fix: logging uses lazy %-style interpolation, not str.format
    # placeholders -- '{0}' was never substituted.
    self.logger.debug('Altitude %s m', altitude)
    return altitude
|
Calculates the altitude in meters.
|
def update_roles_gce(use_cache=True, cache_expiration=86400, cache_path="~/.gcetools/instances", group_name=None, region=None, zone=None):
    """
    Dynamically update fabric's roles by assigning the tags associated with
    each machine in Google Compute Engine.

    use_cache - will store a local cache in ~/.gcetools/
    cache_expiration - cache expiration in seconds (default: 1 day)
    cache_path - the path to store instances data (default: ~/.gcetools/instances)
    group_name - optional managed instance group to use instead of the global instance pool
    region - gce region name (such as `us-central1`) for a regional managed instance group
    zone - gce zone name (such as `us-central1-a`) for a zone managed instance group

    How to use:
    - Call 'update_roles_gce' at the end of your fabfile.py (it will run each
      time you run fabric).
    - On each function use the regular @roles decorator and set the role to the name
      of one of the tags associated with the instances you wish to work with
    """
    # NOTE(review): `cache_path` is accepted and documented but never passed
    # to `_get_data` -- confirm whether `_get_data` should receive it.
    data = _get_data(use_cache, cache_expiration, group_name=group_name, region=region, zone=zone)
    roles = _get_roles(data)
    env.roledefs.update(roles)
    # (removed dead local `_data_loaded = True`: it was assigned but never
    # read, and as a local it could not update any module-level flag)
    return INSTANCES_CACHE
|
Dynamically update fabric's roles by using assigning the tags associated with
each machine in Google Compute Engine.
use_cache - will store a local cache in ~/.gcetools/
cache_expiration - cache expiration in seconds (default: 1 day)
cache_path - the path to store instances data (default: ~/.gcetools/instances)
group_name - optional managed instance group to use instead of the global instance pool
region - gce region name (such as `us-central1`) for a regional managed instance group
zone - gce zone name (such as `us-central1-a`) for a zone managed instance group
How to use:
- Call 'update_roles_gce' at the end of your fabfile.py (it will run each
time you run fabric).
- On each function use the regular @roles decorator and set the role to the name
of one of the tags associated with the instances you wish to work with
|
def get_disconnect_message(self, code: int):
    '''
    Build an ASGI 'websocket.disconnect' message.
    http://channels.readthedocs.io/en/stable/asgi/www.html#disconnection
    '''
    # Every generated message takes the next sequence number.
    self.order += 1
    return dict(
        channel='websocket.disconnect',
        reply_channel=None,
        path=self.path,
        order=self.order,
        code=code,
    )
|
http://channels.readthedocs.io/en/stable/asgi/www.html#disconnection
|
def rgb2ansi(r, g, b):
    """
    Convert an RGB color to 256 ansi graphics.
    """
    # Based on
    # https://github.com/tehmaze/ansi/blob/master/ansi/colour/rgb.py
    # Walk the thresholds 2.5, 45, 87.5, ... until the smallest channel
    # falls below one; the colour is "grayscale" when the largest channel
    # is below that same threshold.
    threshold = 2.5
    while min(r, g, b) >= threshold:
        threshold += 42.5
    if max(r, g, b) < threshold:
        # Grayscale ramp lives in ansi slots 232-255.
        return 232 + int(float(r + g + b) / 33.0)
    # Otherwise map into the 6x6x6 colour cube (slots 16-231).
    weighted = ((r, 36), (g, 6), (b, 1))
    return 16 + sum(int(6 * float(channel) / 256) * weight
                    for channel, weight in weighted)
|
Convert an RGB color to 256 ansi graphics.
|
def _login_request(self, username=None, secret=None):
    """Send a login request with optional parameters.

    Returns the (sid, challenge) pair parsed from the XML reply.
    """
    url = 'http://' + self._host + '/login_sid.lua'
    params = {}
    if username:
        params['username'] = username
    if secret:
        params['response'] = secret
    reply = self._request(url, params)
    document = xml.dom.minidom.parseString(reply)
    sid = get_text(document.getElementsByTagName('SID')[0].childNodes)
    challenge = get_text(
        document.getElementsByTagName('Challenge')[0].childNodes)
    return sid, challenge
|
Send a login request with paramerters.
|
def _run(self):
    """Execution body.

    :return: Execution result
    :rtype: kser.result.Result
    """
    if KSER_METRICS_ENABLED == "yes":
        KSER_TASK_COUNT.inc()
    # Log the task start together with a serialized copy of the message.
    log_text = "{}.Run: {}[{}]".format(
        self.__class__.__name__, self.__class__.path, self.uuid
    )
    kmsg_payload = Message(
        self.uuid, entrypoint=self.__class__.path,
        params=self.params, metadata=self.metadata
    ).dump()
    logger.debug(log_text, extra=dict(kmsg=kmsg_payload))
    return self.run()
|
Execution body
:return: Execution result
:rtype: kser.result.Result
|
def get_last_api_metadata(self):
    """Get meta data for the last Mbed Cloud API call.

    :returns: meta data of the last Mbed Cloud API call
    :rtype: ApiMetadata
    """
    newest = None
    for _, api in iteritems(self.apis):
        client = api.api_client
        if client is None:
            continue
        candidate = client.get_last_metadata()
        # Skip entries without a timestamp; keep the most recent of the rest.
        if candidate is None or candidate.get('timestamp', None) is None:
            continue
        if newest is None or candidate["timestamp"] >= newest["timestamp"]:
            newest = candidate
    if newest is not None:
        newest = ApiMetadata(newest.get("url"),
                             newest.get("method"),
                             newest.get("response", None),
                             newest.get("return_data", None),
                             newest.get("exception", None))
    return newest
|
Get meta data for the last Mbed Cloud API call.
:returns: meta data of the last Mbed Cloud API call
:rtype: ApiMetadata
|
def index_humansorted(seq, key=None, reverse=False, alg=ns.DEFAULT):
    """
    This is a wrapper around ``index_natsorted(seq, alg=ns.LOCALE)``.

    Parameters
    ----------
    seq: iterable
        The input to sort.
    key: callable, optional
        A key used to determine how to sort each element of the sequence.
        It is **not** applied recursively.
        It should accept a single argument and return a single value.
    reverse : {{True, False}}, optional
        Return the list in reversed sorted order. The default is
        `False`.
    alg : ns enum, optional
        This option is used to control which algorithm `natsort`
        uses when sorting. For details into these options, please see
        the :class:`ns` class documentation. The default is `ns.LOCALE`.

    Returns
    -------
    out : tuple
        The ordered indexes of the input.

    See Also
    --------
    humansorted
    order_by_index

    Notes
    -----
    Please read :ref:`locale_issues` before using `humansorted`.

    Examples
    --------
    Use `index_humansorted` just like the builtin `sorted`::

        >>> a = ['Apple', 'Banana', 'apple', 'banana']
        >>> index_humansorted(a)
        [2, 0, 3, 1]
    """
    # Force locale-aware comparison on top of whatever the caller requested.
    locale_alg = alg | ns.LOCALE
    return index_natsorted(seq, key, reverse, locale_alg)
|
This is a wrapper around ``index_natsorted(seq, alg=ns.LOCALE)``.
Parameters
----------
seq: iterable
The input to sort.
key: callable, optional
A key used to determine how to sort each element of the sequence.
It is **not** applied recursively.
It should accept a single argument and return a single value.
reverse : {{True, False}}, optional
Return the list in reversed sorted order. The default is
`False`.
alg : ns enum, optional
This option is used to control which algorithm `natsort`
uses when sorting. For details into these options, please see
the :class:`ns` class documentation. The default is `ns.LOCALE`.
Returns
-------
out : tuple
The ordered indexes of the input.
See Also
--------
humansorted
order_by_index
Notes
-----
Please read :ref:`locale_issues` before using `humansorted`.
Examples
--------
Use `index_humansorted` just like the builtin `sorted`::
>>> a = ['Apple', 'Banana', 'apple', 'banana']
>>> index_humansorted(a)
[2, 0, 3, 1]
|
def save_to_file(self, path):
    """
    Dump all cookies to file.

    Cookies are dumped as JSON-serialized dict of keys and values.
    """
    serialized = json.dumps(self.get_dict())
    with open(path, 'w') as out:
        out.write(serialized)
|
Dump all cookies to file.
Cookies are dumped as JSON-serialized dict of keys and values.
|
def tAx(mt, x, t):
    """ n/Ax : Returns the EPV (net single premium) of a deferred whole life insurance. """
    # Commutation-function ratio: M at the deferred age over D at issue age.
    numerator = mt.Mx[x + t]
    denominator = mt.Dx[x]
    return numerator / denominator
|
n/Ax : Returns the EPV (net single premium) of a deferred whole life insurance.
|
def get_notificant(self, id, **kwargs):  # noqa: E501
    """Get a specific notification target  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_notificant(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :return: ResponseContainerNotificant
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Async callers get the thread back; sync callers get the unwrapped data.
    result = self.get_notificant_with_http_info(id, **kwargs)  # noqa: E501
    return result
|
Get a specific notification target # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_notificant(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: ResponseContainerNotificant
If the method is called asynchronously,
returns the request thread.
|
def run(self):
    """The main routine for a thread's work.

    The thread pulls tasks from the task queue and executes them until it
    encounters a death token. The death token is a tuple of two Nones.
    """
    try:
        quit_request_detected = False
        while True:
            function, arguments = self.task_queue.get()
            # Death-token check: only the first element is tested; the
            # token's second None is implied by the (None, None) convention.
            if function is None:
                # this allows us to watch the threads die and identify
                # threads that may be hanging or deadlocked
                self.config.logger.info('quits')
                break
            # After a quit request, keep draining the queue without
            # executing anything until the death token arrives.
            if quit_request_detected:
                continue
            try:
                try:
                    # Arguments may be packed as (args, kwargs)...
                    args, kwargs = arguments
                except ValueError:
                    # ...or as a bare positional-args sequence.
                    args = arguments
                    kwargs = {}
                function(*args, **kwargs)  # execute the task
            except Exception:
                # A failing task must not kill the worker thread.
                self.config.logger.error("Error in processing a job",
                                         exc_info=True)
            except KeyboardInterrupt:  # TODO: can probably go away
                # NOTE(review): reachable on Python 3 because
                # KeyboardInterrupt is not a subclass of Exception.
                self.config.logger.info('quit request detected')
                quit_request_detected = True
                #thread.interrupt_main() # only needed if signal handler
                # not registered
    except Exception:
        # Failure of the queue itself (not of a task) is fatal to the loop.
        self.config.logger.critical("Failure in task_queue", exc_info=True)
|
The main routine for a thread's work.
The thread pulls tasks from the task queue and executes them until it
encounters a death token. The death token is a tuple of two Nones.
|
def zSetSurfaceData(self, surfNum, radius=None, thick=None, material=None, semidia=None,
                    conic=None, comment=None):
    """Sets surface data"""
    # Only supported in sequential mode.
    if self.pMode != 0:
        raise NotImplementedError('Function not implemented for non-sequential mode')
    surf = self.pLDE.GetSurfaceAt(surfNum)
    # Write only the properties that were explicitly supplied.
    updates = (
        ('pRadius', radius),
        ('pThickness', thick),
        ('pMaterial', material),
        ('pSemiDiameter', semidia),
        ('pConic', conic),
        ('pComment', comment),
    )
    for attribute, value in updates:
        if value is not None:
            setattr(surf, attribute, value)
|
Sets surface data
|
def get_conditional_instance(self, parameter_names):
    """ get a new Schur instance that includes conditional update from
    some parameters becoming known perfectly

    Parameters
    ----------
    parameter_names : list
        parameters that are to be treated as notionally perfectly
        known

    Returns
    -------
    la_cond : Schur
        a new Schur instance conditional on perfect knowledge
        of some parameters

    Note
    ----
    this method is used by the get_parameter_contribution() method -
    don't call this method directly
    """
    # Accept a single name as well as a list.
    if not isinstance(parameter_names, list):
        parameter_names = [parameter_names]
    # Normalize names to lowercase and verify each exists in the jacobian.
    for iname, name in enumerate(parameter_names):
        name = str(name).lower()
        parameter_names[iname] = name
        assert name in self.jco.col_names,\
            "contribution parameter " + name + " not found jco"
    # The parameters that stay uncertain are the complement of the
    # conditioned set.
    keep_names = []
    for name in self.jco.col_names:
        if name not in parameter_names:
            keep_names.append(name)
    if len(keep_names) == 0:
        raise Exception("Schur.contribution_from_Parameters " +
                        "atleast one parameter must remain uncertain")
    #get the reduced predictions
    if self.predictions is None:
        raise Exception("Schur.contribution_from_Parameters " +
                        "no predictions have been set")
    # cond_preds = []
    # for pred in self.predictions:
    #     cond_preds.append(pred.get(keep_names, pred.col_names))
    cond_preds = self.predictions.get(row_names=keep_names)
    # Build the conditional Schur complement: jacobian reduced to the kept
    # columns, prior covariance conditioned on the known parameters.
    la_cond = Schur(jco=self.jco.get(self.jco.row_names, keep_names),
                    parcov=self.parcov.condition_on(parameter_names),
                    obscov=self.obscov, predictions=cond_preds, verbose=False)
    return la_cond
|
get a new Schur instance that includes conditional update from
some parameters becoming known perfectly
Parameters
----------
parameter_names : list
parameters that are to be treated as notionally perfectly
known
Returns
-------
la_cond : Schur
a new Schur instance conditional on perfect knowledge
of some parameters
Note
----
this method is used by the get_parameter_contribution() method -
don't call this method directly
|
def global_include(self, pattern):
    """
    Include all files anywhere in the current directory that match the
    pattern. This is very inefficient on large file trees.
    """
    # Lazily build the full file list on first use.
    if self.allfiles is None:
        self.findall()
    matcher = translate_pattern(os.path.join('**', pattern))
    matched = [name for name in self.allfiles if matcher.match(name)]
    self.extend(matched)
    return bool(matched)
|
Include all files anywhere in the current directory that match the
pattern. This is very inefficient on large file trees.
|
def disconnect(self):
    """Gracefully close connection to stomp server."""
    # Already closed -- nothing to do.
    if not self._connected:
        return
    self._connected = False
    self._conn.disconnect()
|
Gracefully close connection to stomp server.
|
def extend_left_to(self, window, max_size):
    """Adjust the offset to start where the given window on our left ends if possible,
    but don't make yourself larger than max_size.
    The resize will assure that the new window still contains the old window area"""
    # Distance between our start and the end of the left-hand window.
    shift = self.ofs - window.ofs_end()
    # Clip the shift so that the grown window never exceeds max_size.
    grown_size = shift + self.size
    shift -= grown_size - min(grown_size, max_size)
    self.ofs -= shift
    self.size += shift
|
Adjust the offset to start where the given window on our left ends if possible,
but don't make yourself larger than max_size.
The resize will assure that the new window still contains the old window area
|
def list_numbers(self, **kwargs):  # noqa: E501
    """Get your numbers  # noqa: E501

    List all your purchased numbers  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.list_numbers(async=True)
    >>> result = thread.get()

    :param async bool
    :return: ResponseNumberList
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Async callers get the thread back; sync callers get the unwrapped data.
    result = self.list_numbers_with_http_info(**kwargs)  # noqa: E501
    return result
|
Get your numbers # noqa: E501
List all your purchased numbers # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_numbers(async=True)
>>> result = thread.get()
:param async bool
:return: ResponseNumberList
If the method is called asynchronously,
returns the request thread.
|
def _serialize_list(cls, list_):
    """
    Serialize every element of the list with ``cls.serialize``.

    :type list_: list
    :rtype: list
    """
    return [cls.serialize(item) for item in list_]
|
:type list_: list
:rtype: list
|
def process_pybel_graph(graph):
    """Return a PybelProcessor by processing a PyBEL graph.

    Parameters
    ----------
    graph : pybel.struct.BELGraph
        A PyBEL graph to process

    Returns
    -------
    bp : PybelProcessor
        A PybelProcessor object which contains INDRA Statements in
        bp.statements.
    """
    bp = PybelProcessor(graph)
    bp.get_statements()
    # Report how many annotation pairs could not be resolved, if any.
    failures = bp.annot_manager.failures
    if failures:
        missing_count = sum(len(values) for values in failures.values())
        logger.warning('missing %d annotation pairs', missing_count)
    return bp
|
Return a PybelProcessor by processing a PyBEL graph.
Parameters
----------
graph : pybel.struct.BELGraph
A PyBEL graph to process
Returns
-------
bp : PybelProcessor
A PybelProcessor object which contains INDRA Statements in
bp.statements.
|
def run_gatk_germline_pipeline(job, samples, config):
    """
    Downloads shared files and calls the GATK best practices germline pipeline for a cohort of samples

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param list[GermlineSample] samples: List of GermlineSample namedtuples
    :param Namespace config: Configuration options for pipeline
        Requires the following config attributes:
        config.preprocess_only      If True, then stops pipeline after preprocessing steps
        config.joint_genotype       If True, then joint genotypes cohort
        config.run_oncotator        If True, then adds Oncotator to pipeline
        Additional parameters are needed for downstream steps. Refer to pipeline README for more information.
    """
    # Determine the available disk space on a worker node before any jobs have been run.
    work_dir = job.fileStore.getLocalTempDir()
    st = os.statvfs(work_dir)
    config.available_disk = st.f_bavail * st.f_frsize
    # Check that there is a reasonable number of samples for joint genotyping
    num_samples = len(samples)
    if config.joint_genotype and not 30 < num_samples < 200:
        job.fileStore.logToMaster('WARNING: GATK recommends batches of '
                                  '30 to 200 samples for joint genotyping. '
                                  'The current cohort has %d samples.' % num_samples)
    # Download of shared reference files runs first; everything else is
    # chained as its children so it sees the downloaded files via rv().
    shared_files = Job.wrapJobFn(download_shared_files, config).encapsulate()
    job.addChild(shared_files)
    if config.preprocess_only:
        # Preprocess-only mode: one prepare_bam child per sample, no calling.
        for sample in samples:
            shared_files.addChildJobFn(prepare_bam,
                                       sample.uuid,
                                       sample.url,
                                       shared_files.rv(),
                                       paired_url=sample.paired_url,
                                       rg_line=sample.rg_line)
    else:
        run_pipeline = Job.wrapJobFn(gatk_germline_pipeline,
                                     samples,
                                     shared_files.rv()).encapsulate()
        shared_files.addChild(run_pipeline)
        # Optional annotation step runs after the main pipeline completes.
        if config.run_oncotator:
            annotate = Job.wrapJobFn(annotate_vcfs, run_pipeline.rv(), shared_files.rv())
            run_pipeline.addChild(annotate)
|
Downloads shared files and calls the GATK best practices germline pipeline for a cohort of samples
:param JobFunctionWrappingJob job: passed automatically by Toil
:param list[GermlineSample] samples: List of GermlineSample namedtuples
:param Namespace config: Configuration options for pipeline
Requires the following config attributes:
config.preprocess_only If True, then stops pipeline after preprocessing steps
config.joint_genotype If True, then joint genotypes cohort
config.run_oncotator If True, then adds Oncotator to pipeline
Additional parameters are needed for downstream steps. Refer to pipeline README for more information.
|
def _calc_checksum(self, secret):
    """Calculate the checksum for a secret.

    :param secret: The secret key.
    :returns: The checksum.
    """
    # AES-encrypt the salt with the secret, then hash and hex-encode.
    digest = hashlib.sha256(mysql_aes_encrypt(self.salt, secret)).hexdigest()
    return str_to_uascii(digest)
|
Calculate string.
:param secret: The secret key.
:returns: The checksum.
|
def setup(self):
    """
    Initialize the crochet library.

    This starts the reactor in a thread, and connect's Twisted's logs to
    Python's standard library logging module.

    This must be called at least once before the library can be used, and
    can be called multiple times.
    """
    # Idempotent: repeated calls after the first are no-ops.
    if self._started:
        return
    self._common_setup()
    if platform.type == "posix":
        # Child-process reaping must happen on the reactor thread.
        self._reactor.callFromThread(self._startReapingProcesses)
    if self._startLoggingWithObserver:
        # Route Twisted log events into stdlib logging on a dedicated thread.
        observer = ThreadLogObserver(PythonLoggingObserver().emit)
        def start():
            # Twisted is going to override warnings.showwarning; let's
            # make sure that has no effect:
            from twisted.python import log
            original = log.showwarning
            log.showwarning = warnings.showwarning
            self._startLoggingWithObserver(observer, False)
            log.showwarning = original
        self._reactor.callFromThread(start)
        # We only want to stop the logging thread once the reactor has
        # shut down:
        self._reactor.addSystemEventTrigger(
            "after", "shutdown", observer.stop)
    # Run the reactor on its own daemon-style thread, without installing
    # signal handlers (the main thread keeps signal handling).
    t = threading.Thread(
        target=lambda: self._reactor.run(installSignalHandlers=False),
        name="CrochetReactor")
    t.start()
    # On interpreter exit: stop the reactor, then flush logged errors.
    self._atexit_register(self._reactor.callFromThread, self._reactor.stop)
    self._atexit_register(_store.log_errors)
    if self._watchdog_thread is not None:
        self._watchdog_thread.start()
|
Initialize the crochet library.
This starts the reactor in a thread, and connect's Twisted's logs to
Python's standard library logging module.
This must be called at least once before the library can be used, and
can be called multiple times.
|
def deserialize_upload(value, url):
    """
    Restore file and name and storage from serialized value and the upload url.
    """
    empty = {'name': None, 'storage': None}
    try:
        result = signing.loads(value, salt=url)
    except signing.BadSignature:
        # TODO: Log invalid signature
        return dict(empty)
    try:
        result['storage'] = get_storage_class(result['storage'])
    except (ImproperlyConfigured, ImportError):
        # TODO: Log invalid class
        return dict(empty)
    return result
|
Restore file and name and storage from serialized value and the upload url.
|
def ordersku_update(self, oid, sku_id=None, sku_props=None):
    '''taobao.trade.ordersku.update -- update the SKU properties of a trade order.

    Requires seller-level (or higher) permission. May be called repeatedly;
    the call also adds the information when it is not present yet.
    '''
    request = TOPRequest('taobao.trade.ordersku.update')
    request['oid'] = oid
    if sku_id is not None:
        request['sku_id'] = sku_id
    if sku_props is not None:
        request['sku_props'] = sku_props
    self.create(self.execute(request)['order'])
    return self
|
taobao.trade.ordersku.update 更新交易订单的销售属性
需要商家或以上权限才可调用此接口,可重复调用本接口更新交易备注,本接口同时具有添加备注的功能
|
def decode_offset_fetch_response(cls, response):
    """
    Decode OffsetFetchResponse to OffsetFetchResponsePayloads

    Arguments:
        response: OffsetFetchResponse
    """
    payloads = []
    for topic, partitions in response.topics:
        for partition, offset, metadata, error in partitions:
            payloads.append(kafka.structs.OffsetFetchResponsePayload(
                topic, partition, offset, metadata, error
            ))
    return payloads
|
Decode OffsetFetchResponse to OffsetFetchResponsePayloads
Arguments:
response: OffsetFetchResponse
|
def window(data, param):
    """
    MAYBE WE CAN DO THIS WITH NUMPY (no, the edges of windows are not graceful with numpy)
    data - list of records
    """
    name = param.name  # column to assign window function result
    edges = param.edges  # columns to group by
    where = param.where  # DO NOT CONSIDER THESE VALUES
    sortColumns = param.sort  # columns to sort by
    calc_value = jx_expression_to_function(
        param.value
    )  # function that takes a record and returns a value (for aggregation)
    aggregate = param.aggregate  # WindowFunction to apply
    _range = (
        param.range
    )  # of form {"min":-10, "max":0} to specify the size and relative position of window
    data = filter(data, where)
    if not aggregate and not edges:
        if sortColumns:
            data = sort(data, sortColumns, already_normalized=True)
        # SIMPLE CALCULATED VALUE: no grouping, no window -- just assign
        # the computed value to each record in place and stop.
        for rownum, r in enumerate(data):
            try:
                r[name] = calc_value(r, rownum, data)
            except Exception as e:
                raise e
        return
    try:
        edge_values = [e.value.var for e in edges]
    except Exception as e:
        raise Log.error("can only support simple variable edges", cause=e)
    if not aggregate or aggregate == "none":
        # Grouped but not aggregated: compute the value per record within
        # each (optionally sorted) group.
        for _, values in groupby(data, edge_values):
            if not values:
                continue  # CAN DO NOTHING WITH THIS ZERO-SAMPLE
            if sortColumns:
                sequence = sort(values, sortColumns, already_normalized=True)
            else:
                sequence = values
            for rownum, r in enumerate(sequence):
                r[name] = calc_value(r, rownum, sequence)
        return
    # Full windowed aggregation per group.
    for keys, values in groupby(data, edge_values):
        if not values:
            continue  # CAN DO NOTHING WITH THIS ZERO-SAMPLE
        sequence = sort(values, sortColumns)
        # Stage the per-record values in __temp__ so the sliding window can
        # read them independently of the output column.
        for rownum, r in enumerate(sequence):
            r["__temp__"] = calc_value(r, rownum, sequence)
        head = coalesce(_range.max, _range.stop)
        tail = coalesce(_range.min, _range.start)
        # PRELOAD total with the initial window contents
        total = aggregate()
        for i in range(tail, head):
            total.add(sequence[i].__temp__)
        # WINDOW FUNCTION APPLICATION: emit, then slide the window by one.
        # NOTE(review): i + head (and negative i + tail) can index past the
        # sequence ends -- presumably the sequence/record types tolerate
        # out-of-range access (returning a null-like value); confirm against
        # the container library in use.
        for i, r in enumerate(sequence):
            r[name] = total.end()
            total.add(sequence[i + head].__temp__)
            total.sub(sequence[i + tail].__temp__)
    # Clear the scratch column on all records.
    for r in data:
        r["__temp__"] = None
|
MAYBE WE CAN DO THIS WITH NUMPY (no, the edges of windows are not graceful with numpy)
data - list of records
|
def post_structure(entry, site):
    """
    A post structure with extensions.
    """
    author = entry.authors.all()[0]
    structure = {
        'title': entry.title,
        'description': six.text_type(entry.html_content),
        'link': '%s://%s%s' % (PROTOCOL, site.domain,
                               entry.get_absolute_url()),
    }
    # Basic Extensions
    structure.update({
        'permaLink': '%s://%s%s' % (PROTOCOL, site.domain,
                                    entry.get_absolute_url()),
        'categories': [cat.title for cat in entry.categories.all()],
        'dateCreated': DateTime(entry.creation_date.isoformat()),
        'postid': entry.pk,
        'userid': author.get_username(),
    })
    # Useful Movable Type Extensions
    structure.update({
        'mt_excerpt': entry.excerpt,
        'mt_allow_comments': int(entry.comment_enabled),
        'mt_allow_pings': (int(entry.pingback_enabled) or
                           int(entry.trackback_enabled)),
        'mt_keywords': entry.tags,
    })
    # Useful Wordpress Extensions
    structure.update({
        'wp_author': author.get_username(),
        'wp_author_id': author.pk,
        'wp_author_display_name': author.__str__(),
        'wp_password': entry.password,
        'wp_slug': entry.slug,
        'sticky': entry.featured,
    })
    return structure
|
A post structure with extensions.
|
def init_autoindex(self, auto_interval):
    """Initialize and start the auto-indexing of the collections. If auto_interval is None this is a no op.

    :param str|int auto_interval: The auto-indexing interval from the configuration file or CLI argument
    """
    if not auto_interval:
        return
    from pywb.manager.autoindex import AutoIndexer
    colls_dir = self.warcserver.root_dir or None
    indexer = AutoIndexer(colls_dir=colls_dir, interval=int(auto_interval))
    # A missing managed directory is fatal for auto-indexing.
    if not os.path.isdir(indexer.root_path):
        logging.error('No managed directory "{0}" for auto-indexing'.format(indexer.root_path))
        import sys
        sys.exit(2)
    logging.info('Auto-Indexing Enabled on "{0}", checking every {1} secs'.format(indexer.root_path, auto_interval))
    indexer.start()
|
Initialize and start the auto-indexing of the collections. If auto_interval is None this is a no op.
:param str|int auto_interval: The auto-indexing interval from the configuration file or CLI argument
|
def all(self, list_id, subscriber_hash, **queryparams):
    """
    Get the last 50 events of a member's activity on a specific list,
    including opens, clicks, and unsubscribes.

    :param list_id: The unique id for the list.
    :type list_id: :py:class:`str`
    :param subscriber_hash: The MD5 hash of the lowercase version of the
      list member's email address.
    :type subscriber_hash: :py:class:`str`
    :param queryparams: The query string parameters
        queryparams['fields'] = []
        queryparams['exclude_fields'] = []
    """
    self.list_id = list_id
    # Normalize/validate the hash before building the request path.
    self.subscriber_hash = check_subscriber_hash(subscriber_hash)
    endpoint = self._build_path(list_id, 'members',
                                self.subscriber_hash, 'activity')
    return self._mc_client._get(url=endpoint, **queryparams)
|
Get the last 50 events of a member’s activity on a specific list,
including opens, clicks, and unsubscribes.
:param list_id: The unique id for the list.
:type list_id: :py:class:`str`
:param subscriber_hash: The MD5 hash of the lowercase version of the
list member’s email address.
:type subscriber_hash: :py:class:`str`
:param queryparams: The query string parameters
queryparams['fields'] = []
queryparams['exclude_fields'] = []
|
def _register_factory(self, factory_name, factory, override):
    # type: (str, type, bool) -> None
    """
    Registers a component factory.

    :param factory_name: The name of the factory
    :param factory: The factory class object
    :param override: If true, an existing factory of the same name is
                     replaced; otherwise a duplicate name is an error
    :raise ValueError: The factory name already exists or is invalid
    :raise TypeError: Invalid factory type
    """
    # Validate inputs up front
    if not factory_name or not is_string(factory_name):
        raise ValueError("A factory name must be a non-empty string")
    if not inspect.isclass(factory):
        raise TypeError(
            "Invalid factory class '{0}'".format(type(factory).__name__)
        )

    with self.__factories_lock:
        already_registered = factory_name in self.__factories
        if already_registered and not override:
            raise ValueError(
                "'{0}' factory already exist".format(factory_name)
            )
        if already_registered:
            _logger.info("Overriding factory '%s'", factory_name)

        self.__factories[factory_name] = factory

        # Notify listeners about the (re-)registration
        self._fire_ipopo_event(
            constants.IPopoEvent.REGISTERED, factory_name
        )
|
Registers a component factory
:param factory_name: The name of the factory
:param factory: The factory class object
:param override: If true, previous factory is overridden, else an
exception is risen if a previous factory with that
name already exists
:raise ValueError: The factory name already exists or is invalid
:raise TypeError: Invalid factory type
|
def update(self, parent=None):
    """
    Update the resource via an api PATCH request.

    :param parent ResourceBase: parent of the resource, used to nest the
        request url (optional)
    :raises ResourceError: if the resource has no id (does not exist yet)
    :returns: the resource itself, reloaded from the response payload
    """
    # A resource that was never persisted cannot be patched.
    if not self.id:
        raise self.ResourceError('cannot update a resource without an ID')

    response_data = self.__class__._process_request(
        connection.patch,
        parent=parent,
        id=self.id,
        payload=self.payload(),
    )
    return self._reload(response_data)
|
Updates the resource. This will trigger an api PATCH request.
:param parent ResourceBase: the parent of the resource - used for nesting the request url, optional
:raises ResourceError: if the resource does not have an id (does not exist yet)
:returns: the resource itself
|
def make_vcard_data(name, displayname, email=None, phone=None, fax=None,
                    videophone=None, memo=None, nickname=None, birthday=None,
                    url=None, pobox=None, street=None, city=None, region=None,
                    zipcode=None, country=None, org=None, lat=None, lng=None,
                    source=None, rev=None, title=None, photo_uri=None):
    """\
    Creates a string encoding the contact information as vCard 3.0.
    Only a subset of available vCard properties is supported.
    :param str name: The name. If it contains a semicolon, the first part
            is treated as lastname and the second part is treated as forename.
    :param str displayname: Common name.
    :param str|iterable email: E-mail address. Multiple values are allowed.
    :param str|iterable phone: Phone number. Multiple values are allowed.
    :param str|iterable fax: Fax number. Multiple values are allowed.
    :param str|iterable videophone: Phone number for video calls.
            Multiple values are allowed.
    :param str memo: A notice for the contact.
    :param str nickname: Nickname.
    :param str|date birthday: Birthday. If a string is provided,
            it should encode the date as YYYY-MM-DD value.
    :param str|iterable url: Homepage. Multiple values are allowed.
    :param str|None pobox: P.O. box (address information).
    :param str|None street: Street address.
    :param str|None city: City (address information).
    :param str|None region: Region (address information).
    :param str|None zipcode: Zip code (address information).
    :param str|None country: Country (address information).
    :param str org: Company / organization name.
    :param float lat: Latitude.
    :param float lng: Longitude.
    :param str source: URL where to obtain the vCard.
    :param str|date rev: Revision of the vCard / last modification date.
    :param str|iterable|None title: Job Title. Multiple values are allowed.
    :param str|iterable|None photo_uri: Photo URI. Multiple values are allowed.
    :rtype: str
    """
    def make_multifield(name, val):
        # Normalize a single string / iterable into zero or more NAME:value lines.
        if val is None:
            return ()
        if isinstance(val, str_type):
            val = (val,)
        return ['{0}:{1}'.format(name, escape(i)) for i in val]

    escape = _escape_vcard
    data = ['BEGIN:VCARD', 'VERSION:3.0',
            'N:{0}'.format(name),
            'FN:{0}'.format(escape(displayname))]
    if org:
        data.append('ORG:{0}'.format(escape(org)))
    data.extend(make_multifield('EMAIL', email))
    data.extend(make_multifield('TEL', phone))
    data.extend(make_multifield('TEL;TYPE=FAX', fax))
    data.extend(make_multifield('TEL;TYPE=VIDEO', videophone))
    data.extend(make_multifield('URL', url))
    data.extend(make_multifield('TITLE', title))
    data.extend(make_multifield('PHOTO;VALUE=uri', photo_uri))
    if nickname:
        data.append('NICKNAME:{0}'.format(escape(nickname)))
    adr_properties = (pobox, street, city, region, zipcode, country)
    if any(adr_properties):
        adr_data = [escape(i or '') for i in adr_properties]
        data.append('ADR:{0};;{1};{2};{3};{4};{5}'.format(*adr_data))
    if birthday:
        try:
            birthday = birthday.strftime('%Y-%m-%d')
        except AttributeError:
            pass
        if not _looks_like_datetime(birthday):
            raise ValueError('"birthday" does not seem to be a valid date or date/time representation')
        # BUGFIX: dropped the stray trailing ';' the original appended to the
        # BDAY value; RFC 2426 defines BDAY's value as a bare date / date-time.
        data.append('BDAY:{0}'.format(birthday))
    # BUGFIX: the original condition "lat or lng and (not(all((lat, lng))))"
    # parses as "lat or (lng and ...)", so it raised whenever lat was given,
    # even with complete geo information.
    if (lat or lng) and not (lat and lng):
        raise ValueError('Incomplete geo information, please specify latitude and longitude.')
    if lat and lng:
        data.append('GEO:{0};{1}'.format(lat, lng))
    if source:
        # BUGFIX: the original emitted the homepage ("url") here instead of
        # the vCard's SOURCE value.
        data.append('SOURCE:{0}'.format(escape(source)))
    if memo:
        data.append('NOTE:{0}'.format(escape(memo)))
    if rev:
        if not _looks_like_datetime(rev):
            raise ValueError('"rev" does not seem to be a valid date or date/time representation')
        data.append('REV:{0}'.format(rev))
    data.append('END:VCARD')
    data.append('')  # final CRLF terminator
    return '\r\n'.join(data)
|
\
Creates a string encoding the contact information as vCard 3.0.
Only a subset of available vCard properties is supported.
:param str name: The name. If it contains a semicolon, , the first part
is treated as lastname and the second part is treated as forename.
:param str displayname: Common name.
:param str|iterable email: E-mail address. Multiple values are allowed.
:param str|iterable phone: Phone number. Multiple values are allowed.
:param str|iterable fax: Fax number. Multiple values are allowed.
:param str|iterable videophone: Phone number for video calls.
Multiple values are allowed.
:param str memo: A notice for the contact.
:param str nickname: Nickname.
:param str|date birthday: Birthday. If a string is provided,
it should encode the date as YYYY-MM-DD value.
:param str|iterable url: Homepage. Multiple values are allowed.
:param str|None pobox: P.O. box (address information).
:param str|None street: Street address.
:param str|None city: City (address information).
:param str|None region: Region (address information).
:param str|None zipcode: Zip code (address information).
:param str|None country: Country (address information).
:param str org: Company / organization name.
:param float lat: Latitude.
:param float lng: Longitude.
:param str source: URL where to obtain the vCard.
:param str|date rev: Revision of the vCard / last modification date.
:param str|iterable|None title: Job Title. Multiple values are allowed.
:param str|iterable|None photo_uri: Photo URI. Multiple values are allowed.
:rtype: str
|
def detach_zone(organization_id_or_slug):
    '''Detach the zone of a given <organization>.'''
    org = Organization.objects.get_by_id_or_slug(organization_id_or_slug)
    if not org:
        exit_with_error(
            'No organization found for {0}'.format(organization_id_or_slug)
        )
    log.info('Detaching {organization} from {organization.zone}'.format(
        organization=org))
    # Clear the association and persist it.
    org.zone = None
    org.save()
    log.info('Done')
|
Detach the zone of a given <organization>.
|
def deleteoutputfile(project, filename, credentials=None):
    """Delete an output file, an output directory, or — when no filename is
    given — all output files (resetting the project).

    Returns a plain-text "Deleted" response on success; aborts with 404 when
    the named output file does not exist or cannot be deleted.
    """
    user, oauth_access_token = parsecredentials(credentials) #pylint: disable=unused-variable
    if filename:
        filename = filename.replace("..", "")  # Simple guard against path traversal
    if not filename:
        # Deleting all output files and resetting
        # (redundant len(filename) == 0 check removed; falsy covers it)
        Project.reset(project, user)
        msg = "Deleted"
        return withheaders(flask.make_response(msg), 'text/plain', {'Content-Length': len(msg), 'allow_origin': settings.ALLOW_ORIGIN})  # 200
    elif os.path.isdir(Project.path(project, user) + filename):
        # Deleting specified directory
        shutil.rmtree(Project.path(project, user) + filename)
        msg = "Deleted"
        return withheaders(flask.make_response(msg), 'text/plain', {'Content-Length': len(msg), 'allow_origin': settings.ALLOW_ORIGIN})  # 200
    else:
        try:
            file = clam.common.data.CLAMOutputFile(Project.path(project, user), filename)
        except Exception:
            # BUGFIX: was a bare "except:", which also swallowed
            # SystemExit/KeyboardInterrupt.
            flask.abort(404)
        if not file.delete():
            # flask.abort raises itself; the original's "raise" prefix was dead code.
            flask.abort(404)
        msg = "Deleted"
        return withheaders(flask.make_response(msg), 'text/plain', {'Content-Length': len(msg), 'allow_origin': settings.ALLOW_ORIGIN})
|
Delete an output file
|
def PackageVariable(key, help, default, searchfunc=None):
    # NB: searchfunc is currently undocumented and unsupported
    """
    Describe a 'package list' option and return a tuple suitable for
    input to opts.Add(): the key, augmented help text, default value,
    validator and converter.

    A 'package list' option may either be 'all', 'none' or a list of
    package names (separated by space).
    """
    augmented_help = '\n    '.join(
        (help, '( yes | no | /path/to/%s )' % key))

    def validate(k, v, e):
        # Delegate to the module validator, binding searchfunc.
        return _validator(k, v, e, searchfunc)

    return (key, augmented_help, default, validate, _converter)
|
The input parameters describe a 'package list' option, thus they
are returned with the correct converter and validator appended. The
result is usable for input to opts.Add() .
A 'package list' option may either be 'all', 'none' or a list of
package names (separated by space).
|
def score(self):
    """Return the total of the accidental dignities score.

    The per-property scores are computed lazily and cached on
    ``self.scoreProperties`` the first time they are needed.
    """
    props = self.scoreProperties
    if not props:
        props = self.getScoreProperties()
        self.scoreProperties = props
    return sum(props.values())
|
Returns the sum of the accidental dignities
score.
|
def parse(self, filename, verbose=0):
    """
    Parse the given file. Return :class:`EventReport`.

    :param filename: Path of the output file to scan for YAML event documents.
    :param verbose: If non-zero, attach a traceback to each malformatted
        YAML document (expensive when many such documents occur).
    """
    run_completed, start_datetime, end_datetime = False, None, None
    filename = os.path.abspath(filename)
    report = EventReport(filename)
    # Only YAML documents whose tag matches one of these patterns are
    # treated as events (errors, warnings, comments, bugs).
    w = WildCard("*Error|*Warning|*Comment|*Bug|*ERROR|*WARNING|*COMMENT|*BUG")
    import warnings
    warnings.simplefilter('ignore', yaml.error.UnsafeLoaderWarning)
    with YamlTokenizer(filename) as tokens:
        for doc in tokens:
            if w.match(doc.tag):
                #print("got doc.tag", doc.tag,"--")
                try:
                    #print(doc.text)
                    event = yaml.load(doc.text)   # Can't use ruamel safe_load!
                    #yaml.load(doc.text, Loader=ruamel.yaml.Loader)
                    #print(event.yaml_tag, type(event))
                except:
                    #raise
                    # Wrong YAML doc. Check tha doc tag and instantiate the proper event.
                    # NOTE(review): deliberately catches any parse failure so a single
                    # malformatted document cannot abort the whole report.
                    message = "Malformatted YAML document at line: %d\n" % doc.lineno
                    message += doc.text
                    # This call is very expensive when we have many exceptions due to malformatted YAML docs.
                    if verbose:
                        message += "Traceback:\n %s" % straceback()
                    if "error" in doc.tag.lower():
                        print("It seems an error. doc.tag:", doc.tag)
                        event = AbinitYamlError(message=message, src_file=__file__, src_line=0)
                    else:
                        event = AbinitYamlWarning(message=message, src_file=__file__, src_line=0)
                # Record where the event was found in the file.
                event.lineno = doc.lineno
                report.append(event)
            # Check whether the calculation completed.
            if doc.tag == "!FinalSummary":
                #print(doc)
                run_completed = True
                d = doc.as_dict()
                #print(d)
                start_datetime, end_datetime = d["start_datetime"], d["end_datetime"]
    report.set_run_completed(run_completed, start_datetime, end_datetime)
    return report
|
Parse the given file. Return :class:`EventReport`.
|
def delete_namespaced_custom_object(self, group, version, namespace, plural, name, body, **kwargs):
    """
    Deletes the specified namespace scoped custom object.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_namespaced_custom_object(group, version, namespace, plural, name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str group: the custom resource's group (required)
    :param str version: the custom resource's version (required)
    :param str namespace: The custom resource's namespace (required)
    :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
    :param str name: the custom object's name (required)
    :param V1DeleteOptions body: (required)
    :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
    :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
    :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.
    :return: object; if called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # The underlying call already returns either the data (sync) or the
    # request thread (async_req=True), so a single delegation suffices.
    return self.delete_namespaced_custom_object_with_http_info(
        group, version, namespace, plural, name, body, **kwargs)
|
Deletes the specified namespace scoped custom object
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_custom_object(group, version, namespace, plural, name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str group: the custom resource's group (required)
:param str version: the custom resource's version (required)
:param str namespace: The custom resource's namespace (required)
:param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
:param str name: the custom object's name (required)
:param V1DeleteOptions body: (required)
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.
:return: object
If the method is called asynchronously,
returns the request thread.
|
def run_iter(self, mine=False, jid=None):
    '''
    Execute and yield returns as they come in, do not print to the display

    mine
        The Single objects will use mine_functions defined in the roster,
        pillar, or master config (they will be checked in that order) and
        will modify the argv with the arguments from mine_functions

    jid
        Optional job id to reuse; when omitted one is obtained from the
        configured master job cache returner.
    '''
    # Ask the master job cache returner to allocate (or validate) the jid.
    fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
    jid = self.returners[fstr](passed_jid=jid or self.opts.get('jid', None))

    # Save the invocation information
    argv = self.opts['argv']

    # raw_shell runs the argv verbatim through ssh; otherwise the first
    # element is the function name and the rest are its arguments.
    if self.opts.get('raw_shell', False):
        fun = 'ssh._raw'
        args = argv
    else:
        fun = argv[0] if argv else ''
        args = argv[1:]

    job_load = {
        'jid': jid,
        'tgt_type': self.tgt_type,
        'tgt': self.opts['tgt'],
        'user': self.opts['user'],
        'fun': fun,
        'arg': args,
        }

    # save load to the master job cache
    if self.opts['master_job_cache'] == 'local_cache':
        # local_cache additionally records which minions were targeted
        self.returners['{0}.save_load'.format(self.opts['master_job_cache'])](jid, job_load, minions=self.targets.keys())
    else:
        self.returners['{0}.save_load'.format(self.opts['master_job_cache'])](jid, job_load)

    for ret in self.handle_ssh(mine=mine):
        host = next(six.iterkeys(ret))
        # Cache each host's return as soon as it arrives.
        self.cache_job(jid, host, ret[host], fun)
        if self.event:
            id_, data = next(six.iteritems(ret))
            if isinstance(data, six.text_type):
                data = {'return': data}
            if 'id' not in data:
                data['id'] = id_
            data['jid'] = jid  # make the jid in the payload the same as the jid in the tag
            self.event.fire_event(
                data,
                salt.utils.event.tagify(
                    [jid, 'ret', host],
                    'job'))
        yield ret
|
Execute and yield returns as they come in, do not print to the display
mine
The Single objects will use mine_functions defined in the roster,
pillar, or master config (they will be checked in that order) and
will modify the argv with the arguments from mine_functions
|
def real_sound_match_abstract_sound(self, abstract_pos: AbstractPosition) -> bool:
    """Return True if this observed position matches *abstract_pos*.

    The positions must coincide; the ``before``/``after`` contexts are
    compared only when they are defined on this object (a ``None``
    context places no constraint).

    :param abstract_pos: the abstract position to compare against
    :return: whether this position satisfies the abstract one
    """
    assert isinstance(abstract_pos, AbstractPosition)
    if self.position != abstract_pos.position:
        return False
    if self.before is not None and not self.before.match_list(abstract_pos.before):
        return False
    if self.after is not None and not self.after.match_list(abstract_pos.after):
        return False
    return True
|
If an observed position
:param abstract_pos:
:return:
|
def add(self, child):
    """
    Adds a typed child object to the component type.

    @param child: Child object to be added.
    @raise ModelError: if the child's type is not supported.
    """
    # Dispatch on the child's type. The order mirrors the original
    # isinstance chain and matters if any of these classes are related
    # by inheritance: the first matching entry wins.
    dispatch = (
        (Parameter, self.add_parameter),
        (Property, self.add_property),
        (DerivedParameter, self.add_derived_parameter),
        (IndexParameter, self.add_index_parameter),
        (Constant, self.add_constant),
        (Exposure, self.add_exposure),
        (Requirement, self.add_requirement),
        (ComponentRequirement, self.add_component_requirement),
        (InstanceRequirement, self.add_instance_requirement),
        (Children, self.add_children),
        (Text, self.add_text),
        (Link, self.add_link),
        (Path, self.add_path),
        (EventPort, self.add_event_port),
        (ComponentReference, self.add_component_reference),
        (Attachments, self.add_attachments),
    )
    for child_type, add_child in dispatch:
        if isinstance(child, child_type):
            add_child(child)
            return
    raise ModelError('Unsupported child element')
|
Adds a typed child object to the component type.
@param child: Child object to be added.
|
def _reportFutures(self):
    """Periodically send the ids of all known futures to the broker.

    Loops forever, sleeping scoop.TIME_BETWEEN_STATUS_REPORTS seconds
    between reports; intended to be run by a separate thread. Exits
    silently once the queues are torn down during shutdown.
    """
    try:
        while True:
            time.sleep(scoop.TIME_BETWEEN_STATUS_REPORTS)
            queue = scoop._control.execQueue
            future_ids = set(future.id for future in queue.movable)
            future_ids.update(future.id for future in queue.ready)
            future_ids.update(future.id for future in queue.inprogress)
            self.socket.send_multipart([
                STATUS_UPDATE,
                pickle.dumps(future_ids),
            ])
    except AttributeError:
        # The process is being shut down.
        pass
|
Sends futures status updates to broker at intervals of
scoop.TIME_BETWEEN_STATUS_REPORTS seconds. Is intended to be run by a
separate thread.
|
def thumbnail(self):
    """
    Return a thumbnail representation of the file if the data is a
    supported graphics format.

    Input:
        * None
    Output:
        * A byte stream representing a thumbnail of a supported graphics file
    Example::
        file = client.get_file("4ddfds", 0)
        open("thumbnail.jpg", "wb").write(file.thumbnail())
    """
    endpoint = "/files/%s/%s/blob/thumb" % (self.sharename, self.fileid)
    return GettRequest().get(endpoint).response
|
This method returns a thumbnail representation of the file if the data is a supported graphics format.
Input:
* None
Output:
* A byte stream representing a thumbnail of a support graphics file
Example::
file = client.get_file("4ddfds", 0)
open("thumbnail.jpg", "wb").write(file.thumbnail())
|
def ui_device_label(self):
    """UI string identifying the device (drive) if toplevel."""
    # Prefer the loop file, then the drive label, then id label / uuid.
    label = (self.loop_file or
             self.drive_label or
             self.ui_id_label or
             self.ui_id_uuid)
    parts = [self.ui_device_presentation, label]
    return ': '.join(part for part in parts if part)
|
UI string identifying the device (drive) if toplevel.
|
def detect(self):
    """
    Contact a remote webservice, parse its output, and return the
    detected IP address (also stored as the current value).
    """
    # Explicit url/parser options take precedence over the random pick.
    if self.opts_url and self.opts_parser:
        url, parser_name = self.opts_url, self.opts_parser
    else:
        url, parser_name = choice(self.urls)  # noqa: S311
    parser = globals().get("_parser_" + parser_name)
    theip = _get_ip_from_url(url, parser)
    if theip is None:
        LOG.info("Could not detect IP using webcheck! Offline?")
    self.set_current_value(theip)
    return theip
|
Try to contact a remote webservice and parse the returned output.
Determine the IP address from the parsed output and return.
|
def filter_select_columns_intensity(df, prefix, columns):
    """
    Filter dataframe to include specified columns, retaining any Intensity columns.
    """
    # '%s.+' (not '%s.*') forces the prefix to be followed by at least one
    # character, so the bare prefix alone never matches.
    pattern = '^(%s.+|%s)$' % (prefix, '|'.join(columns))
    return df.filter(regex=pattern)
|
Filter dataframe to include specified columns, retaining any Intensity columns.
|
def query(url, **kwargs):
    '''
    Query a resource, and decode the return data

    Passes through all the parameters described in the
    :py:func:`utils.http.query function <salt.utils.http.query>`:

    .. autofunction:: salt.utils.http.query

    CLI Example:

    .. code-block:: bash

        salt '*' http.query http://somelink.com/
        salt '*' http.query http://somelink.com/ method=POST \
            params='key1=val1&key2=val2'
        salt '*' http.query http://somelink.com/ method=POST \
            data='<xml>somecontent</xml>'

    For more information about the ``http.query`` module, refer to the
    :ref:`HTTP Tutorial <tutorial-http>`.
    '''
    opts = __opts__.copy()
    # Merge any caller-supplied opts into the minion opts, and keep the
    # 'opts' key out of the kwargs forwarded to the HTTP helper.
    if 'opts' in kwargs:
        opts.update(kwargs.pop('opts'))
    return salt.utils.http.query(url=url, opts=opts, **kwargs)
|
Query a resource, and decode the return data
Passes through all the parameters described in the
:py:func:`utils.http.query function <salt.utils.http.query>`:
.. autofunction:: salt.utils.http.query
CLI Example:
.. code-block:: bash
salt '*' http.query http://somelink.com/
salt '*' http.query http://somelink.com/ method=POST \
params='key1=val1&key2=val2'
salt '*' http.query http://somelink.com/ method=POST \
data='<xml>somecontent</xml>'
For more information about the ``http.query`` module, refer to the
:ref:`HTTP Tutorial <tutorial-http>`.
|
def errdp(marker, number):
    """
    Substitute a double precision number for the first occurrence of
    a marker found in the current long error message.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/errdp_c.html

    :param marker: A substring of the error message to be replaced.
    :type marker: str
    :param number: The d.p. number to substitute for marker.
    :type number: float
    """
    marker_p = stypes.stringToCharP(marker)
    value = ctypes.c_double(number)
    libspice.errdp_c(marker_p, value)
|
Substitute a double precision number for the first occurrence of
a marker found in the current long error message.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/errdp_c.html
:param marker: A substring of the error message to be replaced.
:type marker: str
:param number: The d.p. number to substitute for marker.
:type number: float
|
def create(image_data):
    """
    Build a Kubernetes pod definition for the given image.

    :param image_data: ImageMetadata
    :return: V1Pod,
        https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1Pod.md
    """
    # Environment variables as Kubernetes objects
    env_variables = [client.V1EnvVar(name=key, value=value)
                     for key, value in image_data.env_variables.items()]

    # Exposed ports ("PORT" or "PORT/PROTOCOL") as Kubernetes objects
    exposed_ports = []
    if image_data.exposed_ports is not None:
        for port_spec in image_data.exposed_ports:
            parts = port_spec.split("/", 1)
            port_number = int(parts[0])
            protocol = parts[1].upper() if len(parts) > 1 else None
            exposed_ports.append(
                client.V1ContainerPort(container_port=port_number,
                                       protocol=protocol))

    # Container name: {image-name}-{username}-{random-4-letters},
    # using the bare image name without registry path or tag.
    bare_image_name = image_data.name.split("/")[-1].split(":")[0]
    suffix = ''.join(
        random.choice(string.ascii_lowercase + string.digits) for _ in range(4))
    container_name = '{image_name}-{user_name}-{random_string}'.format(
        image_name=bare_image_name,
        user_name=getpass.getuser(),
        random_string=suffix)

    container = client.V1Container(command=image_data.command,
                                   env=env_variables,
                                   image=image_data.name,
                                   name=container_name,
                                   ports=exposed_ports)
    pod_metadata = client.V1ObjectMeta(name=container_name + "-pod")
    pod_spec = client.V1PodSpec(containers=[container])
    return client.V1Pod(spec=pod_spec, metadata=pod_metadata)
|
:param image_data: ImageMetadata
:return: V1Pod,
https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1Pod.md
|
def _decode_names(self):
    """Decode names (hopefully ASCII or UTF-8) into Unicode.

    Rewrites ``self.subject_name`` (as nested tuples) and the values of
    ``self.alt_names`` in place, silently dropping any entry that is
    not valid UTF-8.
    """
    if self.subject_name is not None:
        decoded_parts = []
        for part in self.subject_name:
            pairs = []
            for name, value in part:
                try:
                    # Both halves must decode, otherwise drop the pair.
                    pairs.append((name.decode("utf-8"),
                                  value.decode("utf-8")))
                except UnicodeError:
                    continue
            decoded_parts.append(tuple(pairs))
        self.subject_name = tuple(decoded_parts)
    for key, raw_names in self.alt_names.items():
        decoded = []
        for raw in raw_names:
            try:
                decoded.append(raw.decode("utf-8"))
            except UnicodeError:
                continue
        self.alt_names[key] = decoded
|
Decode names (hopefully ASCII or UTF-8) into Unicode.
|
def get_lock_behaviour(triggers, all_data, lock):
    """Binary state lock protects from version increments if set"""
    updates = {}
    lock_key = config._forward_aliases.get(Constants.VERSION_LOCK_FIELD)
    if lock:
        # Explicitly setting/locking the version: force the lock field on.
        updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_LOCK_VALUE
        return updates
    currently_locked = (
        lock_key is not None
        and str(all_data.get(lock_key)) == str(config.VERSION_LOCK_VALUE)
    )
    if triggers and currently_locked:
        # The lock absorbs this round of triggers and then releases itself.
        triggers.clear()
        updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_UNLOCK_VALUE
    return updates
|
Binary state lock protects from version increments if set
|
def _subprocessor(self, disabled_qubits):
    """Create a subprocessor by deleting a set of qubits. We assume
    this removes all evil edges, and return an :class:`eden_processor`
    instance.
    """
    surviving_edges = [
        (u, v) for u, v in self._edgelist
        if u not in disabled_qubits and v not in disabled_qubits
    ]
    return eden_processor(surviving_edges, self.M, self.N, self.L,
                          random_bundles=self._random_bundles)
|
Create a subprocessor by deleting a set of qubits. We assume
this removes all evil edges, and return an :class:`eden_processor`
instance.
|
def fast_cov(x, y=None, destination=None):
    """calculate the covariance matrix for the columns of x (MxN), or optionally, the covariance matrix between the
    columns of x and and the columns of y (MxP). (In the language of statistics, the columns are variables, the rows
    are observations).

    Args:
        x (numpy array-like) MxN in shape
        y (numpy array-like) MxP in shape
        destination (numpy array-like) optional location where to store the results as they are calculated (e.g. a numpy
            memmap of a file)

    returns (numpy array-like) array of the covariance values
        for defaults (y=None), shape is NxN
        if y is provided, shape is NxP
    """
    validate_inputs(x, y, destination)

    if y is None:
        y = x
    if destination is None:
        destination = numpy.zeros((x.shape[1], y.shape[1]))

    # Mean-center each input (per column), in the output dtype.
    centered_x = (x - numpy.mean(x, axis=0)).astype(destination.dtype)
    centered_y = (y - numpy.mean(y, axis=0)).astype(destination.dtype)

    # cov = (Xc^T . Yc) / (n - 1), computed directly into destination.
    numpy.dot(centered_x.T, centered_y, out=destination)
    numpy.divide(destination, (x.shape[0] - 1), out=destination)
    return destination
|
calculate the covariance matrix for the columns of x (MxN), or optionally, the covariance matrix between the
columns of x and and the columns of y (MxP). (In the language of statistics, the columns are variables, the rows
are observations).
Args:
x (numpy array-like) MxN in shape
y (numpy array-like) MxP in shape
destination (numpy array-like) optional location where to store the results as they are calculated (e.g. a numpy
memmap of a file)
returns (numpy array-like) array of the covariance values
for defaults (y=None), shape is NxN
if y is provided, shape is NxP
|
def _to_dict(self):
    """Return a json dictionary representing this model.

    Only attributes that are present and not None are serialized;
    nested consumption preferences are serialized recursively.
    """
    result = {}
    category_id = getattr(self, 'consumption_preference_category_id', None)
    if category_id is not None:
        result['consumption_preference_category_id'] = category_id
    name = getattr(self, 'name', None)
    if name is not None:
        result['name'] = name
    preferences = getattr(self, 'consumption_preferences', None)
    if preferences is not None:
        result['consumption_preferences'] = [
            pref._to_dict() for pref in preferences
        ]
    return result
|
Return a json dictionary representing this model.
|
def CELERY_RESULT_BACKEND(self):
    """Redis result backend config"""
    # An explicitly configured backend always wins.
    explicit = get('CELERY_RESULT_BACKEND', None)
    if explicit:
        return explicit
    # Without a reachable redis there is no backend to offer.
    if not self._redis_available():
        return None
    host, port = self.REDIS_HOST, self.REDIS_PORT
    if host and port:
        return "redis://{host}:{port}/{db}".format(
            host=host, port=port, db=self.CELERY_REDIS_RESULT_DB)
    # Implicit None when host/port are incomplete.
|
Redis result backend config
|
def main():
    """
    Launch the RLBot Qt GUI and run its event loop until exit.

    :return:
    """
    application = QApplication(sys.argv)
    icon_path = os.path.join(get_rlbot_directory(), 'img', 'rlbot_icon.png')
    application.setWindowIcon(QtGui.QIcon(icon_path))
    main_window = RLBotQTGui()
    main_window.show()
    application.exec_()
|
Start the GUI
:return:
|
def fc(inputs,
       num_units_out,
       activation=tf.nn.relu,
       stddev=0.01,
       bias=0.0,
       weight_decay=0,
       batch_norm_params=None,
       is_training=True,
       trainable=True,
       restore=True,
       scope=None,
       reuse=None):
  """Adds a fully connected layer followed by an optional batch_norm layer.
  FC creates a variable called 'weights', representing the fully connected
  weight matrix, that is multiplied by the input. If `batch_norm` is None, a
  second variable called 'biases' is added to the result of the initial
  vector-matrix multiplication.
  Args:
    inputs: a [B x N] tensor where B is the batch size and N is the number of
      input units in the layer.
    num_units_out: the number of output units in the layer.
    activation: activation function.
    stddev: the standard deviation for the weights.
    bias: the initial value of the biases.
    weight_decay: the weight decay.
    batch_norm_params: parameters for the batch_norm. If is None don't use it.
    is_training: whether or not the model is in training mode.
    trainable: whether or not the variables should be trainable or not.
    restore: whether or not the variables should be marked for restore.
    scope: Optional scope for variable_scope.
    reuse: whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.
  Returns:
    the tensor variable representing the result of the series of operations.
  """
  with tf.variable_scope(scope, 'FC', [inputs], reuse=reuse):
    # Static input width (N) taken from the tensor's known shape.
    num_units_in = inputs.get_shape()[1]
    weights_shape = [num_units_in, num_units_out]
    weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
    # Only attach an L2 penalty when a positive weight decay is requested.
    l2_regularizer = None
    if weight_decay and weight_decay > 0:
      l2_regularizer = losses.l2_regularizer(weight_decay)
    weights = variables.variable('weights',
                                 shape=weights_shape,
                                 initializer=weights_initializer,
                                 regularizer=l2_regularizer,
                                 trainable=trainable,
                                 restore=restore)
    if batch_norm_params is not None:
      # With batch norm the bias term is redundant (batch_norm has its own
      # beta offset), so only the matmul is performed here.
      outputs = tf.matmul(inputs, weights)
      with scopes.arg_scope([batch_norm], is_training=is_training,
                            trainable=trainable, restore=restore):
        outputs = batch_norm(outputs, **batch_norm_params)
    else:
      # No batch norm: add an explicit bias variable.
      bias_shape = [num_units_out,]
      bias_initializer = tf.constant_initializer(bias)
      biases = variables.variable('biases',
                                  shape=bias_shape,
                                  initializer=bias_initializer,
                                  trainable=trainable,
                                  restore=restore)
      outputs = tf.nn.xw_plus_b(inputs, weights, biases)
    # activation may be passed as None to get a purely linear layer.
    if activation:
      outputs = activation(outputs)
    return outputs
|
Adds a fully connected layer followed by an optional batch_norm layer.
FC creates a variable called 'weights', representing the fully connected
weight matrix, that is multiplied by the input. If `batch_norm` is None, a
second variable called 'biases' is added to the result of the initial
vector-matrix multiplication.
Args:
inputs: a [B x N] tensor where B is the batch size and N is the number of
input units in the layer.
num_units_out: the number of output units in the layer.
activation: activation function.
stddev: the standard deviation for the weights.
bias: the initial value of the biases.
weight_decay: the weight decay.
batch_norm_params: parameters for the batch_norm. If is None don't use it.
is_training: whether or not the model is in training mode.
trainable: whether or not the variables should be trainable or not.
restore: whether or not the variables should be marked for restore.
scope: Optional scope for variable_scope.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
Returns:
the tensor variable representing the result of the series of operations.
|
async def delete_tag(self, tag):
    """
    DELETE /api/tags/{tag}.{_format}
    Permanently remove one tag from every entry.
    :param tag: string The Tag
    :return data related to the ext
    """
    endpoint = '/api/tags/{tag}.{ext}'.format(tag=tag, ext=self.format)
    query_params = {'access_token': self.token}
    return await self.query(endpoint, "delete", **query_params)
|
DELETE /api/tags/{tag}.{_format}
Permanently remove one tag from every entry
:param tag: string The Tag
:return data related to the ext
|
def all_host_infos():
    '''
    Summarize all host information.

    Returns a list of [label, details] pairs covering OS, CPU, toolchains
    and accelerator/OpenCL facts gathered from local commands.
    '''
    return [
        ["Operating system", os()],
        ["CPUID information", cpu()],
        ["CC information", compiler()],
        ["JDK information", from_cmd("java -version")],
        ["MPI information", from_cmd("mpirun -version")],
        ["Scala information", from_cmd("scala -version")],
        ["OpenCL headers", from_cmd("find /usr/include|grep opencl.h")],
        ["OpenCL libraries", from_cmd("find /usr/lib/ -iname '*opencl*'")],
        ["NVidia SMI", from_cmd("nvidia-smi -q")],
        ["OpenCL Details", opencl()],
    ]
|
Summarize all host information.
|
def invalidate(self, comparison: Comparison[Entity, Entity]) -> None:
    """
    Invalidate paths in a zone. See https://api.cloudflare.com
    /#zone-purge-individual-files-by-url-and-cache-tags
    :param comparison: The comparison whose changes to invalidate.
    :raises requests.exceptions.RequestException: On request failure.
    :raises RuntimeError: If the request succeeded but could not be carried
        out.
    """
    @backoff.on_exception(backoff.expo,
                          requests.exceptions.RequestException,
                          max_tries=5,
                          # Give up immediately on 4xx client errors, but
                          # keep retrying transport failures. Connection
                          # errors/timeouts carry no HTTP response
                          # (e.response is None), and dereferencing it
                          # unguarded used to raise AttributeError inside
                          # backoff's retry decision.
                          giveup=lambda e:
                          e.response is not None and
                          400 <= e.response.status_code < 500)
    def _request(chunk: List[str]) -> requests.Response:
        """
        Send a purge cache request to Cloudflare. This method will
        automatically retry with a back-off in case of server-side error.
        :param chunk: The list of paths to purge. These should not have a
                      leading slash, and will be combined with the prefix
                      to form a URL.
        :return: Cloudflare's response to our successful request.
        :raises requests.exceptions.RequestException: If the request fails
            on the 5th attempt.
        """
        response = self._session.delete(
            f'{self._API_BASE}/client/v4/zones/{self._zone}/purge_cache',
            headers={
                'X-Auth-Email': self._email,
                'X-Auth-Key': self._key
            },
            json={
                'files': [self._prefix + path for path in chunk]
            })
        response.raise_for_status()
        return response

    paths = itertools.chain(comparison.deleted(), comparison.modified())
    for chunk_ in util.chunk(paths, self._MAX_INVALIDATIONS_PER_REQUEST):
        chunk_ = list(chunk_)
        if not chunk_:
            # An empty chunk means the path iterator is exhausted; any
            # further chunks would also be empty, so stop here.
            return
        logger.info('Invalidating %d paths (%s)', len(chunk_),
                    ', '.join(chunk_))
        response_ = _request(chunk_)
        logger.debug('Cloudflare invalidation response [%d]: %s',
                     response_.status_code,
                     response_.text)
        json_ = response_.json()
        if not json_['success']:
            # this would be strange - the API returned a success response
            # code, but success was not "true"
            # TODO more appropriate exception, with handling upstream
            raise RuntimeError('Cloudflare reported failure')
        logger.info('Created invalidation %s', json_['result']['id'])
|
Invalidate paths in a zone. See https://api.cloudflare.com
/#zone-purge-individual-files-by-url-and-cache-tags
:param comparison: The comparison whose changes to invalidate.
:raises requests.exceptions.RequestException: On request failure.
:raises RuntimeError: If the request succeeded but could not be carried
out.
|
def max_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None):
  """Adds a Max Pooling layer.

  Pooling is applied per-image over the spatial dimensions only, never
  over the batch or depth dimensions.

  Args:
    inputs: a tensor of size [batch_size, height, width, depth].
    kernel_size: a list of length 2: [kernel_height, kernel_width] of the
      pooling kernel over which the op is computed, or an int if both
      values are the same.
    stride: a list of length 2: [stride_height, stride_width], or an int
      if both strides are the same. Note that presently both strides must
      have the same value.
    padding: the padding method, either 'VALID' or 'SAME'.
    scope: Optional scope for name_scope.

  Returns:
    a tensor representing the results of the pooling operation.

  Raises:
    ValueError: if 'kernel_size' is not a 2-D list
  """
  with tf.name_scope(scope, 'MaxPool', [inputs]):
    k_height, k_width = _two_element_tuple(kernel_size)
    s_height, s_width = _two_element_tuple(stride)
    return tf.nn.max_pool(inputs,
                          ksize=[1, k_height, k_width, 1],
                          strides=[1, s_height, s_width, 1],
                          padding=padding)
|
Adds a Max Pooling layer.
It is assumed by the wrapper that the pooling is only done per image and not
in depth or batch.
Args:
inputs: a tensor of size [batch_size, height, width, depth].
kernel_size: a list of length 2: [kernel_height, kernel_width] of the
pooling kernel over which the op is computed. Can be an int if both
values are the same.
stride: a list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: the padding method, either 'VALID' or 'SAME'.
scope: Optional scope for name_scope.
Returns:
a tensor representing the results of the pooling operation.
Raises:
ValueError: if 'kernel_size' is not a 2-D list
|
def colorize(text, color=None, **kwargs):
    """
    Colorize the text
    kwargs arguments:
        style=, bg=

    Wraps *text* in ANSI SGR escape sequences built from the STYLE, COLOR
    and BACKGROUND lookup tables. Raises WrongStyle / WrongBackground /
    WrongColor when an argument is not a key of the matching table.
    """
    style = None
    bg = None
    # ================ #
    # Keyword checking #
    # ================ #
    if 'style' in kwargs:
        if kwargs['style'] not in STYLE:
            raise WrongStyle('"{}" is wrong argument for {}'.format(kwargs['style'], 'style'))
        style = kwargs['style']
    if 'bg' in kwargs:
        if kwargs['bg'] not in BACKGROUND:
            raise WrongBackground('"{}" is wrong argument for {}'.format(kwargs['bg'], 'bg'))
        bg = kwargs['bg']
    if color not in COLOR:
        raise WrongColor('"{}" is wrong argument for {}'.format(color, 'color'))
    # ===================== #
    # Colorizing the string #
    # ===================== #
    # NOTE(review): when 'style'/'bg' (or color) are omitted they stay None,
    # so STYLE[None] / COLOR[None] / BACKGROUND[None] are looked up below --
    # presumably the constant tables define defaults for the None key;
    # confirm against their definitions.
    if '\x1b[0m' not in text:
        # Plain text: wrap the whole string in one escape/reset pair.
        text = '\x1b[' + ';'.join([str(STYLE[style]), str(COLOR[color]), str(BACKGROUND[bg])])\
            + 'm' + text + '\x1b[0m'
    else:
        # Text already contains reset codes: re-colorize only the segments
        # that are not themselves opened by an escape sequence, restoring
        # the reset marker that split() removed on the others.
        lst = text.split('\x1b[0m')
        text = ''
        for x in lst:
            if not x.startswith('\x1b['):
                x = '\x1b[' + ';'.join([str(STYLE[style]), str(COLOR[color]), str(BACKGROUND[bg])])\
                    + 'm' + x + '\x1b[0m'
            else:
                x += '\x1b[0m'
            text += x
    return text
|
Colorize the text
kwargs arguments:
style=, bg=
|
def set_wts_get_npred_wt(gta, maskname):
    """Apply a weights map and collect the weighted npred of every source.

    Parameters
    ----------
    gta : `fermipy.GTAnalysis`
        The analysis object
    maskname : str
        The path to the file with the mask

    Returns
    -------
    odict : dict
        Dictionary mapping from source name to weighted npred
    """
    # A null maskname means "no mask": pass None through to fermipy.
    mask = None if is_null(maskname) else maskname
    gta.set_weights_map(mask)
    for source_name in gta.like.sourceNames():
        gta._init_source(source_name)
    gta._update_roi()
    return build_srcdict(gta, 'npred_wt')
|
Set a weights file and get the weighted npred for all the sources
Parameters
----------
gta : `fermipy.GTAnalysis`
The analysis object
maskname : str
The path to the file with the mask
Returns
-------
odict : dict
Dictionary mapping from source name to weighted npred
|
def rule_low_registers(self, arg):
    """Low registers are R0 - R7; raise RuleError for anything higher."""
    register_number = self.check_register(arg)
    if register_number <= 7:
        return
    raise iarm.exceptions.RuleError(
        "Register {} is not a low register".format(arg))
|
Low registers are R0 - R7
|
def _set_data(self, **kwargs):
"""Sets data from given parameters
Old values are deleted.
If a paremeter is not given, nothing is changed.
Parameters
----------
shape: 3-tuple of Integer
\tGrid shape
grid: Dict of 3-tuples to strings
\tCell content
attributes: List of 3-tuples
\tCell attributes
row_heights: Dict of 2-tuples to float
\t(row, tab): row_height
col_widths: Dict of 2-tuples to float
\t(col, tab): col_width
macros: String
\tMacros from macro list
"""
if "shape" in kwargs:
self.shape = kwargs["shape"]
if "grid" in kwargs:
self.dict_grid.clear()
self.dict_grid.update(kwargs["grid"])
if "attributes" in kwargs:
self.attributes[:] = kwargs["attributes"]
if "row_heights" in kwargs:
self.row_heights = kwargs["row_heights"]
if "col_widths" in kwargs:
self.col_widths = kwargs["col_widths"]
if "macros" in kwargs:
self.macros = kwargs["macros"]
|
Sets data from given parameters
Old values are deleted.
If a parameter is not given, nothing is changed.
Parameters
----------
shape: 3-tuple of Integer
\tGrid shape
grid: Dict of 3-tuples to strings
\tCell content
attributes: List of 3-tuples
\tCell attributes
row_heights: Dict of 2-tuples to float
\t(row, tab): row_height
col_widths: Dict of 2-tuples to float
\t(col, tab): col_width
macros: String
\tMacros from macro list
|
def _extract_optimizer_param_name_and_group(optimizer_name, param):
"""Extract param group and param name from the given parameter name.
Raises an error if the param name doesn't match one of
- ``optimizer__param_groups__<group>__<name>``
- ``optimizer__<name>``
In the second case group defaults to 'all'.
The second case explicitly forbids ``optimizer__foo__bar``
since we do not know how to deal with unknown sub-params.
"""
pat_1 = '__param_groups__(?P<group>[0-9])__(?P<name>.+)'
pat_2 = '__(?!.*__.*)(?P<name>.+)'
pat_1 = optimizer_name + pat_1
pat_2 = optimizer_name + pat_2
match_1 = re.compile(pat_1).fullmatch(param)
match_2 = re.compile(pat_2).fullmatch(param)
match = match_1 or match_2
if not match:
raise AttributeError('Invalid parameter "{}" for optimizer "{}"'.format(
param,
optimizer_name,
))
groups = match.groupdict()
param_group = groups.get('group', 'all')
param_name = groups['name']
return param_group, param_name
|
Extract param group and param name from the given parameter name.
Raises an error if the param name doesn't match one of
- ``optimizer__param_groups__<group>__<name>``
- ``optimizer__<name>``
In the second case group defaults to 'all'.
The second case explicitly forbids ``optimizer__foo__bar``
since we do not know how to deal with unknown sub-params.
|
def get_knowledge_category(self):
    """Gets the grade associated with the knowledge dimension.

    return: (osid.grading.Grade) - the grade
    raise: IllegalState - has_knowledge_category() is false
    raise: OperationFailed - unable to complete request
    compliance: mandatory - This method must be implemented.
    """
    if not self.has_knowledge_category():
        raise IllegalState()
    # The original return ended with a stray trailing comma, which wrapped
    # the Grade in a one-element tuple instead of returning the Grade
    # itself (contradicting the documented return type).
    return Grade(self._get_grade_map(self._my_map['knowledgeCategoryId']))
|
Gets the grade associated with the knowledge dimension.
return: (osid.grading.Grade) - the grade
raise: IllegalState - has_knowledge_category() is false
raise: OperationFailed - unable to complete request
compliance: mandatory - This method must be implemented.
|
def string(self, *args, **kwargs):
    """Compare attributes of pairs with string algorithm.

    Shortcut of :class:`recordlinkage.compare.String`::

        from recordlinkage.compare import String

        indexer = recordlinkage.Compare()
        indexer.add(String())
    """
    self.add(String(*args, **kwargs))
    return self
|
Compare attributes of pairs with string algorithm.
Shortcut of :class:`recordlinkage.compare.String`::
from recordlinkage.compare import String
indexer = recordlinkage.Compare()
indexer.add(String())
|
def autocomplete():
    """Entry Point for completion of main and subcommand options.

    Reads the shell completion state from the COMP_WORDS / COMP_CWORD
    environment variables (exported by pip's bash completion snippet),
    prints candidate completions to stdout and exits with status 1.
    """
    # Don't complete if user hasn't sourced bash_completion file.
    if 'PIP_AUTO_COMPLETE' not in os.environ:
        return
    # cwords drops the leading program name; cword indexes the word being
    # completed (1-based from the shell).
    cwords = os.environ['COMP_WORDS'].split()[1:]
    cword = int(os.environ['COMP_CWORD'])
    try:
        current = cwords[cword - 1]
    except IndexError:
        current = ''
    subcommands = [cmd for cmd, summary in get_summaries()]
    options = []
    # subcommand
    try:
        subcommand_name = [w for w in cwords if w in subcommands][0]
    except IndexError:
        subcommand_name = None
    parser = create_main_parser()
    # subcommand options
    if subcommand_name:
        # special case: 'help' subcommand has no options
        if subcommand_name == 'help':
            sys.exit(1)
        # special case: list locally installed dists for show and uninstall
        should_list_installed = (
            subcommand_name in ['show', 'uninstall'] and
            not current.startswith('-')
        )
        if should_list_installed:
            installed = []
            lc = current.lower()
            for dist in get_installed_distributions(local_only=True):
                if dist.key.startswith(lc) and dist.key not in cwords[1:]:
                    installed.append(dist.key)
            # if there are no dists installed, fall back to option completion
            if installed:
                for dist in installed:
                    print(dist)
                sys.exit(1)
        subcommand = commands_dict[subcommand_name]()
        for opt in subcommand.parser.option_list_all:
            if opt.help != optparse.SUPPRESS_HELP:
                for opt_str in opt._long_opts + opt._short_opts:
                    options.append((opt_str, opt.nargs))
        # filter out previously specified options from available options
        prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
        options = [(x, v) for (x, v) in options if x not in prev_opts]
        # filter options by current input
        options = [(k, v) for k, v in options if k.startswith(current)]
        # get completion type given cwords and available subcommand options
        completion_type = get_path_completion_type(
            cwords, cword, subcommand.parser.option_list_all,
        )
        # get completion files and directories if ``completion_type`` is
        # ``<file>``, ``<dir>`` or ``<path>``
        if completion_type:
            options = auto_complete_paths(current, completion_type)
            options = ((opt, 0) for opt in options)
        for option in options:
            opt_label = option[0]
            # append '=' to options which require args
            if option[1] and option[0][:2] == "--":
                opt_label += '='
            print(opt_label)
    else:
        # show main parser options only when necessary
        opts = [i.option_list for i in parser.option_groups]
        opts.append(parser.option_list)
        opts = (o for it in opts for o in it)
        if current.startswith('-'):
            for opt in opts:
                if opt.help != optparse.SUPPRESS_HELP:
                    subcommands += opt._long_opts + opt._short_opts
        else:
            # get completion type given cwords and all available options
            completion_type = get_path_completion_type(cwords, cword, opts)
            if completion_type:
                subcommands = auto_complete_paths(current, completion_type)
        print(' '.join([x for x in subcommands if x.startswith(current)]))
    sys.exit(1)
|
Entry Point for completion of main and subcommand options.
|
def set_attribute(self, code, value):
    """Store *value* under the attribute identified by *code* for this user.

    The attribute row is created if it does not exist yet.
    """
    attribute, _created = self.get_or_create(code=code)
    attribute.value = value
    attribute.save()
|
Set attribute for user
|
def _cbc_encrypt(self, content, final_key):
    """Encrypt *content* with AES-CBC, applying PKCS#7-style padding."""
    cipher = AES.new(final_key, AES.MODE_CBC, self._enc_iv)
    # Pad up to the next block boundary; a full extra block is added when
    # the content is already aligned (pad value == pad length).
    pad_len = 16 - len(content) % AES.block_size
    content += bytes([pad_len]) * pad_len
    return cipher.encrypt(bytes(content))
|
This method encrypts the content.
|
def add_vrf(self, auth, attr):
        """ Add a new VRF.

            * `auth` [BaseAuth]
                AAA options.
            * `attr` [vrf_attr]
                The new VRF's attributes.

            Add a VRF based on the values stored in the `attr` dict.

            Returns a dict describing the VRF which was added.

            This is the documentation of the internal backend function. It's
            exposed over XML-RPC, please also see the XML-RPC documentation for
            :py:func:`nipap.xmlrpc.NipapXMLRPC.add_vrf` for full understanding.
        """
        self._logger.debug("add_vrf called; attr: %s" % unicode(attr))
        # sanity check - do we have all attributes?
        req_attr = [ 'rt', 'name' ]
        self._check_attr(attr, req_attr, _vrf_attrs)
        # Build and run the INSERT from the validated attribute dict.
        insert, params = self._sql_expand_insert(attr)
        sql = "INSERT INTO ip_net_vrf " + insert
        self._execute(sql, params)
        vrf_id = self._lastrowid()
        # Re-read the freshly inserted row so defaults filled in by the
        # database are included in the returned dict.
        vrf = self.list_vrf(auth, { 'id': vrf_id })[0]
        # write to audit table
        audit_params = {
            'vrf_id': vrf['id'],
            'vrf_rt': vrf['rt'],
            'vrf_name': vrf['name'],
            'username': auth.username,
            'authenticated_as': auth.authenticated_as,
            'full_name': auth.full_name,
            'authoritative_source': auth.authoritative_source,
            'description': 'Added VRF %s with attr: %s' % (vrf['rt'], unicode(vrf))
        }
        sql, params = self._sql_expand_insert(audit_params)
        self._execute('INSERT INTO ip_net_log %s' % sql, params)
        return vrf
|
Add a new VRF.
* `auth` [BaseAuth]
AAA options.
* `attr` [vrf_attr]
The new VRF's attributes.
Add a VRF based on the values stored in the `attr` dict.
Returns a dict describing the VRF which was added.
This is the documentation of the internal backend function. It's
exposed over XML-RPC, please also see the XML-RPC documentation for
:py:func:`nipap.xmlrpc.NipapXMLRPC.add_vrf` for full understanding.
|
def _init_go_sources(self, go_sources_arg, go2obj_arg):
"""Return GO sources which are present in GODag."""
gos_user = set(go_sources_arg)
if 'children' in self.kws and self.kws['children']:
gos_user |= get_leaf_children(gos_user, go2obj_arg)
gos_godag = set(go2obj_arg)
gos_source = gos_user.intersection(gos_godag)
gos_missing = gos_user.difference(gos_godag)
if not gos_missing:
return gos_source
sys.stdout.write("{N} GO IDs NOT FOUND IN GO DAG: {GOs}\n".format(
N=len(gos_missing), GOs=" ".join([str(e) for e in gos_missing])))
return gos_source
|
Return GO sources which are present in GODag.
|
def scroll(self, scroll_id=None, body=None, params=None):
    """
    Scroll a search request created by specifying the scroll parameter.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-scroll.html>`_

    :arg scroll_id: The scroll ID
    :arg body: The scroll ID if not passed by URL or query parameter.
    :arg params: optional dict of query-string parameters.
    :arg scroll: Specify how long a consistent view of the index should be
        maintained for scrolled search
    :arg rest_total_hits_as_int: This parameter is used to restore the total hits as a number
        in the response. This param is added version 6.x to handle mixed cluster queries where nodes
        are in multiple versions (7.0 and 6.latest)
    :raises ValueError: when neither ``scroll_id`` nor ``body`` is given.
    """
    if scroll_id in SKIP_IN_PATH and body in SKIP_IN_PATH:
        raise ValueError("You need to supply scroll_id or body.")
    elif scroll_id and not body:
        body = {"scroll_id": scroll_id}
    elif scroll_id:
        # Both scroll_id and body were supplied. ``params`` defaults to
        # None, so create the dict first instead of crashing with a
        # TypeError on item assignment.
        if params is None:
            params = {}
        params["scroll_id"] = scroll_id
    return self.transport.perform_request(
        "GET", "/_search/scroll", params=params, body=body
    )
|
Scroll a search request created by specifying the scroll parameter.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-scroll.html>`_
:arg scroll_id: The scroll ID
:arg body: The scroll ID if not passed by URL or query parameter.
:arg scroll: Specify how long a consistent view of the index should be
maintained for scrolled search
:arg rest_total_hits_as_int: This parameter is used to restore the total hits as a number
in the response. This param is added version 6.x to handle mixed cluster queries where nodes
are in multiple versions (7.0 and 6.latest)
|
def target_lines(self):
    """The formatted target_type(...) lines for this target.

    Convenience wrapper that splices the (possibly changed)
    `dependency_lines` back into a copy of the original target text.
    """
    source_lines = list(self._target_source_lines)
    start, end = self._dependencies_interval
    return source_lines[:start] + self.dependency_lines() + source_lines[end:]
|
The formatted target_type(...) lines for this target.
This is just a convenience method for extracting and re-injecting the changed
`dependency_lines` into the target text.
|
def _stream_helper(self, response, decode=False):
        """Generator for data coming from a chunked-encoded HTTP response.

        :param response: a requests Response whose underlying urllib3 body
            may use chunked transfer encoding.
        :param decode: when True, yield JSON-decoded objects instead of the
            raw byte chunks.
        """
        if response.raw._fp.chunked:
            if decode:
                # NOTE(review): json_stream appears to consume this same
                # generator's raw byte output and yield decoded JSON
                # objects -- confirm against the json_stream helper.
                for chunk in json_stream(self._stream_helper(response, False)):
                    yield chunk
            else:
                reader = response.raw
                while not reader.closed:
                    # this read call will block until we get a chunk
                    data = reader.read(1)
                    if not data:
                        break
                    # Drain the rest of the current chunk in one read;
                    # reader._fp.chunk_left is urllib3-internal state.
                    if reader._fp.chunk_left:
                        data += reader.read(reader._fp.chunk_left)
                    yield data
        else:
            # Response isn't chunked, meaning we probably
            # encountered an error immediately
            yield self._result(response, json=decode)
|
Generator for data coming from a chunked-encoded HTTP response.
|
def powered_off(name, connection=None, username=None, password=None):
    '''
    Stops a VM by power off.

    .. versionadded:: 2016.3.0

    :param connection: libvirt connection URI, overriding defaults

        .. versionadded:: 2019.2.0

    :param username: username to connect with, overriding defaults

        .. versionadded:: 2019.2.0

    :param password: password to connect with, overriding defaults

        .. versionadded:: 2019.2.0

    .. code-block:: yaml

        domain_name:
          virt.stopped
    '''
    success_msg = 'Machine has been powered off'
    return _virt_call(name, 'stop', 'unpowered', success_msg,
                      connection=connection, username=username,
                      password=password)
|
Stops a VM by power off.
.. versionadded:: 2016.3.0
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
.. code-block:: yaml
domain_name:
virt.stopped
|
def get_unknown_check_result_brok(cmd_line):
    """Create unknown check result brok and fill it with command data.

    :param cmd_line: command line to extract data
    :type cmd_line: str
    :return: unknown check result brok, or None when the line does not parse
    :rtype: alignak.objects.brok.Brok
    """
    service_pattern = (r'^\[([0-9]{10})] PROCESS_(SERVICE)_CHECK_RESULT;'
                       r'([^\;]*);([^\;]*);([^\;]*);([^\|]*)(?:\|(.*))?')
    host_pattern = (r'^\[([0-9]{10})] PROCESS_(HOST)_CHECK_RESULT;'
                    r'([^\;]*);([^\;]*);([^\|]*)(?:\|(.*))?')
    # Try the service form first, then fall back to the host form.
    match = re.match(service_pattern, cmd_line) or re.match(host_pattern, cmd_line)
    if not match:
        return None
    data = {
        'time_stamp': int(match.group(1)),
        'host_name': match.group(3),
    }
    if match.group(2) == 'SERVICE':
        data['service_description'] = match.group(4)
        data['return_code'] = match.group(5)
        data['output'] = match.group(6)
        data['perf_data'] = match.group(7)
    else:
        data['return_code'] = match.group(4)
        data['output'] = match.group(5)
        data['perf_data'] = match.group(6)
    return Brok({'type': 'unknown_%s_check_result' % match.group(2).lower(),
                 'data': data})
|
Create unknown check result brok and fill it with command data
:param cmd_line: command line to extract data
:type cmd_line: str
:return: unknown check result brok
:rtype: alignak.objects.brok.Brok
|
def extract_response(self, extractors):
    """Extract values from requests.Response and store them in an OrderedDict.

    Args:
        extractors (list):
            [
                {"resp_status_code": "status_code"},
                {"resp_headers_content_type": "headers.content-type"},
                {"resp_content": "content"},
                {"resp_content_person_first_name": "content.person.name.first_name"}
            ]

    Returns:
        OrderedDict: variable binds ordered dict
    """
    if not extractors:
        return {}
    logger.log_debug("start to extract from response object.")
    binds = utils.ensure_mapping_format(extractors)
    return OrderedDict(
        (variable, self.extract_field(field))
        for variable, field in binds.items()
    )
|
extract value from requests.Response and store in OrderedDict.
Args:
extractors (list):
[
{"resp_status_code": "status_code"},
{"resp_headers_content_type": "headers.content-type"},
{"resp_content": "content"},
{"resp_content_person_first_name": "content.person.name.first_name"}
]
Returns:
OrderDict: variable binds ordered dict
|
def center(self) -> Location:
    """
    :return: a Point corresponding to the absolute position of the center
    of the well relative to the deck (with the front-left corner of slot 1
    as (0,0,0))
    """
    top_location = self.top()
    top_point = top_location.point
    mid_z = top_point.z - self._depth / 2.0
    return Location(Point(x=top_point.x, y=top_point.y, z=mid_z), self)
|
:return: a Point corresponding to the absolute position of the center
of the well relative to the deck (with the front-left corner of slot 1
as (0,0,0))
|
def get_url(self, action, obj=None, domain=True):
    """
    Returns an RFC3987 IRI for a HTML representation of the given object
    and action. If domain is true, the current site's domain is prepended.
    """
    if not obj:
        url = reverse('actstream_detail', None, (action.pk,))
    elif hasattr(obj, 'get_absolute_url'):
        url = obj.get_absolute_url()
    else:
        ctype = ContentType.objects.get_for_model(obj)
        url = reverse('actstream_actor', None, (ctype.pk, obj.pk))
    if not domain:
        return url
    return add_domain(Site.objects.get_current().domain, url)
|
Returns an RFC3987 IRI for a HTML representation of the given object, action.
If domain is true, the current site's domain will be added.
|
def project_activity(index, start, end):
    """Compute the metrics for the project activity section of the enriched
    github pull requests index.

    Returns a dictionary containing a "metrics" key holding the metric
    objects for this section.

    :param index: index object
    :param start: start date to get the data from
    :param end: end date to get the data up to
    :return: dictionary with the value of the metrics
    """
    metrics = [SubmittedPRs(index, start, end),
               ClosedPRs(index, start, end)]
    return {"metrics": metrics}
|
Compute the metrics for the project activity section of the enriched
github pull requests index.
Returns a dictionary containing a "metric" key. This key contains the
metrics for this section.
:param index: index object
:param start: start date to get the data from
:param end: end date to get the data up to
:return: dictionary with the value of the metrics
|
def idd2grouplist(fhandle):
    """Wrapper for iddtxt2grouplist.

    :param fhandle: an open file-like object, or a path to an IDD file.
    :return: the group list produced by ``iddtxt2grouplist``.
    """
    try:
        txt = fhandle.read()
    except AttributeError:
        # ``fhandle`` is a path, not a file object. Use a context manager
        # so the handle is closed (the original leaked it).
        with open(fhandle, 'r') as idd_file:
            txt = idd_file.read()
    return iddtxt2grouplist(txt)
|
wrapper for iddtxt2grouplist
|
def get_jid(jid):
    '''
    Return the information returned when the specified job id was executed
    '''
    cb_ = _get_connection()
    _verify_views()
    rows = cb_.query(DESIGN_NAME, 'jid_returns', key=six.text_type(jid),
                     include_docs=True)
    return {row.value: row.doc.value for row in rows}
|
Return the information returned when the specified job id was executed
|
def _is_small_molecule(pe):
    """Return True if the element is a small molecule."""
    small_molecule_types = (
        _bp('SmallMolecule'),
        _bpimpl('SmallMolecule'),
        _bp('SmallMoleculeReference'),
        _bpimpl('SmallMoleculeReference'),
    )
    return isinstance(pe, small_molecule_types)
|
Return True if the element is a small molecule
|
def _proxy(self):
    """
    Generate an instance context for the instance, the context is capable of
    performing various actions. All instance actions are proxied to the context.

    :returns: WorkflowContext for this WorkflowInstance
    :rtype: twilio.rest.taskrouter.v1.workspace.workflow.WorkflowContext
    """
    # Lazily build and memoize the context on first access.
    context = self._context
    if context is None:
        context = WorkflowContext(
            self._version,
            workspace_sid=self._solution['workspace_sid'],
            sid=self._solution['sid'],
        )
        self._context = context
    return context
|
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: WorkflowContext for this WorkflowInstance
:rtype: twilio.rest.taskrouter.v1.workspace.workflow.WorkflowContext
|
def validate_capacity(capacity):
    """Validate ScalingConfiguration capacity for serverless DBCluster.

    Returns the capacity unchanged when valid, otherwise raises ValueError.
    """
    if capacity in VALID_SCALING_CONFIGURATION_CAPACITIES:
        return capacity
    allowed = ", ".join(str(c) for c in VALID_SCALING_CONFIGURATION_CAPACITIES)
    raise ValueError(
        "ScalingConfiguration capacity must be one of: {}".format(allowed)
    )
|
Validate ScalingConfiguration capacity for serverless DBCluster
|
def libvlc_media_get_duration(p_md):
    '''Get duration (in ms) of media descriptor object item.
    @param p_md: media descriptor object.
    @return: duration of media item or -1 on error.
    '''
    # Reuse the ctypes binding from the registry when already built;
    # otherwise construct it (restype c_longlong, one Media argument).
    # NOTE(review): presumably _Cfunction also registers the binding in
    # _Cfunctions for subsequent calls -- confirm against the helper.
    f = _Cfunctions.get('libvlc_media_get_duration', None) or \
        _Cfunction('libvlc_media_get_duration', ((1,),), None,
                    ctypes.c_longlong, Media)
    return f(p_md)
|
Get duration (in ms) of media descriptor object item.
@param p_md: media descriptor object.
@return: duration of media item or -1 on error.
|
def _chunk_filter(self, extensions):
    """Build a predicate that excludes chunks by extension and ignore rules.

    ``extensions`` may be a whitespace-separated string, an iterable of
    suffixes, or None (meaning: do not filter by extension).
    """
    if isinstance(extensions, six.string_types):
        extensions = extensions.split()

    def _accept(chunk):
        """Return True when the chunk passes every exclusion rule."""
        name = chunk['name']
        # Extension filter (skipped entirely when extensions is None).
        if extensions is not None and not any(
                name.endswith(ext) for ext in extensions):
            return False
        # Regex ignore rules.
        if any(pattern.match(name) for pattern in self.state.ignore_re):
            return False
        # Glob-style ignore rules.
        if any(fnmatch.fnmatchcase(name, pattern)
               for pattern in self.state.ignore):
            return False
        return True

    return _accept
|
Create a filter from the extensions and ignore files
|
def _set_show_support_save_status(self, v, load=False):
  """
  Setter method for show_support_save_status, mapped from YANG variable /brocade_ras_ext_rpc/show_support_save_status (rpc)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_show_support_save_status is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_show_support_save_status() directly.
  YANG Description: Information on the status of recent support save request
  """
  # If the incoming value carries a union-type coercion hook, apply it
  # first so validation below sees the canonical representation.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Wrap the value in a YANGDynClass so it is validated against the
    # generated schema for this rpc node; a failure raises TypeError or
    # ValueError, which is converted into the structured error below.
    t = YANGDynClass(v,base=show_support_save_status.show_support_save_status, is_leaf=True, yang_name="show-support-save-status", rest_name="show-support-save-status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'showSupportSaveStatus'}}, namespace='urn:brocade.com:mgmt:brocade-ras-ext', defining_module='brocade-ras-ext', yang_type='rpc', is_config=True)
  except (TypeError, ValueError):
    # Re-raise as a ValueError carrying the expected type information,
    # matching the error format used by other generated setters.
    raise ValueError({
      'error-string': """show_support_save_status must be of a type compatible with rpc""",
      'defined-type': "rpc",
      'generated-type': """YANGDynClass(base=show_support_save_status.show_support_save_status, is_leaf=True, yang_name="show-support-save-status", rest_name="show-support-save-status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'showSupportSaveStatus'}}, namespace='urn:brocade.com:mgmt:brocade-ras-ext', defining_module='brocade-ras-ext', yang_type='rpc', is_config=True)""",
    })
  # Store the validated wrapper and notify the parent (when supported)
  # that a child node changed.
  self.__show_support_save_status = t
  if hasattr(self, '_set'):
    self._set()
|
Setter method for show_support_save_status, mapped from YANG variable /brocade_ras_ext_rpc/show_support_save_status (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_show_support_save_status is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_show_support_save_status() directly.
YANG Description: Information on the status of recent support save request
|
def interpret_expenditure_entry(entry):
    """Interpret data fields within a CO-TRACER expenditure report.

    Interpret the expenditure amount, expenditure date, filed date, amended,
    and amendment fields of the provided entry. Both dates (expenditure and
    filed) are interpreted together and, if either fails, both retain their
    original value. Likewise, amended and amendment are interpreted together
    and, if one is malformed, both retain their original value. Entry may be
    edited in place and side-effects are possible in coupled code. However,
    client code should use the return value to guard against future changes.

    A value with the key 'AmountsInterpreted' will be set to True or False in
    the returned entry if floating point values are successfully interpreted
    (ExpenditureAmount) or not respectively.

    A value with the key 'DatesInterpreted' will be set to True or False in
    the returned entry if ISO 8601 strings are successfully interpreted
    (ExpenditureDate and FiledDate) or not respectively.

    A value with the key 'BooleanFieldsInterpreted' will be set to True or
    False in the returned entry if boolean strings are successfully
    interpreted (Amended and Amendment) or not respectively.

    @param entry: The expenditure report data to manipulate / interpret.
    @type entry: dict
    @return: The entry passed
    @raise ValueError: Raised if any expected field cannot be found in entry.
    """
    # Amount: a single float field.
    try:
        amount = float(entry['ExpenditureAmount'])
    except ValueError:
        entry['AmountsInterpreted'] = False
    else:
        entry['AmountsInterpreted'] = True
        entry['ExpenditureAmount'] = amount

    # Dates: parsed together so a failure leaves both untouched.
    try:
        expenditure_date = parse_iso_str(entry['ExpenditureDate'])
        filed_date = parse_iso_str(entry['FiledDate'])
    except ValueError:
        entry['DatesInterpreted'] = False
    else:
        entry['DatesInterpreted'] = True
        entry['ExpenditureDate'] = expenditure_date
        entry['FiledDate'] = filed_date

    # Boolean flags: parsed together so a failure leaves both untouched.
    try:
        amended = parse_yes_no_str(entry['Amended'])
        amendment = parse_yes_no_str(entry['Amendment'])
    except ValueError:
        entry['BooleanFieldsInterpreted'] = False
    else:
        entry['BooleanFieldsInterpreted'] = True
        entry['Amended'] = amended
        entry['Amendment'] = amendment

    return entry
|
Interpret data fields within a CO-TRACER expenditure report.
Interpret the expenditure amount, expenditure date, filed date, amended,
and amendment fields of the provided entry. All dates (expenditure and
filed) are interpreted together and, if any fails, all will retain their
original value. Likewise, amended and amendment are interpreted together and
if one is malformed, both will retain their original value. Entry may be
edited in place and side-effects are possible in coupled code. However,
client code should use the return value to guard against future changes.
A value with the key 'AmountsInterpreted' will be set to True or False in
the returned entry if floating point values are successfully interpreted
(ExpenditureAmount) or not respectively.
A value with the key 'DatesInterpreted' will be set to True or False in
the returned entry if ISO 8601 strings are successfully interpreted
(ExpenditureDate and FiledDate) or not respectively.
A value with the key 'BooleanFieldsInterpreted' will be set to True or
False in the returned entry if boolean strings are successfully interpreted
(Amended and Amendment) or not respectively.
@param entry: The expenditure report data to manipulate / interpret.
@type entry: dict
@return: The entry passed
@raise ValueError: Raised if any expected field cannot be found in entry.
|
def get_partitioned_view_result(self, partition_key, ddoc_id, view_name,
                                raw_result=False, **kwargs):
    """
    Retrieves the partitioned view result based on the design document and
    view name.
    See :func:`~cloudant.database.CouchDatabase.get_view_result` method for
    further details.
    :param str partition_key: Partition key.
    :param str ddoc_id: Design document id used to get result.
    :param str view_name: Name of the view used to get result.
    :param bool raw_result: Dictates whether the view result is returned
        as a default Result object or a raw JSON response.
        Defaults to False.
    :param kwargs: See
        :func:`~cloudant.database.CouchDatabase.get_view_result` method for
        available keyword arguments.
    :returns: The result content either wrapped in a QueryResult or
        as the raw response JSON content.
    :rtype: QueryResult, dict
    """
    design_doc = DesignDocument(self, ddoc_id)
    partitioned_view = View(design_doc, view_name,
                            partition_key=partition_key)
    return self._get_view_result(partitioned_view, raw_result, **kwargs)
|
Retrieves the partitioned view result based on the design document and
view name.
See :func:`~cloudant.database.CouchDatabase.get_view_result` method for
further details.
:param str partition_key: Partition key.
:param str ddoc_id: Design document id used to get result.
:param str view_name: Name of the view used to get result.
:param bool raw_result: Dictates whether the view result is returned
as a default Result object or a raw JSON response.
Defaults to False.
:param kwargs: See
:func:`~cloudant.database.CouchDatabase.get_view_result` method for
available keyword arguments.
:returns: The result content either wrapped in a QueryResult or
as the raw response JSON content.
:rtype: QueryResult, dict
|
def main():
    ''' main program loop: connect to the pod and send a test message '''
    conn = symphony.Config('/etc/es-bot/es-bot.cfg')
    # connect to pod
    try:
        agent, pod, symphony_sid = conn.connect()
        print('connected: %s' % (symphony_sid))
    except Exception as err:
        print('failed to connect!: %s' % (err))
        # Bail out: without a connection, agent/symphony_sid are unbound
        # and continuing would raise NameError below.
        return
    # message payload
    msgFormat = 'MESSAGEML'
    message = '<messageML> hello world. </messageML>'
    # send message
    try:
        status_code, retstring = agent.send_message(symphony_sid, msgFormat, message)
        print("%s: %s" % (status_code, retstring))
    except Exception as err:
        # retstring is unbound when send_message itself raises, so report
        # only the exception instead of triggering UnboundLocalError.
        print('failed to send message!: %s' % (err))
|
main program loop
|
async def install_sandboxed_update(filename, loop):
    """
    Create a virtual environment and activate it, then install an update
    candidate into it (leaves the virtual environment activated).

    :return: a result dict and the path to python in the virtual environment
    """
    log.debug("Creating virtual environment")
    venv_dir, python, venv_site_pkgs = await create_virtual_environment(
        loop=loop)

    log.debug("Installing update server into virtual environment")
    out, err, returncode = await _install(python, filename, loop)

    # Success requires both an empty error stream and a zero exit code.
    if not err and returncode == 0:
        log.debug("Install successful")
        res = {'status': 'success'}
    else:
        log.error("Install failed: {}".format(err))
        res = {'status': 'failure', 'message': err}
    return res, python, venv_site_pkgs, venv_dir
|
Create a virtual environment and activate it, and then install an
update candidate (leaves virtual environment activated)
:return: a result dict and the path to python in the virtual environment
|
def procs():
    '''
    Return the process data

    .. versionchanged:: 2016.11.4
        Added support for AIX

    CLI Example:

    .. code-block:: bash

        salt '*' status.procs
    '''
    ret = {}
    # Run the grain-provided ps command and peel off the header row.
    output = __salt__['cmd.run'](__grains__['ps'],
                                 python_shell=True).splitlines()
    header = output.pop(0).split()

    def _col(*names):
        # First header name that matches wins; fall back to column 0,
        # matching the original behavior when no name is present.
        for name in names:
            if name in header:
                return header.index(name)
        return 0

    uind = _col('USER', 'UID')
    pind = _col('PID')
    cind = _col('COMMAND', 'CMD')

    for line in output:
        if not line:
            continue
        fields = line.split()
        ret[fields[pind]] = {'user': fields[uind],
                             'cmd': ' '.join(fields[cind:])}
    return ret
|
Return the process data
.. versionchanged:: 2016.11.4
Added support for AIX
CLI Example:
.. code-block:: bash
salt '*' status.procs
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.