def exists(name, attributes):
'''
Make sure the given attributes exist on the file/directory
name
The path to the file/directory
attributes
        The attributes that should exist on the file/directory. This is
        accepted as an array, with each entry given as a key and value
        separated by an equals sign. To specify a hex value, prefix it
        with 0x.
'''
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}}
if not os.path.exists(name):
ret['result'] = False
ret['comment'] = "File or directory doesn't exist"
return ret
    current_attrs = __salt__['xattr.list'](name)
    for attr in attributes:
        attr_id, attr_val = attr.split("=")
        attr_hex = attr_val.startswith("0x")
        if attr_hex:
            # Re-read the current value in hex form, with spaces and new
            # lines removed, so the two representations can be compared
            if attr_id in current_attrs:
                current_attrs[attr_id] = __salt__['xattr.read'](
                    name, attr_id, hex=True).replace(" ", "").replace("\n", "")
            attr_val = attr_val[2:].replace(" ", "")
        value_matches = (attr_id in current_attrs and
                         current_attrs[attr_id] == attr_val)
        if value_matches:
continue
else:
ret['changes'][attr_id] = attr_val
__salt__['xattr.write'](name, attr_id, attr_val, attr_hex)
if not ret['changes']:
ret['comment'] = 'All values existed correctly.'
return ret
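A minimal standalone sketch of the attribute-string convention this state expects; no Salt runtime is involved, and the helper name normalize_attr is hypothetical:

def normalize_attr(attr):
    # Split "key=value"; a value prefixed with 0x is matched in hex form,
    # with spaces stripped, mirroring what the state does above.
    attr_id, attr_val = attr.split("=", 1)
    if attr_val.startswith("0x"):
        attr_val = attr_val[2:].replace(" ", "")
    return attr_id, attr_val

print(normalize_attr("com.test.attr=0x41 42"))  # ('com.test.attr', '4142')
print(normalize_attr("com.test.label=hello"))   # ('com.test.label', 'hello')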
def view_isometric(self):
"""
Resets the camera to a default isometric view showing all the
actors in the scene.
"""
self.camera_position = self.get_default_cam_pos()
self.camera_set = False
return self.reset_camera()
def get_files_to_remove(self):
"""
Returns orphaned media files to be removed grouped by resource type.
        All files whose paths start with any of the exclude paths are ignored.
"""
files_to_remove = {}
        needful_files = self.get_needful_files()
        exclude_paths = self.get_exclude_paths()
        for resources_type, resources in self.get_uploaded_resources():
            resources = {resource for resource in resources
                         if not resource.startswith(exclude_paths)}
files_to_remove[resources_type] = resources - needful_files
return files_to_remove
def parse_slab_stats(slab_stats):
"""Convert output from memcached's `stats slabs` into a Python dict.
Newlines are returned by memcached along with carriage returns
(i.e. '\r\n').
>>> parse_slab_stats(
"STAT 1:chunk_size 96\r\nSTAT 1:chunks_per_page 10922\r\nSTAT "
"active_slabs 1\r\nSTAT total_malloced 1048512\r\nEND\r\n")
{
'slabs': {
1: {
'chunk_size': 96,
'chunks_per_page': 10922,
# ...
},
},
'active_slabs': 1,
'total_malloced': 1048512,
}
"""
    stats_dict = {'slabs': defaultdict(dict)}
for line in slab_stats.splitlines():
if line == 'END':
break
# e.g.: "STAT 1:chunks_per_page 10922"
cmd, key, value = line.split(' ')
if cmd != 'STAT':
continue
# e.g.: "STAT active_slabs 1"
if ":" not in key:
stats_dict[key] = int(value)
continue
slab, key = key.split(':')
stats_dict['slabs'][int(slab)][key] = int(value)
return stats_dict
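A quick usage sketch, assuming parse_slab_stats and its defaultdict import are in scope; the sample payload mirrors the docstring:

raw = ("STAT 1:chunk_size 96\r\nSTAT 1:chunks_per_page 10922\r\n"
       "STAT active_slabs 1\r\nSTAT total_malloced 1048512\r\nEND\r\n")
stats = parse_slab_stats(raw)
print(stats['active_slabs'])            # 1
print(stats['slabs'][1]['chunk_size'])  # 96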
def stack_frames(sig, sampling_frequency, frame_length=0.020,
                 frame_stride=0.020, filter=lambda x: np.ones((x,)),
                 zero_padding=True):
"""Frame a signal into overlapping frames.
Args:
sig (array): The audio signal to frame of size (N,).
sampling_frequency (int): The sampling frequency of the signal.
        frame_length (float): The length of each frame in seconds.
        frame_stride (float): The stride between frames in seconds.
        filter (function): Returns the time-domain filter to apply to each
            frame. The default is all ones, so frames are left unchanged.
        zero_padding (bool): If the number of samples is not a multiple of
            the frame length, the signal is zero-padded so that the last
            frame can be generated.
Returns:
array: Stacked_frames-Array of frames of size (number_of_frames x frame_len).
"""
# Check dimension
    s = "Signal dimension should be of the format (N,) but it is %s instead"
assert sig.ndim == 1, s % str(sig.shape)
# Initial necessary values
length_signal = sig.shape[0]
frame_sample_length = int(
np.round(
sampling_frequency *
frame_length)) # Defined by the number of samples
frame_stride = float(np.round(sampling_frequency * frame_stride))
# Zero padding is done for allocating space for the last frame.
if zero_padding:
# Calculation of number of frames
numframes = (int(math.ceil((length_signal
- frame_sample_length) / frame_stride)))
# Zero padding
len_sig = int(numframes * frame_stride + frame_sample_length)
additive_zeros = np.zeros((len_sig - length_signal,))
signal = np.concatenate((sig, additive_zeros))
else:
# No zero padding! The last frame which does not have enough
# samples(remaining samples <= frame_sample_length), will be dropped!
numframes = int(math.floor((length_signal
- frame_sample_length) / frame_stride))
# new length
len_sig = int((numframes - 1) * frame_stride + frame_sample_length)
signal = sig[0:len_sig]
# Getting the indices of all frames.
    indices = (
        np.tile(np.arange(0, frame_sample_length), (numframes, 1)) +
        np.tile(np.arange(0, numframes * frame_stride, frame_stride),
                (frame_sample_length, 1)).T)
indices = np.array(indices, dtype=np.int32)
# Extracting the frames based on the allocated indices.
frames = signal[indices]
    # Apply the window function
    window = np.tile(filter(frame_sample_length), (numframes, 1))
    extracted_frames = frames * window
    return extracted_frames
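A worked sizing example, assuming stack_frames plus numpy are in scope: at 16 kHz with 20 ms frames and a 20 ms stride, frame_sample_length is round(16000 * 0.020) = 320 samples, numframes is ceil((16000 - 320) / 320) = 49, and the padded length 49 * 320 + 320 exactly matches the 16000-sample input, so no zeros are appended.

import numpy as np
sig = np.random.randn(16000)  # one second of audio at 16 kHz
frames = stack_frames(sig, sampling_frequency=16000,
                      frame_length=0.020, frame_stride=0.020)
print(frames.shape)           # (49, 320)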
def from_args(cls: Type[ConfigT], args: Namespace) -> ConfigT:
"""Build and return a new :class:`IMAPConfig` using command-line
arguments.
Args:
args: The arguments parsed from the command-line.
"""
parsed_args = cls.parse_args(args)
return cls(args, host=args.host, port=args.port, debug=args.debug,
reject_insecure_auth=not args.insecure_login,
cert_file=args.cert, key_file=args.key,
**parsed_args)
def main():
"""Main method."""
args = parse_cmd_arguments()
html_file = args.file
try:
json.loads(args.add_tags or '{}')
json.loads(args.exc_tags or '{}')
except ValueError:
print('\033[91m' + 'Invalid json string: please provide a valid json '
'string e.g {}'.format('\'{"img": "data-url"}\'') + '\033[0m')
sys.exit(1)
staticfied = staticfy(html_file, args=args).encode('utf-8')
file_ops(staticfied, args=args)
def _ldtpize_accessible(self, acc):
"""
        Get LDTP format accessible name
@param acc: Accessible handle
@type acc: object
@return: object type, stripped object name (associated / direct),
associated label
@rtype: tuple
"""
actual_role = self._get_role(acc)
label = self._get_title(acc)
        # Note: re.L is invalid for str patterns on Python 3, so it is omitted
        if re.match("AXWindow", actual_role, re.M | re.U):
# Strip space and new line from window title
strip = r"( |\n)"
else:
# Strip space, colon, dot, underscore and new line from
# all other object types
strip = r"( |:|\.|_|\n)"
if label:
            # Return the role type (if it is not in the known list of roles,
            # return ukn - unknown) and strip the above characters from the
            # name; also return the label_by string
label = re.sub(strip, u"", label)
role = abbreviated_roles.get(actual_role, "ukn")
if self._ldtp_debug and role == "ukn":
print(actual_role, acc)
return role, label
def numBlast_sort(blast, numHits, evalueT, bitT):
"""
parse b6 output with sorting
"""
header = ['#query', 'target', 'pident', 'alen', 'mismatch', 'gapopen',
'qstart', 'qend', 'tstart', 'tend', 'evalue', 'bitscore']
yield header
hmm = {h:[] for h in header}
for line in blast:
if line.startswith('#'):
continue
line = line.strip().split('\t')
# Evalue and Bitscore thresholds
line[10], line[11] = float(line[10]), float(line[11])
evalue, bit = line[10], line[11]
if evalueT is not False and evalue > evalueT:
continue
if bitT is not False and bit < bitT:
continue
for i, h in zip(line, header):
hmm[h].append(i)
hmm = pd.DataFrame(hmm)
for query, df in hmm.groupby(by = ['#query']):
df = df.sort_values(by = ['bitscore'], ascending = False)
for hit in df[header].values[0:numHits]:
yield hit
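A hedged usage sketch, assuming numBlast_sort and pandas are importable; the two b6 rows are made up, and the generator yields the header row before the best hit of each query:

rows = [
    "q1\tt1\t99.0\t100\t1\t0\t1\t100\t1\t100\t1e-50\t200.0",
    "q1\tt2\t98.0\t100\t2\t0\t1\t100\t1\t100\t1e-40\t180.0",
]
# Keep the single best hit per query, filtering out e-values above 1e-30.
for hit in numBlast_sort(iter(rows), numHits=1, evalueT=1e-30, bitT=False):
    print(hit)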
def gaps(args):
"""
%prog gaps agpfile
Print out the distribution of gapsizes. Option --merge allows merging of
adjacent gaps which is used by tidy().
"""
from jcvi.graphics.histogram import loghistogram
p = OptionParser(gaps.__doc__)
p.add_option("--merge", dest="merge", default=False, action="store_true",
help="Merge adjacent gaps (to conform to AGP specification)")
p.add_option("--header", default=False, action="store_true",
help="Produce an AGP header [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
    merge = opts.merge
    agpfile, = args
    fw = sys.stdout
    if merge:
        merged_agpfile = agpfile.replace(".agp", ".merged.agp")
        fw = open(merged_agpfile, "w")
agp = AGP(agpfile)
sizes = []
data = [] # store merged AGPLine's
priorities = ("centromere", "telomere", "scaffold", "contig", \
"clone", "fragment")
for is_gap, alines in groupby(agp, key=lambda x: (x.object, x.is_gap)):
alines = list(alines)
is_gap = is_gap[1]
if is_gap:
gap_size = sum(x.gap_length for x in alines)
gap_types = set(x.gap_type for x in alines)
for gtype in ("centromere", "telomere"):
if gtype in gap_types:
gap_size = gtype
sizes.append(gap_size)
b = deepcopy(alines[0])
b.object_beg = min(x.object_beg for x in alines)
b.object_end = max(x.object_end for x in alines)
b.gap_length = sum(x.gap_length for x in alines)
assert b.gap_length == b.object_end - b.object_beg + 1
b.component_type = 'U' if b.gap_length == 100 else 'N'
gtypes = [x.gap_type for x in alines]
for gtype in priorities:
if gtype in gtypes:
b.gap_type = gtype
break
linkages = [x.linkage for x in alines]
for linkage in ("no", "yes"):
if linkage in linkages:
b.linkage = linkage
break
alines = [b]
data.extend(alines)
loghistogram(sizes)
if opts.header:
AGP.print_header(fw, organism="Medicago truncatula",
taxid=3880, source="J. Craig Venter Institute")
    if merge:
        for ob, bb in groupby(data, lambda x: x.object):
            for i, b in enumerate(bb):
                b.part_number = i + 1
                print(b, file=fw)
        return merged_agpfile
def generate_doxygen_xml(app):
"""Run the doxygen make commands if we're on the ReadTheDocs server"""
read_the_docs_build = os.environ.get('READTHEDOCS', None) == 'True'
if read_the_docs_build:
run_doxygen('..')
sys.stderr.write('Check if shared lib exists\n')
run_build_lib('..')
sys.stderr.write('The wrapper path: %s\n' % str(os.listdir('../wrapper')))
rabit._loadlib()
def _parse_modes(mode_string, unary_modes=""):
"""
Parse the mode_string and return a list of triples.
If no string is supplied return an empty list.
>>> _parse_modes('')
[]
If no sign is supplied, return an empty list.
>>> _parse_modes('ab')
[]
Discard unused args.
>>> _parse_modes('+a foo bar baz')
[['+', 'a', None]]
    Return None for unary args when not provided.
>>> _parse_modes('+abc foo', unary_modes='abc')
[['+', 'a', 'foo'], ['+', 'b', None], ['+', 'c', None]]
This function never throws an error:
>>> import random
>>> def random_text(min_len = 3, max_len = 80):
... len = random.randint(min_len, max_len)
... chars_to_choose = [chr(x) for x in range(0,1024)]
... chars = (random.choice(chars_to_choose) for x in range(len))
... return ''.join(chars)
>>> def random_texts(min_len = 3, max_len = 80):
... while True:
... yield random_text(min_len, max_len)
>>> import itertools
>>> texts = itertools.islice(random_texts(), 1000)
>>> set(type(_parse_modes(text)) for text in texts) == {list}
True
"""
# mode_string must be non-empty and begin with a sign
if not mode_string or not mode_string[0] in '+-':
return []
modes = []
parts = mode_string.split()
mode_part, args = parts[0], parts[1:]
for ch in mode_part:
if ch in "+-":
sign = ch
continue
arg = args.pop(0) if ch in unary_modes and args else None
modes.append([sign, ch, arg])
return modes
def newAddress(self, currency='btc', label=''):
"""
Send a request for a new cryptocurrency deposit address
with an optional label. Return the response.
        Arguments:
currency -- a Gemini supported cryptocurrency (btc, eth)
label -- optional label for the deposit address
"""
request = '/v1/deposit/' + currency + '/newAddress'
url = self.base_url + request
params = {
'request': request,
'nonce': self.get_nonce()
}
if label != '':
params['label'] = label
return requests.post(url, headers=self.prepare(params))
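A hedged usage sketch; client stands in for an authenticated instance of this API wrapper, the label is arbitrary, and an address field in the response body is assumed:

response = client.newAddress('btc', label='cold-storage-1')
if response.status_code == 200:
    print(response.json().get('address'))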
def stop(self, signal=None):
"""Stop the heroku local subprocess and all of its children.
"""
signal = signal or self.int_signal
self.out.log("Cleaning up local Heroku process...")
if self._process is None:
self.out.log("No local Heroku process was running.")
return
try:
os.killpg(os.getpgid(self._process.pid), signal)
self.out.log("Local Heroku process terminated.")
except OSError:
self.out.log("Local Heroku was already terminated.")
self.out.log(traceback.format_exc())
finally:
self._process = None
def get_submission(submission_uuid, read_replica=False):
"""Retrieves a single submission by uuid.
Args:
submission_uuid (str): Identifier for the submission.
Kwargs:
read_replica (bool): If true, attempt to use the read replica database.
If no read replica is available, use the default database.
Raises:
SubmissionNotFoundError: Raised if the submission does not exist.
SubmissionRequestError: Raised if the search parameter is not a string.
SubmissionInternalError: Raised for unknown errors.
Examples:
>>> get_submission("20b78e0f32df805d21064fc912f40e9ae5ab260d")
{
'student_item': 2,
'attempt_number': 1,
'submitted_at': datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),
'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
'answer': u'The answer is 42.'
}
"""
if not isinstance(submission_uuid, six.string_types):
if isinstance(submission_uuid, UUID):
submission_uuid = six.text_type(submission_uuid)
else:
raise SubmissionRequestError(
msg="submission_uuid ({!r}) must be serializable".format(submission_uuid)
)
cache_key = Submission.get_cache_key(submission_uuid)
try:
cached_submission_data = cache.get(cache_key)
except Exception:
# The cache backend could raise an exception
# (for example, memcache keys that contain spaces)
logger.exception("Error occurred while retrieving submission from the cache")
cached_submission_data = None
if cached_submission_data:
logger.info("Get submission {} (cached)".format(submission_uuid))
return cached_submission_data
try:
submission = _get_submission_model(submission_uuid, read_replica)
submission_data = SubmissionSerializer(submission).data
cache.set(cache_key, submission_data)
except Submission.DoesNotExist:
logger.error("Submission {} not found.".format(submission_uuid))
raise SubmissionNotFoundError(
u"No submission matching uuid {}".format(submission_uuid)
)
except Exception as exc:
# Something very unexpected has just happened (like DB misconfig)
err_msg = "Could not get submission due to error: {}".format(exc)
logger.exception(err_msg)
raise SubmissionInternalError(err_msg)
logger.info("Get submission {}".format(submission_uuid))
return submission_data
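A hedged usage sketch, assuming the function and its exception classes are importable from the submissions API module shown above:

try:
    submission = get_submission("20b78e0f32df805d21064fc912f40e9ae5ab260d")
    print(submission['answer'])
except SubmissionNotFoundError:
    print("no submission with that uuid")
except SubmissionRequestError:
    print("the uuid must be a string or UUID")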
def _disc_kn(clearness_index, airmass, max_airmass=12):
"""
Calculate Kn for `disc`
Parameters
----------
clearness_index : numeric
airmass : numeric
max_airmass : float
airmass > max_airmass is set to max_airmass before being used
in calculating Kn.
Returns
-------
Kn : numeric
am : numeric
airmass used in the calculation of Kn. am <= max_airmass.
"""
# short names for equations
kt = clearness_index
am = airmass
am = np.minimum(am, max_airmass) # GH 450
# powers of kt will be used repeatedly, so compute only once
kt2 = kt * kt # about the same as kt ** 2
kt3 = kt2 * kt # 5-10x faster than kt ** 3
bools = (kt <= 0.6)
a = np.where(bools,
0.512 - 1.56*kt + 2.286*kt2 - 2.222*kt3,
-5.743 + 21.77*kt - 27.49*kt2 + 11.56*kt3)
b = np.where(bools,
0.37 + 0.962*kt,
41.4 - 118.5*kt + 66.05*kt2 + 31.9*kt3)
c = np.where(bools,
-0.28 + 0.932*kt - 2.048*kt2,
-47.01 + 184.2*kt - 222.0*kt2 + 73.81*kt3)
delta_kn = a + b * np.exp(c*am)
Knc = 0.866 - 0.122*am + 0.0121*am**2 - 0.000653*am**3 + 1.4e-05*am**4
Kn = Knc - delta_kn
return Kn, am
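A quick numeric sketch, assuming _disc_kn and numpy are in scope; the clearness-index and airmass values are arbitrary, and the last airmass is clipped to max_airmass:

import numpy as np
kt = np.array([0.3, 0.5, 0.7])
am = np.array([1.0, 2.0, 15.0])
kn, am_used = _disc_kn(kt, am)
print(am_used)    # [ 1.  2. 12.]
print(kn.shape)   # (3,)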
def get_hash(key: str) -> int:
"""Gets a hash of the provided key.
Parameters
----------
key :
A string used to create a seed for the random number generator.
Returns
-------
int
A hash of the provided key.
"""
# 4294967295 == 2**32 - 1 which is the maximum allowable seed for a `numpy.random.RandomState`.
return int(hashlib.sha1(key.encode('utf8')).hexdigest(), 16) % 4294967295
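A short usage sketch, assuming get_hash, hashlib, and numpy are in scope. Unlike the built-in hash(), the SHA-1 digest is stable across processes and PYTHONHASHSEED values, so the derived seed is reproducible:

import numpy as np
seed = get_hash('simulation_draw_7')   # always the same integer for this key
rng = np.random.RandomState(seed)
print(rng.uniform())                   # same value on every run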
def GetErrorText(
self,
Error,
Language = 0):
"""
        Returns a descriptive text for a given TPCANStatus error code, in a
        specific language
        Remarks:
            The current languages available for translation are:
            Neutral (0x00), German (0x07), English (0x09), Spanish (0x0A),
            Italian (0x10) and French (0x0C)
            The return value of this method is a 2-tuple, where
            the first value is the result (TPCANStatus) of the method and
            the second one, the error text
        Parameters:
            Error    : A TPCANStatus error code
            Language : Indicates a 'Primary language ID' (Default is Neutral(0))
        Returns:
            A tuple with 2 values
"""
try:
mybuffer = create_string_buffer(256)
res = self.__m_dllBasic.CAN_GetErrorText(Error,Language,byref(mybuffer))
return TPCANStatus(res),mybuffer.value
        except Exception:
logger.error("Exception on PCANBasic.GetErrorText")
raise
def read_elements(fd, endian, mtps, is_name=False):
"""Read elements from the file.
If list of possible matrix data types mtps is provided, the data type
of the elements are verified.
"""
mtpn, num_bytes, data = read_element_tag(fd, endian)
if mtps and mtpn not in [etypes[mtp]['n'] for mtp in mtps]:
raise ParseError('Got type {}, expected {}'.format(
mtpn, ' / '.join('{} ({})'.format(
etypes[mtp]['n'], mtp) for mtp in mtps)))
if not data:
# full format, read data
data = fd.read(num_bytes)
# Seek to next 64-bit boundary
mod8 = num_bytes % 8
if mod8:
fd.seek(8 - mod8, 1)
# parse data and return values
if is_name:
# names are stored as miINT8 bytes
fmt = 's'
val = [unpack(endian, fmt, s)
for s in data.split(b'\0') if s]
if len(val) == 0:
val = ''
elif len(val) == 1:
val = asstr(val[0])
else:
val = [asstr(s) for s in val]
else:
fmt = etypes[inv_etypes[mtpn]]['fmt']
val = unpack(endian, fmt, data)
return val
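A standalone sketch of the 64-bit alignment rule used above; pad_to_boundary is a hypothetical helper that computes how far the fd.seek(8 - mod8, 1) call would skip:

def pad_to_boundary(num_bytes, boundary=8):
    # Bytes to skip so the next read starts on an 8-byte boundary.
    mod = num_bytes % boundary
    return boundary - mod if mod else 0

print(pad_to_boundary(13))  # 3
print(pad_to_boundary(16))  # 0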
def delete_validating_webhook_configuration(self, name, **kwargs):
"""
delete a ValidatingWebhookConfiguration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_validating_webhook_configuration(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ValidatingWebhookConfiguration (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_validating_webhook_configuration_with_http_info(name, **kwargs)
else:
(data) = self.delete_validating_webhook_configuration_with_http_info(name, **kwargs)
return data
def enrich_rnas_with_genes(graph):
"""Add the corresponding gene node for each RNA/miRNA node and connect them with a transcription edge.
:param pybel.BELGraph graph: A BEL graph
"""
for rna_node in list(graph):
if rna_node[FUNCTION] not in {MIRNA, RNA} or FUSION in rna_node or VARIANTS in rna_node:
continue
gene_node = rna_node.get_gene()
graph.add_transcription(gene_node, rna_node)
def calc_requiredremotesupply_v1(self):
"""Calculate the required maximum supply from another location
that can be discharged into the dam.
Required control parameters:
|HighestRemoteSupply|
|WaterLevelSupplyThreshold|
Required derived parameter:
|WaterLevelSupplySmoothPar|
Required aide sequence:
|WaterLevel|
Calculated flux sequence:
|RequiredRemoteSupply|
Basic equation:
:math:`RequiredRemoteSupply = HighestRemoteSupply \\cdot
smooth_{logistic1}(WaterLevelSupplyThreshold-WaterLevel,
WaterLevelSupplySmoothPar)`
Used auxiliary method:
|smooth_logistic1|
Examples:
Method |calc_requiredremotesupply_v1| is functionally identical
with method |calc_allowedremoterelieve_v2|. Hence the following
examples serve for testing purposes only (see the documentation
on function |calc_allowedremoterelieve_v2| for more detailed
information):
>>> from hydpy import pub
>>> pub.timegrids = '2001.03.30', '2001.04.03', '1d'
>>> from hydpy.models.dam import *
>>> parameterstep()
>>> highestremotesupply(_11_1_12=1.0, _03_31_12=1.0,
... _04_1_12=2.0, _10_31_12=2.0)
>>> waterlevelsupplythreshold(_11_1_12=3.0, _03_31_12=2.0,
... _04_1_12=4.0, _10_31_12=4.0)
>>> waterlevelsupplytolerance(_11_1_12=0.0, _03_31_12=0.0,
... _04_1_12=1.0, _10_31_12=1.0)
>>> derived.waterlevelsupplysmoothpar.update()
>>> derived.toy.update()
>>> from hydpy import UnitTest
>>> test = UnitTest(model,
... model.calc_requiredremotesupply_v1,
... last_example=9,
... parseqs=(aides.waterlevel,
... fluxes.requiredremotesupply))
>>> test.nexts.waterlevel = range(9)
>>> model.idx_sim = pub.timegrids.init['2001.03.30']
>>> test(first_example=2, last_example=6)
| ex. | waterlevel | requiredremotesupply |
-------------------------------------------
| 3 | 1.0 | 1.0 |
| 4 | 2.0 | 1.0 |
| 5 | 3.0 | 0.0 |
| 6 | 4.0 | 0.0 |
>>> model.idx_sim = pub.timegrids.init['2001.04.01']
>>> test()
| ex. | waterlevel | requiredremotesupply |
-------------------------------------------
| 1 | 0.0 | 2.0 |
| 2 | 1.0 | 1.999998 |
| 3 | 2.0 | 1.999796 |
| 4 | 3.0 | 1.98 |
| 5 | 4.0 | 1.0 |
| 6 | 5.0 | 0.02 |
| 7 | 6.0 | 0.000204 |
| 8 | 7.0 | 0.000002 |
| 9 | 8.0 | 0.0 |
"""
con = self.parameters.control.fastaccess
der = self.parameters.derived.fastaccess
flu = self.sequences.fluxes.fastaccess
aid = self.sequences.aides.fastaccess
toy = der.toy[self.idx_sim]
flu.requiredremotesupply = (
con.highestremotesupply[toy] *
smoothutils.smooth_logistic1(
con.waterlevelsupplythreshold[toy]-aid.waterlevel,
der.waterlevelsupplysmoothpar[toy]))
def image_question_encoder(encoder_inputs,
encoder_self_attention_bias,
hparams,
query=None,
name="image_question_encoder",
save_weights_to=None,
make_image_summary=True):
"""A stack of self attention layers."""
x = encoder_inputs
with tf.variable_scope(name):
for layer in range(hparams.num_encoder_layers or hparams.num_hidden_layers):
with tf.variable_scope("layer_%d" % layer):
with tf.variable_scope("self_attention"):
y = vqa_layers.multihead_attention(
common_layers.layer_preprocess(x, hparams),
None,
encoder_self_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
attention_type=hparams.self_attention_type,
block_length=hparams.block_length,
save_weights_to=save_weights_to,
make_image_summary=make_image_summary,
scale_dotproduct=hparams.scale_dotproduct,
)
utils.collect_named_outputs(
"norms", "encoder_self_attention_%d"%(layer),
tf.norm(y, axis=-1))
x = common_layers.layer_postprocess(x, y, hparams)
utils.collect_named_outputs(
"norms", "encoder_self_attention_postprocess_%d"%(layer),
tf.norm(x, axis=-1))
if query is not None:
with tf.variable_scope("encdec_attention"):
y = common_attention.multihead_attention(
common_layers.layer_preprocess(x, hparams),
query,
None,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
attention_type=hparams.self_attention_type,
block_length=hparams.block_length,
save_weights_to=save_weights_to,
make_image_summary=make_image_summary,
scale_dotproduct=hparams.scale_dotproduct,
)
utils.collect_named_outputs(
"norms",
"encoder_decoder_attention_%d"%(layer),
tf.norm(y, axis=-1))
x = common_layers.layer_postprocess(x, y, hparams)
utils.collect_named_outputs(
"norms",
"encoder_decoder_attention_post_%d"%(layer),
tf.norm(x, axis=-1))
with tf.variable_scope("ffn"):
y = common_layers.dense_relu_dense(
common_layers.layer_preprocess(x, hparams),
hparams.filter_size,
hparams.hidden_size,
dropout=hparams.relu_dropout,
)
utils.collect_named_outputs(
"norms", "encoder_ffn_%d"%(layer), tf.norm(y, axis=-1))
x = common_layers.layer_postprocess(x, y, hparams)
utils.collect_named_outputs(
"norms", "encoder_ffn_postprocess_%d"%(layer),
tf.norm(x, axis=-1))
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
return common_layers.layer_preprocess(x, hparams)
def new_file(self, path: str, checksum: str=None, to_archive: bool=False,
tags: List[models.Tag]=None) -> models.File:
"""Create a new file."""
new_file = self.File(path=path, checksum=checksum, to_archive=to_archive, tags=tags)
return new_file
def get_lib_name(self):
""" Parse Cargo.toml to get the name of the shared library. """
        # We import in here to make sure the setup_requires are already installed
import toml
cfg = toml.load(self.path)
name = cfg.get("lib", {}).get("name")
if name is None:
name = cfg.get("package", {}).get("name")
if name is None:
raise Exception(
"Can not parse library name from Cargo.toml. "
"Cargo.toml missing value for 'name' key "
"in both the [package] section and the [lib] section"
)
name = re.sub(r"[./\\-]", "_", name)
return name
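A minimal sketch of the [lib] -> [package] name fallback, using toml.loads on an in-memory manifest; the crate name is made up and the third-party toml package is assumed installed:

import re
import toml

manifest = '''
[package]
name = "my-crate"
'''
cfg = toml.loads(manifest)
name = cfg.get("lib", {}).get("name") or cfg.get("package", {}).get("name")
print(re.sub(r"[./\\-]", "_", name))  # my_crate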
def score(self, obs, pstates):
"""
Compute the log probability under the model.
"""
pstates_idx = np.array([self.e[p] for p in pstates])
framelogprob = self._compute_log_likelihood(obs, pstates_idx)
logprob, _ = self._do_forward_pass(framelogprob, pstates_idx)
return logprob
    def request(self, url, params=None, headers=None):
"""
Retrieve SDMX messages.
If needed, override in subclasses to support other data providers.
:param url: The URL of the message.
:type url: str
        :return: tuple of (data as file-like object or None, response URL,
            response headers, HTTP status code)
"""
        # Generate current config. Merge in any given headers
        params = params or {}
        headers = headers or {}
        cur_config = self.config.copy()
if 'headers' in cur_config:
cur_config['headers'] = cur_config['headers'].copy()
cur_config['headers'].update(headers)
else:
cur_config['headers'] = headers
with closing(requests.get(url, params=params, **cur_config)) as response:
if response.status_code == requests.codes.OK:
# Prepare the temp file. xml content will be
# stored in a binary file, json in a textfile.
if (response.headers.get('Content-Type')
and ('json' in response.headers['Content-Type'])):
enc, fmode = response.encoding, 'w+t'
else:
enc, fmode = None, 'w+b'
# Create temp file ensuring 2to3 compatibility
if str_type == str: # we are on py3
source = STF(
max_size=self.max_size, mode=fmode, encoding=enc)
else:
# On py27 we must omit the 'encoding' kwarg
source = STF(max_size=self.max_size, mode=fmode)
for c in response.iter_content(chunk_size=1000000,
decode_unicode=bool(enc)):
source.write(c)
else:
source = None
code = int(response.status_code)
            if 400 <= code <= 499:
                response.raise_for_status()
return source, response.url, response.headers, code
def __assert_equal(expected, returned, assert_print_result=True):
'''
Test if two objects are equal
'''
result = "Pass"
try:
if assert_print_result:
assert (expected == returned), "{0} is not equal to {1}".format(expected, returned)
else:
assert (expected == returned), "Result is not equal"
except AssertionError as err:
result = "Fail: " + six.text_type(err)
return result
def resolve_local(self, uri, base_uri, ref):
"""
Resolve a local ``uri``.
Does not check the store first.
:argument str uri: the URI to resolve
:returns: the retrieved document
"""
# read it from the filesystem
file_path = None
        # normalize the reference to the SalesKing style
item_name = None
if (uri.startswith(u"file") or
uri.startswith(u"File")):
if ref.startswith(u"./"):
ref = ref.split(u"./")[-1]
org_ref = ref
if ref.find(u"#properties") != -1:
ref = ref.split(u"#properties")[0]
if ref.find(u".json") != -1:
item_name = ref.split(u".json")[0]
        # on Windows systems this needs to happen
if base_uri.startswith(u"file://") is True:
base_uri = base_uri.split(u"file://")[1]
elif base_uri.startswith(u"File://") is True:
base_uri = base_uri.split(u"File://")[1]
file_path = os.path.join(base_uri, ref)
result = None
try:
            with open(file_path, "rb") as schema_file:
                result = json.loads(schema_file.read().decode("utf-8"))
except IOError as e:
log.error(u"file not found %s" % e)
msg = "Could not find schema file. %s" % file_path
raise SalesKingException("SCHEMA_NOT_FOUND", msg)
if self.cache_remote:
self.store[uri] = result
return result
def features(self, expand=False):
"""Return the list of feature-value pairs in the conjunction."""
featvals = []
for term in self._terms:
if isinstance(term, AVM):
featvals.extend(term.features(expand=expand))
return featvals
def get_snapshot_command_history(self, name, limit=20, offset=0, view=None):
"""
Retrieve a list of commands triggered by a snapshot policy.
@param name: The name of the snapshot policy.
@param limit: Maximum number of commands to retrieve.
@param offset: Index of first command to retrieve.
@param view: View to materialize. Valid values are 'full', 'summary', 'export', 'export_redacted'.
@return: List of commands triggered by a snapshot policy.
@since: API v6
"""
params = {
'limit': limit,
'offset': offset,
}
if view:
params['view'] = view
return self._get("snapshots/policies/%s/history" % name, ApiSnapshotCommand, True,
params=params, api_version=6)
def _signed_add_overflow(state, a, b):
"""
Sign extend the value to 512 bits and check the result can be represented
in 256. Following there is a 32 bit excerpt of this condition:
a + b -80000000 -3fffffff -00000001 +00000000 +00000001 +3fffffff +7fffffff
+80000000 True True True False False False False
+c0000001 True False False False False False False
+ffffffff True False False False False False False
+00000000 False False False False False False False
+00000001 False False False False False False True
+3fffffff False False False False False False True
+7fffffff False False False False True True True
"""
add = Operators.SEXTEND(a, 256, 512) + Operators.SEXTEND(b, 256, 512)
cond = Operators.OR(add < -(1 << 255), add >= (1 << 255))
return cond
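A concrete, non-symbolic 32-bit version of the same check, reproducing two cells of the table above; plain Python integers make the sign extension exact:

def signed_add_overflow_32(a, b):
    # Interpret the 32-bit patterns as signed, add exactly, then test
    # whether the exact sum still fits in a signed 32-bit integer.
    def to_signed(x):
        return x - (1 << 32) if x >= (1 << 31) else x
    s = to_signed(a) + to_signed(b)
    return s < -(1 << 31) or s >= (1 << 31)

print(signed_add_overflow_32(0xFFFFFFFF, 0x80000000))  # True  (row +ffffffff, col -80000000)
print(signed_add_overflow_32(0x00000001, 0x7FFFFFFF))  # True  (row +00000001, col +7fffffff)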
def mavlink_packet(self, msg):
'''handle an incoming mavlink packet'''
# check for any closed graphs
for i in range(len(self.graphs) - 1, -1, -1):
if not self.graphs[i].is_alive():
self.graphs[i].close()
self.graphs.pop(i)
# add data to the rest
for g in self.graphs:
g.add_mavlink_packet(msg)
def get(self, chargeback_id, **params):
"""Verify the chargeback ID and retrieve the chargeback from the API."""
if not chargeback_id or not chargeback_id.startswith(self.RESOURCE_ID_PREFIX):
raise IdentifierError(
"Invalid chargeback ID: '{id}'. A chargeback ID should start with '{prefix}'.".format(
id=chargeback_id, prefix=self.RESOURCE_ID_PREFIX)
)
return super(Chargebacks, self).get(chargeback_id, **params)
def datatype2schemacls(
_datatype, _registry=None, _factory=None, _force=True,
_besteffort=True, **kwargs
):
"""Get a schema class which has been associated to input data type by the
registry or the factory in this order.
:param type datatype: data type from where get associated schema.
    :param SchemaRegistry _registry: registry on which to call getbydatatype.
        Default is the global registry.
    :param SchemaFactory _factory: factory on which to call getschemacls if
        getbydatatype returns None. Default is the global factory.
:param bool _force: if true (default), force the building of schema class
if no schema is associated to input data type.
:param bool _besteffort: if True (default), try to resolve schema by
inheritance.
:param dict kwargs: factory builder kwargs.
:rtype: type
:return: Schema associated to input registry or factory. None if no
association found.
"""
result = None
gdbt = getbydatatype if _registry is None else _registry.getbydatatype
result = gdbt(_datatype, besteffort=_besteffort)
if result is None:
gscls = getschemacls if _factory is None else _factory.getschemacls
result = gscls(_datatype, besteffort=_besteffort)
if result is None and _force:
_build = build if _factory is None else _factory.build
result = _build(_resource=_datatype, **kwargs)
return result
def _print_header(data):
"""
Create vcf header to make
a valid vcf.
"""
    print("##fileformat=VCFv4.2", file=STDOUT)
    print("##source=seqbuster2.3", file=STDOUT)
    print("##reference=mirbase", file=STDOUT)
    for pos in data:
        print("##contig=<ID=%s>" % pos["chrom"], file=STDOUT)
    print('##INFO=<ID=ID,Number=1,Type=String,Description="miRNA name">', file=STDOUT)
    print('##FORMAT=<ID=GT,Number=1,Type=Integer,Description="Genotype">', file=STDOUT)
    print('##FORMAT=<ID=NR,Number=A,Type=Integer,Description="Total reads supporting the variant">', file=STDOUT)
    print('##FORMAT=<ID=NS,Number=A,Type=Float,Description="Total number of different sequences supporting the variant">', file=STDOUT)
    print("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tSAMP001", file=STDOUT)
def generate(self, api):
"""
Generates a module for each namespace.
Each namespace will have Python classes to represent data types and
routes in the Stone spec.
"""
rsrc_folder = os.path.join(os.path.dirname(__file__), 'python_rsrc')
self.logger.info('Copying stone_validators.py to output folder')
shutil.copy(os.path.join(rsrc_folder, 'stone_validators.py'),
self.target_folder_path)
self.logger.info('Copying stone_serializers.py to output folder')
shutil.copy(os.path.join(rsrc_folder, 'stone_serializers.py'),
self.target_folder_path)
self.logger.info('Copying stone_base.py to output folder')
shutil.copy(os.path.join(rsrc_folder, 'stone_base.py'),
self.target_folder_path)
for namespace in api.namespaces.values():
reserved_namespace_name = fmt_namespace(namespace.name)
with self.output_to_relative_path('{}.py'.format(reserved_namespace_name)):
self._generate_base_namespace_module(api, namespace)
if reserved_namespace_name != namespace.name:
with self.output_to_relative_path('{}.py'.format(namespace.name)):
self._generate_dummy_namespace_module(reserved_namespace_name)
def order_by(self, column, direction="asc"):
"""
Add a "order by" clause to the query
:param column: The order by column
:type column: str
:param direction: The direction of the order
:type direction: str
:return: The current QueryBuilder instance
:rtype: QueryBuilder
"""
if self.unions:
prop = "union_orders"
else:
prop = "orders"
if direction.lower() == "asc":
direction = "asc"
else:
direction = "desc"
getattr(self, prop).append({"column": column, "direction": direction})
return self
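A hedged usage sketch; builder stands for an existing QueryBuilder instance:

query = builder.order_by('last_name').order_by('created_at', 'desc')
# Any direction other than "asc" (case-insensitive) is coerced to "desc".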
def _main(self):
"""module "main" method. Only used by external modules.
:return: None
"""
self.set_proctitle(self.name)
self.set_signal_handler()
logger.info("process for module %s is now running (pid=%d)", self.name, os.getpid())
# Will block here!
try:
self.main()
except (IOError, EOFError):
pass
# logger.warning('[%s] EOF exception: %s', self.name, traceback.format_exc())
except Exception as exp: # pylint: disable=broad-except
logger.exception('main function exception: %s', exp)
self.do_stop()
logger.info("process for module %s is now exiting (pid=%d)", self.name, os.getpid())
exit()
def user_provenance(self, document): # type: (ProvDocument) -> None
"""Add the user provenance."""
self.self_check()
(username, fullname) = _whoami()
if not self.full_name:
self.full_name = fullname
document.add_namespace(UUID)
document.add_namespace(ORCID)
document.add_namespace(FOAF)
account = document.agent(
ACCOUNT_UUID, {provM.PROV_TYPE: FOAF["OnlineAccount"],
"prov:label": username,
FOAF["accountName"]: username})
user = document.agent(
self.orcid or USER_UUID,
{provM.PROV_TYPE: PROV["Person"],
"prov:label": self.full_name,
FOAF["name"]: self.full_name,
FOAF["account"]: account})
# cwltool may be started on the shell (directly by user),
# by shell script (indirectly by user)
# or from a different program
# (which again is launched by any of the above)
#
        # We can't tell in which way, but ultimately we're still
        # acting on behalf of that user (even if we might
        # get their name wrong!)
document.actedOnBehalfOf(account, user)
def validate(spec, data):
"""Validates the data and creates the config objects"""
data = copy.deepcopy(data)
validated_data = {}
def validate_keys(section, config, section_data):
if not isinstance(section_data, dict) or section == spec.MODEL:
return
extra_args = [key for key in section_data.keys() if key not in config.SCHEMA().fields]
if extra_args:
raise PolyaxonfileError('Extra arguments passed for `{}`: {}'.format(
section, extra_args))
def add_validated_section(section, config):
if data.get(section):
section_data = data[section]
validate_keys(section=section, config=config, section_data=section_data)
validated_data[section] = config.from_dict(section_data)
add_validated_section(spec.ENVIRONMENT, spec.ENVIRONMENT_CONFIG)
add_validated_section(spec.BUILD, BuildConfig)
add_validated_section(spec.RUN, RunConfig)
add_validated_section(spec.MODEL, ModelConfig)
add_validated_section(spec.TRAIN, TrainConfig)
add_validated_section(spec.EVAL, EvalConfig)
return validated_data
def operands(self):
"""
Return an iterator over this instruction's operands.
The iterator will yield a ValueRef for each operand.
"""
if not self.is_instruction:
raise ValueError('expected instruction value, got %s'
% (self._kind,))
it = ffi.lib.LLVMPY_InstructionOperandsIter(self)
parents = self._parents.copy()
parents.update(instruction=self)
return _OperandsIterator(it, parents)
def get_next_action(self, request, application, label, roles):
""" Process the get_next_action request at the current step. """
        # if the user is logged in and is not the applicant, they may steal
        # the application
if 'is_applicant' in roles:
# if we got this far, then we either we are logged in as applicant,
# or we know the secret for this application.
new_person = None
reason = None
details = None
attrs, _ = saml.parse_attributes(request)
saml_id = attrs['persistent_id']
if saml_id is not None:
query = Person.objects.filter(saml_id=saml_id)
if application.content_type.model == "person":
query = query.exclude(pk=application.applicant.pk)
if query.count() > 0:
new_person = Person.objects.get(saml_id=saml_id)
reason = "SAML id is already in use by existing person."
details = (
"It is not possible to continue this application "
+ "as is because the saml identity already exists "
+ "as a registered user.")
del query
if request.user.is_authenticated:
new_person = request.user
reason = "%s was logged in " \
"and accessed the secret URL." % new_person
details = (
"If you want to access this application "
+ "as %s " % application.applicant
+ "without %s stealing it, " % new_person
+ "you will have to ensure %s is " % new_person
+ "logged out first.")
if new_person is not None:
if application.applicant != new_person:
if 'steal' in request.POST:
old_applicant = application.applicant
application.applicant = new_person
application.save()
log.change(
application.application_ptr,
"Stolen application from %s" % old_applicant)
messages.success(
request,
"Stolen application from %s" % old_applicant)
url = base.get_url(request, application, roles, label)
return HttpResponseRedirect(url)
else:
return render(
template_name='kgapplications'
'/project_aed_steal.html',
context={
'application': application,
'person': new_person,
'reason': reason,
'details': details,
},
request=request)
# if the user is the leader, show him the leader specific page.
if ('is_leader' in roles or 'is_delegate' in roles) \
and 'is_admin' not in roles \
and 'is_applicant' not in roles:
actions = ['reopen']
if 'reopen' in request.POST:
return 'reopen'
return render(
template_name='kgapplications/project_aed_for_leader.html',
context={'application': application,
'actions': actions, 'roles': roles, },
request=request)
# otherwise do the default behaviour for StateWithSteps
return super(StateApplicantEnteringDetails, self) \
.get_next_action(request, application, label, roles)
def invalid_index(self, name):
"""Show an invalid index error message."""
self.stderr.write("Unknown index: {}".format(name))
self.stderr.write("Supported indices are:")
for index in index_builder.indexes:
self.stderr.write(" * {}".format(index.__class__.__name__))
def _walk_modules(modules, class_name, path, ignored_formats, args):
"""
    Helper generator that traverses modules and yields flattened
    (module, section, lecture, resource) tuples.
"""
for module in _iter_modules(modules=modules,
class_name=class_name,
path=path,
ignored_formats=ignored_formats,
args=args):
for section in module.sections:
for lecture in section.lectures:
for resource in lecture.resources:
yield module, section, lecture, resource
def tunnel_settings_system_tunnel_replicator_load_balance(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
tunnel_settings = ET.SubElement(config, "tunnel-settings", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
system = ET.SubElement(tunnel_settings, "system")
tunnel = ET.SubElement(system, "tunnel")
replicator = ET.SubElement(tunnel, "replicator")
load_balance = ET.SubElement(replicator, "load-balance")
callback = kwargs.pop('callback', self._callback)
return callback(config)
    def system_methodHelp(self, method_name: str) -> str:
        """Return the method's docstring.
        system.methodHelp('add') => "Adds two integers together"
        Return:
            (str): - the help text for the method
        """
method = None
if method_name in self.funcs:
method = self.funcs[method_name]
elif self.instance is not None:
try:
method = resolve_dotted_attribute(
self.instance,
method_name,
self.allow_dotted_names
)
except AttributeError:
pass
if method is None:
return ""
else:
return pydoc.getdoc(method)
def ReadBytes(self, address, num_bytes):
"""Reads at most num_bytes starting from offset <address>."""
address = int(address)
buf = ctypes.create_string_buffer(num_bytes)
bytesread = ctypes.c_size_t(0)
res = ReadProcessMemory(self.h_process, address, buf, num_bytes,
ctypes.byref(bytesread))
if res == 0:
err = wintypes.GetLastError()
if err == 299:
# Only part of ReadProcessMemory has been done, let's return it.
return buf.raw[:bytesread.value]
raise process_error.ProcessError("Error in ReadProcessMemory: %d" % err)
return buf.raw[:bytesread.value]
def ReleaseFileSystem(self, file_system):
"""Releases a cached file system object.
Args:
file_system (FileSystem): file system object.
Returns:
bool: True if the file system object can be closed.
Raises:
PathSpecError: if the path specification is incorrect.
RuntimeError: if the file system object is not cached or an inconsistency
is detected in the cache.
"""
identifier, cache_value = self._file_system_cache.GetCacheValueByObject(
file_system)
if not identifier:
raise RuntimeError('Object not cached.')
if not cache_value:
raise RuntimeError('Invalid cache value.')
self._file_system_cache.ReleaseObject(identifier)
result = cache_value.IsDereferenced()
if result:
self._file_system_cache.RemoveObject(identifier)
return result
def servicenames(self):
"Give the list of services available in this folder."
return set([service['name'].rstrip('/').split('/')[-1]
for service in self._json_struct.get('services', [])])
|
Give the list of services available in this folder.
|
def process_tree(self, channel_node):
"""
Returns a list of all file names associated with a tree. Profiling suggests using a global list with `extend`
is faster than using a global set or deque.
:param channel_node: Root node of the channel being processed
:return: The list of unique file names in `channel_node`.
"""
file_names = []
self.process_tree_recur(file_names, channel_node)
return [x for x in set(file_names) if x]
|
Returns a list of all file names associated with a tree. Profiling suggests using a global list with `extend`
is faster than using a global set or deque.
:param channel_node: Root node of the channel being processed
:return: The list of unique file names in `channel_node`.
|
def select(self, **kws):
'''
Find all servers with indicated protocol support. Shuffled.
Filter by TOR support, and pruning level.
'''
lst = [i for i in self.values() if i.select(**kws)]
random.shuffle(lst)
return lst
|
Find all servers with indicated protocol support. Shuffled.
Filter by TOR support, and pruning level.
|
def calculate_bidirectional_lstm_output_shapes(operator):
'''
See bidirectional LSTM's conversion function for its output shapes.
'''
check_input_and_output_numbers(operator, input_count_range=[1, 5], output_count_range=[1, 5])
check_input_and_output_types(operator, good_input_types=[FloatTensorType])
input_shape = operator.inputs[0].type.shape
# LSTM accepts [N, C] and [N, C, 1, 1] inputs
if len(input_shape) not in [2, 4]:
raise RuntimeError('Input must be a 2-D or 4-D tensor')
params = operator.raw_operator.biDirectionalLSTM
# The following line is more accurate but it may break some tests
# output_shape = ['None', params.outputVectorSize] if params.params.sequenceOutput else [1, 2 *params.outputVectorSize]
output_shape = ['None', 2 * params.outputVectorSize]
state_shape = [1, params.outputVectorSize]
# TODO: Changing input shapes of an operator is dangerous, this should be move to Topology's _fix_shapes function
if len(operator.inputs) > 1:
Y_h_in = operator.inputs[1] # The forward initial hidden state of a single sequence
Y_h_in.type.shape = state_shape
Y_h_rev_in = operator.inputs[3] # The backward initial hidden state of a single sequence
Y_h_rev_in.type.shape = state_shape
if len(operator.inputs) > 2:
Y_c_in = operator.inputs[2] # The forward initial cell state of a single sequence
Y_c_in.type.shape = state_shape
Y_c_rev_in = operator.inputs[4] # The backward initial cell state of a single sequence
Y_c_rev_in.type.shape = state_shape
operator.outputs[0].type.shape = output_shape
if len(operator.outputs) > 1:
operator.outputs[1].type.shape = state_shape
operator.outputs[3].type.shape = state_shape
if len(operator.outputs) > 2:
operator.outputs[2].type.shape = state_shape
operator.outputs[4].type.shape = state_shape
|
See bidirectional LSTM's conversion function for its output shapes.
|
def apply_exclude(self, high):
'''
Read in the __exclude__ list and remove all excluded objects from the
high data
'''
if '__exclude__' not in high:
return high
ex_sls = set()
ex_id = set()
exclude = high.pop('__exclude__')
for exc in exclude:
if isinstance(exc, six.string_types):
# The exclude statement is a string, assume it is an sls
ex_sls.add(exc)
if isinstance(exc, dict):
# Explicitly declared exclude
if len(exc) != 1:
continue
key = next(six.iterkeys(exc))
if key == 'sls':
ex_sls.add(exc['sls'])
elif key == 'id':
ex_id.add(exc['id'])
# Now the excludes have been simplified, use them
if ex_sls:
# There are sls excludes, find the associated ids
for name, body in six.iteritems(high):
if name.startswith('__'):
continue
if body.get('__sls__', '') in ex_sls:
ex_id.add(name)
for id_ in ex_id:
if id_ in high:
high.pop(id_)
return high
|
Read in the __exclude__ list and remove all excluded objects from the
high data
|
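A hedged sketch of the high-data shape this method consumes; the sls and id names below are hypothetical:
# Hypothetical high data mixing both exclude forms (string sls and dict id):
high = {
    '__exclude__': ['webserver', {'id': 'install_nginx'}],
    'install_nginx': {'__sls__': 'nginx'},
    'apache_conf': {'__sls__': 'webserver'},
}
# apply_exclude(high) would drop 'install_nginx' (excluded by id) and
# 'apache_conf' (its __sls__ matches the excluded 'webserver' sls).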
def send(token, title, **kwargs):
"""
Site: https://boxcar.io/
API: http://help.boxcar.io/knowledgebase/topics/48115-boxcar-api
Desc: Best app for system administrators
"""
headers = {
"Content-type": "application/x-www-form-urlencoded",
"User-Agent": "DBMail/%s" % get_version(),
}
data = {
"user_credentials": token,
"notification[title]": from_unicode(title),
"notification[sound]": "notifier-2"
}
# pop api_url before building the payload so it is not sent as a notification field
api_url = kwargs.pop("api_url", "new.boxcar.io")
for k, v in kwargs.items():
data['notification[%s]' % k] = from_unicode(v)
http = HTTPSConnection(api_url)
http.request(
"POST", "/api/notifications",
headers=headers,
body=urlencode(data))
response = http.getresponse()
if response.status != 201:
raise BoxcarError(response.reason)
return True
|
Site: https://boxcar.io/
API: http://help.boxcar.io/knowledgebase/topics/48115-boxcar-api
Desc: Best app for system administrators
|
def _has_expired(self):
""" Has this HIT expired yet? """
expired = False
if hasattr(self, 'Expiration'):
now = datetime.datetime.utcnow()
expiration = datetime.datetime.strptime(self.Expiration, '%Y-%m-%dT%H:%M:%SZ')
expired = (now >= expiration)
else:
raise ValueError("ERROR: Request for expired property, but no Expiration in HIT!")
return expired
|
Has this HIT expired yet?
|
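A self-contained sketch of the same comparison, assuming a hypothetical Expiration string in the format the method parses:
import datetime

expiration = datetime.datetime.strptime('2030-01-01T00:00:00Z',
                                        '%Y-%m-%dT%H:%M:%SZ')
expired = datetime.datetime.utcnow() >= expiration  # False until 2030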
def parse_args():
"""Return parsed command-line arguments."""
threshold_choices = docutils.frontend.OptionParser.threshold_choices
parser = argparse.ArgumentParser(
description=__doc__ + (' Sphinx is enabled.'
if SPHINX_INSTALLED else ''),
prog='rstcheck')
parser.add_argument('files', nargs='+', type=decode_filename,
help='files to check')
parser.add_argument('--config', metavar='CONFIG', default=None,
help='location of config file')
parser.add_argument('-r', '--recursive', action='store_true',
help='run recursively over directories')
parser.add_argument('--report', metavar='level',
choices=threshold_choices,
default='info',
help='report system messages at or higher than '
'level; ' +
', '.join(choice for choice in threshold_choices
if not choice.isdigit()) +
' (default: %(default)s)')
parser.add_argument('--ignore-language', '--ignore',
metavar='language', default='',
help='comma-separated list of languages to ignore')
parser.add_argument('--ignore-messages',
metavar='messages', default='',
help='python regex that match the messages to ignore')
parser.add_argument('--ignore-directives',
metavar='directives', default='',
help='comma-separated list of directives to ignore')
parser.add_argument('--ignore-substitutions',
metavar='substitutions', default='',
help='comma-separated list of substitutions to ignore')
parser.add_argument('--ignore-roles',
metavar='roles', default='',
help='comma-separated list of roles to ignore')
parser.add_argument('--debug', action='store_true',
help='show messages helpful for debugging')
parser.add_argument('--version', action='version',
version='%(prog)s ' + __version__)
args = parser.parse_args()
if '-' in args.files:
if len(args.files) > 1:
parser.error("'-' for standard in can only be checked alone")
else:
args.files = list(find_files(filenames=args.files,
recursive=args.recursive))
return args
|
Return parsed command-line arguments.
|
def exec_python(attr, src, executable="python"):
"""Runs a python subproc to calculate a package attribute.
Args:
attr (str): Name of package attribute being created.
src (list of str): Python code to execute, will be converted into
semicolon-delimited single line of code.
Returns:
str: Output of python process.
"""
import subprocess
if isinstance(src, basestring):
src = [src]
p = popen([executable, "-c", "; ".join(src)],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if p.returncode:
from rez.exceptions import InvalidPackageError
raise InvalidPackageError(
"Error determining package attribute '%s':\n%s" % (attr, err))
return out.strip()
|
Runs a python subproc to calculate a package attribute.
Args:
attr (str): Name of package attribute being created.
src (list of str): Python code to execute, will be converted into
semicolon-delimited single line of code.
Returns:
str: Output of python process.
|
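A hypothetical call; the attribute name and script below are illustrative, and the two statements get joined into a single `python -c` line as described above:
out = exec_python("version", ["import sys",
                              "sys.stdout.write(str(sys.version_info[0]))"])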
def get(self, idx, default=None):
"""
Return the first placeholder shape with matching *idx* value, or
*default* if not found.
"""
for placeholder in self:
if placeholder.element.ph_idx == idx:
return placeholder
return default
|
Return the first placeholder shape with matching *idx* value, or
*default* if not found.
|
def build(self, builder):
"""
Build XML by appending to builder
"""
builder.start("SourceID", {})
builder.data(self.source_id)
builder.end("SourceID")
|
Build XML by appending to builder
|
def __nn_filter_helper(R_data, R_indices, R_ptr, S, aggregate):
'''Nearest-neighbor filter helper function.
This is an internal function, not for use outside of the decompose module.
It applies the nearest-neighbor filter to S, assuming that the first index
corresponds to observations.
Parameters
----------
R_data, R_indices, R_ptr : np.ndarrays
The `data`, `indices`, and `indptr` of a scipy.sparse matrix
S : np.ndarray
The observation data to filter
aggregate : callable
The aggregation operator
Returns
-------
S_out : np.ndarray like S
The filtered data array
'''
s_out = np.empty_like(S)
for i in range(len(R_ptr)-1):
# Get the non-zeros out of the recurrence matrix
targets = R_indices[R_ptr[i]:R_ptr[i+1]]
if not len(targets):
s_out[i] = S[i]
continue
neighbors = np.take(S, targets, axis=0)
if aggregate is np.average:
weights = R_data[R_ptr[i]:R_ptr[i+1]]
s_out[i] = aggregate(neighbors, axis=0, weights=weights)
else:
s_out[i] = aggregate(neighbors, axis=0)
return s_out
|
Nearest-neighbor filter helper function.
This is an internal function, not for use outside of the decompose module.
It applies the nearest-neighbor filter to S, assuming that the first index
corresponds to observations.
Parameters
----------
R_data, R_indices, R_ptr : np.ndarrays
The `data`, `indices`, and `indptr` of a scipy.sparse matrix
S : np.ndarray
The observation data to filter
aggregate : callable
The aggregation operator
Returns
-------
S_out : np.ndarray like S
The filtered data array
|
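A small sketch of the expected inputs, built from a scipy.sparse CSR recurrence matrix; the call is left commented out because the helper is module-private (name-mangled):
import numpy as np
import scipy.sparse

# 3 observations, 2 features; row 2 has no neighbors and passes through.
R = scipy.sparse.csr_matrix(np.array([[0.0, 1.0, 0.0],
                                      [0.5, 0.0, 0.5],
                                      [0.0, 0.0, 0.0]]))
S = np.arange(6.0).reshape(3, 2)
# s_out = __nn_filter_helper(R.data, R.indices, R.indptr, S, np.mean)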
def validate_is_non_abstract_vertex_type(self, vertex_classname):
"""Validate that a vertex classname corresponds to a non-abstract vertex class."""
element = self.get_vertex_schema_element_or_raise(vertex_classname)
if element.abstract:
raise InvalidClassError(u'Expected a non-abstract vertex class, but {} is abstract'
.format(vertex_classname))
|
Validate that a vertex classname corresponds to a non-abstract vertex class.
|
def docker_version(host=None, component='server'):
""" Return the version of Docker [Server]
:param host: host or IP of the machine Docker is running on
:type host: str
:param component: Docker component
:type component: str
:return: Docker version
:rtype: str
"""
if component.lower() == 'client':
component = 'Client'
else:
component = 'Server'
# sudo is required for non-coreOS installs
command = 'sudo docker version -f {{.{}.Version}}'.format(component)
if host is None:
success, output = shakedown.run_command_on_master(command, None, None, False)
else:
success, output = shakedown.run_command_on_host(host, command, None, None, False)
if success:
return output
else:
return 'unknown'
|
Return the version of Docker [Server]
:param host: host or IP of the machine Docker is running on
:type host: str
:param component: Docker component
:type component: str
:return: Docker version
:rtype: str
|
def relation_types():
"""Get a list of relation types supported by this charm"""
rel_types = []
md = metadata()
for key in ('provides', 'requires', 'peers'):
section = md.get(key)
if section:
rel_types.extend(section.keys())
return rel_types
|
Get a list of relation types supported by this charm
|
def get_last_result(self):
"""Read the last conversion result when in continuous conversion mode.
Will return a signed integer value.
"""
# Retrieve the conversion register value, convert to a signed int, and
# return it.
result = self._device.readList(ADS1x15_POINTER_CONVERSION, 2)
return self._conversion_value(result[1], result[0])
|
Read the last conversion result when in continuous conversion mode.
Will return a signed integer value.
|
def copy_value(self, orig_key, new_key):
"""Copy value"""
data = self.model.get_data()
if isinstance(data, list):
data.append(data[orig_key])
elif isinstance(data, set):  # elif, so the list branch does not also hit the else
data.add(data[orig_key])
else:
data[new_key] = data[orig_key]
self.set_data(data)
|
Copy value
|
def beacon(config):
'''
Scan for the configured services and fire events
Example Config
.. code-block:: yaml
beacons:
service:
- services:
salt-master: {}
mysql: {}
The config above sets up beacons to check for
the salt-master and mysql services.
The config also supports several other parameters for each service:
`onchangeonly`: when `onchangeonly` is True the beacon will fire
events only when the service status changes. Otherwise, it will fire an
event at each beacon interval. The default is False.
`delay`: when `delay` is greater than 0 the beacon will fire events only
after the service status changes, and the delay (in seconds) has passed.
Applicable only when `onchangeonly` is True. The default is 0.
`emitatstartup`: when `emitatstartup` is False the beacon will not fire an
event when the minion is reloaded. Applicable only when `onchangeonly` is True.
The default is True.
`uncleanshutdown`: If `uncleanshutdown` is present it should point to the
location of a pid file for the service. Most services will not clean up
this pid file if they are shut down uncleanly (e.g. via `kill -9`) or if they
are terminated through a crash such as a segmentation fault. If the file is
present, then the beacon will add `uncleanshutdown: True` to the event. If
not present, the field will be False. The field is only added when the
service is NOT running. Omitting the configuration variable altogether will
turn this feature off.
Please note that some init systems can remove the pid file if the service
registers as crashed. One such example is nginx on CentOS 7, where the
service unit removes the pid file when the service shuts down (i.e. the pid
file is observed as removed when kill -9 is sent to the nginx master
process). The 'uncleanshutdown' option might not be of much use there,
unless the unit file is modified.
Here is an example that will fire an event 30 seconds after the state of nginx
changes and report an uncleanshutdown. This example is for Arch, which
places nginx's pid file in `/run`.
.. code-block:: yaml
beacons:
service:
- services:
nginx:
onchangeonly: True
delay: 30
uncleanshutdown: /run/nginx.pid
'''
ret = []
_config = {}
list(map(_config.update, config))
for service in _config.get('services', {}):
ret_dict = {}
service_config = _config['services'][service]
ret_dict[service] = {'running': __salt__['service.status'](service)}
ret_dict['service_name'] = service
ret_dict['tag'] = service
currtime = time.time()
# If no options are given for the service, we fall back to the defaults
# and assign default values to oncleanshutdown, emitatstartup,
# onchangeonly and delay. Those key:values are then added to the
# service dictionary.
if not service_config:
service_config = {}
if 'oncleanshutdown' not in service_config:
service_config['oncleanshutdown'] = False
if 'emitatstartup' not in service_config:
service_config['emitatstartup'] = True
if 'onchangeonly' not in service_config:
service_config['onchangeonly'] = False
if 'delay' not in service_config:
service_config['delay'] = 0
# We only want to report the nature of the shutdown
# if the current running status is False
# as well as if the config for the beacon asks for it
if 'uncleanshutdown' in service_config and not ret_dict[service]['running']:
filename = service_config['uncleanshutdown']
ret_dict[service]['uncleanshutdown'] = True if os.path.exists(filename) else False
if 'onchangeonly' in service_config and service_config['onchangeonly'] is True:
if service not in LAST_STATUS:
LAST_STATUS[service] = ret_dict[service]
if service_config['delay'] > 0:
LAST_STATUS[service]['time'] = currtime
elif not service_config['emitatstartup']:
continue
else:
ret.append(ret_dict)
if LAST_STATUS[service]['running'] != ret_dict[service]['running']:
LAST_STATUS[service] = ret_dict[service]
if service_config['delay'] > 0:
LAST_STATUS[service]['time'] = currtime
else:
ret.append(ret_dict)
if 'time' in LAST_STATUS[service]:
elapsedtime = int(round(currtime - LAST_STATUS[service]['time']))
if elapsedtime > service_config['delay']:
del LAST_STATUS[service]['time']
ret.append(ret_dict)
else:
ret.append(ret_dict)
return ret
|
Scan for the configured services and fire events
Example Config
.. code-block:: yaml
beacons:
service:
- services:
salt-master: {}
mysql: {}
The config above sets up beacons to check for
the salt-master and mysql services.
The config also supports several other parameters for each service:
`onchangeonly`: when `onchangeonly` is True the beacon will fire
events only when the service status changes. Otherwise, it will fire an
event at each beacon interval. The default is False.
`delay`: when `delay` is greater than 0 the beacon will fire events only
after the service status changes, and the delay (in seconds) has passed.
Applicable only when `onchangeonly` is True. The default is 0.
`emitatstartup`: when `emitatstartup` is False the beacon will not fire an
event when the minion is reloaded. Applicable only when `onchangeonly` is True.
The default is True.
`uncleanshutdown`: If `uncleanshutdown` is present it should point to the
location of a pid file for the service. Most services will not clean up
this pid file if they are shut down uncleanly (e.g. via `kill -9`) or if they
are terminated through a crash such as a segmentation fault. If the file is
present, then the beacon will add `uncleanshutdown: True` to the event. If
not present, the field will be False. The field is only added when the
service is NOT running. Omitting the configuration variable altogether will
turn this feature off.
Please note that some init systems can remove the pid file if the service
registers as crashed. One such example is nginx on CentOS 7, where the
service unit removes the pid file when the service shuts down (i.e. the pid
file is observed as removed when kill -9 is sent to the nginx master
process). The 'uncleanshutdown' option might not be of much use there,
unless the unit file is modified.
Here is an example that will fire an event 30 seconds after the state of nginx
changes and report an uncleanshutdown. This example is for Arch, which
places nginx's pid file in `/run`.
.. code-block:: yaml
beacons:
service:
- services:
nginx:
onchangeonly: True
delay: 30
uncleanshutdown: /run/nginx.pid
|
def sort_values(self, axis=0, ascending=True, inplace=False,
kind='quicksort', na_position='last'):
"""
Sort by the values.
Sort a Series in ascending or descending order by some
criterion.
Parameters
----------
axis : {0 or 'index'}, default 0
Axis to direct sorting. The value 'index' is accepted for
compatibility with DataFrame.sort_values.
ascending : bool, default True
If True, sort values in ascending order, otherwise descending.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort' or 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also :func:`numpy.sort` for more
information. 'mergesort' is the only stable algorithm.
na_position : {'first' or 'last'}, default 'last'
Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at
the end.
Returns
-------
Series
Series ordered by values.
See Also
--------
Series.sort_index : Sort by the Series indices.
DataFrame.sort_values : Sort DataFrame by the values along either axis.
DataFrame.sort_index : Sort DataFrame by indices.
Examples
--------
>>> s = pd.Series([np.nan, 1, 3, 10, 5])
>>> s
0 NaN
1 1.0
2 3.0
3 10.0
4 5.0
dtype: float64
Sort values ascending order (default behaviour)
>>> s.sort_values(ascending=True)
1 1.0
2 3.0
4 5.0
3 10.0
0 NaN
dtype: float64
Sort values descending order
>>> s.sort_values(ascending=False)
3 10.0
4 5.0
2 3.0
1 1.0
0 NaN
dtype: float64
Sort values inplace
>>> s.sort_values(ascending=False, inplace=True)
>>> s
3 10.0
4 5.0
2 3.0
1 1.0
0 NaN
dtype: float64
Sort values putting NAs first
>>> s.sort_values(na_position='first')
0 NaN
1 1.0
2 3.0
4 5.0
3 10.0
dtype: float64
Sort a series of strings
>>> s = pd.Series(['z', 'b', 'd', 'a', 'c'])
>>> s
0 z
1 b
2 d
3 a
4 c
dtype: object
>>> s.sort_values()
3 a
1 b
4 c
2 d
0 z
dtype: object
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
# Validate the axis parameter
self._get_axis_number(axis)
# GH 5856/5853
if inplace and self._is_cached:
raise ValueError("This Series is a view of some other array, to "
"sort in-place you must create a copy")
def _try_kind_sort(arr):
# easier to ask forgiveness than permission
try:
# if kind==mergesort, it can fail for object dtype
return arr.argsort(kind=kind)
except TypeError:
# stable sort not available for object dtype
# uses the argsort default quicksort
return arr.argsort(kind='quicksort')
arr = self._values
sortedIdx = np.empty(len(self), dtype=np.int32)
bad = isna(arr)
good = ~bad
idx = ibase.default_index(len(self))
argsorted = _try_kind_sort(arr[good])
if is_list_like(ascending):
if len(ascending) != 1:
raise ValueError('Length of ascending (%d) must be 1 '
'for Series' % (len(ascending)))
ascending = ascending[0]
if not is_bool(ascending):
raise ValueError('ascending must be boolean')
if not ascending:
argsorted = argsorted[::-1]
if na_position == 'last':
n = good.sum()
sortedIdx[:n] = idx[good][argsorted]
sortedIdx[n:] = idx[bad]
elif na_position == 'first':
n = bad.sum()
sortedIdx[n:] = idx[good][argsorted]
sortedIdx[:n] = idx[bad]
else:
raise ValueError('invalid na_position: {!r}'.format(na_position))
result = self._constructor(arr[sortedIdx], index=self.index[sortedIdx])
if inplace:
self._update_inplace(result)
else:
return result.__finalize__(self)
|
Sort by the values.
Sort a Series in ascending or descending order by some
criterion.
Parameters
----------
axis : {0 or 'index'}, default 0
Axis to direct sorting. The value 'index' is accepted for
compatibility with DataFrame.sort_values.
ascending : bool, default True
If True, sort values in ascending order, otherwise descending.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort' or 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also :func:`numpy.sort` for more
information. 'mergesort' is the only stable algorithm.
na_position : {'first' or 'last'}, default 'last'
Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at
the end.
Returns
-------
Series
Series ordered by values.
See Also
--------
Series.sort_index : Sort by the Series indices.
DataFrame.sort_values : Sort DataFrame by the values along either axis.
DataFrame.sort_index : Sort DataFrame by indices.
Examples
--------
>>> s = pd.Series([np.nan, 1, 3, 10, 5])
>>> s
0 NaN
1 1.0
2 3.0
3 10.0
4 5.0
dtype: float64
Sort values ascending order (default behaviour)
>>> s.sort_values(ascending=True)
1 1.0
2 3.0
4 5.0
3 10.0
0 NaN
dtype: float64
Sort values descending order
>>> s.sort_values(ascending=False)
3 10.0
4 5.0
2 3.0
1 1.0
0 NaN
dtype: float64
Sort values inplace
>>> s.sort_values(ascending=False, inplace=True)
>>> s
3 10.0
4 5.0
2 3.0
1 1.0
0 NaN
dtype: float64
Sort values putting NAs first
>>> s.sort_values(na_position='first')
0 NaN
1 1.0
2 3.0
4 5.0
3 10.0
dtype: float64
Sort a series of strings
>>> s = pd.Series(['z', 'b', 'd', 'a', 'c'])
>>> s
0 z
1 b
2 d
3 a
4 c
dtype: object
>>> s.sort_values()
3 a
1 b
4 c
2 d
0 z
dtype: object
|
def has_noquorum(self):
"""
less than 1/3 of the known votes are on the same block
"""
assert self.is_valid
bhs = self.blockhashes()
if not bhs or bhs[0][1] <= 1 / 3. * self.num_eligible_votes:
assert not self.has_quorum_possible
return True
|
less than 1/3 of the known votes are on the same block
|
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
# extracting dictionary of coefficients (for soil amplification)
# specific to required intensity measure type
C_SR = self.COEFFS_SOIL_RESPONSE[imt]
# compute median PGA on rock (in g), needed to compute non-linear site
# amplification
C = self.COEFFS_AC10[PGA()]
pga4nl = np.exp(
self._compute_mean(C, rup.mag, dists.rjb, rup.rake)) * 1e-2 / g
# compute full mean value by adding site amplification terms
# (but avoiding recomputing mean on rock for PGA)
if imt == PGA():
mean = (np.log(pga4nl) +
self._get_site_amplification_linear(sites.vs30, C_SR) +
self._get_site_amplification_non_linear(sites.vs30, pga4nl,
C_SR))
else:
C = self.COEFFS_AC10[imt]
mean = (self._compute_mean(C, rup.mag, dists.rjb, rup.rake) +
self._get_site_amplification_linear(sites.vs30, C_SR) +
self._get_site_amplification_non_linear(sites.vs30, pga4nl,
C_SR))
# convert from cm/s**2 to g for SA (PGA is already computed in g)
if imt.name == "SA":
mean = np.log(np.exp(mean) * 1e-2 / g)
stddevs = self._get_stddevs(C, stddev_types, num_sites=len(sites.vs30))
return mean, stddevs
|
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
|
def _set_command_line_arguments(self, args):
"""
Set internal configuration variables according to
the input parameters
"""
Global.LOGGER.debug("setting command line arguments")
if args.VERBOSE:
Global.LOGGER.debug("verbose mode active")
Global.CONFIG_MANAGER.log_level = logging.DEBUG
Global.LOGGER_INSTANCE.reconfigure_log_level()
if args.STATS > 0:
Global.LOGGER.debug(f"stats requested every {args.STATS} seconds")
Global.CONFIG_MANAGER.show_stats = True
Global.CONFIG_MANAGER.stats_timeout = args.STATS
if args.INTERVAL > 0:
Global.LOGGER.debug(f"setting sleep interval to {args.INTERVAL} milliseconds")
Global.CONFIG_MANAGER.sleep_interval = float(args.INTERVAL)/1000
if args.TRACE:
Global.LOGGER.debug("tracing mode active")
Global.CONFIG_MANAGER.tracing_mode = True
Global.CONFIG_MANAGER.log_level = logging.DEBUG
Global.LOGGER_INSTANCE.reconfigure_log_level()
if args.MESSAGEINTERVAL is not None and args.MESSAGEINTERVAL > 0:
Global.LOGGER.debug(f"setting message fetcher sleep interval to {args.MESSAGEINTERVAL/10} milliseconds")
Global.CONFIG_MANAGER.message_fetcher_sleep_interval = float(args.MESSAGEINTERVAL)/10000
Global.CONFIG_MANAGER.fixed_message_fetcher_interval = True
Global.LOGGER.debug(f"recipes to be parsed: {args.FILENAME}")
Global.CONFIG_MANAGER.recipes = (args.FILENAME)
|
Set internal configuration variables according to
the input parameters
|
def clearReqVars(self):
""" Function clearHistVars
Clear the variables used to get history of all vars
@return RETURN: None
"""
self.errorMsg = None
self.payload = None
self.url = None
self.resp = None
self.res = None
self.method = None
self.printErrors = None
|
Function clearReqVars
Clear the variables used for the last request
@return RETURN: None
|
def set_maxrad(self,newrad):
"""
Sets max allowed radius in populations.
Doesn't operate via the :class:`stars.Constraint`
protocol; rather just rescales the sky positions
for the background objects and recalculates
sky area, etc.
"""
if not isinstance(newrad, Quantity):
newrad = newrad * u.arcsec
#if 'Rsky' not in self.constraints:
# self.constraints.append('Rsky')
for pop in self.poplist:
if not pop.is_specific:
try:
pop.maxrad = newrad
except AttributeError:
pass
|
Sets max allowed radius in populations.
Doesn't operate via the :class:`stars.Constraint`
protocol; rather just rescales the sky positions
for the background objects and recalculates
sky area, etc.
|
def _cl_int_from_learner(cls, learn:Learner, ds_type:DatasetType=DatasetType.Valid, tta=False):
"Create an instance of `ClassificationInterpretation`. `tta` indicates if we want to use Test Time Augmentation."
preds = learn.TTA(ds_type=ds_type, with_loss=True) if tta else learn.get_preds(ds_type=ds_type, with_loss=True)
return cls(learn, *preds, ds_type=ds_type)
|
Create an instance of `ClassificationInterpretation`. `tta` indicates if we want to use Test Time Augmentation.
|
def _delete_node(self, tree, node):
""" Private function that eliminate node from tree.
Parameters
----------
tree : object
node : int
node to be eliminated from tree
Returns
-------
pruned_tree : object
"""
# Calculate gains
temp_tree = copy.deepcopy(tree)
def recourse(temp_tree_, del_node):
if isinstance(temp_tree_, dict):
if temp_tree_['split'] != -1:
if temp_tree_['node'] == del_node:
del temp_tree_['sr']
del temp_tree_['sl']
del temp_tree_['node']
temp_tree_['split'] = -1
else:
for k in ['sl', 'sr']:
recourse(temp_tree_[k], del_node)
return None
recourse(temp_tree, node)
return temp_tree
|
Private function that eliminates a node from the tree.
Parameters
----------
tree : object
node : int
node to be eliminated from tree
Returns
-------
pruned_tree : object
|
def running_jobs(self, exit_on_error=True):
"""Initialize multiprocessing."""
with self.handling_exceptions():
if self.using_jobs:
from concurrent.futures import ProcessPoolExecutor
try:
with ProcessPoolExecutor(self.jobs) as self.executor:
yield
finally:
self.executor = None
else:
yield
if exit_on_error:
self.exit_on_error()
|
Initialize multiprocessing.
|
def datetime_to_knx(datetimeval, clock_synced_external=1):
"""Convert a Python timestamp to an 8 byte KNX time and date object"""
res = [0, 0, 0, 0, 0, 0, 0, 0]
year = datetimeval.year
if (year < 1900) or (year > 2155):
raise KNXException("Only years between 1900 and 2155 supported")
res[0] = year - 1900
res[1] = datetimeval.month
res[2] = datetimeval.day
res[3] = (datetimeval.isoweekday() << 5) + datetimeval.hour
res[4] = datetimeval.minute
res[5] = datetimeval.second
if datetimeval.isoweekday() < 6:
is_working_day = 1
else:
is_working_day = 0
# DST starts last Sunday in March
date1 = datetime(year, 4, 1)
dston = date1 - timedelta(days=date1.weekday() + 1)
# ends last Sunday in October
date2 = datetime(year, 11, 1)
dstoff = date2 - timedelta(days=date2.weekday() + 1)
if dston <= datetimeval.replace(tzinfo=None) < dstoff:
dst = 1
else:
dst = 0
res[6] = (is_working_day << 6) + (1 << 5) + dst
if clock_synced_external:
res[7] = 128
else:
res[7] = 0
return res
|
Convert a Python timestamp to an 8 byte KNX time and date object
|
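A quick illustrative check of the packing, assuming `datetime_to_knx` above is importable:
from datetime import datetime

knx = datetime_to_knx(datetime(2021, 6, 15, 12, 30, 0))
assert knx[0] == 121            # year stored as an offset from 1900
assert knx[3] == (2 << 5) + 12  # ISO weekday (Tuesday=2) packed above the hour
assert knx[4] == 30             # minutes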
def unix(value):
"""
Convert a date, or datetime to unix timestamp
:param value:
:return:
"""
if isinstance(value, (date, builtin_datetime)):
pass
elif value < 10000000000:
value = unix2datetime(value)
else:
value = milli2datetime(value)
return str(datetime2unix(value))
|
Convert a date, or datetime to unix timestamp
:param value:
:return:
|
def _extract_clublog_header(self, cty_xml_filename):
"""
Extract the header of the Clublog XML File
"""
cty_header = {}
try:
with open(cty_xml_filename, "r") as cty:
raw_header = cty.readline()
cty_date = re.search("date='.+'", raw_header)
if cty_date:
cty_date = cty_date.group(0).replace("date=", "").replace("'", "")
cty_date = datetime.strptime(cty_date[:19], '%Y-%m-%dT%H:%M:%S')
cty_date = cty_date.replace(tzinfo=UTC)  # replace() returns a new datetime
cty_header["Date"] = cty_date
cty_ns = re.search("xmlns='.+[']", raw_header)
if cty_ns:
cty_ns = cty_ns.group(0).replace("xmlns=", "").replace("'", "")
cty_header['NameSpace'] = cty_ns
if len(cty_header) == 2:
self._logger.debug("Header successfully retrieved from CTY File")
elif len(cty_header) < 2:
self._logger.warning("Header could only be partically retrieved from CTY File")
self._logger.warning("Content of Header: ")
for key in cty_header:
self._logger.warning(str(key)+": "+str(cty_header[key]))
return cty_header
except Exception as e:
self._logger.error("Clublog CTY File could not be opened / modified")
self._logger.error("Error Message: " + str(e))
return
|
Extract the header of the Clublog XML File
|
def get_yeast_sequence(chromosome, start, end, reverse_complement=False):
'''Acquire a sequence from SGD http://www.yeastgenome.org
:param chromosome: Yeast chromosome.
:type chromosome: int
:param start: A biostart.
:type start: int
:param end: A bioend.
:type end: int
:param reverse_complement: Get the reverse complement.
:type reverse_complement: bool
:returns: A DNA sequence.
:rtype: coral.DNA
'''
import requests
if start != end:
if reverse_complement:
rev_option = '-REV'
else:
rev_option = ''
param_url = '&chr=' + str(chromosome) + '&beg=' + str(start) + \
'&end=' + str(end) + '&rev=' + rev_option
url = 'http://www.yeastgenome.org/cgi-bin/getSeq?map=a2map' + \
param_url
res = requests.get(url)
# ok... sadly, I contacted SGD and they haven't implemented this, so
# I have to parse their yeastgenome page, but
# it is easy because the raw sequence is between <pre> tags!
# note: index() finds the first '<' of '<pre>', so we need +5!
begin_index = res.text.index('<pre>')
end_index = res.text.index('</pre>')
sequence = res.text[begin_index + 5:end_index]
sequence = sequence.replace('\n', '').replace('\r', '')
else:
sequence = ''
return coral.DNA(sequence)
|
Acquire a sequence from SGD http://www.yeastgenome.org
:param chromosome: Yeast chromosome.
:type chromosome: int
:param start: A biostart.
:type start: int
:param end: A bioend.
:type end: int
:param reverse_complement: Get the reverse complement.
:type reverse_complement: bool
:returns: A DNA sequence.
:rtype: coral.DNA
|
def _update_font_weight(self, font_weight):
"""Updates font weight widget
Parameters
----------
font_weight: Integer
\tButton is down iff font_weight == wx.FONTWEIGHT_BOLD
"""
toggle_state = font_weight & wx.FONTWEIGHT_BOLD == wx.FONTWEIGHT_BOLD
self.ToggleTool(wx.FONTFLAG_BOLD, toggle_state)
|
Updates font weight widget
Parameters
----------
font_weight: Integer
\tButton is down iff font_weight == wx.FONTWEIGHT_BOLD
|
def resolve_absolute_name(self, name):
'''
Resolve a field from an absolute name.
An absolute name is just like unix absolute path,
starts with '/' and each name component is separated by '/'.
:param name: absolute name, e.g. "/container/subcontainer/field"
:return: field with this absolute name
:raises: KittyException if field could not be resolved
'''
current = self
while current.enclosing:
current = current.enclosing
if name != '/':
components = name.split('/')[1:]
for component in components:
current = current.get_field_by_name(component)
return current
|
Resolve a field from an absolute name.
An absolute name is just like unix absolute path,
starts with '/' and each name component is separated by '/'.
:param name: absolute name, e.g. "/container/subcontainer/field"
:return: field with this absolute name
:raises: KittyException if field could not be resolved
|
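A self-contained analogue of the same walk, using a hypothetical dict-based tree in place of Kitty fields; it starts at the root (the original first climbs enclosing containers to reach it):
def resolve(tree, name):
    # split on '/', then descend one component at a time, as above
    node = tree
    if name != '/':
        for component in name.split('/')[1:]:
            node = node[component]
    return node

tree = {'container': {'subcontainer': {'field': 42}}}
assert resolve(tree, '/container/subcontainer/field') == 42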
def _clean_doc(self, doc=None):
"""Clean the doc before writing it, removing unnecessary properties and doing other operations."""
if doc is None:
doc = self.doc
resources = doc['Resources']
# We don't need these anymore because all of the data written into the package is normalized.
for arg in ['startline', 'headerlines', 'encoding']:
for e in list(resources.args):
if e.lower() == arg:
resources.args.remove(e)
for term in resources:
term['startline'] = None
term['headerlines'] = None
term['encoding'] = None
schema = doc['Schema']
## FIXME! This is probably dangerous, because the section args are changing, but the children
## are not, so when these two are combined in the Term.properties() accessors, the values are off.
## Because of this, _clean_doc should be run immediately before writing the doc.
for arg in ['altname', 'transform']:
for e in list(schema.args):
if e.lower() == arg:
schema.args.remove(e)
for table in self.doc.find('Root.Table'):
for col in table.find('Column'):
try:
col.value = col['altname'].value
except:
pass
col['altname'] = None
col['transform'] = None
# Remove any DSNs
#for dsn_t in self.doc.find('Root.Dsn'):
# self.doc.remove_term(dsn_t)
return doc
|
Clean the doc before writing it, removing unnecessary properties and doing other operations.
|
def regrep(filename, patterns, reverse=False, terminate_on_match=False,
postprocess=str):
"""
A powerful regular expression version of grep.
Args:
filename (str): Filename to grep.
patterns (dict): A dict of patterns, e.g.,
{"energy": "energy\(sigma->0\)\s+=\s+([\d\-\.]+)"}.
reverse (bool): Read files in reverse. Defaults to false. Useful for
large files, especially when used with terminate_on_match.
terminate_on_match (bool): Whether to terminate when there is at
least one match in each key in pattern.
postprocess (callable): A post processing function to convert all
matches. Defaults to str, i.e., no change.
Returns:
A dict of the following form:
{key1: [[[matches...], lineno], [[matches...], lineno],
[[matches...], lineno], ...],
key2: ...}
For reverse reads, the lineno is given as a -ve number. Please note
that 0-based indexing is used.
"""
compiled = {k: re.compile(v) for k, v in patterns.items()}
matches = collections.defaultdict(list)
gen = reverse_readfile(filename) if reverse else zopen(filename, "rt")
for i, l in enumerate(gen):
for k, p in compiled.items():
m = p.search(l)
if m:
matches[k].append([[postprocess(g) for g in m.groups()],
-i if reverse else i])
if terminate_on_match and all([
len(matches.get(k, [])) for k in compiled.keys()]):
break
try:
# Try to close open file handle. Pass if it is a generator.
gen.close()
except:
pass
return matches
|
A powerful regular expression version of grep.
Args:
filename (str): Filename to grep.
patterns (dict): A dict of patterns, e.g.,
{"energy": "energy\(sigma->0\)\s+=\s+([\d\-\.]+)"}.
reverse (bool): Read files in reverse. Defaults to false. Useful for
large files, especially when used with terminate_on_match.
terminate_on_match (bool): Whether to terminate when there is at
least one match in each key in pattern.
postprocess (callable): A post processing function to convert all
matches. Defaults to str, i.e., no change.
Returns:
A dict of the following form:
{key1: [[[matches...], lineno], [[matches...], lineno],
[[matches...], lineno], ...],
key2: ...}
For reverse reads, the lineno is given as a -ve number. Please note
that 0-based indexing is used.
|
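A hypothetical invocation; the filename and the matched value are illustrative only:
matches = regrep(
    "OUTCAR",
    {"energy": r"energy\(sigma->0\)\s+=\s+([\d\-\.]+)"},
    reverse=True,
    terminate_on_match=True,
    postprocess=float,
)
# e.g. matches["energy"] -> [[[-123.456], -42]]  (negative lineno: reverse read)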
def convert(cls, style_dict, num_format_str=None):
"""
converts a style_dict to an xlsxwriter format dict
Parameters
----------
style_dict : style dictionary to convert
num_format_str : optional number format string
"""
# Create a XlsxWriter format object.
props = {}
if num_format_str is not None:
props['num_format'] = num_format_str
if style_dict is None:
return props
if 'borders' in style_dict:
style_dict = style_dict.copy()
style_dict['border'] = style_dict.pop('borders')
for style_group_key, style_group in style_dict.items():
for src, dst in cls.STYLE_MAPPING.get(style_group_key, []):
# src is a sequence of keys into a nested dict
# dst is a flat key
if dst in props:
continue
v = style_group
for k in src:
try:
v = v[k]
except (KeyError, TypeError):
break
else:
props[dst] = v
if isinstance(props.get('pattern'), str):
# TODO: support other fill patterns
props['pattern'] = 0 if props['pattern'] == 'none' else 1
for k in ['border', 'top', 'right', 'bottom', 'left']:
if isinstance(props.get(k), str):
try:
props[k] = ['none', 'thin', 'medium', 'dashed', 'dotted',
'thick', 'double', 'hair', 'mediumDashed',
'dashDot', 'mediumDashDot', 'dashDotDot',
'mediumDashDotDot',
'slantDashDot'].index(props[k])
except ValueError:
props[k] = 2
if isinstance(props.get('font_script'), str):
props['font_script'] = ['baseline', 'superscript',
'subscript'].index(props['font_script'])
if isinstance(props.get('underline'), str):
props['underline'] = {'none': 0, 'single': 1, 'double': 2,
'singleAccounting': 33,
'doubleAccounting': 34}[props['underline']]
return props
|
converts a style_dict to an xlsxwriter format dict
Parameters
----------
style_dict : style dictionary to convert
num_format_str : optional number format string
|
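A hedged sketch of the input/output shapes; the class name is omitted and the exact STYLE_MAPPING routing is an assumption based on the code above:
# Hypothetical nested style dict (openpyxl-style):
style = {
    'font': {'bold': True, 'underline': 'single'},
    'borders': {'top': 'thin'},  # renamed to 'border' by convert()
}
# props = SomeStyler.convert(style, num_format_str='0.00')
# -> a flat xlsxwriter dict, e.g. {'num_format': '0.00', 'underline': 1, ...}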
def get_workflow_actions_for(brain_or_object):
"""Returns a list with the actions (transitions) supported by the workflows
the object passed in is bound to. Note it returns all actions, not only those
allowed for the object based on its current state and permissions.
"""
portal_type = api.get_portal_type(brain_or_object)
actions = actions_by_type.get(portal_type, None)
if actions:
return actions
# Retrieve the actions from the workflows this object is bound to
actions = []
wf_tool = api.get_tool("portal_workflow")
for wf_id in get_workflow_ids_for(brain_or_object):
workflow = wf_tool.getWorkflowById(wf_id)
wf_actions = map(lambda action: action[0], workflow.transitions.items())
actions.extend(wf_actions)
actions = list(set(actions))
actions_by_type[portal_type] = actions
return actions
|
Returns a list with the actions (transitions) supported by the workflows
the object passed in is bound to. Note it returns all actions, not only those
allowed for the object based on its current state and permissions.
|
def is_siemens(dicom_input):
"""
Use this function to detect if a dicom series is a siemens dataset
:param dicom_input: directory with dicom files for 1 scan
"""
# read dicom header
header = dicom_input[0]
# check if manufacturer is Siemens
if 'Manufacturer' not in header or 'Modality' not in header:
return False # we try generic conversion in these cases
# check if Modality is mr
if header.Modality.upper() != 'MR':
return False
if 'SIEMENS' not in header.Manufacturer.upper():
return False
return True
|
Use this function to detect if a dicom series is a siemens dataset
:param dicom_input: directory with dicom files for 1 scan
|
def load_weights(self, weight_file: str) -> None:
"""
Load the pre-trained weights from the file.
"""
requires_grad = self.requires_grad
with h5py.File(cached_path(weight_file), 'r') as fin:
for i_layer, lstms in enumerate(
zip(self.forward_layers, self.backward_layers)
):
for j_direction, lstm in enumerate(lstms):
# lstm is an instance of LSTMCellWithProjection
cell_size = lstm.cell_size
dataset = fin['RNN_%s' % j_direction]['RNN']['MultiRNNCell']['Cell%s' % i_layer
]['LSTMCell']
# tensorflow packs together both W and U matrices into one matrix,
# but pytorch maintains individual matrices. In addition, tensorflow
# packs the gates as input, memory, forget, output but pytorch
# uses input, forget, memory, output. So we need to modify the weights.
tf_weights = numpy.transpose(dataset['W_0'][...])
torch_weights = tf_weights.copy()
# split the W from U matrices
input_size = lstm.input_size
input_weights = torch_weights[:, :input_size]
recurrent_weights = torch_weights[:, input_size:]
tf_input_weights = tf_weights[:, :input_size]
tf_recurrent_weights = tf_weights[:, input_size:]
# handle the different gate order convention
for torch_w, tf_w in [[input_weights, tf_input_weights],
[recurrent_weights, tf_recurrent_weights]]:
torch_w[(1 * cell_size):(2 * cell_size), :] = tf_w[(2 * cell_size):(3 * cell_size), :]
torch_w[(2 * cell_size):(3 * cell_size), :] = tf_w[(1 * cell_size):(2 * cell_size), :]
lstm.input_linearity.weight.data.copy_(torch.FloatTensor(input_weights))
lstm.state_linearity.weight.data.copy_(torch.FloatTensor(recurrent_weights))
lstm.input_linearity.weight.requires_grad = requires_grad
lstm.state_linearity.weight.requires_grad = requires_grad
# the bias weights
tf_bias = dataset['B'][...]
# tensorflow adds 1.0 to forget gate bias instead of modifying the
# parameters...
tf_bias[(2 * cell_size):(3 * cell_size)] += 1
torch_bias = tf_bias.copy()
torch_bias[(1 * cell_size):(2 * cell_size)
] = tf_bias[(2 * cell_size):(3 * cell_size)]
torch_bias[(2 * cell_size):(3 * cell_size)
] = tf_bias[(1 * cell_size):(2 * cell_size)]
lstm.state_linearity.bias.data.copy_(torch.FloatTensor(torch_bias))
lstm.state_linearity.bias.requires_grad = requires_grad
# the projection weights
proj_weights = numpy.transpose(dataset['W_P_0'][...])
lstm.state_projection.weight.data.copy_(torch.FloatTensor(proj_weights))
lstm.state_projection.weight.requires_grad = requires_grad
|
Load the pre-trained weights from the file.
|
def get_group(self, name):
"""Get contact group by name
:param name: name of group
:type name: ``str``, ``unicode``
:rtype: ``dict`` with group data
"""
groups = self.get_groups()
for group in groups:
if group['contactgroupname'] == name:
return group
msg = 'No group named: "{name}" found.'
raise FMBaseError(msg.format(name=name))
|
Get contact group by name
:param name: name of group
:type name: ``str``, ``unicode``
:rtype: ``dict`` with group data
|
def get_depth(self, update=False):
"""
:returns: the depth (level) of the node
Caches the result in the object itself to help in loops.
:param update: Updates the cached value.
"""
if self.parent_id is None:
return 1
try:
if update:
del self._cached_depth
else:
return self._cached_depth
except AttributeError:
pass
depth = 0
node = self
while node:
node = node.parent
depth += 1
self._cached_depth = depth
return depth
|
:returns: the depth (level) of the node
Caches the result in the object itself to help in loops.
:param update: Updates the cached value.
|
def header_match(cls, header):
'''
Parse the 4-line (320-byte) library member header.
'''
mo = cls.header_re.match(header)
if mo is None:
msg = f'Expected {cls.header_re.pattern!r}, got {header!r}'
raise ValueError(msg)
return {
'name': mo['name'].decode().strip(),
'label': mo['label'].decode().strip(),
'type': mo['type'].decode().strip(),
'created': strptime(mo['created']),
'modified': strptime(mo['modified']),
'sas_version': float(mo['version']),
'os_version': mo['os'].decode().strip(),
'namestr_size': mo['descriptor_size'],
}
|
Parse the 4-line (320-byte) library member header.
|
def _handle_userInfo(self, data):
"""Handle user information"""
for k, v in data.items():
if k == "nick":
if v == "None":
v = "Volaphile"
setattr(self.room.user, k, v)
self.conn.enqueue_data(k, self.room.user.nick)
elif k != "profile":
if not hasattr(self.room, k):
warnings.warn(f"Skipping unset property {k}", ResourceWarning)
continue
setattr(self.room, k, v)
self.conn.enqueue_data(k, getattr(self.room, k))
self.room.user_info = k, v
self.conn.enqueue_data("user_info", self.room.user_info)
|
Handle user information
|
def ossos_discoveries(directory=parameters.REAL_KBO_AST_DIR,
suffix='ast',
no_nt_and_u=False,
single_object=None,
all_objects=True,
data_release=None,
):
"""
Returns a list of objects holding orbfit.Orbfit objects with the observations in the Orbfit.observations field.
Default is to return only the objects corresponding to the current Data Release.
"""
retval = []
# working_context = context.get_context(directory)
# files = working_context.get_listing(suffix)
files = [f for f in os.listdir(directory) if (f.endswith('mpc') or f.endswith('ast') or f.endswith('DONE'))]
if single_object is not None:
files = filter(lambda name: name.startswith(single_object), files)
elif all_objects and data_release is not None:
# only return the objects corresponding to a particular Data Release
data_release = ossos_release_parser(table=True, data_release=data_release)
objects = data_release['object']
files = filter(lambda name: name.partition(suffix)[0].rstrip('.') in objects, files)
for filename in files:
# keep out the not-tracked and uncharacterized.
if no_nt_and_u and (filename.__contains__('nt') or filename.startswith('u')):
continue
# observations = mpc.MPCReader(directory + filename)
mpc_filename = directory + filename
abg_filename = os.path.abspath(directory + '/../abg/') + "/" + os.path.splitext(filename)[0] + ".abg"
obj = TNO(None, ast_filename=mpc_filename, abg_filename=abg_filename)
retval.append(obj)
return retval
|
Returns a list of objects holding orbfit.Orbfit objects with the observations in the Orbfit.observations field.
Default is to return only the objects corresponding to the current Data Release.
|
def to_json(self):
"""
Returns the JSON representation of the space membership.
"""
result = super(SpaceMembership, self).to_json()
result.update({
'admin': self.admin,
'roles': self.roles
})
return result
|
Returns the JSON representation of the space membership.
|
def get_revoked(self):
"""
Return the revocations in this certificate revocation list.
These revocations will be provided by value, not by reference.
That means it's okay to mutate them: it won't affect this CRL.
:return: The revocations in this CRL.
:rtype: :class:`tuple` of :class:`Revocation`
"""
results = []
revoked_stack = _lib.X509_CRL_get_REVOKED(self._crl)
for i in range(_lib.sk_X509_REVOKED_num(revoked_stack)):
revoked = _lib.sk_X509_REVOKED_value(revoked_stack, i)
revoked_copy = _lib.Cryptography_X509_REVOKED_dup(revoked)
pyrev = Revoked.__new__(Revoked)
pyrev._revoked = _ffi.gc(revoked_copy, _lib.X509_REVOKED_free)
results.append(pyrev)
if results:
return tuple(results)
|
Return the revocations in this certificate revocation list.
These revocations will be provided by value, not by reference.
That means it's okay to mutate them: it won't affect this CRL.
:return: The revocations in this CRL.
:rtype: :class:`tuple` of :class:`Revocation`
|
def partition(pred, iterable, tolist=False):
'Use a predicate to partition entries into false entries and true entries'
# partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9
t1, t2 = itertools.tee(iterable)
ifalse = six.moves.filterfalse(pred, t1)
itrue = six.moves.filter(pred, t2)
if tolist:
return list(ifalse), list(itrue)
else:
return ifalse, itrue
|
Use a predicate to partition entries into false entries and true entries
|
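A quick runnable example; note the return order is (false entries, true entries):
is_odd = lambda x: x % 2 == 1
evens, odds = partition(is_odd, range(10), tolist=True)
assert evens == [0, 2, 4, 6, 8]
assert odds == [1, 3, 5, 7, 9]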
def raw_data(self, filename):
"""Return the raw pickled data from `filename`."""
if self.debug and self.debug.should('dataio'):
self.debug.write("Reading data from %r" % (filename,))
fdata = open(filename, 'rb')
try:
data = pickle.load(fdata)
finally:
fdata.close()
return data
|
Return the raw pickled data from `filename`.
|
def _set_fcoeport(self, v, load=False):
"""
Setter method for fcoeport, mapped from YANG variable /interface/port_channel/fcoeport (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_fcoeport is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_fcoeport() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=fcoeport.fcoeport, is_container='container', presence=False, yang_name="fcoeport", rest_name="fcoeport", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure the LAG to enable FCoE', u'callpoint': u'fcoeport_attr_lag_cp', u'sort-priority': u'138', u'display-when': u'(/vcsmode/vcs-mode = "true")'}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """fcoeport must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=fcoeport.fcoeport, is_container='container', presence=False, yang_name="fcoeport", rest_name="fcoeport", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure the LAG to enable FCoE', u'callpoint': u'fcoeport_attr_lag_cp', u'sort-priority': u'138', u'display-when': u'(/vcsmode/vcs-mode = "true")'}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='container', is_config=True)""",
})
self.__fcoeport = t
if hasattr(self, '_set'):
self._set()
|
Setter method for fcoeport, mapped from YANG variable /interface/port_channel/fcoeport (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_fcoeport is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_fcoeport() directly.
|
def getMaxPacketSize(self, endpoint):
"""
Get device's max packet size for given endpoint.
Warning: this function will not always give you the expected result.
See https://libusb.org/ticket/77 . You should instead consult the
endpoint descriptor of current configuration and alternate setting.
"""
result = libusb1.libusb_get_max_packet_size(self.device_p, endpoint)
mayRaiseUSBError(result)
return result
|
Get device's max packet size for given endpoint.
Warning: this function will not always give you the expected result.
See https://libusb.org/ticket/77 . You should instead consult the
endpoint descriptor of current configuration and alternate setting.
|
def and_(self, other):
"""
Creates a new compound query using the
<orb.QueryCompound.Op.And> type.
:param other <Query> || <orb.QueryCompound>
:return <orb.QueryCompound>
:sa __and__
:usage |>>> from orb import Query as Q
|>>> query = (Q('test') != 1).and_(Q('name') == 'Eric')
|>>> print query
|(test is not 1 and name is Eric)
"""
if not isinstance(other, (Query, QueryCompound)) or other.isNull():
return self.copy()
elif not self:
return other.copy()
else:
return orb.QueryCompound(self, other, op=orb.QueryCompound.Op.And)
|
Creates a new compound query using the
<orb.QueryCompound.Op.And> type.
:param other <Query> || <orb.QueryCompound>
:return <orb.QueryCompound>
:sa __and__
:usage |>>> from orb import Query as Q
|>>> query = (Q('test') != 1).and_(Q('name') == 'Eric')
|>>> print query
|(test is not 1 and name is Eric)
|