| code | docstring |
|---|---|
def multisplit(s, seps=list(string.punctuation) + list(string.whitespace), blank=True):
r"""Just like str.split(), except that a variety (list) of seperators is allowed.
>>> multisplit(r'1-2?3,;.4+-', string.punctuation)
['1', '2', '3', '', '', '4', '', '']
>>> multisplit(r'1-2?3,;.4+-', string.punctuation, blank=False)
['1', '2', '3', '4']
>>> multisplit(r'1C 234567890', '\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n' + string.punctuation)
['1C 234567890']
"""
seps = str().join(seps)
return [s2 for s2 in s.translate(str().join([(chr(i) if chr(i) not in seps else seps[0])
for i in range(256)])).split(seps[0]) if (blank or s2)]
|
r"""Just like str.split(), except that a variety (list) of seperators is allowed.
>>> multisplit(r'1-2?3,;.4+-', string.punctuation)
['1', '2', '3', '', '', '4', '', '']
>>> multisplit(r'1-2?3,;.4+-', string.punctuation, blank=False)
['1', '2', '3', '4']
>>> multisplit(r'1C 234567890', '\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n' + string.punctuation)
['1C 234567890']
|
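The translate-based trick above maps every separator onto the first separator and then splits on it. For readers who find that opaque, here is a minimal regex-based sketch with the same behaviour (the multisplit_re name and the re approach are illustrative, not from the source):

import re
import string

def multisplit_re(s, seps=string.punctuation + string.whitespace, blank=True):
    # Split on any single separator character; re.escape guards metacharacters.
    pattern = "|".join(re.escape(c) for c in seps)
    parts = re.split(pattern, s)
    return parts if blank else [p for p in parts if p]

print(multisplit_re('1-2?3,;.4+-', string.punctuation, blank=False))  # ['1', '2', '3', '4']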
def handle_change(self, change):
""" Handle changes from atom ContainerLists """
op = change['operation']
if op == 'append':
self.add(len(change['value']), LatLng(*change['item']))
elif op == 'insert':
self.add(change['index'], LatLng(*change['item']))
elif op == 'extend':
points = [LatLng(*p) for p in change['items']]
self.addAll([bridge.encode(c) for c in points])
elif op == '__setitem__':
self.set(change['index'], LatLng(*change['newitem']))
elif op == 'pop':
self.remove(change['index'])
else:
raise NotImplementedError(
"Unsupported change operation {}".format(op))
|
Handle changes from atom ContainerLists
|
def verify_space_available(self, search_pattern=r"(\d+) \w+ free"):
"""Verify sufficient space is available on destination file system (return boolean)."""
if self.direction == "put":
space_avail = self.remote_space_available(search_pattern=search_pattern)
elif self.direction == "get":
space_avail = self.local_space_available()
if space_avail > self.file_size:
return True
return False
|
Verify sufficient space is available on destination file system (return boolean).
|
def removeRedundantVerbChains( foundChains, removeOverlapping = True, removeSingleAraAndEi = False ):
''' Removes redundant verb chains: chains that partially or fully overlap
other chains (removeOverlapping == True), and single-word 'ei' and 'ära'
chains (if removeSingleAraAndEi == True).
In general there should not be many overlapping chains, because when expanding
phrases we always try to check that an expansion does not overlap with an
existing phrase. Overlaps mainly arise when wrong finite-verb analyses have
slipped into the morphological analysis (or the analyses have remained
ambiguous), so more finite verbs are detected in the clause than necessary.
Heuristic: of two overlapping chains, keep the phrase that starts earlier and
set its OTHER_VERBS value to True, marking that there is some confusion with
other verbs in the context.
'''
toDelete = []
for i in range(len(foundChains)):
matchObj1 = foundChains[i]
if removeOverlapping:
for j in range(i+1, len(foundChains)):
matchObj2 = foundChains[j]
if matchObj1 != matchObj2 and matchObj1[CLAUSE_IDX] == matchObj2[CLAUSE_IDX]:
phrase1 = set(matchObj1[PHRASE])
phrase2 = set(matchObj2[PHRASE])
intersect = phrase1.intersection(phrase2)
if len(intersect) > 0:
# In general there should be no overlaps, because when expanding phrases
# we always try to check that an expansion does not overlap with an
# existing phrase. Overlaps mainly arise when the morphological analysis
# has left finite-verb analyses ambiguous (or wrong analyses have slipped
# in), so more finite verbs are detected in the clause than necessary.
# Heuristic: keep the phrase that starts earlier and append a question
# mark to it (since we cannot be sure everything is correct)
minWid1 = min(matchObj1[PHRASE])
minWid2 = min(matchObj2[PHRASE])
if minWid1 < minWid2:
matchObj1[OTHER_VERBS] = True
toDelete.append(j)
else:
matchObj2[OTHER_VERBS] = True
toDelete.append(i)
if removeSingleAraAndEi:
if ( len(matchObj1[PATTERN])==1 and re.match('^(ei|ära)$', matchObj1[PATTERN][0]) ):
toDelete.append(i)
if toDelete:
if len(set(toDelete)) != len(toDelete):
toDelete = list(set(toDelete)) # Remove duplicates
toDelete = [ foundChains[i] for i in toDelete ]
for verbObj in toDelete:
foundChains.remove(verbObj)
|
Removes redundant verb chains: chains that partially or fully overlap
other chains (removeOverlapping == True), and single-word 'ei' and 'ära'
chains (if removeSingleAraAndEi == True).
In general there should not be many overlapping chains, because when expanding
phrases we always try to check that an expansion does not overlap with an
existing phrase. Overlaps mainly arise when wrong finite-verb analyses have
slipped into the morphological analysis (or the analyses have remained
ambiguous), so more finite verbs are detected in the clause than necessary.
Heuristic: of two overlapping chains, keep the phrase that starts earlier and
set its OTHER_VERBS value to True, marking that there is some confusion with
other verbs in the context.
|
def find_files(path, exts=None):
"""
Find files under a path and return a list of the files of the specified types.
:param:
* path: (string) the path to search
* exts: (list) list of file extensions to filter by; defaults to None (no filtering)
:return:
* files_list: (list) list of matching file paths
Example::
print('--- find_files demo ---')
path1 = '/root/fishbase_issue'
all_files = find_files(path1)
print(all_files)
exts_files = find_files(path1, exts=['.png', '.py'])
print(exts_files)
print('---')
Output::
--- find_files demo ---
['/root/fishbase_issue/test.png', '/root/fishbase_issue/head.jpg', '/root/fishbase_issue/py/man.png']
['/root/fishbase_issue/test.png', '/root/fishbase_issue/py/man.png']
---
"""
files_list = []
for root, dirs, files in os.walk(path):
for name in files:
files_list.append(os.path.join(root, name))
if exts is not None:
return [file for file in files_list if pathlib.Path(file).suffix in exts]
return files_list
|
Find files under a path and return a list of the files of the specified types.
:param:
* path: (string) the path to search
* exts: (list) list of file extensions to filter by; defaults to None (no filtering)
:return:
* files_list: (list) list of matching file paths
Example::
print('--- find_files demo ---')
path1 = '/root/fishbase_issue'
all_files = find_files(path1)
print(all_files)
exts_files = find_files(path1, exts=['.png', '.py'])
print(exts_files)
print('---')
Output::
--- find_files demo ---
['/root/fishbase_issue/test.png', '/root/fishbase_issue/head.jpg', '/root/fishbase_issue/py/man.png']
['/root/fishbase_issue/test.png', '/root/fishbase_issue/py/man.png']
---
|
def offset(self, location, dy=0):
""" Returns a new ``Region`` offset from this one by ``location``
Width and height remain the same
"""
if not isinstance(location, Location):
# Assume variables passed were dx,dy
location = Location(location, dy)
r = Region(self.x+location.x, self.y+location.y, self.w, self.h).clipRegionToScreen()
if r is None:
raise ValueError("Specified region is not visible on any screen")
return r
|
Returns a new ``Region`` offset from this one by ``location``
Width and height remain the same
|
def is_rigid(matrix):
"""
Check to make sure a homogeneous transformation matrix is
a rigid body transform.
Parameters
-----------
matrix: possibly a transformation matrix
Returns
-----------
check: bool, True if matrix is a valid (4,4) rigid body transform.
"""
matrix = np.asanyarray(matrix, dtype=np.float64)
if matrix.shape != (4, 4):
return False
if not np.allclose(matrix[-1], [0, 0, 0, 1]):
return False
check = np.dot(matrix[:3, :3],
matrix[:3, :3].T)
return np.allclose(check, np.eye(3))
|
Check to make sure a homogeneous transformation matrix is
a rigid body transform.
Parameters
-----------
matrix: possibly a transformation matrix
Returns
-----------
check: bool, True if matrix is a valid (4,4) rigid body transform.
|
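As a quick illustration of the check above (a hypothetical usage sketch, not from the source): the identity matrix is a rigid transform, while a matrix carrying a scale factor fails the R.dot(R.T) == I test.

import numpy as np

identity = np.eye(4)                     # rotation = I, translation = 0
scaled = np.diag([2.0, 1.0, 1.0, 1.0])   # scales x by 2, so not rigid

print(is_rigid(identity))  # True
print(is_rigid(scaled))    # False: R.dot(R.T) is diag(4, 1, 1), not the identity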
def URL(base, path, segments=None, defaults=None):
"""
URL segment handler capable of getting and setting segments by name. The
URL is constructed by joining base, path and segments.
For each segment a property capable of getting and setting that segment is
created dynamically.
"""
# Make a copy of the Segments class
url_class = type(Segments.__name__, Segments.__bases__,
dict(Segments.__dict__))
segments = [] if segments is None else segments
defaults = [] if defaults is None else defaults
# For each segment attach a property capable of getting and setting it
for segment in segments:
setattr(url_class, segment, url_class._segment(segment))
# Instantiate the class with the actual parameters
return url_class(base, path, segments, defaults)
|
URL segment handler capable of getting and setting segments by name. The
URL is constructed by joining base, path and segments.
For each segment a property capable of getting and setting that segment is
created dynamically.
|
def mutating_method(func):
"""Decorator for methods that are allowed to modify immutable objects"""
def wrapper(self, *__args, **__kwargs):
old_mutable = self._mutable
self._mutable = True
try:
# Call the wrapped function
return func(self, *__args, **__kwargs)
finally:
self._mutable = old_mutable
return wrapper
|
Decorator for methods that are allowed to modify immutable objects
|
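A small usage sketch (the Frozen class is hypothetical, not from the source) showing how the decorator temporarily lifts the _mutable flag so that only decorated methods may write attributes:

class Frozen:
    def __init__(self):
        self._mutable = False  # normally immutable

    def __setattr__(self, name, value):
        # allow writes only while a mutating method has set _mutable
        if name != '_mutable' and not getattr(self, '_mutable', True):
            raise AttributeError("object is immutable")
        super().__setattr__(name, value)

    @mutating_method
    def set_value(self, value):
        self.value = value  # permitted: _mutable is True inside this call

f = Frozen()
f.set_value(42)   # works
# f.other = 1     # would raise AttributeError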
async def _process_request(self, identity: bytes, empty_frame: list, request: RPCRequest):
"""
Executes the method specified in a JSON RPC request and then sends the reply to the socket.
:param identity: Client identity provided by ZeroMQ
:param empty_frame: Either an empty list or a single null frame depending on the client type
:param request: JSON RPC request
"""
try:
_log.debug("Client %s sent request: %s", identity, request)
start_time = datetime.now()
reply = await self.rpc_spec.run_handler(request)
if self.announce_timing:
_log.info("Request {} for {} lasted {} seconds".format(
request.id, request.method, (datetime.now() - start_time).total_seconds()))
_log.debug("Sending client %s reply: %s", identity, reply)
await self._socket.send_multipart([identity, *empty_frame, to_msgpack(reply)])
except Exception as e:
if self.serialize_exceptions:
_log.exception('Exception thrown in _process_request')
else:
raise e
|
Executes the method specified in a JSON RPC request and then sends the reply to the socket.
:param identity: Client identity provided by ZeroMQ
:param empty_frame: Either an empty list or a single null frame depending on the client type
:param request: JSON RPC request
|
def create_comment_edit(self):
"""Create a text edit for comments
:returns: the created text edit
:rtype: :class:`jukeboxcore.gui.widgets.textedit.JB_PlainTextEdit`
:raises: None
"""
pte = JB_PlainTextEdit(parent=self)
pte.set_placeholder("Enter a comment before saving...")
pte.setMaximumHeight(120)
return pte
|
Create a text edit for comments
:returns: the created text edit
:rtype: :class:`jukeboxcore.gui.widgets.textedit.JB_PlainTextEdit`
:raises: None
|
def concatenate_lists(*layers, **kwargs): # pragma: no cover
"""Compose two or more models `f`, `g`, etc, such that their outputs are
concatenated, i.e. `concatenate(f, g)(x)` computes `hstack(f(x), g(x))`
"""
if not layers:
return noop()
drop_factor = kwargs.get("drop_factor", 1.0)
ops = layers[0].ops
layers = [chain(layer, flatten) for layer in layers]
concat = concatenate(*layers)
def concatenate_lists_fwd(Xs, drop=0.0):
drop *= drop_factor
lengths = ops.asarray([len(X) for X in Xs], dtype="i")
flat_y, bp_flat_y = concat.begin_update(Xs, drop=drop)
ys = ops.unflatten(flat_y, lengths)
def concatenate_lists_bwd(d_ys, sgd=None):
return bp_flat_y(ops.flatten(d_ys), sgd=sgd)
return ys, concatenate_lists_bwd
model = wrap(concatenate_lists_fwd, concat)
return model
|
Compose two or more models `f`, `g`, etc, such that their outputs are
concatenated, i.e. `concatenate(f, g)(x)` computes `hstack(f(x), g(x))`
|
def command_err(self, code=1, errmsg='MockupDB command failure',
*args, **kwargs):
"""Error reply to a command.
Returns True so it is suitable as an `~MockupDB.autoresponds` handler.
"""
kwargs.setdefault('ok', 0)
kwargs['code'] = code
kwargs['errmsg'] = errmsg
self.replies(*args, **kwargs)
return True
|
Error reply to a command.
Returns True so it is suitable as an `~MockupDB.autoresponds` handler.
|
def exists_alias(self, alias_name, index_name=None):
"""Check whether or not the given alias exists
:return: True if the alias already exists"""
return self._es_conn.indices.exists_alias(index=index_name, name=alias_name)
|
Check whether or not the given alias exists
:return: True if the alias already exists
|
def publish(self):
"""Publish GitHub release as record."""
with db.session.begin_nested():
deposit = self.deposit_class.create(self.metadata)
deposit['_deposit']['created_by'] = self.event.user_id
deposit['_deposit']['owners'] = [self.event.user_id]
# Fetch the deposit files
for key, url in self.files:
deposit.files[key] = self.gh.api.session.get(
url, stream=True).raw
deposit.publish()
recid, record = deposit.fetch_published()
self.model.recordmetadata = record.model
|
Publish GitHub release as record.
|
def _validate(self, writing=False):
"""Verify that the box obeys the specifications."""
for box in self.DR:
if box.box_id != 'url ':
msg = ('Child boxes of a data reference box can only be data '
'entry URL boxes.')
self._dispatch_validation_error(msg, writing=writing)
|
Verify that the box obeys the specifications.
|
def p_sequenceItems(self, p):
"""sequenceItems : sequenceItems ',' sequenceItem
| sequenceItem"""
# libsmi: TODO: might this list be empty?
n = len(p)
if n == 4:
p[0] = p[1] + [p[3]]
elif n == 2:
p[0] = [p[1]]
|
sequenceItems : sequenceItems ',' sequenceItem
| sequenceItem
|
def wait(self):
'''
Wait until the ui object is gone or exists.
Usage:
d(text="Clock").wait.gone() # wait until it's gone.
d(text="Settings").wait.exists() # wait until it appears.
'''
@param_to_property(action=["exists", "gone"])
def _wait(action, timeout=3000):
if timeout / 1000 + 5 > int(os.environ.get("JSONRPC_TIMEOUT", 90)):
http_timeout = timeout / 1000 + 5
else:
http_timeout = int(os.environ.get("JSONRPC_TIMEOUT", 90))
method = self.device.server.jsonrpc_wrap(
timeout=http_timeout
).waitUntilGone if action == "gone" else self.device.server.jsonrpc_wrap(timeout=http_timeout).waitForExists
return method(self.selector, timeout)
return _wait
|
Wait until the ui object is gone or exists.
Usage:
d(text="Clock").wait.gone() # wait until it's gone.
d(text="Settings").wait.exists() # wait until it appears.
|
def _call_post_with_user_override(self, sap_user_id, url, payload):
"""
Make a post request with an auth token acquired for a specific user to a SuccessFactors endpoint.
Args:
sap_user_id (str): The user to use to retrieve an auth token.
url (str): The url to post to.
payload (str): The json encoded payload to post.
"""
SAPSuccessFactorsEnterpriseCustomerConfiguration = apps.get_model( # pylint: disable=invalid-name
'sap_success_factors',
'SAPSuccessFactorsEnterpriseCustomerConfiguration'
)
oauth_access_token, _ = SAPSuccessFactorsAPIClient.get_oauth_access_token(
self.enterprise_configuration.sapsf_base_url,
self.enterprise_configuration.key,
self.enterprise_configuration.secret,
self.enterprise_configuration.sapsf_company_id,
sap_user_id,
SAPSuccessFactorsEnterpriseCustomerConfiguration.USER_TYPE_USER
)
response = requests.post(
url,
data=payload,
headers={
'Authorization': 'Bearer {}'.format(oauth_access_token),
'content-type': 'application/json'
}
)
return response.status_code, response.text
|
Make a post request with an auth token acquired for a specific user to a SuccessFactors endpoint.
Args:
sap_user_id (str): The user to use to retrieve an auth token.
url (str): The url to post to.
payload (str): The json encoded payload to post.
|
def view(template=None):
"""
Create the Maintenance view
Must be instantiated
import maintenance_view
MaintenanceView = maintenance_view()
:param template: The directory containing the view pages
:return:
"""
if not template:
template = "Juice/Plugin/MaintenancePage/index.html"
class Maintenance(View):
@classmethod
def register(cls, app, **kwargs):
super(cls, cls).register(app, **kwargs)
if cls.get_config("APPLICATION_MAINTENANCE_ON"):
app.logger.info("APPLICATION MAINTENANCE PAGE IS ON")
@app.before_request
def on_maintenance():
return cls.render_(layout_=template), 503
return Maintenance
|
Create the Maintenance view
Must be instantiated
import maintenance_view
MaintenanceView = maintenance_view()
:param template: The directory containing the view pages
:return:
|
def Tmatrix(X):
"""
gets the orientation matrix (T) from data in X
"""
T = [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]
for row in X:
for k in range(3):
for l in range(3):
T[k][l] += row[k] * row[l]
return T
|
gets the orientation matrix (T) from data in X
|
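The triple loop above accumulates the sum of outer products of the rows of X, i.e. the matrix product X-transpose times X. A numpy one-liner gives the same 3x3 result (an equivalence sketch, not from the source):

import numpy as np

X = [[1.0, 0.0, 0.0], [0.0, 1.0, 1.0]]
T_loop = Tmatrix(X)
T_np = np.asarray(X).T @ np.asarray(X)   # sum of row outer products
assert np.allclose(T_loop, T_np)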
def password_valid(self, wallet):
"""
Checks whether the password entered for **wallet** is valid
:param wallet: Wallet to check password for
:type wallet: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.password_valid(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
... )
True
"""
wallet = self._process_value(wallet, 'wallet')
payload = {"wallet": wallet}
resp = self.call('password_valid', payload)
return resp['valid'] == '1'
|
Checks whether the password entered for **wallet** is valid
:param wallet: Wallet to check password for
:type wallet: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.password_valid(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
... )
True
|
def get_learner_data_records(self, enterprise_enrollment, completed_date=None, grade=None, is_passing=False):
"""
Return a SapSuccessFactorsLearnerDataTransmissionAudit with the given enrollment and course completion data.
If completed_date is None and the learner isn't passing, then course completion has not been met.
If no remote ID can be found, return None.
"""
completed_timestamp = None
course_completed = False
if completed_date is not None:
completed_timestamp = parse_datetime_to_epoch_millis(completed_date)
course_completed = is_passing
sapsf_user_id = enterprise_enrollment.enterprise_customer_user.get_remote_id()
if sapsf_user_id is not None:
SapSuccessFactorsLearnerDataTransmissionAudit = apps.get_model( # pylint: disable=invalid-name
'sap_success_factors',
'SapSuccessFactorsLearnerDataTransmissionAudit'
)
# We return two records here, one with the course key and one with the course run id, to account for
# uncertainty about the type of content (course vs. course run) that was sent to the integrated channel.
return [
SapSuccessFactorsLearnerDataTransmissionAudit(
enterprise_course_enrollment_id=enterprise_enrollment.id,
sapsf_user_id=sapsf_user_id,
course_id=parse_course_key(enterprise_enrollment.course_id),
course_completed=course_completed,
completed_timestamp=completed_timestamp,
grade=grade,
),
SapSuccessFactorsLearnerDataTransmissionAudit(
enterprise_course_enrollment_id=enterprise_enrollment.id,
sapsf_user_id=sapsf_user_id,
course_id=enterprise_enrollment.course_id,
course_completed=course_completed,
completed_timestamp=completed_timestamp,
grade=grade,
),
]
else:
LOGGER.debug(
'No learner data was sent for user [%s] because an SAP SuccessFactors user ID could not be found.',
enterprise_enrollment.enterprise_customer_user.username
)
|
Return a SapSuccessFactorsLearnerDataTransmissionAudit with the given enrollment and course completion data.
If completed_date is None and the learner isn't passing, then course completion has not been met.
If no remote ID can be found, return None.
|
def _pct_diff(self, best, other):
""" Calculates and colorizes the percent difference between @best
and @other
"""
return colorize("{}%".format(
round(((best-other)/best)*100, 2)).rjust(10), "red")
|
Calculates and colorizes the percent difference between @best
and @other
|
def close(self):
"""Close the stream
"""
self.closed = True
self._flush_bits_to_stream()
self._stream.close()
|
Close the stream
|
def delete_empty_children(self):
"""
Walk through the children of this node and delete any that are empty.
"""
for child in list(self.children):  # iterate over a copy; children may be removed below
child.delete_empty_children()
try:
if os.path.exists(child.full_path):
os.rmdir(child.full_path)
except OSError: pass
else: self.children.remove(child)
|
Walk through the children of this node and delete any that are empty.
|
def _verify_credentials(self):
"""
An internal method that verifies the credentials given at instantiation.
:raises: :class:`Pymoe.errors.UserLoginFailed`
"""
r = requests.get(self.apiurl + "account/verify_credentials.xml",
auth=HTTPBasicAuth(self._username, self._password),
headers=self.header)
if r.status_code != 200:
raise UserLoginFailed("Username or Password incorrect.")
|
An internal method that verifies the credentials given at instantiation.
:raises: :class:`Pymoe.errors.UserLoginFailed`
|
def encrypt(key, message):
'''encrypt leverages KMS encrypt and base64-encode encrypted blob
More info on KMS encrypt API:
https://docs.aws.amazon.com/kms/latest/APIReference/API_encrypt.html
'''
try:
ret = kms.encrypt(KeyId=key, Plaintext=message)
encrypted_data = base64.encodebytes(ret.get('CiphertextBlob'))
except Exception as e:
# returns http 500 back to user and log error details in Cloudwatch Logs
raise Exception("Unable to encrypt data: ", e)
return encrypted_data.decode()
|
encrypt leverages KMS encrypt and base64-encode encrypted blob
More info on KMS encrypt API:
https://docs.aws.amazon.com/kms/latest/APIReference/API_encrypt.html
|
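For symmetry, here is a hedged sketch of the reverse operation (not part of the source; it assumes the same module-level kms boto3 client and that the plaintext is UTF-8 text):

def decrypt(ciphertext):
    '''Hypothetical counterpart to encrypt(): base64-decode the blob produced
    above and ask KMS to decrypt it.
    '''
    try:
        blob = base64.decodebytes(ciphertext.encode())
        ret = kms.decrypt(CiphertextBlob=blob)
    except Exception as e:
        raise Exception("Unable to decrypt data: ", e)
    return ret.get('Plaintext').decode()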
def max(a, axis=None):
"""
Request the maximum of an Array over any number of axes.
.. note:: Currently limited to operating on a single axis.
Parameters
----------
a : Array object
The object whose maximum is to be found.
axis : None, or int, or iterable of ints
Axis or axes along which the operation is performed. The default
(axis=None) is to perform the operation over all the dimensions of the
input array. The axis may be negative, in which case it counts from
the last to the first axis. If axis is a tuple of ints, the operation
is performed over multiple axes.
Returns
-------
out : Array
The Array representing the requested max.
"""
axes = _normalise_axis(axis, a)
assert axes is not None and len(axes) == 1
return _Aggregation(a, axes[0],
_MaxStreamsHandler, _MaxMaskedStreamsHandler,
a.dtype, {})
|
Request the maximum of an Array over any number of axes.
.. note:: Currently limited to operating on a single axis.
Parameters
----------
a : Array object
The object whose maximum is to be found.
axis : None, or int, or iterable of ints
Axis or axes along which the operation is performed. The default
(axis=None) is to perform the operation over all the dimensions of the
input array. The axis may be negative, in which case it counts from
the last to the first axis. If axis is a tuple of ints, the operation
is performed over multiple axes.
Returns
-------
out : Array
The Array representing the requested max.
|
def str2rsi(key):
"""
Convert a string of the form 'rlz-XXXX/sid-YYYY/ZZZ'
into a triple (XXXX, YYYY, ZZZ)
"""
rlzi, sid, imt = key.split('/')
return int(rlzi[4:]), int(sid[4:]), imt
|
Convert a string of the form 'rlz-XXXX/sid-YYYY/ZZZ'
into a triple (XXXX, YYYY, ZZZ)
|
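For example (illustrative values), the key 'rlz-0003/sid-0042/PGA' maps to the triple (3, 42, 'PGA'):

>>> str2rsi('rlz-0003/sid-0042/PGA')
(3, 42, 'PGA')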
def _onDisconnect(self, mqttc, obj, rc):
"""
Called when the client disconnects from IBM Watson IoT Platform.
See [paho.mqtt.python#on_disconnect](https://github.com/eclipse/paho.mqtt.python#on_disconnect) for more information
# Parameters
mqttc (paho.mqtt.client.Client): The client instance for this callback
obj (object): The private user data as set in Client() or user_data_set()
rc (int): indicates the disconnection state. If `MQTT_ERR_SUCCESS` (0), the callback was
called in response to a `disconnect()` call. If any other value the disconnection was
unexpected, such as might be caused by a network error.
"""
# Clear the event to indicate we're no longer connected
self.connectEvent.clear()
if rc != 0:
self.logger.error("Unexpected disconnect from IBM Watson IoT Platform: %d" % (rc))
else:
self.logger.info("Disconnected from IBM Watson IoT Platform")
|
Called when the client disconnects from IBM Watson IoT Platform.
See [paho.mqtt.python#on_disconnect](https://github.com/eclipse/paho.mqtt.python#on_disconnect) for more information
# Parameters
mqttc (paho.mqtt.client.Client): The client instance for this callback
obj (object): The private user data as set in Client() or user_data_set()
rc (int): indicates the disconnection state. If `MQTT_ERR_SUCCESS` (0), the callback was
called in response to a `disconnect()` call. If any other value the disconnection was
unexpected, such as might be caused by a network error.
|
def get_zipped_dataset_from_predictions(predictions):
"""Creates dataset from in-memory predictions."""
targets = stack_data_given_key(predictions, "targets")
outputs = stack_data_given_key(predictions, "outputs")
num_videos, num_steps = targets.shape[:2]
# Truncate output time-steps to match target time-steps
outputs = outputs[:, :num_steps]
targets_placeholder = tf.placeholder(targets.dtype, targets.shape)
outputs_placeholder = tf.placeholder(outputs.dtype, outputs.shape)
dataset = tf.data.Dataset.from_tensor_slices(
(targets_placeholder, outputs_placeholder))
iterator = dataset.make_initializable_iterator()
feed_dict = {targets_placeholder: targets,
outputs_placeholder: outputs}
return iterator, feed_dict, num_videos
|
Creates dataset from in-memory predictions.
|
def put(self, path, data, **options):
"""
Parses PUT request options and dispatches a request
"""
data, options = self._update_request(data, options)
return self.request('put', path, data=data, **options)
|
Parses PUT request options and dispatches a request
|
def load(self, env=None):
""" Load a section values of given environment.
If nothing to specified, use environmental variable.
If unknown environment was specified, warn it on logger.
:param env: environment key to load in a coercive manner
:type env: string
:rtype: dict
"""
self._load()
e = env or \
os.environ.get(RUNNING_MODE_ENVKEY, DEFAULT_RUNNING_MODE)
if e in self.config:
return self.config[e]
logging.warning("Environment '%s' was not found.", e)
|
Load the section values of the given environment.
If nothing is specified, use the environment variable.
If an unknown environment is specified, warn about it on the logger.
:param env: environment key to load in a coercive manner
:type env: string
:rtype: dict
|
def _ParseMFTEntry(self, parser_mediator, mft_entry):
"""Extracts data from a NFTS $MFT entry.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
mft_entry (pyfsntfs.file_entry): MFT entry.
"""
for attribute_index in range(0, mft_entry.number_of_attributes):
try:
mft_attribute = mft_entry.get_attribute(attribute_index)
self._ParseMFTAttribute(parser_mediator, mft_entry, mft_attribute)
except IOError as exception:
parser_mediator.ProduceExtractionWarning((
'unable to parse MFT attribute: {0:d} with error: {1!s}').format(
attribute_index, exception))
|
Extracts data from an NTFS $MFT entry.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
mft_entry (pyfsntfs.file_entry): MFT entry.
|
def milestone(self, number):
"""Get the milestone indicated by ``number``.
:param int number: (required), unique id number of the milestone
:returns: :class:`Milestone <github3.issues.milestone.Milestone>`
"""
json = None
if int(number) > 0:
url = self._build_url('milestones', str(number),
base_url=self._api)
json = self._json(self._get(url), 200)
return Milestone(json, self) if json else None
|
Get the milestone indicated by ``number``.
:param int number: (required), unique id number of the milestone
:returns: :class:`Milestone <github3.issues.milestone.Milestone>`
|
def fixed_vectors_encoding(index_encoded_sequences, letter_to_vector_df):
"""
Given a `n` x `k` matrix of integers such as that returned by `index_encoding()` and
a dataframe mapping each index to an arbitrary vector, return a `n * k * m`
array where the (`i`, `j`)'th element is `letter_to_vector_df.iloc[sequence[i][j]]`.
The dataframe index and columns names are ignored here; the indexing is done
entirely by integer position in the dataframe.
Parameters
----------
index_encoded_sequences : `n` x `k` array of integers
letter_to_vector_df : pandas.DataFrame of shape (`alphabet size`, `m`)
Returns
-------
numpy.array of integers with shape (`n`, `k`, `m`)
"""
(num_sequences, sequence_length) = index_encoded_sequences.shape
target_shape = (
    num_sequences, sequence_length, letter_to_vector_df.shape[1])
result = letter_to_vector_df.iloc[
index_encoded_sequences.flatten()
].values.reshape(target_shape)
return result
|
Given a `n` x `k` matrix of integers such as that returned by `index_encoding()` and
a dataframe mapping each index to an arbitrary vector, return a `n * k * m`
array where the (`i`, `j`)'th element is `letter_to_vector_df.iloc[sequence[i][j]]`.
The dataframe index and columns names are ignored here; the indexing is done
entirely by integer position in the dataframe.
Parameters
----------
index_encoded_sequences : `n` x `k` array of integers
letter_to_vector_df : pandas.DataFrame of shape (`alphabet size`, `m`)
Returns
-------
numpy.array of integers with shape (`n`, `k`, `m`)
|
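A tiny illustrative run (a hypothetical 3-letter alphabet with 3-dimensional vectors, not from the source) showing the (n, k, m) result:

import numpy as np
import pandas as pd

letter_to_vector = pd.DataFrame([[1.0, 0.0, 0.0],   # index 0
                                 [0.0, 1.0, 0.0],   # index 1
                                 [0.0, 0.0, 1.0]])  # index 2
index_encoded = np.array([[0, 2, 1],
                          [1, 1, 0]])               # n=2 sequences of length k=3
encoded = fixed_vectors_encoding(index_encoded, letter_to_vector)
print(encoded.shape)   # (2, 3, 3)
print(encoded[0, 1])   # [0. 0. 1.] -- the vector for index 2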
def _get(self, *args, **kwargs):
"""
Retrieve unread messages for current user, both from the inbox and
from other storages
"""
messages, all_retrieved = super(StorageMixin, self)._get(*args, **kwargs)
if self.user.is_authenticated():
inbox_messages = self.backend.inbox_list(self.user)
else:
inbox_messages = []
return messages + inbox_messages, all_retrieved
|
Retrieve unread messages for current user, both from the inbox and
from other storages
|
def init_providers(self, provider, kwargs):
"""
Inits main and fallback provider if relevant
:param provider: Provider name to use
:param kwargs: Additional kwargs
:raises ValueError: If provider name or fallback names are not valid providers, a :exc:`ValueError` will
be raised
"""
self.provider = notifiers.get_notifier(provider, strict=True)
if kwargs.get("fallback"):
self.fallback = notifiers.get_notifier(kwargs.pop("fallback"), strict=True)
self.fallback_defaults = kwargs.pop("fallback_defaults", {})
|
Inits main and fallback provider if relevant
:param provider: Provider name to use
:param kwargs: Additional kwargs
:raises ValueError: If provider name or fallback names are not valid providers, a :exc:`ValueError` will
be raised
|
def window_nuttall(N):
r"""Nuttall tapering window
:param N: window length
.. math:: w(n) = a_0 - a_1 \cos\left(\frac{2\pi n}{N-1}\right)+ a_2 \cos\left(\frac{4\pi n}{N-1}\right)- a_3 \cos\left(\frac{6\pi n}{N-1}\right)
with :math:`a_0 = 0.355768`, :math:`a_1 = 0.487396`, :math:`a_2=0.144232` and :math:`a_3=0.012604`
.. plot::
:width: 80%
:include-source:
from spectrum import window_visu
window_visu(64, 'nuttall', mindB=-80)
.. seealso:: :func:`create_window`, :class:`Window`
"""
a0 = 0.355768
a1 = 0.487396
a2 = 0.144232
a3 = 0.012604
return _coeff4(N, a0, a1, a2, a3)
|
r"""Nuttall tapering window
:param N: window length
.. math:: w(n) = a_0 - a_1 \cos\left(\frac{2\pi n}{N-1}\right)+ a_2 \cos\left(\frac{4\pi n}{N-1}\right)- a_3 \cos\left(\frac{6\pi n}{N-1}\right)
with :math:`a_0 = 0.355768`, :math:`a_1 = 0.487396`, :math:`a_2=0.144232` and :math:`a_3=0.012604`
.. plot::
:width: 80%
:include-source:
from spectrum import window_visu
window_visu(64, 'nuttall', mindB=-80)
.. seealso:: :func:`create_window`, :class:`Window`
|
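The helper _coeff4 is not shown in this snippet; a minimal numpy sketch of the four-term cosine sum it presumably evaluates (an assumption based on the formula in the docstring, not the spectrum source code):

import numpy as np

def _coeff4_sketch(N, a0, a1, a2, a3):
    # w(n) = a0 - a1*cos(2*pi*n/(N-1)) + a2*cos(4*pi*n/(N-1)) - a3*cos(6*pi*n/(N-1))
    n = np.arange(N)
    x = 2.0 * np.pi * n / (N - 1)
    return a0 - a1 * np.cos(x) + a2 * np.cos(2 * x) - a3 * np.cos(3 * x)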
def save(self, *args, **kwargs):
"""
**uid**: :code:`{person.uid}_candidate:{party.uid}-{race.cycle.uid}`
"""
self.uid = "{}_candidate:{}-{}".format(
self.person.uid, self.party.uid, self.race.cycle.uid
)
super(Candidate, self).save(*args, **kwargs)
|
**uid**: :code:`{person.uid}_candidate:{party.uid}-{race.cycle.uid}`
|
def archive_handler(unused_build_context, target, fetch, package_dir, tar):
"""Handle remote downloadable archive URI.
Download the archive and cache it under the private builder workspace
(unless already downloaded), extract it, and add the content to the
package tar.
TODO(itamar): Support re-downloading if remote changed compared to local.
TODO(itamar): Support more archive formats (currently only tarballs).
"""
package_dest = join(package_dir, basename(urlparse(fetch.uri).path))
package_content_dir = join(package_dir, 'content')
extract_dir = (join(package_content_dir, fetch.name)
if fetch.name else package_content_dir)
fetch_url(fetch.uri, package_dest, package_dir)
# TODO(itamar): Avoid repetition of splitting extension here and above
# TODO(itamar): Don't use `extractall` on potentially untrusted archives
ext = splitext(package_dest)[-1].lower()
if ext in ('.gz', '.bz2', '.tgz'):
with tarfile.open(package_dest, 'r:*') as src_tar:
src_tar.extractall(extract_dir)
elif ext in ('.zip',):
with ZipFile(package_dest, 'r') as zipf:
zipf.extractall(extract_dir)
else:
raise ValueError('Unsupported extension {}'.format(ext))
tar.add(package_content_dir, arcname=split_name(target.name))
|
Handle remote downloadable archive URI.
Download the archive and cache it under the private builder workspace
(unless already downloaded), extract it, and add the content to the
package tar.
TODO(itamar): Support re-downloading if remote changed compared to local.
TODO(itamar): Support more archive formats (currently only tarballs).
|
def fopen(*args, **kwargs):
'''
Wrapper around open() built-in to set CLOEXEC on the fd.
This flag specifies that the file descriptor should be closed when an exec
function is invoked;
When a file descriptor is allocated (as with open or dup), this bit is
initially cleared on the new file descriptor, meaning that descriptor will
survive into the new program after exec.
NB! We still have small race condition between open and fcntl.
'''
if six.PY3:
try:
# Don't permit stdin/stdout/stderr to be opened. The boolean False
# and True are treated by Python 3's open() as file descriptors 0
# and 1, respectively.
if args[0] in (0, 1, 2):
raise TypeError(
'{0} is not a permitted file descriptor'.format(args[0])
)
except IndexError:
pass
binary = None
# ensure 'binary' mode is always used on Windows in Python 2
if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or
kwargs.pop('binary', False)):
if len(args) > 1:
args = list(args)
if 'b' not in args[1]:
args[1] = args[1].replace('t', 'b')
if 'b' not in args[1]:
args[1] += 'b'
elif kwargs.get('mode'):
if 'b' not in kwargs['mode']:
kwargs['mode'] = kwargs['mode'].replace('t', 'b')
if 'b' not in kwargs['mode']:
kwargs['mode'] += 'b'
else:
# the default is to read
kwargs['mode'] = 'rb'
elif six.PY3 and 'encoding' not in kwargs:
# In Python 3, if text mode is used and the encoding
# is not specified, set the encoding to 'utf-8'.
binary = False
if len(args) > 1:
args = list(args)
if 'b' in args[1]:
binary = True
if kwargs.get('mode', None):
if 'b' in kwargs['mode']:
binary = True
if not binary:
kwargs['encoding'] = __salt_system_encoding__
if six.PY3 and not binary and not kwargs.get('newline', None):
kwargs['newline'] = ''
f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage
if is_fcntl_available():
# modify the file descriptor on systems with fcntl
# unix and unix-like systems only
try:
FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103
except AttributeError:
FD_CLOEXEC = 1 # pylint: disable=C0103
old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)
fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)
return f_handle
|
Wrapper around open() built-in to set CLOEXEC on the fd.
This flag specifies that the file descriptor should be closed when an exec
function is invoked;
When a file descriptor is allocated (as with open or dup), this bit is
initially cleared on the new file descriptor, meaning that descriptor will
survive into the new program after exec.
NB! We still have small race condition between open and fcntl.
|
def on_release_key(key, callback, suppress=False):
"""
Invokes `callback` for KEY_UP event related to the given key. For details see `hook`.
"""
return hook_key(key, lambda e: e.event_type == KEY_DOWN or callback(e), suppress=suppress)
|
Invokes `callback` for KEY_UP event related to the given key. For details see `hook`.
|
def _get_drift(step_size_parts, volatility_parts, grads_volatility,
grads_target_log_prob,
name=None):
"""Compute diffusion drift at the current location `current_state`.
The drift of the diffusion is computed as
```none
0.5 * `step_size` * volatility_parts * `target_log_prob_fn(current_state)`
+ `step_size` * `grads_volatility`
```
where `volatility_parts` = `volatility_fn(current_state)**2` and
`grads_volatility` is a gradient of `volatility_parts` at the `current_state`.
Args:
step_size_parts: Python `list` of `Tensor`s representing the step size for
Euler-Maruyama method. Must broadcast with the shape of
`volatility_parts`. Larger step sizes lead to faster progress, but
too-large step sizes make rejection exponentially more likely. When
possible, it's often helpful to match per-variable step sizes to the
standard deviations of the target distribution in each variable.
volatility_parts: Python `list` of `Tensor`s representing the value of
`volatility_fn(*state_parts)`.
grads_volatility: Python list of `Tensor`s representing the value of the
gradient of `volatility_parts**2` wrt the state of the chain.
grads_target_log_prob: Python list of `Tensor`s representing
gradient of `target_log_prob_fn(*state_parts`) wrt `state_parts`. Must
have same shape as `volatility_parts`.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'mala_get_drift').
Returns:
drift_parts: Tensor or Python list of `Tensor`s representing the
state(s) of the Markov chain(s) at each result step. Has same shape as
input `current_state_parts`.
"""
with tf.compat.v1.name_scope(name, 'mala_get_drift', [
step_size_parts, volatility_parts, grads_volatility, grads_target_log_prob
]):
drift_parts = []
for step_size, volatility, grad_volatility, grad_target_log_prob in (
zip(step_size_parts,
volatility_parts,
grads_volatility,
grads_target_log_prob)):
volatility_squared = tf.square(volatility)
drift = 0.5 * step_size * (volatility_squared * grad_target_log_prob
+ grad_volatility)
drift_parts.append(drift)
return drift_parts
|
Compute diffusion drift at the current location `current_state`.
The drift of the diffusion is computed as
```none
0.5 * `step_size` * volatility_parts * `target_log_prob_fn(current_state)`
+ `step_size` * `grads_volatility`
```
where `volatility_parts` = `volatility_fn(current_state)**2` and
`grads_volatility` is a gradient of `volatility_parts` at the `current_state`.
Args:
step_size_parts: Python `list` of `Tensor`s representing the step size for
Euler-Maruyama method. Must broadcast with the shape of
`volatility_parts`. Larger step sizes lead to faster progress, but
too-large step sizes make rejection exponentially more likely. When
possible, it's often helpful to match per-variable step sizes to the
standard deviations of the target distribution in each variable.
volatility_parts: Python `list` of `Tensor`s representing the value of
`volatility_fn(*state_parts)`.
grads_volatility: Python list of `Tensor`s representing the value of the
gradient of `volatility_parts**2` wrt the state of the chain.
grads_target_log_prob: Python list of `Tensor`s representing
gradient of `target_log_prob_fn(*state_parts`) wrt `state_parts`. Must
have same shape as `volatility_parts`.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'mala_get_drift').
Returns:
drift_parts: Tensor or Python list of `Tensor`s representing the
state(s) of the Markov chain(s) at each result step. Has same shape as
input `current_state_parts`.
|
def kappa_statistic(self):
r"""Return κ statistic.
The κ statistic is defined as:
:math:`\kappa = \frac{accuracy - random~ accuracy}
{1 - random~ accuracy}`
The κ statistic compares the performance of the classifier relative to
the performance of a random classifier. :math:`\kappa` = 0 indicates
performance identical to random. :math:`\kappa` = 1 indicates perfect
predictive success. :math:`\kappa` = -1 indicates perfect predictive
failure.
Returns
-------
float
The κ statistic of the confusion table
Example
-------
>>> ct = ConfusionTable(120, 60, 20, 30)
>>> ct.kappa_statistic()
0.5344129554655871
"""
if self.population() == 0:
return float('NaN')
random_accuracy = (
(self._tn + self._fp) * (self._tn + self._fn)
+ (self._fn + self._tp) * (self._fp + self._tp)
) / self.population() ** 2
return (self.accuracy() - random_accuracy) / (1 - random_accuracy)
|
r"""Return κ statistic.
The κ statistic is defined as:
:math:`\kappa = \frac{accuracy - random~ accuracy}
{1 - random~ accuracy}`
The κ statistic compares the performance of the classifier relative to
the performance of a random classifier. :math:`\kappa` = 0 indicates
performance identical to random. :math:`\kappa` = 1 indicates perfect
predictive success. :math:`\kappa` = -1 indicates perfect predictive
failure.
Returns
-------
float
The κ statistic of the confusion table
Example
-------
>>> ct = ConfusionTable(120, 60, 20, 30)
>>> ct.kappa_statistic()
0.5344129554655871
|
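Working the doctest numbers by hand (assuming the constructor order is tp=120, tn=60, fp=20, fn=30, which reproduces the doctest value): population = 230, accuracy = 180/230 = 0.7826, random accuracy = ((60+20)(60+30) + (30+120)(20+120)) / 230^2 = 28200/52900 = 0.5331, so kappa = (0.7826 - 0.5331) / (1 - 0.5331) = 0.5344.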
def run_in_order(l, show_output=True, show_err=True, ignore_err=False,
args=(), **kwargs):
'''
Processes each element of l in order:
if it is a string: execute it as a shell command
elif it is a callable, call it with *args, **kwargs
l-->list: Each elem is either a string (shell command) or callable
Any other type is ignored
show_output-->boolean: Show stdout of shell commands
Does not affect callables
show_err-->Boolean: Show stderr of shell commands
Does not affect callables
ignore_err-->boolean: Continue after exception or shell command
with return code != 0
Returns-->Nothing
if ignore_err == False, exceptions are re-raised, hence shown
------------------------------------------------------------------
show_output show_err ignore_err stdout stderr exception continue
trace
------------------------------------------------------------------
True True False SHOW SHOW SHOW NO
True False False SHOW HIDE SHOW NO
False True False HIDE SHOW SHOW NO
False False False HIDE HIDE SHOW NO
True True True SHOW SHOW SHOW YES
True False True SHOW HIDE HIDE YES
False True True HIDE SHOW SHOW YES
False False True HIDE HIDE HIDE YES
------------------------------------------------------------------
----------- DEFAULT ----------- SHOW SHOW SHOW NO
------------------------------------------------------------------
'''
# Set defaults
if show_output is None:
show_output = True
if show_err is None:
show_err = True
if ignore_err is None:
ignore_err = False
if args is None:
args = ()
for c in l:
try:
if isinstance(c, str):
devnull = open(os.devnull, 'w')
if not show_err:
stderr = devnull
else:
stderr = None
if not show_output:
stdout = devnull
else:
stdout = None
retcode = subprocess.call(
c, shell=True, stdout=stdout, stderr=stderr)
if not ignore_err and retcode != 0:
break
elif hasattr(c, '__call__'):
c(*args, **kwargs)
except:
if not ignore_err:
raise
if show_err:
sys.stderr.write(traceback.format_exc())
|
Processes each element of l in order:
if it is a string: execute it as a shell command
elif it is a callable, call it with *args, **kwargs
l-->list: Each elem is either a string (shell command) or callable
Any other type is ignored
show_output-->boolean: Show stdout of shell commands
Does not affect callables
show_err-->Boolean: Show stderr of shell commands
Does not affect callables
ignore_err-->boolean: Continue after exception or shell command
with return code != 0
Returns-->Nothing
if ignore_err == False, exceptions are re-raised, hence shown
------------------------------------------------------------------
show_output show_err ignore_err stdout stderr exception continue
trace
------------------------------------------------------------------
True True False SHOW SHOW SHOW NO
True False False SHOW HIDE SHOW NO
False True False HIDE SHOW SHOW NO
False False False HIDE HIDE SHOW NO
True True True SHOW SHOW SHOW YES
True False True SHOW HIDE HIDE YES
False True True HIDE SHOW SHOW YES
False False True HIDE HIDE HIDE YES
------------------------------------------------------------------
----------- DEFAULT ----------- SHOW SHOW SHOW NO
------------------------------------------------------------------
|
def _maybe_unique_host(onion):
"""
:param onion: IAuthenticatedOnionClients provider
:returns: a .onion hostname if all clients have the same name or
raises ValueError otherwise
"""
hosts = [
onion.get_client(nm).hostname
for nm in onion.client_names()
]
if not hosts:
raise ValueError(
"Can't access .onion_uri because there are no clients"
)
host = hosts[0]
for h in hosts[1:]:
if h != host:
raise ValueError(
"Cannot access .onion_uri for stealth-authenticated services "
"because each client has a unique URI"
)
return host
|
:param onion: IAuthenticatedOnionClients provider
:returns: a .onion hostname if all clients have the same name or
raises ValueError otherwise
|
def _update_doc_in_index(self, index_writer, doc):
"""
Add/Update a document in the index
"""
all_labels = set(self.label_list)
doc_labels = set(doc.labels)
new_labels = doc_labels.difference(all_labels)
# can happen when we recreate the index from scratch
for label in new_labels:
self.create_label(label)
last_mod = datetime.datetime.fromtimestamp(doc.last_mod)
docid = str(doc.docid)
dochash = doc.get_docfilehash()
dochash = (u"%X" % dochash)
doc_txt = doc.get_index_text()
assert(isinstance(doc_txt, str))
labels_txt = doc.get_index_labels()
assert(isinstance(labels_txt, str))
# append labels to doc txt, because we usually search on doc_txt
doc_txt += " " + labels_txt
query = whoosh.query.Term("docid", docid)
index_writer.delete_by_query(query)
index_writer.update_document(
docid=docid,
doctype=doc.doctype,
docfilehash=dochash,
content=strip_accents(doc_txt),
label=strip_accents(labels_txt),
date=doc.date,
last_read=last_mod
)
return True
|
Add/Update a document in the index
|
def _create_breadcrumbs(self, relpath):
"""Create filesystem browsing breadcrumb navigation.
That is, make each path segment into a clickable element that takes you to that dir.
"""
if relpath == '.':
breadcrumbs = []
else:
path_parts = [os.path.basename(self._root)] + relpath.split(os.path.sep)
path_links = ['/'.join(path_parts[1:i + 1]) for i, name in enumerate(path_parts)]
breadcrumbs = [{'link_path': link_path, 'name': name}
for link_path, name in zip(path_links, path_parts)]
return breadcrumbs
|
Create filesystem browsing breadcrumb navigation.
That is, make each path segment into a clickable element that takes you to that dir.
|
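For instance (an illustrative walk-through, not from the source): with self._root = '/srv/www' and relpath = 'static/css', path_parts becomes ['www', 'static', 'css'] and the breadcrumbs are [{'link_path': '', 'name': 'www'}, {'link_path': 'static', 'name': 'static'}, {'link_path': 'static/css', 'name': 'css'}], so each segment links to the cumulative path below the root.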
def process(self, salt_data, token, opts):
'''
Process events and publish data
'''
parts = salt_data['tag'].split('/')
if len(parts) < 2:
return
# TBD: Simplify these conditional expressions
if parts[1] == 'job':
if parts[3] == 'new':
self.process_new_job_event(salt_data)
if salt_data['data']['fun'] == 'grains.items':
self.minions = {}
elif parts[3] == 'ret':
self.process_ret_job_event(salt_data)
if salt_data['data']['fun'] == 'grains.items':
self.process_minion_update(salt_data)
if parts[1] == 'key':
self.process_key_event(salt_data)
if parts[1] == 'presence':
self.process_presence_events(salt_data, token, opts)
|
Process events and publish data
|
def sec_overview(self):
"""
Generate the data for the Overview section in the report
:return:
"""
""" Data sources overview: table with metric summaries"""
metrics = self.config['overview']['activity_metrics']
file_name = self.config['overview']['activity_file_csv']
data_path = os.path.join(self.data_dir, "data")
file_name = os.path.join(data_path, file_name)
logger.debug("CSV file %s generation in progress", file_name)
csv = 'metricsnames,netvalues,relativevalues,datasource\n'
for metric in metrics:
# comparing current metric month count with previous month
es_index = self.get_metric_index(metric)
ds = metric.ds.name
m = metric(self.es_url, es_index, start=self.start, end=self.end)
(last, percentage) = m.get_trend()
csv += "%s,%i,%i,%s" % (metric.name, last, percentage, ds)
csv += "\n"
os.makedirs(os.path.dirname(file_name), exist_ok=True)
with open(file_name, "w") as f:
# Hack, we need to fix LaTeX escaping in a central place
csv = csv.replace("_", r"\_")
f.write(csv)
logger.debug("CSV file: %s was generated", file_name)
"""
Git Authors:
description: average number of developers per month by quarters
(so we have the average number of developers per month during
those three months). If the approach is to work at the level of month,
then just the number of developers per month.
"""
author = self.config['overview']['author_metrics'][0]
csv_labels = 'labels,' + author.id
file_label = author.ds.name + "_" + author.id
title_label = author.name + " per " + self.interval
self.__create_csv_eps(author, None, csv_labels, file_label, title_label)
logger.debug("CSV file %s generation in progress", file_name)
bmi = []
ttc = [] # time to close
csv_labels = ''
for m in self.config['overview']['bmi_metrics']:
metric = m(self.es_url, self.get_metric_index(m),
start=self.end_prev_month, end=self.end)
csv_labels += m.id + ","
bmi.append(metric.get_agg())
for m in self.config['overview']['time_to_close_metrics']:
metric = m(self.es_url, self.get_metric_index(m),
start=self.end_prev_month, end=self.end)
csv_labels += m.id + ","
ttc.append(metric.get_agg())
csv = csv_labels[:-1] + "\n" # remove last comma
csv = csv.replace("_", "")
for val in bmi:
csv += "%s," % (self.str_val(val))
for val in ttc:
csv += "%s," % (self.str_val(val))
if csv[-1] == ',':
csv = csv[:-1]
data_path = os.path.join(self.data_dir, "data")
file_name = os.path.join(data_path, 'efficiency.csv')
os.makedirs(os.path.dirname(file_name), exist_ok=True)
with open(file_name, "w") as f:
f.write(csv)
logger.debug("CSV file: %s was generated", file_name)
|
Generate the data for the Overview section in the report
:return:
|
def path_regex(self):
"""Return the regex for the path to the build folder."""
regex = r'releases/%(VERSION)s/%(PLATFORM)s/%(LOCALE)s/'
return regex % {'LOCALE': self.locale,
'PLATFORM': self.platform_regex,
'VERSION': self.version}
|
Return the regex for the path to the build folder.
|
def is_invalid_params(func, *args, **kwargs):
""" Check, whether function 'func' accepts parameters 'args', 'kwargs'.
NOTE: This method is called after func(*args, **kwargs) has raised TypeError;
it is aimed to distinguish a TypeError caused by invalid parameters from a
TypeError raised inside the function.
.. versionadded: 1.9.0
"""
# For builtin functions inspect.getargspec(func) raises an error. If a builtin
# function generates TypeError, it is because of wrong parameters.
if not inspect.isfunction(func):
return True
funcargs, varargs, varkwargs, defaults = inspect.getargspec(func)
if defaults:
funcargs = funcargs[:-len(defaults)]
if args and len(args) != len(funcargs):
return True
if kwargs and set(kwargs.keys()) != set(funcargs):
return True
if not args and not kwargs and funcargs:
return True
return False
|
Check, whether function 'func' accepts parameters 'args', 'kwargs'.
NOTE: This method is called after func(*args, **kwargs) has raised TypeError;
it is aimed to distinguish a TypeError caused by invalid parameters from a
TypeError raised inside the function.
.. versionadded: 1.9.0
|
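A quick illustration (hypothetical function, not from the source; assumes a Python version where inspect.getargspec is still available) of how the check separates the two kinds of TypeError:

def add(a, b):
    return a + b

# add(1) raises TypeError because an argument is missing:
print(is_invalid_params(add, 1))        # True  -> the parameters themselves were wrong
# add(1, '2') raises TypeError inside the function body (int + str):
print(is_invalid_params(add, 1, '2'))   # False -> blame the function, not the call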
def submit_mult_calcs(calc_suite_specs, exec_options=None):
"""Generate and execute all specified computations.
Once the calculations are prepped and submitted for execution, any
calculation that triggers any exception or error is skipped, and the rest
of the calculations proceed unaffected. This prevents an error in a single
calculation from crashing a large suite of calculations.
Parameters
----------
calc_suite_specs : dict
The specifications describing the full set of calculations to be
generated and potentially executed. Accepted keys and their values:
library : module or package comprising an aospy object library
The aospy object library for these calculations.
projects : list of aospy.Proj objects
The projects to permute over.
models : 'all', 'default', or list of aospy.Model objects
The models to permute over. If 'all', use all models in the
``models`` attribute of each ``Proj``. If 'default', use all
models in the ``default_models`` attribute of each ``Proj``.
runs : 'all', 'default', or list of aospy.Run objects
The runs to permute over. If 'all', use all runs in the
``runs`` attribute of each ``Model``. If 'default', use all
runs in the ``default_runs`` attribute of each ``Model``.
variables : list of aospy.Var objects
The variables to be calculated.
regions : 'all' or list of aospy.Region objects
The region(s) over which any regional reductions will be performed.
If 'all', use all regions in the ``regions`` attribute of each
``Proj``.
date_ranges : 'default' or a list of tuples
The range of dates (inclusive) over which to perform calculations.
If 'default', use the ``default_start_date`` and
``default_end_date`` attribute of each ``Run``. Else provide a
list of tuples, each containing a pair of start and end dates,
such as ``date_ranges=[(start, end)]`` where ``start`` and
``end`` are each ``datetime.datetime`` objects, partial
datetime strings (e.g. '0001'), ``np.datetime64`` objects, or
``cftime.datetime`` objects.
output_time_intervals : {'ann', season-string, month-integer}
The sub-annual time interval over which to aggregate.
- 'ann' : Annual mean
- season-string : E.g. 'JJA' for June-July-August
- month-integer : 1 for January, 2 for February, etc. Each one is
a separate reduction, e.g. [1, 2] would produce averages (or
other specified time reduction) over all Januaries, and
separately over all Februaries.
output_time_regional_reductions : list of reduction string identifiers
Unlike most other keys, these are not permuted over when creating
the :py:class:`aospy.Calc` objects that execute the calculations;
each :py:class:`aospy.Calc` performs all of the specified
reductions. Accepted string identifiers are:
- Gridpoint-by-gridpoint output:
- 'av' : Gridpoint-by-gridpoint time-average
- 'std' : Gridpoint-by-gridpoint temporal standard deviation
- 'ts' : Gridpoint-by-gridpoint time-series
- Averages over each region specified via `region`:
- 'reg.av', 'reg.std', 'reg.ts' : analogous to 'av', 'std', 'ts'
output_vertical_reductions : {None, 'vert_av', 'vert_int'}, optional
How to reduce the data vertically:
- None : no vertical reduction
- 'vert_av' : mass-weighted vertical average
- 'vert_int' : mass-weighted vertical integral
input_time_intervals : {'annual', 'monthly', 'daily', '#hr'}
A string specifying the time resolution of the input data. In
'#hr' above, the '#' stands for a number, e.g. 3hr or 6hr, for
sub-daily output. These are the suggested specifiers, but others
may be used if they are also used by the DataLoaders for the given
Runs.
input_time_datatypes : {'inst', 'ts', 'av'}
What the time axis of the input data represents:
- 'inst' : Timeseries of instantaneous values
- 'ts' : Timeseries of averages over the period of each time-index
- 'av' : A single value averaged over a date range
input_vertical_datatypes : {False, 'pressure', 'sigma'}, optional
The vertical coordinate system used by the input data:
- False : not defined vertically
- 'pressure' : pressure coordinates
- 'sigma' : hybrid sigma-pressure coordinates
input_time_offsets : {None, dict}, optional
How to offset input data in time to correct for metadata errors
- None : no time offset applied
- dict : e.g. ``{'hours': -3}`` to offset times by -3 hours
See :py:meth:`aospy.utils.times.apply_time_offset`.
exec_options : dict or None (default None)
Options regarding how the calculations are reported, submitted, and
saved. If None, default settings are used for all options. Currently
supported options (each should be either `True` or `False`):
- prompt_verify : (default False) If True, print summary of
calculations to be performed and prompt user to confirm before
submitting for execution.
- parallelize : (default False) If True, submit calculations in
parallel.
- client : distributed.Client or None (default None) The
dask.distributed Client used to schedule computations. If None
and parallelize is True, a LocalCluster will be started.
- write_to_tar : (default True) If True, write results of calculations
to .tar files, one for each :py:class:`aospy.Run` object.
These tar files have a directory structure identical to that of the
standard output, relative to their root directory, which is
specified via the `tar_direc_out` argument of each Proj
object's instantiation.
Returns
-------
A list of the return values from each :py:meth:`aospy.Calc.compute` call
If a calculation ran without error, this value is the
:py:class:`aospy.Calc` object itself, with the results of its
calculations saved in its ``data_out`` attribute. ``data_out`` is a
dictionary, with the keys being the temporal-regional reduction
identifiers (e.g. 'reg.av'), and the values being the corresponding
result.
If any error occurred during a calculation, the return value is None.
Raises
------
AospyException
If the ``prompt_verify`` option is set to True and the user does not
respond affirmatively to the prompt.
"""
if exec_options is None:
exec_options = dict()
if exec_options.pop('prompt_verify', False):
print(_print_suite_summary(calc_suite_specs))
_user_verify()
calc_suite = CalcSuite(calc_suite_specs)
calcs = calc_suite.create_calcs()
if not calcs:
raise AospyException(
"The specified combination of parameters yielded zero "
"calculations. Most likely, one of the parameters is "
"inadvertently empty."
)
return _exec_calcs(calcs, **exec_options)
|
Generate and execute all specified computations.
Once the calculations are prepped and submitted for execution, any
calculation that triggers any exception or error is skipped, and the rest
of the calculations proceed unaffected. This prevents an error in a single
calculation from crashing a large suite of calculations.
Parameters
----------
calc_suite_specs : dict
The specifications describing the full set of calculations to be
generated and potentially executed. Accepted keys and their values:
library : module or package comprising an aospy object library
The aospy object library for these calculations.
projects : list of aospy.Proj objects
The projects to permute over.
models : 'all', 'default', or list of aospy.Model objects
The models to permute over. If 'all', use all models in the
``models`` attribute of each ``Proj``. If 'default', use all
models in the ``default_models`` attribute of each ``Proj``.
runs : 'all', 'default', or list of aospy.Run objects
The runs to permute over. If 'all', use all runs in the
``runs`` attribute of each ``Model``. If 'default', use all
runs in the ``default_runs`` attribute of each ``Model``.
variables : list of aospy.Var objects
The variables to be calculated.
regions : 'all' or list of aospy.Region objects
The region(s) over which any regional reductions will be performed.
If 'all', use all regions in the ``regions`` attribute of each
``Proj``.
date_ranges : 'default' or a list of tuples
The range of dates (inclusive) over which to perform calculations.
If 'default', use the ``default_start_date`` and
``default_end_date`` attribute of each ``Run``. Else provide a
list of tuples, each containing a pair of start and end dates,
such as ``date_ranges=[(start, end)]`` where ``start`` and
``end`` are each ``datetime.datetime`` objects, partial
datetime strings (e.g. '0001'), ``np.datetime64`` objects, or
``cftime.datetime`` objects.
output_time_intervals : {'ann', season-string, month-integer}
The sub-annual time interval over which to aggregate.
- 'ann' : Annual mean
- season-string : E.g. 'JJA' for June-July-August
- month-integer : 1 for January, 2 for February, etc. Each one is
a separate reduction, e.g. [1, 2] would produce averages (or
other specified time reduction) over all Januaries, and
separately over all Februaries.
output_time_regional_reductions : list of reduction string identifiers
Unlike most other keys, these are not permuted over when creating
the :py:class:`aospy.Calc` objects that execute the calculations;
each :py:class:`aospy.Calc` performs all of the specified
reductions. Accepted string identifiers are:
- Gridpoint-by-gridpoint output:
- 'av' : Gridpoint-by-gridpoint time-average
- 'std' : Gridpoint-by-gridpoint temporal standard deviation
- 'ts' : Gridpoint-by-gridpoint time-series
- Averages over each region specified via `region`:
- 'reg.av', 'reg.std', 'reg.ts' : analogous to 'av', 'std', 'ts'
output_vertical_reductions : {None, 'vert_av', 'vert_int'}, optional
How to reduce the data vertically:
- None : no vertical reduction
- 'vert_av' : mass-weighted vertical average
- 'vert_int' : mass-weighted vertical integral
input_time_intervals : {'annual', 'monthly', 'daily', '#hr'}
A string specifying the time resolution of the input data. In
'#hr' above, the '#' stands for a number, e.g. 3hr or 6hr, for
sub-daily output. These are the suggested specifiers, but others
may be used if they are also used by the DataLoaders for the given
Runs.
input_time_datatypes : {'inst', 'ts', 'av'}
What the time axis of the input data represents:
- 'inst' : Timeseries of instantaneous values
- 'ts' : Timeseries of averages over the period of each time-index
- 'av' : A single value averaged over a date range
input_vertical_datatypes : {False, 'pressure', 'sigma'}, optional
The vertical coordinate system used by the input data:
- False : not defined vertically
- 'pressure' : pressure coordinates
- 'sigma' : hybrid sigma-pressure coordinates
input_time_offsets : {None, dict}, optional
How to offset input data in time to correct for metadata errors
- None : no time offset applied
- dict : e.g. ``{'hours': -3}`` to offset times by -3 hours
See :py:meth:`aospy.utils.times.apply_time_offset`.
exec_options : dict or None (default None)
Options regarding how the calculations are reported, submitted, and
saved. If None, default settings are used for all options. Currently
supported options (each should be either `True` or `False`):
- prompt_verify : (default False) If True, print summary of
calculations to be performed and prompt user to confirm before
submitting for execution.
- parallelize : (default False) If True, submit calculations in
parallel.
- client : distributed.Client or None (default None) The
dask.distributed Client used to schedule computations. If None
and parallelize is True, a LocalCluster will be started.
- write_to_tar : (default True) If True, write results of calculations
to .tar files, one for each :py:class:`aospy.Run` object.
These tar files have a directory structure identical to that of the
standard output, relative to their root directory, which is
specified via the `tar_direc_out` argument of each Proj
object's instantiation.
Returns
-------
A list of the return values from each :py:meth:`aospy.Calc.compute` call
If a calculation ran without error, this value is the
:py:class:`aospy.Calc` object itself, with the results of its
calculations saved in its ``data_out`` attribute. ``data_out`` is a
dictionary, with the keys being the temporal-regional reduction
identifiers (e.g. 'reg.av'), and the values being the corresponding
result.
If any error occurred during a calculation, the return value is None.
Raises
------
AospyException
If the ``prompt_verify`` option is set to True and the user does not
respond affirmatively to the prompt.
|
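A minimal sketch of the two dicts the function above expects. Every aospy object reference below is a hypothetical placeholder (a real object library would supply the Proj and Var instances); the keys mirror the docstring, and the actual submission call is left commented out.
import datetime

# Hypothetical placeholders standing in for real aospy objects; in practice
# these come from your own object library module.
obj_lib = None   # module comprising the aospy object library
my_proj = None   # an aospy.Proj instance from that library
my_var = None    # an aospy.Var instance from that library

calc_suite_specs = {
    'library': obj_lib,
    'projects': [my_proj],
    'models': 'default',
    'runs': 'default',
    'variables': [my_var],
    'regions': 'all',
    'date_ranges': [(datetime.datetime(2000, 1, 1),
                     datetime.datetime(2009, 12, 31))],
    'output_time_intervals': ['ann'],
    'output_time_regional_reductions': ['av', 'reg.av'],
    'output_vertical_reductions': [None],
    'input_time_intervals': ['monthly'],
    'input_time_datatypes': ['ts'],
    'input_vertical_datatypes': [False],
    'input_time_offsets': [None],
}
exec_options = {'prompt_verify': True, 'parallelize': False,
                'write_to_tar': True}
# results = <function documented above>(calc_suite_specs, exec_options)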
def dataset_to_stream(dataset, input_name, num_chunks=0, append_targets=False):
"""Takes a tf.Dataset and creates a numpy stream of ready batches."""
for example in tfds.as_numpy(dataset):
inp, out = example[0][input_name], example[1]
if len(out.shape) > 1 and out.shape[-1] == 1:
out = np.squeeze(out, axis=-1)
if num_chunks > 0:
inp = np.split(inp, num_chunks, axis=1)
out = np.split(out, num_chunks, axis=1)
if append_targets:
inp = (inp, out)
yield inp, out
|
Takes a tf.Dataset and creates a numpy stream of ready batches.
|
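A self-contained numpy sketch of what the generator above does to one example; the 'inputs' key and the shapes are made up for the demo, and the real stream comes from tfds.as_numpy.
import numpy as np

example = ({'inputs': np.arange(12).reshape(3, 4)},
           np.arange(12).reshape(3, 4, 1))
inp, out = example[0]['inputs'], example[1]
if len(out.shape) > 1 and out.shape[-1] == 1:
    out = np.squeeze(out, axis=-1)        # (3, 4, 1) -> (3, 4)
# num_chunks=2 splits the batch along axis 1:
inp = np.split(inp, 2, axis=1)            # two arrays of shape (3, 2)
out = np.split(out, 2, axis=1)
# append_targets=True would make the generator yield ((inp, out), out)
print([a.shape for a in inp], [a.shape for a in out])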
def dump_BSE_data_in_GW_run(self, BSE_dump=True):
"""
:param BSE_dump: boolean
:return: None; sets the "do_bse" variable in cell.in to 1 if ``BSE_dump`` is True, otherwise to 0
"""
if BSE_dump:
self.BSE_TDDFT_options.update(do_bse=1, do_tddft=0)
else:
self.BSE_TDDFT_options.update(do_bse=0, do_tddft=0)
|
:param BSE_dump: boolean
:return: None; sets the "do_bse" variable in cell.in to 1 if ``BSE_dump`` is True, otherwise to 0
|
def rebuild(self):
"""
Rebuilds the scene based on the current settings.
:param start | <QDate>
end | <QDate>
"""
gantt = self.ganttWidget()
scale = gantt.timescale()
rect = self.sceneRect()
header = gantt.treeWidget().header()
# define the rendering options
options = {}
options['start'] = gantt.dateStart()
options['end'] = gantt.dateEnd()
options['cell_width'] = gantt.cellWidth()
options['cell_height'] = gantt.cellHeight()
options['rect'] = rect
options['height'] = rect.height()
options['header_height'] = header.height()
if not header.isVisible():
options['header_height'] = 0
opt = XGanttRenderOptions(**options)
# rebuild the minute timescale
if scale in (gantt.Timescale.Minute, gantt.Timescale.Hour):
opt.start = gantt.dateTimeStart()
opt.end = gantt.dateTimeEnd()
self.rebuildHour(opt)
# rebuild the day timescale
elif scale == gantt.Timescale.Day:
self.rebuildDay(opt)
# rebuild the week timescale
elif scale == gantt.Timescale.Week:
self.rebuildWeek(opt)
self.rebuildTiles()
|
Rebuilds the scene based on the current settings.
:param start | <QDate>
end | <QDate>
|
def main(graph):
"""
Create and drop databases.
"""
args = parse_args(graph)
if args.drop:
drop_all(graph)
create_all(graph)
|
Create and drop databases.
|
def add_node(self, op_type, inputs, outputs, op_domain='', op_version=1, **attrs):
'''
Add a NodeProto into the node list of the final ONNX model. If the input operator's domain-version information
cannot be found in our domain-version pool (a Python set), we may add it.
:param op_type: A string (e.g., Pool and Conv) indicating the type of the NodeProto
:param inputs: A list of strings. They are the input variables' names of the considered NodeProto
:param outputs: A list of strings. They are the output variables' names of the considered NodeProto
:param op_domain: The domain name (e.g., ai.onnx.ml) of the operator we are trying to add.
:param op_version: The version number (e.g., 0 and 1) of the operator we are trying to add.
:param attrs: A Python dictionary. Keys and values are attributes' names and attributes' values, respectively.
'''
if isinstance(inputs, (six.string_types, six.text_type)):
inputs = [inputs]
if isinstance(outputs, (six.string_types, six.text_type)):
outputs = [outputs]
if not isinstance(inputs, list) or not all(isinstance(s, (six.string_types, six.text_type)) for s in inputs):
type_list = ','.join(list(str(type(s)) for s in inputs))
raise ValueError('Inputs must be a list of strings but got [%s]' % type_list)
if not isinstance(outputs, list) or not all(isinstance(s, (six.string_types, six.text_type)) for s in outputs):
type_list = ','.join(list(str(type(s)) for s in outputs))
raise ValueError('Outputs must be a list of strings but got [%s]' % type_list)
for k, v in attrs.items():
if v is None:
raise ValueError('Failed to create ONNX node. Undefined attribute pair (%s, %s) found' % (k, v))
node = helper.make_node(op_type, inputs, outputs, **attrs)
node.domain = op_domain
self.node_domain_version_pair_sets.add((op_domain, op_version))
self.nodes.append(node)
|
Add a NodeProto into the node list of the final ONNX model. If the input operator's domain-version information
cannot be found in our domain-version pool (a Python set), we may add it.
:param op_type: A string (e.g., Pool and Conv) indicating the type of the NodeProto
:param inputs: A list of strings. They are the input variables' names of the considered NodeProto
:param outputs: A list of strings. They are the output variables' names of the considered NodeProto
:param op_domain: The domain name (e.g., ai.onnx.ml) of the operator we are trying to add.
:param op_version: The version number (e.g., 0 and 1) of the operator we are trying to add.
:param attrs: A Python dictionary. Keys and values are attributes' names and attributes' values, respectively.
|
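A short usage sketch; `container` is a hypothetical instance of the class defining add_node above. Relu takes no attributes, while Gemm's alpha/beta are passed as keyword attributes.
# `container` is a hypothetical instance of the class that defines add_node.
container.add_node('Relu', 'X', 'relu_out', op_domain='', op_version=6)
container.add_node('Gemm', ['relu_out', 'W', 'B'], ['Z'],
                   op_domain='', op_version=7, alpha=1.0, beta=1.0)
# A None-valued attribute raises ValueError, and non-string inputs/outputs
# are rejected with a descriptive message.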
def validate(self, value):
"""Make sure that the inspected value is of type `list` or `tuple` """
if not isinstance(value, (list, tuple)) or isinstance(value, str_types):
self.raise_error('Only lists and tuples may be used in the ListField vs provided {0}'
.format(type(value).__name__))
super(ListField, self).validate(value)
|
Make sure that the inspected value is of type `list` or `tuple`
|
def prepare_native_return_state(native_state):
"""
Hook target for native function call returns.
Recovers and stores the return value from native memory and toggles the
state, s.t. execution continues in the Soot engine.
"""
javavm_simos = native_state.project.simos
ret_state = native_state.copy()
# set successor flags
ret_state.regs._ip = ret_state.callstack.ret_addr
ret_state.scratch.guard = ret_state.solver.true
ret_state.history.jumpkind = 'Ijk_Ret'
# if available, lookup the return value in native memory
ret_var = ret_state.callstack.invoke_return_variable
if ret_var is not None:
# get return symbol from native state
native_cc = javavm_simos.get_native_cc()
ret_symbol = native_cc.get_return_val(native_state).to_claripy()
# convert value to java type
if ret_var.type in ArchSoot.primitive_types:
# return value has a primitive type
# => we need to manually cast the return value to the correct size, as this
# would be usually done by the java callee
ret_value = javavm_simos.cast_primitive(ret_state, ret_symbol,
to_type=ret_var.type)
else:
# return value has a reference type
# => ret_symbol is an opaque ref
# => lookup corresponding java reference
ret_value = ret_state.jni_references.lookup(ret_symbol)
else:
ret_value = None
# teardown return state
SimEngineSoot.prepare_return_state(ret_state, ret_value)
# finally, delete all local references
ret_state.jni_references.clear_local_references()
return [ret_state]
|
Hook target for native function call returns.
Recovers and stores the return value from native memory and toggles the
state, s.t. execution continues in the Soot engine.
|
def vector_similarity(self, vector, items):
"""Compute the similarity between a vector and a set of items."""
vector = self.normalize(vector)
items_vec = np.stack([self.norm_vectors[self.items[x]] for x in items])
return vector.dot(items_vec.T)
|
Compute the similarity between a vector and a set of items.
|
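A self-contained numpy sketch of the computation above: normalize the query vector, stack the (already unit-length) item vectors, and take dot products; the toy vocabulary is invented for the demo.
import numpy as np

norm_vectors = {'cat': np.array([1.0, 0.0]),
                'dog': np.array([0.6, 0.8])}     # already unit length
query = np.array([3.0, 4.0])
query = query / np.linalg.norm(query)            # stand-in for self.normalize
items_vec = np.stack([norm_vectors[w] for w in ('cat', 'dog')])
print(query.dot(items_vec.T))                    # [0.6 1. ] cosine similarities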
def get_room(self, id):
""" Get room.
Returns:
:class:`Room`. Room
"""
if id not in self._rooms:
self._rooms[id] = Room(self, id)
return self._rooms[id]
|
Get room.
Returns:
:class:`Room`. Room
|
def _delete_ubridge_connection(self, adapter_number):
"""
Deletes a connection in uBridge.
:param adapter_number: adapter number
"""
vnet = "ethernet{}.vnet".format(adapter_number)
if vnet not in self._vmx_pairs:
raise VMwareError("vnet {} not in VMX file".format(vnet))
yield from self._ubridge_send("bridge delete {name}".format(name=vnet))
|
Deletes a connection in uBridge.
:param adapter_number: adapter number
|
def delete(self, deleteSubtasks=False):
"""Delete this issue from the server.
:param deleteSubtasks: if the issue has subtasks, this argument must be set to true for the call to succeed.
:type deleteSubtasks: bool
"""
super(Issue, self).delete(params={'deleteSubtasks': deleteSubtasks})
|
Delete this issue from the server.
:param deleteSubtasks: if the issue has subtasks, this argument must be set to true for the call to succeed.
:type deleteSubtasks: bool
|
def get_logger(name, level=0):
"""Setup a logging instance"""
level = 0 if not isinstance(level, int) else level
level = 0 if level < 0 else level
level = 4 if level > 4 else level
console = logging.StreamHandler()
level = [logging.NOTSET, logging.ERROR, logging.WARN, logging.INFO,
logging.DEBUG][level]
console.setLevel(level)
formatter = logging.Formatter(LOGGING_FORMAT)
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
return logging.getLogger(name)
|
Setup a logging instance
|
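A usage sketch for the logger factory above (it assumes the module-level LOGGING_FORMAT string exists). Note that the factory configures only the console handler's level, so the root logger's default WARNING threshold still applies.
log = get_logger(__name__, level=2)   # 2 -> logging.WARN on the console handler
log.warning("emitted: passes the root logger (WARNING) and the handler (WARN)")
log.info("dropped: only the handler level is set, so INFO is still filtered "
         "by the root logger's default WARNING level")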
def fit(self, X, y=None, **fit_params):
"""
Fit Unary Math Operator
:param y:
:return:
"""
if self.transform_type not in self.valid_transforms:
warnings.warn("Invalid transform type.", stacklevel=2)
return self
|
Fit Unary Math Operator
:param y:
:return:
|
def export(self, path, column_names=None, byteorder="=", shuffle=False, selection=False, progress=None, virtual=False, sort=None, ascending=True):
"""Exports the DataFrame to a file written with arrow
:param DataFrameLocal df: DataFrame to export
:param str path: path for file
:param lis[str] column_names: list of column names to export or None for all columns
:param str byteorder: = for native, < for little endian and > for big endian (not supported for fits)
:param bool shuffle: export rows in random order
:param bool selection: export selection or not
:param progress: progress callback that gets a progress fraction as argument and should return True to continue,
or a default progress bar when progress=True
:param: bool virtual: When True, export virtual columns
:param str sort: expression used for sorting the output
:param bool ascending: sort ascending (True) or descending
:return:
"""
if path.endswith('.arrow'):
self.export_arrow(path, column_names, byteorder, shuffle, selection, progress=progress, virtual=virtual, sort=sort, ascending=ascending)
elif path.endswith('.hdf5'):
self.export_hdf5(path, column_names, byteorder, shuffle, selection, progress=progress, virtual=virtual, sort=sort, ascending=ascending)
elif path.endswith('.fits'):
self.export_fits(path, column_names, shuffle, selection, progress=progress, virtual=virtual, sort=sort, ascending=ascending)
elif path.endswith('.parquet'):
self.export_parquet(path, column_names, shuffle, selection, progress=progress, virtual=virtual, sort=sort, ascending=ascending)
|
Exports the DataFrame to a file written with arrow
:param DataFrameLocal df: DataFrame to export
:param str path: path for file
:param lis[str] column_names: list of column names to export or None for all columns
:param str byteorder: = for native, < for little endian and > for big endian (not supported for fits)
:param bool shuffle: export rows in random order
:param bool selection: export selection or not
:param progress: progress callback that gets a progress fraction as argument and should return True to continue,
or a default progress bar when progress=True
:param: bool virtual: When True, export virtual columns
:param str sort: expression used for sorting the output
:param bool ascending: sort ascending (True) or descending
:return:
|
def get_orthology_matrix(self, outfile, sc, outdir=None,
pid_cutoff=None, bitscore_cutoff=None, evalue_cutoff=None,
force_rerun=False):
"""Create the orthology matrix by finding best bidirectional BLAST hits. Genes = rows, strains = columns
Runs run_makeblastdb, run_bidirectional_blast, and calculate_bbh for protein sequences.
Args:
outfile (str): Filename with extension of the orthology matrix (ie. df_orthology.csv)
outdir (str): Path to output of orthology matrix, default is ATLAS data_dir
pid_cutoff (float): Minimum percent identity between BLAST hits to filter for in the range [0, 100]
bitscore_cutoff (float): Minimum bitscore allowed between BLAST hits
evalue_cutoff (float): Maximum E-value allowed between BLAST hits
filter_condition (str): 'OR' or 'AND', how to combine cutoff filters. 'OR' gives more results since it
is less stringent, as you will be filtering for hits with (>80% PID or >30 bitscore or <0.0001 evalue).
remove_strains_with_no_orthology (bool): Remove strains which have no orthologous genes found
remove_strains_with_no_differences (bool): Remove strains which have all the same genes as the base model.
Default is False because since orthology is found using a PID cutoff, all genes may be present but
differences may be on the sequence level.
remove_genes_not_in_base_model (bool): Remove genes from the orthology matrix which are not present in our
base model. This happens if we use a genome file for our model that has other genes in it.
Returns:
DataFrame: Orthology matrix calculated from best bidirectional BLAST hits.
"""
# Output directory for orthology matrix
if not outdir:
outdir = self.data_dir
ortho_matrix = op.join(outdir, outfile)
if ssbio.utils.force_rerun(flag=force_rerun, outfile=ortho_matrix):
if not sc:
raise ValueError('Please initialize SparkContext')
################################################################################################################
# BIDIRECTIONAL BLAST
def run_bidirectional_blast(strain_id, strain_genome_path,
r_file=self.reference_gempro.genome_path,
outdir2=self.sequences_by_organism_dir):
import ssbio.protein.sequence.utils.blast
# Run bidirectional BLAST
r_vs_g, g_vs_r = ssbio.protein.sequence.utils.blast.run_bidirectional_blast(reference=r_file,
other_genome=strain_genome_path,
dbtype='prot',
outdir=outdir2)
# Using the BLAST files, find the BBH
bbh = ssbio.protein.sequence.utils.blast.calculate_bbh(blast_results_1=r_vs_g, blast_results_2=g_vs_r,
outdir=outdir2)
return strain_id, bbh
log.info('Running bidirectional BLAST and finding best bidirectional hits (BBH)...')
for_strains_rdd = [(k, v['genome_path']) for k,v in self.strain_infodict.items()]
from random import shuffle
shuffle(for_strains_rdd)
strains_rdd = sc.parallelize(for_strains_rdd)
result = strains_rdd.map(lambda x: run_bidirectional_blast(strain_id=x[0], strain_genome_path=x[1])).collect()
bbh_files = dict(result)
################################################################################################################
################################################################################################################
# ORTHOLOGY MATRIX
log.info('Creating orthology matrix from BBHs...')
ortho_matrix = ssbio.protein.sequence.utils.blast.create_orthology_matrix(r_name=self.reference_gempro.id,
genome_to_bbh_files=bbh_files,
pid_cutoff=pid_cutoff,
bitscore_cutoff=bitscore_cutoff,
evalue_cutoff=evalue_cutoff,
outname=outfile,
outdir=outdir,
force_rerun=force_rerun)
log.info('Orthology matrix at {}. See the "df_orthology_matrix" attribute.'.format(ortho_matrix))
self.df_orthology_matrix = pd.read_csv(ortho_matrix, index_col=0)
self.df_orthology_matrix = self.df_orthology_matrix.rename_axis('gene').rename_axis("strain", axis="columns")
|
Create the orthology matrix by finding best bidirectional BLAST hits. Genes = rows, strains = columns
Runs run_makeblastdb, run_bidirectional_blast, and calculate_bbh for protein sequences.
Args:
outfile (str): Filename with extension of the orthology matrix (ie. df_orthology.csv)
outdir (str): Path to output of orthology matrix, default is ATLAS data_dir
pid_cutoff (float): Minimum percent identity between BLAST hits to filter for in the range [0, 100]
bitscore_cutoff (float): Minimum bitscore allowed between BLAST hits
evalue_cutoff (float): Maximum E-value allowed between BLAST hits
filter_condition (str): 'OR' or 'AND', how to combine cutoff filters. 'OR' gives more results since it
is less stringent, as you will be filtering for hits with (>80% PID or >30 bitscore or <0.0001 evalue).
remove_strains_with_no_orthology (bool): Remove strains which have no orthologous genes found
remove_strains_with_no_differences (bool): Remove strains which have all the same genes as the base model.
Default is False because since orthology is found using a PID cutoff, all genes may be present but
differences may be on the sequence level.
remove_genes_not_in_base_model (bool): Remove genes from the orthology matrix which are not present in our
base model. This happens if we use a genome file for our model that has other genes in it.
Returns:
DataFrame: Orthology matrix calculated from best bidirectional BLAST hits.
|
def _get_or_generate_vocab(tmp_dir, vocab_filename, vocab_size):
"""Read or create vocabulary."""
vocab_filepath = os.path.join(tmp_dir, vocab_filename)
print('Vocab file written to: ' + vocab_filepath)
if tf.gfile.Exists(vocab_filepath):
gs = text_encoder.SubwordTextEncoder(vocab_filepath)
return gs
example_file = os.path.join(tmp_dir, _EXAMPLES_FILE)
gs = text_encoder.SubwordTextEncoder()
token_counts = tokenizer.corpus_token_counts(
example_file, corpus_max_lines=1000000)
gs = gs.build_to_target_size(
vocab_size, token_counts, min_val=1, max_val=1e3)
gs.store_to_file(vocab_filepath)
return gs
|
Read or create vocabulary.
|
def get(self, name):
"""Returns the specified interfaces STP configuration resource
The STP interface resource contains the following
* name (str): The interface name
* portfast (bool): The spanning-tree portfast admin state
* bpduguard (bool): The spanning-tree bpduguard admin state
* portfast_type (str): The spanning-tree portfast <type> value.
Valid values include "edge", "network", "normal"
Args:
name (string): The interface identifier to retrieve the config
for. Note: Spanning-tree interfaces are only supported on
Ethernet and Port-Channel interfaces
Returns:
dict: A resource dict object that represents the interface
configuration.
None: If the specified interface is not a STP port
"""
if not isvalidinterface(name):
return None
config = self.get_block(r'^interface\s%s$' % name)
resp = dict()
resp.update(self._parse_bpduguard(config))
resp.update(self._parse_portfast(config))
resp.update(self._parse_portfast_type(config))
return resp
|
Returns the specified interfaces STP configuration resource
The STP interface resource contains the following
* name (str): The interface name
* portfast (bool): The spanning-tree portfast admin state
* bpduguard (bool): The spanning-tree bpduguard admin state
* portfast_type (str): The spanning-tree portfast <type> value.
Valid values include "edge", "network", "normal"
Args:
name (string): The interface identifier to retrieve the config
for. Note: Spanning-tree interfaces are only supported on
Ethernet and Port-Channel interfaces
Returns:
dict: A resource dict object that represents the interface
configuration.
None: If the specified interface is not a STP port
|
def delete_course(self, courseid):
"""
:param courseid: the course id of the course
:raise InvalidNameException or CourseNotFoundException
Erase the content of the course folder
"""
if not id_checker(courseid):
raise InvalidNameException("Course with invalid name: " + courseid)
course_fs = self.get_course_fs(courseid)
if not course_fs.exists():
raise CourseNotFoundException()
course_fs.delete()
get_course_logger(courseid).info("Course %s erased from the factory.", courseid)
|
:param courseid: the course id of the course
:raise InvalidNameException or CourseNotFoundException
Erase the content of the course folder
|
def main(args=sys.argv):
"""Run the work() method from the class instance in the file "job-instance.pickle".
"""
try:
# Set up logging.
logging.basicConfig(level=logging.WARN)
work_dir = args[1]
assert os.path.exists(work_dir), "First argument to lsf_runner.py must be a directory that exists"
do_work_on_compute_node(work_dir)
except Exception as exc:
# Dump encoded data that we will try to fetch using mechanize
print(exc)
raise
|
Run the work() method from the class instance in the file "job-instance.pickle".
|
def fail(self, message, view, type_error=False):
"""Raise an exception indicating that a value cannot be
accepted.
`type_error` indicates whether the error is due to a type
mismatch rather than a malformed value. In this case, a more
specific exception is raised.
"""
exc_class = ConfigTypeError if type_error else ConfigValueError
raise exc_class(
u'{0}: {1}'.format(view.name, message)
)
|
Raise an exception indicating that a value cannot be
accepted.
`type_error` indicates whether the error is due to a type
mismatch rather than a malformed value. In this case, a more
specific exception is raised.
|
def visit_Call(self, node):
"""Call visitor - used for finding setup() call."""
self.generic_visit(node)
# Setup() is a keywords-only function.
if node.args:
return
keywords = set()
for k in node.keywords:
if k.arg is not None:
keywords.add(k.arg)
# Simple case for dictionary expansion for Python >= 3.5.
if k.arg is None and isinstance(k.value, ast.Dict):
keywords.update(x.s for x in k.value.keys)
# Simple case for dictionary expansion for Python <= 3.4.
if getattr(node, 'kwargs', ()) and isinstance(node.kwargs, ast.Dict):
keywords.update(x.s for x in node.kwargs.keys)
# The bare minimum number of arguments seems to be around five, which
# includes author, name, version, module/package and something extra.
if len(keywords) < 5:
return
score = sum(
self.attributes.get(x, 0)
for x in keywords
) / len(keywords)
if score < 0.5:
LOG.debug(
"Scoring for setup%r below 0.5: %.2f",
tuple(keywords),
score)
return
# Redirect call to our setup() tap function.
node.func = ast.Name(id='__f8r_setup', ctx=node.func.ctx)
self.redirected = True
|
Call visitor - used for finding setup() call.
|
def process(specs):
"""
Executes the passed in list of specs
"""
pout, pin = chain_specs(specs)
LOG.info("Processing")
sw = StopWatch().start()
r = pout.process(pin)
if r:
print(r)
LOG.info("Finished in %s", sw.read())
|
Executes the passed in list of specs
|
def get_lb_conn(dd_driver=None):
'''
Return a load-balancer conn object
'''
vm_ = get_configured_provider()
region = config.get_cloud_config_value(
'region', vm_, __opts__
)
user_id = config.get_cloud_config_value(
'user_id', vm_, __opts__
)
key = config.get_cloud_config_value(
'key', vm_, __opts__
)
if not dd_driver:
raise SaltCloudSystemExit(
'Missing dimensiondata_driver for get_lb_conn method.'
)
return get_driver_lb(Provider_lb.DIMENSIONDATA)(user_id, key, region=region)
|
Return a load-balancer conn object
|
def fix_virtualenv_tkinter():
"""
Workaround for tkinter under Windows in a virtualenv:
"TclError: Can't find a usable init.tcl..."
Known bug, see: https://github.com/pypa/virtualenv/issues/93
There are "fix tk" file here:
C:\Python27\Lib\lib-tk\FixTk.py
C:\Python34\Lib\tkinter\_fix.py
These modules are imported automatically when tkinter is imported.
The fix sets these environment variables:
TCL_LIBRARY C:\Python27\tcl\tcl8.5
TIX_LIBRARY C:\Python27\tcl\tix8.4.3
TK_LIBRARY C:\Python27\tcl\tk8.5
TCL_LIBRARY C:\Python34\tcl\tcl8.6
TIX_LIBRARY C:\Python34\tcl\tix8.4.3
TK_LIBRARY C:\Python34\tcl\tk8.6
but only if:
os.path.exists(os.path.join(sys.prefix,"tcl"))
And the virtualenv activate script will change the sys.prefix
to the current env. So we temporarily change it back to sys.real_prefix
and import the fix module.
If the fix module was imported before, then we reload it.
"""
if "TCL_LIBRARY" in os.environ:
# Fix not needed (e.g. virtualenv issues #93 fixed?)
return
if not hasattr(sys, "real_prefix"):
# we are not in a activated virtualenv
return
if sys.version_info[0] == 2:
# Python v2
virtualprefix = sys.prefix
sys.prefix = sys.real_prefix
import FixTk
if "TCL_LIBRARY" not in os.environ:
reload(FixTk)
sys.prefix = virtualprefix
else:
# Python v3
virtualprefix = sys.base_prefix
sys.base_prefix = sys.real_prefix
from tkinter import _fix
if "TCL_LIBRARY" not in os.environ:
from imp import reload
reload(_fix)
sys.base_prefix = virtualprefix
|
Workaround for tkinter under Windows in a virtualenv:
"TclError: Can't find a usable init.tcl..."
Known bug, see: https://github.com/pypa/virtualenv/issues/93
There are "fix tk" file here:
C:\Python27\Lib\lib-tk\FixTk.py
C:\Python34\Lib\tkinter\_fix.py
These modules are imported automatically when tkinter is imported.
The fix sets these environment variables:
TCL_LIBRARY C:\Python27\tcl\tcl8.5
TIX_LIBRARY C:\Python27\tcl\tix8.4.3
TK_LIBRARY C:\Python27\tcl\tk8.5
TCL_LIBRARY C:\Python34\tcl\tcl8.6
TIX_LIBRARY C:\Python34\tcl\tix8.4.3
TK_LIBRARY C:\Python34\tcl\tk8.6
but only if:
os.path.exists(os.path.join(sys.prefix,"tcl"))
And the virtualenv activate script will change the sys.prefix
to the current env. So we temporarily change it back to sys.real_prefix
and import the fix module.
If the fix module was imported before, then we reload it.
|
def jsondeclarations(self):
"""Return all declarations in a form ready to be serialised to JSON.
Returns:
list of dict
"""
l = []
for annotationtype, set in self.annotations:
label = None
#Find the 'label' for the declarations dynamically (aka: AnnotationType --> String)
for key, value in vars(AnnotationType).items():
if value == annotationtype:
label = key
break
#gather attribs
if (annotationtype == AnnotationType.TEXT or annotationtype == AnnotationType.PHON) and set == 'undefined' and len(self.annotationdefaults[annotationtype][set]) == 0:
#this is the implicit TextContent declaration, no need to output it explicitly
continue
jsonnode = {'annotationtype': label.lower()}
if set and set != 'undefined':
jsonnode['set'] = set
for key, value in self.annotationdefaults[annotationtype][set].items():
if key == 'annotatortype':
if value == AnnotatorType.MANUAL:
jsonnode[key] = 'manual'
elif value == AnnotatorType.AUTO:
jsonnode[key] = 'auto'
elif key == 'datetime':
jsonnode[key] = value.strftime("%Y-%m-%dT%H:%M:%S") #proper iso-formatting
elif value:
jsonnode[key] = value
if label:
l.append( jsonnode )
else:
raise Exception("Invalid annotation type")
return l
|
Return all declarations in a form ready to be serialised to JSON.
Returns:
list of dict
|
def put_metadata(self, key, value, namespace='default'):
"""
Add metadata to segment or subsegment. Metadata is not indexed
but can be later retrieved by BatchGetTraces API.
:param str namespace: optional. Default namespace is `default`.
It must be a string and prefix `AWS.` is reserved.
:param str key: metadata key under specified namespace
:param object value: any object that can be serialized into JSON string
"""
self._check_ended()
if not isinstance(namespace, string_types):
log.warning("ignoring non string type metadata namespace")
return
if namespace.startswith('AWS.'):
log.warning("Prefix 'AWS.' is reserved, drop metadata with namespace %s", namespace)
return
if self.metadata.get(namespace, None):
self.metadata[namespace][key] = value
else:
self.metadata[namespace] = {key: value}
|
Add metadata to segment or subsegment. Metadata is not indexed
but can be later retrieved by BatchGetTraces API.
:param str namespace: optional. Default namespace is `default`.
It must be a string and prefix `AWS.` is reserved.
:param str key: metadata key under specified namespace
:param object value: any object that can be serialized into JSON string
|
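A self-contained sketch of the nesting used above, with a plain dict standing in for the segment's metadata store.
metadata = {}

def put(key, value, namespace='default'):
    # same nesting rule as put_metadata: namespace first, then key
    if metadata.get(namespace):
        metadata[namespace][key] = value
    else:
        metadata[namespace] = {key: value}

put('batch_size', 128)
put('retries', 3, namespace='myapp')
print(metadata)   # {'default': {'batch_size': 128}, 'myapp': {'retries': 3}}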
def show(cls, result):
"""
:param TryHaskell.Result result: Parse result of JSON data.
:rtype: str|unicode
"""
if result.ok:
if result.stdout:
out = '\n'.join(result.stdout)
if result.value and result.value != '()':
return '\n'.join([out, result.value])
return out
if result.value and not cls._is_function_value(result.value):
return result.value
return cls.show_type(result)
return result.value
|
:param TryHaskell.Result result: Parse result of JSON data.
:rtype: str|unicode
|
def get_objects(self, path, marker=None,
limit=settings.CLOUD_BROWSER_DEFAULT_LIST_LIMIT):
"""Get objects.
Certain upload clients may add a 0-byte object (e.g., ``FOLDER`` object
for path ``path/to/FOLDER`` - ``path/to/FOLDER/FOLDER``). We add an
extra +1 limit query and ignore any such file objects.
"""
# Get basename of implied folder.
folder = path.split(SEP)[-1]
# Query extra objects, then strip 0-byte dummy object if present.
objs = super(GsContainer, self).get_objects(path, marker, limit + 1)
objs = [o for o in objs if not (o.size == 0 and o.name == folder)]
return objs[:limit]
|
Get objects.
Certain upload clients may add a 0-byte object (e.g., ``FOLDER`` object
for path ``path/to/FOLDER`` - ``path/to/FOLDER/FOLDER``). We add an
extra +1 limit query and ignore any such file objects.
|
def _set_scores(self):
"""
Compute anomaly scores for the time series.
"""
anom_scores = {}
self._compute_derivatives()
derivatives_ema = utils.compute_ema(self.smoothing_factor, self.derivatives)
for i, (timestamp, value) in enumerate(self.time_series_items):
anom_scores[timestamp] = abs(self.derivatives[i] - derivatives_ema[i])
stdev = numpy.std(anom_scores.values())
if stdev:
for timestamp in anom_scores.keys():
anom_scores[timestamp] /= stdev
self.anom_scores = TimeSeries(self._denoise_scores(anom_scores))
|
Compute anomaly scores for the time series.
|
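A self-contained numpy sketch of the scoring idea above: take absolute derivatives, smooth them with an exponential moving average, score each point by its deviation from that average, and rescale by the standard deviation. The smoothing factor and the series are invented for the demo.
import numpy as np

values = np.array([1.0, 1.1, 1.2, 5.0, 1.3, 1.4])
derivs = np.abs(np.diff(values, prepend=values[0]))
alpha = 0.5                                  # stand-in smoothing factor
ema = np.zeros_like(derivs)
for i, d in enumerate(derivs):
    ema[i] = d if i == 0 else alpha * d + (1 - alpha) * ema[i - 1]
scores = np.abs(derivs - ema)
if scores.std():
    scores = scores / scores.std()           # normalize as in _set_scores
print(scores.round(2))                       # the jump at index 3 dominates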
def Orth(docs, drop=0.0):
"""Get word forms."""
ids = numpy.zeros((sum(len(doc) for doc in docs),), dtype="i")
i = 0
for doc in docs:
for token in doc:
ids[i] = token.orth
i += 1
return ids, None
|
Get word forms.
|
def onpick(self, event):
"""Called per artist (group), with possibly a list of indices."""
if hasattr(event.artist, '_mt_legend_item'):
# legend item, instead of data point
idx = event.artist._mt_legend_item
try:
self.toggle_artist(self.artists[idx])
except IndexError:
pass
return
# only print logevents of visible points
if not event.artist.get_visible():
return
# get PlotType and let it print that event
plot_type = event.artist._mt_plot_type
plot_type.clicked(event)
|
Called per artist (group), with possibly a list of indices.
|
def addproperties(
names,
bfget=None, afget=None, enableget=True,
bfset=None, afset=None, enableset=True,
bfdel=None, afdel=None, enabledel=True
):
"""Decorator in charge of adding python properties to cls.
{a/b}fget, {a/b}fset and {a/b}fdel are applied to all properties matching
names in taking care to not forget default/existing properties. The
prefixes *a* and *b* are respectively for after and before default/existing
property getter/setter/deleter execution.
These getter, setter and deleter functions are called before existing or
default getter, setter and deleters. Default getter, setter and deleters
are functions which uses an attribute with a name starting with '_' and
finishing with the property name (like the python language convention).
.. seealso::
_protectedattrname(name)
:param str(s) names: property name(s) to add.
:param bfget: getter function to apply to all properties before
default/existing getter execution. Parameters are a decorated cls
instance and a property name.
:param afget: fget function to apply to all properties after
default/existing getter execution. Parameters are a decorated cls
instance and a property name.
:param bool enableget: if True (default), enable existing or default getter
. Otherwise, use only fget if given.
:param bfset: fset function to apply to all properties before
default/existing setter execution. Parameters are a decorated cls
instance and a property name.
:param afset: fset function to apply to all properties after
default/existing setter execution. Parameters are a decorated cls
instance and a property name.
:param bool enableset: if True (default), enable existing or default setter
. Otherwise, use only fset if given.
:param bfdel: fdel function to apply to all properties before
default/existing deleter execution. Parameters are a decorated cls
instance and a property name.
:param afdel: fdel function to apply to all properties after
default/existing deleter execution. Parameters are a decorated cls
instance and a property name.
:param bool enabledel: if True (default), enable existing or default
deleter. Otherwise, use only fdel if given.
:return: cls decorator.
"""
# ensure names is a list
names = ensureiterable(names, exclude=string_types)
if isinstance(bfget, MethodType):
finalbfget = lambda self, name: bfget(name)
else:
finalbfget = bfget
if isinstance(afget, MethodType):
finalafget = lambda self, name: afget(name)
else:
finalafget = afget
if isinstance(bfset, MethodType):
finalbfset = lambda self, value, name: bfset(value, name)
else:
finalbfset = bfset
if isinstance(afset, MethodType):
finalafset = lambda self, value, name: afset(value, name)
else:
finalafset = afset
if isinstance(bfdel, MethodType):
finalbfdel = lambda self, name: bfdel(name)
else:
finalbfdel = bfdel
if isinstance(afdel, MethodType):
finalafdel = lambda self, name: afdel(name)
else:
finalafdel = afdel
def _addproperties(cls):
"""Add properties to cls.
:param type cls: cls on adding properties.
:return: cls
"""
for name in names:
protectedattrname = _protectedattrname(name)
# try to find an existing property
existingproperty = getattr(cls, name, None)
if isinstance(existingproperty, property):
_fget = existingproperty.fget
_fset = existingproperty.fset
_fdel = existingproperty.fdel
else:
_fget, _fset, _fdel = None, None, None
# construct existing/default getter
if _fget is None:
def _fget(protectedattrname):
"""Simple getter wrapper."""
def _fget(self):
"""Simple getter."""
return getattr(self, protectedattrname, None)
return _fget
_fget = _fget(protectedattrname)
_fget.__doc__ = 'Get this {0}.\n:return: this {0}.'.format(
name
)
# transform method to function in order to add self in parameters
if isinstance(_fget, MethodType):
final_fget = lambda self: _fget()
else:
final_fget = _fget
# construct existing/default setter
if _fset is None:
def _fset(protectedattrname):
"""Simple setter wrapper."""
def _fset(self, value):
"""Simple setter."""
setattr(self, protectedattrname, value)
return _fset
_fset = _fset(protectedattrname)
_fset.__doc__ = (
'Change of {0}.\n:param {0}: {0} to use.'.format(name)
)
# transform method to function in order to add self in parameters
if isinstance(_fset, MethodType):
final_fset = lambda self, value: _fset(value)
else:
final_fset = _fset
# construct existing/default deleter
if _fdel is None:
def _fdel(protectedattrname):
"""Simple deleter wrapper."""
def _fdel(self):
"""Simple deleter."""
if hasattr(self, protectedattrname):
delattr(self, protectedattrname)
return _fdel
_fdel = _fdel(protectedattrname)
_fdel.__doc__ = 'Delete this {0}.'.format(name)
# transform method to function in order to add self in parameters
if isinstance(_fdel, MethodType):
final_fdel = lambda self: _fdel()
else:
final_fdel = _fdel
def _getter(final_fget, name):
"""Property getter wrapper."""
def _getter(self):
"""Property getter."""
result = None
# start to process input bfget
if finalbfget is not None:
result = finalbfget(self, name)
# process cls getter
if enableget:
result = final_fget(self)
# finish to process afget
if finalafget is not None:
result = finalafget(self, name)
return result
return _getter
_getter = _getter(final_fget, name)
_getter.__doc__ = final_fget.__doc__ # update doc
def _setter(final_fset, name):
"""Property setter wrapper."""
def _setter(self, value):
"""Property setter."""
# start to process input bfset
if finalbfset is not None:
finalbfset(self, value, name)
# finish to process cls setter
if enableset:
final_fset(self, value)
# finish to process afset
if finalafset is not None:
finalafset(self, value, name)
return _setter
_setter = _setter(final_fset, name)
_setter.__doc__ = final_fset.__doc__ # update doc
def _deleter(final_fdel, name):
"""Property deleter wrapper."""
def _deleter(self):
"""Property deleter."""
# start to process input fdel
if finalbfdel is not None:
finalbfdel(self, name)
# finish to process cls deleter
if enabledel:
final_fdel(self)
# finish to process afget
if finalafdel is not None:
finalafdel(self, name)
return _deleter
_deleter = _deleter(final_fdel, name)
_deleter.__doc__ = final_fdel.__doc__ # update doc
# get property name
doc = '{0} property.'.format(name)
propertyfield = property(
fget=_getter, fset=_setter, fdel=_deleter, doc=doc
)
# put property name in cls
setattr(cls, name, propertyfield)
return cls # finish to return the cls
return _addproperties
|
Decorator in charge of adding python properties to cls.
{a/b}fget, {a/b}fset and {a/b}fdel are applied to all properties matching
names in taking care to not forget default/existing properties. The
prefixes *a* and *b* are respectively for after and before default/existing
property getter/setter/deleter execution.
These getter, setter and deleter functions are called before existing or
default getter, setter and deleters. Default getter, setter and deleters
are functions which uses an attribute with a name starting with '_' and
finishing with the property name (like the python language convention).
.. seealso::
_protectedattrname(name)
:param str(s) names: property name(s) to add.
:param bfget: getter function to apply to all properties before
default/existing getter execution. Parameters are a decorated cls
instance and a property name.
:param afget: fget function to apply to all properties after
default/existing getter execution. Parameters are a decorated cls
instance and a property name.
:param bool enableget: if True (default), enable existing or default getter
. Otherwise, use only fget if given.
:param bfset: fset function to apply to all properties before
default/existing setter execution. Parameters are a decorated cls
instance and a property name.
:param afset: fset function to apply to all properties after
default/existing setter execution. Parameters are a decorated cls
instance and a property name.
:param bool enableset: if True (default), enable existing or default setter
. Otherwise, use only fset if given.
:param bfdel: fdel function to apply to all properties before
default/existing deleter execution. Parameters are a decorated cls
instance and a property name.
:param afdel: fdel function to apply to all properties after
default/existing deleter execution. Parameters are a decorated cls
instance and a property name.
:param bool enabledel: if True (default), enable existing or default
deleter. Otherwise, use only fdel if given.
:return: cls decorator.
|
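A usage sketch for the decorator above (it relies on the module's own helpers such as _protectedattrname); the default accessors back each property with the matching protected attribute.
@addproperties(['name', 'age'])
class Person(object):
    pass

p = Person()
p.name = 'Ada'      # default setter stores the value in p._name
print(p.name)       # default getter reads p._name -> 'Ada'
del p.name          # default deleter removes p._name
print(p.name)       # None once the backing attribute is gone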
def cycle_running_window(iterable, size):
"""Generate n-size cycle running window.
Example::
>>> for i in cycle_running_window([1, 2, 3, 4, 5], size=3):
... print(i)
[1, 2, 3]
[2, 3, 4]
[3, 4, 5]
[4, 5, 1]
[5, 1, 2]
Cyclic-shift sliding-window function.
"""
if size > len(iterable):
raise ValueError("size can not be greater than length of iterable.")
fifo = collections.deque(maxlen=size)
cycle = itertools.cycle(iterable)
counter = itertools.count(1)
length = len(iterable)
for i in cycle:
fifo.append(i)
if len(fifo) == size:
yield list(fifo)
if next(counter) == length:
break
|
Generate n-size cycle running window.
Example::
>>> for i in cycle_running_window([1, 2, 3, 4, 5], size=3):
... print(i)
[1, 2, 3]
[2, 3, 4]
[3, 4, 5]
[4, 5, 1]
[5, 1, 2]
Cyclic-shift sliding-window function.
|
def alphanum_variable(min_size, max_size, name=None):
"""
Creates the grammar for an alphanumeric code where the size ranges between
two values.
:param min_size: minimum size
:param max_size: maximum size
:param name: name for the field
:return: grammar for an alphanumeric field of a variable size
"""
if name is None:
name = 'Alphanumeric Field'
if min_size < 0:
# Can't have negative min
raise BaseException()
if max_size < min_size:
# Max can't be lower than min
raise BaseException()
field = pp.Word(pp.alphanums, min=min_size, max=max_size)
# Parse action
field.setParseAction(lambda s: s[0].strip())
# White spaces are not removed
field.leaveWhitespace()
# Name
field.setName(name)
return field
|
Creates the grammar for an alphanumeric code where the size ranges between
two values.
:param min_size: minimum size
:param max_size: maximum size
:param name: name for the field
:return: grammar for an alphanumeric field of a variable size
|
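A usage sketch (pyparsing is assumed to be imported as pp, as in the module above).
field = alphanum_variable(min_size=2, max_size=5, name='code')
print(field.parseString('abc12')[0])    # 'abc12'
# Inputs with fewer than min_size alphanumeric characters at the start
# raise pp.ParseException; longer inputs are consumed only up to max_size.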
def atlas_get_all_neighbors( peer_table=None ):
"""
Get *all* neighbor information.
USED ONLY FOR TESTING
"""
if os.environ.get("BLOCKSTACK_ATLAS_NETWORK_SIMULATION") != "1":
raise Exception("This method is only available when testing with the Atlas network simulator")
ret = {}
with AtlasPeerTableLocked(peer_table) as ptbl:
ret = copy.deepcopy(ptbl)
# make zonefile inventories printable
for peer_hostport in ret.keys():
if ret[peer_hostport].has_key('zonefile_inv'):
ret[peer_hostport]['zonefile_inv'] = atlas_inventory_to_string( ret[peer_hostport]['zonefile_inv'] )
return ret
|
Get *all* neighbor information.
USED ONLY FOR TESTING
|
def _init_connection(self):
"""
Requests the session ids of the bridge.
:returns: True, if initialization was successful. False, otherwise.
"""
try:
# We are changing self.is_ready: lock it up!
self._lock.acquire()
response = bytearray(22)
self._send_raw(BRIDGE_INITIALIZATION_COMMAND)
self._socket.recv_into(response)
self._wb1 = response[19]
self._wb2 = response[20]
self.is_ready = True
except (socket.error, socket.timeout):
# Connection timed out, bridge is not ready for us
self.is_ready = False
finally:
# Prevent deadlocks: always release the lock
self._lock.release()
return self.is_ready
|
Requests the session ids of the bridge.
:returns: True, if initialization was successful. False, otherwise.
|
def agitator_time_homogeneous(N, P, T, H, mu, rho, D=None, homogeneity=.95):
r'''Calculates time for a fluid mixing in a tank with an impeller to
reach a specified level of homogeneity, according to [1]_.
.. math::
N_p = \frac{Pg}{\rho N^3 D^5}
.. math::
Re_{imp} = \frac{\rho D^2 N}{\mu}
.. math::
\text{constant} = N_p^{1/3} Re_{imp}
.. math::
Fo = 5.2/\text{constant} \text{for turbulent regime}
.. math::
Fo = (183/\text{constant})^2 \text{for transition regime}
Parameters
----------
N : float
Speed of impeller, [revolutions/s]
P : float
Actual power required to mix, ignoring mechanical inefficiencies [W]
T : float
Tank diameter, [m]
H : float
Tank height, [m]
mu : float
Mixture viscosity, [Pa*s]
rho : float
Mixture density, [kg/m^3]
D : float, optional
Impeller diameter [m]
homogeneity : float, optional
Fraction completion of mixing, []
Returns
-------
t : float
Time for specified degree of homogeneity [s]
Notes
-----
If impeller diameter is not specified, assumed to be 0.5 tank diameters.
The first example is solved forward rather than backwards here. A rather
different result is obtained, but is accurate.
No check to see if the mixture is laminar is currently implemented.
This would underpredict the required time.
Examples
--------
>>> agitator_time_homogeneous(D=36*.0254, N=56/60., P=957., T=1.83, H=1.83, mu=0.018, rho=1020, homogeneity=.995)
15.143198226374668
>>> agitator_time_homogeneous(D=1, N=125/60., P=298., T=3, H=2.5, mu=.5, rho=980, homogeneity=.95)
67.7575069865228
References
----------
.. [1] Paul, Edward L, Victor A Atiemo-Obeng, and Suzanne M Kresta.
Handbook of Industrial Mixing: Science and Practice.
Hoboken, N.J.: Wiley-Interscience, 2004.
'''
if not D:
D = T*0.5
Np = P*g/rho/N**3/D**5
Re_imp = rho/mu*D**2*N
regime_constant = Np**(1/3.)*Re_imp
if regime_constant >= min_regime_constant_for_turbulent:
Fo = (5.2/regime_constant)
else:
Fo = (183./regime_constant)**2
time = rho*T**1.5*H**0.5/mu*Fo
multiplier = adjust_homogeneity(homogeneity)
return time*multiplier
|
r'''Calculates time for a fluid mixing in a tank with an impeller to
reach a specified level of homogeneity, according to [1]_.
.. math::
N_p = \frac{Pg}{\rho N^3 D^5}
.. math::
Re_{imp} = \frac{\rho D^2 N}{\mu}
.. math::
\text{constant} = N_p^{1/3} Re_{imp}
.. math::
Fo = 5.2/\text{constant} \text{for turbulent regime}
.. math::
Fo = (183/\text{constant})^2 \text{for transition regime}
Parameters
----------
N : float
Speed of impeller, [revolutions/s]
P : float
Actual power required to mix, ignoring mechanical inefficiencies [W]
T : float
Tank diameter, [m]
H : float
Tank height, [m]
mu : float
Mixture viscosity, [Pa*s]
rho : float
Mixture density, [kg/m^3]
D : float, optional
Impeller diameter [m]
homogeneity : float, optional
Fraction completion of mixing, []
Returns
-------
t : float
Time for specified degree of homogeneity [s]
Notes
-----
If impeller diameter is not specified, assumed to be 0.5 tank diameters.
The first example is solved forward rather than backwards here. A rather
different result is obtained, but is accurate.
No check to see if the mixture is laminar is currently implemented.
This would underpredict the required time.
Examples
--------
>>> agitator_time_homogeneous(D=36*.0254, N=56/60., P=957., T=1.83, H=1.83, mu=0.018, rho=1020, homogeneity=.995)
15.143198226374668
>>> agitator_time_homogeneous(D=1, N=125/60., P=298., T=3, H=2.5, mu=.5, rho=980, homogeneity=.95)
67.7575069865228
References
----------
.. [1] Paul, Edward L, Victor A Atiemo-Obeng, and Suzanne M Kresta.
Handbook of Industrial Mixing: Science and Practice.
Hoboken, N.J.: Wiley-Interscience, 2004.
|
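A worked numeric sketch of the dimensionless groups defined in the docstring above, using the first example's inputs and taking g as 9.80665 m/s^2; the turbulent/transition switch and the homogeneity multiplier depend on module-level helpers not shown here.
g = 9.80665
N, P, D, mu, rho = 56 / 60., 957., 36 * .0254, 0.018, 1020.
Np = P * g / rho / N**3 / D**5          # power number, as formed in the code
Re_imp = rho / mu * D**2 * N            # impeller Reynolds number
regime_constant = Np**(1 / 3.) * Re_imp
print(Np, Re_imp, regime_constant)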
def highlight_options(self, **kwargs):
"""
Update the global highlighting options used for this request. For
example::
s = Search()
s = s.highlight_options(order='score')
"""
s = self._clone()
s._highlight_opts.update(kwargs)
return s
|
Update the global highlighting options used for this request. For
example::
s = Search()
s = s.highlight_options(order='score')
|
def setup_splittable_dax_generated(workflow, input_tables, out_dir, tags):
'''
Function for setting up the splitting jobs as part of the workflow.
Parameters
-----------
workflow : pycbc.workflow.core.Workflow
The Workflow instance that the jobs will be added to.
input_tables : pycbc.workflow.core.FileList
The input files to be split up.
out_dir : path
The directory in which output will be written.
Returns
--------
split_table_outs : pycbc.workflow.core.FileList
The list of split up files as output from this job.
'''
cp = workflow.cp
# Get values from ini file
try:
num_splits = cp.get_opt_tags("workflow-splittable",
"splittable-num-banks", tags)
except BaseException:
inj_interval = int(cp.get_opt_tags("workflow-splittable",
"splitinjtable-interval", tags))
if cp.has_option_tags("em_bright_filter", "max-keep", tags) and \
cp.has_option("workflow-injections", "em-bright-only"):
num_injs = int(cp.get_opt_tags("em_bright_filter", "max-keep",
tags))
else:
num_injs = int(cp.get_opt_tags("workflow-injections", "num-injs",
tags))
inj_tspace = float(abs(workflow.analysis_time)) / num_injs
num_splits = int(inj_interval // inj_tspace) + 1
split_exe_tag = cp.get_opt_tags("workflow-splittable",
"splittable-exe-tag", tags)
split_exe = os.path.basename(cp.get("executables", split_exe_tag))
# Select the appropriate class
exe_class = select_splitfilejob_instance(split_exe)
# Set up output structure
out_file_groups = FileList([])
# Set up the condorJob class for the current executable
curr_exe_job = exe_class(workflow.cp, split_exe_tag, num_splits,
out_dir=out_dir)
for input in input_tables:
node = curr_exe_job.create_node(input, tags=tags)
workflow.add_node(node)
out_file_groups += node.output_files
return out_file_groups
|
Function for setting up the splitting jobs as part of the workflow.
Parameters
-----------
workflow : pycbc.workflow.core.Workflow
The Workflow instance that the jobs will be added to.
input_tables : pycbc.workflow.core.FileList
The input files to be split up.
out_dir : path
The directory in which output will be written.
Returns
--------
split_table_outs : pycbc.workflow.core.FileList
The list of split up files as output from this job.
|
def canonicalize_observed_time_series_with_mask(
maybe_masked_observed_time_series):
"""Extract a Tensor with canonical shape and optional mask.
Args:
maybe_masked_observed_time_series: a `Tensor`-like object with shape
`[..., num_timesteps]` or `[..., num_timesteps, 1]`, or a
`tfp.sts.MaskedTimeSeries` containing such an object.
Returns:
masked_time_series: a `tfp.sts.MaskedTimeSeries` namedtuple, in which
the `observed_time_series` is converted to `Tensor` with canonical shape
`[..., num_timesteps, 1]`, and `is_missing` is either `None` or a boolean
`Tensor`.
"""
with tf.compat.v1.name_scope('canonicalize_observed_time_series_with_mask'):
if hasattr(maybe_masked_observed_time_series, 'is_missing'):
observed_time_series = (
maybe_masked_observed_time_series.time_series)
is_missing = maybe_masked_observed_time_series.is_missing
else:
observed_time_series = maybe_masked_observed_time_series
is_missing = None
observed_time_series = tf.convert_to_tensor(value=observed_time_series,
name='observed_time_series')
observed_time_series = _maybe_expand_trailing_dim(observed_time_series)
if is_missing is not None:
is_missing = tf.convert_to_tensor(
value=is_missing, name='is_missing', dtype_hint=tf.bool)
return missing_values_util.MaskedTimeSeries(observed_time_series,
is_missing=is_missing)
|
Extract a Tensor with canonical shape and optional mask.
Args:
maybe_masked_observed_time_series: a `Tensor`-like object with shape
`[..., num_timesteps]` or `[..., num_timesteps, 1]`, or a
`tfp.sts.MaskedTimeSeries` containing such an object.
Returns:
masked_time_series: a `tfp.sts.MaskedTimeSeries` namedtuple, in which
the `observed_time_series` is converted to `Tensor` with canonical shape
`[..., num_timesteps, 1]`, and `is_missing` is either `None` or a boolean
`Tensor`.
|
def _isophote_list_to_table(isophote_list):
"""
Convert an `~photutils.isophote.IsophoteList` instance to
a `~astropy.table.QTable`.
Parameters
----------
isophote_list : list of `~photutils.isophote.Isophote` or a `~photutils.isophote.IsophoteList` instance
A list of isophotes.
Returns
-------
result : `~astropy.table.QTable`
An astropy QTable with the main isophote parameters.
"""
properties = OrderedDict()
properties['sma'] = 'sma'
properties['intens'] = 'intens'
properties['int_err'] = 'intens_err'
properties['eps'] = 'ellipticity'
properties['ellip_err'] = 'ellipticity_err'
properties['pa'] = 'pa'
properties['pa_err'] = 'pa_err'
properties['grad_r_error'] = 'grad_rerr'
properties['ndata'] = 'ndata'
properties['nflag'] = 'flag'
properties['niter'] = 'niter'
properties['stop_code'] = 'stop_code'
isotable = QTable()
for k, v in properties.items():
isotable[v] = np.array([getattr(iso, k) for iso in isophote_list])
if k in ('pa', 'pa_err'):
isotable[v] = isotable[v] * 180. / np.pi * u.deg
return isotable
|
Convert an `~photutils.isophote.IsophoteList` instance to
a `~astropy.table.QTable`.
Parameters
----------
isophote_list : list of `~photutils.isophote.Isophote` or a `~photutils.isophote.IsophoteList` instance
A list of isophotes.
Returns
-------
result : `~astropy.table.QTable`
An astropy QTable with the main isophote parameters.
|
def most_hot(self):
"""
Returns the *Weather* object in the forecast having the highest max
temperature. The temperature is retrieved using the
``get_temperature()['temp_max']`` call; were the 'temp_max' key missing for
every *Weather* instance in the forecast, ``None`` would be returned.
:returns: a *Weather* object or ``None`` if no item in the forecast is
eligible
"""
maxtemp = -270.0 # No one would survive that...
hottest = None
for weather in self._forecast.get_weathers():
d = weather.get_temperature()
if 'temp_max' in d:
if d['temp_max'] > maxtemp:
maxtemp = d['temp_max']
hottest = weather
return hottest
|
Returns the *Weather* object in the forecast having the highest max
temperature. The temperature is retrieved using the
        ``get_temperature()['temp_max']`` call; if the 'temp_max' key is missing
        for every *Weather* instance in the forecast, ``None`` is returned.
:returns: a *Weather* object or ``None`` if no item in the forecast is
eligible
|
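An illustrative call in the style of a pyowm 2.x client; the OWM/daily_forecast surroundings are assumptions, only most_hot comes from the snippet above.
import pyowm

owm = pyowm.OWM('your-API-key')              # hypothetical API key
forecaster = owm.daily_forecast('Milan,it')  # Forecaster wrapping a Forecast
hottest = forecaster.most_hot()
if hottest is not None:
    print(hottest.get_temperature('celsius')['temp_max'])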
def fit_size_models(self, model_names,
model_objs,
input_columns,
output_column="Hail_Size",
output_start=5,
output_step=5,
output_stop=100):
"""
Fit size models to produce discrete pdfs of forecast hail sizes.
Args:
model_names: List of model names
model_objs: List of model objects
input_columns: List of input variables
output_column: Output variable name
output_start: Hail size bin start
output_step: hail size bin step
output_stop: hail size bin stop
"""
print("Fitting size models")
groups = self.data["train"]["member"][self.group_col].unique()
output_start = int(output_start)
output_step = int(output_step)
output_stop = int(output_stop)
for group in groups:
group_data = self.data["train"]["combo"].loc[self.data["train"]["combo"][self.group_col] == group]
group_data.dropna(inplace=True)
group_data = group_data[group_data[output_column] >= output_start]
output_data = group_data[output_column].values.astype(int)
output_data[output_data > output_stop] = output_stop
discrete_data = ((output_data - output_start) // output_step) * output_step + output_start
self.size_models[group] = {}
self.size_models[group]["outputvalues"] = np.arange(output_start, output_stop + output_step, output_step,
dtype=int)
for m, model_name in enumerate(model_names):
print("{0} {1}".format(group, model_name))
self.size_models[group][model_name] = deepcopy(model_objs[m])
self.size_models[group][model_name].fit(group_data[input_columns], discrete_data)
|
Fit size models to produce discrete pdfs of forecast hail sizes.
Args:
model_names: List of model names
model_objs: List of model objects
input_columns: List of input variables
output_column: Output variable name
output_start: Hail size bin start
output_step: hail size bin step
output_stop: hail size bin stop
|
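A hedged sketch of fitting the size models, assuming forecaster is an instance of the enclosing class whose training data dictionaries are already populated; the model names and input columns are illustrative.
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier

forecaster.fit_size_models(
    model_names=["Random Forest", "Gradient Boosting"],
    model_objs=[RandomForestClassifier(n_estimators=200),
                GradientBoostingClassifier()],
    input_columns=["MESH_Max_60min_00.50_max",   # illustrative predictors
                   "SHI_Max_60min_00.50_max"],
    output_column="Hail_Size",
    output_start=5, output_step=5, output_stop=100)
# One fitted classifier per (member group, model name) pair is stored in
# forecaster.size_models, along with the discrete output bin values.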
def linear(m=1, b=0):
''' Return a driver function that can advance a sequence of linear values.
.. code-block:: none
value = m * i + b
Args:
m (float) : a slope for the linear driver
        b (float) : an offset for the linear driver
'''
def f(i):
return m * i + b
return partial(force, sequence=_advance(f))
|
Return a driver function that can advance a sequence of linear values.
.. code-block:: none
value = m * i + b
Args:
m (float) : a slope for the linear driver
        b (float) : an offset for the linear driver
|
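A usage sketch in the style of a Bokeh app, where linear decorates a periodic callback; the ColumnDataSource and curdoc plumbing are assumptions, not part of the snippet above.
from bokeh.io import curdoc
from bokeh.models import ColumnDataSource

source = ColumnDataSource(data=dict(x=[], y=[]))

@linear(m=2, b=10)          # the driver supplies value = 2 * i + 10 each call
def update(value):
    source.stream(dict(x=[value], y=[value ** 2]))

curdoc().add_periodic_callback(update, 100)  # advance the driver every 100 ms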
def setup_argparse():
"""
    Set up the argparse argument parser
    :return: configured argparse.ArgumentParser instance
    :rtype: ArgumentParser
"""
parser = argparse.ArgumentParser(
description='Convert old ini-style GNS3 topologies (<=0.8.7) to '
'the newer version 1+ JSON format')
parser.add_argument('--version',
action='version',
version='%(prog)s ' + __version__)
parser.add_argument('-n', '--name', help='Topology name (default uses the '
'name of the old project '
'directory)')
parser.add_argument('-o', '--output', help='Output directory')
parser.add_argument('topology', nargs='?', default='topology.net',
help='GNS3 .net topology file (default: topology.net)')
parser.add_argument('--debug',
help='Enable debugging output',
action='store_true')
parser.add_argument('-q', '--quiet',
help='Quiet-mode (no output to console)',
action='store_true')
return parser
|
    Set up the argparse argument parser
    :return: configured argparse.ArgumentParser instance
    :rtype: ArgumentParser
|
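A short sketch of consuming the parser from a script entry point; the conversion step itself is only hinted at, since it is not part of the snippet above.
def main():
    parser = setup_argparse()
    args = parser.parse_args()
    if not args.quiet:
        print("Converting {} (output dir: {})".format(args.topology,
                                                      args.output or '.'))
    # args.name, args.debug, etc. would feed the actual conversion logic here.

if __name__ == '__main__':
    main()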