| code | docstring |
|---|---|
def integrate(self, function, lower_bound, upper_bound):
"""
Calculates the integral of the given one dimensional function
in the interval from lower_bound to upper_bound, with the simplex integration method.
"""
ret = 0.0
n = self.nsteps
xStep = (float(upper_bound) - float(lower_bound)) / float(n)
self.log_info("xStep" + str(xStep))
x = lower_bound
val1 = function(x)
self.log_info("val1: " + str(val1))
for i in range(n):
x = (i + 1) * xStep + lower_bound
self.log_info("x: " + str(x))
val2 = function(x)
self.log_info("val2: " + str(val2))
ret += 0.5 * xStep * (val1 + val2)
val1 = val2
return ret
|
Calculates the integral of the given one dimensional function
in the interval from lower_bound to upper_bound, with the simplex integration method.
|
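The loop above is the composite trapezoid rule: each step adds `0.5 * xStep * (val1 + val2)`. A minimal standalone sketch of the same accumulation (hypothetical names, no logging, not tied to the class above):

```python
def trapezoid_integrate(function, lower_bound, upper_bound, nsteps):
    # accumulate 0.5 * width * (f(left) + f(right)) over nsteps sub-intervals
    x_step = (float(upper_bound) - float(lower_bound)) / float(nsteps)
    total = 0.0
    val1 = function(lower_bound)
    for i in range(nsteps):
        val2 = function((i + 1) * x_step + lower_bound)
        total += 0.5 * x_step * (val1 + val2)
        val1 = val2
    return total

# e.g. integrating x**2 over [0, 1] approaches 1/3 as nsteps grows
print(trapezoid_integrate(lambda x: x ** 2, 0.0, 1.0, 1000))
```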
def readlen(args):
"""
%prog readlen fastqfile
Calculate read length, will only try the first N reads. Output min, max, and
avg for each file.
"""
p = OptionParser(readlen.__doc__)
p.set_firstN()
p.add_option("--silent", default=False, action="store_true",
help="Do not print read length stats")
p.add_option("--nocheck", default=False, action="store_true",
help="Do not check file type suffix")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
f, = args
if (not opts.nocheck) and (not is_fastq(f)):
logging.debug("File `{}` does not endswith .fastq or .fq".format(f))
return 0
s = calc_readlen(f, opts.firstN)
if not opts.silent:
print("\t".join(str(x) for x in (f, s.min, s.max, s.mean, s.median)))
return int(s.max)
|
%prog readlen fastqfile
Calculate read length, will only try the first N reads. Output min, max, and
avg for each file.
|
def fftw_normxcorr(templates, stream, pads, threaded=False, *args, **kwargs):
"""
Normalised cross-correlation using the fftw library.
Internally this function uses double precision numbers, which is definitely
required for seismic data. Cross-correlations are computed as the
inverse fft of the dot product of the ffts of the stream and the reversed,
normalised, templates. The cross-correlation is then normalised using the
running mean and standard deviation (not using the N-1 correction) of the
stream and the sums of the normalised templates.
This python function wraps the C-library written by C. Chamberlain for this
purpose.
:param templates: 2D Array of templates
:type templates: np.ndarray
:param stream: 1D array of continuous data
:type stream: np.ndarray
:param pads: List of ints of pad lengths in the same order as templates
:type pads: list
:param threaded:
Whether to use the threaded routine or not - note openMP and python
multiprocessing don't seem to play nice for this.
:type threaded: bool
:return: np.ndarray of cross-correlations
:return: np.ndarray channels used
"""
utilslib = _load_cdll('libutils')
argtypes = [
np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,
flags=native_str('C_CONTIGUOUS')),
ctypes.c_long, ctypes.c_long,
np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,
flags=native_str('C_CONTIGUOUS')),
ctypes.c_long,
np.ctypeslib.ndpointer(dtype=np.float32,
flags=native_str('C_CONTIGUOUS')),
ctypes.c_long,
np.ctypeslib.ndpointer(dtype=np.intc,
flags=native_str('C_CONTIGUOUS')),
np.ctypeslib.ndpointer(dtype=np.intc,
flags=native_str('C_CONTIGUOUS')),
np.ctypeslib.ndpointer(dtype=np.intc,
flags=native_str('C_CONTIGUOUS'))]
restype = ctypes.c_int
if threaded:
func = utilslib.normxcorr_fftw_threaded
else:
func = utilslib.normxcorr_fftw
func.argtypes = argtypes
func.restype = restype
# Generate a template mask
used_chans = ~np.isnan(templates).any(axis=1)
template_length = templates.shape[1]
stream_length = len(stream)
n_templates = templates.shape[0]
fftshape = next_fast_len(template_length + stream_length - 1)
# Normalize and flip the templates
norm = ((templates - templates.mean(axis=-1, keepdims=True)) / (
templates.std(axis=-1, keepdims=True) * template_length))
norm = np.nan_to_num(norm)
ccc = np.zeros((n_templates, stream_length - template_length + 1),
np.float32)
used_chans_np = np.ascontiguousarray(used_chans, dtype=np.intc)
pads_np = np.ascontiguousarray(pads, dtype=np.intc)
variance_warning = np.ascontiguousarray([0], dtype=np.intc)
# If the stream is non-zero but its variance is very low, apply gain
if not np.all(stream == 0) and np.var(stream) < 1e-8:
# Apply gain
stream *= 1e8
warnings.warn("Low variance found for, applying gain "
"to stabilise correlations")
ret = func(
np.ascontiguousarray(norm.flatten(order='C'), np.float32),
template_length, n_templates,
np.ascontiguousarray(stream, np.float32), stream_length,
np.ascontiguousarray(ccc, np.float32), fftshape,
used_chans_np, pads_np, variance_warning)
if ret < 0:
raise MemoryError()
elif ret not in [0, 999]:
print('Error in C code (possible normalisation error)')
print('Maximum ccc %f at %i' % (ccc.max(), ccc.argmax()))
print('Minimum ccc %f at %i' % (ccc.min(), ccc.argmin()))
raise CorrelationError("Internal correlation error")
elif ret == 999:
warnings.warn("Some correlations not computed, are there "
"zeros in data? If not, consider increasing gain.")
if variance_warning[0] and variance_warning[0] > template_length:
warnings.warn(
"Low variance found in {0} positions, check result.".format(
variance_warning[0]))
return ccc, used_chans
|
Normalised cross-correlation using the fftw library.
Internally this function uses double precision numbers, which is definitely
required for seismic data. Cross-correlations are computed as the
inverse fft of the dot product of the ffts of the stream and the reversed,
normalised, templates. The cross-correlation is then normalised using the
running mean and standard deviation (not using the N-1 correction) of the
stream and the sums of the normalised templates.
This python function wraps the C-library written by C. Chamberlain for this
purpose.
:param templates: 2D Array of templates
:type templates: np.ndarray
:param stream: 1D array of continuous data
:type stream: np.ndarray
:param pads: List of ints of pad lengths in the same order as templates
:type pads: list
:param threaded:
Whether to use the threaded routine or not - note openMP and python
multiprocessing don't seem to play nice for this.
:type threaded: bool
:return: np.ndarray of cross-correlations
:return: np.ndarray channels used
|
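A rough pure-NumPy sketch of the frequency-domain normalised cross-correlation the docstring describes, for a single template. This is an illustrative stand-in, not the wrapped C routine; padding, gain and zero-variance handling are simplified:

```python
import numpy as np

def normxcorr_sketch(template, stream):
    # normalise the template as above: zero mean, scaled by std * length
    m, n = len(template), len(stream)
    norm_t = (template - template.mean()) / (template.std() * m)
    # correlation as a convolution of the stream with the reversed template,
    # computed in the frequency domain
    fsize = int(2 ** np.ceil(np.log2(n + m - 1)))
    corr = np.fft.irfft(np.fft.rfft(stream, fsize) *
                        np.fft.rfft(norm_t[::-1], fsize), fsize)[m - 1:n]
    # running mean / standard deviation of the stream over template-length windows
    csum = np.cumsum(np.insert(stream, 0, 0.0))
    csum2 = np.cumsum(np.insert(stream ** 2, 0, 0.0))
    win_mean = (csum[m:] - csum[:-m]) / m
    win_std = np.sqrt(np.maximum((csum2[m:] - csum2[:-m]) / m - win_mean ** 2, 0))
    win_std[win_std == 0] = np.inf          # avoid dividing by zero-variance windows
    # the normalised template sums to ~0, so only the window std matters here
    return (corr - win_mean * norm_t.sum()) / win_std

rng = np.random.default_rng(42)
stream = rng.normal(size=1000)
template = stream[300:400].copy()
print(normxcorr_sketch(template, stream).argmax())   # 300
```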
def get_changes(self, fixer=str.lower,
task_handle=taskhandle.NullTaskHandle()):
"""Fix module names
`fixer` is a function that takes and returns a `str`. Given
the name of a module, it should return the fixed name.
"""
stack = changestack.ChangeStack(self.project, 'Fixing module names')
jobset = task_handle.create_jobset('Fixing module names',
self._count_fixes(fixer) + 1)
try:
while True:
for resource in self._tobe_fixed(fixer):
jobset.started_job(resource.path)
renamer = rename.Rename(self.project, resource)
changes = renamer.get_changes(fixer(self._name(resource)))
stack.push(changes)
jobset.finished_job()
break
else:
break
finally:
jobset.started_job('Reverting to original state')
stack.pop_all()
jobset.finished_job()
return stack.merged()
|
Fix module names
`fixer` is a function that takes and returns a `str`. Given
the name of a module, it should return the fixed name.
|
def memoize(f):
"""Memoization decorator for a function taking one or more arguments."""
def _c(*args, **kwargs):
if not hasattr(f, 'cache'):
f.cache = dict()
key = (args, tuple(sorted(kwargs.items())))  # include kwarg values, not just names
if key not in f.cache:
f.cache[key] = f(*args, **kwargs)
return f.cache[key]
return wraps(f)(_c)
|
Memoization decorator for a function taking one or more arguments.
|
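A short usage sketch of the decorator, restated standalone so it runs on its own; the decorated function and its arguments are made up for illustration:

```python
from functools import wraps

def memoize(f):
    # same idea as above: cache results keyed on positional and keyword args
    def _c(*args, **kwargs):
        if not hasattr(f, 'cache'):
            f.cache = dict()
        key = (args, tuple(sorted(kwargs.items())))
        if key not in f.cache:
            f.cache[key] = f(*args, **kwargs)
        return f.cache[key]
    return wraps(f)(_c)

@memoize
def slow_add(a, b, scale=1):
    print("computing...")          # only printed on a cache miss
    return (a + b) * scale

print(slow_add(1, 2, scale=3))     # computing... then 9
print(slow_add(1, 2, scale=3))     # 9, served from f.cache
```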
def repack_all(self):
"""Repacks the side chains of all Polymers in the Assembly."""
non_na_sequences = [s for s in self.sequences if ' ' not in s]
self.pack_new_sequences(non_na_sequences)
return
|
Repacks the side chains of all Polymers in the Assembly.
|
def to_bytes(self):
r'''
Create bytes from properties
>>> message = DataPacket(nonce='XyZ', instance_id=1234,
... payload='SomeDummyPayloadData')
>>> message.to_bytes()
'\x88XyZ\x00\x04\xd2\x00SomeDummyPayloadData'
'''
# Verify that properties make sense
self.sanitize()
# Set the flags in the first 8 bits
bitstream = BitArray('bool=%d, bool=%d, bool=%d, bool=%d, bool=%d'
% (self.nonce is not None,
self.lsb is not None,
self.echo_nonce_request,
(self.source_map_version is not None or
self.destination_map_version is not None),
self.instance_id is not None))
# Add padding
bitstream += BitArray(3)
# Add the 24 bit nonce or the map-versions if present
if self.nonce is not None:
# Nonce
bitstream += BitArray(bytes=self.nonce)
elif self.source_map_version is not None \
or self.destination_map_version is not None:
# Map versions
bitstream += BitArray(('uint:12=%d, uint:12=%d')
% (self.source_map_version,
self.destination_map_version))
else:
# Padding
bitstream += BitArray(24)
# Add instance-id if present
if self.instance_id is not None:
bitstream += BitArray('uint:24=%d' % self.instance_id)
lsb_bits = 8
else:
lsb_bits = 32
# Add LSBs if present
if self.lsb is not None:
flags = map(lambda f: f and 'bool=1' or 'bool=0',
self.lsb[::-1])
bitstream += BitArray(','.join(flags))
else:
bitstream += BitArray(lsb_bits)
return bitstream.bytes + bytes(self.payload)
|
r'''
Create bytes from properties
>>> message = DataPacket(nonce='XyZ', instance_id=1234,
... payload='SomeDummyPayloadData')
>>> message.to_bytes()
'\x88XyZ\x00\x04\xd2\x00SomeDummyPayloadData'
|
def get_vlan_brief_output_vlan_interface_interface_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vlan_brief = ET.Element("get_vlan_brief")
config = get_vlan_brief
output = ET.SubElement(get_vlan_brief, "output")
vlan = ET.SubElement(output, "vlan")
vlan_id_key = ET.SubElement(vlan, "vlan-id")
vlan_id_key.text = kwargs.pop('vlan_id')
interface = ET.SubElement(vlan, "interface")
interface_name_key = ET.SubElement(interface, "interface-name")
interface_name_key.text = kwargs.pop('interface_name')
interface_type = ET.SubElement(interface, "interface-type")
interface_type.text = kwargs.pop('interface_type')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
def get_patient_pharmacies(self, patient_id,
patients_favorite_only='N'):
"""
invokes TouchWorksMagicConstants.ACTION_GET_PATIENT_PHARAMCIES action
:return: JSON response
"""
magic = self._magic_json(
action=TouchWorksMagicConstants.ACTION_GET_PATIENT_PHARAMCIES,
patient_id=patient_id,
parameter1=patients_favorite_only)
response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
result = self._get_results_or_raise_if_magic_invalid(
magic,
response,
TouchWorksMagicConstants.RESULT_GET_PATIENT_PHARAMCIES)
return result
|
invokes TouchWorksMagicConstants.ACTION_GET_PATIENT_PHARAMCIES action
:return: JSON response
|
def get_instance(self, payload):
"""
Build an instance of InviteInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.chat.v2.service.channel.invite.InviteInstance
:rtype: twilio.rest.chat.v2.service.channel.invite.InviteInstance
"""
return InviteInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
channel_sid=self._solution['channel_sid'],
)
|
Build an instance of InviteInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.chat.v2.service.channel.invite.InviteInstance
:rtype: twilio.rest.chat.v2.service.channel.invite.InviteInstance
|
def begin_operation(self, conn_or_internal_id, op_name, callback, timeout):
"""Begin an operation on a connection
Args:
conn_or_internal_id (string, int): Either an integer connection id or a string
internal_id
op_name (string): The name of the operation that we are starting (stored in
the connection's microstate)
callback (callable): Callback to call when this operation either
succeeds or fails
timeout (float): How long to allow this connection attempt to proceed
without timing it out (in seconds)
"""
data = {
'id': conn_or_internal_id,
'callback': callback,
'operation_name': op_name
}
action = ConnectionAction('begin_operation', data, timeout=timeout, sync=False)
self._actions.put(action)
|
Begin an operation on a connection
Args:
conn_or_internal_id (string, int): Either an integer connection id or a string
internal_id
op_name (string): The name of the operation that we are starting (stored in
the connection's microstate)
callback (callable): Callback to call when this operation either
succeeds or fails
timeout (float): How long to allow this connection attempt to proceed
without timing it out (in seconds)
|
def insert(self, loc, item):
"""
Make new Index inserting new item at location.
Follows Python list.append semantics for negative values.
Parameters
----------
loc : int
item : object
Returns
-------
new_index : Index
"""
_self = np.asarray(self)
item = self._coerce_scalar_to_index(item)._ndarray_values
idx = np.concatenate((_self[:loc], item, _self[loc:]))
return self._shallow_copy_with_infer(idx)
|
Make new Index inserting new item at location.
Follows Python list.append semantics for negative values.
Parameters
----------
loc : int
item : object
Returns
-------
new_index : Index
|
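A plain-NumPy illustration of the concatenate-based insert (the real method additionally coerces `item` through the index machinery); the values are invented:

```python
import numpy as np

arr = np.array([10, 20, 30, 40])
loc, item = 2, np.array([25])
print(np.concatenate((arr[:loc], item, arr[loc:])))    # [10 20 25 30 40]
# negative loc follows list semantics, e.g. loc=-1 inserts before the last value
print(np.concatenate((arr[:-1], item, arr[-1:])))      # [10 20 30 25 40]
```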
def getcwd(cls):
"""
Provide a context dependent current working directory. This method
will return the directory currently holding the lock.
"""
if not hasattr(cls._tl, "cwd"):
cls._tl.cwd = os.getcwd()
return cls._tl.cwd
|
Provide a context dependent current working directory. This method
will return the directory currently holding the lock.
|
def show_vcs_output_nodes_disconnected_from_cluster(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_vcs = ET.Element("show_vcs")
config = show_vcs
output = ET.SubElement(show_vcs, "output")
nodes_disconnected_from_cluster = ET.SubElement(output, "nodes-disconnected-from-cluster")
nodes_disconnected_from_cluster.text = kwargs.pop('nodes_disconnected_from_cluster')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
def enable_digital_reporting(self, pin):
"""
Enables digital reporting. By turning reporting on for all 8 bits in the "port" -
this is part of Firmata's protocol specification.
:param pin: Pin and all pins for this port
:return: No return value
"""
port = pin // 8
command = [self._command_handler.REPORT_DIGITAL + port, self.REPORTING_ENABLE]
self._command_handler.send_command(command)
|
Enables digital reporting. By turning reporting on for all 8 bits in the "port" -
this is part of Firmata's protocol specification.
:param pin: Pin and all pins for this port
:return: No return value
|
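The pin-to-port mapping the docstring refers to, shown standalone; the byte values here are assumptions (Firmata's REPORT_DIGITAL is conventionally 0xD0) rather than constants taken from this class:

```python
REPORT_DIGITAL, REPORTING_ENABLE = 0xD0, 1   # assumed Firmata constants

pin = 10
port = pin // 8                               # pin 10 sits on digital port 1
command = [REPORT_DIGITAL + port, REPORTING_ENABLE]
print([hex(b) for b in command])              # ['0xd1', '0x1']
```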
def __get_user(self, login):
"""Get user and org data for the login"""
user = {}
if not login:
return user
user_raw = self.client.user(login)
user = json.loads(user_raw)
user_orgs_raw = \
self.client.user_orgs(login)
user['organizations'] = json.loads(user_orgs_raw)
return user
|
Get user and org data for the login
|
def _remove_wrappers(self):
"""
Uninstall the PluginLoader monkey patches.
"""
ansible_mitogen.loaders.action_loader.get = action_loader__get
ansible_mitogen.loaders.connection_loader.get = connection_loader__get
ansible.executor.process.worker.WorkerProcess.run = worker__run
|
Uninstall the PluginLoader monkey patches.
|
def addFeature(self, features,
gdbVersion=None,
rollbackOnFailure=True):
""" Adds a single feature to the service
Inputs:
feature - list of common.Feature object or a single
common.Feature Object, a FeatureSet object, or a
list of dictionary objects
gdbVersion - Geodatabase version to apply the edits
rollbackOnFailure - Optional parameter to specify if the
edits should be applied only if all
submitted edits succeed. If false, the
server will apply the edits that succeed
even if some of the submitted edits fail.
If true, the server will apply the edits
only if all edits succeed. The default
value is true.
Output:
JSON message as dictionary
"""
url = self._url + "/addFeatures"
params = {
"f" : "json"
}
if gdbVersion is not None:
params['gdbVersion'] = gdbVersion
if isinstance(rollbackOnFailure, bool):
params['rollbackOnFailure'] = rollbackOnFailure
if isinstance(features, list) and \
len(features) > 0:
if isinstance(features[0], Feature):
params['features'] = json.dumps([feature.asDictionary for feature in features],
default=_date_handler)
elif isinstance(features[0], dict):
params['features'] = json.dumps(features,
default=_date_handler)
elif isinstance(features, Feature):
params['features'] = json.dumps([features.asDictionary],
default=_date_handler)
elif isinstance(features, FeatureSet):
params['features'] = json.dumps([feature.asDictionary for feature in features.features],
default=_date_handler)
else:
return None
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
|
Adds a single feature to the service
Inputs:
feature - list of common.Feature object or a single
common.Feature Object, a FeatureSet object, or a
list of dictionary objects
gdbVersion - Geodatabase version to apply the edits
rollbackOnFailure - Optional parameter to specify if the
edits should be applied only if all
submitted edits succeed. If false, the
server will apply the edits that succeed
even if some of the submitted edits fail.
If true, the server will apply the edits
only if all edits succeed. The default
value is true.
Output:
JSON message as dictionary
|
def pprint(self, indent: str = ' ', remove_comments=False):
"""Deprecated, use self.pformat instead."""
warn(
'pprint method is deprecated, use pformat instead.',
DeprecationWarning,
)
return self.pformat(indent, remove_comments)
|
Deprecated, use self.pformat instead.
|
def on_entry_click(self, event):
"""
function that gets called whenever entry is clicked
"""
if event.widget.config('fg')[4] == 'grey':
event.widget.delete(0, "end")  # delete all the text in the entry
event.widget.insert(0, '')  # insert a blank for user input
event.widget.config(fg='black')
|
function that gets called whenever entry is clicked
|
def _fmt_args_kwargs(self, *some_args, **some_kwargs):
"""Helper to convert the given args and kwargs into a string."""
if some_args:
out_args = str(some_args).lstrip('(').rstrip(',)')
if some_kwargs:
out_kwargs = ', '.join([str(i).lstrip('(').rstrip(')').replace(', ',': ') for i in [
(k,some_kwargs[k]) for k in sorted(some_kwargs.keys())]])
if some_args and some_kwargs:
return out_args + ', ' + out_kwargs
elif some_args:
return out_args
elif some_kwargs:
return out_kwargs
else:
return ''
|
Helper to convert the given args and kwargs into a string.
|
def find_particles_in_tile(positions, tile):
"""
Finds the particles in a tile, as numpy.ndarray of ints.
Parameters
----------
positions : `numpy.ndarray`
[N,3] array of the particle positions to check in the tile
tile : :class:`peri.util.Tile` instance
Tile of the region inside which to check for particles.
Returns
-------
numpy.ndarray, int
The indices of the particles in the tile.
"""
bools = tile.contains(positions)
return np.arange(bools.size)[bools]
|
Finds the particles in a tile, as numpy.ndarray of ints.
Parameters
----------
positions : `numpy.ndarray`
[N,3] array of the particle positions to check in the tile
tile : :class:`peri.util.Tile` instance
Tile of the region inside which to check for particles.
Returns
-------
numpy.ndarray, int
The indices of the particles in the tile.
|
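A small sketch of the boolean-mask to integer-index conversion, with an axis-aligned box standing in for the `peri.util.Tile` so it runs without the library:

```python
import numpy as np

positions = np.array([[1.0, 1.0, 1.0],
                      [5.0, 5.0, 5.0],
                      [2.0, 3.0, 1.0]])
lo, hi = np.zeros(3), np.full(3, 4.0)
bools = np.all((positions >= lo) & (positions < hi), axis=1)   # stand-in for tile.contains
print(np.arange(bools.size)[bools])                            # [0 2], same as np.nonzero(bools)[0]
```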
def _choose_port(self):
"""
Return a port number from 5000-5999 based on the environment name
to be used as a default when the user hasn't selected one.
"""
# instead of random let's base it on the name chosen (and the site name)
return 5000 + unpack('Q',
sha((self.name + self.site_name)
.decode('ascii')).digest()[:8])[0] % 1000
|
Return a port number from 5000-5999 based on the environment name
to be used as a default when the user hasn't selected one.
|
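A Python 3 sketch of the same deterministic port derivation (the original `.decode('ascii')` call is Python 2 flavoured); `name` and `site_name` are placeholder values:

```python
import hashlib
from struct import unpack

name, site_name = "staging", "example.org"        # placeholders
digest = hashlib.sha1((name + site_name).encode("ascii")).digest()
port = 5000 + unpack("Q", digest[:8])[0] % 1000   # always in 5000-5999
print(port)
```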
def steam64_from_url(url, http_timeout=30):
"""
Takes a Steam Community url and returns steam64 or None
.. note::
Each call makes a http request to ``steamcommunity.com``
.. note::
For a reliable resolving of vanity urls use ``ISteamUser.ResolveVanityURL`` web api
:param url: steam community url
:type url: :class:`str`
:param http_timeout: how long to wait on http request before turning ``None``
:type http_timeout: :class:`int`
:return: steam64, or ``None`` if ``steamcommunity.com`` is down
:rtype: :class:`int` or :class:`None`
Example URLs::
https://steamcommunity.com/gid/[g:1:4]
https://steamcommunity.com/gid/103582791429521412
https://steamcommunity.com/groups/Valve
https://steamcommunity.com/profiles/[U:1:12]
https://steamcommunity.com/profiles/76561197960265740
https://steamcommunity.com/id/johnc
"""
match = re.match(r'^(?P<clean_url>https?://steamcommunity.com/'
r'(?P<type>profiles|id|gid|groups)/(?P<value>.*?))(?:/(?:.*)?)?$', url)
if not match:
return None
web = make_requests_session()
try:
# user profiles
if match.group('type') in ('id', 'profiles'):
text = web.get(match.group('clean_url'), timeout=http_timeout).text
data_match = re.search("g_rgProfileData = (?P<json>{.*?});[ \t\r]*\n", text)
if data_match:
data = json.loads(data_match.group('json'))
return int(data['steamid'])
# group profiles
else:
text = web.get(match.group('clean_url'), timeout=http_timeout).text
data_match = re.search("'steam://friends/joinchat/(?P<steamid>\d+)'", text)
if data_match:
return int(data_match.group('steamid'))
except requests.exceptions.RequestException:
return None
|
Takes a Steam Community url and returns steam64 or None
.. note::
Each call makes a http request to ``steamcommunity.com``
.. note::
For a reliable resolving of vanity urls use ``ISteamUser.ResolveVanityURL`` web api
:param url: steam community url
:type url: :class:`str`
:param http_timeout: how long to wait on http request before turning ``None``
:type http_timeout: :class:`int`
:return: steam64, or ``None`` if ``steamcommunity.com`` is down
:rtype: :class:`int` or :class:`None`
Example URLs::
https://steamcommunity.com/gid/[g:1:4]
https://steamcommunity.com/gid/103582791429521412
https://steamcommunity.com/groups/Valve
https://steamcommunity.com/profiles/[U:1:12]
https://steamcommunity.com/profiles/76561197960265740
https://steamcommunity.com/id/johnc
|
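What the URL regex above captures, shown standalone with one of the example URLs from the docstring:

```python
import re

pattern = (r'^(?P<clean_url>https?://steamcommunity.com/'
           r'(?P<type>profiles|id|gid|groups)/(?P<value>.*?))(?:/(?:.*)?)?$')
m = re.match(pattern, 'https://steamcommunity.com/id/johnc/games')
print(m.group('type'), m.group('value'))   # id johnc
print(m.group('clean_url'))                # https://steamcommunity.com/id/johnc
```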
def tokenized(self, delimiter=' ', overlap_threshold=0.1):
"""
Return an ordered list of tokens based on all labels.
Joins all tokens from all labels (``label.tokenized()``).
If the overlap between two labels is greater than ``overlap_threshold``,
an Exception is thrown.
Args:
delimiter (str): The delimiter used to split labels into tokens. (default: space)
overlap_threshold (float): Maximum overlap between two consecutive labels.
Returns:
list: A list containing tokens of all labels ordered according to the label order.
Example:
>>> ll = LabelList(idx='some', labels=[
>>> Label('a d q', start=0, end=4),
>>> Label('b', start=3.95, end=6.0),
>>> Label('c a', start=7.0, end=10.2),
>>> Label('f g', start=10.3, end=14.0)
>>> ])
>>> ll.tokenized(delimiter=' ', overlap_threshold=0.1)
['a', 'd', 'q', 'b', 'c', 'a', 'f', 'g']
"""
sorted_by_start = sorted(self.labels)
tokens = []
last_label_end = None
for label in sorted_by_start:
if last_label_end is None or (last_label_end - label.start < overlap_threshold and last_label_end > 0):
tokens.extend(label.tokenized(delimiter=delimiter))
last_label_end = label.end
else:
raise ValueError('Labels overlap, not able to define the correct order')
return tokens
|
Return an ordered list of tokens based on all labels.
Joins all tokens from all labels (``label.tokenized()``).
If the overlap between two labels is greater than ``overlap_threshold``,
an Exception is thrown.
Args:
delimiter (str): The delimiter used to split labels into tokens. (default: space)
overlap_threshold (float): Maximum overlap between two consecutive labels.
Returns:
list: A list containing tokens of all labels ordered according to the label order.
Example:
>>> ll = LabelList(idx='some', labels=[
>>> Label('a d q', start=0, end=4),
>>> Label('b', start=3.95, end=6.0),
>>> Label('c a', start=7.0, end=10.2),
>>> Label('f g', start=10.3, end=14.0)
>>> ])
>>> ll.tokenized(delimiter=' ', overlap_threshold=0.1)
['a', 'd', 'q', 'b', 'c', 'a', 'f', 'g']
|
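The ordering and overlap check, restated on plain `(text, start, end)` tuples so it runs without the Label/LabelList classes; times are taken from the docstring example:

```python
labels = [("a d q", 0.0, 4.0), ("b", 3.95, 6.0), ("c a", 7.0, 10.2), ("f g", 10.3, 14.0)]
overlap_threshold = 0.1

tokens, last_end = [], None
for text, start, end in sorted(labels, key=lambda l: l[1]):
    if last_end is not None and last_end - start >= overlap_threshold:
        raise ValueError('Labels overlap, not able to define the correct order')
    tokens.extend(text.split(' '))
    last_end = end
print(tokens)   # ['a', 'd', 'q', 'b', 'c', 'a', 'f', 'g']
```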
async def post(self, public_key):
"""Accepting offer by buyer
Function accepts:
- cid
- buyer access string
- buyer public key
- seller public key
"""
logging.debug("[+] -- Deal debugging. ")
if settings.SIGNATURE_VERIFICATION:
super().verify()
# Check if message contains required data
try:
body = json.loads(self.request.body)
except:
self.set_status(400)
self.write({"error":400, "reason":"Unexpected data format. JSON required"})
raise tornado.web.Finish
logging.debug("\n Body")
logging.debug(body)
if isinstance(body["message"], str):
message = json.loads(body["message"])
elif isinstance(body["message"], dict):
message = body["message"]
cid = message.get("cid")
buyer_pubkey = message.get("buyer_pubkey")
buyer_access_string = message.get("buyer_access_string")
seller_access_string = message.get("seller_access_string")
access_type = message.get("access_type")
coinid = message.get("coinid")
# check passes data
if not all([buyer_access_string, cid, buyer_pubkey, coinid]):
self.set_status(400)
self.write({"error":400, "reason":"Missed required fields"})
raise tornado.web.Finish
if coinid in settings.bridges.keys():
self.account.blockchain.setendpoint(settings.bridges[coinid])
else:
self.set_status(400)
self.write({"error":400, "reason":"Invalid coin ID"})
raise tornado.web.Finish
# Sellcontent
buyer_address = self.account.validator[coinid](buyer_pubkey)
# Check if accounts exists
seller_account = await self.account.getaccountdata(public_key=public_key)
logging.debug("\n Seller account")
logging.debug(seller_account)
try:
error_code = seller_account["error"]
except:
pass
else:
self.set_status(error_code)
self.write(seller_account)
raise tornado.web.Finish
buyer_account = await self.account.getaccountdata(public_key=buyer_pubkey)
logging.debug("\n Buyer account")
logging.debug(buyer_account)
try:
error_code = buyer_account["error"]
except:
pass
else:
self.set_status(error_code)
self.write(buyer_account)
raise tornado.web.Finish
# Check if content belongs to current account
owneraddr = await self.account.blockchain.ownerbycid(cid=cid)
if owneraddr != self.account.validator[coinid](public_key):
self.set_status(403)
self.write({"error":403, "reason":"Forbidden. Profile owner does not match."})
raise tornado.web.Finish
#Get buyers balance
balances = await self.account.balance.get_wallets(coinid=coinid,
uid=buyer_account["id"])
if isinstance(balances, dict):
if "error" in balances.keys():
self.set_status(balances["error"])
self.write(balances)
raise tornado.web.Finish
# Get difference with balance and price
get_price = await self.account.blockchain.getoffer(buyer_address=buyer_address,
cid=cid)
price = get_price["price"]
logging.debug("\n Price")
logging.debug(price)
for w in balances["wallets"]:
if "PUT" in w.values() or "PUTTEST" in w.values():
balance = w
difference = int(balance["amount_frozen"]) - int(price)
if difference >= 0:
if access_type == "write_access":
logging.debug("\n Write access")
# Fee
fee = await billing.change_owner_fee(cid=cid, new_owner=buyer_pubkey)
if "error" in fee.keys():
self.set_status(fee["error"])
self.write(fee)
raise tornado.web.Finish
# Change content owner
chownerdata = {
"cid":cid,
"new_owner": buyer_address,
"access_string": buyer_access_string,
"seller_public_key": public_key
}
response = await self.account.blockchain.changeowner(**chownerdata)
logging.debug("\n Bridge change owner")
logging.debug(response)
new_owner = await self.account.changeowner(cid=cid,
public_key=buyer_account["public_key"],
coinid=coinid)
logging.debug("\n Database new owner")
logging.debug(new_owner)
elif access_type == "read_access":
logging.debug("\n Read access")
# Fee
fee = await billing.sell_content_fee(cid=cid, new_owner=buyer_pubkey)
if "error" in fee.keys():
self.set_status(fee["error"])
self.write(fee)
raise tornado.web.Finish
selldata = {
"cid":cid,
"buyer_address":buyer_address,
"access_string":buyer_access_string,
"seller_public_key":public_key
}
response = await self.account.blockchain.sellcontent(**selldata)
logging.debug("\n Bridge sell content")
# Write cid to database
check = await self.account.setuserscontent(public_key=buyer_account["public_key"],
hash=None,coinid=coinid, cid=cid,
txid=response["result"]["txid"],
access="deal")
logging.debug(check)
# Increment and decrement balances of seller and buyer
coinid = "PUT"
unfreeze = await self.account.balance.unfreeze(uid=buyer_account["id"],
amount=price, coinid=coinid)
logging.debug("\n Unfreeze buyer")
logging.debug(unfreeze)
sub_active = await self.account.balance.sub_active(uid=buyer_account["id"],
coinid=coinid, amount=price)
logging.debug("\n Sub active")
logging.debug(sub_active)
add_frozen = await self.account.balance.add_frozen(uid=seller_account["id"],
amount=price, coinid=coinid)
logging.debug("\n Add frozen")
logging.debug(add_frozen)
# Write entry with txid to database
new_deal = await self.account.balance.registerdeal(uid=seller_account["id"],
public_key=seller_account["public_key"],
txid=response["result"]["txid"],
coinid=coinid, cid=cid)
del response["result"]
del response["contract_owner_hex"]
self.write(response)
else:
# If Insufficient funds
self.set_status(402)
self.write({"error":402, "reason":"Insufficient funds of buyer"})
raise tornado.web.Finish
|
Accepting offer by buyer
Function accepts:
- cid
- buyer access string
- buyer public key
- seller public key
|
def shrink(self):
"""
Calculate the Constant-Correlation covariance matrix.
:return: shrunk sample covariance matrix
:rtype: np.ndarray
"""
x = np.nan_to_num(self.X.values)
# de-mean returns
t, n = np.shape(x)
meanx = x.mean(axis=0)
x = x - np.tile(meanx, (t, 1))
xmkt = x.mean(axis=1).reshape(t, 1)
# compute sample covariance matrix
sample = np.cov(np.append(x, xmkt, axis=1), rowvar=False) * (t - 1) / t
covmkt = sample[0:n, n].reshape(n, 1)
varmkt = sample[n, n]
sample = sample[:n, :n]
prior = np.dot(covmkt, covmkt.T) / varmkt
prior[np.eye(n) == 1] = np.diag(sample)
# compute shrinkage parameters
if self.delta is None:
c = np.linalg.norm(sample - prior, "fro") ** 2
y = x ** 2
p = 1 / t * np.sum(np.dot(y.T, y)) - np.sum(sample ** 2)
# r is divided into diagonal
# and off-diagonal terms, and the off-diagonal term
# is itself divided into smaller terms
rdiag = 1 / t * np.sum(y ** 2) - sum(np.diag(sample) ** 2)
z = x * np.tile(xmkt, (n,))
v1 = 1 / t * np.dot(y.T, z) - np.tile(covmkt, (n,)) * sample
roff1 = (
np.sum(v1 * np.tile(covmkt, (n,)).T) / varmkt
- np.sum(np.diag(v1) * covmkt.T) / varmkt
)
v3 = 1 / t * np.dot(z.T, z) - varmkt * sample
roff3 = (
np.sum(v3 * np.dot(covmkt, covmkt.T)) / varmkt ** 2
- np.sum(np.diag(v3).reshape(-1, 1) * covmkt ** 2) / varmkt ** 2
)
roff = 2 * roff1 - roff3
r = rdiag + roff
# compute shrinkage constant
k = (p - r) / c
shrinkage = max(0, min(1, k / t))
self.delta = shrinkage
else:
# use specified constant
shrinkage = self.delta
# compute the estimator
sigma = shrinkage * prior + (1 - shrinkage) * sample
return self.format_and_annualise(sigma)
|
Calculate the Constant-Correlation covariance matrix.
:return: shrunk sample covariance matrix
:rtype: np.ndarray
|
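The final step is a convex combination of the structured prior and the sample covariance; a toy illustration of just that blending (numbers invented):

```python
import numpy as np

sample = np.array([[0.040, 0.018],
                   [0.018, 0.090]])
prior = np.array([[0.040, 0.020],
                  [0.020, 0.090]])           # same diagonal, structured off-diagonal
shrinkage = 0.3                              # delta estimated above, or user-supplied
sigma = shrinkage * prior + (1 - shrinkage) * sample
print(sigma)
```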
def get_notification(self, id):
"""
Return a Notification object.
:param id: The id of the notification object to return.
"""
url = self._base_url + "/3/notification/{0}".format(id)
resp = self._send_request(url)
return Notification(resp, self)
|
Return a Notification object.
:param id: The id of the notification object to return.
|
def get_dag_configs(self) -> Dict[str, Dict[str, Any]]:
"""
Returns configuration for each DAG in the factory
:returns: dict with configuration for dags
"""
return {dag: self.config[dag] for dag in self.config.keys() if dag != "default"}
|
Returns configuration for each DAG in the factory
:returns: dict with configuration for dags
|
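The same filtering pattern on a hypothetical config dict: the "default" entry is dropped and everything else is kept:

```python
config = {
    "default": {"schedule_interval": "@daily"},
    "etl_sales": {"schedule_interval": "@hourly"},
    "etl_users": {"schedule_interval": "@daily"},
}
dag_configs = {dag: config[dag] for dag in config.keys() if dag != "default"}
print(sorted(dag_configs))   # ['etl_sales', 'etl_users']
```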
def get_connected(self):
"""
Returns the connection status
:returns: a boolean that indicates if connected.
"""
connected = c_int32()
result = self.library.Cli_GetConnected(self.pointer, byref(connected))
check_error(result, context="client")
return bool(connected)
|
Returns the connection status
:returns: a boolean that indicates if connected.
|
def setValue(self, value):
"""
Sets the value that will be used for this query instance.
:param value <variant>
"""
self.__value = projex.text.decoded(value) if isinstance(value, (str, unicode)) else value
|
Sets the value that will be used for this query instance.
:param value <variant>
|
def history_file(self, location=None):
"""Return history file location.
"""
if location:
# Hardcoded location passed from the config file.
if os.path.exists(location):
return location
else:
logger.warn("The specified history file %s doesn't exist",
location)
filenames = []
for base in ['CHANGES', 'HISTORY', 'CHANGELOG']:
filenames.append(base)
for extension in ['rst', 'txt', 'markdown']:
filenames.append('.'.join([base, extension]))
history = self.filefind(filenames)
if history:
return history
|
Return history file location.
|
def create_key_ring(
self,
parent,
key_ring_id,
key_ring,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Create a new ``KeyRing`` in a given Project and Location.
Example:
>>> from google.cloud import kms_v1
>>>
>>> client = kms_v1.KeyManagementServiceClient()
>>>
>>> parent = client.location_path('[PROJECT]', '[LOCATION]')
>>>
>>> # TODO: Initialize `key_ring_id`:
>>> key_ring_id = ''
>>>
>>> # TODO: Initialize `key_ring`:
>>> key_ring = {}
>>>
>>> response = client.create_key_ring(parent, key_ring_id, key_ring)
Args:
parent (str): Required. The resource name of the location associated with the
``KeyRings``, in the format ``projects/*/locations/*``.
key_ring_id (str): Required. It must be unique within a location and match the regular
expression ``[a-zA-Z0-9_-]{1,63}``
key_ring (Union[dict, ~google.cloud.kms_v1.types.KeyRing]): A ``KeyRing`` with initial field values.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.kms_v1.types.KeyRing`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.kms_v1.types.KeyRing` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "create_key_ring" not in self._inner_api_calls:
self._inner_api_calls[
"create_key_ring"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_key_ring,
default_retry=self._method_configs["CreateKeyRing"].retry,
default_timeout=self._method_configs["CreateKeyRing"].timeout,
client_info=self._client_info,
)
request = service_pb2.CreateKeyRingRequest(
parent=parent, key_ring_id=key_ring_id, key_ring=key_ring
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["create_key_ring"](
request, retry=retry, timeout=timeout, metadata=metadata
)
|
Create a new ``KeyRing`` in a given Project and Location.
Example:
>>> from google.cloud import kms_v1
>>>
>>> client = kms_v1.KeyManagementServiceClient()
>>>
>>> parent = client.location_path('[PROJECT]', '[LOCATION]')
>>>
>>> # TODO: Initialize `key_ring_id`:
>>> key_ring_id = ''
>>>
>>> # TODO: Initialize `key_ring`:
>>> key_ring = {}
>>>
>>> response = client.create_key_ring(parent, key_ring_id, key_ring)
Args:
parent (str): Required. The resource name of the location associated with the
``KeyRings``, in the format ``projects/*/locations/*``.
key_ring_id (str): Required. It must be unique within a location and match the regular
expression ``[a-zA-Z0-9_-]{1,63}``
key_ring (Union[dict, ~google.cloud.kms_v1.types.KeyRing]): A ``KeyRing`` with initial field values.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.kms_v1.types.KeyRing`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.kms_v1.types.KeyRing` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
|
def get_draw(self, index: Index, additional_key: Any=None) -> pd.Series:
"""Get an indexed sequence of floats pulled from a uniform distribution over [0.0, 1.0)
Parameters
----------
index :
An index whose length is the number of random draws made
and which indexes the returned `pandas.Series`.
additional_key :
Any additional information used to seed random number generation.
Returns
-------
pd.Series
A series of random numbers indexed by the provided `pandas.Index`.
"""
if self._for_initialization:
draw = random(self._key(additional_key), pd.Index(range(len(index))), self.index_map)
draw.index = index
else:
draw = random(self._key(additional_key), index, self.index_map)
return draw
|
Get an indexed sequence of floats pulled from a uniform distribution over [0.0, 1.0)
Parameters
----------
index :
An index whose length is the number of random draws made
and which indexes the returned `pandas.Series`.
additional_key :
Any additional information used to seed random number generation.
Returns
-------
pd.Series
A series of random numbers indexed by the provided `pandas.Index`.
|
def _line_parse(line):
"""Removes line ending characters and returns a tuple (`stripped_line`,
`is_terminated`).
"""
if line[-2:] in ["\r\n", b"\r\n"]:
return line[:-2], True
elif line[-1:] in ["\r", "\n", b"\r", b"\n"]:
return line[:-1], True
return line, False
|
Removes line ending characters and returns a tuple (`stripped_line`,
`is_terminated`).
|
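Behaviour sketch for both str and bytes input (the function above is restated so the snippet runs standalone):

```python
def _line_parse(line):
    if line[-2:] in ["\r\n", b"\r\n"]:
        return line[:-2], True
    elif line[-1:] in ["\r", "\n", b"\r", b"\n"]:
        return line[:-1], True
    return line, False

print(_line_parse("data\r\n"))   # ('data', True)
print(_line_parse(b"data\n"))    # (b'data', True)
print(_line_parse("data"))       # ('data', False)
```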
def process(self):
"""Collect the results.
Raises:
DFTimewolfError: if no files specified
"""
client = self._get_client_by_hostname(self.host)
self._await_flow(client, self.flow_id)
collected_flow_data = self._download_files(client, self.flow_id)
if collected_flow_data:
print('{0:s}: Downloaded: {1:s}'.format(
self.flow_id, collected_flow_data))
fqdn = client.data.os_info.fqdn.lower()
self.state.output.append((fqdn, collected_flow_data))
|
Collect the results.
Raises:
DFTimewolfError: if no files specified
|
def getTemplate(self, uri, meta=None):
"""Return the template for an action. Cache the result. Can use an optional meta parameter with meta information"""
if not meta:
metaKey = self.cacheKey + '_templatesmeta_cache_' + uri
meta = cache.get(metaKey, None)
if not meta:
meta = self.getMeta(uri)
cache.set(metaKey, meta, 15)
if not meta: # No meta, can return a template
return None
# Let's find the template in the cache
action = urlparse(uri).path
templateKey = self.cacheKey + '_templates_' + action + '_' + meta['template_tag']
template = cache.get(templateKey, None)
# Nothing found -> Retrieve it from the server and cache it
if not template:
r = self.doQuery('template/' + uri)
if r.status_code == 200:  # Use the content if the request succeeded; otherwise template stays None
template = r.content
cache.set(templateKey, template, None) # None = Cache forever
return template
|
Return the template for an action. Cache the result. Can use an optional meta parameter with meta information
|
def get_account_holds(self, account_id, **kwargs):
""" Get holds on an account.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Holds are placed on an account for active orders or
pending withdraw requests.
As an order is filled, the hold amount is updated. If an order
is canceled, any remaining hold is removed. For a withdraw, once
it is completed, the hold is removed.
The `type` field will indicate why the hold exists. The hold
type is 'order' for holds related to open orders and 'transfer'
for holds related to a withdraw.
The `ref` field contains the id of the order or transfer which
created the hold.
Args:
account_id (str): Account id to get holds of.
kwargs (dict): Additional HTTP request parameters.
Returns:
generator(list): Hold information for the account. Example::
[
{
"id": "82dcd140-c3c7-4507-8de4-2c529cd1a28f",
"account_id": "e0b3f39a-183d-453e-b754-0c13e5bab0b3",
"created_at": "2014-11-06T10:34:47.123456Z",
"updated_at": "2014-11-06T10:40:47.123456Z",
"amount": "4.23",
"type": "order",
"ref": "0a205de4-dd35-4370-a285-fe8fc375a273",
},
{
...
}
]
"""
endpoint = '/accounts/{}/holds'.format(account_id)
return self._send_paginated_message(endpoint, params=kwargs)
|
Get holds on an account.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Holds are placed on an account for active orders or
pending withdraw requests.
As an order is filled, the hold amount is updated. If an order
is canceled, any remaining hold is removed. For a withdraw, once
it is completed, the hold is removed.
The `type` field will indicate why the hold exists. The hold
type is 'order' for holds related to open orders and 'transfer'
for holds related to a withdraw.
The `ref` field contains the id of the order or transfer which
created the hold.
Args:
account_id (str): Account id to get holds of.
kwargs (dict): Additional HTTP request parameters.
Returns:
generator(list): Hold information for the account. Example::
[
{
"id": "82dcd140-c3c7-4507-8de4-2c529cd1a28f",
"account_id": "e0b3f39a-183d-453e-b754-0c13e5bab0b3",
"created_at": "2014-11-06T10:34:47.123456Z",
"updated_at": "2014-11-06T10:40:47.123456Z",
"amount": "4.23",
"type": "order",
"ref": "0a205de4-dd35-4370-a285-fe8fc375a273",
},
{
...
}
]
|
def os_change_list(self, subid, params=None):
''' /v1/server/os_change_list
GET - account
Retrieves a list of operating systems to which this server can be
changed.
Link: https://www.vultr.com/api/#server_os_change_list
'''
params = update_params(params, {'SUBID': subid})
return self.request('/v1/server/os_change_list', params, 'GET')
|
/v1/server/os_change_list
GET - account
Retrieves a list of operating systems to which this server can be
changed.
Link: https://www.vultr.com/api/#server_os_change_list
|
def load_nb(cls, inline=True):
"""
Loads any resources required for display of plots
in the Jupyter notebook
"""
with param.logging_level('ERROR'):
cls.notebook_context = True
cls.comm_manager = JupyterCommManager
|
Loads any resources required for display of plots
in the Jupyter notebook
|
def iterline(x1, y1, x2, y2):
'Yields (x, y) coords of line from (x1, y1) to (x2, y2)'
xdiff = abs(x2-x1)
ydiff = abs(y2-y1)
xdir = 1 if x1 <= x2 else -1
ydir = 1 if y1 <= y2 else -1
r = math.ceil(max(xdiff, ydiff))
if r == 0: # point, not line
yield x1, y1
else:
x, y = math.floor(x1), math.floor(y1)
i = 0
while i < r:
x += xdir * xdiff / r
y += ydir * ydiff / r
yield x, y
i += 1
|
Yields (x, y) coords of line from (x1, y1) to (x2, y2)
|
def do_file_upload(client, args):
"""Upload files"""
# Sanity check
if len(args.paths) > 1:
# destination must be a directory
try:
resource = client.get_resource_by_uri(args.dest_uri)
except ResourceNotFoundError:
resource = None
if resource and not isinstance(resource, Folder):
print("file-upload: "
"target '{}' is not a directory".format(args.dest_uri))
return None
with client.upload_session():
for src_path in args.paths:
print("Uploading {} to {}".format(src_path, args.dest_uri))
result = client.upload_file(src_path, args.dest_uri)
print("Uploaded {}, result={}".format(src_path, result))
return True
|
Upload files
|
def save_parameters(self, path, grad_only=False):
"""Save all parameters into a file with the specified format.
Currently hdf5 and protobuf formats are supported.
Args:
path : path or file object
grad_only (bool, optional): Return parameters with `need_grad` option as `True`.
"""
params = self.get_parameters(grad_only=grad_only)
nn.save_parameters(path, params)
|
Save all parameters into a file with the specified format.
Currently hdf5 and protobuf formats are supported.
Args:
path : path or file object
grad_only (bool, optional): Return parameters with `need_grad` option as `True`.
|
def guess_sequence(redeem_script):
'''
str -> int
If OP_CSV is used, guess an appropriate sequence
Otherwise, disable RBF, but leave lock_time on.
Fails if there's not a constant before OP_CSV
'''
try:
script_array = redeem_script.split()
loc = script_array.index('OP_CHECKSEQUENCEVERIFY')
return int(script_array[loc - 1], 16)
except ValueError:
return 0xFFFFFFFE
|
str -> int
If OP_CSV is used, guess an appropriate sequence
Otherwise, disable RBF, but leave lock_time on.
Fails if there's not a constant before OP_CSV
|
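Usage sketch with made-up script strings (the function is restated so this runs standalone); the constant before OP_CHECKSEQUENCEVERIFY is read as hex:

```python
def guess_sequence(redeem_script):
    try:
        script_array = redeem_script.split()
        loc = script_array.index('OP_CHECKSEQUENCEVERIFY')
        return int(script_array[loc - 1], 16)
    except ValueError:
        return 0xFFFFFFFE

print(hex(guess_sequence('10 OP_CHECKSEQUENCEVERIFY OP_DROP OP_DUP')))   # 0x10
print(hex(guess_sequence('OP_DUP OP_HASH160 OP_EQUALVERIFY')))           # 0xfffffffe
```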
def hook_key(key, callback, suppress=False):
"""
Hooks key up and key down events for a single key. Returns the event handler
created. To remove a hooked key use `unhook_key(key)` or
`unhook_key(handler)`.
Note: this function shares state with hotkeys, so `clear_all_hotkeys`
affects it as well.
"""
_listener.start_if_necessary()
store = _listener.blocking_keys if suppress else _listener.nonblocking_keys
scan_codes = key_to_scan_codes(key)
for scan_code in scan_codes:
store[scan_code].append(callback)
def remove_():
del _hooks[callback]
del _hooks[key]
del _hooks[remove_]
for scan_code in scan_codes:
store[scan_code].remove(callback)
_hooks[callback] = _hooks[key] = _hooks[remove_] = remove_
return remove_
|
Hooks key up and key down events for a single key. Returns the event handler
created. To remove a hooked key use `unhook_key(key)` or
`unhook_key(handler)`.
Note: this function shares state with hotkeys, so `clear_all_hotkeys`
affects it as well.
|
def _checkup(peaks, ecg_integrated, sample_rate, rr_buffer, spk1, npk1, threshold):
"""
Check each peak according to thresholds
Parameters
----------
peaks : list
List of local maximums that pass the first stage of conditions needed to be considered as
an R peak.
ecg_integrated : ndarray
Array that contains the samples of the integrated signal.
sample_rate : int
Sampling rate at which the acquisition took place.
rr_buffer : list
Data structure that stores the duration of the last eight RR intervals.
spk1 : float
Actual value of SPK1 parameter defined in Pan-Tompkins real-time R peak detection algorithm
(named signal peak).
npk1 : int
Actual value of NPK1 parameter defined in Pan-Tompkins real-time R peak detection algorithm
(named noise peak).
threshold : float
Initial value of the adaptive threshold level (relevant parameter for the application of
specific criteria during the identification of R peaks).
Returns
-------
out : list
List with the position of the peaks considered as R peak by the algorithm.
"""
peaks_amp = [ecg_integrated[peak] for peak in peaks]
definitive_peaks = []
for i, peak in enumerate(peaks):
amp = peaks_amp[i]
# accept if larger than threshold and slope in raw signal
# is +-30% of previous slopes
if amp > threshold:
definitive_peaks, spk1, rr_buffer = _acceptpeak(peak, amp, definitive_peaks, spk1,
rr_buffer)
# accept as qrs if higher than half threshold,
# but is 360 ms after last qrs and next peak
# is more than 1.5 rr intervals away
# just abandon it if there is no peak before
# or after
elif amp > threshold / 2 and list(definitive_peaks) and len(peaks) > i + 1:
mean_rr = numpy.mean(rr_buffer)
last_qrs_ms = (peak - definitive_peaks[-1]) * (1000 / sample_rate)
last_qrs_to_next_peak = peaks[i+1] - definitive_peaks[-1]
if last_qrs_ms > 360 and last_qrs_to_next_peak > 1.5 * mean_rr:
definitive_peaks, spk1, rr_buffer = _acceptpeak(peak, amp, definitive_peaks, spk1,
rr_buffer)
else:
npk1 = _noisepeak(amp, npk1)
# if not either of these it is noise
else:
npk1 = _noisepeak(amp, npk1)
threshold = _buffer_update(npk1, spk1)
definitive_peaks = numpy.array(definitive_peaks)
return definitive_peaks
|
Check each peak according to thresholds
Parameters
----------
peaks : list
List of local maximums that pass the first stage of conditions needed to be considered as
an R peak.
ecg_integrated : ndarray
Array that contains the samples of the integrated signal.
sample_rate : int
Sampling rate at which the acquisition took place.
rr_buffer : list
Data structure that stores the duration of the last eight RR intervals.
spk1 : float
Actual value of SPK1 parameter defined in Pan-Tompkins real-time R peak detection algorithm
(named signal peak).
npk1 : int
Actual value of NPK1 parameter defined in Pan-Tompkins real-time R peak detection algorithm
(named noise peak).
threshold : float
Initial value of the adaptive threshold level (relevant parameter for the application of
specific criteria during the identification of R peaks).
Returns
-------
out : list
List with the position of the peaks considered as R peak by the algorithm.
|
def zinnia_pagination(context, page, begin_pages=1, end_pages=1,
before_pages=2, after_pages=2,
template='zinnia/tags/pagination.html'):
"""
Return a Digg-like pagination,
by splitting long list of page into 3 blocks of pages.
"""
get_string = ''
for key, value in context['request'].GET.items():
if key != 'page':
get_string += '&%s=%s' % (key, value)
page_range = list(page.paginator.page_range)
begin = page_range[:begin_pages]
end = page_range[-end_pages:]
middle = page_range[max(page.number - before_pages - 1, 0):
page.number + after_pages]
if set(begin) & set(middle): # [1, 2, 3], [2, 3, 4], [...]
begin = sorted(set(begin + middle)) # [1, 2, 3, 4]
middle = []
elif begin[-1] + 1 == middle[0]: # [1, 2, 3], [4, 5, 6], [...]
begin += middle # [1, 2, 3, 4, 5, 6]
middle = []
elif middle[-1] + 1 == end[0]: # [...], [15, 16, 17], [18, 19, 20]
end = middle + end # [15, 16, 17, 18, 19, 20]
middle = []
elif set(middle) & set(end): # [...], [17, 18, 19], [18, 19, 20]
end = sorted(set(middle + end)) # [17, 18, 19, 20]
middle = []
if set(begin) & set(end): # [1, 2, 3], [...], [2, 3, 4]
begin = sorted(set(begin + end)) # [1, 2, 3, 4]
middle, end = [], []
elif begin[-1] + 1 == end[0]: # [1, 2, 3], [...], [4, 5, 6]
begin += end # [1, 2, 3, 4, 5, 6]
middle, end = [], []
return {'template': template,
'page': page,
'begin': begin,
'middle': middle,
'end': end,
'GET_string': get_string}
|
Return a Digg-like pagination,
by splitting long list of page into 3 blocks of pages.
|
def _get_repeat_masker_header(pairwise_alignment):
"""generate header string of repeatmasker formated repr of self."""
res = ""
res += str(pairwise_alignment.meta[ALIG_SCORE_KEY]) + " "
res += "{:.2f}".format(pairwise_alignment.meta[PCENT_SUBS_KEY]) + " "
res += "{:.2f}".format(pairwise_alignment.meta[PCENT_S1_INDELS_KEY]) + " "
res += "{:.2f}".format(pairwise_alignment.meta[PCENT_S2_INDELS_KEY]) + " "
res += (pairwise_alignment.s1.name
if (pairwise_alignment.s1.name != "" and
pairwise_alignment.s1.name is not None)
else UNKNOWN_SEQ_NAME) + " "
res += str(pairwise_alignment.s1.start) + " "
res += str(pairwise_alignment.s1.end - 1) + " "
res += "(" + str(pairwise_alignment.s1.remaining) + ") "
res += ("C " if not pairwise_alignment.s2.is_positive_strand() else "")
res += (pairwise_alignment.s2.name
if (pairwise_alignment.s2.name != "" and
pairwise_alignment.s2.name is not None)
else UNKNOWN_SEQ_NAME) + " "
res += ("(" + str(pairwise_alignment.s2.remaining) + ")"
if not pairwise_alignment.s2.is_positive_strand()
else str(pairwise_alignment.s2.start))
res += " "
# Note here that we need to convert between our internal representation
# for coordinates and the repeat-masker one; internally, we always store
# coordinates as exclusive of the final value with start < end;
# repeatmasker gives the larger coordinate as the 'start' when the match
# is to the reverse complement, so we have to swap start/end, and its
# coordinates are inclusive of end, so we have to subtract 1 from end.
res += str(pairwise_alignment.s2.end - 1) + " "
res += (str(pairwise_alignment.s2.start)
if not pairwise_alignment.s2.is_positive_strand()
else "(" + str(pairwise_alignment.s2.remaining) + ")") + " "
res += pairwise_alignment.meta[UNKNOWN_RM_HEADER_FIELD_KEY] + " "
res += str(pairwise_alignment.meta[RM_ID_KEY])
return res
|
generate header string of repeatmasker formatted repr of self.
|
def replace_project(self, owner, id, **kwargs):
"""
Create / Replace a project
Create a project with a given id or completely rewrite the project, including any previously added files or linked datasets, if one already exists with the given id.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.replace_project(owner, id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: User name and unique identifier of the creator of a project. For example, in the URL: [https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), government is the unique identifier of the owner. (required)
:param str id: Project unique identifier. For example, in the URL:[https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), how-to-add-depth-to-your-data-with-the-us-census-acs is the unique identifier of the project. (required)
:param ProjectCreateRequest body:
:return: SuccessMessage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.replace_project_with_http_info(owner, id, **kwargs)
else:
(data) = self.replace_project_with_http_info(owner, id, **kwargs)
return data
|
Create / Replace a project
Create a project with a given id or completely rewrite the project, including any previously added files or linked datasets, if one already exists with the given id.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.replace_project(owner, id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: User name and unique identifier of the creator of a project. For example, in the URL: [https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), government is the unique identifier of the owner. (required)
:param str id: Project unique identifier. For example, in the URL:[https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), how-to-add-depth-to-your-data-with-the-us-census-acs is the unique identifier of the project. (required)
:param ProjectCreateRequest body:
:return: SuccessMessage
If the method is called asynchronously,
returns the request thread.
|
def _apply_filters(self, **filters):
"""Determine rows to keep in data for given set of filters
Parameters
----------
filters: dict
dictionary of filters ({col: values}}); uses a pseudo-regexp syntax
by default, but accepts `regexp: True` to use regexp directly
"""
regexp = filters.pop('regexp', False)
keep = np.array([True] * len(self.data))
# filter by columns and list of values
for col, values in filters.items():
# treat `_apply_filters(col=None)` as no filter applied
if values is None:
continue
if col in self.meta.columns:
matches = pattern_match(self.meta[col], values, regexp=regexp)
cat_idx = self.meta[matches].index
keep_col = (self.data[META_IDX].set_index(META_IDX)
.index.isin(cat_idx))
elif col == 'variable':
level = filters['level'] if 'level' in filters else None
keep_col = pattern_match(self.data[col], values, level, regexp)
elif col == 'year':
                _data = self.data[col] if self.time_col != 'time' \
else self.data['time'].apply(lambda x: x.year)
keep_col = years_match(_data, values)
            elif col == 'month' and self.time_col == 'time':
keep_col = month_match(self.data['time']
.apply(lambda x: x.month),
values)
            elif col == 'day' and self.time_col == 'time':
if isinstance(values, str):
wday = True
elif isinstance(values, list) and isinstance(values[0], str):
wday = True
else:
wday = False
if wday:
days = self.data['time'].apply(lambda x: x.weekday())
else: # ints or list of ints
days = self.data['time'].apply(lambda x: x.day)
keep_col = day_match(days, values)
            elif col == 'hour' and self.time_col == 'time':
keep_col = hour_match(self.data['time']
.apply(lambda x: x.hour),
values)
            elif col == 'time' and self.time_col == 'time':
keep_col = datetime_match(self.data[col], values)
elif col == 'level':
if 'variable' not in filters.keys():
keep_col = find_depth(self.data['variable'], level=values)
else:
continue
elif col in self.data.columns:
keep_col = pattern_match(self.data[col], values, regexp=regexp)
else:
_raise_filter_error(col)
keep &= keep_col
return keep
|
Determine rows to keep in data for given set of filters
Parameters
----------
filters: dict
dictionary of filters ({col: values}}); uses a pseudo-regexp syntax
by default, but accepts `regexp: True` to use regexp directly
|
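As a usage sketch for _apply_filters above: the real method is internal to its class, so this is a minimal, self-contained illustration of how a {col: values} dict becomes a boolean row mask over a plain pandas DataFrame; the columns, data, and shell-style wildcard handling are assumptions, not the library's implementation.
import fnmatch
import numpy as np
import pandas as pd

data = pd.DataFrame({
    'variable': ['Emissions|CO2', 'Emissions|CH4', 'Primary Energy'],
    'year': [2020, 2030, 2020],
})

def apply_filters(data, **filters):
    keep = np.array([True] * len(data))          # start by keeping every row
    for col, values in filters.items():
        if values is None:                       # treat `col=None` as no filter
            continue
        values = values if isinstance(values, list) else [values]
        if col == 'year':
            keep_col = data[col].isin(values)
        else:                                    # pseudo-regexp via shell-style wildcards
            keep_col = data[col].apply(
                lambda v: any(fnmatch.fnmatch(v, p) for p in values))
        keep &= np.asarray(keep_col)             # AND the per-column masks together
    return keep

print(data[apply_filters(data, variable='Emissions|*', year=2020)])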
def encrypt_file(file, keys=secretKeys()):
'''Encrypt file data with the same method as the Send browser/js client'''
key = keys.encryptKey
iv = keys.encryptIV
encData = tempfile.SpooledTemporaryFile(max_size=SPOOL_SIZE, mode='w+b')
cipher = Cryptodome.Cipher.AES.new(key, Cryptodome.Cipher.AES.MODE_GCM, iv)
pbar = progbar(fileSize(file))
for chunk in iter(lambda: file.read(CHUNK_SIZE), b''):
encData.write(cipher.encrypt(chunk))
pbar.update(len(chunk))
pbar.close()
encData.write(cipher.digest())
file.close()
encData.seek(0)
return encData
|
Encrypt file data with the same method as the Send browser/js client
|
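The function above relies on AES-GCM with the authentication tag appended after the ciphertext. A minimal, self-contained sketch of that pattern with PyCryptodome, assuming an illustrative 16-byte key and 12-byte IV rather than the Send client's real key derivation:
from Cryptodome.Cipher import AES
from Cryptodome.Random import get_random_bytes

key, iv = get_random_bytes(16), get_random_bytes(12)       # illustrative sizes
plaintext = b'hello send'

cipher = AES.new(key, AES.MODE_GCM, nonce=iv)
blob = cipher.encrypt(plaintext) + cipher.digest()          # ciphertext || 16-byte tag

# decrypting verifies the appended tag
dec = AES.new(key, AES.MODE_GCM, nonce=iv)
assert dec.decrypt_and_verify(blob[:-16], blob[-16:]) == plaintext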
def missing(name, limit=''):
'''
The inverse of service.available.
Return True if the named service is not available. Use the ``limit`` param to
restrict results to services of that type.
CLI Examples:
.. code-block:: bash
salt '*' service.missing sshd
salt '*' service.missing sshd limit=upstart
salt '*' service.missing sshd limit=sysvinit
'''
if limit == 'upstart':
return not _service_is_upstart(name)
elif limit == 'sysvinit':
return not _service_is_sysv(name)
else:
if _service_is_upstart(name) or _service_is_sysv(name):
return False
else:
return True
|
The inverse of service.available.
Return True if the named service is not available. Use the ``limit`` param to
restrict results to services of that type.
CLI Examples:
.. code-block:: bash
salt '*' service.missing sshd
salt '*' service.missing sshd limit=upstart
salt '*' service.missing sshd limit=sysvinit
|
def critical(self, msg, *args, **kwargs) -> Task: # type: ignore
"""
Log msg with severity 'CRITICAL'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
await logger.critical("Houston, we have a major disaster", exc_info=1)
"""
return self._make_log_task(logging.CRITICAL, msg, args, **kwargs)
|
Log msg with severity 'CRITICAL'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
await logger.critical("Houston, we have a major disaster", exc_info=1)
|
def configure_versioning(self, versioning, mfa_delete=False,
mfa_token=None, headers=None):
"""
Configure versioning for this bucket.
..note:: This feature is currently in beta.
:type versioning: bool
:param versioning: A boolean indicating whether version is
enabled (True) or disabled (False).
:type mfa_delete: bool
:param mfa_delete: A boolean indicating whether the Multi-Factor
Authentication Delete feature is enabled (True)
or disabled (False). If mfa_delete is enabled
then all Delete operations will require the
token from your MFA device to be passed in
the request.
:type mfa_token: tuple or list of strings
:param mfa_token: A tuple or list consisting of the serial number
from the MFA device and the current value of
the six-digit token associated with the device.
This value is required when you are changing
the status of the MfaDelete property of
the bucket.
"""
if versioning:
ver = 'Enabled'
else:
ver = 'Suspended'
if mfa_delete:
mfa = 'Enabled'
else:
mfa = 'Disabled'
body = self.VersioningBody % (ver, mfa)
if mfa_token:
if not headers:
headers = {}
provider = self.connection.provider
headers[provider.mfa_header] = ' '.join(mfa_token)
response = self.connection.make_request('PUT', self.name, data=body,
query_args='versioning', headers=headers)
body = response.read()
if response.status == 200:
return True
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
|
Configure versioning for this bucket.
..note:: This feature is currently in beta.
:type versioning: bool
:param versioning: A boolean indicating whether version is
enabled (True) or disabled (False).
:type mfa_delete: bool
:param mfa_delete: A boolean indicating whether the Multi-Factor
Authentication Delete feature is enabled (True)
or disabled (False). If mfa_delete is enabled
then all Delete operations will require the
token from your MFA device to be passed in
the request.
:type mfa_token: tuple or list of strings
:param mfa_token: A tuple or list consisting of the serial number
from the MFA device and the current value of
the six-digit token associated with the device.
This value is required when you are changing
the status of the MfaDelete property of
the bucket.
|
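An illustrative boto (v2) call against the method above; the bucket name, MFA serial, and token are placeholders:
import boto

conn = boto.connect_s3()                                   # credentials from environment/config
bucket = conn.get_bucket('my-example-bucket')              # hypothetical bucket
bucket.configure_versioning(True)                          # enable plain versioning
# enabling MFA Delete additionally requires the device serial and current token
bucket.configure_versioning(True, mfa_delete=True,
                            mfa_token=('arn:aws:iam::123456789012:mfa/me', '123456'))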
def update_catalog_extent(self, current_extent):
# type: (int) -> None
'''
A method to update the extent associated with this Boot Catalog.
Parameters:
current_extent - New extent to associate with this Boot Catalog
Returns:
Nothing.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('El Torito Boot Catalog not yet initialized')
self.br.update_boot_system_use(struct.pack('=L', current_extent))
|
A method to update the extent associated with this Boot Catalog.
Parameters:
current_extent - New extent to associate with this Boot Catalog
Returns:
Nothing.
|
def restart(self, restart_only_stale_services=None,
redeploy_client_configuration=None,
restart_service_names=None):
"""
Restart all services in the cluster.
Services are restarted in the appropriate order given their dependencies.
@param restart_only_stale_services: Only restart services that have stale
configuration and their dependent
services. Default is False.
@param redeploy_client_configuration: Re-deploy client configuration for
all services in the cluster. Default
is False.
@param restart_service_names: Only restart services that are specified and their dependent services.
Available since API v11.
@since API v6
@return: Reference to the submitted command.
"""
if self._get_resource_root().version < 6:
return self._cmd('restart')
else:
args = dict()
args['restartOnlyStaleServices'] = restart_only_stale_services
args['redeployClientConfiguration'] = redeploy_client_configuration
if self._get_resource_root().version >= 11:
args['restartServiceNames'] = restart_service_names
return self._cmd('restart', data=args, api_version=6)
|
Restart all services in the cluster.
Services are restarted in the appropriate order given their dependencies.
@param restart_only_stale_services: Only restart services that have stale
configuration and their dependent
services. Default is False.
@param redeploy_client_configuration: Re-deploy client configuration for
all services in the cluster. Default
is False.
@param restart_service_names: Only restart services that are specified and their dependent services.
Available since API v11.
@since API v6
@return: Reference to the submitted command.
|
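A hedged usage sketch with the Cloudera Manager Python client, assuming a reachable CM host; the host, credentials, and cluster name are placeholders:
from cm_api.api_client import ApiResource

api = ApiResource('cm-host.example.com', username='admin', password='admin')
cluster = api.get_cluster('cluster1')                      # hypothetical cluster
cmd = cluster.restart(restart_only_stale_services=True,
                      redeploy_client_configuration=True)
cmd = cmd.wait()                                           # block until the command completes
print(cmd.success)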
def store_random(self):
"""
Populate array with random quartets sampled from a generator.
Holding all sets in memory might take a lot, but holding a very
large list of random numbers for which ones to sample will fit
into memory for most reasonable sized sets. So we'll load a
list of random numbers in the range of the length of total
sets that can be generated, then only keep sets from the set
generator if they are in the int list. I did several tests to
check that random pairs are as likely as 0 & 1 to come up together
in a random quartet set.
"""
with h5py.File(self.database.input, 'a') as io5:
fillsets = io5["quartets"]
## set generators
qiter = itertools.combinations(xrange(len(self.samples)), 4)
rand = np.arange(0, n_choose_k(len(self.samples), 4))
np.random.shuffle(rand)
rslice = rand[:self.params.nquartets]
rss = np.sort(rslice)
riter = iter(rss)
del rand, rslice
## print progress update 1 to the engine stdout
print(self._chunksize)
## set to store
rando = riter.next()
tmpr = np.zeros((self.params.nquartets, 4), dtype=np.uint16)
tidx = 0
while 1:
try:
for i, j in enumerate(qiter):
if i == rando:
tmpr[tidx] = j
tidx += 1
rando = riter.next()
## print progress bar update to engine stdout
if not i % self._chunksize:
print(min(i, self.params.nquartets))
except StopIteration:
break
## store into database
fillsets[:] = tmpr
del tmpr
|
Populate array with random quartets sampled from a generator.
Holding all sets in memory might take a lot, but holding a very
large list of random numbers for which ones to sample will fit
into memory for most reasonable sized sets. So we'll load a
list of random numbers in the range of the length of total
sets that can be generated, then only keep sets from the set
generator if they are in the int list. I did several tests to
check that random pairs are as likely as 0 & 1 to come up together
in a random quartet set.
|
def get_width(self, c, default=0, match_only=None):
"""
Get the display width of a component. Wraps `getattr()`.
Development note: Cannot define this as a `partial()` because I want
to maintain the order of arguments in `getattr()`.
Args:
c (component): The component to look up.
default (float): The width to return in the event of no match.
match_only (list of str): The component attributes to include in the
comparison. Default: All of them.
Returns:
float. The width of the matching Decor in the Legend.
"""
return self.getattr(c=c,
attr='width',
default=default,
match_only=match_only)
|
Get the display width of a component. Wraps `getattr()`.
Development note: Cannot define this as a `partial()` because I want
to maintain the order of arguments in `getattr()`.
Args:
c (component): The component to look up.
default (float): The width to return in the event of no match.
match_only (list of str): The component attributes to include in the
comparison. Default: All of them.
Returns:
float. The width of the matching Decor in the Legend.
|
def set_nodes_vlan(site, nodes, interface, vlan_id):
"""Set the interface of the nodes in a specific vlan.
It is assumed that the same interface name is available on the node.
Args:
site(str): site to consider
nodes(list): nodes to consider
interface(str): the network interface to put in the vlan
vlan_id(str): the id of the vlan
"""
def _to_network_address(host):
"""Translate a host to a network address
e.g:
paranoia-20.rennes.grid5000.fr -> paranoia-20-eth2.rennes.grid5000.fr
"""
splitted = host.split('.')
splitted[0] = splitted[0] + "-" + interface
return ".".join(splitted)
gk = get_api_client()
network_addresses = [_to_network_address(n) for n in nodes]
gk.sites[site].vlans[str(vlan_id)].submit({"nodes": network_addresses})
|
Set the interface of the nodes in a specific vlan.
It is assumed that the same interface name is available on the node.
Args:
site(str): site to consider
nodes(list): nodes to consider
interface(str): the network interface to put in the vlan
vlan_id(str): the id of the vlan
|
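The interesting step above is the hostname rewrite done by the inner helper; a quick, self-contained check of that transformation (the host and interface names are only examples):
def to_network_address(host, interface):
    # insert the interface name into the machine part of the FQDN
    parts = host.split('.')
    parts[0] = parts[0] + '-' + interface
    return '.'.join(parts)

print(to_network_address('paranoia-20.rennes.grid5000.fr', 'eth2'))
# prints: paranoia-20-eth2.rennes.grid5000.fr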
def todo(self, **kwargs):
"""Create a todo associated to the object.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabTodoError: If the todo cannot be set
"""
path = '%s/%s/todo' % (self.manager.path, self.get_id())
self.manager.gitlab.http_post(path, **kwargs)
|
Create a todo associated to the object.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabTodoError: If the todo cannot be set
|
def decode_mst(energy: numpy.ndarray,
length: int,
has_labels: bool = True) -> Tuple[numpy.ndarray, numpy.ndarray]:
"""
Note: Counter to typical intuition, this function decodes the _maximum_
spanning tree.
Decode the optimal MST tree with the Chu-Liu-Edmonds algorithm for
maximum spanning arborescences on graphs.
Parameters
----------
energy : ``numpy.ndarray``, required.
A tensor with shape (num_labels, timesteps, timesteps)
containing the energy of each edge. If has_labels is ``False``,
the tensor should have shape (timesteps, timesteps) instead.
length : ``int``, required.
The length of this sequence, as the energy may have come
from a padded batch.
has_labels : ``bool``, optional, (default = True)
Whether the graph has labels or not.
"""
if has_labels and energy.ndim != 3:
raise ConfigurationError("The dimension of the energy array is not equal to 3.")
elif not has_labels and energy.ndim != 2:
raise ConfigurationError("The dimension of the energy array is not equal to 2.")
input_shape = energy.shape
max_length = input_shape[-1]
# Our energy matrix might have been batched -
# here we clip it to contain only non padded tokens.
if has_labels:
energy = energy[:, :length, :length]
# get best label for each edge.
label_id_matrix = energy.argmax(axis=0)
energy = energy.max(axis=0)
else:
energy = energy[:length, :length]
label_id_matrix = None
# get original score matrix
original_score_matrix = energy
# initialize score matrix to original score matrix
score_matrix = numpy.array(original_score_matrix, copy=True)
old_input = numpy.zeros([length, length], dtype=numpy.int32)
old_output = numpy.zeros([length, length], dtype=numpy.int32)
current_nodes = [True for _ in range(length)]
representatives: List[Set[int]] = []
for node1 in range(length):
original_score_matrix[node1, node1] = 0.0
score_matrix[node1, node1] = 0.0
representatives.append({node1})
for node2 in range(node1 + 1, length):
old_input[node1, node2] = node1
old_output[node1, node2] = node2
old_input[node2, node1] = node2
old_output[node2, node1] = node1
final_edges: Dict[int, int] = {}
# The main algorithm operates inplace.
chu_liu_edmonds(length, score_matrix, current_nodes,
final_edges, old_input, old_output, representatives)
heads = numpy.zeros([max_length], numpy.int32)
if has_labels:
head_type = numpy.ones([max_length], numpy.int32)
else:
head_type = None
for child, parent in final_edges.items():
heads[child] = parent
if has_labels:
head_type[child] = label_id_matrix[parent, child]
return heads, head_type
|
Note: Counter to typical intuition, this function decodes the _maximum_
spanning tree.
Decode the optimal MST tree with the Chu-Liu-Edmonds algorithm for
maximum spanning arborescences on graphs.
Parameters
----------
energy : ``numpy.ndarray``, required.
A tensor with shape (num_labels, timesteps, timesteps)
containing the energy of each edge. If has_labels is ``False``,
the tensor should have shape (timesteps, timesteps) instead.
length : ``int``, required.
The length of this sequence, as the energy may have come
from a padded batch.
has_labels : ``bool``, optional, (default = True)
Whether the graph has labels or not.
|
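A hedged usage sketch for decode_mst, assuming the AllenNLP import path below; the energy tensor is random and purely illustrative:
import numpy
from allennlp.nn.chu_liu_edmonds import decode_mst

num_labels, timesteps = 3, 5
energy = numpy.random.rand(num_labels, timesteps, timesteps)
heads, head_labels = decode_mst(energy, length=timesteps, has_labels=True)
print(heads)         # heads[i] is the parent index chosen for token i
print(head_labels)   # label id of each chosen arc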
def _get_synset_offsets(synset_idxes):
"""Returs pointer offset in the WordNet file for every synset index.
Notes
-----
Internal function. Do not call directly.
Preserves order -- for [x,y,z] returns [offset(x),offset(y),offset(z)].
Parameters
----------
synset_idxes : list of ints
Lists synset IDs, which need offset.
Returns
-------
list of ints
Lists pointer offsets in Wordnet file.
"""
offsets = {}
current_seeked_offset_idx = 0
ordered_synset_idxes = sorted(synset_idxes)
with codecs.open(_SOI,'rb', 'utf-8') as fin:
for line in fin:
split_line = line.split(':')
while current_seeked_offset_idx < len(ordered_synset_idxes) and split_line[0] == str(ordered_synset_idxes[current_seeked_offset_idx]):
# Looping on single line entries in case synset_indexes contains duplicates.
offsets[synset_idxes[current_seeked_offset_idx]] = int(split_line[1])
current_seeked_offset_idx += 1
if current_seeked_offset_idx >= len(synset_idxes):
break
return [offsets[synset_idx] for synset_idx in synset_idxes]
|
Returns the pointer offset in the WordNet file for every synset index.
Notes
-----
Internal function. Do not call directly.
Preserves order -- for [x,y,z] returns [offset(x),offset(y),offset(z)].
Parameters
----------
synset_idxes : list of ints
Lists synset IDs, which need offset.
Returns
-------
list of ints
Lists pointer offsets in Wordnet file.
|
def all_announcements_view(request):
''' The view of manager announcements. '''
page_name = "Archives - All Announcements"
userProfile = UserProfile.objects.get(user=request.user)
announcement_form = None
manager_positions = Manager.objects.filter(incumbent=userProfile)
if manager_positions:
announcement_form = AnnouncementForm(
request.POST if "post_announcement" in request.POST else None,
profile=userProfile,
)
if announcement_form.is_valid():
announcement_form.save()
return HttpResponseRedirect(reverse('managers:all_announcements'))
# A pseudo-dictionary, actually a list with items of form (announcement,
# announcement_pin_form)
announcements_dict = list()
for a in Announcement.objects.all():
pin_form = None
if a.manager.incumbent == userProfile or request.user.is_superuser:
pin_form = PinForm(
request.POST if "pin-{0}".format(a.pk) in request.POST else None,
instance=a,
)
if pin_form.is_valid():
pin_form.save()
return HttpResponseRedirect(reverse('managers:all_announcements'))
announcements_dict.append((a, pin_form))
return render_to_response('announcements.html', {
'page_name': page_name,
'manager_positions': manager_positions,
'announcements_dict': announcements_dict,
'announcement_form': announcement_form,
}, context_instance=RequestContext(request))
|
The view of manager announcements.
|
async def update(self) -> None:
"""Retrieve updated device state."""
if self.next_allowed_update is not None and \
datetime.utcnow() < self.next_allowed_update:
return
self.next_allowed_update = None
await self.api._update_device_state()
self._device_json = self._device['device_info']
|
Retrieve updated device state.
|
def to_instants_dataframe(self, sql_ctx):
"""
Returns a DataFrame of instants, each a horizontal slice of this TimeSeriesRDD at a time.
This essentially transposes the TimeSeriesRDD, producing a DataFrame where each column
        is a key from one of the rows in the TimeSeriesRDD.
"""
ssql_ctx = sql_ctx._ssql_ctx
jdf = self._jtsrdd.toInstantsDataFrame(ssql_ctx, -1)
return DataFrame(jdf, sql_ctx)
|
Returns a DataFrame of instants, each a horizontal slice of this TimeSeriesRDD at a time.
This essentially transposes the TimeSeriesRDD, producing a DataFrame where each column
is a key from one of the rows in the TimeSeriesRDD.
|
def find_expcoef(self, nsd_below=0., plot=False,
trimlim=None, autorange_kwargs={}):
"""
Determines exponential decay coefficient for despike filter.
Fits an exponential decay function to the washout phase of standards
to determine the washout time of your laser cell. The exponential
coefficient reported is `nsd_below` standard deviations below the
fitted exponent, to ensure that no real data is removed.
Total counts are used in fitting, rather than a specific analyte.
Parameters
----------
nsd_below : float
The number of standard deviations to subtract from the fitted
coefficient when calculating the filter exponent.
plot : bool or str
If True, creates a plot of the fit, if str the plot is to the
location specified in str.
trimlim : float
A threshold limit used in determining the start of the
exponential decay region of the washout. Defaults to half
the increase in signal over background. If the data in
the plot don't fall on an exponential decay line, change
this number. Normally you'll need to increase it.
Returns
-------
None
"""
print('Calculating exponential decay coefficient\nfrom SRM washouts...')
def findtrim(tr, lim=None):
trr = np.roll(tr, -1)
trr[-1] = 0
if lim is None:
lim = 0.5 * np.nanmax(tr - trr)
ind = (tr - trr) >= lim
return np.arange(len(ind))[ind ^ np.roll(ind, -1)][0]
if not hasattr(self.stds[0], 'trnrng'):
for s in self.stds:
s.autorange(**autorange_kwargs, ploterrs=False)
trans = []
times = []
for v in self.stds:
for trnrng in v.trnrng[-1::-2]:
tr = minmax_scale(v.data['total_counts'][(v.Time > trnrng[0]) & (v.Time < trnrng[1])])
sm = np.apply_along_axis(np.nanmean, 1,
rolling_window(tr, 3, pad=0))
sm[0] = sm[1]
trim = findtrim(sm, trimlim) + 2
trans.append(minmax_scale(tr[trim:]))
times.append(np.arange(tr[trim:].size) *
np.diff(v.Time[1:3]))
times = np.concatenate(times)
times = np.round(times, 2)
trans = np.concatenate(trans)
ti = []
tr = []
for t in np.unique(times):
ti.append(t)
tr.append(np.nanmin(trans[times == t]))
def expfit(x, e):
"""
Exponential decay function.
"""
return np.exp(e * x)
ep, ecov = curve_fit(expfit, ti, tr, p0=(-1.))
eeR2 = R2calc(trans, expfit(times, ep))
if plot:
fig, ax = plt.subplots(1, 1, figsize=[6, 4])
ax.scatter(times, trans, alpha=0.2, color='k', marker='x', zorder=-2)
ax.scatter(ti, tr, alpha=1, color='k', marker='o')
fitx = np.linspace(0, max(ti))
ax.plot(fitx, expfit(fitx, ep), color='r', label='Fit')
ax.plot(fitx, expfit(fitx, ep - nsd_below * np.diag(ecov)**.5, ),
color='b', label='Used')
ax.text(0.95, 0.75,
('y = $e^{%.2f \pm %.2f * x}$\n$R^2$= %.2f \nCoefficient: '
'%.2f') % (ep,
np.diag(ecov)**.5,
eeR2,
ep - nsd_below * np.diag(ecov)**.5),
transform=ax.transAxes, ha='right', va='top', size=12)
ax.set_xlim(0, ax.get_xlim()[-1])
ax.set_xlabel('Time (s)')
ax.set_ylim(-0.05, 1.05)
ax.set_ylabel('Proportion of Signal')
plt.legend()
if isinstance(plot, str):
fig.savefig(plot)
self.expdecay_coef = ep - nsd_below * np.diag(ecov)**.5
print(' {:0.2f}'.format(self.expdecay_coef[0]))
return
|
Determines exponential decay coefficient for despike filter.
Fits an exponential decay function to the washout phase of standards
to determine the washout time of your laser cell. The exponential
coefficient reported is `nsd_below` standard deviations below the
fitted exponent, to ensure that no real data is removed.
Total counts are used in fitting, rather than a specific analyte.
Parameters
----------
nsd_below : float
The number of standard deviations to subtract from the fitted
coefficient when calculating the filter exponent.
plot : bool or str
If True, creates a plot of the fit, if str the plot is to the
location specified in str.
trimlim : float
A threshold limit used in determining the start of the
exponential decay region of the washout. Defaults to half
the increase in signal over background. If the data in
the plot don't fall on an exponential decay line, change
this number. Normally you'll need to increase it.
Returns
-------
None
|
def _load_mnist_dataset(shape, path, name='mnist', url='http://yann.lecun.com/exdb/mnist/'):
"""A generic function to load mnist-like dataset.
Parameters:
----------
shape : tuple
The shape of digit images.
path : str
The path that the data is downloaded to.
name : str
The dataset name you want to use(the default is 'mnist').
url : str
The url of dataset(the default is 'http://yann.lecun.com/exdb/mnist/').
"""
path = os.path.join(path, name)
# Define functions for loading mnist-like data's images and labels.
# For convenience, they also download the requested files if needed.
def load_mnist_images(path, filename):
filepath = maybe_download_and_extract(filename, path, url)
logging.info(filepath)
# Read the inputs in Yann LeCun's binary format.
with gzip.open(filepath, 'rb') as f:
data = np.frombuffer(f.read(), np.uint8, offset=16)
# The inputs are vectors now, we reshape them to monochrome 2D images,
# following the shape convention: (examples, channels, rows, columns)
data = data.reshape(shape)
# The inputs come as bytes, we convert them to float32 in range [0,1].
# (Actually to range [0, 255/256], for compatibility to the version
# provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.)
return data / np.float32(256)
def load_mnist_labels(path, filename):
filepath = maybe_download_and_extract(filename, path, url)
# Read the labels in Yann LeCun's binary format.
with gzip.open(filepath, 'rb') as f:
data = np.frombuffer(f.read(), np.uint8, offset=8)
# The labels are vectors of integers now, that's exactly what we want.
return data
# Download and read the training and test set images and labels.
logging.info("Load or Download {0} > {1}".format(name.upper(), path))
X_train = load_mnist_images(path, 'train-images-idx3-ubyte.gz')
y_train = load_mnist_labels(path, 'train-labels-idx1-ubyte.gz')
X_test = load_mnist_images(path, 't10k-images-idx3-ubyte.gz')
y_test = load_mnist_labels(path, 't10k-labels-idx1-ubyte.gz')
# We reserve the last 10000 training examples for validation.
X_train, X_val = X_train[:-10000], X_train[-10000:]
y_train, y_val = y_train[:-10000], y_train[-10000:]
# We just return all the arrays in order, as expected in main().
# (It doesn't matter how we do this as long as we can read them again.)
X_train = np.asarray(X_train, dtype=np.float32)
y_train = np.asarray(y_train, dtype=np.int32)
X_val = np.asarray(X_val, dtype=np.float32)
y_val = np.asarray(y_val, dtype=np.int32)
X_test = np.asarray(X_test, dtype=np.float32)
y_test = np.asarray(y_test, dtype=np.int32)
return X_train, y_train, X_val, y_val, X_test, y_test
|
A generic function to load mnist-like dataset.
Parameters:
----------
shape : tuple
The shape of digit images.
path : str
The path that the data is downloaded to.
name : str
The dataset name you want to use(the default is 'mnist').
url : str
The url of dataset(the default is 'http://yann.lecun.com/exdb/mnist/').
|
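An illustrative call of the helper above (in TensorLayer it is normally reached through tl.files.load_mnist_dataset); the shape below selects channel-first 28x28 images and the path is a placeholder:
X_train, y_train, X_val, y_val, X_test, y_test = _load_mnist_dataset(
    shape=(-1, 1, 28, 28),    # (examples, channels, rows, columns)
    path='data')              # hypothetical download/cache directory
print(X_train.shape, y_train.shape)   # expected: (50000, 1, 28, 28) (50000,)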
def authenticate_credentials(self, request, access_token):
"""
Authenticate the request, given the access token.
"""
try:
token = oauth2_provider.oauth2.models.AccessToken.objects.select_related('user')
# provider_now switches to timezone aware datetime when
# the oauth2_provider version supports to it.
token = token.get(token=access_token, expires__gt=provider_now())
except oauth2_provider.oauth2.models.AccessToken.DoesNotExist:
raise exceptions.AuthenticationFailed('Invalid token')
user = token.user
if not user.is_active:
msg = 'User inactive or deleted: %s' % user.username
raise exceptions.AuthenticationFailed(msg)
return (user, token)
|
Authenticate the request, given the access token.
|
def gcs_read(self, remote_log_location):
"""
Returns the log found at the remote_log_location.
:param remote_log_location: the log's location in remote storage
:type remote_log_location: str (path)
"""
bkt, blob = self.parse_gcs_url(remote_log_location)
return self.hook.download(bkt, blob).decode('utf-8')
|
Returns the log found at the remote_log_location.
:param remote_log_location: the log's location in remote storage
:type remote_log_location: str (path)
|
def write_byte_data(self, addr, cmd, val):
"""write_byte_data(addr, cmd, val)
Perform SMBus Write Byte Data transaction.
"""
self._set_addr(addr)
if SMBUS.i2c_smbus_write_byte_data(self._fd,
ffi.cast("__u8", cmd),
ffi.cast("__u8", val)) == -1:
raise IOError(ffi.errno)
|
write_byte_data(addr, cmd, val)
Perform SMBus Write Byte Data transaction.
|
def _tree_view_builder(self, indent=0, is_root=True):
"""
Build a text to represent the package structure.
"""
def pad_text(indent):
return " " * indent + "|-- "
lines = list()
if is_root:
lines.append(SP_DIR)
lines.append(
"%s%s (%s)" % (pad_text(indent), self.shortname, self.fullname)
)
indent += 1
# sub packages
for pkg in self.sub_packages.values():
lines.append(pkg._tree_view_builder(indent=indent, is_root=False))
# __init__.py
lines.append(
"%s%s (%s)" % (
pad_text(indent), "__init__.py", self.fullname,
)
)
# sub modules
for mod in self.sub_modules.values():
lines.append(
"%s%s (%s)" % (
pad_text(indent), mod.shortname + ".py", mod.fullname,
)
)
return "\n".join(lines)
|
Build a text to represent the package structure.
|
def inverted(self):
'''
Return a version of this instance with inputs replaced by outputs and vice versa.
'''
return Instance(input=self.output, output=self.input,
annotated_input=self.annotated_output,
annotated_output=self.annotated_input,
alt_inputs=self.alt_outputs,
alt_outputs=self.alt_inputs,
source=self.source)
|
Return a version of this instance with inputs replaced by outputs and vice versa.
|
def trace_integration(tracer=None):
"""Wrap threading functions to trace."""
log.info("Integrated module: {}".format(MODULE_NAME))
# Wrap the threading start function
start_func = getattr(threading.Thread, "start")
setattr(
threading.Thread, start_func.__name__, wrap_threading_start(start_func)
)
# Wrap the threading run function
run_func = getattr(threading.Thread, "run")
setattr(threading.Thread, run_func.__name__, wrap_threading_run(run_func))
    # Wrap the multiprocessing pool apply_async function
apply_async_func = getattr(pool.Pool, "apply_async")
setattr(
pool.Pool,
apply_async_func.__name__,
wrap_apply_async(apply_async_func),
)
    # Wrap the ThreadPoolExecutor submit function
submit_func = getattr(futures.ThreadPoolExecutor, "submit")
setattr(
futures.ThreadPoolExecutor,
submit_func.__name__,
wrap_submit(submit_func),
)
|
Wrap threading functions to trace.
|
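A hedged usage sketch: in opencensus this integration is usually switched on by name through config_integration, which ends up calling trace_integration() above; the span name is illustrative:
from opencensus.trace import config_integration
from opencensus.trace.tracer import Tracer

config_integration.trace_integrations(['threading'])   # wraps Thread/Pool/Executor entry points
tracer = Tracer()
with tracer.span(name='parent'):
    # work started in threads or pools from here keeps the parent trace context
    pass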
def go_to_position(self, position):
"""
Moves the text cursor to given position.
:param position: Position to go to.
:type position: int
:return: Method success.
:rtype: bool
"""
cursor = self.textCursor()
cursor.setPosition(position)
self.setTextCursor(cursor)
return True
|
Moves the text cursor to given position.
:param position: Position to go to.
:type position: int
:return: Method success.
:rtype: bool
|
def convert_block_dicts_to_string(self, block_1st2nd, block_1st, block_2nd, block_3rd):
"""Takes into account whether we need to output all codon positions."""
out = ""
# We need 1st and 2nd positions
if self.codon_positions in ['ALL', '1st-2nd']:
for gene_code, seqs in block_1st2nd.items():
out += '>{0}_1st-2nd\n----\n'.format(gene_code)
for seq in seqs:
out += seq
elif self.codon_positions == '1st':
for gene_code, seqs in block_1st.items():
out += '>{0}_1st\n----\n'.format(gene_code)
for seq in seqs:
out += seq
elif self.codon_positions == '2nd':
for gene_code, seqs in block_2nd.items():
out += '>{0}_2nd\n----\n'.format(gene_code)
for seq in seqs:
out += seq
# We also need 3rd positions
if self.codon_positions in ['ALL', '3rd']:
for gene_code, seqs in block_3rd.items():
out += '\n>{0}_3rd\n----\n'.format(gene_code)
for seq in seqs:
out += seq
return out
|
Takes into account whether we need to output all codon positions.
|
def search(self, keyword, count=30):
"""
Search files or directories
:param str keyword: keyword
:param int count: number of entries to be listed
"""
kwargs = {}
kwargs['search_value'] = keyword
root = self.root_directory
entries = root._load_entries(func=self._req_files_search,
count=count, page=1, **kwargs)
res = []
for entry in entries:
if 'pid' in entry:
res.append(_instantiate_directory(self, entry))
else:
res.append(_instantiate_file(self, entry))
return res
|
Search files or directories
:param str keyword: keyword
:param int count: number of entries to be listed
|
def condense(self):
"""
Condense the data set to the distinct intervals based on the pseudo key.
"""
for pseudo_key, rows in self._rows.items():
tmp1 = []
intervals = sorted(self._derive_distinct_intervals(rows))
for interval in intervals:
tmp2 = dict(zip(self._pseudo_key, pseudo_key))
tmp2[self._key_start_date] = interval[0]
tmp2[self._key_end_date] = interval[1]
tmp1.append(tmp2)
self._rows[pseudo_key] = tmp1
|
Condense the data set to the distinct intervals based on the pseudo key.
|
def Prep(self, size, additionalBytes):
"""
Prep prepares to write an element of `size` after `additional_bytes`
have been written, e.g. if you write a string, you need to align
        such that the int length field is aligned to SizeInt32, and the string
data follows it directly.
If all you need to do is align, `additionalBytes` will be 0.
"""
# Track the biggest thing we've ever aligned to.
if size > self.minalign:
self.minalign = size
# Find the amount of alignment needed such that `size` is properly
# aligned after `additionalBytes`:
alignSize = (~(len(self.Bytes) - self.Head() + additionalBytes)) + 1
alignSize &= (size - 1)
# Reallocate the buffer if needed:
while self.Head() < alignSize+size+additionalBytes:
oldBufSize = len(self.Bytes)
self.growByteBuffer()
updated_head = self.head + len(self.Bytes) - oldBufSize
self.head = UOffsetTFlags.py_type(updated_head)
self.Pad(alignSize)
|
Prep prepares to write an element of `size` after `additional_bytes`
have been written, e.g. if you write a string, you need to align
such that the int length field is aligned to SizeInt32, and the string
data follows it directly.
If all you need to do is align, `additionalBytes` will be 0.
|
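The two's-complement trick in Prep computes how much padding brings the bytes already written (plus additionalBytes) up to a multiple of size; a small, self-contained check of that arithmetic with illustrative numbers:
def align_padding(bytes_written, additional_bytes, size):
    # (~x + 1) & (size - 1) == padding needed to make x a multiple of `size`
    # (size must be a power of two, as alignment values are)
    align = (~(bytes_written + additional_bytes)) + 1
    return align & (size - 1)

# writing a 4-byte length after 10 bytes already written needs 2 bytes of padding
print(align_padding(bytes_written=10, additional_bytes=0, size=4))   # prints 2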
def cbar_value_cb(self, cbar, value, event):
"""
This method is called when the user moves the mouse over the
ColorBar. It displays the value of the mouse position in the
ColorBar in the Readout (if any).
"""
if self.cursor_obj is not None:
readout = self.cursor_obj.readout
if readout is not None:
maxv = readout.maxv
text = "Value: %-*.*s" % (maxv, maxv, value)
readout.set_text(text)
|
This method is called when the user moves the mouse over the
ColorBar. It displays the value of the mouse position in the
ColorBar in the Readout (if any).
|
def getkeypress(self):
u'''Return next key press event from the queue, ignoring others.'''
ck = System.ConsoleKey
while 1:
e = System.Console.ReadKey(True)
if e.Key == System.ConsoleKey.PageDown: #PageDown
self.scroll_window(12)
elif e.Key == System.ConsoleKey.PageUp:#PageUp
self.scroll_window(-12)
elif str(e.KeyChar) == u"\000":#Drop deadkeys
log(u"Deadkey: %s"%e)
return event(self, e)
else:
return event(self, e)
|
u'''Return next key press event from the queue, ignoring others.
|
def p_expression_lesseq(self, p):
'expression : expression LE expression'
p[0] = LessEq(p[1], p[3], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1))
|
expression : expression LE expression
|
def main():
"""Create application.properties for a given application."""
logging.basicConfig(format=LOGGING_FORMAT)
parser = argparse.ArgumentParser(description=main.__doc__)
add_debug(parser)
add_app(parser)
add_env(parser)
add_properties(parser)
add_region(parser)
add_artifact_path(parser)
add_artifact_version(parser)
args = parser.parse_args()
logging.getLogger(__package__.split('.')[0]).setLevel(args.debug)
LOG.debug('Args: %s', vars(args))
rendered_props = get_properties(args.properties)
if rendered_props['pipeline']['type'] == 's3':
s3app = S3Apps(app=args.app, env=args.env, region=args.region, prop_path=args.properties)
s3app.create_bucket()
s3deploy = S3Deployment(
app=args.app,
env=args.env,
region=args.region,
prop_path=args.properties,
artifact_path=args.artifact_path,
artifact_version=args.artifact_version)
s3deploy.upload_artifacts()
else:
init_properties(**vars(args))
|
Create application.properties for a given application.
|
def is_writable(filename):
"""Check if
- the file is a regular file and is writable, or
- the file does not exist and its parent directory exists and is
writable
"""
if not os.path.exists(filename):
parentdir = os.path.dirname(filename)
return os.path.isdir(parentdir) and os.access(parentdir, os.W_OK)
return os.path.isfile(filename) and os.access(filename, os.W_OK)
|
Check if
- the file is a regular file and is writable, or
- the file does not exist and its parent directory exists and is
writable
|
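A quick illustrative check of is_writable, assuming the function above is importable; the paths are placeholders:
import os
import tempfile

existing = tempfile.NamedTemporaryFile(delete=False).name
print(is_writable(existing))                 # True: a regular, writable file
print(is_writable('/no_such_dir/out.txt'))   # False: parent directory does not exist
os.unlink(existing)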
def check(self, orb):
        Method that checks whether or not the listener is triggered
Args:
orb (Orbit):
Return:
bool: True if there is a zero-crossing for the parameter watched by the listener
"""
return self.prev is not None and np.sign(self(orb)) != np.sign(self(self.prev))
|
Method that checks whether or not the listener is triggered
Args:
orb (Orbit):
Return:
bool: True if there is a zero-crossing for the parameter watched by the listener
|
def _value_format(self, value):
"""
Format value for map value display.
"""
return '%s: %s' % (
self.area_names.get(self.adapt_code(value[0]), '?'),
self._y_format(value[1])
)
|
Format value for map value display.
|
def ldeo(magfile, dir_path=".", input_dir_path="",
meas_file="measurements.txt", spec_file="specimens.txt",
samp_file="samples.txt", site_file="sites.txt", loc_file="locations.txt",
specnum=0, samp_con="1", location="unknown", codelist="",
coil="", arm_labfield=50e-6, trm_peakT=873., peakfield=0,
labfield=0, phi=0, theta=0, mass_or_vol="v", noave=0):
"""
converts Lamont Doherty Earth Observatory measurement files to MagIC data base model 3.0
Parameters
_________
magfile : input measurement file
dir_path : output directory path, default "."
input_dir_path : input file directory IF different from dir_path, default ""
meas_file : output file measurement file name, default "measurements.txt"
spec_file : output file specimen file name, default "specimens.txt"
samp_file : output file sample file name, default "samples.txt"
site_file : output file site file name, default "sites.txt"
loc_file : output file location file name, default "locations.txt"
specnum : number of terminal characters distinguishing specimen from sample, default 0
samp_con : sample/site naming convention, default "1"
"1" XXXXY: where XXXX is an arbitr[ary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
"2" XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
"3" XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
"4-Z" XXXX[YYY]: YYY is sample designation with Z characters from site XXX
"5" site name same as sample
"6" site is entered under a separate column NOT CURRENTLY SUPPORTED
"7-Z" [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
NB: all others you will have to customize your self
or e-mail ltauxe@ucsd.edu for help.
"8" synthetic - has no site name
"9" ODP naming convention
codelist : colon delimited string of lab protocols (e.g., codelist="AF"), default ""
AF: af demag
T: thermal including thellier but not trm acquisition
S: Shaw method
I: IRM (acquisition)
N: NRM only
TRM: trm acquisition
ANI: anisotropy experiment
D: double AF demag
G: triple AF demag (GRM protocol)
    coil : 1,2, or 3 units of IRM field in volts using ASC coil #1,2 or 3, default ""
arm_labfield : dc field for ARM in tesla, default 50e-6
    peakfield : peak af field for ARM, default 0
    trm_peakT : peak temperature for TRM, default 873.
labfield : lab field in tesla for TRM, default 0
phi, theta : direction of lab field, default 0, 0
mass_or_vol : is the parameter in the file mass 'm' or volume 'v', default "v"
noave : boolean, if False, average replicates, default False
Returns
--------
    type - Tuple : (True or False indicating if conversion was successful, meas_file name written)
Effects
_______
creates MagIC formatted tables
"""
# initialize some stuff
dec = [315, 225, 180, 135, 45, 90, 270, 270, 270, 90, 180, 180, 0, 0, 0]
inc = [0, 0, 0, 0, 0, -45, -45, 0, 45, 45, 45, -45, -90, -45, 45]
tdec = [0, 90, 0, 180, 270, 0, 0, 90, 0]
tinc = [0, 0, 90, 0, 0, -90, 0, 0, 90]
demag = "N"
trm = 0
irm = 0
# format/organize variables
input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, dir_path)
labfield = int(labfield) * 1e-6
phi = int(phi)
theta = int(theta)
specnum = - int(specnum)
if magfile:
try:
fname = pmag.resolve_file_name(magfile, input_dir_path)
infile = open(fname, 'r')
except IOError:
print("bad mag file name")
return False, "bad mag file name"
else:
print("mag_file field is required option")
return False, "mag_file field is required option"
samp_con = str(samp_con)
if "4" in samp_con:
if "-" not in samp_con:
print(
"naming convention option [4] must be in form 4-Z where Z is an integer")
return False, "naming convention option [4] must be in form 4-Z where Z is an integer"
else:
Z = samp_con.split("-")[1]
samp_con = "4"
elif "7" in samp_con:
if "-" not in samp_con:
print(
"naming convention option [7] must be in form 7-Z where Z is an integer")
return False, "naming convention option [7] must be in form 7-Z where Z is an integer"
else:
Z = samp_con.split("-")[1]
samp_con = "4"
else:
Z = 1
codes = codelist.split(':')
if "AF" in codes:
demag = 'AF'
if not labfield:
methcode = "LT-AF-Z"
if labfield:
methcode = "LT-AF-I"
if "T" in codes:
demag = "T"
if not labfield:
methcode = "LT-T-Z"
if labfield:
methcode = "LT-T-I"
if "I" in codes:
methcode = "LP-IRM"
irmunits = "mT"
if "S" in codes:
demag = "S"
methcode = "LP-PI-TRM:LP-PI-ALT-AFARM"
trm_labfield = labfield
# should use arm_labfield and trm_peakT as well, but these values are currently never asked for
if "G" in codes:
methcode = "LT-AF-G"
if "D" in codes:
methcode = "LT-AF-D"
if "TRM" in codes:
demag = "T"
trm = 1
if coil:
methcode = "LP-IRM"
irmunits = "V"
if coil not in ["1", "2", "3"]:
print('not a valid coil specification')
return False, 'not a valid coil specification'
if demag == "T" and "ANI" in codes:
methcode = "LP-AN-TRM"
if demag == "AF" and "ANI" in codes:
methcode = "LP-AN-ARM"
if labfield == 0:
labfield = 50e-6
if peakfield == 0:
peakfield = .180
MeasRecs, SpecRecs, SampRecs, SiteRecs, LocRecs = [], [], [], [], []
version_num = pmag.get_version()
# find start of data:
DIspec = []
Data = infile.readlines()
infile.close()
for k in range(len(Data)):
rec = Data[k].split()
if len(rec) <= 2:
continue
if rec[0].upper() == "LAT:" and len(rec) > 3:
lat, lon = rec[1], rec[3]
continue
elif rec[0].upper() == "ID":
continue
MeasRec, SpecRec, SampRec, SiteRec, LocRec = {}, {}, {}, {}, {}
specimen = rec[0]
if specnum != 0:
sample = specimen[:specnum]
else:
sample = specimen
site = pmag.parse_site(sample, samp_con, Z)
if mass_or_vol == 'v':
volume = float(rec[12])
if volume > 0:
# convert to SI (assume Bartington, 10-5 SI)
                susc_chi_volume = '%10.3e' % (
                    float(rec[11])*1e-5 / volume)
else:
# convert to SI (assume Bartington, 10-5 SI)
susc_chi_volume = '%10.3e' % (float(rec[11])*1e-5)
else:
mass = float(rec[12])
if mass > 0:
# convert to SI (assume Bartington, 10-5 SI)
                susc_chi_mass = '%10.3e' % (
                    float(rec[11])*1e-5 / mass)
else:
# convert to SI (assume Bartington, 10-5 SI)
susc_chi_mass = '%10.3e' % (float(rec[11])*1e-5)
# print((specimen,sample,site,samp_con,Z))
# fill tables besides measurements
if specimen != "" and specimen not in [x['specimen'] if 'specimen' in list(x.keys()) else "" for x in SpecRecs]:
SpecRec['specimen'] = specimen
SpecRec['sample'] = sample
if mass_or_vol == 'v':
SpecRec["susc_chi_volume"] = susc_chi_volume
SpecRec["volume"] = volume
else:
SpecRec["susc_chi_mass"] = susc_chi_mass
SpecRec["magn_mass"] = mass
SpecRecs.append(SpecRec)
if sample != "" and sample not in [x['sample'] if 'sample' in list(x.keys()) else "" for x in SampRecs]:
SampRec['sample'] = sample
SampRec['site'] = site
SampRecs.append(SampRec)
if site != "" and site not in [x['site'] if 'site' in list(x.keys()) else "" for x in SiteRecs]:
SiteRec['site'] = site
SiteRec['location'] = location
SiteRec['lat'] = lat
SiteRec['lon'] = lon
SiteRecs.append(SiteRec)
if location != "" and location not in [x['location'] if 'location' in list(x.keys()) else "" for x in LocRecs]:
LocRec['location'] = location
LocRec['lat_n'] = lat
LocRec['lon_e'] = lon
LocRec['lat_s'] = lat
LocRec['lon_w'] = lon
LocRecs.append(LocRec)
# fill measurements
MeasRec["treat_temp"] = '%8.3e' % (273) # room temp in kelvin
MeasRec["meas_temp"] = '%8.3e' % (273) # room temp in kelvin
MeasRec["treat_ac_field"] = '0'
MeasRec["treat_dc_field"] = '0'
MeasRec["treat_dc_field_phi"] = '0'
MeasRec["treat_dc_field_theta"] = '0'
meas_type = "LT-NO"
MeasRec["quality"] = 'g'
MeasRec["standard"] = 'u'
MeasRec["treat_step_num"] = 0
MeasRec["specimen"] = specimen
# if mass_or_vol=='v': MeasRec["susc_chi_volume"]=susc_chi_volume
# else: MeasRec["susc_chi_mass"]=susc_chi_mass
try:
float(rec[3])
MeasRec["dir_csd"] = rec[3]
except ValueError:
MeasRec["dir_csd"] = ''
MeasRec["magn_moment"] = '%10.3e' % (float(rec[4])*1e-7)
MeasRec["dir_dec"] = rec[5]
MeasRec["dir_inc"] = rec[6]
MeasRec["citations"] = "This study"
if demag == "AF":
if methcode != "LP-AN-ARM":
MeasRec["treat_ac_field"] = '%8.3e' % (
float(rec[1])*1e-3) # peak field in tesla
meas_type = "LT-AF-Z"
MeasRec["treat_dc_field"] = '0'
else: # AARM experiment
if treat[1][0] == '0':
meas_type = "LT-AF-Z"
MeasRec["treat_ac_field"] = '%8.3e' % (
peakfield) # peak field in tesla
else:
meas_type = "LT-AF-I"
ipos = int(treat[0])-1
MeasRec["treat_dc_field_phi"] = '%7.1f' % (dec[ipos])
MeasRec["treat_dc_field_theta"] = '%7.1f' % (inc[ipos])
MeasRec["treat_dc_field"] = '%8.3e' % (labfield)
MeasRec["treat_ac_field"] = '%8.3e' % (
peakfield) # peak field in tesla
elif demag == "T":
if rec[1][0] == ".":
rec[1] = "0"+rec[1]
treat = rec[1].split('.')
if len(treat) == 1:
treat.append('0')
MeasRec["treat_temp"] = '%8.3e' % (
float(rec[1])+273.) # temp in kelvin
meas_type = "LT-T-Z"
MeasRec["treat_temp"] = '%8.3e' % (
float(treat[0])+273.) # temp in kelvin
if trm == 0: # demag=T and not trmaq
if treat[1][0] == '0':
meas_type = "LT-T-Z"
else:
# labfield in tesla (convert from microT)
MeasRec["treat_dc_field"] = '%8.3e' % (labfield)
MeasRec["treat_dc_field_phi"] = '%7.1f' % (
phi) # labfield phi
MeasRec["treat_dc_field_theta"] = '%7.1f' % (
theta) # labfield theta
if treat[1][0] == '1':
meas_type = "LT-T-I" # in-field thermal step
if treat[1][0] == '2':
meas_type = "LT-PTRM-I" # pTRM check
pTRM = 1
if treat[1][0] == '3':
# this is a zero field step
MeasRec["treat_dc_field"] = '0'
meas_type = "LT-PTRM-MD" # pTRM tail check
else:
meas_type = "LT-T-I" # trm acquisition experiment
MeasRec['method_codes'] = meas_type
MeasRecs.append(MeasRec)
# need to add these
meas_file = pmag.resolve_file_name(meas_file, output_dir_path)
spec_file = pmag.resolve_file_name(spec_file, output_dir_path)
samp_file = pmag.resolve_file_name(samp_file, output_dir_path)
site_file = pmag.resolve_file_name(site_file, output_dir_path)
loc_file = pmag.resolve_file_name(loc_file, output_dir_path)
con = cb.Contribution(output_dir_path, read_tables=[])
con.add_magic_table_from_data(dtype='specimens', data=SpecRecs)
con.add_magic_table_from_data(dtype='samples', data=SampRecs)
con.add_magic_table_from_data(dtype='sites', data=SiteRecs)
con.add_magic_table_from_data(dtype='locations', data=LocRecs)
MeasOuts = pmag.measurements_methods3(MeasRecs, noave)
con.add_magic_table_from_data(dtype='measurements', data=MeasOuts)
con.tables['specimens'].write_magic_file(custom_name=spec_file)
con.tables['samples'].write_magic_file(custom_name=samp_file)
con.tables['sites'].write_magic_file(custom_name=site_file)
con.tables['locations'].write_magic_file(custom_name=loc_file)
con.tables['measurements'].write_magic_file(custom_name=meas_file)
return True, meas_file
|
converts Lamont Doherty Earth Observatory measurement files to MagIC data base model 3.0
Parameters
_________
magfile : input measurement file
dir_path : output directory path, default "."
input_dir_path : input file directory IF different from dir_path, default ""
meas_file : output file measurement file name, default "measurements.txt"
spec_file : output file specimen file name, default "specimens.txt"
samp_file : output file sample file name, default "samples.txt"
site_file : output file site file name, default "sites.txt"
loc_file : output file location file name, default "locations.txt"
specnum : number of terminal characters distinguishing specimen from sample, default 0
samp_con : sample/site naming convention, default "1"
"1" XXXXY: where XXXX is an arbitr[ary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
"2" XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
"3" XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
"4-Z" XXXX[YYY]: YYY is sample designation with Z characters from site XXX
"5" site name same as sample
"6" site is entered under a separate column NOT CURRENTLY SUPPORTED
"7-Z" [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
NB: all others you will have to customize your self
or e-mail ltauxe@ucsd.edu for help.
"8" synthetic - has no site name
"9" ODP naming convention
codelist : colon delimited string of lab protocols (e.g., codelist="AF"), default ""
AF: af demag
T: thermal including thellier but not trm acquisition
S: Shaw method
I: IRM (acquisition)
N: NRM only
TRM: trm acquisition
ANI: anisotropy experiment
D: double AF demag
G: triple AF demag (GRM protocol)
    coil : 1,2, or 3 units of IRM field in volts using ASC coil #1,2 or 3, default ""
arm_labfield : dc field for ARM in tesla, default 50e-6
    peakfield : peak af field for ARM, default 0
    trm_peakT : peak temperature for TRM, default 873.
labfield : lab field in tesla for TRM, default 0
phi, theta : direction of lab field, default 0, 0
mass_or_vol : is the parameter in the file mass 'm' or volume 'v', default "v"
noave : boolean, if False, average replicates, default False
Returns
--------
    type - Tuple : (True or False indicating if conversion was successful, meas_file name written)
Effects
_______
creates MagIC formatted tables
|
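An illustrative call of the converter above, assuming the pmagpy conversion module is importable; the input file and output directory are placeholders:
from pmagpy import convert_2_magic as convert

ok, meas_file = convert.ldeo('example.ldeo',            # hypothetical LDEO measurement file
                             dir_path='magic_output',   # where the MagIC tables are written
                             codelist='AF',             # AF demagnetization protocol
                             samp_con='1')              # sample = site name + one character
print(ok, meas_file)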
def get_pull_requests(self, project, repository, state='OPEN', order='newest', limit=100, start=0):
"""
Get pull requests
:param project:
:param repository:
:param state:
        :param order: OPTIONAL: defaults to NEWEST. The order to return pull requests in, either OLDEST
(as in: "oldest first") or NEWEST.
:param limit:
:param start:
:return:
"""
url = 'rest/api/1.0/projects/{project}/repos/{repository}/pull-requests'.format(project=project,
repository=repository)
params = {}
if state:
params['state'] = state
if limit:
params['limit'] = limit
if start:
params['start'] = start
if order:
params['order'] = order
response = self.get(url, params=params)
if 'values' not in response:
return []
pr_list = (response or {}).get('values')
while not response.get('isLastPage'):
start = response.get('nextPageStart')
params['start'] = start
response = self.get(url, params=params)
pr_list += (response or {}).get('values')
return pr_list
|
Get pull requests
:param project:
:param repository:
:param state:
        :param order: OPTIONAL: defaults to NEWEST. The order to return pull requests in, either OLDEST
(as in: "oldest first") or NEWEST.
:param limit:
:param start:
:return:
|
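A hedged usage sketch with the atlassian-python-api Bitbucket client that carries this method; URL, credentials, project key, and repository slug are placeholders:
from atlassian import Bitbucket

bitbucket = Bitbucket(url='https://bitbucket.example.com',
                      username='bot', password='secret')
prs = bitbucket.get_pull_requests('PROJ', 'my-repo', state='OPEN', order='oldest')
for pr in prs:
    print(pr['id'], pr['title'])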
def _get_matching(self, reltype, target, is_external=False):
"""
Return relationship of matching *reltype*, *target*, and
*is_external* from collection, or None if not found.
"""
def matches(rel, reltype, target, is_external):
if rel.reltype != reltype:
return False
if rel.is_external != is_external:
return False
rel_target = rel.target_ref if rel.is_external else rel.target_part
if rel_target != target:
return False
return True
for rel in self.values():
if matches(rel, reltype, target, is_external):
return rel
return None
|
Return relationship of matching *reltype*, *target*, and
*is_external* from collection, or None if not found.
|
def update(self,table, sys_id, **kparams):
"""
update a record via table api, kparams being the dict of PUT params to update.
returns a SnowRecord obj.
"""
record = self.api.update(table, sys_id, **kparams)
return record
|
update a record via table api, kparams being the dict of PUT params to update.
returns a SnowRecord obj.
|
def _get_alpha_data(data: np.ndarray, kwargs) -> Union[float, np.ndarray]:
"""Get alpha values for all data points.
Parameters
----------
alpha: Callable or float
This can be a fixed value or a function of the data.
"""
alpha = kwargs.pop("alpha", 1)
if hasattr(alpha, "__call__"):
return np.vectorize(alpha)(data)
return alpha
|
Get alpha values for all data points.
Parameters
----------
alpha: Callable or float
This can be a fixed value or a function of the data.
|
def show(self, start_date, end_date):
"""setting suggested name to something readable, replace backslashes
with dots so the name is valid in linux"""
# title in the report file name
vars = {"title": _("Time track"),
"start": start_date.strftime("%x").replace("/", "."),
"end": end_date.strftime("%x").replace("/", ".")}
if start_date != end_date:
filename = "%(title)s, %(start)s - %(end)s.html" % vars
else:
filename = "%(title)s, %(start)s.html" % vars
self.dialog.set_current_name(filename)
response = self.dialog.run()
if response != gtk.ResponseType.OK:
self.emit("report-chooser-closed")
self.dialog.destroy()
self.dialog = None
else:
self.on_save_button_clicked()
|
setting suggested name to something readable, replace slashes
with dots so the name is valid in linux
|
async def get_headline(self, name):
"""Get stored messages for a service.
Args:
name (string): The name of the service to get messages from.
Returns:
ServiceMessage: the headline or None if no headline has been set
"""
resp = await self.send_command(OPERATIONS.CMD_QUERY_HEADLINE, {'name': name},
MESSAGES.QueryHeadlineResponse, timeout=5.0)
if resp is not None:
resp = states.ServiceMessage.FromDictionary(resp)
return resp
|
Get stored messages for a service.
Args:
name (string): The name of the service to get messages from.
Returns:
ServiceMessage: the headline or None if no headline has been set
|
def get_api_ruler(self):
"""Return the existing reader if it exists or launch a new one.
Returns
-------
api_ruler : org.clulab.reach.apis.ApiRuler
An instance of the REACH ApiRuler class (java object).
"""
if self.api_ruler is None:
try:
self.api_ruler = \
autoclass('org.clulab.reach.export.apis.ApiRuler')
except JavaException as e:
raise ReachOfflineReadingError(e)
return self.api_ruler
|
Return the existing reader if it exists or launch a new one.
Returns
-------
api_ruler : org.clulab.reach.apis.ApiRuler
An instance of the REACH ApiRuler class (java object).
|
def addTab(self, view, title):
"""
Adds a new view tab to this panel.
:param view | <XView>
title | <str>
:return <bool> | success
"""
if not isinstance(view, XView):
return False
tab = self._tabBar.addTab(title)
self.addWidget(view)
tab.titleChanged.connect(view.setWindowTitle)
# create connections
try:
view.windowTitleChanged.connect(self.refreshTitles,
QtCore.Qt.UniqueConnection)
view.sizeConstraintChanged.connect(self.adjustSizeConstraint,
QtCore.Qt.UniqueConnection)
view.poppedOut.connect(self.disconnectView,
QtCore.Qt.UniqueConnection)
except RuntimeError:
pass
self.setCurrentIndex(self.count() - 1)
return True
|
Adds a new view tab to this panel.
:param view | <XView>
title | <str>
:return <bool> | success
|
def encode_field(self, field, value):
"""Encode the given value as JSON.
Args:
field: a messages.Field for the field we're encoding.
value: a value for field.
Returns:
A python value suitable for json.dumps.
"""
for encoder in _GetFieldCodecs(field, 'encoder'):
result = encoder(field, value)
value = result.value
if result.complete:
return value
if isinstance(field, messages.EnumField):
if field.repeated:
remapped_value = [GetCustomJsonEnumMapping(
field.type, python_name=e.name) or e.name for e in value]
else:
remapped_value = GetCustomJsonEnumMapping(
field.type, python_name=value.name)
if remapped_value:
return remapped_value
if (isinstance(field, messages.MessageField) and
not isinstance(field, message_types.DateTimeField)):
value = json.loads(self.encode_message(value))
return super(_ProtoJsonApiTools, self).encode_field(field, value)
|
Encode the given value as JSON.
Args:
field: a messages.Field for the field we're encoding.
value: a value for field.
Returns:
A python value suitable for json.dumps.
|
def register_callback(self):
"""Register callback that we will have to wait for"""
cid = str(self.__cid)
self.__cid += 1
event = queue.Queue()
self.__callbacks[cid] = event
return cid, event
|
Register callback that we will have to wait for
|
def _handle_precalled(data):
"""Copy in external pre-called variants fed into analysis.
Symlinks for non-CWL runs where we want to ensure VCF present
in a local directory.
"""
if data.get("vrn_file") and not cwlutils.is_cwl_run(data):
vrn_file = data["vrn_file"]
if isinstance(vrn_file, (list, tuple)):
assert len(vrn_file) == 1
vrn_file = vrn_file[0]
precalled_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "precalled"))
ext = utils.splitext_plus(vrn_file)[-1]
orig_file = os.path.abspath(vrn_file)
our_vrn_file = os.path.join(precalled_dir, "%s-precalled%s" % (dd.get_sample_name(data), ext))
utils.copy_plus(orig_file, our_vrn_file)
data["vrn_file"] = our_vrn_file
return data
|
Copy in external pre-called variants fed into analysis.
Symlinks for non-CWL runs where we want to ensure VCF present
in a local directory.
|
def _exponent_handler_factory(ion_type, exp_chars, parse_func, first_char=None):
    """Generates a handler co-routine which tokenizes a numeric exponent.
    Args:
        ion_type (IonType): The type of the value with this exponent.
        exp_chars (sequence): The set of ordinals of the legal exponent characters for this component.
        parse_func (callable): Called upon ending the numeric value. Accepts the current token value and returns a
            thunk that lazily parses the token.
        first_char (Optional[int]): The ordinal of the character that should be appended instead of the character that
            occurs first in this component. This is useful for preparing the token for parsing in the case where a
            particular character is peculiar to the Ion format (e.g. 'd' to denote the exponent of a decimal value
            should be replaced with 'e' for compatibility with python's Decimal type).
    """
    def transition(prev, c, ctx, trans):
        if c in _SIGN and prev in exp_chars:
            ctx.value.append(c)
        else:
            _illegal_character(c, ctx)
        return trans
    illegal = exp_chars + _SIGN
    return _numeric_handler_factory(_DIGITS, transition, lambda c, ctx: c in exp_chars, illegal, parse_func,
                                    illegal_at_end=illegal, ion_type=ion_type, first_char=first_char)
|
Generates a handler co-routine which tokenizes a numeric exponent.
Args:
ion_type (IonType): The type of the value with this exponent.
exp_chars (sequence): The set of ordinals of the legal exponent characters for this component.
parse_func (callable): Called upon ending the numeric value. Accepts the current token value and returns a
thunk that lazily parses the token.
first_char (Optional[int]): The ordinal of the character that should be appended instead of the character that
occurs first in this component. This is useful for preparing the token for parsing in the case where a
particular character is peculiar to the Ion format (e.g. 'd' to denote the exponent of a decimal value
should be replaced with 'e' for compatibility with python's Decimal type).
|
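The first_char substitution the docstring describes exists because Ion decimals mark their exponent with 'd', which must become 'e' before the accumulated token can be handed to Python's Decimal. A toy illustration of that rewrite, independent of the amazon.ion tokenizer machinery described above.

from decimal import Decimal

def parse_ion_decimal(text):
    # Rewrite the Ion-specific exponent marker so Decimal can parse the token.
    return Decimal(text.replace('d', 'e').replace('D', 'e'))

print(parse_ion_decimal('1.23d-4'))  # 0.000123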
def Run(self, arg):
    """Does the actual work."""
    try:
        if self.grr_worker.client.FleetspeakEnabled():
            raise ValueError("Not supported on Fleetspeak enabled clients.")
    except AttributeError:
        pass
    smart_arg = {str(field): value for field, value in iteritems(arg)}
    disallowed_fields = [
        field for field in smart_arg
        if field not in UpdateConfiguration.UPDATABLE_FIELDS
    ]
    if disallowed_fields:
        raise ValueError("Received an update request for restricted field(s) %s."
                         % ",".join(disallowed_fields))
    if platform.system() != "Windows":
        # Check config validity before really applying the changes. This isn't
        # implemented for our Windows clients though, whose configs are stored in
        # the registry, as opposed to in the filesystem.
        canary_config = config.CONFIG.CopyConfig()
        # Prepare a temporary file we'll write changes to.
        with tempfiles.CreateGRRTempFile(mode="w+") as temp_fd:
            temp_filename = temp_fd.name
        # Write canary_config changes to temp_filename.
        canary_config.SetWriteBack(temp_filename)
        self._UpdateConfig(smart_arg, canary_config)
        try:
            # Assert temp_filename is usable by loading it.
            canary_config.SetWriteBack(temp_filename)
        # Wide exception handling passed here from config_lib.py...
        except Exception:  # pylint: disable=broad-except
            logging.warning("Updated config file %s is not usable.", temp_filename)
            raise
        # If temp_filename works, remove it (if not, it's useful for debugging).
        os.unlink(temp_filename)
    # The changes seem to work, so push them to the real config.
    self._UpdateConfig(smart_arg, config.CONFIG)
|
Does the actual work.
|
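The non-Windows branch uses a canary pattern: the proposed changes are first written through a throwaway copy of the config backed by a temp file, that file is re-loaded to prove it still parses, and only then are the same changes pushed to the real config. A generic sketch of that pattern using configparser as a stand-in for GRR's config machinery; nothing below is part of the GRR API.

import configparser
import os
import tempfile

def apply_with_canary(real_config_path, updates):
    fd, canary_path = tempfile.mkstemp(suffix=".ini")
    os.close(fd)
    # Build and write the canary copy with the proposed updates applied.
    canary = configparser.ConfigParser()
    canary.read(real_config_path)
    canary.read_dict({"DEFAULT": updates})
    with open(canary_path, "w") as fh:
        canary.write(fh)
    # Re-parse the canary file to confirm the updated config is usable; if this
    # raises, the canary file is intentionally left behind for debugging.
    configparser.ConfigParser().read(canary_path)
    os.unlink(canary_path)
    # The canary round-tripped, so push the same updates to the real file.
    real = configparser.ConfigParser()
    real.read(real_config_path)
    real.read_dict({"DEFAULT": updates})
    with open(real_config_path, "w") as fh:
        real.write(fh)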
def get_fmt_widget(self, parent, project):
    """Create a combobox with the attributes"""
    from psy_simple.widgets.texts import LabelWidget
    return LabelWidget(parent, self, project)
|
Create a combobox with the attributes
|