| code (string, length 75 to 104k) | docstring (string, length 1 to 46.9k) |
|---|---|
def folder_get(self, token, folder_id):
"""
Get the attributes of the specified folder.
:param token: A valid token for the user in question.
:type token: string
:param folder_id: The id of the requested folder.
:type folder_id: int | long
:returns: Dictionary of the folder attributes.
:rtype: dict
"""
parameters = dict()
parameters['token'] = token
parameters['id'] = folder_id
response = self.request('midas.folder.get', parameters)
return response
|
Get the attributes of the specified folder.
:param token: A valid token for the user in question.
:type token: string
:param folder_id: The id of the requested folder.
:type folder_id: int | long
:returns: Dictionary of the folder attributes.
:rtype: dict
|
def cmd(send, msg, args):
"""Converts text to fullwidth characters.
Syntax: {command} [text]
"""
if not msg:
msg = gen_word()
send(gen_fullwidth(msg.upper()))
|
Converts text to fullwidth characters.
Syntax: {command} [text]
|
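The helper gen_fullwidth is not shown above; as a rough sketch of what such a converter typically does (an assumption, not this project's actual implementation), printable ASCII shifts into the fullwidth block by adding 0xFEE0 and a space becomes the ideographic space U+3000:

# Hypothetical sketch of a fullwidth converter; gen_fullwidth itself is not shown above.
def fullwidth(text):
    out = []
    for ch in text:
        code = ord(ch)
        if ch == ' ':
            out.append('\u3000')            # ideographic space
        elif 0x21 <= code <= 0x7E:
            out.append(chr(code + 0xFEE0))  # shift into the fullwidth block U+FF01..U+FF5E
        else:
            out.append(ch)                  # leave everything else untouched
    return ''.join(out)

print(fullwidth('HELLO 123'))  # 'ＨＥＬＬＯ　１２３'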
def transition_to_add(self):
"""Transition to add"""
assert self.state in [AQStateMachineStates.init, AQStateMachineStates.add]
self.state = AQStateMachineStates.add
|
Transition to add
|
def activate(paths, skip_local, skip_shared):
'''Activate an environment'''
if not paths:
ctx = click.get_current_context()
if cpenv.get_active_env():
ctx.invoke(info)
return
click.echo(ctx.get_help())
examples = (
'\nExamples: \n'
' cpenv activate my_env\n'
' cpenv activate ./relative/path/to/my_env\n'
' cpenv activate my_env my_module\n'
)
click.echo(examples)
return
if skip_local:
cpenv.module_resolvers.remove(cpenv.resolver.module_resolver)
cpenv.module_resolvers.remove(cpenv.resolver.active_env_module_resolver)
if skip_shared:
cpenv.module_resolvers.remove(cpenv.resolver.modules_path_resolver)
try:
r = cpenv.resolve(*paths)
except cpenv.ResolveError as e:
click.echo('\n' + str(e))
return
resolved = set(r.resolved)
active_modules = set()
env = cpenv.get_active_env()
if env:
active_modules.add(env)
active_modules.update(cpenv.get_active_modules())
new_modules = resolved - active_modules
old_modules = active_modules & resolved
if old_modules and not new_modules:
click.echo(
'\nModules already active: '
+ bold(' '.join([obj.name for obj in old_modules]))
)
return
if env and contains_env(new_modules):
click.echo('\nUse ' + bold('exit') + ' to leave your active environment first.')
return
click.echo('\nResolved the following modules...')
click.echo(format_objects(r.resolved))
r.activate()
click.echo(blue('\nLaunching subshell...'))
modules = sorted(resolved | active_modules, key=_type_and_name)
prompt = ':'.join([obj.name for obj in modules])
shell.launch(prompt)
|
Activate an environment
|
def value(self, obj):
'''
Computes the value of this field to update the index.
:param obj: object instance, as a dictionary or as a model instance.
'''
if self.template_name:
t = loader.select_template([self.template_name])
return t.render(Context({'object': obj}))
if self.eval_func:
try:
return eval(self.eval_func)
except Exception as e:
raise type(e)('Could not compute value of {} field (eval_as=`{}`): {}.'.format(unicode(self), self.eval_func, unicode(e)))
elif self.model_attr:
if isinstance(obj, dict):
return obj[self.model_attr]
current_obj = getattr(obj, self.model_attr)
if callable(current_obj):
return current_obj()
else:
return current_obj
else:
raise KeyError('{0} gets its value via a model attribute, an eval function, a template, or is prepared in a method '
'call but none of `model_attr`, `eval_as,` `template,` `prepare_{0}` is provided.'.format(unicode(self)))
|
Computes the value of this field to update the index.
:param obj: object instance, as a dictionary or as a model instance.
|
def factorize(cls, pq):
"""
Factorizes the given large integer.
:param pq: the product pq of two primes.
:return: a tuple containing the two factors p and q.
"""
if pq % 2 == 0:
return 2, pq // 2
y, c, m = randint(1, pq - 1), randint(1, pq - 1), randint(1, pq - 1)
g = r = q = 1
x = ys = 0
while g == 1:
x = y
for i in range(r):
y = (pow(y, 2, pq) + c) % pq
k = 0
while k < r and g == 1:
ys = y
for i in range(min(m, r - k)):
y = (pow(y, 2, pq) + c) % pq
q = q * (abs(x - y)) % pq
g = cls.gcd(q, pq)
k += m
r *= 2
if g == pq:
while True:
ys = (pow(ys, 2, pq) + c) % pq
g = cls.gcd(abs(x - ys), pq)
if g > 1:
break
p, q = g, pq // g
return (p, q) if p < q else (q, p)
|
Factorizes the given large integer.
:param pq: the product pq of two primes.
:return: a tuple containing the two factors p and q.
|
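The loop above is Brent's cycle-finding variant of Pollard's rho, with the difference products batched before each gcd. A self-contained sketch of the same algorithm, using math.gcd in place of the class's cls.gcd:

# Standalone sketch of Brent's variant of Pollard's rho, mirroring factorize() above.
from math import gcd
from random import randint

def brent_factor(n):
    if n % 2 == 0:
        return 2, n // 2
    y, c, m = randint(1, n - 1), randint(1, n - 1), randint(1, n - 1)
    g = r = q = 1
    x = ys = 0
    while g == 1:
        x = y
        for _ in range(r):
            y = (y * y + c) % n
        k = 0
        while k < r and g == 1:
            ys = y
            for _ in range(min(m, r - k)):
                y = (y * y + c) % n
                q = q * abs(x - y) % n
            g = gcd(q, n)               # one gcd for a whole batch of differences
            k += m
        r *= 2
    if g == n:                          # batched gcd overshot: back up one step at a time
        while True:
            ys = (ys * ys + c) % n
            g = gcd(abs(x - ys), n)
            if g > 1:
                break
    p, q = g, n // g
    return (p, q) if p < q else (q, p)

print(brent_factor(23 * 61))  # (23, 61) for n = 1403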
def delete_user(self, user_id, **kwargs):
"""
Delete user
:param user_id: User ID
:param kwargs:
:return:
"""
return DeleteUser(settings=self.settings, **kwargs).call(user_id=user_id, **kwargs)
|
Delete user
:param user_id: User ID
:param kwargs:
:return:
|
def convert_markerstyle(inputstyle, mode, inputmode=None):
"""
Convert *inputstyle* to ROOT or matplotlib format.
Output format is determined by *mode* ('root' or 'mpl'). The *inputstyle*
may be a ROOT marker style, a matplotlib marker style, or a description
such as 'star' or 'square'.
"""
mode = mode.lower()
if mode not in ('mpl', 'root'):
raise ValueError("`{0}` is not valid `mode`".format(mode))
if inputmode is None:
if inputstyle in markerstyles_root2mpl:
inputmode = 'root'
elif inputstyle in markerstyles_mpl2root or '$' in str(inputstyle):
inputmode = 'mpl'
elif inputstyle in markerstyles_text2root:
inputmode = 'root'
inputstyle = markerstyles_text2root[inputstyle]
else:
raise ValueError(
"`{0}` is not a valid `markerstyle`".format(inputstyle))
if inputmode == 'root':
if inputstyle not in markerstyles_root2mpl:
raise ValueError(
"`{0}` is not a valid ROOT `markerstyle`".format(
inputstyle))
if mode == 'root':
return inputstyle
return markerstyles_root2mpl[inputstyle]
else:
if '$' in str(inputstyle):
if mode == 'root':
return 1
else:
return inputstyle
if inputstyle not in markerstyles_mpl2root:
raise ValueError(
"`{0}` is not a valid matplotlib `markerstyle`".format(
inputstyle))
if mode == 'mpl':
return inputstyle
return markerstyles_mpl2root[inputstyle]
|
Convert *inputstyle* to ROOT or matplotlib format.
Output format is determined by *mode* ('root' or 'mpl'). The *inputstyle*
may be a ROOT marker style, a matplotlib marker style, or a description
such as 'star' or 'square'.
|
def _pairwise(iterable):
"""
Wrapper on itertools for SVD_magnitude.
"""
a, b = itertools.tee(iterable)
next(b, None)
if sys.version_info.major == 2:
return itertools.izip(a, b)
else:
return zip(a, b)
|
Wrapper on itertools for SVD_magnitude.
|
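Aside from the Python 2 branch, this is the standard itertools pairwise recipe; a quick illustration:

# The itertools "pairwise" recipe used by _pairwise above (Python 3 branch only).
import itertools

def pairwise(iterable):
    a, b = itertools.tee(iterable)
    next(b, None)          # advance the second copy by one element
    return zip(a, b)

print(list(pairwise([1, 2, 3, 4])))  # [(1, 2), (2, 3), (3, 4)]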
def calculate_checksum_on_iterator(
itr, algorithm=d1_common.const.DEFAULT_CHECKSUM_ALGORITHM
):
"""Calculate the checksum of an iterator.
Args:
itr: iterable
Object which supports the iterator protocol.
algorithm: str
Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``.
Returns:
str : Checksum as a hexadecimal string, with length decided by the algorithm.
"""
checksum_calc = get_checksum_calculator_by_dataone_designator(algorithm)
for chunk in itr:
checksum_calc.update(chunk)
return checksum_calc.hexdigest()
|
Calculate the checksum of an iterator.
Args:
itr: iterable
Object which supports the iterator protocol.
algorithm: str
Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``.
Returns:
str : Checksum as a hexadecimal string, with length decided by the algorithm.
|
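get_checksum_calculator_by_dataone_designator is defined elsewhere in the library; assuming it simply maps the algorithm name onto a hashlib constructor, the streaming pattern reduces to:

# Minimal sketch of the same streaming-checksum pattern with hashlib directly.
# The designator lookup above is assumed to resolve to one of these constructors.
import hashlib

def checksum_of_chunks(chunks, algorithm='MD5'):
    calculators = {'MD5': hashlib.md5, 'SHA1': hashlib.sha1, 'SHA-1': hashlib.sha1}
    calc = calculators[algorithm]()
    for chunk in chunks:        # each chunk must already be bytes
        calc.update(chunk)
    return calc.hexdigest()

print(checksum_of_chunks([b'hello ', b'world']))  # 5eb63bbbe01eeed093cb22bb8f5acdc3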
def _duplicate_queries(self, output):
"""Appends the most common duplicate queries to the given output."""
if QC_SETTINGS['DISPLAY_DUPLICATES']:
for query, count in self.queries.most_common(QC_SETTINGS['DISPLAY_DUPLICATES']):
lines = ['\nRepeated {0} times.'.format(count)]
lines += wrap(query)
lines = "\n".join(lines) + "\n"
output += self._colorize(lines, count)
return output
|
Appends the most common duplicate queries to the given output.
|
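self.queries is assumed here to be a collections.Counter keyed by query text, which is what most_common() requires; in isolation the pattern looks like:

# Assuming the queries attribute above is a collections.Counter of query strings.
from collections import Counter

queries = Counter(['SELECT 1', 'SELECT 1', 'SELECT 2', 'SELECT 1'])
for query, count in queries.most_common(2):
    print('Repeated {0} times: {1}'.format(count, query))
# Repeated 3 times: SELECT 1
# Repeated 1 times: SELECT 2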
def get_forward_star(self, node):
"""Given a node, get a copy of that node's forward star.
:param node: node to retrieve the forward-star of.
:returns: set -- set of hyperedge_ids for the hyperedges
in the node's forward star.
:raises: ValueError -- No such node exists.
"""
if node not in self._node_attributes:
raise ValueError("No such node exists.")
return self._forward_star[node].copy()
|
Given a node, get a copy of that node's forward star.
:param node: node to retrieve the forward-star of.
:returns: set -- set of hyperedge_ids for the hyperedges
in the node's forward star.
:raises: ValueError -- No such node exists.
|
def args_length(min_len, max_len, *args):
"""
Check that each argument's length is within the given bounds.
"""
not_null(*args)
if not all(map(lambda v: min_len <= len(v) <= max_len, args)):
raise ValueError("Argument length must be between {0} and {1}!".format(min_len, max_len))
|
Check that each argument's length is within the given bounds.
|
def nearest_vertices(self, x, y, k=1, max_distance=np.inf ):
"""
Query the cKDtree for the nearest neighbours and Euclidean
distance from x,y points.
Returns 0, 0 if a cKDtree has not been constructed
(switch tree=True if you need this routine)
Parameters
----------
x : 1D array of Cartesian x coordinates
y : 1D array of Cartesian y coordinates
k : number of nearest neighbours to return
(default: 1)
max_distance : maximum Euclidean distance to search
for neighbours (default: inf)
Returns
-------
d : Euclidean distance between each point and their
nearest neighbour(s)
vert : vertices of the nearest neighbour(s)
"""
if self.tree is False or self.tree is None:
return 0, 0
xy = np.column_stack([x, y])
dxy, vertices = self._cKDtree.query(xy, k=k, distance_upper_bound=max_distance)
if k == 1: # force this to be a 2D array
vertices = np.reshape(vertices, (-1, 1))
return dxy, vertices
|
Query the cKDtree for the nearest neighbours and Euclidean
distance from x,y points.
Returns 0, 0 if a cKDtree has not been constructed
(switch tree=True if you need this routine)
Parameters
----------
x : 1D array of Cartesian x coordinates
y : 1D array of Cartesian y coordinates
k : number of nearest neighbours to return
(default: 1)
max_distance : maximum Euclidean distance to search
for neighbours (default: inf)
Returns
-------
d : Euclidean distance between each point and their
nearest neighbour(s)
vert : vertices of the nearest neighbour(s)
|
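The wrapped call is scipy.spatial.cKDTree.query; a standalone version of the same lookup, including the reshape the method applies for k == 1:

# Standalone sketch of the cKDTree query wrapped by nearest_vertices above.
import numpy as np
from scipy.spatial import cKDTree

points = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
tree = cKDTree(points)

xy = np.column_stack([[0.1, 0.9], [0.1, 0.1]])     # two query points: (0.1, 0.1) and (0.9, 0.1)
d, vert = tree.query(xy, k=1, distance_upper_bound=np.inf)
vert = np.reshape(vert, (-1, 1))                   # force a 2D array, as the method does for k == 1
print(d)     # distance from each query point to its nearest stored point
print(vert)  # [[0], [1]] -- indices into `points`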
def _footer_start_thread(self, text, time):
"""Display given text in the footer. Clears after <time> seconds
"""
footerwid = urwid.AttrMap(urwid.Text(text), 'footer')
self.top.footer = footerwid
load_thread = Thread(target=self._loading_thread, args=(time,))
load_thread.daemon = True
load_thread.start()
|
Display given text in the footer. Clears after <time> seconds
|
def do_build_reports(directory):
"""
Walk the directory and builds pre-calculation reports for all the
job.ini files found.
"""
for cwd, dirs, files in os.walk(directory):
for f in sorted(files):
if f in ('job.ini', 'job_h.ini', 'job_haz.ini', 'job_hazard.ini'):
job_ini = os.path.join(cwd, f)
logging.info(job_ini)
try:
reportwriter.build_report(job_ini, cwd)
except Exception as e:
logging.error(str(e))
|
Walk the directory and builds pre-calculation reports for all the
job.ini files found.
|
def get_joke():
"""Returns a joke from the WebKnox one liner API.
Returns None if unable to retrieve a joke.
"""
page = requests.get("https://api.chucknorris.io/jokes/random")
if page.status_code == 200:
joke = json.loads(page.content.decode("UTF-8"))
return joke["value"]
return None
|
Returns a joke from the chucknorris.io API.
Returns None if unable to retrieve a joke.
|
def merge(args):
"""
%prog merge protein-quartets registry LOST
Merge protein quartets table with dna quartets registry. This is specific
to the napus project.
"""
from jcvi.formats.base import DictFile
p = OptionParser(merge.__doc__)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
quartets, registry, lost = args
qq = DictFile(registry, keypos=1, valuepos=3)
lost = DictFile(lost, keypos=1, valuepos=0, delimiter='|')
qq.update(lost)
fp = open(quartets)
cases = {
"AN,CN": 4,
"BO,AN,CN": 8,
"BO,CN": 2,
"BR,AN": 1,
"BR,AN,CN": 6,
"BR,BO": 3,
"BR,BO,AN": 5,
"BR,BO,AN,CN": 9,
"BR,BO,CN": 7,
}
ip = {
"syntenic_model": "Syntenic_model_excluded_by_OMG",
"complete": "Predictable",
"partial": "Truncated",
"pseudogene": "Pseudogene",
"random": "Match_random",
"real_ns": "Transposed",
"gmap_fail": "GMAP_fail",
"AN LOST": "AN_LOST",
"CN LOST": "CN_LOST",
"BR LOST": "BR_LOST",
"BO LOST": "BO_LOST",
"outside": "Outside_synteny_blocks",
"[NF]": "Not_found",
}
for row in fp:
atoms = row.strip().split("\t")
genes = atoms[:4]
tag = atoms[4]
a, b, c, d = [qq.get(x, ".").rsplit("-", 1)[-1] for x in genes]
qqs = [c, d, a, b]
for i, q in enumerate(qqs):
if atoms[i] != '.':
qqs[i] = "syntenic_model"
# Make comment
comment = "Case{0}".format(cases[tag])
dots = sum([1 for x in genes if x == '.'])
if dots == 1:
idx = genes.index(".")
status = qqs[idx]
status = ip[status]
comment += "-" + status
print(row.strip() + "\t" + "\t".join(qqs + [comment]))
|
%prog merge protein-quartets registry LOST
Merge protein quartets table with dna quartets registry. This is specific
to the napus project.
|
def add_dynamic_kb(kbname, tag, collection="", searchwith=""):
"""A convenience method."""
kb_id = add_kb(kb_name=kbname, kb_type='dynamic')
save_kb_dyn_config(kb_id, tag, searchwith, collection)
return kb_id
|
A convenience method.
|
def create(self, using=None, **kwargs):
"""
Creates the index in elasticsearch.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.create`` unchanged.
"""
self._get_connection(using).indices.create(index=self._name, body=self.to_dict(), **kwargs)
|
Creates the index in elasticsearch.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.create`` unchanged.
|
def _phase_kuramoto(self, teta, t, argv):
"""!
@brief Overridden method for calculation of oscillator phase.
@param[in] teta (double): Current value of phase.
@param[in] t (double): Time (can be ignored).
@param[in] argv (uint): Index of oscillator whose phase represented by argument teta.
@return (double) New value of phase of oscillator with index 'argv'.
"""
index = argv; # index of oscillator
phase = 0.0; # phase of a specified oscillator that will be calculated in line with current env. states.
neighbors = self.get_neighbors(index);
for k in neighbors:
conn_weight = 1.0;
if (self._ena_conn_weight is True):
conn_weight = self._conn_weight[index][k];
phase += conn_weight * self._weight * math.sin(self._phases[k] - teta);
divider = len(neighbors);
if (divider == 0):
divider = 1.0;
return ( self._freq[index] + (phase / divider) );
|
!
@brief Overridden method for calculation of oscillator phase.
@param[in] teta (double): Current value of phase.
@param[in] t (double): Time (can be ignored).
@param[in] argv (uint): Index of oscillator whose phase represented by argument teta.
@return (double) New value of phase of oscillator with index 'argv'.
|
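Written out, the update is dθ_i/dt = ω_i + (w / |N_i|) * Σ_{k in N_i} sin(θ_k − θ_i); a flattened sketch without the class state (connection weights omitted):

# Standalone sketch of the Kuramoto phase derivative computed above.
import math

def kuramoto_phase(index, phases, freq, neighbors, weight=1.0):
    total = sum(weight * math.sin(phases[k] - phases[index]) for k in neighbors[index])
    divider = len(neighbors[index]) or 1.0          # avoid division by zero for isolated nodes
    return freq[index] + total / divider

phases = [0.0, math.pi / 2, math.pi]
freq = [1.0, 1.0, 1.0]
neighbors = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(kuramoto_phase(0, phases, freq, neighbors))   # 1.0 + (sin(pi/2) + sin(pi)) / 2 ~= 1.5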
def series(self):
'''Generator of single series data (no dates are included).'''
data = self.values()
if len(data):
for c in range(self.count()):
yield data[:, c]
else:
return  # PEP 479: in a generator, simply return instead of raising StopIteration
|
Generator of single series data (no dates are included).
|
def requestSchema(self, nym, name, version, sender):
"""
Used to get a schema from Sovrin
:param nym: nym that schema is attached to
:param name: name of schema
:param version: version of schema
:return: req object
"""
operation = { TARGET_NYM: nym,
TXN_TYPE: GET_SCHEMA,
DATA: {NAME : name,
VERSION: version}
}
req = Request(sender, operation=operation)
return self.prepReq(req)
|
Used to get a schema from Sovrin
:param nym: nym that schema is attached to
:param name: name of schema
:param version: version of schema
:return: req object
|
def is_read_only(p_command):
""" Returns True when the given command class is read-only. """
read_only_commands = tuple(cmd for cmd
in ('revert', ) + READ_ONLY_COMMANDS)
return p_command.name() in read_only_commands
|
Returns True when the given command class is read-only.
|
def selection_error_control(self, form_info):
"""
It controls the selection from the form according
to the operations, and returns an error message
if it does not comply with the rules.
Args:
form_info: Channel or subscriber form from the user
Returns: True or False
error message
"""
keys, names = self.return_selected_form_items(form_info['ChannelList'])
chosen_channels_number = len(keys)
if form_info['new_channel'] and chosen_channels_number < 2:
return False, _(
u"You should choose at least two channels to merge into a new channel.")
elif form_info['existing_channel'] and chosen_channels_number == 0:
return False, _(
u"You should choose at least one channel to merge with the existing channel.")
elif form_info['find_chosen_channel'] and chosen_channels_number != 1:
return False, _(u"You should choose one channel for split operation.")
return True, None
|
Validates the channel selection from the form against the rules
of the requested operation and returns an error message if the
selection does not comply.
Args:
form_info: Channel or subscriber form from the user
Returns: (bool, error message or None)
|
def add_node(self, node):
"""Adds a `node` to the hash ring (including a number of replicas).
"""
self.nodes.append(node)
for x in xrange(self.replicas):
ring_key = self.hash_method(b("%s:%d" % (node, x)))
self.ring[ring_key] = node
self.sorted_keys.append(ring_key)
self.sorted_keys.sort()
|
Adds a `node` to the hash ring (including a number of replicas).
|
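hash_method and the lookup side are defined elsewhere in the class; a minimal self-contained ring with the same structure (md5 hashing and a bisect-based lookup are assumptions here) could look like:

# Minimal consistent-hash ring sketch, mirroring add_node above.
import bisect
import hashlib

class HashRing(object):
    def __init__(self, replicas=4):
        self.replicas = replicas
        self.ring = {}
        self.sorted_keys = []

    def _hash(self, key):
        return int(hashlib.md5(key.encode('utf-8')).hexdigest(), 16)

    def add_node(self, node):
        for x in range(self.replicas):
            ring_key = self._hash('%s:%d' % (node, x))
            self.ring[ring_key] = node
            bisect.insort(self.sorted_keys, ring_key)   # keep keys sorted as they are added

    def get_node(self, key):
        if not self.sorted_keys:
            return None
        idx = bisect.bisect(self.sorted_keys, self._hash(key)) % len(self.sorted_keys)
        return self.ring[self.sorted_keys[idx]]

ring = HashRing()
for node in ('cache-a', 'cache-b', 'cache-c'):
    ring.add_node(node)
print(ring.get_node('user:42'))  # deterministic node for this key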
def sorted_items(d, key=__identity, reverse=False):
"""
Return the items of the dictionary sorted by the keys
>>> sample = dict(foo=20, bar=42, baz=10)
>>> tuple(sorted_items(sample))
(('bar', 42), ('baz', 10), ('foo', 20))
>>> reverse_string = lambda s: ''.join(reversed(s))
>>> tuple(sorted_items(sample, key=reverse_string))
(('foo', 20), ('bar', 42), ('baz', 10))
>>> tuple(sorted_items(sample, reverse=True))
(('foo', 20), ('baz', 10), ('bar', 42))
"""
# wrap the key func so it operates on the first element of each item
def pairkey_key(item):
return key(item[0])
return sorted(d.items(), key=pairkey_key, reverse=reverse)
|
Return the items of the dictionary sorted by the keys
>>> sample = dict(foo=20, bar=42, baz=10)
>>> tuple(sorted_items(sample))
(('bar', 42), ('baz', 10), ('foo', 20))
>>> reverse_string = lambda s: ''.join(reversed(s))
>>> tuple(sorted_items(sample, key=reverse_string))
(('foo', 20), ('bar', 42), ('baz', 10))
>>> tuple(sorted_items(sample, reverse=True))
(('foo', 20), ('baz', 10), ('bar', 42))
|
def table_path(cls, project, instance, table):
"""Return a fully-qualified table string."""
return google.api_core.path_template.expand(
"projects/{project}/instances/{instance}/tables/{table}",
project=project,
instance=instance,
table=table,
)
|
Return a fully-qualified table string.
|
def debug(self, *msg):
"""
Prints a debug message
"""
label = colors.yellow("DEBUG")
self._msg(label, *msg)
|
Prints a debug message
|
def get_query_rows(self, job_id, offset=None, limit=None, timeout=0):
"""Retrieve a list of rows from a query table by job id.
This method will append results from multiple pages together. If you
want to manually page through results, you can use `get_query_results`
method directly.
Parameters
----------
job_id : str
The job id that references a BigQuery query.
offset : int, optional
The offset of the rows to pull from BigQuery
limit : int, optional
The number of rows to retrieve from a query table.
timeout : float, optional
Timeout in seconds.
Returns
-------
list
A ``list`` of ``dict`` objects that represent table rows.
"""
# Get query results
query_reply = self.get_query_results(job_id, offset=offset,
limit=limit, timeout=timeout)
if not query_reply['jobComplete']:
logger.warning('BigQuery job %s not complete' % job_id)
raise UnfinishedQueryException()
schema = query_reply["schema"]["fields"]
rows = query_reply.get('rows', [])
page_token = query_reply.get("pageToken")
records = [self._transform_row(row, schema) for row in rows]
# Append to records if there are multiple pages for query results
while page_token and (not limit or len(records) < limit):
query_reply = self.get_query_results(
job_id, offset=offset, limit=limit, page_token=page_token,
timeout=timeout)
page_token = query_reply.get("pageToken")
rows = query_reply.get('rows', [])
records += [self._transform_row(row, schema) for row in rows]
return records[:limit] if limit else records
|
Retrieve a list of rows from a query table by job id.
This method will append results from multiple pages together. If you
want to manually page through results, you can use `get_query_results`
method directly.
Parameters
----------
job_id : str
The job id that references a BigQuery query.
offset : int, optional
The offset of the rows to pull from BigQuery
limit : int, optional
The number of rows to retrieve from a query table.
timeout : float, optional
Timeout in seconds.
Returns
-------
list
A ``list`` of ``dict`` objects that represent table rows.
|
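Stripped of the BigQuery specifics, the page loop above is the usual token-driven accumulation pattern:

# Generic sketch of the pageToken accumulation loop used above (BigQuery specifics removed).
def fetch_all_rows(fetch_page, limit=None):
    """fetch_page(page_token) -> (rows, next_page_token); iterate until exhausted or limit hit."""
    records = []
    page_token = None
    while True:
        rows, page_token = fetch_page(page_token)
        records += rows
        if not page_token or (limit and len(records) >= limit):
            break
    return records[:limit] if limit else records

# Toy paginated source: three pages of two rows each.
pages = {None: ([1, 2], 'p2'), 'p2': ([3, 4], 'p3'), 'p3': ([5, 6], None)}
print(fetch_all_rows(pages.get, limit=5))  # [1, 2, 3, 4, 5]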
def from_perseus(network_table, networks):
"""
Create networkx graph from network tables
>>> from perseuspy import read_networks, nx
>>> network_table, networks = read_networks(folder)
>>> graphs = nx.from_perseus(network_table, networks)
"""
graphs = []
for guid, graph_attr in zip(network_table['GUID'], network_table.values):
network = networks[guid]
edge_table = network['edge_table']
if edge_table[['Source', 'Target']].duplicated().any():
warnings.warn('Duplicate edges were found and ignored in network {}'.format(network['name']))
G = nx.from_pandas_edgelist(edge_table, 'Source', 'Target', True, create_using=nx.DiGraph())
for attr, value in zip(network_table.columns, graph_attr):
G.graph[attr] = value
node_table = network['node_table']
if node_table['Node'].duplicated().any():
warnings.warn('Duplicate nodes were found and ignored in network {}'.format(network['name']))
node_column = node_table['Node']
for name, attributes in zip(node_column, node_table.values):
if name not in G:
G.add_node(name)
for attr, value in zip(node_table.columns, attributes):
G.node[name][attr] = value
graphs.append(G)
return graphs
|
Create networkx graph from network tables
>>> from perseuspy import read_networks, nx
>>> network_table, networks = read_networks(folder)
>>> graphs = nx.from_perseus(network_table, networks)
|
def compute_key(cli, familly, discriminant=None):
"""This function is used to compute a unique key from all connection parametters."""
hash_key = hashlib.sha256()
hash_key.update(familly)
hash_key.update(cli.host)
hash_key.update(cli.user)
hash_key.update(cli.password)
if discriminant:
if isinstance(discriminant, list):
for i in discriminant:
if i is not None and i is not False:
hash_key.update(str(i))
elif isinstance(discriminant, tuple):
for i in discriminant:
if i is not None and i is not False:
hash_key.update(str(i))
else:
hash_key.update(discriminant)
hash_key = hash_key.hexdigest()
cli.log.debug("hash_key: " + hash_key)
return hash_key
|
Compute a unique key from all of the connection parameters.
|
def normalize_url(url):
"""Return url after stripping trailing .json and trailing slashes."""
if url.endswith('.json'):
url = url[:-5]
if url.endswith('/'):
url = url[:-1]
return url
|
Return url after stripping trailing .json and trailing slashes.
|
def GetData(ID, season = None, cadence = 'lc', clobber = False, delete_raw = False,
aperture_name = None, saturated_aperture_name = None,
max_pixels = None, download_only = False, saturation_tolerance = None,
bad_bits = None, **kwargs):
'''
Returns a :py:obj:`DataContainer` instance with the raw data for the target.
:param int ID: The target ID number
:param int season: The observing season. Default :py:obj:`None`
:param str cadence: The light curve cadence. Default `lc`
:param bool clobber: Overwrite existing files? Default :py:obj:`False`
:param bool delete_raw: Delete the FITS TPF after processing it? Default :py:obj:`False`
:param str aperture_name: The name of the aperture to use. Select `custom` to call \
:py:func:`GetCustomAperture`. Default :py:obj:`None`
:param str saturated_aperture_name: The name of the aperture to use if the target is \
saturated. Default :py:obj:`None`
:param int max_pixels: Maximum number of pixels in the TPF. Default :py:obj:`None`
:param bool download_only: Download raw TPF and return? Default :py:obj:`False`
:param float saturation_tolerance: Target is considered saturated if flux is within \
this fraction of the pixel well depth. Default :py:obj:`None`
:param array_like bad_bits: Flagged :py:obj:`QUALITY` bits to consider outliers when \
computing the model. Default :py:obj:`None`
'''
raise NotImplementedError('This mission is not yet supported.')
|
Returns a :py:obj:`DataContainer` instance with the raw data for the target.
:param int ID: The target ID number
:param int season: The observing season. Default :py:obj:`None`
:param str cadence: The light curve cadence. Default `lc`
:param bool clobber: Overwrite existing files? Default :py:obj:`False`
:param bool delete_raw: Delete the FITS TPF after processing it? Default :py:obj:`False`
:param str aperture_name: The name of the aperture to use. Select `custom` to call \
:py:func:`GetCustomAperture`. Default :py:obj:`None`
:param str saturated_aperture_name: The name of the aperture to use if the target is \
saturated. Default :py:obj:`None`
:param int max_pixels: Maximum number of pixels in the TPF. Default :py:obj:`None`
:param bool download_only: Download raw TPF and return? Default :py:obj:`False`
:param float saturation_tolerance: Target is considered saturated if flux is within \
this fraction of the pixel well depth. Default :py:obj:`None`
:param array_like bad_bits: Flagged :py:obj:`QUALITY` bits to consider outliers when \
computing the model. Default :py:obj:`None`
|
def _from_docstring_rst(doc):
"""
format from docstring to ReStructured Text
"""
def format_fn(line, status):
""" format function """
if re_from_data.match(line):
line = re_from_data.sub(r"**\1** ", line)
status["add_line"] = True
line = re_from_defaults.sub(r"*\1*", line)
if status["listing"]:
# parameters
if re_from_param.match(line):
m = re_from_param.match(line)
line = " - ``{}`` {}".format(m.group(1), m.group(3))
# status items
elif re_from_status.match(line):
m = re_from_status.match(line)
line = " - ``{}`` {}".format(m.group(1), m.group(3))
# bullets
elif re_from_item.match(line):
line = re_from_item.sub(r" -", line)
# is continuation line
else:
line = " " * 4 + line.lstrip()
# in .rst format code samples use double backticks vs single ones for
# .md This converts them.
line = re_lone_backtick.sub("``", line)
return line
return _reformat_docstring(doc, format_fn, code_newline="\n")
|
format from docstring to ReStructured Text
|
def insert(self, row):
""" Insert a new row. The row will be added to the end of the
spreadsheet. Before inserting, the field names in the given
row will be normalized and values with empty field names
removed. """
data = self._convert_value(row)
self._service.InsertRow(data, self._ss.id, self.id)
|
Insert a new row. The row will be added to the end of the
spreadsheet. Before inserting, the field names in the given
row will be normalized and values with empty field names
removed.
|
def verify_psd_options_multi_ifo(opt, parser, ifos):
"""Parses the CLI options and verifies that they are consistent and
reasonable.
Parameters
----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes (psd_model, psd_file, asd_file, psd_estimation,
psd_segment_length, psd_segment_stride, psd_inverse_length, psd_output).
parser : object
OptionParser instance.
"""
for ifo in ifos:
for opt_group in ensure_one_opt_groups:
ensure_one_opt_multi_ifo(opt, parser, ifo, opt_group)
if opt.psd_estimation[ifo]:
required_opts_multi_ifo(opt, parser, ifo,
['--psd-segment-stride', '--psd-segment-length'],
required_by = "--psd-estimation")
|
Parses the CLI options and verifies that they are consistent and
reasonable.
Parameters
----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes (psd_model, psd_file, asd_file, psd_estimation,
psd_segment_length, psd_segment_stride, psd_inverse_length, psd_output).
parser : object
OptionParser instance.
|
def create_ngram_set(input_list, ngram_value=2):
"""
Extract a set of n-grams from a list of integers.
>>> create_ngram_set([1, 4, 9, 4, 1, 4], ngram_value=2)
{(4, 9), (4, 1), (1, 4), (9, 4)}
>>> create_ngram_set([1, 4, 9, 4, 1, 4], ngram_value=3)
{(1, 4, 9), (4, 9, 4), (9, 4, 1), (4, 1, 4)}
"""
return set(zip(*[input_list[i:] for i in range(ngram_value)]))
|
Extract a set of n-grams from a list of integers.
>>> create_ngram_set([1, 4, 9, 4, 1, 4], ngram_value=2)
{(4, 9), (4, 1), (1, 4), (9, 4)}
>>> create_ngram_set([1, 4, 9, 4, 1, 4], ngram_value=3)
{(1, 4, 9), (4, 9, 4), (9, 4, 1), (4, 1, 4)}
|
def get_proficiencies_by_genus_type(self, proficiency_genus_type):
"""Gets a ``ProficiencyList`` corresponding to the given proficiency genus ``Type`` which does not include proficiencies of types derived from the specified ``Type``.
arg: proficiency_genus_type (osid.type.Type): a proficiency
genus type
return: (osid.learning.ProficiencyList) - the returned
``Proficiency`` list
raise: NullArgument - ``proficiency_genus_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.get_resources_by_genus_type
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('learning',
collection='Proficiency',
runtime=self._runtime)
result = collection.find(
dict({'genusTypeId': str(proficiency_genus_type)},
**self._view_filter())).sort('_id', DESCENDING)
return objects.ProficiencyList(result, runtime=self._runtime, proxy=self._proxy)
|
Gets a ``ProficiencyList`` corresponding to the given proficiency genus ``Type`` which does not include proficiencies of types derived from the specified ``Type``.
arg: proficiency_genus_type (osid.type.Type): a proficiency
genus type
return: (osid.learning.ProficiencyList) - the returned
``Proficiency`` list
raise: NullArgument - ``proficiency_genus_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
|
def generator(self, output, target):
"Evaluate the `output` with the critic then uses `self.loss_funcG` to combine it with `target`."
fake_pred = self.gan_model.critic(output)
return self.loss_funcG(fake_pred, target, output)
|
Evaluate the `output` with the critic, then use `self.loss_funcG` to combine it with `target`.
|
def current_site_id():
"""
Responsible for determining the current ``Site`` instance to use
when retrieving data for any ``SiteRelated`` models. If we're inside an
override_current_site_id context manager, return the overriding site ID.
Otherwise, try to determine the site using the following methods in order:
- ``site_id`` in session. Used in the admin so that admin users
can switch sites and stay on the same domain for the admin.
- The id of the Site object corresponding to the hostname in the current
request. This result is cached.
- ``YACMS_SITE_ID`` environment variable, so management
commands or anything else outside of a request can specify a
site.
- ``SITE_ID`` setting.
If a current request exists and the current site is not overridden, the
site ID is stored on the request object to speed up subsequent calls.
"""
if hasattr(override_current_site_id.thread_local, "site_id"):
return override_current_site_id.thread_local.site_id
from yacms.utils.cache import cache_installed, cache_get, cache_set
request = current_request()
site_id = getattr(request, "site_id", None)
if request and not site_id:
site_id = request.session.get("site_id", None)
if not site_id:
domain = request.get_host().lower()
if cache_installed():
# Don't use yacms's cache_key_prefix here, since it
# uses this very function we're in right now to create a
# per-site cache key.
bits = (settings.CACHE_MIDDLEWARE_KEY_PREFIX, domain)
cache_key = "%s.site_id.%s" % bits
site_id = cache_get(cache_key)
if not site_id:
try:
site = Site.objects.get(domain__iexact=domain)
except Site.DoesNotExist:
pass
else:
site_id = site.id
if cache_installed():
cache_set(cache_key, site_id)
if not site_id:
site_id = os.environ.get("YACMS_SITE_ID", settings.SITE_ID)
if request and site_id and not getattr(settings, "TESTING", False):
request.site_id = site_id
return site_id
|
Responsible for determining the current ``Site`` instance to use
when retrieving data for any ``SiteRelated`` models. If we're inside an
override_current_site_id context manager, return the overriding site ID.
Otherwise, try to determine the site using the following methods in order:
- ``site_id`` in session. Used in the admin so that admin users
can switch sites and stay on the same domain for the admin.
- The id of the Site object corresponding to the hostname in the current
request. This result is cached.
- ``YACMS_SITE_ID`` environment variable, so management
commands or anything else outside of a request can specify a
site.
- ``SITE_ID`` setting.
If a current request exists and the current site is not overridden, the
site ID is stored on the request object to speed up subsequent calls.
|
def config_path(self, value):
"""Set config_path"""
self._config_path = value or ''
if not isinstance(self._config_path, str):
raise BadArgumentError("config_path must be string: {}".format(
self._config_path))
|
Set config_path
|
def reset_coords(self, names=None, drop=False, inplace=None):
"""Given names of coordinates, reset them to become variables
Parameters
----------
names : str or list of str, optional
Name(s) of non-index coordinates in this dataset to reset into
variables. By default, all non-index coordinates are reset.
drop : bool, optional
If True, remove coordinates instead of converting them into
variables.
inplace : bool, optional
If True, modify this dataset inplace. Otherwise, create a new
object.
Returns
-------
Dataset
"""
inplace = _check_inplace(inplace)
if names is None:
names = self._coord_names - set(self.dims)
else:
if isinstance(names, str):
names = [names]
self._assert_all_in_dataset(names)
bad_coords = set(names) & set(self.dims)
if bad_coords:
raise ValueError(
'cannot remove index coordinates with reset_coords: %s'
% bad_coords)
obj = self if inplace else self.copy()
obj._coord_names.difference_update(names)
if drop:
for name in names:
del obj._variables[name]
return obj
|
Given names of coordinates, reset them to become variables
Parameters
----------
names : str or list of str, optional
Name(s) of non-index coordinates in this dataset to reset into
variables. By default, all non-index coordinates are reset.
drop : bool, optional
If True, remove coordinates instead of converting them into
variables.
inplace : bool, optional
If True, modify this dataset inplace. Otherwise, create a new
object.
Returns
-------
Dataset
|
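A quick usage sketch with a toy Dataset (assuming a recent xarray release):

# reset_coords promotes a non-index coordinate to a data variable (or drops it).
import xarray as xr

ds = xr.Dataset({'temp': ('x', [11.0, 12.0, 13.0])},
                coords={'x': [0, 1, 2], 'station': 'A'})
print('station' in ds.coords)                               # True
print('station' in ds.reset_coords('station').data_vars)    # True  -- now a data variable
print('station' in ds.reset_coords('station', drop=True))   # False -- removed entirely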
def lv_load_areas(self):
"""Returns a generator for iterating over load_areas
Yields
------
int
generator for iterating over load_areas
"""
for load_area in sorted(self._lv_load_areas, key=lambda _: repr(_)):
yield load_area
|
Returns a generator for iterating over the LV load areas
Yields
------
object
the next load area from `_lv_load_areas`
|
def get_least_salient_words(vocab, topic_word_distrib, doc_topic_distrib, doc_lengths, n=None):
"""
Order the words from `vocab` by "saliency score" (Chuang et al. 2012) from least to most salient. Optionally only
return the `n` least salient words.
J. Chuang, C. Manning, J. Heer 2012: "Termite: Visualization Techniques for Assessing Textual Topic Models"
"""
return _words_by_salience_score(vocab, topic_word_distrib, doc_topic_distrib, doc_lengths, n, least_to_most=True)
|
Order the words from `vocab` by "saliency score" (Chuang et al. 2012) from least to most salient. Optionally only
return the `n` least salient words.
J. Chuang, C. Manning, J. Heer 2012: "Termite: Visualization Techniques for Assessing Textual Topic Models"
|
def binary_regex(self):
"""Return the regex for the binary."""
regex = {'linux': r'^%(BINARY_NAME)s-%(VERSION)s\.%(EXT)s$',
'linux64': r'^%(BINARY_NAME)s-%(VERSION)s\.%(EXT)s$',
'mac': r'^%(BINARY_NAME)s(?:\s|-)%(VERSION)s\.%(EXT)s$',
'mac64': r'^%(BINARY_NAME)s(?:\s|-)%(VERSION)s\.%(EXT)s$',
'win32':
r'^%(BINARY_NAME)s(%(STUB_NEW)s|(?:\sSetup\s|-)%(STUB)s%(VERSION)s)\.%(EXT)s$',
'win64':
r'^%(BINARY_NAME)s(%(STUB_NEW)s|(?:\sSetup\s|-)%(STUB)s%(VERSION)s)\.%(EXT)s$',
}
return regex[self.platform] % {
'BINARY_NAME': APPLICATIONS_TO_BINARY_NAME.get(self.application, self.application),
'EXT': self.extension,
'STUB': 'Stub ' if self.is_stub_installer else '',
'STUB_NEW': ' Installer' if self.is_stub_installer else '',
'VERSION': self.version,
}
|
Return the regex for the binary.
|
def gain_offsets(Idat,Qdat,Udat,Vdat,tsamp,chan_per_coarse,feedtype='l',**kwargs):
'''
Determines relative gain error in the X and Y feeds for an
observation given I and Q (I and V for circular basis) noise diode data.
'''
if feedtype=='l':
#Fold noise diode data and calculate ON OFF differences for I and Q
I_OFF,I_ON = foldcal(Idat,tsamp,**kwargs)
Q_OFF,Q_ON = foldcal(Qdat,tsamp,**kwargs)
#Calculate power in each feed for noise diode ON and OFF
XX_ON = (I_ON+Q_ON)/2
XX_OFF = (I_OFF+Q_OFF)/2
YY_ON = (I_ON-Q_ON)/2
YY_OFF = (I_OFF-Q_OFF)/2
#Calculate gain offset (divided by 2) as defined in Heiles (2001)
G = (XX_OFF-YY_OFF)/(XX_OFF+YY_OFF)
if feedtype=='c':
#Fold noise diode data and calculate ON OFF differences for I and Q
I_OFF,I_ON = foldcal(Idat,tsamp,**kwargs)
V_OFF,V_ON = foldcal(Vdat,tsamp,**kwargs)
#Calculate power in each feed for noise diode ON and OFF
RR_ON = (I_ON+V_ON)/2
RR_OFF = (I_OFF+V_OFF)/2
LL_ON = (I_ON-V_ON)/2
LL_OFF = (I_OFF-V_OFF)/2
#Calculate gain offset (divided by 2) as defined in Heiles (2001)
G = (RR_OFF-LL_OFF)/(RR_OFF+LL_OFF)
return convert_to_coarse(G,chan_per_coarse)
|
Determines relative gain error in the X and Y feeds for an
observation given I and Q (I and V for circular basis) noise diode data.
|
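For the linear-feed branch the quantity reduces to G = (XX_OFF − YY_OFF) / (XX_OFF + YY_OFF) = Q_OFF / I_OFF per channel, directly from the definitions of XX and YY above; a tiny worked example:

# Worked example of the gain-offset expression above (linear feeds, per channel).
import numpy as np

I_OFF = np.array([10.0, 12.0, 8.0])    # toy OFF power in Stokes I
Q_OFF = np.array([1.0, -0.6, 0.4])     # toy OFF power in Stokes Q

XX_OFF = (I_OFF + Q_OFF) / 2
YY_OFF = (I_OFF - Q_OFF) / 2
G = (XX_OFF - YY_OFF) / (XX_OFF + YY_OFF)   # identical to Q_OFF / I_OFF
print(G)  # [ 0.1  -0.05  0.05]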
def export_partlist_to_file(input, output, timeout=20, showgui=False):
'''
call eagle and export sch or brd to partlist text file
:param input: .sch or .brd file name
:param output: text file name
:param timeout: int
:param showgui: Bool, True -> do not hide eagle GUI
:rtype: None
'''
input = norm_path(input)
output = norm_path(output)
commands = export_command(output=output, output_type='partlist')
command_eagle(
input=input, timeout=timeout, commands=commands, showgui=showgui)
|
call eagle and export sch or brd to partlist text file
:param input: .sch or .brd file name
:param output: text file name
:param timeout: int
:param showgui: Bool, True -> do not hide eagle GUI
:rtype: None
|
def _from_dict(cls, _dict):
"""Initialize a MessageContext object from a json dictionary."""
args = {}
if 'global' in _dict:
args['global_'] = MessageContextGlobal._from_dict(
_dict.get('global'))
if 'skills' in _dict:
args['skills'] = MessageContextSkills._from_dict(
_dict.get('skills'))
return cls(**args)
|
Initialize a MessageContext object from a json dictionary.
|
def fetch(self, fetch_notes=None):
""" update remote values (called automatically at __init__) """
if fetch_notes is None:
fetch_notes = self.fetch_notes
values, notes_index = get_sheet_values(self.name, self.sheet_name,
spreadsheet_service=self._spreadsheet_service,
get_notes=fetch_notes)
self.raw_values = values
self.values = [list(r) for r in zip(*itertools.zip_longest(*self.raw_values, fillvalue=''))]
self.byCol = byCol(self.values, to_index=self.index_columns)
self.notes_index = notes_index
|
update remote values (called automatically at __init__)
|
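The zip_longest double transpose in fetch() pads ragged rows out to the length of the longest row; for example:

# The double transpose with zip_longest pads ragged rows to equal length.
import itertools

raw_values = [['a', 'b', 'c'], ['d'], ['e', 'f']]
padded = [list(r) for r in zip(*itertools.zip_longest(*raw_values, fillvalue=''))]
print(padded)  # [['a', 'b', 'c'], ['d', '', ''], ['e', 'f', '']]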
def from_xy_array(cls, xy, shape):
"""
Convert an array (N,2) with a given image shape to a KeypointsOnImage object.
Parameters
----------
xy : (N, 2) ndarray
Coordinates of ``N`` keypoints on the original image, given
as ``(N,2)`` array of xy-coordinates.
shape : tuple of int or ndarray
Shape tuple of the image on which the keypoints are placed.
Returns
-------
KeypointsOnImage
KeypointsOnImage object that contains all keypoints from the array.
"""
keypoints = [Keypoint(x=coord[0], y=coord[1]) for coord in xy]
return KeypointsOnImage(keypoints, shape)
|
Convert an array (N,2) with a given image shape to a KeypointsOnImage object.
Parameters
----------
xy : (N, 2) ndarray
Coordinates of ``N`` keypoints on the original image, given
as ``(N,2)`` array of xy-coordinates.
shape : tuple of int or ndarray
Shape tuple of the image on which the keypoints are placed.
Returns
-------
KeypointsOnImage
KeypointsOnImage object that contains all keypoints from the array.
|
def with_bundler(self):
"""
Check, if bundler check is requested.
Check, if the user wants us to check for new gems and return True in
this case.
:rtype : bool
"""
def gemfile_exists():
"""
Check, if a Gemfile exists in the current repo.
"""
return os.path.exists('Gemfile')
if 'GIT_UP_BUNDLER_CHECK' in os.environ:
print(colored(
'''The GIT_UP_BUNDLER_CHECK environment variable is deprecated.
You can now tell git-up to check (or not check) for missing
gems on a per-project basis using git's config system. To
set it globally, run this command anywhere:
git config --global git-up.bundler.check true
To set it within a project, run this command inside that
project's directory:
git config git-up.bundler.check true
Replace 'true' with 'false' to disable checking.''', 'yellow'))
if self.settings['bundler.check']:
return gemfile_exists()
if ('GIT_UP_BUNDLER_CHECK' in os.environ
and os.environ['GIT_UP_BUNDLER_CHECK'] == 'true'):
return gemfile_exists()
return False
|
Check, if bundler check is requested.
Check, if the user wants us to check for new gems and return True in
this case.
:rtype : bool
|
def setCol(self, x, l):
"""set the x-th column, starting at 0"""
for i in xrange(0, self.__size):
self.setCell(x, i, l[i])
|
set the x-th column, starting at 0
|
def parse_cell(self, cell):
"""
Process cell field, the field format just like {{field}}
:param cell:
:return: value, field
"""
field = ''
if (isinstance(cell.value, (str, unicode)) and
cell.value.startswith('{{') and
cell.value.endswith('}}')):
field = cell.value[2:-2].strip()
value = ''
else:
value = cell.value
return value, field
|
Process cell field, the field format just like {{field}}
:param cell:
:return: value, field
|
def make_spark_lines(table,filename,sc,**kwargs):
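# Build line geojson output with Spark: drop date columns (datetime fields imported from
# postgis currently raise errors), assemble per-row arguments with make_spark_args, then
# map map_spark_lines over a parallelized RDD; the file-writing logic further down is
# left commented out.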
spark_output = True
lines_out_count = False
extrema = False
for key,value in kwargs.iteritems():
if key == 'lines_out_count':
lines_out_count = value
if key == 'extrema':
extrema = value
# removing datetime references from imported postgis database
# CURRENTLY datetime from postgis dbs throw errors
# fields containing dates removed
list = []
count = 0
for row in table.columns.values.tolist():
if 'date' in row:
list.append(count)
count += 1
table.drop(table.columns[list], axis=1, inplace=True)
# getting spark arguments
if lines_out_count == False:
args = make_spark_args(table,25,lines_out = True,extrema=extrema)
else:
args = make_spark_args(table,25,lines_out_count=lines_out_count)
# concurrent represents rdd structure that will be parrelized
concurrent = sc.parallelize(args)
# getting table that would normally be going into this function
table = concurrent.map(map_spark_lines).collect()
'''
alignment_field = False
spark_output = True
if kwargs is not None:
for key,value in kwargs.iteritems():
if key == 'alignment_field':
alignment_field = value
if key == 'spark_output':
spark_output = value
#changing dataframe to list if dataframe
if isinstance(table,pd.DataFrame):
table=df2list(table)
header=table[0]
total = []
# making table the proper iterable for each input
if spark_output == True:
#table = sum(table,[])
pass
else:
table = table[1:]
'''
'''
# making filenames list
filenames = []
count = 0
while not len(filenames) == len(table):
count += 1
filename = 'lines%s.geojson' % str(count)
filenames.append(filename)
args = []
# zipping arguments together for each value in table
for filename,row in itertools.izip(filenames,table):
args.append([filename,row])
concurrent = sc.parallelize(args)
concurrent.map(map_lines_output).collect()
'''
'''
count=0
total=0
for row in table:
count+=1
# logic to treat rows as outputs of make_line or to perform make_line operation
if spark_output == False:
value = make_line([header,row],list=True,postgis=True,alignment_field=alignment_field)
elif spark_output == True:
value = row
# logic for how to handle starting and ending geojson objects
if row==table[0]:
#value=make_line([header,row],list=True,postgis=True,alignment_field=alignment_field)
if not len(table)==2:
value=value[:-3]
totalvalue=value+['\t},']
elif row==table[-1]:
#value=make_line([header,row],list=True,postgis=True,alignment_field=alignment_field)
value=value[2:]
totalvalue=totalvalue+value
else:
#value=make_line([header,row],list=True,postgis=True,alignment_field=alignment_field)
value=value[2:-3]
value=value+['\t},']
totalvalue=totalvalue+value
if count == 1000:
total += count
count = 0
print '[%s/%s]' % (total,len(table))
bl.parselist(totalvalue,filename)
'''
|
alignment_field = False
spark_output = True
if kwargs is not None:
for key,value in kwargs.iteritems():
if key == 'alignment_field':
alignment_field = value
if key == 'spark_output':
spark_output = value
#changing dataframe to list if dataframe
if isinstance(table,pd.DataFrame):
table=df2list(table)
header=table[0]
total = []
# making table the proper iterable for each input
if spark_output == True:
#table = sum(table,[])
pass
else:
table = table[1:]
|
def to_molden(cartesian_list, buf=None, sort_index=True,
overwrite=True, float_format='{:.6f}'.format):
"""Write a list of Cartesians into a molden file.
.. note:: Since it permanently writes a file, this function
is strictly speaking **not side-effect free**.
The list to be written is of course not changed.
Args:
cartesian_list (list):
buf (str): StringIO-like, optional buffer to write to
sort_index (bool): If sort_index is true, the Cartesian
is sorted by the index before writing.
overwrite (bool): May overwrite existing files.
float_format (one-parameter function): Formatter function
to apply to column’s elements if they are floats.
The result of this function must be a unicode string.
Returns:
formatted : string (or unicode, depending on data and options)
"""
if sort_index:
cartesian_list = [molecule.sort_index() for molecule in cartesian_list]
give_header = ("[MOLDEN FORMAT]\n"
+ "[N_GEO]\n"
+ str(len(cartesian_list)) + "\n"
+ '[GEOCONV]\n'
+ 'energy\n{energy}'
+ 'max-force\n{max_force}'
+ 'rms-force\n{rms_force}'
+ '[GEOMETRIES] (XYZ)\n').format
values = len(cartesian_list) * '1\n'
energy = [str(m.metadata.get('energy', 1)) for m in cartesian_list]
energy = '\n'.join(energy) + '\n'
header = give_header(energy=energy, max_force=values, rms_force=values)
coordinates = [x.to_xyz(sort_index=sort_index, float_format=float_format)
for x in cartesian_list]
output = header + '\n'.join(coordinates)
if buf is not None:
if overwrite:
with open(buf, mode='w') as f:
f.write(output)
else:
with open(buf, mode='x') as f:
f.write(output)
else:
return output
|
Write a list of Cartesians into a molden file.
.. note:: Since it permanently writes a file, this function
is strictly speaking **not side-effect free**.
The list to be written is of course not changed.
Args:
cartesian_list (list):
buf (str): StringIO-like, optional buffer to write to
sort_index (bool): If sort_index is true, the Cartesian
is sorted by the index before writing.
overwrite (bool): May overwrite existing files.
float_format (one-parameter function): Formatter function
to apply to column’s elements if they are floats.
The result of this function must be a unicode string.
Returns:
formatted : string (or unicode, depending on data and options)
|
def _dens(self,R,z,phi=0.,t=0.):
"""
NAME:
_dens
PURPOSE:
evaluate the density for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the density
HISTORY:
2015-06-15 - Written - Bovy (IAS)
"""
return 3./4./nu.pi*self._b2*(R**2.+z**2.+self._b2)**-2.5
|
NAME:
_dens
PURPOSE:
evaluate the density for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the density
HISTORY:
2015-06-15 - Written - Bovy (IAS)
|
def _rotate(lon, lat, theta, axis='x'):
"""
Rotate "lon", "lat" coords (in _degrees_) about the X-axis by "theta"
degrees. This effectively simulates rotating a physical stereonet.
Returns rotated lon, lat coords in _radians_).
"""
# Convert input to numpy arrays in radians
lon, lat = np.atleast_1d(lon, lat)
lon, lat = map(np.radians, [lon, lat])
theta = np.radians(theta)
# Convert to cartesian coords for the rotation
x, y, z = sph2cart(lon, lat)
lookup = {'x':_rotate_x, 'y':_rotate_y, 'z':_rotate_z}
X, Y, Z = lookup[axis](x, y, z, theta)
# Now convert back to spherical coords (longitude and latitude, ignore R)
lon, lat = cart2sph(X,Y,Z)
return lon, lat
|
Rotate "lon", "lat" coords (in _degrees_) about the X-axis by "theta"
degrees. This effectively simulates rotating a physical stereonet.
Returns rotated lon, lat coords in _radians_).
|
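sph2cart, cart2sph and the per-axis _rotate_* helpers live elsewhere in the module; a self-contained numpy sketch of the same idea for the default X-axis case (the sign convention here is an assumption):

# Self-contained sketch of a rotation about the x-axis in lon/lat coordinates;
# sph2cart / cart2sph / _rotate_x in the module above are replaced by local stand-ins.
import numpy as np

def sph2cart(lon, lat):
    return np.cos(lat) * np.cos(lon), np.cos(lat) * np.sin(lon), np.sin(lat)

def cart2sph(x, y, z):
    r = np.sqrt(x**2 + y**2 + z**2)
    return np.arctan2(y, x), np.arcsin(z / r)

def rotate_about_x(lon_deg, lat_deg, theta_deg):
    lon, lat, theta = np.radians(lon_deg), np.radians(lat_deg), np.radians(theta_deg)
    x, y, z = sph2cart(lon, lat)
    X = x                                        # rotation about the x-axis leaves x unchanged
    Y = y * np.cos(theta) - z * np.sin(theta)
    Z = y * np.sin(theta) + z * np.cos(theta)
    return cart2sph(X, Y, Z)                     # radians, like _rotate above

print(np.degrees(rotate_about_x(0.0, 30.0, 90.0)))  # approx [-30. 0.]: lat 30 swings onto the equator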
def get_de_novos_in_transcript(transcript, de_novos):
""" get the de novos within the coding sequence of a transcript
Args:
transcript: Transcript object, which defines the transcript coordinates
de_novos: list of chromosome sequence positions for de novo events
Returns:
list of de novo positions found within the transcript
"""
in_transcript = []
for de_novo in de_novos:
# we check if the de novo is within the transcript by converting the
# chromosomal position to a CDS-based position. Variants outside the CDS
# will raise an error, which we catch and pass on. It's better to do
# this, rather than use the function in_coding_region(), since that
# function does not allow for splice site variants.
site = transcript.get_coding_distance(de_novo)
cds_length = transcript.get_coding_distance(transcript.get_cds_end())
within_cds = site['pos'] >= 0 and site['pos'] < cds_length['pos']
if within_cds and (transcript.in_coding_region(de_novo) or abs(site['offset']) < 9):
in_transcript.append(de_novo)
return in_transcript
|
get the de novos within the coding sequence of a transcript
Args:
transcript: Transcript object, which defines the transcript coordinates
de_novos: list of chromosome sequence positions for de novo events
Returns:
list of de novo positions found within the transcript
|
def less_than(self, less_than):
"""Adds new `<` condition
:param less_than: int or datetime compatible object (naive UTC datetime or tz-aware datetime)
:raise:
- QueryTypeError: if `less_than` is of an unexpected type
"""
if hasattr(less_than, 'strftime'):
less_than = datetime_as_utc(less_than).strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(less_than, six.string_types):
raise QueryTypeError('Expected value of type `int` or instance of `datetime`, not %s' % type(less_than))
return self._add_condition('<', less_than, types=[int, str])
|
Adds new `<` condition
:param less_than: int or datetime compatible object (naive UTC datetime or tz-aware datetime)
:raise:
- QueryTypeError: if `less_than` is of an unexpected type
|
def read_object_from_yaml(desired_type: Type[Any], file_object: TextIOBase, logger: Logger,
fix_imports: bool = True, errors: str = 'strict', *args, **kwargs) -> Any:
"""
Parses a yaml file.
:param desired_type:
:param file_object:
:param logger:
:param fix_imports:
:param errors:
:param args:
:param kwargs:
:return:
"""
return yaml.load(file_object)
|
Parses a yaml file.
:param desired_type:
:param file_object:
:param logger:
:param fix_imports:
:param errors:
:param args:
:param kwargs:
:return:
|
def populate_dataframe(index,columns, default_dict, dtype):
""" helper function to populate a generic Pst dataframe attribute. This
function is called as part of constructing a generic Pst instance
Parameters
----------
index : (varies)
something to use as the dataframe index
columns: (varies)
something to use as the dataframe columns
default_dict : (dict)
dictionary of default values for columns
dtype : numpy.dtype
dtype used to cast dataframe columns
Returns
-------
new_df : pandas.DataFrame
"""
new_df = pd.DataFrame(index=index,columns=columns)
for fieldname,dt in zip(columns,dtype.descr):
default = default_dict[fieldname]
new_df.loc[:,fieldname] = default
new_df.loc[:,fieldname] = new_df.loc[:,fieldname].astype(dt[1])
return new_df
|
helper function to populate a generic Pst dataframe attribute. This
function is called as part of constructing a generic Pst instance
Parameters
----------
index : (varies)
something to use as the dataframe index
columns: (varies)
something to use as the dataframe columns
default_dict : (dict)
dictionary of default values for columns
dtype : numpy.dtype
dtype used to cast dataframe columns
Returns
-------
new_df : pandas.DataFrame
|
def fn_floor(self, value):
"""
Return the floor of a number. For negative numbers, floor returns a lower value. E.g., `floor(-2.5) == -3`
:param value: The number.
:return: The floor of the number.
"""
if is_ndarray(value) or isinstance(value, (list, tuple)):
return numpy.floor(self._to_ndarray(value))
else:
return math.floor(value)
|
Return the floor of a number. For negative numbers, floor returns a lower value. E.g., `floor(-2.5) == -3`
:param value: The number.
:return: The floor of the number.
|
def zan(self, id_reply):
'''
Update the external (relation) table first, then update the field value on the
internal table. This is redundant, but it avoids a join query when reading.
'''
logger.info('zan: {0}'.format(id_reply))
MReply2User.create_reply(self.userinfo.uid, id_reply)
cur_count = MReply2User.get_voter_count(id_reply)
if cur_count:
MReply.update_vote(id_reply, cur_count)
output = {'text_zan': cur_count}
else:
output = {'text_zan': 0}
logger.info('zan dic: {0}'.format(cur_count))
return json.dump(output, self)
|
Update the external (relation) table first, then update the field value on the
internal table. This is redundant, but it avoids a join query when reading.
|
def get_template_debug(template_name, error):
'''
This structure is what Django wants when errors occur in templates.
It gives the user a nice stack trace in the error page during debug.
'''
# This is taken from mako.exceptions.html_error_template(), which has an issue
# in Py3 where files get loaded as bytes but `lines = src.split('\n')` below
# splits with a string. Not sure if this is a bug or if I'm missing something,
# but doing a custom debugging template allows a workaround as well as a custom
# DMP look.
# I used to have a file in the templates directory for this, but too many users
# reported TemplateNotFound errors. This function is a bit of a hack, but it only
# happens during development (and mako.exceptions does this same thing).
# /justification
stacktrace_template = MakoTemplate(r"""
<%! from mako.exceptions import syntax_highlight, pygments_html_formatter %>
<style>
.stacktrace { margin:5px 5px 5px 5px; }
.highlight { padding:0px 10px 0px 10px; background-color:#9F9FDF; }
.nonhighlight { padding:0px; background-color:#DFDFDF; }
.sample { padding:10px; margin:10px 10px 10px 10px;
font-family:monospace; }
.sampleline { padding:0px 10px 0px 10px; }
.sourceline { margin:5px 5px 10px 5px; font-family:monospace;}
.location { font-size:80%; }
.highlight { white-space:pre; }
.sampleline { white-space:pre; }
% if pygments_html_formatter:
${pygments_html_formatter.get_style_defs() | n}
.linenos { min-width: 2.5em; text-align: right; }
pre { margin: 0; }
.syntax-highlighted { padding: 0 10px; }
.syntax-highlightedtable { border-spacing: 1px; }
.nonhighlight { border-top: 1px solid #DFDFDF;
border-bottom: 1px solid #DFDFDF; }
.stacktrace .nonhighlight { margin: 5px 15px 10px; }
.sourceline { margin: 0 0; font-family:monospace; }
.code { background-color: #F8F8F8; width: 100%; }
.error .code { background-color: #FFBDBD; }
.error .syntax-highlighted { background-color: #FFBDBD; }
% endif
## adjustments to Django css
table.source {
background-color: #fdfdfd;
}
table.source > tbody > tr > th {
width: auto;
}
table.source > tbody > tr > td {
font-family: inherit;
white-space: normal;
padding: 15px;
}
#template {
background-color: #b3daff;
}
</style>
<%
src = tback.source
line = tback.lineno
if isinstance(src, bytes):
src = src.decode()
if src:
lines = src.split('\n')
else:
lines = None
%>
<h3>${tback.errorname}: ${tback.message}</h3>
% if lines:
<div class="sample">
<div class="nonhighlight">
% for index in range(max(0, line-4),min(len(lines), line+5)):
<%
if pygments_html_formatter:
pygments_html_formatter.linenostart = index + 1
%>
% if index + 1 == line:
<%
if pygments_html_formatter:
old_cssclass = pygments_html_formatter.cssclass
pygments_html_formatter.cssclass = 'error ' + old_cssclass
%>
${lines[index] | n,syntax_highlight(language='mako')}
<%
if pygments_html_formatter:
pygments_html_formatter.cssclass = old_cssclass
%>
% else:
${lines[index] | n,syntax_highlight(language='mako')}
% endif
% endfor
</div>
</div>
% endif
<div class="stacktrace">
% for (filename, lineno, function, line) in tback.reverse_traceback:
<div class="location">${filename}, line ${lineno}:</div>
<div class="nonhighlight">
<%
if pygments_html_formatter:
pygments_html_formatter.linenostart = lineno
%>
<div class="sourceline">${line | n,syntax_highlight(filename)}</div>
</div>
% endfor
</div>
""")
tback = RichTraceback(error, error.__traceback__)
lines = stacktrace_template.render_unicode(tback=tback)
return {
'message': '',
'source_lines': [
( '', mark_safe(lines) ),
],
'before': '',
'during': '',
'after': '',
'top': 0,
'bottom': 0,
'total': 0,
'line': tback.lineno or 0,
'name': template_name,
'start': 0,
'end': 0,
}
|
This structure is what Django wants when errors occur in templates.
It gives the user a nice stack trace in the error page during debug.
|
def update_report_collector(self, timestamp):
"""Updating report collector for pipeline details."""
report_enabled = 'report' in self.information and self.information['report'] == 'html'
report_enabled = report_enabled and 'stage' in self.information
report_enabled = report_enabled and Event.collector_queue is not None
if report_enabled:
Event.collector_queue.put(CollectorUpdate(
matrix=self.information['matrix'] if 'matrix' in self.information else 'default',
stage=self.information['stage'],
status=self.status,
timestamp=timestamp,
information=self.information
))
|
Updating report collector for pipeline details.
|
def warm_spell_duration_index(tasmax, tx90, window=6, freq='YS'):
r"""Warm spell duration index
Number of days with at least six consecutive days where the daily maximum temperature is above the 90th
percentile. The 90th percentile should be computed for a 5-day window centred on each calendar day in the
1961-1990 period.
Parameters
----------
tasmax : xarray.DataArray
Maximum daily temperature [℃] or [K]
tx90 : xarray.DataArray
90th percentile of daily maximum temperature [℃] or [K], with a dayofyear coordinate
window : int
Minimum number of days with temperature above threshold to qualify as a warm spell.
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Count of days with at least six consecutive days where the daily maximum temperature is above the 90th
percentile [days].
References
----------
From the Expert Team on Climate Change Detection, Monitoring and Indices (ETCCDMI).
Used in Alexander, L. V., et al. (2006), Global observed changes in daily climate extremes of temperature and
precipitation, J. Geophys. Res., 111, D05109, doi: 10.1029/2005JD006290.
"""
if 'dayofyear' not in tx90.coords.keys():
raise AttributeError("tx90 should have dayofyear coordinates.")
# The day of year value of the tasmax series.
doy = tasmax.indexes['time'].dayofyear
# adjustment of tx90 to tasmax doy range
tx90 = utils.adjust_doy_calendar(tx90, tasmax)
# Create an array with the shape and coords of tasmax, but with values set to tx90 according to the doy index.
thresh = xr.full_like(tasmax, np.nan)
thresh.data = tx90.sel(dayofyear=doy)
above = (tasmax > thresh)
return above.resample(time=freq).apply(rl.windowed_run_count, window=window, dim='time')
|
r"""Warm spell duration index
Number of days with at least six consecutive days where the daily maximum temperature is above the 90th
percentile. The 90th percentile should be computed for a 5-day window centred on each calendar day in the
1961-1990 period.
Parameters
----------
tasmax : xarray.DataArray
Maximum daily temperature [℃] or [K]
tx90 : xarray.DataArray
90th percentile of daily maximum temperature [℃] or [K], with a dayofyear coordinate
window : int
Minimum number of days with temperature above threshold to qualify as a warm spell.
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Count of days with at least six consecutive days where the daily maximum temperature is above the 90th
percentile [days].
References
----------
From the Expert Team on Climate Change Detection, Monitoring and Indices (ETCCDMI).
Used in Alexander, L. V., et al. (2006), Global observed changes in daily climate extremes of temperature and
precipitation, J. Geophys. Res., 111, D05109, doi: 10.1029/2005JD006290.
|
def fit(self, xy=False, **kwargs):
"""Write xtc that is fitted to the tpr reference structure.
Runs :class:`gromacs.tools.trjconv` with appropriate arguments
for fitting. The most important *kwargs* are listed
here but in most cases the defaults should work.
Note that the default settings do *not* include centering or
periodic boundary treatment as this often does not work well
with fitting. It is better to do this as a separate step (see
:meth:`center_fit` or :func:`gromacs.cbook.trj_fitandcenter`)
:Keywords:
*s*
Input structure (typically the default tpr file but can be set to
some other file with a different conformation for fitting)
*n*
Alternative index file.
*o*
Name of the output trajectory. A default name is created.
If e.g. *dt* = 100 is one of the *kwargs* then the default name includes
"_dt100ps".
*xy* : boolean
If ``True`` then only do a rot+trans fit in the xy plane
(good for membrane simulations); default is ``False``.
*force*
``True``: overwrite existing trajectories
``False``: throw an IOError exception
``None``: skip existing and log a warning [default]
*fitgroup*
index group to fit on ["backbone"]
.. Note:: If keyword *input* is supplied then it will override
*fitgroup*; *input* = ``[fitgroup, outgroup]``
*kwargs*
kwargs are passed to :func:`~gromacs.cbook.trj_xyfitted`
:Returns:
dictionary with keys *tpr*, *xtc*, which are the names of the
new files
"""
kwargs.setdefault('s', self.tpr)
kwargs.setdefault('n', self.ndx)
kwargs['f'] = self.xtc
force = kwargs.pop('force', self.force)
if xy:
fitmode = 'rotxy+transxy'
kwargs.pop('fit', None)
infix_default = '_fitxy'
else:
fitmode = kwargs.pop('fit', 'rot+trans') # user can use 'progressive', too
infix_default = '_fit'
dt = kwargs.get('dt')
if dt:
infix_default += '_dt{0:d}ps'.format(int(dt)) # dt in ps
kwargs.setdefault('o', self.outfile(self.infix_filename(None, self.xtc, infix_default, 'xtc')))
fitgroup = kwargs.pop('fitgroup', 'backbone')
kwargs.setdefault('input', [fitgroup, "system"])
if kwargs.get('center', False):
logger.warn("Transformer.fit(): center=%(center)r used: centering should not be combined with fitting.", kwargs)
if len(kwargs['input']) != 3:
logger.error("If you insist on centering you must provide three groups in the 'input' kwarg: (center, fit, output)")
raise ValueError("Insufficient index groups for centering, fitting, output")
logger.info("Fitting trajectory %r to with xy=%r...", kwargs['f'], xy)
logger.info("Fitting on index group %(fitgroup)r", vars())
with utilities.in_dir(self.dirname):
if self.check_file_exists(kwargs['o'], resolve="indicate", force=force):
logger.warn("File %r exists; force regenerating it with force=True.", kwargs['o'])
else:
gromacs.trjconv(fit=fitmode, **kwargs)
logger.info("Fitted trajectory (fitmode=%s): %r.", fitmode, kwargs['o'])
return {'tpr': self.rp(kwargs['s']), 'xtc': self.rp(kwargs['o'])}
|
Write xtc that is fitted to the tpr reference structure.
Runs :class:`gromacs.tools.trjconv` with appropriate arguments
for fitting. The most important *kwargs* are listed
here but in most cases the defaults should work.
Note that the default settings do *not* include centering or
periodic boundary treatment as this often does not work well
with fitting. It is better to do this as a separate step (see
:meth:`center_fit` or :func:`gromacs.cbook.trj_fitandcenter`)
:Keywords:
*s*
Input structure (typically the default tpr file but can be set to
some other file with a different conformation for fitting)
*n*
Alternative index file.
*o*
Name of the output trajectory. A default name is created.
If e.g. *dt* = 100 is one of the *kwargs* then the default name includes
"_dt100ps".
*xy* : boolean
If ``True`` then only do a rot+trans fit in the xy plane
(good for membrane simulations); default is ``False``.
*force*
``True``: overwrite existing trajectories
``False``: throw an IOError exception
``None``: skip existing and log a warning [default]
*fitgroup*
index group to fit on ["backbone"]
.. Note:: If keyword *input* is supplied then it will override
*fitgroup*; *input* = ``[fitgroup, outgroup]``
*kwargs*
kwargs are passed to :func:`~gromacs.cbook.trj_xyfitted`
:Returns:
dictionary with keys *tpr*, *xtc*, which are the names of the
new files
|
def usergroups_users_update(
self, *, usergroup: str, users: List[str], **kwargs
) -> SlackResponse:
"""Update the list of users for a User Group
Args:
usergroup (str): The encoded ID of the User Group to update.
e.g. 'S0604QSJC'
users (list): A list of user IDs that represents the entire list of
users for the User Group. e.g. ['U060R4BJ4', 'U060RNRCZ']
"""
self._validate_xoxp_token()
kwargs.update({"usergroup": usergroup, "users": users})
return self.api_call("usergroups.users.update", json=kwargs)
|
Update the list of users for a User Group
Args:
usergroup (str): The encoded ID of the User Group to update.
e.g. 'S0604QSJC'
users (list): A list of user IDs that represents the entire list of
users for the User Group. e.g. ['U060R4BJ4', 'U060RNRCZ']
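A minimal usage sketch (hypothetical IDs, assuming `client` is an authenticated WebClient instance):
client.usergroups_users_update(usergroup="S0604QSJC", users=["U060R4BJ4", "U060RNRCZ"])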
|
def _bucket_key(self):
""" Returns hash bucket key for the redis key """
return "{}.size.{}".format(
self.prefix, (self._hashed_key//1000)
if self._hashed_key > 1000 else self._hashed_key)
|
Returns hash bucket key for the redis key
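Worked examples of the bucketing formula (hypothetical prefix and hashed keys):
# prefix = "cache", _hashed_key = 123456  ->  "cache.size.123"  (123456 // 1000)
# prefix = "cache", _hashed_key = 42      ->  "cache.size.42"   (keys <= 1000 are used as-is)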
|
def func(nargs: Optional[int] = None, nouts: Optional[int] = None, ndefs: Optional[int] = None):
"""
Decorates a normal function into a Function with (optional) numbers of arguments and outputs.
: func(nargs: Optional[int] = None, nouts: Optional[int] = None, ndefs: Optional[int] = None)
"""
return lambda f: wraps(f)(WrappedFunction(f, nargs=nargs, nouts=nouts, ndefs=ndefs))
|
Decorates a normal function into a Function with (optional) numbers of arguments and outputs.
: func(nargs: Optional[int] = None, nouts: Optional[int] = None, ndefs: Optional[int] = None)
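A minimal decoration sketch (hypothetical function; the call behaviour of the resulting WrappedFunction depends on its implementation):
add = func(nargs=2, nouts=1)(lambda a, b: a + b)  # `add` is now a WrappedFunction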
|
def from_backend(self, dagobah_id):
""" Reconstruct this Dagobah instance from the backend. """
logger.debug('Reconstructing Dagobah instance from backend with ID {0}'.format(dagobah_id))
rec = self.backend.get_dagobah_json(dagobah_id)
if not rec:
raise DagobahError('dagobah with id %s does not exist '
'in backend' % dagobah_id)
self._construct_from_json(rec)
|
Reconstruct this Dagobah instance from the backend.
|
def healthy(self):
"""Return 200 is healthy, else 500.
Override is_healthy() to change the health check.
"""
try:
if self.is_healthy():
return "OK", 200
else:
return "FAIL", 500
except Exception as e:
self.app.logger.exception(e)
return str(e), 500
|
Return 200 if healthy, else 500.
Override is_healthy() to change the health check.
|
def max_insertion(seqs, gene, domain):
"""
length of largest insertion
"""
seqs = [i[2] for i in list(seqs.values()) if i[2] != [] and i[0] == gene and i[1] == domain]
lengths = []
for seq in seqs:
for ins in seq:
lengths.append(int(ins[2]))
if lengths == []:
return 100
return max(lengths)
|
length of largest insertion
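A doctest-style sketch with a hypothetical `seqs` structure (each value is [gene, domain, insertions] and each insertion stores its length at index 2):
>>> seqs = {'seqA': ['16S', 'V4', [[10, 20, '11']]], 'seqB': ['16S', 'V4', [[5, 40, '36']]]}
>>> max_insertion(seqs, '16S', 'V4')
36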
|
def calc_svd(self, lapack_driver='gesdd'):
""" Return the SVD decomposition of data
The input data np.ndarray shall be of dimension 2,
with time as the first dimension, and the channels in the second
Hence data should be of shape (nt, nch)
Uses scipy.linalg.svd(), with:
full_matrices = True
compute_uv = True
overwrite_a = False
check_finite = True
See scipy online doc for details
Return
------
chronos: np.ndarray
First arg (u) returned by scipy.linalg.svd()
Contains the so-called 'chronos', of shape (nt, nt)
i.e.: the time-dependent part of the decomposition
s: np.ndarray
Second arg (s) returned by scipy.linalg.svd()
Contains the singular values, of shape (nch,)
i.e.: the channel-dependent part of the decomposition
topos: np.ndarray
Third arg (v) returned by scipy.linalg.svd()
Contains the so-called 'topos', of shape (nch, nch)
i.e.: the channel-dependent part of the decomposition
"""
if self._isSpectral():
msg = "svd not implemented yet for spectral data class"
raise Exception(msg)
chronos, s, topos = _comp.calc_svd(self.data, lapack_driver=lapack_driver)
return chronos, s, topos
|
Return the SVD decomposition of data
The input data np.ndarray shall be of dimension 2,
with time as the first dimension, and the channels in the second
Hence data should be of shape (nt, nch)
Uses scipy.linalg.svd(), with:
full_matrices = True
compute_uv = True
overwrite_a = False
check_finite = True
See scipy online doc for details
Return
------
chronos: np.ndarray
First arg (u) returned by scipy.linalg.svd()
Contains the so-called 'chronos', of shape (nt, nt)
i.e.: the time-dependent part of the decomposition
s: np.ndarray
Second arg (s) returned by scipy.linalg.svd()
Contains the singular values, of shape (nch,)
i.e.: the channel-dependent part of the decomposition
topos: np.ndarray
Third arg (v) returned by scipy.linalg.svd()
Contains the so-called 'topos', of shape (nch, nch)
i.e.: the channel-dependent part of the decomposition
|
async def toggle(self):
"""Toggles between pause and resume command"""
self.logger.debug("toggle command")
if not self.state == 'ready':
return
if self.streamer is None:
return
try:
if self.streamer.is_playing():
await self.pause()
else:
await self.resume()
except Exception as e:
logger.error(e)
pass
|
Toggles between pause and resume command
|
def __normalize_args(**keywds):
"""implementation details"""
if isinstance(keywds['name'], Callable) and \
None is keywds['function']:
keywds['function'] = keywds['name']
keywds['name'] = None
return keywds
|
implementation details
|
def save_output_meta(self):
"""
Save descriptive output meta data to a JSON file.
"""
options = self.options
file_path = os.path.join(options.outputdir, 'output.meta.json')
with open(file_path, 'w') as outfile:
json.dump(self.OUTPUT_META_DICT, outfile)
|
Save descriptive output meta data to a JSON file.
|
def skull_strip(dset,suffix='_ns',prefix=None,unifize=True):
''' use bet to strip skull from given anatomy '''
# should add options to use betsurf and T1/T2 in the future
# Since BET fails on weirdly distributed datasets, I added 3dUnifize in... I realize this makes this dependent on AFNI. Sorry, :)
if prefix is None:
prefix = nl.suffix(dset,suffix)
unifize_dset = nl.suffix(dset,'_u')
cmd = bet2 if bet2 else 'bet2'
if unifize:
info = nl.dset_info(dset)
if info is None:
nl.notify('Error: could not read info for dset %s' % dset,level=nl.level.error)
return False
cmd = os.path.join(fsl_dir,cmd) if fsl_dir else cmd
cutoff_value = nl.max(dset) * 0.05
nl.run(['3dUnifize','-prefix',unifize_dset,nl.calc(dset,'step(a-%f)*a' % cutoff_value)],products=unifize_dset)
else:
unifize_dset = dset
nl.run([cmd,unifize_dset,prefix,'-w',0.5],products=prefix)
|
use bet to strip skull from given anatomy
|
def dist_calc(loc1, loc2):
"""
Function to calculate the distance in km between two points.
Uses the flat Earth approximation. Better things are available for this,
like `gdal <http://www.gdal.org/>`_.
:type loc1: tuple
:param loc1: Tuple of lat, lon, depth (in decimal degrees and km)
:type loc2: tuple
:param loc2: Tuple of lat, lon, depth (in decimal degrees and km)
:returns: Distance between points in km.
:rtype: float
"""
R = 6371.009 # Radius of the Earth in km
dlat = np.radians(abs(loc1[0] - loc2[0]))
dlong = np.radians(abs(loc1[1] - loc2[1]))
ddepth = abs(loc1[2] - loc2[2])
mean_lat = np.radians((loc1[0] + loc2[0]) / 2)
dist = R * np.sqrt(dlat ** 2 + (np.cos(mean_lat) * dlong) ** 2)
dist = np.sqrt(dist ** 2 + ddepth ** 2)
return dist
|
Function to calculate the distance in km between two points.
Uses the flat Earth approximation. Better things are available for this,
like `gdal <http://www.gdal.org/>`_.
:type loc1: tuple
:param loc1: Tuple of lat, lon, depth (in decimal degrees and km)
:type loc2: tuple
:param loc2: Tuple of lat, lon, depth (in decimal degrees and km)
:returns: Distance between points in km.
:rtype: float
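A quick numeric sketch with made-up coordinates:
d = dist_calc((45.0, -75.0, 10.0), (45.1, -75.2, 12.0))  # roughly 19.4 km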
|
def get_user_choice(items):
'''Returns the selected item from provided items or None if 'q' was
entered for quit.
'''
choice = raw_input('Choose an item or "q" to quit: ')
while choice != 'q':
try:
item = items[int(choice)]
print # Blank line for readability between interactive views
return item
except ValueError:
# Passed something that couldn't be converted with int()
choice = raw_input('You entered a non-integer. Choice must be an'
' integer or "q": ')
except IndexError:
# Passed an integer that was out of range of the list of urls
choice = raw_input('You entered an invalid integer. Choice must be'
' from above url list or "q": ')
return None
|
Returns the selected item from provided items or None if 'q' was
entered for quit.
|
def shell_split(text):
"""
Split the string `text` using shell-like syntax
This avoids breaking single/double-quoted strings (e.g. containing
strings with spaces). This function is almost equivalent to the shlex.split
function (see standard library `shlex`) except that it supports
unicode strings (shlex does not support unicode until Python 2.7.3).
"""
assert is_text_string(text) # in case a QString is passed...
pattern = r'(\s+|(?<!\\)".*?(?<!\\)"|(?<!\\)\'.*?(?<!\\)\')'
out = []
for token in re.split(pattern, text):
if token.strip():
out.append(token.strip('"').strip("'"))
return out
|
Split the string `text` using shell-like syntax
This avoids breaking single/double-quoted strings (e.g. containing
strings with spaces). This function is almost equivalent to the shlex.split
function (see standard library `shlex`) except that it supports
unicode strings (shlex does not support unicode until Python 2.7.3).
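A doctest-style example of the quote handling:
>>> shell_split('python "my script.py" --verbose')
['python', 'my script.py', '--verbose']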
|
def write(self, buf):
"""Inserts a string buffer as a record.
Examples
---------
>>> record = mx.recordio.MXRecordIO('tmp.rec', 'w')
>>> for i in range(5):
... record.write('record_%d'%i)
>>> record.close()
Parameters
----------
buf : string (python2), bytes (python3)
Buffer to write.
"""
assert self.writable
self._check_pid(allow_reset=False)
check_call(_LIB.MXRecordIOWriterWriteRecord(self.handle,
ctypes.c_char_p(buf),
ctypes.c_size_t(len(buf))))
|
Inserts a string buffer as a record.
Examples
---------
>>> record = mx.recordio.MXRecordIO('tmp.rec', 'w')
>>> for i in range(5):
... record.write('record_%d'%i)
>>> record.close()
Parameters
----------
buf : string (python2), bytes (python3)
Buffer to write.
|
def groups(self, labels, collect=None):
"""Group rows by multiple columns, count or aggregate others.
Args:
``labels``: list of column names (or indices) to group on
``collect``: a function applied to values in other columns for each group
Returns: A Table with each row corresponding to a unique combination of values in
the columns specified in ``labels``, where the first columns are those
specified in ``labels``, followed by a column of counts for each of the unique
values. If ``collect`` is provided, a Table is returned with all original
columns, each containing values calculated by first grouping rows according to
values in the ``labels`` column, then applying ``collect`` to each set of
grouped values in the other columns.
Note:
The grouped columns will appear first in the result table. If ``collect`` does not
accept arguments with one of the column types, that column will be empty in the resulting
table.
>>> marbles = Table().with_columns(
... "Color", make_array("Red", "Green", "Blue", "Red", "Green", "Green"),
... "Shape", make_array("Round", "Rectangular", "Rectangular", "Round", "Rectangular", "Round"),
... "Amount", make_array(4, 6, 12, 7, 9, 2),
... "Price", make_array(1.30, 1.30, 2.00, 1.75, 1.40, 1.00))
>>> marbles
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 1.4
Green | Round | 2 | 1
>>> marbles.groups(["Color", "Shape"])
Color | Shape | count
Blue | Rectangular | 1
Green | Rectangular | 2
Green | Round | 1
Red | Round | 2
>>> marbles.groups(["Color", "Shape"], sum)
Color | Shape | Amount sum | Price sum
Blue | Rectangular | 12 | 2
Green | Rectangular | 15 | 2.7
Green | Round | 2 | 1
Red | Round | 11 | 3.05
"""
# Assume that a call to groups with one label is a call to group
if not _is_non_string_iterable(labels):
return self.group(labels, collect=collect)
collect = _zero_on_type_error(collect)
columns = []
labels = self._as_labels(labels)
for label in labels:
if label not in self.labels:
raise ValueError("All labels must exist in the table")
columns.append(self._get_column(label))
grouped = self.group(list(zip(*columns)), lambda s: s)
grouped._columns.popitem(last=False) # Discard the column of tuples
# Flatten grouping values and move them to front
counts = [len(v) for v in grouped[0]]
for label in labels[::-1]:
grouped[label] = grouped.apply(_assert_same, label)
grouped.move_to_start(label)
# Aggregate other values
if collect is None:
count = 'count' if 'count' not in labels else self._unused_label('count')
return grouped.select(labels).with_column(count, counts)
else:
for label in grouped.labels:
if label in labels:
continue
column = [collect(v) for v in grouped[label]]
del grouped[label]
grouped[_collected_label(collect, label)] = column
return grouped
|
Group rows by multiple columns, count or aggregate others.
Args:
``labels``: list of column names (or indices) to group on
``collect``: a function applied to values in other columns for each group
Returns: A Table with each row corresponding to a unique combination of values in
the columns specified in ``labels``, where the first columns are those
specified in ``labels``, followed by a column of counts for each of the unique
values. If ``collect`` is provided, a Table is returned with all original
columns, each containing values calculated by first grouping rows according to
values in the ``labels`` column, then applying ``collect`` to each set of
grouped values in the other columns.
Note:
The grouped columns will appear first in the result table. If ``collect`` does not
accept arguments with one of the column types, that column will be empty in the resulting
table.
>>> marbles = Table().with_columns(
... "Color", make_array("Red", "Green", "Blue", "Red", "Green", "Green"),
... "Shape", make_array("Round", "Rectangular", "Rectangular", "Round", "Rectangular", "Round"),
... "Amount", make_array(4, 6, 12, 7, 9, 2),
... "Price", make_array(1.30, 1.30, 2.00, 1.75, 1.40, 1.00))
>>> marbles
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 1.4
Green | Round | 2 | 1
>>> marbles.groups(["Color", "Shape"])
Color | Shape | count
Blue | Rectangular | 1
Green | Rectangular | 2
Green | Round | 1
Red | Round | 2
>>> marbles.groups(["Color", "Shape"], sum)
Color | Shape | Amount sum | Price sum
Blue | Rectangular | 12 | 2
Green | Rectangular | 15 | 2.7
Green | Round | 2 | 1
Red | Round | 11 | 3.05
|
def update(self, changed_state_model=None, with_expand=False):
"""Checks if all states are in tree and if tree has states which were deleted
:param changed_state_model: Model that row has to be updated
:param with_expand: The expand flag for the tree
"""
if not self.view_is_registered:
return
# define initial state-model for update
if changed_state_model is None:
# reset all
parent_row_iter = None
self.state_row_iter_dict_by_state_path.clear()
self.tree_store.clear()
if self._selected_sm_model:
changed_state_model = self._selected_sm_model.root_state
else:
return
else: # pick
if changed_state_model.state.is_root_state:
parent_row_iter = self.state_row_iter_dict_by_state_path[changed_state_model.state.get_path()]
else:
if changed_state_model.state.is_root_state_of_library:
# because either the lib-state or the lib-state-root is in the tree, the next higher hierarchy state is updated
changed_upper_state_m = changed_state_model.parent.parent
else:
changed_upper_state_m = changed_state_model.parent
# TODO check the workaround of the next 2 lines while refactoring -> it is a check to be more robust
while changed_upper_state_m.state.get_path() not in self.state_row_iter_dict_by_state_path:
# show a warning because the states_update method was avoided
logger.warning("Falling back to a parent state because this state is not in the tree.")
changed_upper_state_m = changed_upper_state_m.parent
parent_row_iter = self.state_row_iter_dict_by_state_path[changed_upper_state_m.state.get_path()]
# do recursive update
self.insert_and_update_recursively(parent_row_iter, changed_state_model, with_expand)
|
Checks whether all states are in the tree and whether the tree contains states that were deleted
:param changed_state_model: Model that row has to be updated
:param with_expand: The expand flag for the tree
|
def get_available_devices(self):
"""
Gets available devices using mbedls and self.available_edbg_ports.
:return: List of connected devices as dictionaries.
"""
connected_devices = self.mbeds.list_mbeds() if self.mbeds else []
# Check for devices not supported by mbed OS.
# Kept only for backward compatibility; this is obsolete.
edbg_ports = self.available_edbg_ports()
for port in edbg_ports:
connected_devices.append({
"platform_name": "SAM4E",
"serial_port": port,
"mount_point": None,
"target_id": None,
"baud_rate": 460800
})
for dev in connected_devices:
dev['state'] = "unknown"
return connected_devices
|
Gets available devices using mbedls and self.available_edbg_ports.
:return: List of connected devices as dictionaries.
|
def make_directory(self, directory_name, *args, **kwargs):
""" :meth:`.WNetworkClientProto.make_directory` method implementation
"""
self.dav_client().mkdir(self.join_path(self.session_path(), directory_name))
|
:meth:`.WNetworkClientProto.make_directory` method implementation
|
def server(self):
"""
UDP server to listen for responses.
"""
server = getattr(self, "_server", None)
if server is None:
log.debug("Binding datagram server to %s", self.bind)
server = DatagramServer(self.bind, self._response_received)
self._server = server
return server
|
UDP server to listen for responses.
|
def get_randomness_stream(self, decision_point: str, for_initialization: bool=False) -> RandomnessStream:
"""Provides a new source of random numbers for the given decision point.
Parameters
----------
decision_point :
A unique identifier for a stream of random numbers. Typically represents
a decision that needs to be made each time step like 'moves_left' or
'gets_disease'.
for_initialization :
A flag indicating whether this stream is used to generate key initialization information
that will be used to identify simulants in the Common Random Number framework. These streams
cannot be copied and should only be used to generate the state table columns specified
in ``builder.configuration.randomness.key_columns``.
Raises
------
RandomnessError :
If another location in the simulation has already created a randomness stream
with the same identifier.
"""
if decision_point in self._decision_points:
raise RandomnessError(f"Two separate places are attempting to create "
f"the same randomness stream for {decision_point}")
stream = RandomnessStream(key=decision_point, clock=self._clock, seed=self._seed,
index_map=self._key_mapping, manager=self, for_initialization=for_initialization)
self._decision_points[decision_point] = stream
return stream
|
Provides a new source of random numbers for the given decision point.
Parameters
----------
decision_point :
A unique identifier for a stream of random numbers. Typically represents
a decision that needs to be made each time step like 'moves_left' or
'gets_disease'.
for_initialization :
A flag indicating whether this stream is used to generate key initialization information
that will be used to identify simulants in the Common Random Number framework. These streams
cannot be copied and should only be used to generate the state table columns specified
in ``builder.configuration.randomness.key_columns``.
Raises
------
RandomnessError :
If another location in the simulation has already created a randomness stream
with the same identifier.
|
def get_array_dimensions(data):
"""
Given an array type data item, check that it is an array and
return the dimensions as a tuple.
Ex: get_array_dimensions([[1, 2, 3], [4, 5, 6]]) returns (2, 3)
"""
depths_and_dimensions = get_depths_and_dimensions(data, 0)
# re-form as a dictionary with `depth` as key, and all of the dimensions found at that depth.
grouped_by_depth = {
depth: tuple(dimension for depth, dimension in group)
for depth, group in groupby(depths_and_dimensions, itemgetter(0))
}
# validate that there is only one dimension for any given depth.
invalid_depths_dimensions = tuple(
(depth, dimensions)
for depth, dimensions in grouped_by_depth.items()
if len(set(dimensions)) != 1
)
if invalid_depths_dimensions:
raise ValidationError(
'\n'.join(
[
"Depth {0} of array data has more than one dimensions: {1}".
format(depth, dimensions)
for depth, dimensions in invalid_depths_dimensions
]
)
)
dimensions = tuple(
toolz.first(set(dimensions))
for depth, dimensions in sorted(grouped_by_depth.items())
)
return dimensions
|
Given an array type data item, check that it is an array and
return the dimensions as a tuple.
Ex: get_array_dimensions([[1, 2, 3], [4, 5, 6]]) returns (2, 3)
|
def prepare_socket(bind_addr, family, type, proto, nodelay, ssl_adapter):
"""Create and prepare the socket object."""
sock = socket.socket(family, type, proto)
prevent_socket_inheritance(sock)
host, port = bind_addr[:2]
IS_EPHEMERAL_PORT = port == 0
if not (IS_WINDOWS or IS_EPHEMERAL_PORT):
"""Enable SO_REUSEADDR for the current socket.
Skip for Windows (has different semantics)
or ephemeral ports (can steal ports from others).
Refs:
* https://msdn.microsoft.com/en-us/library/ms740621(v=vs.85).aspx
* https://github.com/cherrypy/cheroot/issues/114
* https://gavv.github.io/blog/ephemeral-port-reuse/
"""
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if nodelay and not isinstance(bind_addr, str):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if ssl_adapter is not None:
sock = ssl_adapter.bind(sock)
# If listening on the IPV6 any address ('::' = IN6ADDR_ANY),
# activate dual-stack. See
# https://github.com/cherrypy/cherrypy/issues/871.
listening_ipv6 = (
hasattr(socket, 'AF_INET6')
and family == socket.AF_INET6
and host in ('::', '::0', '::0.0.0.0')
)
if listening_ipv6:
try:
sock.setsockopt(
socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0,
)
except (AttributeError, socket.error):
# Apparently, the socket option is not available in
# this machine's TCP stack
pass
return sock
|
Create and prepare the socket object.
|
def batch_fetch_labels(ids):
"""
fetch all rdfs:label assertions for a set of CURIEs
"""
m = {}
for id in ids:
label = anyont_fetch_label(id)
if label is not None:
m[id] = label
return m
|
fetch all rdfs:label assertions for a set of CURIEs
|
def add_project(self, ):
"""Add a project and store it in the self.projects
:returns: None
:rtype: None
:raises: None
"""
i = self.prj_tablev.currentIndex()
item = i.internalPointer()
if item:
project = item.internal_data()
if self._atype:
self._atype.projects.add(project)
elif self._dep:
self._dep.projects.add(project)
else:
project.users.add(self._user)
self.projects.append(project)
item.set_parent(None)
|
Add a project and store it in the self.projects
:returns: None
:rtype: None
:raises: None
|
def peek_at(iterable: Iterable[T]) -> Tuple[T, Iterator[T]]:
"""Returns the first value from iterable, as well as a new iterator with
the same content as the original iterable
"""
gen = iter(iterable)
peek = next(gen)
return peek, itertools.chain([peek], gen)
|
Returns the first value from iterable, as well as a new iterator with
the same content as the original iterable
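A doctest-style example:
>>> first, rest = peek_at(iter([1, 2, 3]))
>>> first
1
>>> list(rest)
[1, 2, 3]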
|
def main_passpersist(self):
"""
Main function that handles SNMP's pass_persist protocol, called by
the start method.
Direct call is unnecessary.
"""
line = sys.stdin.readline().strip()
if not line:
raise EOFError()
if 'PING' in line:
print("PONG")
elif 'getnext' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
elif oid == "":
# Fallback to the first entry
print(self.get_first())
else:
print(self.get_next(oid))
elif 'get' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
else:
print(self.get(oid))
elif 'set' in line:
oid = sys.stdin.readline().strip()
typevalue = sys.stdin.readline().strip()
self.set(oid, typevalue)
elif 'DUMP' in line: # Just for debugging
from pprint import pprint
pprint(self.data)
else:
print("NONE")
sys.stdout.flush()
|
Main function that handles SNMP's pass_persist protocol, called by
the start method.
Direct call is unnecessary.
|
def before_all(ctx):
"""
Pulls down busybox:latest before anything is tested.
"""
ctx.client = get_client()
try:
ctx.client.inspect_image(IMAGE)
except NotFound:
ctx.client.pull(IMAGE)
|
Pulls down busybox:latest before anything is tested.
|
def alternative_short_name(self, name=None, entry_name=None, limit=None, as_df=False):
"""Method to query :class:`.models.AlternativeShortlName` objects in database
:param name: alternative short name(s)
:type name: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.AlternativeShortName`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.AlternativeShortName`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.AlternativeShortName)
model_queries_config = (
(name, models.AlternativeShortName.name),
)
q = self.get_model_queries(q, model_queries_config)
q = self.get_one_to_many_queries(q, ((entry_name, models.Entry.name),))
return self._limit_and_df(q, limit, as_df)
|
Method to query :class:`.models.AlternativeShortName` objects in the database
:param name: alternative short name(s)
:type name: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.AlternativeShortName`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.AlternativeShortName`) or :class:`pandas.DataFrame`
|
def clear_cache(backend=None):
'''
.. versionadded:: 2015.5.0
Clear the fileserver cache from VCS fileserver backends (:mod:`git
<salt.fileserver.gitfs>`, :mod:`hg <salt.fileserver.hgfs>`, :mod:`svn
<salt.fileserver.svnfs>`). Executing this runner with no arguments will
clear the cache for all enabled VCS fileserver backends, but this
can be narrowed using the ``backend`` argument.
backend
Only clear the update lock for the specified backend(s). If all passed
backends start with a minus sign (``-``), then these backends will be
excluded from the enabled backends. However, if there is a mix of
backends with and without a minus sign (ex: ``backend=-roots,git``)
then the ones starting with a minus sign will be disregarded.
CLI Example:
.. code-block:: bash
salt-run fileserver.clear_cache
salt-run fileserver.clear_cache backend=git,hg
salt-run fileserver.clear_cache hg
salt-run fileserver.clear_cache -roots
'''
fileserver = salt.fileserver.Fileserver(__opts__)
cleared, errors = fileserver.clear_cache(back=backend)
ret = {}
if cleared:
ret['cleared'] = cleared
if errors:
ret['errors'] = errors
if not ret:
return 'No cache was cleared'
return ret
|
.. versionadded:: 2015.5.0
Clear the fileserver cache from VCS fileserver backends (:mod:`git
<salt.fileserver.gitfs>`, :mod:`hg <salt.fileserver.hgfs>`, :mod:`svn
<salt.fileserver.svnfs>`). Executing this runner with no arguments will
clear the cache for all enabled VCS fileserver backends, but this
can be narrowed using the ``backend`` argument.
backend
Only clear the update lock for the specified backend(s). If all passed
backends start with a minus sign (``-``), then these backends will be
excluded from the enabled backends. However, if there is a mix of
backends with and without a minus sign (ex: ``backend=-roots,git``)
then the ones starting with a minus sign will be disregarded.
CLI Example:
.. code-block:: bash
salt-run fileserver.clear_cache
salt-run fileserver.clear_cache backend=git,hg
salt-run fileserver.clear_cache hg
salt-run fileserver.clear_cache -roots
|
def handleNotification(self, handle, data):
"""Handle Callback from a Bluetooth (GATT) request."""
_LOGGER.debug("Got notification from %s: %s", handle, codecs.encode(data, 'hex'))
if handle in self._callbacks:
self._callbacks[handle](data)
|
Handle Callback from a Bluetooth (GATT) request.
|
def array_addunique(path, value, create_parents=False, **kwargs):
"""
Add a new value to an array if the value does not exist.
:param path: The path to the array
:param value: Value to add to the array if it does not exist.
Currently the value is restricted to primitives: strings, numbers,
booleans, and `None` values.
:param create_parents: Create the array if it does not exist
.. note::
The actual position of the new item is unspecified. This means
it may be at the beginning, end, or middle of the existing
array.
This operation is only valid in :cb_bmeth:`mutate_in`.
.. seealso:: :func:`array_append`, :func:`upsert`
"""
return _gen_4spec(LCB_SDCMD_ARRAY_ADD_UNIQUE, path, value,
create_path=create_parents, **kwargs)
|
Add a new value to an array if the value does not exist.
:param path: The path to the array
:param value: Value to add to the array if it does not exist.
Currently the value is restricted to primitives: strings, numbers,
booleans, and `None` values.
:param create_parents: Create the array if it does not exist
.. note::
The actual position of the new item is unspecified. This means
it may be at the beginning, end, or middle of the existing
array.
This operation is only valid in :cb_bmeth:`mutate_in`.
.. seealso:: :func:`array_append`, :func:`upsert`
|