content
stringlengths 35
762k
| sha1
stringlengths 40
40
| id
int64 0
3.66M
|
|---|---|---|
import socket
def _get_ip():
"""
:return: This computer's default AF_INET IP address as a string
"""
# find ip using answer with 75 votes
# https://stackoverflow.com/questions/166506/finding-local-ip-addresses-using-pythons-stdlib
ip = ''
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# apparently any IP will work
sock.connect(('192.168.1.1', 1))
ip = sock.getsockname()[0]
except Exception as e:
print(e)
print('Error: Couldn\'t get IP! Make sure you are connected to a network.')
finally:
sock.close()
return str(ip)
|
f39c961877a1ec026596a7ced01679411962fca4
| 3,638,912
|
def _get_value(cav, _type):
"""Get value of custom attribute item"""
if _type == 'Map:Person':
return cav["attribute_object"]["id"] \
if cav.get("attribute_object") else None
if _type == 'Checkbox':
return cav["attribute_value"] == '1'
return cav["attribute_value"]
|
c8210579cf8b2a29dffc1f28a6e204fc9f89f274
| 3,638,913
|
def get_inference(model, vectorizer, topics, text, threshold):
    """
    Runs inference on a text input.

    Parameters
    ----------
    model: loaded model used to transform the vectorized input
    vectorizer: fitted vectorizer, e.g. TfidfVectorizer(ngram_range=(2, 3))
    topics: list of topic names aligned with the model's output columns
    text: input string to be classified
    threshold: float threshold above which a topic is reported

    Returns
    -------
    tuple: (top topic name, top score, set of topics above threshold).
    Returns ('None', -1, set()) when no topic clears the threshold.
    """
    v_text = vectorizer.transform([text])
    score = model.transform(v_text)
    # Collect every topic whose score clears the threshold.
    labels = {topics[i] for i, s in enumerate(score[0]) if s > threshold}
    if not labels:
        return 'None', -1, set()
    # BUG FIX: the original returned a bare topic here, inconsistent with the
    # 3-tuple above and with the documented return; return a uniform 3-tuple.
    best = int(np.argmax(score[0]))
    return topics[best], float(score[0][best]), labels
|
e48ba018d372de317dd79fb678d69d2c83b4787b
| 3,638,914
|
def get_message_id(update: dict, status_update: str) -> int:
    """Extract the message id from a bot update.

    Parameters
    ----------
    update : dict
        incoming bot update payload
    status_update : str
        which payload key to read: 'message' for a new message,
        'edited_message' for an edited one

    Returns
    -------
    int
        the message_id stored under the given status key
    """
    payload = update[status_update]
    return payload['message_id']
|
9b299c94e322ad9cea92fd73cb9e7a55f3364caa
| 3,638,915
|
def gmm_clustering_predict(model, X):
    """Predict cluster labels for X, an (N, 1) array, after clipping the
    values into [-2.5, 2.5]."""
    clipped = np.clip(X, -2.5, 2.5)
    return model.predict(clipped)
|
9dcff9aa68fe008713dbb5142e16702bb68f65a0
| 3,638,916
|
import requests
def http_request(url, method='GET', timeout=2, **kwargs):
    """Generic task to make an http request.

    Args:
        url: target URL.
        method: HTTP verb, case-insensitive; must be in METHOD_CHOICES.
        timeout: per-request timeout in seconds.
        **kwargs: optional 'headers', 'params' and 'data' mappings.

    Returns:
        dict with 'status_code', 'headers' and 'text' on success, or None
        when the request raised.

    Raises:
        ValueError: when `method` is not in METHOD_CHOICES.
    """
    headers = kwargs.get('headers', {})
    params = kwargs.get('params', {})
    data = kwargs.get('data', {})
    request_kwargs = {}
    if headers:
        request_kwargs['headers'] = headers
    if params:
        request_kwargs['params'] = params
    # Validate before doing any work (and before creating the session).
    if method not in METHOD_CHOICES:
        raise ValueError(f'{method} not supported!')
    method = method.lower()
    # BUG FIX: the body check previously used the caller-supplied casing, so
    # 'POST'/'PUT' silently dropped the payload; compare after lowercasing.
    if method in ('post', 'put'):
        request_kwargs['data'] = data
    s = requests.Session()
    request = getattr(s, method)
    try:
        response = request(url, timeout=timeout, **request_kwargs)
        # response.raise_for_status()
    except requests.exceptions.RequestException:
        logger.exception('%s request to url %s failed!', method, url)
        return None
    else:
        logger.info('%s request to url %s successful!', method, url)
        return {
            'status_code': response.status_code,
            'headers': dict(response.headers),
            'text': response.text
        }
|
f7605d5b88bb7e23a1b541b7103185d229427270
| 3,638,917
|
from typing import Type
def unify_nest(args: MultiNode, kwargs: MultiNode, node_str, mode, axis=0, max_depth=1):
    """
    Unify the input nested arguments, which consist of sub-arrays spread across arbitrary nodes, to unified arrays
    on the single target node.
    :param args: The nested positional arguments to unify.
    :type args: MultiNode
    :param kwargs: The nested keyword arguments to unify.
    :type kwargs: MultiNode
    :param node_str: The node to unify the nested arguments to.
    :type node_str: str
    :param mode: The mode by which to unify, must be one of [ concat | mean | sum ]
    :type mode: str
    :param axis: The axis along which to concattenate the sub-arrays. Default is 0.
    :type axis: int, optional
    :param max_depth: The maximum nested depth to reach. Default is 1. Increase this if the nest is deeper.
    :type max_depth: int, optional
    :return: nested arguments unified to the target node
    """
    # Unwrap iterator views to their underlying data before mapping.
    # NOTE(review): the isinstance check is against MultiNodeIter, not the
    # MultiNode named in the signature -- confirm the intended type.
    args = args._data if isinstance(args, MultiNodeIter) else args
    kwargs = kwargs._data if isinstance(kwargs, MultiNodeIter) else kwargs
    # Apply unify() to every leaf array in both nests, up to max_depth.
    args_uni = ivy.nested_map(args, lambda x: unify(x, node_str, mode, axis), max_depth=max_depth)
    kwargs_uni = ivy.nested_map(kwargs, lambda x: unify(x, node_str, mode, axis), max_depth=max_depth)
    return args_uni, kwargs_uni
|
392491382b31c566db7eb8b13a98001158d8153a
| 3,638,918
|
def ast_for_inv_exp(inv: 'Ast', ctx: 'ReferenceDict'):
    """
    invExp ::= atomExpr (atomExpr | invTrailer)*;

    Builds the AST for an invocation expression: a head atom optionally
    applied to following atoms, with invTrailers flipping the application
    direction (trailer(res) instead of res(trailer)).
    """
    assert inv.name is UNameEnum.invExp
    atom_expr, *inv_trailers = inv
    res = ast_for_atom_expr(atom_expr, ctx)
    # BUG FIX: `len(...) is 1` relied on CPython small-int interning (and is a
    # SyntaxWarning on modern Pythons); use a value comparison.
    if len(inv_trailers) == 1:
        [each] = inv_trailers
        if each.name is UNameEnum.atomExpr:
            return res(ast_for_atom_expr(each, ctx))
        return ast_for_atom_expr(each[0], ctx)(res)
    stack = []
    for each in inv_trailers:
        if each.name is UNameEnum.atomExpr:
            # Plain atoms accumulate as pending positional applications.
            stack.append(ast_for_atom_expr(each, ctx))
            continue
        # An invTrailer: flush pending atoms, then apply the trailer to res.
        if stack:
            res = res(*stack)
            stack.clear()
        res = (ast_for_atom_expr(each[0], ctx))(res)
    if stack:
        res = res(*stack)
    return res
|
77de8d7c1fd5dfc4a8fefa04ee6c0a5da17a6bc1
| 3,638,919
|
def create_input_pipeline(files,
                          batch_size,
                          n_epochs,
                          shape,
                          crop_shape=None,
                          crop_factor=1.0,
                          n_threads=2):
    """Creates a pipefile from a list of image files.
    Includes batch generator/central crop/resizing options.
    The resulting generator will dequeue the images batch_size at a time until
    it throws tf.errors.OutOfRangeError when there are no more images left in
    the queue.
    Parameters
    ----------
    files : list
        List of paths to image files.
    batch_size : int
        Number of image files to load at a time.
    n_epochs : int
        Number of epochs to run before raising tf.errors.OutOfRangeError
    shape : list
        [height, width, channels]
    crop_shape : list, optional
        [height, width] to crop image to; when None, no resize/crop is done.
    crop_factor : float
        Percentage of image to take starting from center.
    n_threads : int, optional
        Number of threads to use for batch shuffling
    Returns
    -------
    Tensor
        A shuffled batch of image tensors.
    """
    # We first create a "producer" queue. It creates a production line which
    # will queue up the file names and allow another queue to deque the file
    # names all using a tf queue runner.
    # Put simply, this is the entry point of the computational graph.
    # It will generate the list of file names.
    # We also specify it's capacity beforehand.
    producer = tf.train.string_input_producer(
        files, capacity=len(files), num_epochs=n_epochs)
    # We need something which can open the files and read its contents.
    reader = tf.WholeFileReader()
    # We pass the filenames to this object which can read the file's contents.
    # This will create another queue running which dequeues the previous queue.
    keys, vals = reader.read(producer)
    # And then have to decode its contents as we know it is a jpeg image
    imgs = tf.image.decode_jpeg(
        vals, channels=3 if len(shape) > 2 and shape[2] == 3 else 0)
    # We have to explicitly define the shape of the tensor.
    # This is because the decode_jpeg operation is still a node in the graph
    # and doesn't yet know the shape of the image. Future operations however
    # need explicit knowledge of the image's shape in order to be created.
    imgs.set_shape(shape)
    # BUG FIX: crop_shape defaults to None but was dereferenced
    # unconditionally (crop_shape[0]) when computing rsz_shape, crashing for
    # the documented default. Only resize/crop when a crop_shape is given.
    if crop_shape is not None:
        # Resize so the central crop preserves the aspect ratio, scaling by
        # 1/crop_factor so the crop takes the requested fraction of the image.
        if shape[0] > shape[1]:
            rsz_shape = [
                int(shape[0] / shape[1] * crop_shape[0] / crop_factor), int(
                    crop_shape[1] / crop_factor)
            ]
        else:
            rsz_shape = [
                int(crop_shape[0] / crop_factor),
                int(shape[1] / shape[0] * crop_shape[1] / crop_factor)
            ]
        rszs = tf.image.resize_images(imgs, rsz_shape)
        crops = tf.image.resize_image_with_crop_or_pad(rszs, crop_shape[0],
                                                       crop_shape[1])
    else:
        crops = imgs
    # Now we'll create a batch generator that will also shuffle our examples.
    # We tell it how many it should have in its buffer when it randomly
    # permutes the order.
    min_after_dequeue = len(files) // 100
    # The capacity should be larger than min_after_dequeue, and determines how
    # many examples are prefetched. TF docs recommend setting this value to:
    # min_after_dequeue + (num_threads + a small safety margin) * batch_size
    capacity = min_after_dequeue + (n_threads + 1) * batch_size
    # Randomize the order and output batches of batch_size.
    batch = tf.train.shuffle_batch(
        [crops],
        enqueue_many=False,
        batch_size=batch_size,
        capacity=capacity,
        min_after_dequeue=min_after_dequeue,
        num_threads=n_threads)
    # alternatively, we could use shuffle_batch_join to use multiple reader
    # instances, or set shuffle_batch's n_threads to higher than 1.
    return batch
|
17c26de3659cccd7e32d8297ed0e31167ef05c38
| 3,638,920
|
def confirm_email_page():
    """Returns page for users that have not confirmed their
    email address.

    Redirects anonymous users to login; activated users get an auth-token
    response (to `next` when provided, else the articles index); everyone
    else sees the confirm-email page.
    """
    if not g.loggedIn:
        return redirect(url_for('general.loginPage'))
    next = request.args.get('next')
    if general_db.is_activated(g.user):
        # BUG FIX: `next is not ''` compared object identity, not value, so
        # an empty-string `next` could still be treated as a redirect target.
        if next != '':
            return make_auth_token_response(g.user, g.email, next)
        return make_auth_token_response(g.user, g.email,
                                        url_for('articles.index'))
    if next:
        err = 'You must confirm your email to access this endpoint'
        flash(err, 'danger')
        return render_template('confirm_email.html', next=next, email=g.email)
    return render_template('confirm_email.html', email=g.email)
|
e37c59e9f1fa1d710d2795257be1cfb8bc9aa3df
| 3,638,921
|
from bs4 import BeautifulSoup
def get_movie_names(url_data):
    """Get all the movies from the webpage.

    Collects the href of every anchor inside <ul class="ctlg-holder"> blocks,
    skipping anchors without an href and placeholder '#' links.
    """
    soup = BeautifulSoup(url_data, 'html.parser')
    data = soup.findAll('ul', attrs={'class' : 'ctlg-holder'})  # all ul blocks with class 'ctlg-holder'
    movie_list = []
    for div in data:
        links = div.findAll('a')  # every anchor within this block
        for a in links:
            # BUG FIX: the original tested `a is not "#"`, which compared the
            # tag object's identity to a string (always True) and could append
            # None hrefs; filter on the href value instead.
            href = a.get('href', None)
            if href is not None and href != "#":
                movie_list.append(href)
    print("Movie Names Obtained")
    return movie_list
|
1cae6b0093f0e0ca9e361bdc207be9ea654e7c2b
| 3,638,922
|
def message_results():
    """Shows the user their message, with the letters in sorted order."""
    encrypted = sort_letters(request.form.get('message'))
    return render_template('message_results.html', message=encrypted)
|
8e0868330c318c958da496a742f630583822bbd0
| 3,638,923
|
def flatten_dict(dicts, keys):
    """
    Input is a list of dicts.  For each requested key, pull that key's value
    out of every dict, flatten the collected values with flatten_n, and map
    the key to the flattened result in a new dictionary.
    """
    flattened = {}
    for key in keys:
        flattened[key] = flatten_n([d[key] for d in dicts])
    return flattened
|
ca037e47e2e6287145da693cd55f47719d463115
| 3,638,924
|
def render_url(fullpath, notebook=False):  # , prefix="files"):
    """Converts a path relative to the notebook (i.e. kernel) to a URL that
    can be served by the notebook server, by prepending the notebook
    directory.  Absolute http(s) URLs are passed through unchanged."""
    # BUG FIX: https:// URLs were previously mangled by prepending the root.
    if fullpath.startswith(('http://', 'https://')):
        url = fullpath
    else:
        url = (radiopadre.FILE_URL_ROOT if not notebook else radiopadre.NOTEBOOK_URL_ROOT) + fullpath
    # print "{} URL is {}".format(fullpath, url)
    return url
|
ee401c4521cf93fe4ec95b2d3c1fb7dbe337ff52
| 3,638,925
|
def register():
    """Register User route."""
    new_user = User.register(request.form.get('email'),
                             request.form.get('password'))
    if not new_user:
        return jsonify({'message': 'Invalid username or password.'}), 400
    return jsonify({'message': 'Registration successful.'}), 201
|
a6148b514268e36fc28a69737718598fcc355460
| 3,638,926
|
import json
def route_sns_task(event, context):
    """
    Gets the SNS message from the first Lambda event record, deserialises
    it from JSON, and hands it to run_message for dispatch.
    """
    payload = event['Records'][0]['Sns']['Message']
    return run_message(json.loads(payload))
|
1e7c8f774f62cddf633e51d631f74cad3fa1ec8e
| 3,638,927
|
def release_dp_mean_absolute_deviation(x, bounds, epsilon):
    """Release the dp mean absolute deviation.
    Assumes dataset size len(`x`) is public.
    Theorem 27: https://arxiv.org/pdf/2001.02285.pdf

    :param x: array-like of raw values
    :param bounds: (lower, upper) clipping bounds
    :param epsilon: privacy budget
    :return: Laplace-noised mean absolute deviation
    """
    lower, upper = bounds
    sensitivity = (upper - lower) * 2. / len(x)
    x = np.clip(x, *bounds)
    # BUG FIX: np.clip returns an ndarray, which has no .abs() method (that is
    # the pandas API); use np.abs instead.
    mad = np.abs(x - x.mean()).mean()
    base_lap = binary_search_chain(lambda s: make_base_laplace(s), sensitivity, epsilon)
    return base_lap(mad)
|
da49088e52fcd0ccf8358db072354fcd39de565e
| 3,638,928
|
def LoadScores(firstfile, prevfile):
    """Load the first and previous scores. For each peptide, compute a prize
    that is -log10(min p-value across all time points). Assumes the scores
    are p-values or equivalaent scores in (0, 1]. Do not allow null or missing
    scores.
    Return: data frame with scores and prize for each peptide
    """
    first_df = pd.read_csv(firstfile, sep="\t", comment="#", header=None, index_col=0)
    prev_df = pd.read_csv(prevfile, sep="\t", comment="#", header=None, index_col=0)
    first_shape = first_df.shape
    assert first_shape == prev_df.shape, "First and previous score files must have the same number of peptides and time points"
    assert not first_df.isnull().values.any(), "First scores file contains N/A values. Replace with 1.0"
    assert not prev_df.isnull().values.any(), "Previous scores file contains N/A values. Replace with 1.0"
    # BUG FIX: this was a Python 2 print statement, a syntax error under
    # Python 3; use the print function.
    print("Loaded {} peptides and {} scores in the first and previous score files".format(first_shape[0], first_shape[1]))
    # Merge the two types of scores
    merged_df = pd.concat([first_df, prev_df], axis=1, join="outer")
    merged_shape = merged_df.shape
    assert merged_shape[0] == first_shape[0], "First and previous significance scores contain different peptides"
    assert merged_shape[1] == 2*first_shape[1], "Unexpected number of significance scores after merging first and previous scores"
    # Compute prizes
    merged_df["prize"] = merged_df.apply(CalcPrize, axis=1)
    return merged_df
|
b6a0d9769795937a21aee195d782060db73ec494
| 3,638,930
|
def _gen_find(subseq, generator):
"""Returns the first position of `subseq` in the generator or -1 if there is no such position."""
if isinstance(subseq, bytes):
subseq = bytearray(subseq)
subseq = list(subseq)
pos = 0
saved = []
for c in generator:
saved.append(c)
if len(saved) > len(subseq):
saved.pop(0)
pos += 1
if saved == subseq:
return pos
return -1
|
ec89e787a61d684e2a7d0c8c2d0fb9c89cf73ada
| 3,638,932
|
def all_permits(target_dynamo_table):
    """
    Return every item from the DynamoDB table, following scan pagination.
    :param target_dynamo_table: table resource exposing scan()
    :return: list of all items
    """
    page = target_dynamo_table.scan()
    items = page['Items']
    # DynamoDB reports more pages by returning a LastEvaluatedKey.
    while page.get('LastEvaluatedKey', False):
        page = target_dynamo_table.scan(ExclusiveStartKey=page['LastEvaluatedKey'])
        items.extend(page['Items'])
    return items
|
8efdaf4ff407d0e2ce8dd592eeac766b0ec2264b
| 3,638,934
|
def maximum_difference_sort_value(contributions):
    """
    Auxiliary function to sort the contributions for the compare_plot.
    Returns the value of the maximum difference between values in contributions[0].

    Parameters
    ----------
    contributions: list
        list containing 2 elements:
        a Numpy.ndarray of contributions of the indexes compared, and the features' names.

    Returns
    -------
    value_max_difference : float
        Value of the maximum difference contribution.
    """
    values = contributions[0]
    # Degenerate case: a single contribution is its own "difference".
    if len(values) <= 1:
        return values[0]
    diffs = []
    for i, left in enumerate(values):
        for j in range(i, len(values)):
            diffs.append(abs(left - values[j]))
    return max(diffs)
|
cd7f66ec252199fb01b9891440d0f7da370c7b8e
| 3,638,935
|
def get_primer_target_sequence(id, svStartChr, svStartPos, svEndChr, svEndPos, svType, svComment, primerTargetSize, primerOffset, blastdbcmd, genomeFile):
    """Get the sequences in which primers will be placed.

    For the structural variant described by (svStartChr:svStartPos,
    svEndChr:svEndPos, svType), extract two genomic windows of
    `primerTargetSize` bases, each `primerOffset` bases away from the
    breakpoints, orienting (reverse-complementing) each window according to
    the variant type so that primers face the breakpoint.

    NOTE(review): targetSeq1/targetSeq2 are only assigned for the svType
    values listed below; an unrecognised svType (or a combination matching
    neither group) would raise UnboundLocalError at the return -- confirm
    callers pre-validate svType.
    """
    # First window: upstream of the start breakpoint (forward strand) ...
    if svType in ["del", "inv3to3", "trans3to3", "trans3to5", "snv", "invRefA", "invAltA"]:
        targetSeq1Start = svStartPos - primerOffset - primerTargetSize
        targetSeq1End = svStartPos - primerOffset
        targetSeq1 = get_DNA_sequence(svStartChr, targetSeq1Start, targetSeq1End, blastdbcmd, genomeFile).upper()
    # ... before the end breakpoint, clamped so it stays past the start breakpoint
    elif svType in ["invRefB"]:
        targetSeq1Start = max(svEndPos - primerOffset - primerTargetSize, svStartPos + primerOffset)
        targetSeq1End = svEndPos - primerOffset
        targetSeq1 = get_DNA_sequence(svStartChr, targetSeq1Start, targetSeq1End, blastdbcmd, genomeFile).upper()
    # ... downstream of the start breakpoint, reverse-complemented
    elif svType in ["trans5to3", "trans5to5"]:
        targetSeq1Start = svStartPos + primerOffset
        targetSeq1End = svStartPos + primerOffset + primerTargetSize
        targetSeq1 = reverseComplementSequence(get_DNA_sequence(svStartChr, targetSeq1Start, targetSeq1End, blastdbcmd, genomeFile).upper())
    # ... downstream of the start breakpoint, clamped before the end breakpoint
    elif svType in ["dup", "inv5to5", "invAltB"]:
        targetSeq1Start = svStartPos + primerOffset
        targetSeq1End = min(svStartPos + primerOffset + primerTargetSize, svEndPos - primerOffset)
        targetSeq1 = reverseComplementSequence(get_DNA_sequence(svStartChr, targetSeq1Start, targetSeq1End, blastdbcmd, genomeFile).upper())
    # Second window: downstream of the end breakpoint (forward strand) ...
    if svType in ["del", "inv5to5", "snv", "invRefB", "invAltB"]:
        targetSeq2Start = svEndPos + primerOffset
        targetSeq2End = svEndPos + primerOffset + primerTargetSize
        targetSeq2 = get_DNA_sequence(svStartChr, targetSeq2Start, targetSeq2End, blastdbcmd, genomeFile).upper()
    # ... downstream of the start breakpoint, clamped before the end breakpoint
    elif svType in ["invRefA"]:
        targetSeq2Start = svStartPos + primerOffset
        targetSeq2End = min(svStartPos + primerOffset + primerTargetSize, svEndPos - primerOffset)
        targetSeq2 = get_DNA_sequence(svStartChr, targetSeq2Start, targetSeq2End, blastdbcmd, genomeFile).upper()
    # ... upstream of the end breakpoint, clamped past the start, reverse-complemented
    elif svType in ["dup", "inv3to3", "invAltA"]:
        targetSeq2Start = max(svEndPos - primerTargetSize - primerOffset, svStartPos + primerOffset)
        targetSeq2End = svEndPos - primerOffset
        targetSeq2 = reverseComplementSequence(get_DNA_sequence(svStartChr, targetSeq2Start, targetSeq2End, blastdbcmd, genomeFile).upper())
    # ... downstream of the end breakpoint on the end chromosome (translocations)
    elif svType in ["trans3to5", "trans5to5"]:
        targetSeq2Start = svEndPos + primerOffset
        targetSeq2End = svEndPos + primerOffset + primerTargetSize
        targetSeq2 = get_DNA_sequence(svEndChr, targetSeq2Start, targetSeq2End, blastdbcmd, genomeFile).upper()
    # ... upstream of the end breakpoint on the end chromosome, reverse-complemented
    elif svType in ["trans3to3", "trans5to3"]:
        targetSeq2Start = svEndPos - primerTargetSize - primerOffset
        targetSeq2End = svEndPos - primerOffset
        targetSeq2 = reverseComplementSequence(get_DNA_sequence(svEndChr, targetSeq2Start, targetSeq2End, blastdbcmd, genomeFile).upper())
    return (targetSeq1, targetSeq2)
|
b8b32319d6a37a2373a620b1be867ab838c54fdc
| 3,638,937
|
def human_format(num):
    """
    :param num: A number to print in a nice readable way.
    :return: A string representing this number in a readable way (e.g. 1000 --> 1.00K).
    """
    suffixes = ['', 'K', 'M', 'G', 'T', 'P']
    magnitude = 0
    # BUG FIX: cap the magnitude so astronomically large inputs don't index
    # past the end of the suffix list.
    while abs(num) >= 1000 and magnitude < len(suffixes) - 1:
        magnitude += 1
        num /= 1000.0
    return '%.2f%s' % (num, suffixes[magnitude])
|
41e4f3823f756588c18b0fb926949a5aca9c6942
| 3,638,938
|
from typing import Optional
from typing import Dict
def torchserve(
    model_path: str,
    management_api: str,
    image: str = TORCHX_IMAGE,
    params: Optional[Dict[str, object]] = None,
) -> specs.AppDef:
    """Deploys the provided model to the given torchserve management API
    endpoint.

    >>> from torchx.components.serve import torchserve
    >>> torchserve(
    ...     model_path="s3://your-bucket/your-model.pt",
    ...     management_api="http://torchserve:8081",
    ... )
    AppDef(name='torchx-serve-torchserve', ...)

    Args:
        model_path: The fsspec path to the model archive file.
        management_api: The URL to the root of the torchserve management API.
        image: Container to use.
        params: torchserve parameters.
            See https://pytorch.org/serve/management_api.html#register-a-model

    Returns:
        specs.AppDef: the Torchx application definition
    """
    cmd = [
        "torchx/apps/serve/serve.py",
        "--model_path", model_path,
        "--management_api", management_api,
    ]
    # Forward any extra torchserve parameters as --key value pairs.
    for key, val in (params or {}).items():
        cmd.extend([f"--{key}", str(val)])
    role = specs.Role(
        name="torchx-serve-torchserve",
        image=image,
        entrypoint="python3",
        args=cmd,
        port_map={"model-download": 8222},
    )
    return specs.AppDef(name="torchx-serve-torchserve", roles=[role])
|
b90ec26512525e3f23a54034a685be380ef0be96
| 3,638,939
|
def ExpandRange(r, s=1):
    """expand 1-5 to [1..5], step by 1-10/2"""
    # A '/step' suffix (detected by the module's 'step' regex) overrides s.
    if REGEX_PATTERNS['step'].search(r):
        r1, step_str = r.split('/')
        s = int(step_str)
    else:
        r1 = r
    start, end = r1.split('-')
    return list(range(int(start), int(end) + 1, s))
|
1e8ca3b5b026c36817acfefd1666312b0bcccf23
| 3,638,940
|
def write_file_if_changed(name, data):
    """ Write a file if the contents have changed. Returns True if the file was written. """
    old_contents = read_file(name) if path_exists(name) else ''
    if data == old_contents:
        return False
    write_file(name, data)
    return True
|
42962e9f9159d8cab121826e223bfa10467b8d5c
| 3,638,941
|
def _get_preprocessor_loader(plugin_name):
"""Get a class that loads a preprocessor class.
This returns a class with a single class method, ``transform``,
which, when called, finds a plugin and defers to its ``transform``
class method. This is necessary because ``convert()`` is called as
a decorator at import time, but we cannot be confident that the
ResourceType plugins may not be loaded yet. (In fact, since
``convert()`` is used to decorate plugins, we can be confident
that not all plugins are loaded when it is called.)
This permits us to defer plugin searching until the moment when
``preprocess()`` calls the various preprocessors, at which point
we can be certain that all plugins have been loaded and finding
them by name will work.
"""
def transform(cls, *args, **kwargs):
plug = ResourceType.get(plugin_name)
return plug.transform(*args, **kwargs)
return type("PluginLoader_%s" % plugin_name,
(object,),
{"transform": classmethod(transform)})
|
5b2c1687be92b21f31c0e9e28a3566505831c876
| 3,638,942
|
def preprocess_sample(data, word_dict):
    """Convert one raw sample into model-ready indices.

    Args:
        data (dict): raw sample; 'Abstract' holds '$$$'-separated sentences,
            optional 'Task 2' holds the label.
        word_dict: vocabulary used to index the sentences.
    Returns:
        dict with 'Abstract' (list of index lists) and, when labelled, 'Label'.
    """
    sentences = data['Abstract'].split('$$$')
    processed = {'Abstract': [sentence_to_indices(s, word_dict) for s in sentences]}
    if 'Task 2' in data:
        processed['Label'] = label_to_onehot(data['Task 2'])
    return processed
|
0b1b50285be0afa1faf78917024b1e2cd01fb167
| 3,638,943
|
import yaml
import torch
def read_input_file(input_file_path):
    """Parse the YAML input file and derive runtime settings.

    :param input_file_path: path to the YAML configuration file
    :return: dict of inputs, augmented with device/CUDA settings
    """
    cprint('[INFO]', bc.dgreen, "read input file: {}".format(input_file_path))
    with open(input_file_path, 'r') as fp:
        dl_inputs = yaml.load(fp, Loader=yaml.FullLoader)
    dl_inputs['gru_lstm']['learning_rate'] = float(dl_inputs['gru_lstm']['learning_rate'])
    # Default to CPU; upgraded below only when a GPU is requested and present.
    device = torch.device("cpu")
    dl_inputs['general']['is_cuda'] = False
    if dl_inputs['general']['use_gpu']:
        if torch.cuda.is_available():
            device = torch.device(dl_inputs["general"]["gpu_device"])
            dl_inputs['general']['is_cuda'] = True
        else:
            cprint('[INFO]', bc.lred, 'GPU was requested but not available.')
    dl_inputs['general']['device'] = device
    cprint('[INFO]', bc.lgreen, 'pytorch will use: {}'.format(dl_inputs['general']['device']))
    # Normalise early stopping: missing or non-positive patience disables it.
    if "early_stopping_patience" not in dl_inputs["gru_lstm"]:
        dl_inputs["gru_lstm"]["early_stopping_patience"] = False
    if dl_inputs['gru_lstm']["early_stopping_patience"] <= 0:
        dl_inputs['gru_lstm']["early_stopping_patience"] = False
    # XXX separation in the input CSV file
    # Hardcoded, see issue #38
    dl_inputs['preprocessing']['csv_sep'] = "\t"
    return dl_inputs
|
ade3584937997798b496690d5799e23effae1cd1
| 3,638,944
|
def plugin(version: str) -> 'Plugin':
    """Get the application plugin.

    :param version: requested plugin API version (currently unused).
    :return: the module-level XPXPlugin.
    """
    return XPXPlugin
|
8211b8b3f2aaedbbfe289184117e6964fad5cce5
| 3,638,946
|
def is_anaconda_5():
    """
    anaconda 5 has conda version 4.4.0 or greater... obviously :/
    Returns False when the conda version cannot be determined.
    """
    vers = conda_version()
    if not vers:
        return False
    # BUG FIX: the old `major >= 4 and minor >= 4` check wrongly rejected
    # versions such as 5.0 or 5.2 (minor < 4); compare (major, minor)
    # lexicographically instead.
    return (vers['major'], vers['minor']) >= (4, 4)
|
cc4701bb788867a6370c53b48994c166dffa7cd4
| 3,638,947
|
def relay_array_map(c, fn, *array):
    """Implementation of array_map for Relay.

    Resolves the constant primitive `fn` to a Relay operation (P.switch maps
    to relay.where; everything else must have an entry in SIMPLE_MAP) and
    applies it to the operands referenced through `c`.

    NOTE(review): `c` appears to be a converter exposing .ref() to resolve
    nodes to Relay expressions -- confirm against the caller.
    """
    # fn must be a compile-time constant Primitive node.
    assert fn.is_constant(Primitive)
    fn = fn.value
    if fn is P.switch:
        rfn = relay.where
    else:
        rfn = SIMPLE_MAP[fn]
    return rfn(*[c.ref(a) for a in array])
|
8d3d89ea131272f987054c198353ec7fc398e4a0
| 3,638,948
|
def get_multi_objects_dict(*args, params=None):
    """Convert an array of objects into a single merged dictionary.

    Args:
        *args: objects whose public attributes are serialised.
        params: optional dict; when it provides a truthy 'fields' list, only
            those attributes are extracted, otherwise all attributes not
            starting with an underscore.

    Returns:
        dict merging the per-object field dicts (later objects win on key
        clashes).
    """
    object_group = []
    result = {}
    for data_object in args:
        # BUG FIX: use .get() so a params dict without a 'fields' key no
        # longer raises KeyError.
        if params is not None and params.get('fields'):
            fields = params['fields']
        else:
            fields = [attr for attr in data_object.__dict__.keys() if not attr.startswith('_')]
        row = {}
        for field in fields:
            value = getattr(data_object, field)
            # date* attributes are serialised with a fixed timestamp format.
            if field.startswith('date') and value is not None:
                row.update({field: value.strftime('%Y-%m-%d %H:%M:%S')})
            else:
                row.update({field: parse_value(value)})
        object_group.append(row)
    for data_object in object_group:
        result.update(**data_object)
    return result
|
2f4e2bc6e68bc77fedfae89ff4562cdab5fa91fb
| 3,638,949
|
from btu.manual_tests import ping_now
def test_function_ping_now_bytes():
    """
    Picking the 'ping_now' function and return as bytes.

    Builds an RQ-style job payload around btu.manual_tests.ping_now and
    returns the job serialized to bytes via Sanchez.
    """
    # Arguments describing the queued job; 'method' is the function to run.
    queue_args = {
        "site": frappe.local.site,
        "user": frappe.session.user,
        "method": ping_now,
        "event": None,
        "job_name": "ping_now",
        "is_async": True,  # always true; we want to run Tasks via the Redis Queue, not on the Web Server.
        "kwargs": {}  # if 'ping_now' had keyword arguments, we'd set them here.
    }
    # Sanchez wraps execute_job with the queue arguments and serializes the
    # resulting RQ job. NOTE(review): the exact byte format depends on
    # Sanchez/RQ serialization -- confirm before relying on it.
    new_sanchez = Sanchez()
    new_sanchez.build_internals(func=execute_job, _args=None, _kwargs=queue_args)
    http_result: bytes = new_sanchez.get_serialized_rq_job()
    return http_result
|
0673efa3ff11aa9b55b470c4ac84a35f7878af98
| 3,638,950
|
def post_equals_form(post, json_response):
    """
    Checks if the posts object is equal to the json object, comparing each
    relevant field; returns True only when every field matches.
    """
    fields = ('title', 'deadline', 'details', 'category',
              'preferred_contact', 'zip_code')
    return all(getattr(post, f) == json_response[f] for f in fields)
|
965a533c7ebbb70001bcdcb0e143b617708807e3
| 3,638,951
|
def get_shield(plugin: str) -> dict:
    """
    Generate shield json for napari plugin.
    If the package is not a valid plugin, display 'plugin not found' instead.
    :param plugin: name of the plugin
    :return: shield json used in shields.io.
    """
    # Static shields.io endpoint schema: napari-hub brand colour, label and
    # inline napari logo SVG; only 'message' varies per request.
    shield_schema = {
        "color": "#0074B8",
        "label": "napari hub",
        "logoSvg": "<svg width=\"512\" height=\"512\" viewBox=\"0 0 512 512\" fill=\"none\" "
                   "xmlns=\"http://www.w3.org/2000/svg\"><circle cx=\"256.036\" cy=\"256\" "
                   "r=\"85.3333\" fill=\"white\" stroke=\"white\" stroke-width=\"56.8889\"/>"
                   "<circle cx=\"256.036\" cy=\"42.6667\" r=\"42.6667\" fill=\"white\"/>"
                   "<circle cx=\"256.036\" cy=\"469.333\" r=\"42.6667\" fill=\"white\"/>"
                   "<path d=\"M256.036 28.4445L256.036 142.222\" stroke=\"white\" "
                   "stroke-width=\"56.8889\" stroke-linecap=\"round\" stroke-linejoin=\"round\"/>"
                   "<path d=\"M256.036 369.778L256.036 483.556\" stroke=\"white\" stroke-width=\"56.8889\" "
                   "stroke-linecap=\"round\" stroke-linejoin=\"round\"/>"
                   "<circle cx=\"71.2838\" cy=\"149.333\" r=\"42.6667\" transform=\"rotate(-60 71.2838 149.333)\" "
                   "fill=\"white\"/><circle cx=\"440.788\" cy=\"362.667\" r=\"42.6667\" "
                   "transform=\"rotate(-60 440.788 362.667)\" fill=\"white\"/>"
                   "<path d=\"M58.967 142.222L157.501 199.111\" stroke=\"white\" stroke-width=\"56.8889\" "
                   "stroke-linecap=\"round\" stroke-linejoin=\"round\"/><path d=\"M354.57 312.889L453.105 369.778\" "
                   "stroke=\"white\" stroke-width=\"56.8889\" stroke-linecap=\"round\" stroke-linejoin=\"round\"/>"
                   "<circle cx=\"71.2838\" cy=\"362.667\" r=\"42.6667\" transform=\"rotate(-120 71.2838 362.667)\" "
                   "fill=\"white\"/><circle cx=\"440.788\" cy=\"149.333\" r=\"42.6667\" "
                   "transform=\"rotate(-120 440.788 149.333)\" fill=\"white\"/>"
                   "<path d=\"M58.967 369.778L157.501 312.889\" stroke=\"white\" stroke-width=\"56.8889\" "
                   "stroke-linecap=\"round\" stroke-linejoin=\"round\"/><path d=\"M354.57 199.111L453.105 142.222\" "
                   "stroke=\"white\" stroke-width=\"56.8889\" stroke-linecap=\"round\" stroke-linejoin=\"round\"/>"
                   "</svg>",
        "schemaVersion": 1,
        "style": "flat-square"
    }
    # Only names known to the hub get their own badge text.
    plugins = get_valid_plugins()
    if plugin not in plugins:
        shield_schema['message'] = 'plugin not found'
    else:
        shield_schema['message'] = plugin
    return shield_schema
|
f1c7dadabd0b5fe6b1b0012188559b4958ca5fd0
| 3,638,952
|
def check_auth(username, password):
    """This function is called to check if a username /
    password combination is valid.
    """
    return (username, password) == (expectedUN, expectedPW)
|
e19759a1514fad47a085e3dad2180c5b8b49827c
| 3,638,953
|
def Lambda(t, y):
    """Original Arnett 1982 dimensionless bolometric light curve expression.

    Calculates the bolometric light curve due to radioactive decay of 56Ni,
    assuming no other energy input.

    t: time since explosion in days
    y: Arnett 1982 light curve width parameter (typical 0.7 < y < 1.4)
    Returns the dimensionless light curve shape function.
    """
    tm = 2 * tNi * y
    xs = np.atleast_1d(t / tm)
    # Integrand of the Arnett convolution integral.
    integrand = lambda z: 2 * z * np.exp(-2 * z * y + z ** 2)
    out = [np.exp(-xi ** 2) * quad(integrand, 0, xi)[0] for xi in xs.ravel()]
    return np.array(out)
|
85752fa09f1189ca7e24a32d821e36c58379572d
| 3,638,954
|
from datetime import datetime
def get_utcnow_time(format: str = None) -> str:
    """
    Return string with current utc time in chosen format

    Args:
        format (str): format string. if None "%y%m%d.%H%M%S" will be used.

    Returns:
        str: formatted utc time string
    """
    fmt = "%y%m%d.%H%M%S" if format is None else format
    return datetime.utcnow().strftime(fmt)
|
994e47abde4a4b56bd0f22ccc41d7d91c7b3b8d0
| 3,638,955
|
def repair_branch(cmorph, cut, rmorph, rep, force=False):
    """Attempts to extend cut neurite using intact branch.
    Args:
        cmorph (treem.Morph): cut morphology.
        cut (treem.Node): cut node, from cmorph.
        rmorph (treem.Morph): repair morphology.
        rep (treem.Node): undamaged branch start node, from rmorph.
        force (bool): force repair if branch is too short.
    Returns:
        True if repaired.
    """
    done = 0
    # Section leading to the cut point, reordered root-to-cut.
    cutsec = list(reversed(list(cut.section(reverse=True))))
    repsec = list(rep.section())
    cutlen = cmorph.length(cutsec)
    replen = rmorph.length(repsec)
    target = cut
    if replen > cutlen:
        # Donor branch is longer than needed: walk back from its tip to the
        # node whose remaining section exceeds the surplus length, and graft
        # from there.
        for node in repsec[-1::-1]:
            if rmorph.length(node.section()) > replen - cutlen:
                break
        source = node  # pylint: disable=undefined-loop-variable
    elif rep.breadth() > 1 or force:
        # Donor is too short, but is branched (or caller forces): graft all of it.
        source = rep
    else:
        source = None
    if source:
        tree = rmorph.copy(source)
        # NOTE(review): z is mirrored before grafting -- presumably to match
        # orientation between donor and damaged morphology; confirm intent.
        scale_z = -1
        # Scale donor radii to match the mean radius of the cut section.
        scale_r = cmorph.radii(cutsec).mean() / rmorph.radii(repsec).mean()
        tree.data[:, SWC.XYZR] *= np.array([1, 1, scale_z, scale_r])
        # Rotate the donor so its mean direction (from its root) aligns with
        # the direction from the morphology root to the cut node.
        u = np.mean(tree.data[:, SWC.XYZ], axis=0) - tree.root.coord()
        v = target.coord() - cmorph.root.coord()
        axis, angle = rotation(u, v)
        tree.rotate(axis, angle)
        # Place the donor one parent-to-cut step beyond the cut node.
        shift = (target.coord() - tree.root.coord() +
                 target.coord() - target.parent.coord())
        tree.translate(shift)
        cmorph.graft(tree, target)
        done = 1
    return done
|
1e76ec2619f1b74791c1258c65c649c25261a740
| 3,638,956
|
def us2cycles(us):
    """
    Converts microseconds to integer number of tProc clock cycles.
    :param us: Number of microseconds
    :type us: float
    :return: Number of tProc clock cycles (truncated toward zero)
    :rtype: int
    """
    # fs_proc is defined at module level; presumably the tProc clock rate in
    # cycles per microsecond (MHz) -- TODO confirm units.
    return int(us*fs_proc)
|
51d405c512c146bdfda0a091470ad84593872819
| 3,638,958
|
from datetime import datetime
def date_range(begin_date, end_date):
    """Return the list of dates from begin_date to end_date, inclusive.

    Args:
        begin_date: start date string, "YYYY-MM-DD".
        end_date: end date string, "YYYY-MM-DD".

    Returns:
        list of "YYYY-MM-DD" strings; empty when begin_date > end_date.
    """
    from datetime import timedelta  # module top only imports `datetime`
    # BUG FIX: with `from datetime import datetime` in scope, the original
    # `datetime.datetime.strptime(...)` and `datetime.timedelta(...)` raise
    # AttributeError; call strptime/timedelta directly.
    dates = []
    dt = datetime.strptime(begin_date, "%Y-%m-%d")
    date = begin_date
    while date <= end_date:
        dates.append(date)
        dt += timedelta(days=1)
        date = dt.strftime("%Y-%m-%d")
    return dates
|
a3373ab76752423eaf1484e5d66dc5d6334c4360
| 3,638,959
|
def scalar(name):
    """
    Create a scalar symbolic variable with the given name.  The name is used
    during code generation, so it should match the variable name used in the
    C++ code.
    """
    return symbols(name)
|
8f1f7295d15b136be38383135729fe7717fd71b8
| 3,638,960
|
def add(number1, number2):
    """
    This function adds two numbers.
    Arguments:
        number1 : first number to be passed
        number2 : second number to be passed
    Returns: number1 + number2
        the sum of the two numbers
    Examples:
        >>> add(0,0)
        0
        >>> add(1,1)
        2
        >>> add(1.1,2.2)
        3.3000000000000003
    """
    total = number1 + number2
    return total
|
5db1a461f65672d5fc1201a82657fada30220743
| 3,638,961
|
def calculate_timeout(start_point, end_point, planner):
    """
    Compute the time limit to travel from start_point to end_point at a
    fixed speed of 5 km/hr, plus a 10 second buffer.

    Args:
        start_point: initial position
        end_point: target_position
        planner: to get the shortest path between start_point and end_point

    Returns:
        time limit in seconds considering a fixed speed of 5 km/hr
    """
    source_location = [start_point.location.x, start_point.location.y, 0.22]
    source_orientation = [start_point.orientation.x, start_point.orientation.y, 0.22]
    target_location = [end_point.location.x, end_point.location.y, end_point.location.z]
    target_orientation = [end_point.orientation.x, end_point.orientation.y, end_point.orientation.z]
    # Planner returns the route length in meters.
    distance_m = planner.get_shortest_path_distance(
        source_location, source_orientation, target_location, target_orientation)
    distance_km = distance_m / 1000.0
    return (distance_km / 5.0) * 3600.0 + 10.0
|
cb7ae44df9b6a89d2e171046fa0bdfe3f81445c5
| 3,638,962
|
def func_parallel(func, list_inputs, leave_cpu_num=1):
    """
    Map ``func`` over ``list_inputs`` using a process pool.

    :param func: callable invoked as func(list_inputs[i])
    :param list_inputs: each element is one input of func
    :param leave_cpu_num: number of CPUs to leave unused
    :return: [func(list_inputs[0]), func(list_inputs[1]), ...]
    """
    worker_count = mp.cpu_count() - leave_cpu_num
    pool = mp.Pool(processes=worker_count)
    results = pool.map(func, list_inputs)
    pool.close()
    return results
|
4642149db87236b444e26515747a18ccbc420e64
| 3,638,964
|
def get_mean(jsondata):
    """Get the average of the 'price' values in jsondata['results']."""
    results = jsondata['results']
    if len(results) > 1:
        # 'price' is the key name used by the itunes API; entries without
        # it are skipped.
        prices = [float(entry.get('price')) for entry in results if 'price' in entry]
        return mean(prices)
    return float(results[0]['price'])
|
63851f6e89bea230549975eba68391421b57f087
| 3,638,965
|
from typing import Dict
import torch
import time
def evaluate_with_trajectory(
    sc_dataset: SingleCellDataset,
    n_samples: int,
    trajectory_type: str,
    trajectory_coef: Dict,
    types: DeconvolutionDatatypeParametrization,
    deconvolution_params: Dict,
    n_iters=5_000,
):
    """Evaluate L1 error and measure fit time for fitting on a dataset
    simulated from a given trajectory.

    :param sc_dataset: SingleCellDataset to generate simulations from
    :param n_samples: number of samples along the time axis to generate
    :param trajectory_type: trajectory type matching `trajectory_coef`
    :param trajectory_coef: trajectory coefficients
    :param types: DeconvolutionDatatypeParametrization identifying datatypes
    :param deconvolution_params: dictionary with deconvolution parameters
    :param n_iters: number of learning iterations per execution
    :return: dictionary with n_samples, l1_error_norm and fit_time
    """
    # Simulate bulk data from the single-cell reference.
    simulation = simulate_data(
        w_hat_gc=torch.Tensor(sc_dataset.w_hat_gc),
        num_samples=n_samples,
        trajectory_type=trajectory_type,
        dirichlet_alpha=10.0,
        trajectory_coef=trajectory_coef,
    )
    bulk_anndata = generate_anndata_from_sim(simulation, sc_dataset)
    simulated_dataset = DeconvolutionDataset(
        types=types,
        parametrization=DeconvolutionDatasetParametrization(
            sc_anndata=sc_dataset.sc_anndata,
            sc_celltype_col="Subclustering_reduced",
            bulk_anndata=bulk_anndata,
            bulk_time_col="time",
        ),
    )
    # Build the deconvolution model and time the fit.
    model = TimeRegularizedDeconvolutionModel(
        dataset=simulated_dataset, types=types, **deconvolution_params,
    )
    start = time.perf_counter()
    model.fit_model(
        n_iters=n_iters,
        verbose=True,
        log_frequency=1000,
        keep_param_store_history=False,
    )
    elapsed = time.perf_counter() - start
    # Compare predicted trajectories against the simulated ground truth.
    errors = calculate_trajectory_prediction_error(simulation, model)
    return {
        "n_samples": n_samples,
        "l1_error_norm": errors["L1_error_norm"],
        "fit_time": elapsed,
    }
|
bb82164f4ec9d79bcc675be1612a61ff5b209752
| 3,638,966
|
def plot_diffraction_1d(result, deg):
    """
    Returns this result instance in PlotData1D representation: intensity and
    phase curves (S polarization, P polarization, and their difference) for
    every energy in the result.

    Improvements: removed the unused ``categories`` variable and collapsed
    six near-identical plot-building stanzas into one data-driven loop.

    :param result: diffraction result holding setup info, angles and curves.
    :param deg: if False the phase is expressed in radians, if True in degrees.
    :return: list of PlotData1D, six plots per energy.
    """
    # Distinguish between the strings "phase in deg" and "phase in rad".
    phase_string = "Phase in deg" if deg else "Phase in rad"
    # Retrieve setup information.
    info_dict = result.diffractionSetup().toDictionary()
    info_dict["Bragg angle"] = str(result.braggAngle())
    # Retrieve angles of the results, converted to micro-radians.
    angles_in_um = [i * 1e+6 for i in result.angleDeviations()]

    # Inner function to duplicate info for every plot.
    def addPlotInfo(info_dict, energy, angles_in_um, data):
        plot_data = PlotData1D(data[0], data[1], data[2])
        plot_data.set_x(angles_in_um)
        plot_data.set_y(data[3])
        for key, value in info_dict.items():
            plot_data.add_plot_info(key, value)
        plot_data.add_plot_info("Energy", str(energy))
        return plot_data

    plots = []
    for energy in result.energies():
        # (title, y-axis label, curve) for the six standard plots.
        curves = [
            ("Intensity - Polarization S", "Intensity",
             result.sIntensityByEnergy(energy)),
            ("Intensity - Polarization P", "Intensity",
             result.pIntensityByEnergy(energy)),
            ("Intensity difference", "Intensity",
             result.differenceIntensityByEnergy(energy)),
            ("Phase - Polarization S", phase_string,
             result.sPhaseByEnergy(energy, deg)),
            ("Phase - Polarization P", phase_string,
             result.pPhaseByEnergy(energy, deg)),
            ("Phase difference", phase_string,
             result.differencePhaseByEnergy(energy, deg)),
        ]
        for title, y_label, curve in curves:
            data = (title, "Angle deviation in urad", y_label, curve)
            plots.append(addPlotInfo(info_dict, energy, angles_in_um, data))
    return plots
|
f316e1f02a5b5b295bfed22fc5307bcf908788c2
| 3,638,968
|
import logging
def prepare_go_environ():
    """Return a dict of environment variables to set to use the Go toolset.

    Installs or updates the toolset and vendored dependencies if necessary.
    """
    bootstrap(LAYOUT, logging.INFO)
    environ = get_go_environ(LAYOUT)
    return environ
|
cf7d6ee594193317a1201beb127e607139fd367f
| 3,638,969
|
def get_subnets(client, name='tag:project', values=None, dry=True):
    """
    Describe subnets matching a filter (note: create_tags not working via
    client api, use cidr or object_id instead).

    https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.describe_subnets

    :param client: boto3 EC2 client.
    :param name: filter name, e.g. 'tag:project'.
    :param values: filter values; defaults to [ec2_project_name].
    :param dry: perform a dry run only.
    :return: describe_subnets response, or None if the error handler swallows
        the exception.
    """
    # Bug fix: the original used a mutable default list (shared across
    # calls); resolve the default inside the function instead.
    if values is None:
        values = [ec2_project_name]
    try:
        return client.describe_subnets(Filters=[{'Name': name, 'Values': values},], DryRun=dry)
    except Exception as err:
        handle(err)
|
4504a37689bce171d3d62a3cf6f66365c58f56e8
| 3,638,970
|
import requests
def get_object_handler(s3_client, request_context, user_request):
    """
    Handler for the GetObject Operation.

    Validates the user request, fetches the original object from Amazon S3,
    transforms it, applies any range/partNumber restriction, and writes the
    result back via WriteGetObjectResponse.

    :param s3_client: s3 client
    :param request_context: GetObject request context
    :param user_request: user request
    :return: WriteGetObjectResponse
    """
    # Reject invalid user requests up front.
    validation = validator.validate_request(user_request)
    if not validation.is_valid:
        return error.write_error_response(s3_client, request_context, requests.codes.bad_request,
                                          'InvalidRequest', validation.error_msg)
    # Fetch the original object from Amazon S3 via the presigned URL.
    presigned_url = request_context["inputS3Url"]
    headers = get_request_header(user_request["headers"])
    s3_response = requests.get(presigned_url, headers=headers)
    if s3_response.status_code != requests.codes.ok:
        if s3_response.status_code == requests.codes.not_modified:
            # For 304 Not Modified no error message body is needed.
            return s3_client.write_get_object_response(
                RequestRoute=request_context["outputRoute"],
                RequestToken=request_context["outputToken"],
                StatusCode=s3_response.status_code,
            )
        return error.write_error_response_for_s3(s3_client,
                                                 request_context,
                                                 s3_response)
    # Transform the whole object, then narrow to range/partNumber if present.
    transformed = transform.transform_object(s3_response.content)
    partial = apply_range_or_part_number(transformed, user_request)
    if partial.hasError:
        return error.write_error_response(s3_client, request_context, requests.codes.bad_request,
                                          'InvalidRequest', partial.error_msg)
    body = partial.object
    body_checksum = checksum.get_checksum(body)
    # Send the transformed object back to Amazon S3 Object Lambda.
    return s3_client.write_get_object_response(RequestRoute=request_context["outputRoute"],
                                               RequestToken=request_context["outputToken"],
                                               Body=body,
                                               Metadata={
                                                   'body-checksum-algorithm': body_checksum.algorithm,
                                                   'body-checksum-digest': body_checksum.digest
                                               })
|
fc98197f99e8751976245902eb4034a5b4930d3b
| 3,638,971
|
def decline_agreement(supplier_code):
    """Decline agreement (role=supplier)
    ---
    tags:
      - seller edit
    parameters:
      - name: supplier_code
        in: path
        type: number
        required: true
    responses:
      200:
        description: Agreement declined.
      400:
        description: Bad request.
      403:
        description: Unauthorised to decline agreement.
      404:
        description: Supplier not found.
      500:
        description: Unexpected error.
    """
    if current_user.supplier_code != supplier_code:
        return forbidden('Unauthorised to decline agreement')
    try:
        seller_edit_business.decline_agreement({
            'supplier_code': current_user.supplier_code,
            'email_address': current_user.email_address
        })
    except NotFoundError as nfe:
        # Bug fix: the original discarded not_found()'s response and fell
        # through to the 200 below; return it so the 404 reaches the caller.
        return not_found(str(nfe))
    except DeletedError as de:
        abort(str(de))
    except UnauthorisedError as ue:
        abort(str(ue))
    return Response(status=200)
|
fa7f2186af9f7beb2b138eab3347cd34580557f0
| 3,638,974
|
import torch
def load_model(model, model_path):
    """
    Load model weights saved at ``model_path`` into ``model``.

    Handles wrapped models (e.g. DataParallel) that expose the real model
    via ``.module``; loads non-strictly so extra/missing keys are tolerated.
    """
    state_dict = torch.load(model_path, map_location="cpu")
    target = model.module if hasattr(model, "module") else model
    target.load_state_dict(state_dict, strict=False)
    return model
|
0fbf34548474c4af89c25806f05d1e7d3170bbde
| 3,638,975
|
def get_file_size(filepath: str):
    """
    Not exactly sure how os.stat or os.path.getsize work, but they seem to get the total allocated size of the file and
    return that while the file is still copying. What we want, is the actual file size written to disk during copying.
    With standard Windows file copying, we can just try open/close the file, and if that succeeds, the file is finished.
    With Kongsberg systems writing to disk, we can actually open and read the .all file as it copies, so the try/except is
    not good enough. This function will find the length of the actual readable data on disk.

    Parameters
    ----------
    filepath
        file path to a file being written

    Returns
    -------
    int
        file size in bytes
    """
    # Bug fix: open in binary mode. These are binary (.all) files, and only
    # a binary handle guarantees tell() returns a byte offset rather than an
    # opaque text-mode cookie.
    with open(filepath, "rb") as file:
        # move pointer to the end of the file
        file.seek(0, 2)
        # the pointer position is the readable size in bytes
        return file.tell()
|
6936a8227a96e3ebc4b1146f8363f092d232cafd
| 3,638,976
|
import json
def get_aws_regions_from_file(region_file):
    """
    Return the sorted list of region names read from region_file.

    The format of region_file is as follows:
    {
        "regions": [
            "cn-north-1",
            "cn-northwest-1"
        ]
    }
    """
    with open(region_file) as handle:
        payload = json.load(handle)
    return sorted(payload.get("regions"))
|
639da8c6417295f97621f9fd5321d8499652b7b2
| 3,638,977
|
def item_pack():
    """ RESTful CRUD controller for supply item packs. """
    # Disable the inline list-add form for this table -- records are
    # presumably created from the parent item's tab instead; confirm intent.
    s3db.configure("supply_item_pack",
                   listadd = False,
                   )
    return s3_rest_controller()
|
e6bce829b441a08c98dc81fa6ac1ea432ef67c89
| 3,638,978
|
def inv_cipher(rkey, ct, Nk=4):
    """AES decryption cipher (FIPS-197 InvCipher).

    :param rkey: expanded key schedule, reshaped to 4*(Nr+1) words of 32 bits
    :param ct: 128-bit ciphertext block
    :param Nk: key length in 32-bit words (4/6/8 for AES-128/192/256)
    :return: decrypted 128-bit state
    """
    assert Nk in {4, 6, 8}
    Nr = Nk + 6  # number of rounds, per FIPS-197
    round_keys = rkey.reshape(4*(Nr+1), 32)
    state = ct.reshape(128)
    # Initial whitening with the last round key.
    state = add_round_key(state, round_keys[4*Nr:4*(Nr+1)])
    # Middle rounds Nr-1 .. 1.
    for rnd in range(Nr-1, 0, -1):
        state = inv_shift_rows(state)
        state = inv_sub_bytes(state)
        state = add_round_key(state, round_keys[4*rnd:4*(rnd+1)])
        state = inv_mix_columns(state)
    # Final round omits InvMixColumns.
    state = inv_shift_rows(state)
    state = inv_sub_bytes(state)
    state = add_round_key(state, round_keys[0:4])
    return state
|
477b32450b4fef060f936952d0af3115ca4b8add
| 3,638,979
|
def _ptrarray_to_list(ptrarray):
    """Converts a ptr_array structure from SimpLL into a Python list."""
    pointers = [ptrarray.arr[index] for index in range(ptrarray.len)]
    # Free the underlying C array once its contents have been copied out.
    lib.freePointerArray(ptrarray)
    return pointers
|
430c26f15ee41dbf5b4bdf562dd81c0167eead18
| 3,638,981
|
from typing import Callable
def endpoint(path: str) -> Callable[[Callable], Endpoint]:
    """Decorator factory for creating an ``Endpoint`` from a method.

    Arguments:
        path: The path to the API endpoint (relative to the API's
            ``base_url``).

    Returns:
        The wrapper that turns the decorated method into an ``Endpoint``
        (the original annotation ``Callable[[], Endpoint]`` omitted the
        method argument).
    """
    def wrapper(method):
        # The Endpoint pairs the path with a converter built from the method.
        return Endpoint(path, build_converter(method))
    return wrapper
|
1a0b9b836630f1ab4eea902861899c50303aa539
| 3,638,983
|
def from_string_to_bytes(a):
    """
    Return ``a`` unchanged if it is already bytes, otherwise UTF-8 encode it.

    Based on project: https://github.com/chaeplin/dashmnb.
    """
    if isinstance(a, bytes):
        return a
    return bytes(a, 'utf-8')
|
e76509f1be8baf8df0bf3b7160615f9a9c04ff86
| 3,638,984
|
def split(x, divider):
    """Split a string on ``divider``; non-strings pass through unchanged.

    Parameters
    ----------
    x : any
        A str object to be split. Anything else is returned as is.
    divider : str
        Divider string.
    """
    if not isinstance(x, str):
        return x
    return x.split(divider)
|
e77a162777d9bb13262e4686ba1cb9732ebab221
| 3,638,985
|
def despesa_update(despesa_id):
    """
    Edit an expense (despesa).

    Args:
        despesa_id (int): ID of the expense to edit.

    The balance-adjustment math is delegated to utils.py:
    adicionar_registro().

    Returns:
        Rendered template despesa.html (GET or invalid form), or a redirect
        to aplication.transacoes after a successful update.
    """
    despesa = Despesa.query.get_or_404(despesa_id)
    # Only the owner may edit their expense.
    if despesa.user != current_user:
        abort(403)
    form = DespesaForm()
    if form.validate_on_submit():
        # Remember the previous amount/account so balances can be adjusted.
        valor_antigo = despesa.valor
        id_conta_bancaria_antiga = despesa.conta_bancaria.id
        despesa.valor=form.valor.data
        despesa.data_origem=form.data_origem.data
        despesa.descricao=form.descricao.data
        despesa.categoria_despesa=form.categoria.data
        despesa.conta_bancaria=form.conta.data
        # Only settled expenses (status truthy) affect account balances --
        # presumably; confirm against the Despesa model.
        if despesa.status:
            adicionar_registro(id_conta_bancaria_antiga, form.conta.data.id,
                               valor_antigo, form.valor.data, 1)
        db.session.commit()
        flash('Sua despesa foi alterada.', 'success')
        return redirect(url_for('aplication.transacoes', despesa_id=despesa.id))
    elif request.method == 'GET':
        # Pre-populate the form with the current values.
        form.valor.data=despesa.valor
        form.data_origem.data=despesa.data_origem
        form.descricao.data=despesa.descricao
        form.categoria.data=despesa.categoria_despesa
        form.conta.data=despesa.conta_bancaria
    return render_template('despesa.html', title='Atualizar despesa',
                           legend='Atualizar despesa', form=form)
|
bd848eacc19144c40822a7389ceecdae4f5c5532
| 3,638,986
|
def _convert_format(partition):
"""
Converts the format of the python-louvain into a numpy array
Parameters
----------
partition : dict
Standard output from python-louvain package
Returns
-------
partition: np.array
Partition as a numpy array
"""
return np.array([partition[val] for val in partition.keys()])
|
5afffe9745c0083829a2ce88f5842b295583e737
| 3,638,987
|
def settingsdir():
    """Directory in which the settings file is saved."""
    return f"{module_dir()}/settings"
|
ac485b7d947cfa051adc9eeed6f194b9746d8401
| 3,638,988
|
from typing import Dict
from typing import List
import copy
def run_range_mcraptor(
    timetable: Timetable,
    origin_station: str,
    dep_secs_min: int,
    dep_secs_max: int,
    max_rounds: int,
) -> Dict[str, List[Journey]]:
    """
    Perform the McRAPTOR algorithm for a range query.

    Runs the round-based search once per potential departure time in
    [dep_secs_min, dep_secs_max] (latest first, reusing the previous run's
    final bag) and collects unique Pareto-optimal journeys per destination.
    """
    # Get stops for origins and destinations
    from_stops = timetable.stations.get_stops(origin_station)
    destination_stops = {
        st.name: timetable.stations.get_stops(st.name) for st in timetable.stations
    }
    destination_stops.pop(origin_station, None)
    # Find all trips leaving from stops within time range
    potential_trip_stop_times = timetable.trip_stop_times.get_trip_stop_times_in_range(
        from_stops, dep_secs_min, dep_secs_max
    )
    # Latest departure first so each earlier run can reuse the later result.
    potential_dep_secs = sorted(
        {tst.dts_dep for tst in potential_trip_stop_times}, reverse=True
    )
    logger.info(
        "Potential departure times : {}".format(
            [sec2str(x) for x in potential_dep_secs]
        )
    )
    journeys_to_destinations = {
        station_name: [] for station_name, _ in destination_stops.items()
    }
    logger.info("Calculating journeys to all destinations")
    s = perf_counter()
    # Find Pareto-optimal journeys for all possible departure times
    for dep_index, dep_secs in enumerate(potential_dep_secs):
        logger.info(f"Processing {dep_index} / {len(potential_dep_secs)}")
        logger.info(f"Analyzing best journey for departure time {sec2str(dep_secs)}")
        # Run Round-Based Algorithm
        mcraptor = McRaptorAlgorithm(timetable)
        if dep_index == 0:
            bag_round_stop, actual_rounds = mcraptor.run(from_stops, dep_secs, max_rounds)
        else:
            bag_round_stop, actual_rounds = mcraptor.run(from_stops, dep_secs, max_rounds, last_round_bag)
        # Bug fix: the `copy` module itself is not callable; use copy.copy()
        # to shallow-copy the final round's bag.
        last_round_bag = copy.copy(bag_round_stop[actual_rounds])
        # Determine the best destination ID, destination is a platform
        for destination_station_name, to_stops in destination_stops.items():
            destination_legs = best_legs_to_destination_station(
                to_stops, last_round_bag
            )
            if len(destination_legs) != 0:
                journeys = reconstruct_journeys(
                    from_stops, destination_legs, bag_round_stop, k=actual_rounds
                )
                journeys_to_destinations[destination_station_name].extend(journeys)
    logger.info(f"Journey calculation time: {perf_counter() - s}")
    # Keep unique journeys
    for destination_station_name, journeys in journeys_to_destinations.items():
        unique_journeys = []
        for journey in journeys:
            if journey not in unique_journeys:
                unique_journeys.append(journey)
        journeys_to_destinations[destination_station_name] = unique_journeys
    return journeys_to_destinations
|
d09a85fbe5f3e1a8e3081037e195298aed6e5fc8
| 3,638,990
|
def _choose_node_type(w_operator, w_constant, w_input, t):
    """
    Choose a random node kind (operator, constant or input variable),
    weighted by the given weightings.

    :param w_operator: Weighting of choosing an operator
    :param w_constant: Weighting of choosing a constant
    :param w_input: Weighting of choosing an input
    :param t: Trace object supplying the randomness
    :return: An operator node, constant, or the input variable
    """
    total_weight = w_operator + w_constant + w_input
    draw = t.random() * total_weight
    if draw < w_operator:
        return BNode(_random_from_list(operators, t))
    if draw < w_operator + w_constant:
        return _random_constant(t)
    return input_var
|
7517d347b97bce2748e4ccd45a5f25120e074e9d
| 3,638,991
|
def _plat_idx_to_val(idx: int , edge: float = 0.5, FIO_IO_U_PLAT_BITS: int = 6, FIO_IO_U_PLAT_VAL: int = 64) -> float:
""" Taken from fio's stat.c for calculating the latency value of a bin
from that bin's index.
idx : the value of the index into the histogram bins
edge : fractional value in the range [0,1]** indicating how far into
the bin we wish to compute the latency value of.
** edge = 0.0 and 1.0 computes the lower and upper latency bounds
respectively of the given bin index. """
# MSB <= (FIO_IO_U_PLAT_BITS-1), cannot be rounded off. Use
# all bits of the sample as index
if (idx < (FIO_IO_U_PLAT_VAL << 1)):
return idx
# Find the group and compute the minimum value of that group
error_bits = (idx >> FIO_IO_U_PLAT_BITS) - 1
base = 1 << (error_bits + FIO_IO_U_PLAT_BITS)
# Find its bucket number of the group
k = idx % FIO_IO_U_PLAT_VAL
# Return the mean (if edge=0.5) of the range of the bucket
return base + ((k + edge) * (1 << error_bits))
|
f992194492e031add3d14f0e145888303a5b4f06
| 3,638,994
|
def is_blank(value):
    """
    Returns True if ``value`` is ``None`` or an empty string.

    >>> is_blank("")
    True
    >>> is_blank(0)
    False
    >>> is_blank([])
    False
    """
    if value is None:
        return True
    return value == ""
|
6a30f9f6726701a4b7a9df8957503111a5222558
| 3,638,995
|
def overload_check(data, min_overload_samples=3):
    """Check data for overload.

    :param data: one or two (time, samples) dimensional array
    :param min_overload_samples: number of samples that need to equal the
        maximum absolute value for the signal to count as overloaded
    :return: overload status -- bool for 1-D input, list of bool per column
        for 2-D input
    """
    if data.ndim > 2:
        raise Exception('Number of dimensions of data should be 2 or less')

    def _check_channel(samples):
        # Count how many samples sit exactly at the absolute maximum.
        magnitudes = np.sort(np.abs(samples))[::-1]
        at_max = magnitudes == np.max(magnitudes)
        if np.sum(at_max) >= min_overload_samples:
            return True
        return False

    if data.ndim == 2:
        return [_check_channel(column) for column in data.T]
    return _check_channel(data)
|
9c59bb2e105828afd93af193949a2ad01a34a32e
| 3,638,996
|
import socket
import time
def send_packet_to_capture_last_one():
    """
    Since we read packets from stdout of tcpdump, we do not know when a packet is finished
    Hence you should send an additional packet after you assume all interesting packets were sent
    """
    def send():
        conf = get_netconfig()
        # With `import socket` in scope the class and constants must be
        # qualified: bare socket(AF_PACKET, SOCK_RAW) raises NameError.
        sock = socket.socket(socket.AF_PACKET, socket.SOCK_RAW)
        sock.bind((conf.dev.name, 0))
        dst_mac = MAC("22:22:22:22:22:22")
        src_ip = IP("192.168.69.10")
        dst_ip = IP("192.168.69.20")
        src_mac = MAC("11:11:11:11:11:11")
        packet = arp_packet(dst_mac, src_mac, 2, src_mac, src_ip, dst_mac, dst_ip)
        sock.send(packet)
        # Give tcpdump a moment to flush the sentinel packet.
        time.sleep(0.05)
    return send
|
984848ec685273d97630dd6a95d93c939665969e
| 3,638,997
|
from typing import Callable
from pathlib import Path
from typing import Iterable
from typing import Optional
import signal
import random
def diss(
    demos: Demos,
    to_concept: Identify,
    to_chain: MarkovChainFact,
    competency: CompetencyEstimator,
    lift_path: Callable[[Path], Path] = lambda x: x,
    n_iters: int = 25,
    reset_period: int = 5,
    cooling_schedule: Callable[[int], float] | None = None,
    size_weight: float = 1.0,
    surprise_weight: float = 1.0,
    sgs_temp: float = 2.0,
    synth_timeout: int = 15,
    example_drop_prob: float = 0.0,
) -> Iterable[tuple[LabeledExamples, Optional[Concept]]]:
    """Perform demonstration informed gradiented guided search.

    Generator: yields (proposed_examples, concept, metadata) per iteration.
    Proposals are accepted/rejected with a Metropolis-style rule on an
    energy combining concept size and surprisal; every `reset_period`
    iterations the search resets to the best example set seen so far.
    """
    # Default: temperature decays linearly from ~101 to 1 over n_iters.
    if cooling_schedule is None:
        def cooling_schedule(t: int) -> float:
            return 100*(1 - t / n_iters) + 1
    sggs = GradientGuidedSampler.from_demos(
        demos=demos,
        to_chain=to_chain,
        competency=competency,
        temp=sgs_temp,
    )
    # SIGALRM bounds the (possibly long-running) concept synthesis call.
    def handler(signum, frame):
        raise ConceptIdException
    signal.signal(signal.SIGALRM, handler)
    # Keep an example with probability (1 - example_drop_prob).
    def drop_pred(example):
        if example_drop_prob == 0.0:
            return True
        elif example_drop_prob == 1.0:
            return False
        return example_drop_prob <= random.random()
    weights = np.array([size_weight, surprise_weight])
    concept2energy = {}  # Concepts seen so far + associated energies.
    concept2data = {}    # Concepts seen so far + associated data.
    energy, new_data = float('inf'), LabeledExamples()
    for t in range(n_iters):
        temp = cooling_schedule(t)
        # Sample from proposal distribution.
        if (t % reset_period) == 0:  # Reset to best example set.
            concept = None
            proposed_examples = reset(temp, concept2energy, concept2data)
        else:
            # Drop examples with some probability.
            # NOTE(review): `examples` is only bound when a proposal is
            # accepted below; if synthesis fails on every prior iteration
            # this branch can hit an unbound name -- verify upstream.
            examples2 = LabeledExamples(
                positive=filter(drop_pred, examples.positive),
                negative=filter(drop_pred, examples.negative),
            )
            proposed_examples = examples2 @ new_data
        try:
            signal.alarm(synth_timeout)
            concept = to_concept(proposed_examples, concept=concept)
            signal.alarm(0)  # Unset alarm.
            concept2data.setdefault(concept, proposed_examples)
        except ConceptIdException:
            new_data = LabeledExamples()  # Reject: New data caused problem.
            signal.alarm(0)  # Unset alarm.
            continue
        new_data, metadata = sggs(concept)
        new_data = new_data.map(lift_path)
        # Energy: weighted sum of concept size and surprisal.
        new_energy = weights @ [concept.size, metadata['surprisal']]
        metadata |= {
            'energy': new_energy,
            'conjecture': new_data,
            'data': proposed_examples,
        }
        yield (proposed_examples, concept, metadata)
        # DISS Bookkeeping for resets.
        concept2energy[concept] = new_energy
        # Accept/Reject proposal based on energy delta (Metropolis rule).
        dE = new_energy - energy
        if (dE < 0) or (np.exp(-dE / temp) > np.random.rand()):
            energy, examples = new_energy, proposed_examples  # Accept.
        else:
            new_data = LabeledExamples()  # Reject.
|
016af4c38a890426fa148af78e1349b6bacdfa79
| 3,638,998
|
def expand_gelu(expand_info):
    """Gelu expander.

    Builds a graph computing x * sigmoid(u) with u = CSVALUE_A * (x +
    CSVALUE * x**3): right factor exp(min(u, 0)) and left factor
    x / (1 + exp(-|u|)) give a numerically stable sigmoid form.
    float16 input is promoted to float32 and cast back at the end.
    """
    # Get op info.
    input_desc = expand_info['input_desc'][0]
    data_format = input_desc['format']
    graph_builder = builder.GraphBuilder()
    # Generate the graph.
    with graph_builder.graph_scope('main') as graph_scope:
        # Create the tensor input; compute in float32 even for fp16 input.
        input_x = graph_builder.tensor(input_desc['shape'], input_desc['data_type'], data_format)
        dtype = input_x.dtype
        if dtype == 'float16':
            input_x = graph_builder.emit('Cast', [input_x], attrs={'dst_type': 'float32'})
        # u = CSVALUE_A * (x + CSVALUE * x^3)
        x_squared = graph_builder.emit('Mul', [input_x, input_x])
        x_cubed = graph_builder.emit('Mul', [x_squared, input_x])
        coeff = graph_builder.value(x_cubed.dtype, CSVALUE, data_format)
        cubic_term = graph_builder.emit('Mul', [x_cubed, coeff])
        inner = graph_builder.emit('TensorAdd', [input_x, cubic_term])
        coeff_a = graph_builder.value(inner.dtype, CSVALUE_A, data_format)
        u = graph_builder.emit('Mul', [inner, coeff_a])
        # right = exp(min(u, 0))
        zero = graph_builder.value(u.dtype, 0.0, data_format)
        u_clipped = graph_builder.emit('Minimum', [u, zero])
        right_mul = graph_builder.emit('Exp', [u_clipped])
        # left = x / (1 + exp(-|u|))
        u_abs = graph_builder.emit('Abs', [u])
        neg_one = graph_builder.value(u_abs.dtype, -1.0, data_format)
        u_abs_neg = graph_builder.emit('Mul', [u_abs, neg_one])
        exp_neg_abs = graph_builder.emit('Exp', [u_abs_neg])
        one = graph_builder.value(exp_neg_abs.dtype, 1.0, data_format)
        denom = graph_builder.emit('TensorAdd', [exp_neg_abs, one])
        left_mul = graph_builder.emit('RealDiv', [input_x, denom])
        result = graph_builder.emit('Mul', [left_mul, right_mul])
        if dtype == 'float16':
            result = graph_builder.emit('Cast', [result], attrs={'dst_type': 'float16'})
        # Set graph output.
        graph_scope.set_output(result)
    graph = graph_builder.get()[0]
    return graph
|
1237d4899ef0411b827efd930fb7e2e0fa5fddde
| 3,638,999
|
def cache_mixin(cache, session):
    """CacheMixin factory.

    Binds the given cache client and DB session (plus an EventHook wired to
    them) onto a fresh CacheMixinBase subclass, so each (cache, session)
    pair gets its own independent mixin class.
    """
    hook = EventHook([cache], session)
    class _Cache(CacheMixinBase):
        # Class-level bindings shared by every model that uses this mixin.
        _hook = hook
        _cache_client = cache
        _db_session = session
    return _Cache
|
79368b4cc2680ff95be520c9d877dcca5a6a1eef
| 3,639,000
|
def _read_output(path):
    """Read a CmdStan output csv file.

    Parameters
    ----------
    path : str

    Returns
    -------
    Dict[str, Any]
        Keys: "sample", "sample_warmup", "sample_columns",
        "sample_stats_columns", "configuration_info".
    """
    columns, data, comments = _read_output_file(path)
    config = _process_configuration(comments)
    # Number of stored warmup draws (zero unless save_warmup was set).
    n_warmup_draws = (
        int(config.get("save_warmup", 0))
        * int(config.get("num_warmup", 0))
        // int(config.get("thin", 1))
    )
    warmup_draws = data[:n_warmup_draws]
    posterior_draws = data[n_warmup_draws:]
    # Columns ending in "__" are sampler statistics; the rest are draws.
    stats_columns = {name: idx for name, idx in columns.items() if name.endswith("__")}
    draw_columns = {name: idx for name, idx in columns.items() if name not in stats_columns}
    return {
        "sample": posterior_draws,
        "sample_warmup": warmup_draws,
        "sample_columns": draw_columns,
        "sample_stats_columns": stats_columns,
        "configuration_info": config,
    }
|
aba1fe156de9f2fe9f595d5e5e64994b9eab539b
| 3,639,001
|
def linear_chance_constraint_noinit(a,M,N,risk,num_gpcpoly,n_states,n_uncert,p):
    """
    Pr{a^\Top x + b \leq 0} \geq 1-eps
    Converts to SOCP

    Returns the deterministic SOCP data (a_hat, Sigma_det) where
    a_hat = kron(a.T, M) flattened to length num_gpcpoly*n_states and
    Sigma_det = N.T * U.T with U = kron(diag(a), I_num_gpcpoly).

    NOTE(review): `risk`, `n_uncert` and `p` are unused here -- presumably
    kept for a uniform constraint-function signature; confirm with callers.
    NOTE(review): `*` is elementwise for ndarray and matmul for np.matrix;
    verify the intended type of N and U.
    """
    a_hat = np.kron(a.T,M)
    # diag(a), built explicitly from the column vector a.
    a_dummy = np.zeros((n_states,n_states))
    for ii in range(n_states):
        a_dummy[ii,ii] = a[ii,0]
    # Lift diag(a) to the gPC-expanded state space.
    U = np.kron(a_dummy,np.identity(num_gpcpoly))
    # Sigma_det = U*N*N.T*U.T  (only the N.T*U.T factor is materialized).
    Sigma_det = N.T*U.T
    return np.reshape(np.round(np.array(a_hat,dtype=float),5),num_gpcpoly*n_states), np.round(np.array(Sigma_det,dtype=float),5)
|
9b6421213f3f2251824a45fc04b69146cfeebbaa
| 3,639,002
|
import fastapi
def patroni(response: responses.Response,
            session: sqlalchemy.orm.Session = fastapi.Depends(models.patroni.get_session)):
    """
    Returns a health check for the reachability of the Patroni database.
    """
    # Delegate to the shared DB health check, labelling this check 'Patroni'.
    return db_health(response, session, 'Patroni')
|
fabaf09e2754e0e89ff17312e9a3f86d48972dc0
| 3,639,003
|
def authorization_code_grant_step1(request):
    """
    Code grant step1 short-cut. This will return url with code.
    """
    # Wrap the Django request in the oauth2 request type the grant expects.
    django_request = oauth2_request_class()(request)
    grant = CodeGrant(oauth2_server, django_request)
    return grant.authorization()
|
5227158127b5313b17c27fb0f351f8294faec840
| 3,639,004
|
async def activity(
    guild_id: int,
    discord_id: int,
    activity_input: DestinyActivityInputModel,
    db: AsyncSession = Depends(get_db_session),
):
    """Return information about the user their stats in the supplied activity ids.

    NOTE(review): guild_id is accepted but unused in the body -- presumably
    part of the route path; confirm.
    """
    user = await discord_users.get_profile_from_discord_id(discord_id)
    # Refresh the user's activity rows before computing stats.
    activities = DestinyActivities(db=db, user=user)
    await activities.update_activity_db()
    return await activities.get_activity_stats(
        activity_ids=activity_input.activity_ids,
        mode=activity_input.mode,
        character_class=activity_input.character_class,
        character_ids=activity_input.character_ids,
        start_time=activity_input.start_time,
        end_time=activity_input.end_time,
    )
|
25a2eb719648cbdb6161baa2b1de1570e07a42ea
| 3,639,005
|
def put_topoverlays(image, rects, alpha=0.3):
    """
    Draw semi-transparent filled/outlined rectangles derived from the given
    boxes (widened to the right, second corner shifted above the box top)
    and blend them onto the image.

    Args:
        image: an opencv image with format of BGR
        rects: a list of opencv rectangles (x1, y1, x2, y2)
        alpha: a float, blend level
    Return:
        Tuple of (blended opencv image, list of the overlay boxes drawn)
    """
    height, width, _ = image.shape
    canvas = np.ones(shape=image.shape).astype(np.uint8)
    overlay_bboxs = []
    for rect in rects:
        x1 = int(rect[0])
        # Widen by 70% of the box width, clamped to the image edge.
        x2 = int(min(rect[0] + 1.7 * (rect[2] - rect[0]), width))
        y1 = int(rect[1])
        # Second corner sits above the top edge, clamped to the image.
        y2 = int(max(rect[1] - 0.2 * (rect[3] - rect[1]), 0))
        overlay_bboxs.append([x1, y1, x2, y2])
        cv2.rectangle(canvas, (x1, y1), (x2, y2), (100, 100, 0), -1)
        cv2.rectangle(canvas, (x1, y1), (x2, y2), (0, 100, 255), 2)
    blended = cv2.addWeighted(canvas, alpha, image, 1 - alpha, 0, image)
    return blended, overlay_bboxs
|
9f6cf0cfd33214503905a16384fade2976bad190
| 3,639,006
|
def extract_next_token(link):
    """Extract the pagination token pointing at the next page of data.

    Use with paginated endpoints whose Link header embeds a ?token= query.
    """
    url = link.split(";")[0].strip("<>")
    token = url.split("?token=")[1]
    # The token arrives percent-encoded; unquote it so it can be passed
    # back via params.
    return unquote(token)
|
f0eae72cf8d99e816dfff1c6345f0a9b73abdd21
| 3,639,007
|
import urllib.request
import json
def get_host_country(host_ip):
    """Return the country code of the target IP via ipinfo.io.

    Best-effort: returns 'NOT DEFINED' on any network or parse failure.

    Fixes: `import urllib` alone does not make `urllib.request` available;
    and the bare `except:` (which also swallows KeyboardInterrupt /
    SystemExit) is narrowed to Exception.
    """
    country = 'NOT DEFINED'
    try:
        response_body = urllib.request.urlopen(f'https://ipinfo.io/{host_ip}').read().decode('utf8')
        response_data = json.loads(response_body)
        country = response_data['country']
    except Exception:
        # Deliberately best-effort: keep the default on any failure.
        pass
    return country
|
05df2c54dd275c654631cb188cbfdaa0d8e15ed9
| 3,639,008
|
import traceback
def astng_wrapper(func, modname):
    """wrapper to give to ASTNGManager.project_from_files

    Calls func(modname), printing progress; ASTNG build failures and other
    errors are reported and swallowed (returning None in that case).

    NOTE: Python 2 syntax (print statement, `except E, exc`).
    """
    print 'parsing %s...' % modname
    try:
        return func(modname)
    except ASTNGBuildingException, exc:
        print exc
    except Exception, exc:
        traceback.print_exc()
|
fbb1b3090bfc7b93258fbbcefea1a8d1463dded2
| 3,639,009
|
def clean(s):
    """Clean text!"""
    # Presumably fixRepetedVowel normalizes repeated vowels and patList.do
    # applies the substitution patterns -- verify against their definitions.
    return patList.do(liblang.fixRepetedVowel(s))
|
d81f16dfb35b4f218af5017de1eef8a005d3e7de
| 3,639,010
|
def elastic_transform(image, alpha=1000, sigma=30, spline_order=1, mode='nearest', random_state=np.random):
    """Elastic deformation of image as described in [Simard2003]_.

    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
       Convolutional Neural Networks applied to Visual Document Analysis", in
       Proc. of the International Conference on Document Analysis and
       Recognition, 2003.
    """
    assert image.ndim == 3
    plane_shape = image.shape[:2]
    # Random displacement fields, smoothed with a Gaussian and scaled.
    dx = gaussian_filter((random_state.rand(*plane_shape) * 2 - 1),
                         sigma, mode="constant", cval=0) * alpha
    dy = gaussian_filter((random_state.rand(*plane_shape) * 2 - 1),
                         sigma, mode="constant", cval=0) * alpha
    grid_x, grid_y = np.meshgrid(np.arange(plane_shape[0]), np.arange(plane_shape[1]), indexing='ij')
    coords = [np.reshape(grid_x + dx, (-1, 1)), np.reshape(grid_y + dy, (-1, 1))]
    warped = np.empty_like(image)
    # Resample each channel at the displaced coordinates.
    for channel in range(image.shape[2]):
        warped[:, :, channel] = map_coordinates(
            image[:, :, channel], coords, order=spline_order, mode=mode).reshape(plane_shape)
    return warped
|
ddabb6a15deba901398f799352216b2c89652296
| 3,639,012
|
import json
def format_search_log(json_string):
    """
    Template filter returning the sorted '_source' attributes of a logged
    search query.

    usage example {{ model_object|format_search_log }}
    """
    query_json = json.loads(json_string)
    # The original also built an unused `context` dict (dead code); the
    # sorted attribute list is the only value consumed by the template.
    return sorted(query_json.get('_source'))
|
eb5aa21590474acaee7b2b94a1cfdc52c080d017
| 3,639,013
|
def set_variable(value, variable=None):
    """Load ``value`` into session memory.

    When ``variable`` is supplied, the value is assigned into it;
    otherwise a fresh ``tf.Variable`` is created and initialized.
    Returns the variable that now holds ``value``.
    """
    sess = get_session()
    if variable is None:
        # No target given: create and initialize a brand-new variable.
        variable = tf.Variable(initial_value=value)
        sess.run([tf.variables_initializer([variable])])
    else:
        # Reuse the caller's variable via an explicit assign op.
        sess.run([tf.assign(variable, value)])
    return variable
|
8256a27c2a446e600e6cfe818c5e4c60e18f1d04
| 3,639,014
|
def matrix ( mtrx , i , j ) :
    """Get the (i, j) element from a matrix-like object.

    Tries, in order: ROOT.TMatrix element access (with bounds check),
    calling ``mtrx(i, j)``, 2-D indexing ``mtrx[i, j]``, and nested
    indexing ``mtrx[i][j]``.

    >>> mtrx = ...
    >>> value = matrix ( mtrx , 1 , 2 )

    Raises:
        TypeError: when no access style succeeds.
    """
    if isinstance ( mtrx , ROOT.TMatrix ) :
        if i < mtrx.GetNrows () and j < mtrx.GetNcols () :
            return mtrx ( i , j )
    if callable ( mtrx ) :
        try :
            return mtrx ( i , j )
        except Exception :
            pass
    # Bug fix: the original indexed an undefined name ``m`` in the two
    # fallbacks below, which always raised NameError (silently swallowed).
    try :
        return mtrx [ i , j ]
    except Exception :
        pass
    try :
        return mtrx [ i ] [ j ]
    except Exception :
        pass
    # Bug fix: the original *returned* the TypeError instead of raising it.
    raise TypeError ( "Can't get m(%d,%d) for m=%s" % ( i , j , mtrx ) )
|
1101d5bd4bf569f11ec7ee41700171906e58e743
| 3,639,015
|
def check_type(instance, *classes):
    """Check if object is an instance of any of the named classes.

    Comparison is by class ``__name__`` (a string), so it also matches
    classes re-imported under a different module path.

    Args:
        instance: object whose type is inspected.
        *classes: class *names* (strings) to match against.

    Returns:
        True when ``type(instance)`` or any of its bases has a matching
        name, else False.
    """
    # getmro() already yields type(instance) itself as its first entry, so
    # the original's separate direct-type loop was redundant: one pass
    # over the MRO covers both checks.
    wanted = set(classes)
    return any(base.__name__ in wanted for base in getmro(type(instance)))
|
1761be42fd1a781ef5b6d94b42006fdcb2789e8b
| 3,639,016
|
def test_get_earth_imperative_solution(solar_system):
"""
## Imperative Solution
The first example uses flow control statements to define a
[Imperative Solution]( https://en.wikipedia.org/wiki/Imperative_programming). This is a
very common approach to solving problems.
"""
def get_planet_by_name(name, the_solar_system):
try:
planets = the_solar_system['star']['planets']
for arc in planets.values():
for planet in arc:
if name == planet.get('name', None):
return planet
except KeyError:
pass
return None
actual = get_planet_by_name('Earth', solar_system)
expected = {'Number of Moons': '1', 'diameter': 12756, 'has-moons': True, 'name': 'Earth'}
assert actual == expected
|
f966886e3384547803106c404a21e2bb7ecd8fa9
| 3,639,017
|
import glob
import tqdm
def animate(map, time, phase0=0.0, res=75, interval=75):
    """Animate TESS's geocentric orbit alongside a rendered view of ``map``.

    Builds a 2x2 figure: three orbit projections (xz, xy, zy) with a
    day/night Earth wedge, plus the rendered map as seen from TESS, and
    drives it with matplotlib's FuncAnimation.

    NOTE(review): relies on module-level names not visible in this chunk
    (TJD0, REARTH, cmap, Wedge, FuncAnimation, starry, spice, plt, np,
    HTML, display, and a ``tqdm`` callable) — confirm against the file
    header.  ``map`` shadows the builtin and is presumably a starry map;
    it is mutated per frame and restored at the end.

    Args (assumed — TODO confirm against callers):
        map: starry map object to render.
        time: array of times (TJD) to animate over.
        phase0: initial rotational phase offset, degrees.
        interval: milliseconds between animation frames.
        res: rendered image is res x res pixels.

    Returns:
        Per-frame integrated flux, ``np.nansum(Z, axis=(1, 2))``.

    Bug fix: ``dtype=np.float`` was removed in NumPy 1.24; the builtin
    ``float`` is the documented replacement (same dtype).
    """
    # Load the SPICE data
    ephemFiles = glob.glob('../data/TESS_EPH_PRE_LONG_2018*.bsp')
    tlsFile = '../data/tess2018338154046-41240_naif0012.tls'
    solarSysFile = '../data/tess2018338154429-41241_de430.bsp'
    #print(spice.tkvrsn('TOOLKIT'))
    for ephFil in ephemFiles:
        spice.furnsh(ephFil)
    spice.furnsh(tlsFile)
    spice.furnsh(solarSysFile)
    # JD time range
    allTJD = time + TJD0
    nT = len(allTJD)
    allET = np.zeros((nT,), dtype=float)
    for i, t in enumerate(allTJD):
        allET[i] = spice.unitim(t, 'JDTDB', 'ET')
    # Calculate positions of TESS, the Earth, and the Sun
    tess = np.zeros((3, len(allET)))
    sun = np.zeros((3, len(allET)))
    for i, et in enumerate(allET):
        outTuple = spice.spkezr('Mgs Simulation', et, 'J2000', 'NONE', 'Earth')
        tess[0, i] = outTuple[0][0] * REARTH
        tess[1, i] = outTuple[0][1] * REARTH
        tess[2, i] = outTuple[0][2] * REARTH
        outTuple = spice.spkezr('Sun', et, 'J2000', 'NONE', 'Earth')
        sun[0, i] = outTuple[0][0] * REARTH
        sun[1, i] = outTuple[0][1] * REARTH
        sun[2, i] = outTuple[0][2] * REARTH
    # Figure setup
    fig = plt.figure(figsize=(8, 8))
    ax = np.zeros((2, 2), dtype=object)
    ax[0, 0] = plt.subplot(221)
    ax[0, 1] = plt.subplot(222)
    ax[1, 0] = plt.subplot(223, sharex=ax[0, 0], sharey=ax[0, 0])
    ax[1, 1] = plt.subplot(224, sharex=ax[0, 0], sharey=ax[0, 0])
    for axis in [ax[0, 0], ax[1, 0], ax[1, 1]]:
        axis.set_aspect(1)
        axis.set_xlim(-65, 65)
        axis.set_ylim(-65, 65)
        for tick in axis.xaxis.get_major_ticks() + axis.yaxis.get_major_ticks():
            tick.label.set_fontsize(10)
    i = 0
    # Orbit xz
    ax[0, 0].plot(tess[0], tess[2], "k.", ms=1, alpha=0.025)
    txz, = ax[0, 0].plot(tess[0, i], tess[2, i], 'o', color="C0", ms=4)
    norm = 1. / np.sqrt(sun[0, i] ** 2 + sun[2, i] ** 2)
    x = sun[0, i] * norm
    y = sun[2, i] * norm
    theta = 180. / np.pi * np.arctan2(y, x)
    dayxz = Wedge((0, 0), 5, theta - 90, theta + 90, color=cmap(0.8))
    nightxz = Wedge((0, 0), 5, theta + 90, theta + 270, color=cmap(0.0))
    ax[0, 0].add_artist(dayxz)
    ax[0, 0].add_artist(nightxz)
    ax[0, 0].set_ylabel("z", fontsize=16)
    # Orbit xy
    ax[1, 0].plot(tess[0], tess[1], "k.", ms=1, alpha=0.025)
    txy, = ax[1, 0].plot(tess[0, i], tess[1, i], 'o', color="C0", ms=4)
    norm = 1. / np.sqrt(sun[0, i] ** 2 + sun[1, i] ** 2)
    x = sun[0, i] * norm
    y = sun[1, i] * norm
    theta = 180. / np.pi * np.arctan2(y, x)
    dayxy = Wedge((0, 0), 5, theta - 90, theta + 90, color=cmap(0.8))
    nightxy = Wedge((0, 0), 5, theta + 90, theta + 270, color=cmap(0.0))
    ax[1, 0].add_artist(dayxy)
    ax[1, 0].add_artist(nightxy)
    ax[1, 0].set_xlabel("x", fontsize=16)
    ax[1, 0].set_ylabel("y", fontsize=16)
    # Orbit zy
    ax[1, 1].plot(tess[2], tess[1], "k.", ms=1, alpha=0.025)
    tzy, = ax[1, 1].plot(tess[2, i], tess[1, i], 'o', color="C0", ms=4)
    norm = 1. / np.sqrt(sun[2, i] ** 2 + sun[1, i] ** 2)
    x = sun[2, i] * norm
    y = sun[1, i] * norm
    theta = 180. / np.pi * np.arctan2(y, x)
    dayzy = Wedge((0, 0), 5, theta - 90, theta + 90, color=cmap(0.8))
    nightzy = Wedge((0, 0), 5, theta + 90, theta + 270, color=cmap(0.0))
    ax[1, 1].add_artist(dayzy)
    ax[1, 1].add_artist(nightzy)
    ax[1, 1].set_xlabel("z", fontsize=16)
    # Render the image
    t = (time - time[0]) / (time[-1] - time[0])
    t = 2 * (t - 0.5)
    Z = np.empty((len(time), res, res))
    north_pole = np.empty((len(time), 3))
    y = np.array(map[:, :, :])
    # NOTE(review): assumes the tqdm *function* is in scope (e.g.
    # 'from tqdm import tqdm'); a bare 'import tqdm' would make this a
    # module call and fail — confirm against the file header.
    for i in tqdm(range(len(time))):
        # Reset the map and rotate it to the correct phase
        # in the mean equatorial (J2000) frame
        map[:, :, :] = y
        '''
        map.axis = [0, 1, 0]
        phase = (360. * time[i]) % 360. + phase0
        map.rotate(phase)
        '''
        # Rotate so that TESS is along the +z axis
        r = np.sqrt(np.sum(tess[:, i] ** 2))
        costheta = np.dot(tess[:, i], [0, 0, r])
        axis = np.cross(tess[:, i], [0, 0, r])
        sintheta = np.sqrt(np.sum(axis ** 2))
        axis /= sintheta
        theta = 180. / np.pi * np.arctan2(sintheta, costheta)
        R = starry.RAxisAngle(axis, theta)
        north_pole[i] = np.dot(R, [0, 0, 1])
        source = np.dot(R, sun[:, i])
        source /= np.sqrt(np.sum(source ** 2, axis=0))
        '''
        map.axis = axis
        map.rotate(theta)
        '''
        # Align the pole of the Earth with the "north" direction
        costheta = np.dot([0, 1, 0], north_pole[i])
        axis = np.cross([0, 1, 0], north_pole[i])
        sintheta = np.sqrt(np.sum(axis ** 2))
        axis /= sintheta
        theta = 180. / np.pi * np.arctan2(sintheta, costheta)
        map.axis = axis
        map.rotate(theta)
        # Rotate to the correct phase
        map.axis = north_pole[i]
        phase = (360. * time[i]) % 360. + phase0
        map.rotate(phase)
        # Finally, rotate the image so that north always points up
        # This doesn't actually change the integrated flux!
        map.axis = [0, 0, 1]
        theta = 180. / np.pi * np.arctan2(north_pole[i, 0], north_pole[i, 1])
        map.rotate(theta)
        R = starry.RAxisAngle([0, 0, 1], theta)
        north_pole[i] = np.dot(R, north_pole[i])
        source = np.dot(R, source)
        # Render the image
        Z[i] = map.render(t=t[i], source=source, res=res)[0]
    # Reset the map
    map[:, :, :] = y
    map.axis = [0, 1, 0]
    # Image
    vmin = 0.0
    vmax = np.nanmax(Z)
    cmap.set_under(cmap(vmin))
    image = ax[0, 1].imshow(Z[0], extent=(-1, 1, -1, 1),
                            origin="lower", cmap=cmap,
                            vmin=vmin, vmax=vmax)
    npl, = ax[0, 1].plot(north_pole[0, 0], north_pole[0, 1], marker=r"$N$", color="r")
    spl, = ax[0, 1].plot(-north_pole[0, 0], -north_pole[0, 1], marker=r"$S$", color="b")
    if north_pole[0, 2] > 0:
        npl.set_visible(True)
        spl.set_visible(False)
    else:
        npl.set_visible(False)
        spl.set_visible(True)
    ax[0, 1].axis("off")
    ax[0, 1].set_xlim(-1.1, 1.1)
    ax[0, 1].set_ylim(-1.1, 1.1)
    # Function to animate each frame
    def update(i):
        # Update orbit
        txz.set_xdata(tess[0, i])
        txz.set_ydata(tess[2, i])
        norm = 1. / np.sqrt(sun[0, i] ** 2 + sun[2, i] ** 2)
        x = sun[0, i] * norm
        y = sun[2, i] * norm
        theta = 180. / np.pi * np.arctan2(y, x)
        dayxz.set_theta1(theta - 90)
        dayxz.set_theta2(theta + 90)
        nightxz.set_theta1(theta + 90)
        nightxz.set_theta2(theta + 270)
        txy.set_xdata(tess[0, i])
        txy.set_ydata(tess[1, i])
        norm = 1. / np.sqrt(sun[0, i] ** 2 + sun[1, i] ** 2)
        x = sun[0, i] * norm
        y = sun[1, i] * norm
        theta = 180. / np.pi * np.arctan2(y, x)
        dayxy.set_theta1(theta - 90)
        dayxy.set_theta2(theta + 90)
        nightxy.set_theta1(theta + 90)
        nightxy.set_theta2(theta + 270)
        tzy.set_xdata(tess[2, i])
        tzy.set_ydata(tess[1, i])
        norm = 1. / np.sqrt(sun[2, i] ** 2 + sun[1, i] ** 2)
        x = sun[2, i] * norm
        y = sun[1, i] * norm
        theta = 180. / np.pi * np.arctan2(y, x)
        dayzy.set_theta1(theta - 90)
        dayzy.set_theta2(theta + 90)
        nightzy.set_theta1(theta + 90)
        nightzy.set_theta2(theta + 270)
        image.set_data(Z[i])
        npl.set_xdata(north_pole[i, 0])
        npl.set_ydata(north_pole[i, 1])
        spl.set_xdata(-north_pole[i, 0])
        spl.set_ydata(-north_pole[i, 1])
        if north_pole[i, 2] > 0:
            npl.set_visible(True)
            spl.set_visible(False)
        else:
            npl.set_visible(False)
            spl.set_visible(True)
        return txz, dayxz, nightxz, txy, dayxy, nightxy, \
               tzy, dayzy, nightzy, image, npl, spl
    # Generate the animation
    ani = FuncAnimation(fig, update, frames=len(time), interval=interval,
                        blit=False)
    # Inline display in a Jupyter/ZMQ shell, plain plt.show() otherwise.
    try:
        if 'zmqshell' in str(type(get_ipython())):
            plt.close()
            display(HTML(ani.to_html5_video()))
        else:
            raise NameError("")
    except NameError:
        plt.show()
        plt.close()
    return np.nansum(Z, axis=(1, 2))
|
0fa39a0299a8d8cd75b0475f45e49caa731925a7
| 3,639,019
|
def bresenham(points):
    """ Apply Bresenham algorithm for a list points.
    More info: https://en.wikipedia.org/wiki/Bresenham's_line_algorithm
    # Arguments
        points: ndarray. Array of points with shape (N, 2) with N being the number
            if points and the second coordinate representing the (x, y)
            coordinates.
    # Returns
        ndarray: Array of points after having applied the bresenham algorithm.
        Shared endpoints between consecutive segments appear twice.
    """
    # Bug fix: ``np.int`` was removed in NumPy 1.24; the builtin ``int``
    # is the documented, equivalent replacement.
    points = np.asarray(points, dtype=int)

    def line(x0, y0, x1, y1):
        """Rasterize one segment with the Bresenham line algorithm."""
        d_x = x1 - x0
        d_y = y1 - y0
        x_sign = 1 if d_x > 0 else -1
        y_sign = 1 if d_y > 0 else -1
        d_x = np.abs(d_x)
        d_y = np.abs(d_y)
        if d_x > d_y:
            xx, xy, yx, yy = x_sign, 0, 0, y_sign
        else:
            # Swap axis roles so we always step along the major axis.
            d_x, d_y = d_y, d_x
            xx, xy, yx, yy = 0, y_sign, x_sign, 0
        D = 2 * d_y - d_x
        y = 0
        segment = np.empty((d_x + 1, 2), dtype=points.dtype)
        for x in range(d_x + 1):
            segment[x] = [x0 + x * xx + y * yx, y0 + x * xy + y * yy]
            if D >= 0:
                y += 1
                D -= 2 * d_x
            D += 2 * d_y
        return segment

    nb_points = len(points)
    if nb_points < 2:
        # Nothing to rasterize for 0 or 1 points.
        return points
    new_points = []
    for i in range(nb_points - 1):
        p = points[i:i + 2].ravel().tolist()
        new_points.append(line(*p))
    new_points = np.concatenate(new_points, axis=0)
    return new_points
|
9c49edd9eda3113855582ec3cc35c4d40d056dd9
| 3,639,021
|
def radialBeamProfile_flatTop(x, y, a):
    """Top hat beam profile
    \param[in] x x-position for profile computation
    \param[in] y y-position for profile computation
    \param[in] a radial extension of flat-top component
    \param[out] isp radial irradiation source profile: 1.0 inside the disk
                of radius a (boundary included), 0.0 outside
    """
    # Doc fix: the original docstring documented a nonexistent parameter R
    # (1/e-width); this top-hat profile has no Gaussian component.
    # Compare squared radii to avoid a sqrt.
    return 1.0 if x * x + y * y <= a * a else 0.0
|
699d214c499d8cbcf1c0ed26a5d0d00cf2813f3f
| 3,639,022
|
def _split_data(x, y, k_idx, k, perm_indices):
"""Randomly and coordinates splits two indexable items.
Splits items in accordiance with k-fold cross-validatoin.
Arguments:
x: [?]
indexable item
y: [?]
indexable item
k_idx: int
index of the k-fold partition to use
k: int
number of partitions for k-fold cross-validation
perm_indices: np.ndarray, int
array of indices representing a permutation of the samples with
shape (num_sample, )
Returns:
x_majority: [?]
majority partition of indexable item
y_majority: [?]
majority partition of indexable item
x_minority: [?]
minority partition of indexable item
y_minority: [?]
minority partition of indexable item
"""
assert k > 0
assert k_idx >= 0
assert k_idx < k
N = len(x)
partition_size = int(ceil(N / k))
# minority group is the single selected partition
# majority group is the other partitions
minority_start = k_idx * partition_size
minority_end = minority_start + partition_size
minority_indices = perm_indices[minority_start:minority_end]
majority_indices = np.append(perm_indices[0:minority_start],
perm_indices[minority_end:])
assert np.array_equal(np.sort(np.append(minority_indices, majority_indices)),
np.array(range(N)))
x_majority = [x[i] for i in majority_indices]
y_majority = [y[i] for i in majority_indices]
x_minority = [x[i] for i in minority_indices]
y_minority = [y[i] for i in minority_indices]
return (x_majority, y_majority), (x_minority, y_minority)
|
7e53d6a172335b7777887ed493ec41ecb6833461
| 3,639,023
|
from typing import Any
from typing import Optional
def resolve_Log(
    parent: Any,
    info: gr.ResolveInfo,
    id: Optional[int] = None,
    uuid: Optional[str] = None,
) -> ENTITY_DICT_TYPE:
    """GraphQL resolver for a single ``Log`` entity.

    Delegates to ``resolve_entity`` (defined elsewhere), which presumably
    looks the record up by ``id`` or ``uuid`` — confirm whether at least
    one must be supplied.  ``parent`` is accepted for the resolver
    signature but unused here.
    """
    return resolve_entity(Log, info, id, uuid)
|
eecd46296e9d1c0ce55dc31ee1249b4c3b512b15
| 3,639,025
|
from typing import Union
from pathlib import Path
def _get_path_size(source: Union[Path, ZipInfo]) -> int:
"""
A helper method that returns the file size for the given source
:param source: the source object to get the file size for.
:return: the source's size.
"""
return source.stat().st_size if isinstance(source, Path) else source.file_size
|
2981b2b88e776cfd2315785fea8ba1e1ec63c7cf
| 3,639,026
|
def get_graph_subsampling_dataset(
    prefix, arrays, shuffle_indices, ratio_unlabeled_data_to_labeled_data,
    max_nodes, max_edges,
    **subsampler_kwargs):
    """Returns tf_dataset for online sampling.

    Builds a generator-backed tf.data.Dataset that yields one subsampled
    graph per root node.  Root nodes are the labeled indices from
    ``arrays[f"{prefix}_indices"]``, optionally augmented with randomly
    chosen unlabeled papers and optionally shuffled.

    NOTE(review): depends on module-level names not visible in this chunk
    (NUM_PAPERS, sub_sampler, add_nodes_label, add_nodes_year, tf_graphs,
    tf, utils_tf) — confirm against the file header.

    Args (assumed — TODO confirm):
        prefix: split name, e.g. "train"/"valid", selecting the index array.
        arrays: dict of index/feature arrays used by the subsampler.
        shuffle_indices: bool; shuffle the root-node order each epoch.
        ratio_unlabeled_data_to_labeled_data: float; how many extra
            unlabeled papers to add, as a multiple of the labeled count.
        max_nodes, max_edges: subgraph size limits.
        **subsampler_kwargs: forwarded to sub_sampler.subsample_graph.
    """
    def generator():
        labeled_indices = arrays[f"{prefix}_indices"]
        if ratio_unlabeled_data_to_labeled_data > 0:
            num_unlabeled_data_to_add = int(ratio_unlabeled_data_to_labeled_data *
                                            labeled_indices.shape[0])
            # Unlabeled roots are drawn fresh (without replacement) on every
            # pass over the generator.
            unlabeled_indices = np.random.choice(
                NUM_PAPERS, size=num_unlabeled_data_to_add, replace=False)
            root_node_indices = np.concatenate([labeled_indices, unlabeled_indices])
        else:
            root_node_indices = labeled_indices
        if shuffle_indices:
            # Copy before shuffling so the stored index array is untouched.
            root_node_indices = root_node_indices.copy()
            np.random.shuffle(root_node_indices)
        for index in root_node_indices:
            graph = sub_sampler.subsample_graph(
                index,
                arrays["author_institution_index"],
                arrays["institution_author_index"],
                arrays["author_paper_index"],
                arrays["paper_author_index"],
                arrays["paper_paper_index"],
                arrays["paper_paper_index_t"],
                paper_years=arrays["paper_year"],
                max_nodes=max_nodes,
                max_edges=max_edges,
                **subsampler_kwargs)
            graph = add_nodes_label(graph, arrays["paper_label"])
            graph = add_nodes_year(graph, arrays["paper_year"])
            graph = tf_graphs.GraphsTuple(*graph)
            yield graph
    # Draw one sample graph up-front only to derive the dataset's
    # output_signature (this consumes one generator pass's first element).
    sample_graph = next(generator())
    return tf.data.Dataset.from_generator(
        generator,
        output_signature=utils_tf.specs_from_graphs_tuple(sample_graph))
|
da31aff7064c3516f95fb5597f2ee757ee35fa25
| 3,639,027
|
def check_comment_exists(comment_id_required=True):
    """
    Decorator to check if a given comment exists. If it does not, it returns an
    HTTP 400 error. Must be called with (), and may pass the optional argument
    of whether the id is required. If the id is passed, it will be checked
    against entities of the Comment kind, and a 400 error will be returned if
    it is not found. If the id is not passed, an error will be returned, unless
    the argument is False.

    The wrapped handler receives the resolved Comment entity (or None)
    in place of the raw id.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(self, comment_id=''):
            comment_entity = None
            if comment_id:
                comment_key = db.Key.from_path('Comment', int(comment_id))
                comment_entity = db.get(comment_key)
                if not comment_entity:
                    # bad request
                    return self.error(400)
            elif comment_id_required:
                return self.error(400)
            # Bug fix: propagate the handler's return value — the original
            # called func(...) and implicitly returned None.
            return func(self, comment_entity)
        return wrapper
    return decorator
|
2c2dd4bd1149ee9f0e87b5d426211a3d5bba78c0
| 3,639,028
|
def GeoMoonState(time):
    """Calculates equatorial geocentric position and velocity of the Moon at a given time.

    The position and velocity are of the Moon's center relative to the
    Earth's center.  Position (x, y, z) is in AU; velocity (vx, vy, vz)
    is in AU/day.  Coordinates are oriented with respect to the Earth's
    equator at the J2000 epoch (called EQJ in Astronomy Engine).

    If you need the Moon's position only, and not its velocity,
    it is much more efficient to use #GeoMoon instead.

    Parameters
    ----------
    time : Time
        The date and time for which to calculate the Moon's position and velocity.

    Returns
    -------
    StateVector
        The Moon's position and velocity vectors in J2000 equatorial coordinates (EQJ).
    """
    # Numerical-differentiation hack: deriving an analytic time derivative
    # for CalcMoon() would be extremely painful, so sample the position
    # slightly before and after `time`; the average gives the position,
    # the central difference gives the velocity.
    half_step = 1.0e-5  # days (~0.864 seconds)
    before = GeoMoon(time.AddDays(-half_step))
    after = GeoMoon(time.AddDays(+half_step))
    return StateVector(
        (before.x + after.x) / 2,
        (before.y + after.y) / 2,
        (before.z + after.z) / 2,
        (after.x - before.x) / (2 * half_step),
        (after.y - before.y) / (2 * half_step),
        (after.z - before.z) / (2 * half_step),
        time
    )
|
50c523a2f838e7730546fac4e4b8ed2a13eefe0a
| 3,639,029
|
import platform
def get_machine_name():
    """Return a unique name for this node, like the ``hostname`` command.

    Regarding docker containers:
    NOTE: inside the docker-dev environment, $(hostname) returns the
    container-id by default; we keep that behaviour for now.  It can be
    overridden later by passing --hostname to the container.

    Documentation:
    https://docs.docker.com/config/containers/container-networking/#ip-address-and-hostname

    :return:
        Unique name for a node in the cluster
    """
    return platform.node()
|
ae5a7090846164a97cafd07af4701dcfcc25070e
| 3,639,030
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.