| content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
|---|---|---|
import matplotlib.pyplot as plt
def image_ppg(ppg_np):
"""
Input:
ppg_np: numpy array containing the PPG (phonetic posteriorgram)
Return:
ax: matplotlib Axes holding the plot
im: the AxesImage returned by imshow
"""
ppg_deps = ppg.DependenciesPPG()
ppg_M = Matrix(ppg_np)
monophone_ppgs = ppg.reduce_ppg_dim(ppg_M, ppg_deps.monophone_trans)
monophone_ppgs = monophone_ppgs.numpy().T
fig, ax = plt.subplots(figsize=(10, 6))
im = ax.imshow(monophone_ppgs, aspect="auto", origin="lower",
interpolation='none')
return ax, im
|
714ccc3e294a5f02983a9aa384c2d6aa313ee4e5
| 3,639,369
|
def is_hex_value(val):
"""
Helper function that returns True if the provided value is a string
representing an integer in hexadecimal format.
"""
try:
int(val, 16)
except ValueError:
return False
return True
|
6ba5ac1cfa9b8a4f8397cc52a41694cca33a4b8d
| 3,639,370
|
from typing import Optional
from google.api_core.operation import Operation
from google.cloud import dataproc_v1 as dataproc
def create_cluster(*, cluster_name: str) -> Optional[Operation]:
"""Create a dataproc cluster """
cluster_client = dataproc.ClusterControllerClient(client_options={"api_endpoint": dataproc_api_endpoint})
cluster = {
"project_id": project_id,
"cluster_name": cluster_name,
"config": {
"config_bucket": config_bucket,
"temp_bucket": temp_bucket,
"master_config": {"num_instances": 1, "machine_type_uri": "n1-standard-2"},
"worker_config": {"num_instances": 2, "machine_type_uri": "n1-standard-2"},
},
}
logger.info("cluster: %s is creating now", cluster_name)
operation = cluster_client.create_cluster(request={"project_id": project_id, "region": region, "cluster": cluster})
logger.info("cluster: %s is created successfully", cluster_name)
return operation
|
1657190a7605f28f3c4dd2f2dc6c32230fb44087
| 3,639,371
|
import math
def gc_cache(seq: str) -> Cache:
"""Return the GC ratio of each range, between i and j, in the sequence
Args:
seq: The sequence whose GC content we're querying
Returns:
Cache: A cache for GC ratio lookup
"""
n = len(seq)
arr_gc = []
for _ in seq:
arr_gc.append([math.inf] * len(seq))
# fill in the diagonal
for i in range(n):
if i == n - 1: # hackish
arr_gc[i][i] = arr_gc[i - 1][i - 1]
continue
arr_gc[i][i] = 1.0 if seq[i] in "GC" else 0.0
if i == n - 2 and not arr_gc[i][i]: # don't ignore last pair
arr_gc[i][i] = 1.0 if seq[i + 1] in "GC" else 0.0
# fill in the upper right of the array
for i in range(n):
for j in range(i + 1, n):
arr_gc[i][j] = arr_gc[i][j - 1] + arr_gc[j][j]
# convert to ratios
for i in range(n):
for j in range(i, n):
arr_gc[i][j] = round(arr_gc[i][j] / (j - i + 1), 1)
return arr_gc
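# Illustrative usage: once built, the GC ratio of any range i..j is an O(1) lookup.
# The "hackish" diagonal handling mirrors the last base into the previous entry,
# so ratios that touch the very end of the sequence are approximate.
# gc_cache("ATGC")[0][3]  ->  0.5   (2 of 4 bases are G/C)
# gc_cache("ATGC")[2][3]  ->  1.0   (both positions are G/C)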
|
7118cc96d0cd431b720b099b399c64ee419df5aa
| 3,639,372
|
def ParseVariableName(variable_name, args):
"""Parse a variable name or URL, and return a resource.
Args:
variable_name: The variable name.
args: CLI arguments, possibly containing a config name.
Returns:
The parsed resource.
"""
return _ParseMultipartName(variable_name, args,
'runtimeconfig.projects.configs.variables',
'variablesId')
|
1073739195ca1bb0ac427e89e66525a7e7ada40b
| 3,639,373
|
def index(request):
"""Home page"""
return render(request, 'read_only_site/index.html')
|
623c0cdc3229d1873e50ebc3065ca1ba55da50e7
| 3,639,374
|
def parse_calculation_strings_OLD(args):
"""form the strings into arrays
"""
calculations = []
for calculation in args.calculations:
calculation = calculation.split("/")
foreground = np.fromstring(
",".join(calculation[0].replace("x", "0")), sep=",")
background = np.fromstring(
",".join(calculation[1].replace("x", "0")), sep=",")
calculations.append((foreground, background))
args.calculations = calculations
return None
|
04c979cc09bd25d659dad0a96ca89b88b43267cb
| 3,639,375
|
def find_border(edge_list) :
"""
find_border(edge_list)
Find the borders of a hexagonal graph
Input
-----
edge_list : array
List of edges of the graph
Returns
-------
border_set : set
Set of vertices of the border
"""
G = nx.Graph([(edge_list[i,0], edge_list[i,1]) for i in range(len(edge_list))])
occurence_list = np.unique(np.reshape(edge_list, 2*len(edge_list)), return_counts=True)
# list of vertices of degree 2
sec_edge_list = occurence_list[0][np.argwhere(occurence_list[:][1] == 2)]
# list of vertices of degree 3
three_edge_list = occurence_list[0][np.argwhere(occurence_list[:][1] == 3)]
sec = np.reshape(sec_edge_list, newshape=(len(sec_edge_list)))
border_set = set(sec)
inner_set = set()
for elem in three_edge_list :
for neigh in G[elem[0]].keys() :
if len(G[neigh]) == 2 :
border_set.add(elem[0])
return border_set
|
718a2b56438caf60d3ca4e3cd7419452c8fbbb63
| 3,639,377
|
from typing import Set
from datetime import datetime
def get_all_files(credentials: Credentials, email: str) -> Set['DriveResult']:
"""Get all files shared with the specified email in the current half-year
(January-June or July-December of the current year)"""
# Create drive service with provided credentials
service = build('drive', 'v3', credentials=credentials, cache_discovery=False)
all_user_files = []
next_page_token = None
date = datetime.today().date()
while True:
# Request the next page of files
metadata, next_page_token = request_files(service, next_page_token, email, date)
all_user_files = all_user_files + metadata
print('\r{} files processed'.format(len(all_user_files)), end='')
# If we have reached the end of the list of documents, next_page_token will be None
if next_page_token is None:
break
return {DriveResult(student_email=file['owners'][0]['emailAddress'],
file_name=file['name'],
create_time=file['createdTime'],
url=file['webViewLink'])
for file in all_user_files}
|
eb7e491cac08bada675f0d39414ae3d907686741
| 3,639,378
|
def _split_kwargs(model, kwargs, lookups=False, with_fields=False):
"""
Split kwargs into fields which are safe to pass to create, and
m2m tag fields, creating SingleTagFields as required.
If lookups is True, TagFields with tagulous-specific lookups will also be
matched, and the returned tag_fields will be a dict of tuples in the
format ``(val, lookup)``
The only tagulous-specific lookup is __exact
For internal use only - likely to change significantly in future versions
Returns a tuple of safe_fields, singletag_fields, tag_fields
If with_fields is True, a fourth argument will be returned - a dict to
look up Field objects from their names
"""
safe_fields = {}
singletag_fields = {}
tag_fields = {}
field_lookup = {}
for field_name, val in kwargs.items():
# Check for lookup
if lookups and "__" in field_name:
orig_field_name = field_name
field_name, lookup = field_name.split("__", 1)
# Only one known lookup
if lookup == "exact":
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
# Unknown - pass it on untouched
pass
else:
if isinstance(field, TagField):
# Store for later
tag_fields[field_name] = (val, lookup)
field_lookup[field_name] = field
continue
# Irrelevant lookup - no need to take special actions
safe_fields[orig_field_name] = val
continue
# No lookup
# Try to look up the field
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
# Assume it's something clever and pass it through untouched
# If it's invalid, an error will be raised later anyway
safe_fields[field_name] = val
# Next field
continue
field_lookup[field_name] = field
# Take special measures depending on field type
if isinstance(field, SingleTagField):
singletag_fields[field_name] = val
elif isinstance(field, TagField):
# Store for later
if lookups:
tag_fields[field_name] = (val, None)
else:
tag_fields[field_name] = val
else:
safe_fields[field_name] = val
if with_fields:
return safe_fields, singletag_fields, tag_fields, field_lookup
return safe_fields, singletag_fields, tag_fields
|
f73cb84bab0889b51962ed3504b6de265831d18f
| 3,639,379
|
def sliceResultToBytes(sr):
"""Copies a FLSliceResult to a Python bytes object. Does not free the FLSliceResult."""
if sr.buf == None:
return None
lib.FLSliceResult_Release(sr)
b = bytes( ffi.buffer(sr.buf, sr.size) )
return b
|
0e2207a99749b4cd3df4b71ca7338de4c0ad6a06
| 3,639,380
|
def cycle_dual(G, cycles, avg_fun=None):
"""
Returns dual graph of cycle intersections, where each edge
is defined as one cycle intersection of the original graph
and each node is a cycle in the original graph.
The general idea of this algorithm is:
* Find all cycles which share edges by an efficient dictionary
operation
* Those edges which border on exactly two cycles are connected
The result is a possibly disconnected version of the dual
graph which can be further processed.
The naive algorithm is O(n_cycles^2) whereas this improved
algorithm is better than O(n_cycles) in the average case.
"""
if avg_fun is None:
avg_fun = lambda c, w: average(c, weights=w)
dual = nx.Graph()
neighbor_cycles = find_neighbor_cycles(G, cycles)
# Construct dual graph
for ns in neighbor_cycles:
# Add cycles
for c, n in ((cycles[n], n) for n in ns):
dual.add_node(n, x=c.com[0], y=c.com[1], cycle=c, \
external=False, cycle_area=c.area())
# Connect pairs
if len(ns) == 2:
a, b = ns
c_a = cycles[a]
c_b = cycles[b]
sect = c_a.intersection(c_b)
wts = [G[u][v]['weight'] for u, v in sect]
conds = [G[u][v]['conductivity'] for u, v in sect]
wt = sum(wts)
#cond = average(conds, weights=wts)
#cond = min(conds)
cond = avg_fun(conds, wts)
dual.add_edge(a, b, weight=wt,
conductivity=cond, intersection=sect)
return dual
|
a923a4cea0f1d158e6936a68e513bd2285ea6b15
| 3,639,381
|
def get_timebucketedlog_reader(log, event_store):
"""
:rtype: TimebucketedlogReader
"""
return TimebucketedlogReader(log=log, event_store=event_store)
|
676e38a446f60dd8f2c90b38df572b2f5fc9c21e
| 3,639,383
|
def get_database_name(url):
"""Return a database name in a URL.
Example::
>>> get_database_name('http://foobar.com:5984/testdb')
'testdb'
:param str url: The URL to parse.
:rtype: str
"""
name = compat.urlparse(url).path.strip("/").split("/")[-1]
# Avoid re-encoding the name
if "%" not in name:
name = encode_uri_component(name)
return name
|
2916e5a5999aae68b018858701dfb5e695857f7f
| 3,639,384
|
def get_tags():
"""
Here we want to fetch the tags related to the current user.
To do that we get all of the user's posts and then collect all of their tags.
:return:
"""
result_tags = []
# Collect all of the user's posts and gather every post's tags together
def append_tag(user_posts):
tmp = []
for post in user_posts:
for tag in post.tags.all():
tmp.append(tag.tag_name)
return tmp
# If the current user exists, use the current user
if g.get('current_user', None):
user_posts_ = g.current_user.posts.all()
result_tags.extend(append_tag(user_posts_))
# Otherwise, use the default user
else:
user = User.query.get(1)
result_tags.extend(append_tag(user.posts.all()))
result_tags = list(set(result_tags))
return jsonify(result_tags)
|
821ca1bb222e4fe15ea336282fed0eb172d460f9
| 3,639,385
|
def a_star_search(graph, start, goal):
"""Runs an A* search on the specified graph to find a path from the ''start'' node to the ''goal'' node.
Returns a list of nodes specifying a minimal path between the two nodes.
If no path exists (disconnected components), returns an empty list.
"""
all_nodes = graph.get_all_node_ids()
if start not in all_nodes:
raise NonexistentNodeError(start)
if goal not in all_nodes:
raise NonexistentNodeError(goal)
came_from, cost_so_far, goal_reached = _a_star_search_internal(graph, start, goal)
if goal_reached:
path = reconstruct_path(came_from, start, goal)
path.reverse()
return path
else:
return []
|
f2eabef1e30f12460359ea45cbc089f8fb28e5f9
| 3,639,387
|
import click
def output_format_option(default: OutputFormat = OutputFormat.TREE):
"""
A ``click.option`` for specifying a format to use when outputting data.
Args:
default (:class:`~ape.cli.choices.OutputFormat`): Defaults to ``TREE`` format.
"""
return click.option(
"--format",
"output_format",
type=output_format_choice(),
default=default.value,
callback=lambda ctx, param, value: OutputFormat(value.upper()),
)
|
9f73a8b8d270975d16ec9d3b2962f4fd61491aab
| 3,639,388
|
def compute_errors(u_e, u):
"""Compute various measures of the error u - u_e, where
u is a finite element Function and u_e is an Expression.
Adapted from https://fenicsproject.org/pub/tutorial/html/._ftut1020.html
"""
print('u_e',u_e.ufl_element().degree())
# Get function space
V = u.function_space()
# Explicit computation of L2 norm
error = (u - u_e)**2*dl.dx
E1 = np.sqrt(abs(dl.assemble(error)))
# Explicit interpolation of u_e onto the same space as u
u_e_ = dl.interpolate(u_e, V)
error = (u - u_e_)**2*dl.dx
E2 = np.sqrt(abs(dl.assemble(error)))
# Explicit interpolation of u_e to higher-order elements.
# u will also be interpolated to the space Ve before integration
Ve = dl.FunctionSpace(V.mesh(), 'P', 5)
u_e_ = dl.interpolate(u_e, Ve)
error = (u - u_e_)**2*dl.dx
E3 = np.sqrt(abs(dl.assemble(error)))
# Infinity norm based on nodal values
u_e_ = dl.interpolate(u_e, V)
E4 = abs(u_e_.vector().get_local() - u.vector().get_local()).max()
# L2 norm
E5 = dl.errornorm(u_e, u, norm_type='L2', degree_rise=3)
# H1 seminorm
E6 = dl.errornorm(u_e, u, norm_type='H10', degree_rise=3)
# Collect error measures in a dictionary with self-explanatory keys
errors = {'u - u_e': E1,
'u - interpolate(u_e, V)': E2,
'interpolate(u, Ve) - interpolate(u_e, Ve)': E3,
'infinity norm (of dofs)': E4,
'L2 norm': E5,
'H10 seminorm': E6}
return errors
|
c9fbd459ab1c3cd65fb4d290e1399dd4937ed5a2
| 3,639,389
|
def list_to_str(input_list, delimiter=","):
"""
Concatenates list elements, joining them by the separator specified by the
parameter "delimiter".
Parameters
----------
input_list : list
List with elements to be joined.
delimiter : String, optional, default ','.
The separator used between elements.
Returns
-------
String
Returns a string resulting from concatenation of the list's elements, separated by the delimiter.
"""
return delimiter.join(
[x if isinstance(x, str) else repr(x) for x in input_list]
)
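# Example: non-string items are rendered with repr() before joining.
# list_to_str([1, "a", 2.5])  ->  "1,a,2.5"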
|
4decfbd5a9d637f27473ec4a917998137af5ffe0
| 3,639,390
|
def strategy_supports_no_merge_call():
"""Returns if the current `Strategy` can operate in pure replica context."""
if not distribution_strategy_context.has_strategy():
return True
strategy = distribution_strategy_context.get_strategy()
return not strategy.extended._use_merge_call() # pylint: disable=protected-access
|
dc2b609a52d7e25b372e0cd1a04a0637d76b8ec1
| 3,639,391
|
def is_group(obj):
"""Returns true if the object is a h5py-like group."""
kind = get_h5py_kind(obj)
return kind in ["file", "group"]
|
37c86b6d4f052eab29106b9d51c17cdd36b1dc98
| 3,639,392
|
from bs4 import BeautifulSoup
from nltk.stem import WordNetLemmatizer
from stop_words import get_stop_words
def analyze_page(page_url):
""" Analyzes the content at page_url and returns a list of the highes weighted
words.json/phrases and their weights """
html = fetch_html(page_url)
if not html:
return
soup = BeautifulSoup(html, "html.parser")
word_counts = {}
url_words = words_in_url(page_url)
stop_words = get_stop_words('english')
words_to_add = ['like', '...']
stop_words = stop_words + words_to_add
ignore_tags = ["script", "img", "meta", "style"] # html tags to ignore
weights = {'title': 15, 'div': .5, 'a': .3, 'span': .5, "link": .2, 'url': 22, \
'two' : 3, 'three': 3, 'four': 5, 'five': 5} # adjust weights here
lemma = WordNetLemmatizer()
for tag in soup.find_all():
if tag.name not in ignore_tags:
words = tag.find(text=True,
recursive=False) # with bs4, recursive = False means we will not be double counting tags
if words:
words = words.split()
words = [w for w in words if w not in stop_words] # remove common stop words
words = [w for w in words if len(w) > 1] # ignore single character words
for index, word in enumerate(words):
word_lower = lemma.lemmatize(word.lower()) # lemmatize/stem words
multiplier = 1
if tag.name in weights: # assign weight based on HTML tag
multiplier = weights[tag.name]
if word_lower in word_counts:
word_counts[word_lower] = word_counts[word_lower] + (1 * multiplier)
else:
word_counts[word_lower] = 1 * multiplier
if index < (len(words) - 1): # two word phrase
two_word = word_lower + ' ' + lemma.lemmatize((words[index + 1]).lower()).strip()
two_word = two_word.strip()
if two_word != word_lower:
if two_word in word_counts:
word_counts[two_word] = word_counts[two_word] + (weights['two'] * multiplier)
else:
word_counts[two_word] = 1 * multiplier
if index < (len(words) - 2): # three word phrase
two_word = word_lower + ' ' + lemma.lemmatize((words[index + 1]).lower()).strip() \
+ ' ' + lemma.lemmatize((words[index + 2]).lower()).strip()
two_word = two_word.strip()
if two_word != word_lower:
if two_word in word_counts:
word_counts[two_word] = word_counts[two_word] + (weights['three'] * multiplier)
else:
word_counts[two_word] = 1 * multiplier
if index < (len(words) - 3): # four word phrase
two_word = word_lower + ' ' + lemma.lemmatize((words[index + 1]).lower()).strip() \
+ ' ' + lemma.lemmatize((words[index + 2]).lower()).strip() \
+ ' ' + lemma.lemmatize((words[index + 3]).lower()).strip()
two_word = two_word.strip()
if two_word != word_lower:
if two_word in word_counts:
word_counts[two_word] = word_counts[two_word] + (weights['four'] * multiplier)
else:
word_counts[two_word] = 1 * multiplier
if index < (len(words) - 4): # five word phrase
two_word = word_lower + ' ' + lemma.lemmatize((words[index + 1]).lower()).strip() \
+ ' ' + lemma.lemmatize((words[index + 2]).lower()).strip() \
+ ' ' + lemma.lemmatize((words[index + 3]).lower()).strip() \
+ ' ' + lemma.lemmatize((words[index + 4]).lower()).strip()
two_word = two_word.strip()
if two_word != word_lower:
if two_word in word_counts:
word_counts[two_word] = word_counts[two_word] + (weights['five'] * multiplier)
else:
word_counts[two_word] = 1 * multiplier
for word in url_words: # add weight for words in the url string
if word in word_counts:
word_counts[word] = word_counts[word] + weights['url']
def determine(x, top_25):
""" Helper function for removing phrases that are substrings of other phrases """
if len(x[0].split()) > 1:
# print(x[0])
for i in top_25:
if x[0] in i[0] and x[0] != i[0]:
return False
return True
top_25 = list(reversed(sorted(word_counts.items(), key=lambda x: x[1])[-25:])) # grab highest 25 weighted items
final_list = [x for x in top_25 if determine(x, top_25)] # remove phrases that are substrings of other phrases
return final_list
|
55928add263defa51a171a2dfb20bffe6491430c
| 3,639,393
|
from typing import Iterable
from typing import List
def load_config_from_paths(config_paths: Iterable[str], strict: bool = False) -> List[dict]:
"""
Load configuration from paths containing \*.yml and \*.json files.
As noted in README.config, .json will take precedence over .yml files.
:param config_paths: Path to \*.yml and \*.json config files.
:param strict: Set to true to error if the file is not found.
:return: A list of configs in increasing order of precedence.
"""
# Put the .json configs after the .yml configs to make sure .json takes
# precedence over .yml.
sorted_paths = sorted(config_paths, key=lambda x: x.endswith(".json"))
return list(map(lambda path: load_config_from_file(path, strict), sorted_paths))
|
8e32c46e7e620ae02dffcc652b32bb0098a0a2b3
| 3,639,394
|
from typing import List
def sort_flats(flats_unsorted: List[arimage.ARImage]):
""" Sort flat images into a dictionary with "filter" as the key """
if not flats_unsorted:
return None
flats = { }
logger.info("Sorting flat images by filter")
for flat in flats_unsorted:
fl = flat.filter
if fl not in flats:
# Found a flat with a new filter
# Create a new array in the dictionary
logger.info("Found a flat with filter=" + fl)
flats[fl] = []
flats[fl].append(flat)
return flats
|
d0e3fe2c7e1a8f34cf7ed8f6985d3dd7bc82f3f1
| 3,639,395
|
import concurrent.futures
import logging
def run_in_parallel(function, list_of_kwargs_to_function, num_workers):
"""Run a function on a list of kwargs in parallel with ThreadPoolExecutor.
Adapted from code by mlbileschi.
Args:
function: a function.
list_of_kwargs_to_function: list of dictionary from string to argument
value. These will be passed into `function` as kwargs.
num_workers: int.
Returns:
list of return values from function.
"""
if num_workers < 1:
raise ValueError(
'Number of workers must be greater than 0. Was {}'.format(num_workers))
with concurrent.futures.ThreadPoolExecutor(num_workers) as executor:
futures = []
logging.info(
'Adding %d jobs to process pool to run in %d parallel '
'threads.', len(list_of_kwargs_to_function), num_workers)
for kwargs in list_of_kwargs_to_function:
f = executor.submit(function, **kwargs)
futures.append(f)
for f in concurrent.futures.as_completed(futures):
if f.exception():
# Propagate exception to main thread.
raise f.exception()
return [f.result() for f in futures]
|
24b99f68ba1221c4f064a65540e6c165c9474e43
| 3,639,396
|
def show_project(project_id):
"""return a single project formatted according to Swagger spec"""
try:
project = annif.project.get_project(
project_id, min_access=Access.hidden)
except ValueError:
return project_not_found_error(project_id)
return project.dump()
|
3f7108ec7cb27270f91517bef194f3514c3eb4e5
| 3,639,398
|
def pollard_rho(n: int, e: int, seed: int = 2) -> int:
"""
Pollard's rho algorithm for breaking an RSA key.
n - n from the public key
e - e from the public key
seed - base value used to run the cycle of trials
"""
a, b = seed, seed
p = 1
while (p == 1):
a = ( pow(a,2) + 1 ) % n
b = ( pow(b,2) + 1 )
b = ( pow(b,2) + 1 ) % n
p = gcd( abs(a-b)%n, n)
if p == n:
return pollard_rho(n, e, seed+1) #brutal_force(n, e,) #
else:
q = n // p
phi = (p - 1) * (q - 1)
d = find_inverse(e, phi)
return d
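# Illustrative example (relies on the module's gcd/find_inverse helpers): for the
# textbook key n = 3233 (= 53 * 61), e = 17, phi = 3120, so pollard_rho(3233, 17)
# should recover the private exponent d = 2753.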
|
4870627a5fca863d4110f3cadfdc1e7b618c2a48
| 3,639,399
|
import warnings
def _url_from_string(url):
"""
Generate actual tile url from tile provider definition or template url.
"""
if "tileX" in url and "tileY" in url:
warnings.warn(
"The url format using 'tileX', 'tileY', 'tileZ' as placeholders "
"is deprecated. Please use '{x}', '{y}', '{z}' instead.",
FutureWarning,
)
url = (
url.replace("tileX", "{x}").replace("tileY", "{y}").replace("tileZ", "{z}")
)
return {"url": url}
|
f3d4393163e48a7949f3229c55ea8951411dcd63
| 3,639,400
|
import socket
def get_reverse_dns(ip_address: str) -> str:
"""Does a reverse DNS lookup and returns the first IP"""
try:
rev = socket.gethostbyaddr(ip_address)
if rev:
return rev[0]
return "" # noqa
except (socket.herror, socket.gaierror, TypeError, IndexError):
return ""
|
58a27e25f7a9b11ab7dcddebeea743b7864f80f1
| 3,639,401
|
from os.path import abspath, dirname, join
def abs_path(file_path):
"""
Returns the absolute path from the file that calls this function to file_path.
Needed to access other files within aide_gui when initialized by aide.
Parameters
----------
file_path: String
The relative file path from the file that calls this function.
"""
return join(dirname(abspath(__file__)), file_path)
|
63e4a4b0c8fafb5920c78310fda90b119fd18104
| 3,639,402
|
def function(x: np.ndarray) -> float:
"""The ellipse function is x0^2 + 2 * x1^2 + 3 * x2^2 + ..."""
return np.linalg.norm(np.sqrt(np.arange(1, 1 + len(x))) * x) ** 2
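# Worked example: for x = [1., 1., 1.] the value is 1*1 + 2*1 + 3*1 = 6 (up to
# floating point rounding), matching the weighted-sum form in the docstring.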
|
efe468177ff232d45d18385fa2744a9cf63739eb
| 3,639,403
|
def replace_with_encoded_bits(one_hot_matrix, enum_val, add_value, last_col_index):
"""
Generate encoded bits for a categorical data value using one hot encoding.
:param one_hot_matrix: matrix representing the encoding of categorical data value to 1-hot encoding
:param enum_val: categorical data value, could be np.nan
:param add_value: set to 1 if a reference value is needed in addition to 1-hot encoding
:param last_col_index: index into encoding for np.nan if exists
:return: vector representing the encoded values for a enum value
"""
if np.isnan(enum_val): # if data value is np.nan
return one_hot_matrix[last_col_index]
else:
return one_hot_matrix[int(enum_val-add_value)]
|
d5ee111d74071fdbaa3890b35a193aa9e24df745
| 3,639,404
|
def cosine_similarity(n_co_elements, n_first_element, n_second_element):
"""
Description
A function which returns the cosine similarity between two elements.
Arguments
:param n_co_elements: Number of co-elements.
:type n_co_elements: int
:param n_first_element: Size of the first element.
:type n_first_element: int
:param n_second_element: Size of the second element
:type n_second_element: int
"""
try:
return n_co_elements / (sqrt(n_first_element) * sqrt(n_second_element))
except ZeroDivisionError:
return 0
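# Worked example: 3 shared elements between collections of size 4 and 9 gives
# 3 / (sqrt(4) * sqrt(9)) = 3 / 6 = 0.5, so cosine_similarity(3, 4, 9) -> 0.5.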
|
ea35e47ecf3e77a95d535b0421afbe5f3a679817
| 3,639,405
|
def AddForwardEulerDynamicsConstraint(mp, A, B, x, u, xnext, dt):
"""
Add a dynamics constraint to the given Drake mathematical program mp, representing
the forward Euler dynamics:
xnext = x + (A*x + B*u)*dt,
where x, u, and xnext are symbolic variables.
"""
n = A.shape[0]
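# The constraint xnext = x + (A*x + B*u)*dt is encoded as the linear equality
# Aeq @ [x; u; xnext] = beq, i.e. (I + A*dt)*x + (B*dt)*u - xnext = 0.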
Aeq = np.hstack([ (np.eye(n)+A*dt), B*dt, -np.eye(n) ])
beq = np.zeros((n,1))
xeq = np.hstack([ x, u, xnext])[np.newaxis].T
return mp.AddLinearEqualityConstraint(Aeq,beq,xeq)
|
e0070aa28b61833330706e3934cbfaa8eb1c1d1b
| 3,639,406
|
import json
async def light_pure_rgb_msg_fixture(hass):
"""Return a mock MQTT msg with a pure rgb light actuator message."""
light_json = json.loads(
await hass.async_add_executor_job(load_fixture, "ozw/light_pure_rgb.json")
)
message = MQTTMessage(topic=light_json["topic"], payload=light_json["payload"])
message.encode()
return message
|
93156674ece713d6c9371f64840852a3d5d292b5
| 3,639,407
|
import csv
def make_header_names_thesaurus(header_names_thesaurus_file=HEADER_NAMES_THESAURUS_FILE):
"""
Get a dict mapping ideal domain-specific phrases to list of alternates.
Parameters
----------
header_names_thesaurus_file : str
Filepath.
Returns
-------
Dict of {'ideal phrase': ['alt_phrase0', 'alt_phrase1', ...]}.
"""
with open(header_names_thesaurus_file, 'r', newline='') as f:
f.readline() # skip headers
csvreader = csv.reader(f)
header_names_thesaurus = {}
for row in csvreader:
header_primary_name = row[0]
header_names_thesaurus[header_primary_name] = [x.lower().rstrip() for x in filter(None,row)]
return header_names_thesaurus
|
20f89be5dfbdf0feac5facddcaeeddb346d394a8
| 3,639,408
|
def split_train_valid_test(adata_here,
training_proportion=0.6,
validation_proportion=0.2,
test_proportion=0.2,
rng=None,copy_adata=False):
"""Split cells into training, validation and test
"""
assert training_proportion<=1.0
assert validation_proportion<=1.0
assert test_proportion<=1.0
assert (training_proportion+validation_proportion+test_proportion)<=1.0
num_examples=adata_here.n_obs
if rng is None:
idx_shuff=np.random.RandomState(seed=77).permutation(range(num_examples))
else:
idx_shuff=rng.permutation(range(num_examples))
training_threshold=int(num_examples*training_proportion)
validation_threshold=int(num_examples*(training_proportion+validation_proportion))
training=range(training_threshold)
validation=range(training_threshold,min(validation_threshold,num_examples))
test=range(validation_threshold,num_examples)
#make obs with train, validation, test
train_test_df=pd.DataFrame({'cell':adata_here.obs_names,
'train_valid_test':'train'},index=adata_here.obs_names)
train_test_df=train_test_df.iloc[idx_shuff,:]
train_test_df.iloc[training,1]='train'
train_test_df.iloc[validation,1]='valid'
train_test_df.iloc[test,1]='test'
print('splitting',train_test_df.loc[adata_here.obs_names,'train_valid_test'].value_counts())
return(train_test_df.loc[adata_here.obs_names,'train_valid_test'])
|
ccff7c2b1372b74429bb6acb04df1dd66ad5c113
| 3,639,409
|
def index(request):
""" Main index. Editor view. """
# Render editor
body = render_to_string('editor.html', {})
data = {
'body': body
}
# Render page layout
return render(request, 'index.html', data)
|
bab60def7716ae11d328a95274d2ee7b6305dbaf
| 3,639,411
|
def isUsdExt(ext):
""" Check if the given extension is an expected USD file extension.
:Parameters:
ext : `str`
:Returns:
If the file extension is a valid USD extension
:Rtype:
`bool`
"""
return ext.lstrip('.') in USD_EXTS
|
5c2f7a48869c9ab4a94b4d8a84e892b76938e91a
| 3,639,412
|
def _get_dflt_lexicon(a_pos, a_neg):
"""Generate default lexicon by putting in it terms from seed set.
@param a_pos - set of positive terms
@param a_neg - set of negative terms
@return list(3-tuple) - list of seed set terms with uniform scores and
polarities
"""
return [(w, POSITIVE, 1.) for w in a_pos] \
+ [(w, NEGATIVE, -1.) for w in a_neg]
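# Illustrative usage (POSITIVE/NEGATIVE are the module's polarity constants):
# _get_dflt_lexicon({"good"}, {"bad"}) -> [("good", POSITIVE, 1.0), ("bad", NEGATIVE, -1.0)]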
|
b06a1f81629368447227a846ac3216220beaa77b
| 3,639,413
|
def rct(target_t : Tensor, source_t : Tensor, target_mask_t : Tensor = None, source_mask_t : Tensor = None, mask_cutoff = 0.5) -> Tensor:
"""
Transfer color using rct method.
arguments
target_t Tensor( [N]CHW ) C==3 (BGR) float16|32
source_t Tensor( [N]CHW ) C==3 (BGR) float16|32
target_mask_t(None) Tensor( [N]CHW ) C==1|3 float16|32
source_mask_t(None) Tensor( [N]CHW ) C==1|3 float16|32
reference: Color Transfer between Images https://www.cs.tau.ac.il/~turkel/imagepapers/ColorTransfer.pdf
"""
if target_t.ndim != source_t.ndim:
raise ValueError('target_t.ndim != source_t.ndim')
if target_t.ndim == 3:
ch_axis = 0
spatial_axes = (1,2)
else:
ch_axis = 1
spatial_axes = (2,3)
target_t = cvt_color(target_t, 'BGR', 'LAB', ch_axis=ch_axis)
source_t = cvt_color(source_t, 'BGR', 'LAB', ch_axis=ch_axis)
target_stat_t = target_t
if target_mask_t is not None:
target_stat_t = any_wise('O = I0*(I1 >= I2)', target_stat_t, target_mask_t, np.float32(mask_cutoff) )
source_stat_t = source_t
if source_mask_t is not None:
source_stat_t = any_wise('O = I0*(I1 >= I2)', source_stat_t, source_mask_t, np.float32(mask_cutoff) )
target_stat_mean_t, target_stat_var_t = moments(target_stat_t, axes=spatial_axes)
source_stat_mean_t, source_stat_var_t = moments(source_stat_t, axes=spatial_axes)
target_t = any_wise(f"""
O_0 = clamp( (I0_0 - I1_0) * sqrt(I2_0) / sqrt(I3_0) + I4_0, 0.0, 100.0);
O_1 = clamp( (I0_1 - I1_1) * sqrt(I2_1) / sqrt(I3_1) + I4_1, -127.0, 127.0);
O_2 = clamp( (I0_2 - I1_2) * sqrt(I2_2) / sqrt(I3_2) + I4_2, -127.0, 127.0);
""", target_t, target_stat_mean_t, source_stat_var_t, target_stat_var_t, source_stat_mean_t,
dim_wise_axis=ch_axis)
return cvt_color(target_t, 'LAB', 'BGR', ch_axis=ch_axis)
|
87f350c3e8cef10ef2e3bc883457acf861ab064c
| 3,639,415
|
def random_policy(num_actions):
"""
Returns a policy where all actions have equal probabilities, i.e., a uniform distribution.
"""
return np.zeros((num_actions,)) + 1 / num_actions
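# Example: random_policy(4) -> array([0.25, 0.25, 0.25, 0.25])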
|
9a95865cf3bc7634bc4bf033f343b5811ba40c9f
| 3,639,416
|
def find_object(func, name, *args, **kwargs):
"""Locate an object by name or identifier
This function will use the `name` argument to attempt to
locate an object. It will first attempt to find the
object by identifier and if that fails, it will attempt
to find the object by name.
Since object names are non-unique values in the Pureport
API, this function will return the first value it finds in the
case of multiple objects.
If the requested object can not be found, this function
will raise an exception.
:param name: The name or identifier of the object to locate
:type name: str
:returns: An instance of the object found
:rtype: `pureport.models.Model`
:raises: `pureport.exceptions.PureportError`
"""
objects = func(*args, **kwargs)
match = None
name_matches = list()
for item in objects:
if name == item.id:
match = item
break
elif name == item.name:
name_matches.append(item)
else:
if not name_matches:
raise PureportError("could not locate object `{}`".format(name))
if match is None:
match = first(name_matches)
return match
|
6ee8085d42883798c1f3ab5d0a7711af26b2b614
| 3,639,417
|
def CreateHSpline(points, multiple=False):
"""
Construct an H-spline from a sequence of interpolation points
Args:
points (IEnumerable<Point3d>): Points to interpolate
"""
url = "rhino/geometry/nurbscurve/createhspline-point3darray"
if multiple: url += "?multiple=true"
args = [points]
if multiple: args = [[item] for item in points]
response = Util.ComputeFetch(url, args)
response = Util.DecodeToCommonObject(response)
return response
|
b5f7b2000dcce04a60087ab32956fa4701d1dadc
| 3,639,422
|
def get_instance_embedding_loss(embedding,
instance_loss_type,
instance_labels,
crop_area,
crop_min_height,
num_samples=10,
similarity_strategy='dotproduct',
loss_strategy='softmax'):
"""Returns the instance embedding loss based on instance_loss_type.
Args:
embedding: A tf.float32 tensor of size [height, width, dims] or
[batch_size, height, width, dims].
instance_loss_type: A string containing the type of the embedding loss.
instance_labels: A tf.int32 tensor of size [height, width] or
[batch_size, height, width] containing instance ids.
Assumed values in target start from 0 and cover 0 to N-1.
crop_area: Area of the crop window. Only used in some cases of embedding
loss.
crop_min_height: Minimum height of the crop window. Only used in some cases
of embedding loss.
num_samples: Number of samples. Only used in some cases of embedding loss.
similarity_strategy: Defines the method for computing similarity between
embedding vectors. Possible values are 'dotproduct' and
'distance'.
loss_strategy: Defines the type of loss including 'softmax' or 'sigmoid'.
Returns:
Instance embedding loss.
Raises:
ValueError: If instance loss type is not known.
"""
# Handling the case where there is a batch size.
embedding_shape = embedding.get_shape().as_list()
if len(embedding_shape) == 4:
num_batches = embedding_shape[0]
losses = []
embedding_list = tf.unstack(embedding)
instance_label_list = tf.unstack(instance_labels)
for i in range(num_batches):
embedding_i = embedding_list[i]
instance_labels_i = instance_label_list[i]
loss = get_instance_embedding_loss(embedding_i,
instance_loss_type,
instance_labels_i,
crop_area,
crop_min_height,
num_samples,
similarity_strategy,
loss_strategy)
losses.append(loss)
return tf.reduce_mean(tf.stack(losses))
if instance_loss_type == 'npair':
return instance_embedding_npair_loss(
embedding=embedding,
instance_labels=instance_labels,
crop_min_height=crop_min_height,
crop_area=crop_area,
similarity_strategy=similarity_strategy,
loss_strategy=loss_strategy)
elif instance_loss_type == 'npair_r_c':
return instance_embedding_npair_random_center_loss(
embedding=embedding,
instance_labels=instance_labels,
similarity_strategy=similarity_strategy,
loss_strategy=loss_strategy)
elif instance_loss_type == 'npair_r_c_r_s':
return instance_embedding_npair_random_center_random_sample_loss(
embedding=embedding,
instance_labels=instance_labels,
num_samples=num_samples,
similarity_strategy=similarity_strategy,
loss_strategy=loss_strategy)
elif instance_loss_type == 'npair_r_s':
return instance_embedding_npair_random_sample_loss(
embedding=embedding,
instance_labels=instance_labels,
num_samples=num_samples,
similarity_strategy=similarity_strategy,
loss_strategy=loss_strategy)
elif instance_loss_type == 'iou':
return instance_embedding_iou_loss(
embedding=embedding,
instance_labels=instance_labels,
num_samples=num_samples,
similarity_strategy=similarity_strategy)
else:
raise ValueError('Instance loss type is not known')
|
ff1e08ea60f4c937fd44bec967eda37d6916ef00
| 3,639,423
|
def str_to_array(value):
"""
Check if value can be parsed to a tuple or an array.
Because Spark can handle tuples we will try to transform tuples to arrays
:param value:
:return:
"""
try:
if isinstance(literal_eval((value.encode('ascii', 'ignore')).decode("utf-8")), (list, tuple)):
return True
except (ValueError, SyntaxError,):
pass
|
d565021781a3c2c19c882073ddc6cbd24334b74a
| 3,639,424
|
import inspect
def get_current_func_name():
"""for python version greater than equal to 2.7"""
return inspect.stack()[1][3]
|
002d318bcab98639cab6c38317322f247a1ad0e0
| 3,639,425
|
def getParmNames(parmsDef):
"""Return a list of parm names in a model parm definition
parmsDef: list of tuples, each tuple is a list of parms and a time
constraint. Call with modelDict[modelname]['Parms'].
Returns: List of string parameter names
Here's an example of how to remove unused parms from Fcst, this can
run in localConfig:
parmsToRemove=[]
for p in getParmNames(modelDict['Fcst']):
pl=p.lower()
for t in ['period','swell','wave','surf', 'surge']:
if t in pl:
parmsToRemove.append(p)
break
removeParms(modelDict,'Fcst',parmsToRemove)
"""
result=[]
for pList,tc in parmsDef:
# p is the parmDef tuple where first item is the parm name
newParms=[p[0] for p in pList]
result+=newParms
return sorted(result)
|
785661200c388f23c5f38ae67e773a43fd8f57b3
| 3,639,426
|
from copy import deepcopy
def dict_merge(lft, rgt):
"""
Recursive dict merge.
Recursively merges dict's. not just simple lft['key'] = rgt['key'], if
both lft and rgt have a key who's value is a dict then dict_merge is
called on both values and the result stored in the returned dictionary.
"""
if not isinstance(rgt, dict):
return rgt
result = deepcopy(lft)
for key, val in rgt.items():
if key in result and isinstance(result[key], dict):
result[key] = dict_merge(result[key], val)
else:
result[key] = deepcopy(val)
return result
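# Example: nested dicts are merged key by key; non-dict values from rgt win.
# dict_merge({"a": {"x": 1}, "b": 1}, {"a": {"y": 2}, "b": 2})
#   -> {"a": {"x": 1, "y": 2}, "b": 2}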
|
c939fed14ff10452663bc5a32247b21f6170897a
| 3,639,427
|
def modified_zscore(x: np.ndarray) -> np.ndarray:
"""
Modified z-score transformation.
The modified z score might be more robust than the standard z-score because
it relies on the median for calculating the z-score. It is less influenced
by outliers when compared to the standard z-score.
Parameters
----------
x: (N,) np.ndarray
numbers
Returns
-------
z: (N,) np.ndarray
z-scored numbers computed using modified z-score
"""
med = np.median(x)
med_abs_dev = np.median(np.abs(x - med))
return (x - med) / (1.486 * med_abs_dev)
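# Worked example: for x = [1, 2, 3, 4, 100] the median is 3 and the median
# absolute deviation is 1, so the outlier scores (100 - 3) / 1.486 ~ 65.3
# while the inliers stay roughly within +/- 1.4.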
|
8f0933bf30ec55ba6305c9bd926437bb0715a938
| 3,639,428
|
def update_profile(email, username, name, bio, interest, picture=None):
"""更新 profile"""
db = get_db()
cursor = db.cursor()
# query user
user = get_user_by_email(email)
email = user['email']
profile_id = user['profile_id']
if profile_id is None:
# add profile
cursor.execute(
"INSERT INTO profiles (username, name, bio, interest, picture) VALUES (?, ?, ?, ?, ?)",
(username, name, bio, interest, picture)
)
db.commit()
profile_id = cursor.lastrowid
cursor.execute(
"UPDATE users SET profile_id = ? WHERE email=?",
(profile_id, email)
)
db.commit()
else:
# Update profile
if picture:
sql = "UPDATE profiles SET username=?,name=?,bio=?,interest=?,picture=? WHERE id=?"
values = (username, name, bio, interest, picture, profile_id)
else:
sql = "UPDATE profiles SET username=?,name=?,bio=?,interest=? WHERE id=?"
values = (username, name, bio, interest, profile_id)
cursor.execute(
sql,
values,
)
db.commit()
return True
|
0b13d81f9d36198d4660179eae7616d8f25ee37e
| 3,639,429
|
import zlib
import marshal
def serialize(object):
"""
Serialize the data into bytes using marshal and zlib
Args:
object: a value
Returns:
Returns a bytes object containing compressed with zlib data.
"""
return zlib.compress(marshal.dumps(object, 2))
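# A matching inverse for round-tripping (a sketch; not part of the original helper):
def deserialize(data):
    """Decompress with zlib and unmarshal back to the original value."""
    return marshal.loads(zlib.decompress(data))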
|
650cbc8937df5eae79960f744b69b8b12b623195
| 3,639,430
|
def logo_if(interp, expr, block, elseBlock=None):
"""
IF tf instructionlist
(IF tf instructionlist1 instructionlist2)
command. If the first input has the value TRUE, then IF runs
the second input. If the first input has the value FALSE, then
IF does nothing. (If given a third input, IF acts like IFELSE,
as described below.) It is an error if the first input is not
either TRUE or FALSE.
"""
if expr:
return logo_eval(interp, block)
elif elseBlock is not None:
return logo_eval(interp, elseBlock)
|
94f143f59fa02f059469f8f17a3ff11093110c84
| 3,639,431
|
import itertools
def select_model_general(
df,
grid_search,
target_col_name,
frequency,
partition_columns=None,
parallel_over_columns=None,
executor=None,
include_rules=None,
exclude_rules=None,
country_code_column=None,
output_path="",
persist_cv_results=False,
persist_cv_data=False,
persist_model_reprs=False,
persist_best_model=False,
persist_partition=False,
persist_model_selector_results=False,
):
"""Run cross validation on data and select best model
Best models are selected for each timeseries and if wanted persisted.
Parameters
----------
df : pandas.DataFrame
Container holding historical data for training
grid_search : sklearn.model_selection.GridSearchCV
Preconfigured grid search definition which determines which models
and parameters will be tried
target_col_name : str
Name of target column
frequency : str
Temporal frequency of data.
Data with different frequency will be resampled to this frequency.
partition_columns : list, tuple
Column names based on which the data should be split up / partitioned
parallel_over_columns : list, tuple
Subset of partition_columns, that are used to parallel split.
executor : prefect.engine.executors
Provide prefect's executor. Only valid when `parallel_over_columns` is set.
For more information see https://docs.prefect.io/api/latest/engine/executors.html
include_rules : dict
Dictionary with keys being column names and values being list of values to include in
the output.
exclude_rules : dict
Dictionary with keys being column names and values being list of values to exclude
from the output.
country_code_column : str
Name of the column with country code, which can be used for supplying holiday
(i.e. having gridsearch with HolidayTransformer with argument `country_code_column`
set to this one).
output_path : str
Path to directory for storing the output, default behavior is current working directory
persist_cv_results : bool
If True cv_results of sklearn.model_selection.GridSearchCV as pandas df
will be saved as pickle for each partition
persist_cv_data : bool
If True the pandas df detail cv data
will be saved as pickle for each partition
persist_model_reprs : bool
If True model reprs will be saved as json for each partition
persist_best_model : bool
If True best model will be saved as pickle for each partition
persist_partition : bool
If True dictionary of partition label will be saved as json for each partition
persist_model_selector_results : bool
If True ModelSelectoResults with all important information
will be saved as pickle for each partition
Returns
-------
list
List of ModelSelectorResult
"""
if parallel_over_columns is not None:
# run prefect flow with paralellism
flow_result = run_model_selection(**locals())
# access result of select_model and flatten it
result = flow_result[1].result[flow_result[0].get_tasks("select_model")[0]].result
flat_list = list(itertools.chain.from_iterable(result))
return flat_list
else:
partition_columns = partition_columns if partition_columns is not None else []
# run without prefect
df_prep = df.pipe(filter_data, include_rules=include_rules, exclude_rules=exclude_rules).pipe(
prepare_data_for_training,
frequency=frequency,
partition_columns=partition_columns,
country_code_column=country_code_column,
)
result = select_model(
df=df_prep,
target_col_name=target_col_name,
partition_columns=partition_columns,
grid_search=grid_search,
parallel_over_dict=None,
frequency=frequency,
country_code_column=country_code_column,
)
if any(
[
persist_cv_results,
persist_cv_data,
persist_model_reprs,
persist_partition,
persist_best_model,
persist_model_selector_results,
]
):
persist_experts_in_physical_partition(
results=result,
folder_path=output_path,
persist_cv_results=persist_cv_results,
persist_cv_data=persist_cv_data,
persist_model_reprs=persist_model_reprs,
persist_partition=persist_partition,
persist_best_model=persist_best_model,
persist_model_selector_results=persist_model_selector_results,
)
return result
|
1c286b8cf922a50c1c1071aa0d0506b0cf102a6b
| 3,639,432
|
def create_arma_sample(ar_order=1, ma_order=1, size=100):
"""Get a random ARMA sample.
Parameters
----------
ar_order, ma_order, size : int
Values for the desired AR order, MA order and sample size.
Returns
-------
An ARMA sample as a pandas Series.
"""
ar_coeff = np.linspace(1, -0.9, ar_order + 1) # arbitrary ar coefficients
ma_coeff = np.linspace(1, 0.9, ma_order + 1) # arbitrary ma coefficients
sample = tsa.ArmaProcess(ar_coeff, ma_coeff).generate_sample(size)
index = pd.date_range(start=date.today(), periods=size, freq="D")
return pd.Series(sample, index=index, name="sample")
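# Illustrative usage: a 50-point daily ARMA(2, 1) series indexed from today.
# sample = create_arma_sample(ar_order=2, ma_order=1, size=50)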
|
e859413cee0a20e51fc80aeffbb75b3ada83f010
| 3,639,433
|
from PIL import Image
def get_img(file_path, gray=False):
"""
Load the input image.
:param file_path: path to the image file
:param gray: whether to convert the image to grayscale
:return: img
"""
try:
img = Image.open(file_path)
if gray:
img = img.convert('L')
return img
except Exception:
print("不支持的图片格式")
return None
|
ac3ad78a1ce877905f550ebc43b7e9a6335fd762
| 3,639,434
|
from datetime import datetime, timedelta
def working_days(days: int):
"""Return a list of N workingdays
Keyword arguments:
days -- days past
"""
dates = []
today = datetime.utcnow()
for i in range(days):
day = today - timedelta(days=i)
day = day.date()
dates.append(day)
for idx, date in enumerate(dates):
if date.weekday() == 6:
for i in range(idx, len(dates)):
dates[i] = dates[i] - timedelta(days=2)
if date.weekday() == 5:
for i in range(idx, len(dates)):
dates[i] = dates[i] - timedelta(days=1)
return dates
|
222002b53bcf536f7b31993a22424446fcce24cc
| 3,639,435
|
def GetFile(message=None, title=None, directory=None, fileName=None,
allowsMultipleSelection=False, fileTypes=None):
"""
A get-file dialog.
Optionally a `message`, `title`, `directory`, `fileName` and
`allowsMultipleSelection` can be provided.
::
from fontParts.ui import GetFile
print(GetFile())
"""
return dispatcher["GetFile"](message=message, title=title, directory=directory,
fileName=fileName,
allowsMultipleSelection=allowsMultipleSelection,
fileTypes=fileTypes)
|
b81ba1e11764231c8c04164316e4ee55b0305044
| 3,639,436
|
def str_to_dtype(s):
"""Convert dtype string to numpy dtype."""
return eval('np.' + s)
|
e0ff793404af5a8022d260fde5878329abbac483
| 3,639,437
|
from typing import Callable
from typing import Tuple
def integrate_const(
f: Callable,
t_span: Tuple,
dt: float,
y0: np.ndarray,
method: str = 'runge_kutta4'
) -> Tuple[np.ndarray, np.ndarray]:
"""
A Python wrapper for Boost::odeint runge_kutta4 (the only one supported right now)
stepper and ODE integration.
:param f:
The ODE system RHS.
:param t_span:
The time range in which integration is performed. It is provided as
(t_initial, t_final) tuple.
:param dt:
The time-step to increment time from t_span[0] to t_span[1].
:param y0:
Initial conditions for the system state.
:param method:
The stepper method. Only 'runge_kutta4' is supported at the moment.
:return:
A tuple with two arrays: (time, solution). The first contains the time points
from integration and the last is a matrix with the solution for each state provided
by columns. In other words, solution[:, 0] contains the solution for state 0,
solution[:, 1] for state 1 and so forth.
"""
time, solution = _integrate_const(f, t_span, dt, y0, method)
solution = np.array(solution)
time = np.array(time)
return time, solution
|
e43479c829fd46e0f4cdd8c7918294577e91beed
| 3,639,438
|
def cleanup(serialized):
"""
Remove all missing values. Sometimes it's useful for object methods
to return a missing value in order to not include that value in the
json format.
Examples::
>>> class User(Serializable):
...     def attributes(self):
...         return ['id', 'name', 'birthday', 'age']
...     def age(self):
...         if not self.birthday:
...             return empty
...         else:
...             return calc_age(self.birthday)
Now if some user has birthday the age function is going to return the age.
However if user doesn't have birthday the age function is returning a
special empty value which tells jsonifier not to include that key in
json format.
>>> User(id=1, name='someone').as_json()
{'id': 1, 'name': 'Someone'}
"""
return dict(filter(lambda a: a[1] is not empty, serialized.items()))
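# Direct usage: keys whose value is the `empty` sentinel are dropped,
# e.g. cleanup({"id": 1, "age": empty}) -> {"id": 1}.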
|
5e4bfd13408ec8272c4fc4e9a499349e13dd2798
| 3,639,439
|
import json
def PyValueToMessage(message_type, value):
"""Convert the given python value to a message of type message_type."""
return JsonToMessage(message_type, json.dumps(value))
|
576237ebbacb85ac4c51be8b5523f4f95cfcc019
| 3,639,442
|
from typing import Any
from typing import get_origin
def istype(obj: Any, annotation: type) -> bool:
"""Check if object is consistent with the annotation"""
if get_origin(annotation) is None:
if annotation is None:
return obj is None
return isinstance(obj, annotation)
else:
raise NotImplementedError("Currently only the basic types are supported")
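# Examples: istype(3, int) -> True; istype("3", int) -> False; istype(None, None) -> True.
# Parameterized generics such as List[int] currently raise NotImplementedError.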
|
c1903ea2ec6c0b6b9006a38f7c0720c88987b706
| 3,639,443
|
import logging
import platform
def test_cand_gen(caplog):
"""Test extracting candidates from mentions from documents."""
caplog.set_level(logging.INFO)
if platform == "darwin":
logger.info("Using single core.")
PARALLEL = 1
else:
logger.info("Using two cores.")
PARALLEL = 2 # Travis only gives 2 cores
def do_nothing_matcher(fig):
return True
max_docs = 10
session = Meta.init("postgresql://localhost:5432/" + DB).Session()
docs_path = "tests/data/html/"
pdf_path = "tests/data/pdf/"
# Parsing
logger.info("Parsing...")
doc_preprocessor = HTMLDocPreprocessor(docs_path, max_docs=max_docs)
corpus_parser = Parser(
session, structural=True, lingual=True, visual=True, pdf_path=pdf_path
)
corpus_parser.apply(doc_preprocessor, parallelism=PARALLEL)
assert session.query(Document).count() == max_docs
assert session.query(Sentence).count() == 5548
docs = session.query(Document).order_by(Document.name).all()
# Mention Extraction
part_ngrams = MentionNgramsPart(parts_by_doc=None, n_max=3)
temp_ngrams = MentionNgramsTemp(n_max=2)
volt_ngrams = MentionNgramsVolt(n_max=1)
figs = MentionFigures(types="png")
Part = mention_subclass("Part")
Temp = mention_subclass("Temp")
Volt = mention_subclass("Volt")
Fig = mention_subclass("Fig")
fig_matcher = LambdaFunctionFigureMatcher(func=do_nothing_matcher)
with pytest.raises(ValueError):
mention_extractor = MentionExtractor(
session,
[Part, Temp, Volt],
[part_ngrams, volt_ngrams], # Fail, mismatched arity
[part_matcher, temp_matcher, volt_matcher],
)
with pytest.raises(ValueError):
mention_extractor = MentionExtractor(
session,
[Part, Temp, Volt],
[part_ngrams, temp_matcher, volt_ngrams],
[part_matcher, temp_matcher], # Fail, mismatched arity
)
mention_extractor = MentionExtractor(
session,
[Part, Temp, Volt, Fig],
[part_ngrams, temp_ngrams, volt_ngrams, figs],
[part_matcher, temp_matcher, volt_matcher, fig_matcher],
)
mention_extractor.apply(docs, parallelism=PARALLEL)
assert session.query(Part).count() == 234
assert session.query(Volt).count() == 107
assert session.query(Temp).count() == 136
assert session.query(Fig).count() == 223
part = session.query(Part).order_by(Part.id).all()[0]
volt = session.query(Volt).order_by(Volt.id).all()[0]
temp = session.query(Temp).order_by(Temp.id).all()[0]
logger.info("Part: {}".format(part.context))
logger.info("Volt: {}".format(volt.context))
logger.info("Temp: {}".format(temp.context))
# Candidate Extraction
PartTemp = candidate_subclass("PartTemp", [Part, Temp])
PartVolt = candidate_subclass("PartVolt", [Part, Volt])
with pytest.raises(ValueError):
candidate_extractor = CandidateExtractor(
session,
[PartTemp, PartVolt],
throttlers=[
temp_throttler,
volt_throttler,
volt_throttler,
], # Fail, mismatched arity
)
with pytest.raises(ValueError):
candidate_extractor = CandidateExtractor(
session,
[PartTemp], # Fail, mismatched arity
throttlers=[temp_throttler, volt_throttler],
)
# Test that no throttler in candidate extractor
candidate_extractor = CandidateExtractor(
session, [PartTemp, PartVolt]
) # Pass, no throttler
candidate_extractor.apply(docs, split=0, parallelism=PARALLEL)
assert session.query(PartTemp).count() == 4141
assert session.query(PartVolt).count() == 3610
assert session.query(Candidate).count() == 7751
candidate_extractor.clear_all(split=0)
assert session.query(Candidate).count() == 0
# Test with None in throttlers in candidate extractor
candidate_extractor = CandidateExtractor(
session, [PartTemp, PartVolt], throttlers=[temp_throttler, None]
)
candidate_extractor.apply(docs, split=0, parallelism=PARALLEL)
assert session.query(PartTemp).count() == 3879
assert session.query(PartVolt).count() == 3610
assert session.query(Candidate).count() == 7489
candidate_extractor.clear_all(split=0)
assert session.query(Candidate).count() == 0
candidate_extractor = CandidateExtractor(
session, [PartTemp, PartVolt], throttlers=[temp_throttler, volt_throttler]
)
candidate_extractor.apply(docs, split=0, parallelism=PARALLEL)
assert session.query(PartTemp).count() == 3879
assert session.query(PartVolt).count() == 3266
assert session.query(Candidate).count() == 7145
assert docs[0].name == "112823"
assert len(docs[0].parts) == 70
assert len(docs[0].volts) == 33
assert len(docs[0].temps) == 24
# Test that deletion of a Candidate does not delete the Mention
session.query(PartTemp).delete()
assert session.query(PartTemp).count() == 0
assert session.query(Temp).count() == 136
assert session.query(Part).count() == 234
# Test deletion of Candidate if Mention is deleted
assert session.query(PartVolt).count() == 3266
assert session.query(Volt).count() == 107
session.query(Volt).delete()
assert session.query(Volt).count() == 0
assert session.query(PartVolt).count() == 0
|
44cf505a7eedef55e6322eafebfb92ad3b882697
| 3,639,444
|
from decimal import Decimal
def spending_from_savings(take_home_pay: float, savings: float) -> Decimal:
"""
Calculate your spending based on your take home pay and how much
you save. This is useful if you use what Paula Pant calls the anti-budget,
instead of tracking your spending in detail. This number can be used as
input for the savings_rate function.
Args:
take_home_pay: monthly take-home pay
savings: amount of money saved towards FI
Returns:
The amount of money spent
"""
return Decimal(take_home_pay) - Decimal(savings)
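# Example: spending_from_savings(5000, 1500) -> Decimal('3500')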
|
da26cae052bd27efb11893440353d53e8b6aed89
| 3,639,445
|
def large_asymmetric_bulge(data):
"""
:param data: image data as array
:return: the width and location of the largest asymmetric bulge (if any) in the sequence
"""
# retrieve the lengths of the bars in the sequences (the counts) from the palindrome function
score, upper_half_counts, lower_half_counts, len_premiRNA = palindrome(data)
# zip the count lists and check whether a large asymmetric bulge is included (pixel bar reaching image border)
# and in which of the two image halves the bulge is located
bulge_array = []
bulge_locations = []
# go over the bar lengths in the counts arrays and check whether they match the large asymmetric bulge requirements
for pixel_upper, pixel_lower in zip(upper_half_counts[0:len_premiRNA], lower_half_counts[0:len_premiRNA]):
if pixel_upper == pixel_lower:
bulge_array.append(0)
bulge_locations.append(0)
else:
# check for large asymmetric bulge in lower half of image
if pixel_upper == 2 and pixel_lower == 12:
bulge_array.append(1)
bulge_locations.append('lower')
# check for large asymmetric bulge in upper half of image
elif pixel_upper == 12 and pixel_lower == 2:
bulge_array.append(1)
bulge_locations.append('upper')
else:
# if above conditions do not hold, the sequence does not contain a large asymmetric bulge
bulge_array.append(0)
bulge_locations.append(0)
# find the exact location and width of the large asymmetric bulge in the sequence by going over the bulge_array
widths = []
bulge_width = 0
bulge_exact_locations = []
bulge_exact_location = []
for i in range(len(bulge_array) - 1):
# if the integer in the bulge_array is 1, we are at a large asymmetric bulge and we should increment the width
if bulge_array[i] == 1:
bulge_width += 1
bulge_exact_location.append((bulge_locations[i], i))
# if the next integer in bulge_array is 0, we have reached the end of the bulge and we should store the
# width and all location info
if bulge_array[i + 1] == 0:
widths.append(bulge_width)
bulge_width = 0
bulge_exact_locations.append(bulge_exact_location)
bulge_exact_location = []
else:
i += 1
# create empty values for the attributes of interest if there is no large asymmetric bulge found in the sequence
if not widths:
largest_bulge = np.nan
largest_bulge_location = (np.nan, np.nan)
# if there is at least one large asymmetric bulge, find the widest one among all and store this as the largest
# asymmetric bulge of the sequence
else:
largest_bulge = np.max(widths)
largest_bulge_index = np.argmax(widths)
largest_bulge_location = bulge_exact_locations[largest_bulge_index]
middle_bulge_location = int(len(largest_bulge_location) / 2)
largest_bulge_location = (largest_bulge_location[0][0],
largest_bulge_location[middle_bulge_location][1])
return largest_bulge, largest_bulge_location
|
be7aef1cc6a2443de3ecff5099d6e28554544f7a
| 3,639,447
|
import requests
from urllib.parse import quote
def request(host, path, bearer_token, url_params):
"""Given a bearer token, send a GET request to the API.
Args:
host (str): The domain host of the API.
path (str): The path of the API after the domain.
bearer_token (str): OAuth bearer token, obtained using client_id and client_secret.
url_params (dict): An optional set of query parameters in the request.
Returns:
dict: The JSON response from the request.
Raises:
HTTPError: An error occurs from the HTTP request.
"""
url_params = url_params or {}
url = '{0}{1}'.format(host, quote(path.encode('utf8')))
headers = {
'Authorization': 'Bearer %s' % bearer_token,
}
response = requests.request('GET', url, headers=headers, params=url_params)
return response.json()
|
8f322307bfc1cf48ff5e1a7e52df18e5c9dc7ddf
| 3,639,448
|
def find_unique_distances(distance_ij: pd.Series) -> np.ndarray:
"""Finds the unique distances that define the neighbor groups.
:param distance_ij: A pandas ``Series`` of pairwise neighbor distances.
:return: An array of unique neighbor distances.
"""
unique_floats: np.ndarray = np.sort(distance_ij.unique())
next_distance_not_close: np.ndarray = np.logical_not(
np.isclose(unique_floats[1:], unique_floats[:-1])
)
return np.concatenate(
(unique_floats[:1], unique_floats[1:][next_distance_not_close])
)
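# Example: near-duplicate floats collapse into one neighbor group.
# find_unique_distances(pd.Series([1.0, 1.0 + 1e-12, 2.0]))  ->  array([1., 2.])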
|
ca4d8252c4b79bd536a10a058ca5f75b9f39416e
| 3,639,449
|
from typing import Dict
from typing import Any
import pytest
import nox
def session(monkeypatch: pytest.MonkeyPatch) -> nox.Session:
"""Fixture for a Nox session."""
registry: Dict[str, Any] = {}
monkeypatch.setattr("nox.registry._REGISTRY", registry)
@nox.session(venv_backend="none")
def test(session: nox.Session) -> None:
"""Example session."""
config = nox._options.options.namespace(posargs=[])
[runner] = nox.manifest.Manifest(registry, config)
runner._create_venv()
return nox.Session(runner)
|
646403d4383c6e426d736bf55278e001db2a40e1
| 3,639,450
|
import yaml
def _load_yaml_with_clear_tag(stream):
"""Like yaml.safe_load(), but everything with a !clear tag before it
will be wrapped in ClearedValue()."""
loader = yaml.SafeLoader(stream)
loader.add_constructor('!clear', _cleared_value_constructor)
try:
return loader.get_single_data()
finally:
if hasattr(loader, 'dispose'): # it doesn't in PyYAML 3.09
loader.dispose()
|
dec04cec96fae797250d1fb37491755ceaea399c
| 3,639,451
|
def highlights(state_importance_df, exec_traces, budget, context_length, minimum_gap=0,
overlay_limit=0):
"""generate highlights summary"""
sorted_df = state_importance_df.sort_values(['importance'], ascending=False)
summary_states, summary_traces, state_trajectories = [], [], {}
seen_indexes, seen_importance = {x: [] for x in range(len(exec_traces))}, []
"""for each state by importance"""
for index, row in sorted_df.iterrows():
state = row['state']
"""unique score for frogger"""
if row["importance"] in seen_importance:
continue
else:
seen_importance.append(row["importance"])
trace_len = len(exec_traces[state[0]].states)
lower, upper = get_relevant_range(state[1], trace_len, context_length, minimum_gap,
overlay_limit)
if lower not in seen_indexes[state[0]] and upper not in seen_indexes[state[0]]:
seen_indexes[state[0]] += list(range(lower, upper + 1))
summary_states.append(state)
if len(summary_states) == budget:
break
#
# trajectories = {}
# for trace_idx, trace in enumerate(exec_traces):
# if state in trace.states:
# state_index = trace.states.index(state)
# trace_len = len(trace.states)
# lower, upper = get_relevant_range(state_index, trace_len, context_length,
# minimum_gap, overlay_limit)
# """check if these states are not neighbours of previously seen states"""
# for seen_state in summary_states:
# # if [1 for x in trace.states[lower:upper] if x == seen_state]:
# if seen_state[0] != trace_idx:
# break
# else:
# if seen_state[1] in trace.states[lower:upper]:
# break
# else:
# trajectories[trace_idx] = state_index
# if not summary_states:
# trajectories[trace_idx] = state_index
#
# """if no siutable trajectories found - try next state"""
# if not trajectories:
# continue
# else:
# state_trajectories[state] = trajectories
#
# """once a trace is obtained, get the state index in it"""
# summary_states.append(state)
# summary_traces.append(list(trajectories.keys()))
# if len(summary_states) == budget:
# break
summary_state_trajectories = {}
for t_i, s_i in summary_states:
t = exec_traces[t_i].states
lower, upper = get_relevant_range(s_i, len(t), context_length)
summary_state_trajectories[(t_i, s_i)] = t[lower:upper]
return summary_state_trajectories
|
50c1dddaad88fa697f850380b215c2fb9e5f1a13
| 3,639,452
|
import tkinter as tk
from matplotlib.backends.backend_agg import FigureCanvasAgg
# `blit` comes from matplotlib.backends.tkagg in the older matplotlib releases this snippet targets.
def draw_figure(canvas, figure, loc=(0, 0)):
    """
    Draw a matplotlib figure onto a Tk canvas
    loc: location of top-left corner of figure on canvas in pixels.
    Inspired by matplotlib source: lib/matplotlib/backends/backend_tkagg.py
    """
figure_canvas_agg = FigureCanvasAgg(figure)
figure_canvas_agg.draw()
figure_x, figure_y, figure_w, figure_h = figure.bbox.bounds
figure_w, figure_h = int(figure_w+1), int(figure_h+1)
photo = tk.PhotoImage(master=canvas, width=figure_w, height=figure_h)
# Position: convert from top-left anchor to center anchor
canvas.create_image(loc[0] + figure_w/2, loc[1] + figure_h/2, image=photo)
    # Unfortunately, there's no accessor for the pointer to the native renderer
blit(photo, figure_canvas_agg.get_renderer()._renderer, colormode=2)
# Return a handle which contains a reference to the photo object
# which must be kept live or else the picture disappears
return photo
|
02e4bc4a6cd475c63239170c0dae0648199c46b5
| 3,639,453
|
def find_struct(lines):
"""Finds structures in output data"""
struct = ''
name1 = ''
name2 = ''
seq1 = ''
seq2 = ''
result = []
for line in lines:
if line.startswith('; ========'):
break
if line.startswith('; ALIGNING'):
line = line.split()
name1 = line[2]
name2 = line[4]
continue
if line.startswith('; ALIGN %s' % name1):
line = line.split()[3:]
line = ''.join(line)
seq1 = ''.join([seq1,line])
continue
if line.startswith('; ALIGN %s' % name2):
line = line.split()[3:]
line = ''.join(line)
seq2 = ''.join([seq2,line])
continue
if line.startswith('; ALIGN Structure'):
line = line.split()[3:]
line = ''.join(line)
struct = ''.join([struct,line])
continue
struct = ViennaStructure(struct).toPairs()
struct.sort()
result.append([struct,seq1,seq2])
return result
|
b7f7e5c70fe0b1111f33e43a40bb9fdde4182b68
| 3,639,454
|
from typing import Callable
def chain(*fs: Callable) -> Callable:
"""
Compose given functions in reversed order.
Given functions f, g, the result of chain is chain(f, g) = g o f.
>>> def f(x: int) -> int:
... return x + 1
>>> def g(x: int) -> str:
... return str(x)
>>> chain(f, g)(41)
'42'
Chaining single function is the function itself.
>>> chain(f) is f
True
Empty function chain is identity.
>>> chain()(42)
42
"""
g: Callable = compose(*reversed(fs))
return g
|
4956a955a760d5243988f8fc6fdb0303e3351704
| 3,639,455
|
def astra_fp_2d_fan(volume, angles, source_object, object_det):
    """
    Fan-beam 2D forward projection.
    :param volume: 2D slice to project
    :param angles: projection angles, degrees
    :param source_object: source-to-object distance
    :param object_det: object-to-detector distance
    :return: projection data (sinogram)
    """
detector_size = volume.shape[1]
proj_geom = build_proj_geometry_fan_2d(detector_size, angles, source_object, object_det)
rec = astra_fp_2d(volume, proj_geom)
return rec
|
5114730387bd43585bb56a16e5e930491aa87fd2
| 3,639,456
|
from typing import Mapping
def get_remappings_prefix() -> Mapping[str, str]:
"""Get the remappings for xrefs based on the prefix.
    .. note:: Doesn't take into account the colon `:`
"""
return _get_curated_registry()['remappings']['prefix']
|
02cb1bb1cfa4ffb177327442c6fb63c4fc3fa320
| 3,639,457
|
import json
from datetime import date
import pandas as pd
def generate_schema():
    """ Generate a BigQuery JSON schema from today's dataset CSV file """
today = date.today().strftime("%d_%m_%Y")
complete_dataset = pd.read_csv(f"complete_dataset_{today}.csv")
json_schema = pd.io.json.build_table_schema(complete_dataset)
with open("json_schema_for_big_query.json", "w", encoding="utf-8") as f:
json.dump(json_schema, f, ensure_ascii=False, indent=4)
return None
|
67dca17ddfae8f3530e8ced2a730c28657fa77ca
| 3,639,458
|
import numpy as np
import tensorflow as tf
def binary_truncated_sprt_with_llrs(llrs, labels, alpha, beta, order_sprt):
""" Used in run_truncated_sprt_with_llrs .
Args:
llrs: A Tensor with shape (batch, duration). LLRs (or scores) of all frames.
labels: A Tensor with shape (batch,).
alpha : A float.
beta: A float.
order_sprt: An int.
Returns:
confmx: A Tensor with shape (2, 2).
mean_hittime: A scalar Tensor.
var_hittime: A scalar Tensor.
truncate_rate: A scalar Tensor.
"""
llrs_shape = llrs.shape
duration = int(llrs_shape[1])
batch_size = llrs_shape[0]
assert batch_size != 0
# Calc thresholds
thresh = [np.log(beta/(1-alpha)), np.log((1-beta)/alpha)]
if not ( (thresh[1] >= thresh[0]) and (thresh[1] * thresh[0] < 0) ):
raise ValueError("thresh must be thresh[1] >= thresh[0] and thresh[1] * thresh[0] < 0. Now thresh = {}".format(thresh))
# Calc all predictions and waits
signs1 = (tf.sign(llrs - thresh[1]) + 1)/2 # 1:hit, 0:wait
signs0 = (-1 - tf.sign(thresh[0] - llrs))/2 # -1:hit, 0:wait
preds_all_frames = signs1 + signs0 # (batch, duration), value= +1, 0, -1
# Calc truncate rate
hit_or_wait_all_frames = -(tf.abs(preds_all_frames) - 1) # wait=1, hit=0
truncate_rate = tf.reduce_mean(tf.reduce_prod(hit_or_wait_all_frames, 1), 0)
# Truncate survivors (forced decision)
preds_last_frame = tf.sign(llrs[:,-1]) # (batch,) value= +1, -1
preds_last_frame = tf.expand_dims(preds_last_frame, -1) # (batch, 1)
preds_all_frames_trunc = tf.concat([preds_all_frames[:,:-1], preds_last_frame], -1) # (batch, duration-1)+(batch,1)=(batch, duration)
if duration == 1:
# Calc mean hitting time and confusion matrix
mean_hittime = tf.constant(1., tf.float32)
preds = preds_all_frames_trunc[:,0] # (batch,)
preds = tf.cast((preds + 1) / 2, tf.int32)
confmx = tf.math.confusion_matrix(labels, preds, num_classes=2, dtype=tf.int32)
else:
# Calc mean hitting time
mask = tf.constant([i+1 for i in range(duration)][::-1], tf.float32)
mask = tf.tile(mask, [batch_size,])
mask = tf.reshape(mask, [batch_size, duration])
masked = preds_all_frames_trunc * mask # (batch, duration)
signed_hittimes1 = tf.reduce_max(masked, 1, keepdims=True)
signed_hittimes0 = tf.reduce_min(masked, 1, keepdims=True)
signed_hittimes0_abs = tf.abs(signed_hittimes0)
signed_hittimes_twin = tf.concat([signed_hittimes1, signed_hittimes0], 1)
hittimes_twin = tf.abs(signed_hittimes_twin)
answers1 = tf.greater(signed_hittimes1, signed_hittimes0_abs)
answers0 = tf.less(signed_hittimes1, signed_hittimes0_abs)
answers = tf.concat([answers1, answers0], 1)
hittimes = hittimes_twin[answers]
hittimes = duration - hittimes + 1
mean_hittime, var_hittime = tf.nn.moments(hittimes, axes=[0])
# Calc confusion matrix
signs_twin = tf.sign(signed_hittimes_twin)
preds = signs_twin[answers]
preds = tf.cast((preds + 1) / 2, tf.int32)
confmx = tf.math.confusion_matrix(labels, preds, num_classes=2, dtype=tf.int32)
return confmx, mean_hittime, var_hittime, truncate_rate
|
4d4f67d1ad9407df1cf8bfdc0e4c5cf775fcc57b
| 3,639,459
|
import time
def backoff(action, condition, max_attempts=40):
"""
Calls result = action() up to max_attempts times until condition(result) becomes true, with 30 s backoff. Returns a bool flag indicating whether condition(result) was met.
"""
timeout = 30
for attempt in range(max_attempts):
result = action()
if condition(result):
return True
printf("Condition not met, retrying in {0} seconds...".format(timeout))
time.sleep(timeout)
return False
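# Hedged usage sketch: the action and condition below are made-up placeholders,
# and each failed attempt waits 30 seconds before retrying.
import random
succeeded = backoff(lambda: random.random(), lambda r: r > 0.5, max_attempts=3)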
|
93fe5ff9ee672073eb9eb4792572e41d4b4c3faa
| 3,639,460
|
def get_file_info(repo, path):
"""we need change_count, last_change, nbr_committers."""
committers = []
last_change = None
nbr_changes = 0
for commit in repo.iter_commits(paths=path):
#print(dir(commit))
committers.append(commit.committer)
last_change = commit.committed_date
nbr_changes += 1
return nbr_changes, last_change, len(set(committers))
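# Hypothetical usage with GitPython; the repository path and file name are assumptions.
import git
repo = git.Repo(".")
changes, last_change, n_committers = get_file_info(repo, "README.md")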
|
6ff99df399d35b79d0e2a5635b1e76e1f65fe0bd
| 3,639,461
|
import requests
import urllib3
def retryable_session(session: requests.Session, retries: int = 8) -> requests.Session:
"""
Session with requests to allow for re-attempts at downloading missing data
:param session: Session to download with
:param retries: How many retries to attempt
:return: Session that does downloading
"""
retry = urllib3.util.retry.Retry(
total=retries,
read=retries,
connect=retries,
backoff_factor=0.3,
status_forcelist=(500, 502, 504),
)
adapter = requests.adapters.HTTPAdapter(max_retries=retry)
session.mount("http://", adapter)
session.mount("https://", adapter)
return session
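# Sketch: wrap a fresh Session so transient 500/502/504 responses are retried
# (the URL is a placeholder).
session = retryable_session(requests.Session())
response = session.get("https://example.com/data.json")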
|
a57d2021077997ab14576df35b4e5ad9d281575e
| 3,639,462
|
import numpy as np
def apply_affine(x, y, z, affine):
""" Apply the affine matrix to the given coordinate.
Parameters
----------
x: number or ndarray
The x coordinates
y: number or ndarray
The y coordinates
z: number or ndarray
The z coordinates
affine: 4x4 ndarray
The affine matrix of the transformation
"""
shape = x.shape
assert y.shape == shape, 'Coordinate shapes are not equal'
assert z.shape == shape, 'Coordinate shapes are not equal'
# Ravel, but avoiding a copy if possible
x = np.reshape(x, (-1,))
y = np.reshape(y, (-1,))
z = np.reshape(z, (-1,))
in_coords = np.c_[x,
y,
z,
np.ones(x.shape)].T
x, y, z, _ = np.dot(affine, in_coords)
x = np.reshape(x, shape)
y = np.reshape(y, shape)
z = np.reshape(z, shape)
return x, y, z
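# Sketch: a pure translation affine shifts every coordinate by (1, 2, 3)
# (illustrative values only).
affine = np.eye(4)
affine[:3, 3] = [1, 2, 3]
x, y, z = apply_affine(np.zeros(2), np.zeros(2), np.zeros(2), affine)
# x, y and z are now arrays of ones, twos and threes respectively.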
|
b940c98da65a61cd46d2ad85ec33c791619341a0
| 3,639,463
|
def square_valid(board: Board, n: int, pawn_value: int, x: int, y: int) -> bool:
"""Check if the square at x and y is available to put a pawn on it."""
return (coordinates_within_board(n, x, y) and
square_playable(board, pawn_value, x, y))
|
725f65e64a8570e7483f103f0bf669cef3d7f1ef
| 3,639,465
|
def epb2jd(epb):
""" Besselian epoch to Julian date.
:param epb: Besselian epoch.
:type epb: float
:returns: a tuple of two items:
* MJD zero-point, always 2400000.5 (float)
* modified Julian date (float).
.. seealso:: |MANUAL| page 76
"""
djm0 = _ct.c_double()
djm = _ct.c_double()
_sofa.iauEpb2jd(epb, _ct.byref(djm0), _ct.byref(djm))
return djm0.value, djm.value
|
c5a9bcb422ab34ba0875d152cf8c39dda898e68b
| 3,639,466
|
import numpy as np
def one_hot_decision_function(y):
"""
Examples
--------
>>> y = [[0.1, 0.4, 0.5],
... [0.8, 0.1, 0.1],
... [0.2, 0.2, 0.6],
... [0.3, 0.4, 0.3]]
>>> one_hot_decision_function(y)
array([[ 0., 0., 1.],
[ 1., 0., 0.],
[ 0., 0., 1.],
[ 0., 1., 0.]])
"""
z = np.zeros_like(y)
z[np.arange(len(z)), np.argmax(y, axis=1)] = 1
return z
|
a6eecff684ab926a46d746ca9c18e6b098308286
| 3,639,467
|
import pandas as pd
def combine_incomes(toshl_income, excel_income):
"""
Combines two data sources of incomes: toshl incomes and incomes from cashflow excel.
:param toshl_income: Preprocessed dataframe of toshl incomes (after cleaning and splitting)
:param excel_income: Raw excel income data
:return: Total income data
"""
df_in = toshl_income.reset_index().copy()
df_in["Tags"] = df_in["Tags"].apply(lambda x: "Salary" if x in ["Privat", "NHK", "OL"] else x)
df_in2 = excel_income.copy()
df_in2 = df_in2[["Datum", "Art", "Betrag"]].rename(columns={"Datum": "Date",
"Art": "Tags",
"Betrag": "Amount"}).dropna()
df_in2["Date"] = pd.to_datetime(df_in2["Date"], format="%d.%m.%Y")
df_in2["Tags"] = df_in2["Tags"].apply(lambda x: "Salary" if x in ["Gehalt", "Sodexo"] else x)
df_income = pd.concat([df_in, df_in2], ignore_index=True)
assert df_income.count()[0] == df_in.count()[0] + df_in2.count()[0], "Some income rows were lost!"
df_income = df_income.groupby([pd.Grouper(key='Date', freq='1M'), 'Tags']).sum()
    return df_income
|
31efb2d7b7420f3c71fcb12876cdc09d7ff748ec
| 3,639,468
|
from collections import defaultdict
from random import uniform
def generate_k(data_set, k):
"""
Given `data_set`, which is an array of arrays,
find the minimum and maximum for each coordinate, a range.
Generate `k` random points between the ranges.
Return an array of the random points within the ranges.
"""
centers = []
dimensions = len(data_set[0])
min_max = defaultdict(int)
for point in data_set:
for i in range(dimensions):
val = point[i]
min_key = 'min_%d' % i
max_key = 'max_%d' % i
if min_key not in min_max or val < min_max[min_key]:
min_max[min_key] = val
if max_key not in min_max or val > min_max[max_key]:
min_max[max_key] = val
for _k in range(k):
rand_point = []
for i in range(dimensions):
min_val = min_max['min_%d' % i]
max_val = min_max['max_%d' % i]
rand_point.append(uniform(min_val, max_val))
centers.append(rand_point)
return centers
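# Sketch: draw three random initial centers from the bounding box of a tiny 2-D
# data set (the points are illustrative).
points = [[0.0, 0.0], [1.0, 2.0], [3.0, 1.0]]
centers = generate_k(points, 3)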
|
1fd4eb6a825a0ca2b8e6b8200081ecfded351c7d
| 3,639,469
|
import requests
def __ping_url(url: str) -> bool:
"""Check a link for rotting."""
try:
r = requests.head(url)
return r.status_code in (
requests.codes.ok,
requests.codes.created,
requests.codes.no_content,
requests.codes.not_modified,
)
except Exception:
return False
|
e680cec006127bbe889dcab0291be3149f30d10e
| 3,639,470
|
from flask import jsonify
# get_db and the *DataAccess classes used below are project-local helpers.
def get_all_list_data():
"""
Handles the GET request to '/get-all-list-data'.
:return: Json with all list data
"""
conn = get_db()
all_types = TypeDataAccess(conn).get_types(False)
all_tags = TagDataAccess(conn).get_tags()
all_groups = ResearchGroupDataAccess(conn).get_research_groups(False)
all_employees = EmployeeDataAccess(conn).get_employees(False)
result = {
"types": [obj.to_dict() for obj in all_types],
"tags": all_tags,
"research groups": [obj.to_dict() for obj in all_groups],
"employees": [obj.to_dict() for obj in all_employees]
}
return jsonify(result)
|
4a4a942e054d301f936ae7993b04aff6c554f91c
| 3,639,471
|
import numpy as np
def truncate_range(data, percMin=0.25, percMax=99.75, discard_zeros=True):
"""Truncate too low and too high values.
Parameters
----------
data : np.ndarray
Image to be truncated.
percMin : float
Percentile minimum.
percMax : float
Percentile maximum.
discard_zeros : bool
Discard voxels with value 0 from truncation.
Returns
-------
data : np.ndarray
Truncated data.
pMin : float
Minimum truncation threshold which is used.
pMax : float
Maximum truncation threshold which is used.
"""
if discard_zeros:
msk = ~np.isclose(data, 0.)
pMin, pMax = np.nanpercentile(data[msk], [percMin, percMax])
else:
pMin, pMax = np.nanpercentile(data, [percMin, percMax])
temp = data[~np.isnan(data)]
temp[temp < pMin], temp[temp > pMax] = pMin, pMax # truncate min and max
data[~np.isnan(data)] = temp
if discard_zeros:
data[~msk] = 0 # put back masked out voxels
return data, pMin, pMax
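# Sketch: clip the extreme 0.25% tails of a noisy array (values are arbitrary).
img = np.random.randn(64, 64) * 100
clipped, p_min, p_max = truncate_range(img.copy(), percMin=0.25, percMax=99.75)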
|
c9f56e593255ae6261b6f709b725cc952accc884
| 3,639,472
|
try:
    import cPickle  # Python 2
except ImportError:
    import pickle as cPickle  # Python 3 fallback
def obtain_dcdb_to_drugbank(biana_cnx, unification_protocol, output_pickle_file):
"""
Obtain a dictionary {dcdb : drugbank}
"""
up_table = return_unification_protocol_table(biana_cnx, unification_protocol)
query = ('''SELECT DC.value, DB.value FROM externalEntityDCDB_drugID DC, {} U1, {} U2, externalEntityDrugBankID DB
WHERE DC.externalEntityID = U1.externalEntityID AND U1.userEntityID = U2.userEntityID AND U2.externalEntityID = DB.externalEntityID
'''.format(up_table, up_table))
cursor = biana_cnx.cursor()
cursor.execute(query)
dcdb_to_drugbank = {}
for items in cursor:
dcdb = items[0]
drugbank = items[1]
dcdb_to_drugbank.setdefault(dcdb, set())
dcdb_to_drugbank[dcdb].add(drugbank)
cursor.close()
print(dcdb_to_drugbank)
    cPickle.dump(dcdb_to_drugbank, open(output_pickle_file, 'wb'))
return dcdb_to_drugbank
|
02b9d5b6ddb29974d551123e7bb12a7a6aca3ca4
| 3,639,473
|
def duo_username(user):
""" Return the Duo username for user. """
return user.username
|
92b2bfd5f6f3027787db493880139a8564597946
| 3,639,474
|
import random
def random_number_list(list_length, data=None):
    """ Append `list_length` random numbers between 0 and 9 (both inclusive) to a list """
    # Avoid a mutable default argument; build a fresh list unless one is supplied.
    if data is None:
        data = []
    for _ in range(0, list_length):
        # append a random int to the data list
        data.append(random.randint(0, 9))
    return data
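# Sketch call; the explicit list length is an assumption, since the original
# relied on an external `list_length` variable.
print(random_number_list(5))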
|
5a04409a40e1e65216579056f95024269da1fc5a
| 3,639,475
|
import pandas as pd
def _matrix_method_reshape(df: pd.DataFrame) -> pd.DataFrame:
"""
Reshape df for matrix method and deal with missing values.
We first drop columns which contain all missing values, transpose
the dataframe and then fill the remaining missing values with zero,
to deal with missing items in some periods.
Parameters
----------
df : pd.DataFrame
The dataframe to reshape.
Returns
-------
pd.DataFrame
The reshaped dataframe.
"""
return df.dropna(how='all', axis=1).T.fillna(0)
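# Sketch on a tiny frame: column 'b' (all missing) is dropped, the remaining NaN
# becomes 0, and the result is transposed.
df_example = pd.DataFrame({"a": [1.0, None], "b": [None, None]})
print(_matrix_method_reshape(df_example))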
|
64989a6c61d1d891a3190cc1f6a36c98cf562775
| 3,639,476
|
import warnings
import numpy as np
import pandas as pd
def sim_bursty_oscillator(T, Fs, freq, prob_enter_burst=.1,
prob_leave_burst=.1, cycle_features=None,
return_cycle_df=False):
"""Simulate a band-pass filtered signal with 1/f^2
Input suggestions: f_range=(2,None), Fs=1000, N=1001
Parameters
----------
freq : float
oscillator frequency
T : float
signal duration (seconds)
Fs : float
signal sampling rate
prob_enter_burst : float
probability of a cycle being oscillating given
the last cycle is not oscillating
prob_leave_burst : float
probability of a cycle not being oscillating
given the last cycle is oscillating
cycle_features : dict
specify the mean and standard deviations
(within and across bursts) of each cycle's
amplitude, period, and rise-decay symmetry.
This can include a complete or incomplete set
(using defaults) of the following keys:
amp_mean - mean cycle amplitude
amp_std - standard deviation of cycle amplitude
amp_burst_std - std. of mean amplitude for each burst
period_mean - mean period (computed from `freq`)
period_std - standard deviation of period (samples)
period_burst_std - std. of mean period for each burst
rdsym_mean - mean rise-decay symmetry
rdsym_std - standard deviation of rdsym
rdsym_burst_std - std. of mean rdsym for each burst
return_cycle_df : bool
if True, return the dataframe that contains the simulation
parameters for each cycle. This may be useful for computing
power, for example. Because the power of the oscillator
should only be considered over the times where there's
bursts, not when there's nothing.
Returns
-------
signal : np.array
bursty oscillator
df : pd.DataFrame
cycle-by-cycle properties of the simulated oscillator
"""
# Define default parameters for cycle features
mean_period_samples = int(Fs / freq)
cycle_features_use = {'amp_mean': 1, 'amp_burst_std': .1, 'amp_std': .2,
'period_mean': mean_period_samples,
'period_burst_std': .1 * mean_period_samples,
'period_std': .1 * mean_period_samples,
'rdsym_mean': .5, 'rdsym_burst_std': .05, 'rdsym_std': .05}
# Overwrite default cycle features with those specified
if cycle_features is not None:
for k in cycle_features:
cycle_features_use[k] = cycle_features[k]
# Determine number of cycles to generate
N_samples = T * Fs
N_cycles_overestimate = int(np.ceil(N_samples / mean_period_samples * 2))
# Simulate if a series of cycles are oscillating or not oscillating
is_oscillating = [False]
N_cycles_current = 1
while N_cycles_current < N_cycles_overestimate:
rand_num = np.random.rand()
if is_oscillating[-1]:
is_oscillating.append(rand_num > prob_leave_burst)
else:
is_oscillating.append(rand_num < prob_enter_burst)
N_cycles_current += 1
# Determine period, amp, and rdsym for each cycle
periods = []
amps = []
rdsyms = []
for is_osc in is_oscillating:
if is_osc is False:
period = cycle_features_use['period_mean'] + \
np.random.randn() * cycle_features_use['period_std']
periods.append(int(period))
amps.append(np.nan)
rdsyms.append(np.nan)
current_burst_period_mean = np.nan
current_burst_amp_mean = np.nan
current_burst_rdsym_mean = np.nan
else:
if np.isnan(current_burst_period_mean):
current_burst_period_mean = cycle_features_use['period_mean'] + \
np.random.randn() * cycle_features_use['period_burst_std']
current_burst_amp_mean = cycle_features_use['amp_mean'] + \
np.random.randn() * cycle_features_use['amp_burst_std']
current_burst_rdsym_mean = cycle_features_use['rdsym_mean'] + \
np.random.randn() * cycle_features_use['rdsym_burst_std']
N_iter = 0
period, amp, rdsym = 0, 0, 0
while np.min([period, amp, rdsym]) <= 0:
if N_iter > 0:
if period < 0:
feat0 = 'period'
elif rdsym < 0:
feat0 = 'rise-decay symmetry'
else:
feat0 = 'amp'
warnings.warn('Simulation settings are such that the {:s} is occasionally computed to be negative. You may want to reset your simulation settings'.format(feat0))
period = current_burst_period_mean + \
np.random.randn() * cycle_features_use['period_std']
amp = current_burst_amp_mean + \
np.random.randn() * cycle_features_use['amp_std']
rdsym = current_burst_rdsym_mean + \
np.random.randn() * cycle_features_use['rdsym_std']
N_iter += 1
periods.append(int(period))
amps.append(amp)
rdsyms.append(rdsym)
df = pd.DataFrame({'is_cycle': is_oscillating, 'period': periods,
'amp': amps, 'rdsym': rdsyms})
df['start_sample'] = np.insert(df['period'].cumsum().values[:-1], 0, 0)
df = df[df['start_sample'] < N_samples]
# Shorten df to only cycles that are included in the data
# Simulate time series for each cycle
x = np.array([])
last_cycle_oscillating = False
for i, row in df.iterrows():
        if not row['is_cycle']:
# If last cycle was oscillating, add a decay to 0 then 0s
if last_cycle_oscillating:
decay_pha = np.linspace(0, np.pi / 2, int(row['period'] / 4))
decay_t = np.cos(decay_pha) * x[-1]
x = np.append(x, decay_t)
cycle_t = np.zeros(row['period'] - int(row['period'] / 4))
x = np.append(x, cycle_t)
else:
# Add a blank cycle
cycle_t = np.zeros(row['period'])
x = np.append(x, cycle_t)
last_cycle_oscillating = False
else:
            # If last cycle was not oscillating, ramp up from 0 into this cycle
if not last_cycle_oscillating:
rise_pha = np.linspace(-np.pi / 2, 0,
int(row['period'] / 4))[1:]
rise_t = np.cos(rise_pha) * row['amp']
x[-len(rise_t):] = rise_t
# Add a cycle with rdsym
rise_samples = int(np.round(row['period'] * row['rdsym']))
decay_samples = row['period'] - rise_samples
pha_t = np.hstack([np.linspace(0, np.pi, decay_samples + 1)[1:],
np.linspace(-np.pi, 0, rise_samples + 1)[1:]])
cycle_t = np.cos(pha_t)
# Adjust decay if the last cycle was oscillating
if last_cycle_oscillating:
scaling = (row['amp'] + x[-1]) / 2
offset = (x[-1] - row['amp']) / 2
cycle_t[:decay_samples] = cycle_t[:decay_samples] * \
scaling + offset
cycle_t[decay_samples:] = cycle_t[decay_samples:] * row['amp']
else:
cycle_t = cycle_t * row['amp']
x = np.append(x, cycle_t)
last_cycle_oscillating = True
x = x[:N_samples]
if return_cycle_df:
return x, df
else:
return x
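# Sketch: a 10 Hz bursty oscillation, 5 seconds at 1000 Hz sampling
# (parameter values are illustrative only).
x, cycle_df = sim_bursty_oscillator(T=5, Fs=1000, freq=10, return_cycle_df=True)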
|
ea408f91f6160114f0077bd441ea049f848d2da1
| 3,639,478
|
import numpy as np
import pandas as pd
import geopandas as gpd
import folium
# `process_tweets` is a project-local module that provides TweetProcessor.
def visualize_percent_diff(df):
"""Creates a visualization of difference in percentage of tweets of a topic
across the entire US and returns the mean sentiment felt about the
topic across the entire US
Parameters:
-----------
df: pd.DataFrame
dataframe containing all tweets. Must contain the columns
- state
- sentiment
Returns:
--------
map: Choropleth map of the US, where the color refers to the total
number of tweets
avg_sentiment: The average sentiment of a topic
"""
avg_sentiment = df.sentiment.mean()
tweet_processor = process_tweets.TweetProcessor('models/stemmed_lr.pk')
default_rate = tweet_processor.get_default_rate()
df_grouped = df[['sentiment', 'state']].groupby(['state']).count()
df_grouped['sentiment'] = 100.*df_grouped['sentiment']\
/df_grouped['sentiment'].sum()
df_grouped = pd.merge(df_grouped, default_rate, how='right',
left_index=True, right_index=True)
    df_grouped = df_grouped.fillna(0.)
df_grouped['sentiment'] = 100*df_grouped['sentiment']/df_grouped['rate']
gdf = gpd.read_file('data/cb_2016_us_state_20m.dbf')
merged_df = gdf.merge(df_grouped, how='left', left_on='NAME',
right_index=True)
merged_df = merged_df.fillna(0)
data_df = merged_df[['NAME', 'sentiment']].fillna(0)
geo_str = merged_df[['NAME', 'geometry']].to_json()
threshold_scale = np.linspace(min(0, data_df['sentiment'].min()),
data_df['sentiment'].max(),
6)
threshold_scale = list(threshold_scale)
map1 = folium.Map(location=[+37, -100],
tiles='Cartodb Positron',
zoom_start=4)
map1.choropleth(geo_data=geo_str,
data=data_df,
columns=['NAME', 'sentiment'],
fill_color='YlGn',
legend_name='percentage of expected',
name='topic: sentiment = {:.2f}'.format(avg_sentiment),
threshold_scale=threshold_scale,
key_on='feature.properties.NAME')
return map1, avg_sentiment
|
d3f3404e5695a0191580f3df20eaf4c824d3e436
| 3,639,479
|
import six
import inspect
def basic_compare(first, second, strict=False):
"""
Comparison used for custom match functions,
can do pattern matching, function evaluation or simple equality.
Returns traceback if something goes wrong.
"""
try:
if is_regex(second):
if not isinstance(first, six.string_types) and not strict:
first = str(first)
result = bool(second.match(first))
elif callable(second):
result = bool(second(first))
else:
result = first == second
return result, None
except Exception as exc:
return None, format_trace(inspect.trace(), exc)
|
ee16806fd78f46c2bcf01a5263f6d0210c22f32a
| 3,639,480
|
def parse_line(line):
"""Return a list of 2-tuples of the possible atomic valences for a given line from
the APS defining sheet."""
possap = []
for valence, entry in enumerate(line[4:]):
if entry != "*":
possap.append((valence, int(entry)))
return possap
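# Sketch with a hypothetical APS line: columns from index 4 onward hold either a
# count or "*" for each valence.
aps_line = ["C", "6", "12.01", "sp3", "2", "*", "1"]
print(parse_line(aps_line))  # -> [(0, 2), (2, 1)]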
|
d27ed66cb35084c9927cae8658d7ea8a421c69a4
| 3,639,481
|
import datetime
from flask import jsonify
def dashboard():
    """Return statistics for the site."""
user = get_user_from_request()
if not user.is_admin:
return errors.no_access()
users = User.select().count()
d = datetime.datetime.now() - datetime.timedelta(days=7)
active_users = User.select().where(User.last_active_date > d).count()
return jsonify({"success": 1, "users": users, "active_users_7_days": active_users})
|
05363fd27ee6980258b7ea015a81e644799c5baa
| 3,639,483
|
import numpy as np
def dct(f, axis=-1):
"""
Compute the Discrete Cosine Transform over the specified axis.
:param f: The input array.
:param axis: Axis along which the DCT is computed. The default is over the last axis.
:return c: The computed DCT.
"""
# Size of the input along the specified axis.
n = f.shape[axis]
# Create two vectors containing the integers from 0 to n-1.
i = k = np.arange(n)
# Compute the x-axis coordinate of the f function.
x = (2 * i + 1) / (2 * n)
# Compute the outer product of x and kπ, obtaining the nxn matrix that will
# form the argument of the cosine.
arg = np.multiply.outer(x, k * np.pi)
# Normalization factors.
alpha = np.where(k == 0, 1 / np.sqrt(n), np.sqrt(2 / n))
# The orthonormal DCT basis.
w = alpha * np.cos(arg)
# Compute the convolution between the input array and the DCT basis.
# The output contains the amplitude coefficient for every frequency.
c = np.tensordot(f, w, axes=(axis, 0))
# `axis` becomes the last dimension in the output of `np.tensordot`.
# Move it back to its original position so that the output shape matches
# the input shape.
c = np.moveaxis(c, -1, axis)
return c
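# Sketch: the transform is an orthonormal type-II DCT, so SciPy (if available)
# can serve as a cross-check.
from scipy.fft import dct as scipy_dct
f_test = np.random.rand(8)
assert np.allclose(dct(f_test), scipy_dct(f_test, norm='ortho'))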
|
3e6cd65a3088d948fb81f61c25b2f590facb8351
| 3,639,484
|