content
stringlengths 35
762k
| sha1
stringlengths 40
40
| id
int64 0
3.66M
|
|---|---|---|
def with_metaclass(meta, *bases):
    """Build an anonymous base class carrying *meta* as its metaclass.

    Subclassing the returned class applies ``meta`` to the subclass while
    inheriting from ``bases`` (the classic six-style compatibility idiom).
    """
    placeholder_name = 'NewBase'
    return meta(placeholder_name, bases, {})
|
a8257c1d7a4fdec6331985983b65954e9b1d9453
| 3,644,478
|
def team(slug):
    """The team page: show recent statuses for every user in the team.

    Renders 'status/team.html' for the team matching *slug*, or the
    404 page when no such team exists.
    """
    session = get_session(current_app)
    found = session.query(Team).filter_by(slug=slug).first()
    if not found:
        return page_not_found('Team not found.')
    # Paged status history, optionally bounded by start/end query params.
    statuses = found.recent_statuses(
        request.args.get('page', 1),
        startdate(request),
        enddate(request))
    all_teams = session.query(Team).order_by(Team.name).all()
    return render_template(
        'status/team.html',
        team=found,
        users=found.users,
        teams=all_teams,
        statuses=statuses)
|
c702b4837c3e7342e248f81180821c4ff3404793
| 3,644,479
|
def mask_layer(layer, mask, mask_value=np.nan):
    """Overwrite the cells of *layer* selected by *mask* with *mask_value*.

    Equivalent to ``layer[mask == True] = mask_value``. *layer* is mutated
    in place and also returned for call-chaining convenience.
    """
    masked = layer
    masked[mask] = mask_value
    return masked
|
b8ac53633bb351eea2e0025eeb5daba1f2eeab54
| 3,644,480
|
def get_disabled():
    """
    Return a list of all disabled services
    CLI Example:
    .. code-block:: bash
        salt '*' service.get_disabled
    """
    # Thin wrapper: "DISABLED" is the status token understood by the
    # module-level _get_svc_list helper (defined elsewhere in this module).
    return _get_svc_list(status="DISABLED")
|
34d2389bf6e2c3284b06376780c7424205f340be
| 3,644,482
|
def calling_method():
    """
    call recursive method
    :return: list all post 2 days delta-time
    """
    # NOTE(review): list_posts is always empty here, so the payload is built
    # from zero posts — the docstring suggests a recursive fetch was meant to
    # populate it. Confirm against create_json_poyload's expectations.
    list_posts = list()
    return create_json_poyload(list_posts)
|
587afbf856dbd014a253847816c185edd23f8485
| 3,644,483
|
import json
def read_json(path):
    """
    Read a BayesNet object from the json format. This
    format has the ".bn" extension and is completely
    unique to pyBN.
    Arguments
    ---------
    *path* : a string
        The file path
    Returns
    -------
    The instantiated BayesNet object.
    Notes
    -----
    This function reads in a libpgm-style format into a bn object.
    Expected keys are "V" (vertices), "E" (edges) and "F" (factors);
    a malformed file yields an empty BayesNet with a printed warning.
    """
    def byteify(value):
        """Recursively convert decoded JSON text to utf-8 (Python 2 shim)."""
        if isinstance(value, dict):
            return {byteify(k): byteify(v) for k, v in value.items()}
        if isinstance(value, list):
            return [byteify(item) for item in value]
        # json.loads yields ``unicode`` on Python 2 only. On Python 3 the
        # ``str is bytes`` guard short-circuits, so the name ``unicode``
        # (which does not exist there) is never evaluated.  The original
        # used dict.iteritems()/unicode unconditionally and crashed on Py3.
        if str is bytes and isinstance(value, unicode):
            return value.encode('utf-8')
        return value

    bn = BayesNet()
    # Use a context manager so the file handle is not leaked (the original
    # opened the file and never closed it).
    with open(path, 'r') as f:
        ftxt = f.read()
    try:
        data = byteify(json.loads(ftxt))
        bn.V = data['V']
        bn.E = data['E']
        bn.F = data['F']
    except ValueError:
        # Malformed JSON: warn and fall through with an empty network,
        # matching the original best-effort behavior.
        print("Could not read file - check format")
    # Re-derive a topologically sorted vertex list from the edge set.
    bn.V = topsort(bn.E)
    return bn
|
4c483f8fe148ff3a94bdee4accc22fb2964dc09d
| 3,644,485
|
def solve_primal(run_id, problem, mip_solution, solver):
    """Solve the primal by fixing integer variables and solving the NLP.

    If the first attempt fails and `mip_solution` carries a solution pool,
    retry from each pooled point until one succeeds or time runs out.

    Parameters
    ----------
    run_id : str
        the run_id used for logging
    problem : Problem
        the mixed integer, (possibly) non convex problem
    mip_solution : MipSolution
        the linear relaxation solution
    solver : Solver
        the NLP solver used to solve the problem
    """
    def _attempt(variables):
        # Extract the variable values as a starting point and run the NLP.
        point = [v.value for v in variables]
        return solve_primal_with_starting_point(run_id, problem, point, solver)

    solution = _attempt(mip_solution.variables)
    if solution.status.is_success():
        return solution
    pool = mip_solution.solution_pool
    if pool is None:
        # No alternatives to try: report the (failed) first attempt.
        return solution
    for pooled in pool:
        # Respect the global time budget between attempts.
        if seconds_left() <= 0:
            return solution
        candidate = _attempt(pooled.inner.variables)
        if candidate.status.is_success():
            return candidate
    # Every pooled start failed too; return the original infeasible solution.
    return solution
|
9b01b65553a752cebb899bcc8b4f78f5355db5f9
| 3,644,486
|
def SpliceContinuations(tree):
  """Given a pytree, splice the continuation marker into nodes.
  Arguments:
    tree: (pytree.Node) The tree to work on. The tree is modified by this
      function.
  """
  def RecSplicer(node):
    """Inserts a continuation marker into the node."""
    # A leaf whose prefix begins with a backslash-newline was preceded by an
    # explicit line continuation; synthesize a CONTINUATION leaf for it.
    if isinstance(node, pytree.Leaf):
      if node.prefix.lstrip().startswith('\\\n'):
        # Place the marker on the line where the continuation started.
        new_lineno = node.lineno - node.prefix.count('\n')
        return pytree.Leaf(
            type=format_token.CONTINUATION,
            value=node.prefix,
            context=('', (new_lineno, 0)))
      return None
    # Interior node: recurse over a snapshot of the children, inserting any
    # returned markers *before* the child they belong to.  num_inserted
    # shifts the insertion index to account for markers already spliced in.
    num_inserted = 0
    for index, child in enumerate(node.children[:]):
      continuation_node = RecSplicer(child)
      if continuation_node:
        node.children.insert(index + num_inserted, continuation_node)
        num_inserted += 1
  RecSplicer(tree)
|
9bb36363b3ae8ef2e04649bc966d8e664fa1202f
| 3,644,487
|
def rayleigh(flow_resis, air_dens, sound_spd,
             poros, freq=np.arange(100, 10001, 1)):
    """
    Rayleigh model: characteristic impedance and wave number of a material.
    Parameters:
    ----------
    flow_resis : int
        Resistivity of the material
    air_dens : int | float
        The air density
    sound_spd : int | float
        The speed of the sound
    poros : float
        Porosity of the material
    freq : ndarray
        A range of frequencies
        NOTE: default range goes from 100 [Hz] to 10 [kHz].
    Returns:
    -------
    zc : int | float | complex
        Material Charactheristic Impedance
    kc : int | float | complex
        Material Wave Number
    """
    angular_freq = 2 * np.pi * freq
    # Complex correction factor of the Rayleigh model.
    alpha = (1 - (1j * poros * flow_resis) / (air_dens * angular_freq)) ** 0.5
    # Wave number scales the free-field value omega/c by alpha ...
    kc = (angular_freq / sound_spd) * alpha
    # ... and the impedance scales rho*c/porosity by the same factor.
    zc = ((air_dens * sound_spd) / poros) * alpha
    return zc, kc
|
cf1330591e1f97f831268bd19babac2d682369aa
| 3,644,488
|
import requests
import logging
def set_iam_policy(project_id: str, policy: dict, token: str) -> dict:
    """Sets the Cloud IAM access control policy for a ServiceAccount.
    Args:
        project_id: GCP project ID.
        policy: IAM policy.
        token: Access token from the Google Authorization Server.
    Returns:
        A dict containing the response body.
    """
    host = "https://cloudresourcemanager.googleapis.com"
    url = f"{host}/v1/projects/{project_id}:setIamPolicy"
    request_body = {"policy": policy}
    auth_headers = {"Authorization": f"Bearer {token}"}
    resp = requests.post(url, json=request_body, headers=auth_headers)
    try:
        resp.raise_for_status()
    except requests.exceptions.HTTPError as err:
        # Surface the API's error body before propagating the failure.
        logging.error(err.response.text)
        raise err
    return resp.json()
|
3afc6902bcd20c4af62ba9c3f2e873da5d425d06
| 3,644,489
|
def feature_set_is_deployed(db: Session, fset_id: int) -> bool:
    """
    Returns if this feature set is deployed or not
    :param db: SqlAlchemy Session
    :param fset_id: The Feature Set ID in question
    :return: True if at least one version of the feature set is deployed
    """
    # NOTE: ``deployed == True`` is a SQLAlchemy column expression evaluated
    # in SQL, not a Python comparison — do not rewrite it as ``is True``.
    d = db.query(models.FeatureSetVersion). \
        filter((models.FeatureSetVersion.feature_set_id == fset_id) &
               (models.FeatureSetVersion.deployed == True)). \
        count()
    # A non-zero count means some deployed version exists.
    return bool(d)
|
e66609ec97a17eb55ea0a6c7218a0f5f9fb1ca9b
| 3,644,490
|
import time
async def graf(request: Request):
    """
    Display the graph of the measured characteristic.
    """
    now = time.asctime(time.localtime(time.time()))
    # Log each page view with a timestamp.
    print("Graf; Čas:", now)
    context = {"request": request, "time": now}
    return templates.TemplateResponse("graf.html", context)
|
a09ad4790cfaf71927b2c3e2b371f4089c8f0937
| 3,644,492
|
def mapRangeUnclamped(Value, InRangeA, InRangeB, OutRangeA, OutRangeB):
    """Returns Value mapped from one range into another WITHOUT clamping:
    values outside InRangeA..InRangeB extrapolate beyond the output range.
    (e.g. 0.5 normalized from the range 0->1 to 0->50 would result in 25)

    NOTE(review): the previous docstring said the value "is clamped to the
    Input Range", which contradicts the function name; GetRangePct/lerp are
    presumed unclamped — confirm against their definitions.
    """
    return lerp(OutRangeA, OutRangeB, GetRangePct(InRangeA, InRangeB, Value))
|
1579359f6585bb17228cf2b94b29ee8cd00e672e
| 3,644,493
|
import json
def folders(request):
    """Handle creating, retrieving, updating, deleting of folders.

    GET returns ``[[guid, title], ...]`` for the requesting user's folders.
    POST dispatches on which key is present: "create", "update" or "delete"
    (delete also removes the notebooks listed in "nbids").  The response is
    always JSON.
    """
    # Default payload. Previously ``data`` was left unbound for request
    # methods other than GET/POST (or a POST without a recognized action),
    # which raised UnboundLocalError at the json.dumps call below.
    data = []
    if request.method == "GET":
        q = bookshelf_models.Folder.objects.filter(owner=request.user)
        data = [[e.guid, e.title] for e in q]
    if request.method == "POST":
        if "create" in request.POST:
            newfolder = bookshelf_models.Folder(owner=request.user, title="New Folder")
            newfolder.save()
            data = [[newfolder.guid, "New Folder"]]
        if "update" in request.POST:
            guid = request.POST.get("id", "")
            folder = bookshelf_models.Folder.objects.get(guid=guid)
            folder.title = request.POST.get("newname", "")
            folder.save()
            data = [[folder.guid, folder.title]]
        if "delete" in request.POST:
            folderid = request.POST.get("folderid", "")
            nbids = request.POST.getlist("nbids")
            folder = bookshelf_models.Folder.objects.get(owner=request.user, guid=folderid)
            folder.delete()
            # Also delete the notebooks that lived inside the folder.
            for nbid in nbids:
                nb = notebook_models.Notebook.objects.get(owner=request.user, guid=nbid)
                nb.delete()
            data = {"response": "ok"}
    jsobj = json.dumps(data)
    return HttpResponse(jsobj, mimetype='application/json')
|
29a3c58970188682724e429d4f8a8a244938f54c
| 3,644,494
|
def Sort_list_by_Prism_and_Date(lst):
    """
    Argument:
        - A list containing the prism name, position of recording, decimal year, position and meteo corrected position for each prism.
    Return:
        - A list containing lists of prisms sorted by name and date.
    """
    # text must be a converted GKA file
    outList = []  # [[Name, [Data]], [], [], ...]
    # Group the entries by prism name (k[0]).
    for k in lst:
        index = FindIndexByName(k[0], outList)
        # ``is not None`` rather than ``!= None``: identity comparison is the
        # correct idiom and makes clear that index 0 is a valid position.
        if index is not None:
            outList[index][1].append(k)
        else:
            outList.append([k[0], [k]])
    # Sort each prism's entries by ascending date (column 2 = decimal year).
    for j in outList:
        j[1] = SortCrescent(j[1], 2)
    return outList
|
164a4c8b646363b3d8c57068ee785b410cbc3cf7
| 3,644,497
|
def _convert_to_RVector(value, force_Rvec=True):
"""
Convert a value or list into an R vector of the appropriate type.
Parameters
----------
value : numeric or str, or list of numeric or str
Value to be converted.
force_Rvec : bool, default True
If `value` is not a list, force conversion into a R vector?
False will return an int, float, or str if value is non-list.
True will always return an R vector.
Returns
-------
int, float, str, an rpy2 R vector
A value or R vector of an appropriate data type.
"""
if not isinstance(value, list) and not force_Rvec:
return value
elif not isinstance(value, list) and force_Rvec:
value = [value]
else:
pass
if all(isinstance(x, bool) for x in value):
return ro.BoolVector(value)
elif all(isinstance(x, (int, np.integer)) for x in value):
return ro.IntVector(value)
elif all(isinstance(x, (int, np.integer, float, np.float)) for x in value):
return ro.FloatVector(value)
else:
return ro.StrVector(value)
|
cc71e8c8906084b33c1638a1423944576fb75366
| 3,644,498
|
def softmax(x):
    """Compute softmax values for each sets of scores in x.

    Subtracting ``max(x)`` stabilizes the exponentials. If the result still
    contains NaN (e.g. NaN in the input), a hard-coded three-class fallback
    ``[0.0, 1.0, 0.0]`` is returned — NOTE(review): callers appear to assume
    exactly three classes for that path; confirm.
    """
    e_x = np.exp(x - np.max(x))
    probs = e_x / e_x.sum(axis=0)
    # Truth-test directly instead of comparing ``== True``.
    if np.isnan(probs).any():
        return [0.0, 1.0, 0.0]
    return probs
|
ed3a4c5e60dbfaf86acec1357e7700492ab3f69d
| 3,644,499
|
import requests
from bs4 import BeautifulSoup
import dateutil
def fetch_events_art_history(base_url='https://www.sas.upenn.edu'):
    """
    Fetch events from the Art History Department listing pages.

    Scrapes the paginated /arthistory/events index, follows each event's
    detail page for its description, and returns a list of event dicts
    (title, date, location, description, start/end time, url, owner).
    Raises on network failure; assumes the Drupal markup seen at scrape
    time (pagination div, 'item-list' container, 'date-display-single').
    """
    page = requests.get(urljoin(base_url, '/arthistory/events'))
    page_soup = BeautifulSoup(page.content, 'html.parser')
    # Highest numeric label in the pager = number of listing pages.
    range_pages = max([int(n_page.text) for n_page in page_soup.find('div',
        attrs={'class': 'pagination pagination-centered'}).find_all('li') if n_page.text.isdigit()])
    events = []
    # NOTE(review): range(1, range_pages) never requests page=range_pages;
    # whether the last (or zeroth) page is skipped depends on the site's
    # page-numbering — confirm against the live pager.
    for n_page in range(1, range_pages):
        page = requests.get(
            (urljoin(base_url, '/arthistory/events?&page={}')).format(n_page))
        page_soup = BeautifulSoup(page.content, 'html.parser')
        all_events = page_soup.find(
            'div', attrs={'class': 'item-list'}).find_all('li')
        for event in all_events:
            event_url = urljoin(base_url, event.find('a')['href'])
            title = event.find('h3').text if event.find(
                'h3') is not None else ''
            # event_type = event.find('strong').text if event.find('strong') is not None else ''
            date = event.find('span', attrs={'class': 'date-display-single'})
            if date is not None:
                # 'content' attr is ISO-like "YYYY-MM-DDThh:mm[-hh:mm]".
                date, event_time = date.attrs.get('content').split('T')
                if '-' in event_time:
                    starttime, endtime = event_time.split('-')
                    # Bare except: keep the raw strings if parsing fails.
                    try:
                        starttime, endtime = dateutil.parser.parse(starttime).strftime(
                            "%I:%M %p"), dateutil.parser.parse(endtime).strftime("%I:%M %p")
                    except:
                        pass
                else:
                    starttime, endtime = event_time, ''
            else:
                date, starttime, endtime = '', '', ''
            location = event.find('div', attrs={'class': 'location'})
            location = location.text.strip() if location is not None else ''
            # One extra request per event to pull the long description.
            event_soup = BeautifulSoup(requests.get(
                event_url).content, 'html.parser')
            description = event_soup.find('div', attrs={'class': 'field-body'})
            description = description.text.strip() if description is not None else ''
            events.append({
                'title': title,
                'speaker': '',
                'date': date,
                'location': location,
                'description': description,
                'starttime': starttime,
                'endtime': endtime,
                'url': event_url,
                'owner': 'Art History'
            })
    return events
|
2c17219cbbdd94251db43f52459c196dada014fc
| 3,644,500
|
def calc_Q(nu=0.0, delta=0.0, lam=1.0, ret_k=False):
    """
    Calculate psic Q in the cartesian lab frame.

    Parameters
    ----------
    nu, delta : float
        Detector angles in degrees.
    lam : float
        Wavelength in angstroms.
    ret_k : bool
        If True return the tuple (Q, ki, kr) instead of just Q.
    """
    (ki, kr) = calc_kvecs(nu=nu, delta=delta, lam=lam)
    # Momentum transfer: scattered minus incident wavevector.
    Q = kr - ki
    # Truth-test the flag directly instead of comparing ``== True``.
    if ret_k:
        return (Q, ki, kr)
    return Q
|
9c5a9e885b1f78bab7a1de2bbf6a3de2d5723e18
| 3,644,501
|
def build_data(args):
    """
    Build the test-phase data pipeline.

    Constructs a MatchProcessor from the task/vocab settings in *args*,
    then a single-epoch, unshuffled generator over the 'test' split.

    :param args: parsed CLI namespace; must carry task_name, data_dir,
        vocab_path, max_seq_len, do_lower_case, batch_size and gpu.
    :return: (processor, [test_data_generator, num_test_examples])
    """
    # Task names are matched case-insensitively downstream.
    task_name = args.task_name.lower()
    processor = reader.MatchProcessor(data_dir=args.data_dir,
                                      task_name=task_name,
                                      vocab_path=args.vocab_path,
                                      max_seq_len=args.max_seq_len,
                                      do_lower_case=args.do_lower_case)
    # Test pass: one epoch, no shuffling, so results are reproducible.
    test_data_generator = processor.data_generator(
        batch_size=args.batch_size,
        phase='test',
        epoch=1,
        shuffle=False,
        device=args.gpu)
    num_test_examples = processor.get_num_examples(phase='test')
    test_data = [test_data_generator, num_test_examples]
    return processor, test_data
|
ca52d035d34a83e1de0b3cad261f03ce53fd2f0c
| 3,644,502
|
def format_date(d):
    """Date format used in the report.

    Accepts either a datetime/date object or a date string (parsed with
    dateutil) and returns its ISO-8601 representation.
    """
    # isinstance (not ``type(d) == str``) also accepts str subclasses.
    if isinstance(d, str):
        d = dateutil_parse(d)
    return d.isoformat()
|
de999992e16fe52f42f4b79bbb0a78668d3fa109
| 3,644,503
|
import torch
def pytorch_argmax(op):
    """Implementation of argmax for pytorch.

    Returns an implementation that reduces over an arbitrary *set* of
    dimensions (torch.argmax itself only takes one), plus the remaining
    op inputs.
    """
    def _impl(x, dim):
        # Normalize to a sorted tuple of reduction axes.
        dim = tuple(sorted(dim))
        # Build a permutation that moves all non-reduced axes first ...
        n = ()
        for _s in range(len(x.shape)):
            if _s not in dim:
                n = n + (_s,)
        n = n + dim
        x = x.permute(n)
        # ... then flatten the (now trailing) reduced axes into one.
        ns = x.shape[0 : -len(dim)] + (-1,)
        r = torch.argmax(x.reshape(ns), -1, keepdim=False)
        # Re-insert size-1 axes at each reduced position so the result's
        # rank matches the input (keepdim-style output).
        rl = list(r.shape)
        for _sd in dim:
            rl.insert(_sd, 1)
        rf = tuple(rl)
        # NOTE(review): the returned indices are offsets into the flattened
        # reduced axes, in permuted order — presumably what the caller
        # expects; confirm against the framework's argmax contract.
        return (torch.reshape(r, rf),)
    return _impl, op.inputs[1:]
|
cc466b41c0dd4bb9730dcdf50816b9d0cf66cfaa
| 3,644,504
|
def parse_eos(eos):
    """Interpret *eos* as an equation-of-state object.

    Accepts an existing EOS instance (anything with ``asq_of_rho_p``),
    the letter 'H'/'h' for simple hydrogen, or a numeric gamma for an
    ideal gas. Anything else raises ValueError.
    """
    if hasattr(eos, 'asq_of_rho_p'):
        # Already an EOS object: pass it through untouched.
        return eos
    if eos in ('H', 'h'):
        return SimpleHydrogen()
    try:
        return Ideal(float(eos))
    except ValueError:
        raise ValueError('Cannot parse EOS "{0:}".'.format(eos))
|
2303a9028b89647fae4b9a4fca0363826310b730
| 3,644,506
|
def get_2D_hse_kpoints(struct_for_path, ibzkpth):
    """
    Args:
        struct_for_path: Structure from which linemode k-points will
            be generated.
        ibzkpth: path to the IBZKPT file produced by the prep step.
    Returns:
        the Kpoints file object in the form of a string
        ready for execution by MPInterfaces
        calibrate objects
    Side effects:
        writes 'KPOINTS_linemode' in the working directory and reads it
        back after remove_z_kpoints_linemode() filters it.
    """
    # Read IBZKPT from prep step
    ibz_lines = open(ibzkpth).readlines()
    # Line 2 of IBZKPT holds the k-point count.
    n_ibz_kpts = int(ibz_lines[1].split()[0])
    # Read linemode KPOINTs from the dict (makes sure it is Kpoints
    # file with only 20 per atom for the optimized settings
    # Kpoints.from_dict(kpoint_dict).write_file('linemode_KPOINTS')
    kpath = HighSymmKpath(struct_for_path)
    Kpoints.automatic_linemode(20, kpath).write_file('KPOINTS_linemode')
    remove_z_kpoints_linemode()
    linemode_lines = open('KPOINTS_linemode').readlines()
    # put them together
    abs_path = []
    # Each segment in a linemode file is a (start, end, blank) triple
    # beginning at line 5 — hence the stride of 3 from index 4.
    for i in range(4, len(linemode_lines), 3):
        start_kpt = linemode_lines[i].split()
        end_kpt = linemode_lines[i+1].split()
        # Per-axis step for 20 interpolated points along the segment.
        increments = [
            (float(end_kpt[0]) - float(start_kpt[0])) / 20,
            (float(end_kpt[1]) - float(start_kpt[1])) / 20,
            (float(end_kpt[2]) - float(start_kpt[2])) / 20
        ]
        # Endpoints keep their labels (field 4); interior points get
        # weight '0' so they do not contribute to the SCF mesh.
        abs_path.append(start_kpt[:3] + ['0', start_kpt[4]])
        for n in range(1, 20):
            abs_path.append(
                [str(float(start_kpt[0]) + increments[0] * n),
                 str(float(start_kpt[1]) + increments[1] * n),
                 str(float(start_kpt[2]) + increments[2] * n), '0']
            )
        abs_path.append(end_kpt[:3] + ['0', end_kpt[4]])
    n_linemode_kpts = len(abs_path)
    # write out the kpoints file and return the object
    Kpoints_hse_file = '\n'.join(
        ['Automatically generated mesh',
        '{}'.format(n_ibz_kpts + n_linemode_kpts),
        'Reciprocal Lattice',
        '{}'.format(str(''.join([line for line in ibz_lines[3:]])))]) + \
        '{}'.format(str('\n'.join(
            [' '.join(point) for point in abs_path])))
    ## can be used for test print out
    # with open('KPOINTS_HSE', 'w') as kpts:
    #     kpts.write('Automatically generated mesh\n')
    #     kpts.write('{}\n'.format(n_ibz_kpts + n_linemode_kpts))
    #     kpts.write('Reciprocal Lattice\n')
    #     for line in ibz_lines[3:]:
    #         kpts.write(line)
    #     for point in abs_path:
    #         kpts.write('{}\n'.format(' '.join(point)))
    return Kpoints_hse_file
|
e4ad65df4f4fc41c0af48e84dfd9b9bbddea9e20
| 3,644,507
|
def neutralize(word, g, word_to_vec_map):
    """
    Removes the bias of "word" by projecting it on the space orthogonal to the bias axis.
    This function ensures that gender neutral words are zero in the gender subspace.
    Arguments:
        word -- string indicating the word to debias
        g -- numpy-array of shape (50,), corresponding to the bias axis (such as gender)
        word_to_vec_map -- dictionary mapping words to their corresponding vectors.
    Returns:
        e_debiased -- neutralized word vector representation of the input "word"
    """
    embedding = word_to_vec_map[word]
    # Component of the embedding lying along the bias axis:
    # (e . g / ||g||^2) * g
    bias_norm_sq = np.square(np.linalg.norm(g))
    bias_component = (np.dot(embedding, g) / bias_norm_sq) * g
    # Removing that component leaves the projection orthogonal to g.
    return embedding - bias_component
|
a732050ef214fe29c6e234cea2f0a7d63b784829
| 3,644,509
|
def loadRowCluster(ndPage, algo):
    """Return the <Cluster> nodes under *ndPage* whose @algo equals *algo*.

    Queries the page node with XPath and returns the matching element list.
    """
    query = f".//Cluster[@algo='{algo}']"
    return ndPage.xpath(query)
|
dcb75214e58d6656f58bee78b904562c05fd36d8
| 3,644,510
|
def _elementwise(f):
    """ Enables elementwise operations
    The wrapper implements two different modes of argument evaluation
    for given p_1,..., p_k that represent the predicted distributions
    and and x_1,...,x_m that represent the values to evaluate them on.
    "elementwise" (default): Repeat the sequence of p_i until there are m,
    i.e., p_1,...,p_k,p_1,p_2,...,p_k,p_1,...,p_m'
    where m' is the remainder of dividing m by k.
    "batch": x_1, ..., x_m is evaluated on every distribution p_i
    resulting in a matrix m columns and k rows.
    Parameters
    ----------
    f: The function to decorate
    Returns
    -------
    Decorated function
    """
    def wrapper(self, x, *args, **kwargs):
        # Flatten any multi-dimensional input to a 1-D sequence of points.
        if len(np.array(x).shape) > 1:
            x = x.flatten()
        # cache index — self.index is mutated below and must be restored,
        # because f() reads it to pick the active distribution.
        index_ = self.index
        self.index = slice(None)
        # disable elementwise mode if x is scalar
        elementwise = (self.mode == 'elementwise' and len(np.array(x).shape) != 0)
        if elementwise:
            # One evaluation per input point.
            evaluations = len(x)
        else:
            # Batch mode: one evaluation per stored distribution.
            evaluations = len(self.X)
        # compose result
        result = []
        number_of_points = len(self.X)
        for index in range(evaluations):
            # set evaluation index and point
            if elementwise:
                # Cycle through the distributions: i-th point uses
                # distribution (i mod k).
                self.index = index % number_of_points
                at = x[index]
            else:
                # Batch: fix the distribution, evaluate on all of x.
                self.index = index
                at = x
            # evaluate the function at this point
            result.append(f(self, at, *args, **kwargs))
        # rollback index
        self.index = index_
        # Scalar result stays scalar; multiple results become an array.
        if len(result) > 1:
            return np.array(result)
        else:
            return result[0]
    return _forward_meta(wrapper, f)
|
7cb9a17c648384e07bde3b57415244efd7e34e8a
| 3,644,511
|
def is_valid_task_id(task_id):
    """
    Return False if task ID is not valid.

    Expected shape: "<prefix>-XXXXXXXX-XXXX-XXXX-XXXX" — a prefix plus four
    dash-separated groups of lengths 8/4/4/4.  The prefix itself is split
    with RE_TASK_PREFIX and must alternate digit parts with tokens from the
    module-level TT and TG sets.
    """
    parts = task_id.split('-')
    # Four trailing groups with the UUID-like 8/4/4/4 lengths.
    if len(parts) == 5 and [len(i) for i in parts[1:]] == [8, 4, 4, 4]:
        tp = RE_TASK_PREFIX.split(parts[0])
        # tp must be [digits, TT-token, digits, TG-token, digits]:
        # even slots numeric, odd slots valid type/group tokens.
        return (len(tp) == 5 and
                all(i.isdigit() for i in tp[::2]) and
                tp[1] in TT and
                tp[3] in TG)
    return False
|
d39e26ae52d96f9c6ed0bf5fea2ac317d5b9e8af
| 3,644,512
|
def figure_14_9():
    """Return the unweighted, undirected graph from Figure 14.9 of DSAP.
    This is the same graph as in Figure 14.10.
    """
    # BUG FIX: ('F', 'I') was previously written as ('F' 'I'), which
    # string-concatenates to the single vertex name 'FI' and silently
    # dropped the F-I edge while adding a spurious 'FI' vertex.
    E = (
        ('A', 'B'), ('A', 'E'), ('A', 'F'), ('B', 'C'), ('B', 'F'),
        ('C', 'D'), ('C', 'G'), ('D', 'G'), ('D', 'H'), ('E', 'F'),
        ('E', 'I'), ('F', 'I'), ('G', 'J'), ('G', 'K'), ('G', 'L'),
        ('H', 'L'), ('I', 'J'), ('I', 'M'), ('I', 'N'), ('J', 'K'),
        ('K', 'N'), ('K', 'O'), ('L', 'P'), ('M', 'N'),
    )
    return graph_from_edgelist(E, False)
|
d81a11aa46bd62942c880dfa8f0a724801979449
| 3,644,513
|
def audit_umbrelladns(networks_fwrules):
    """Accepts a list of firewall rules for a client.

    Checks each customer's networks for rules that allow DNS lookups to the
    Umbrella resolvers and deny all other DNS lookups.

    Returns a list of per-customer dicts with organizationId,
    organizationName and 'umbrellaDns' set to the strings 'True'/'False'
    (reflecting the customer's last-listed network, as before).
    """
    umbrelladns_audit = []
    # Both Umbrella anycast resolvers must appear in the allow rule.
    umbrella_hosts = ('208.67.222.222/32', '208.67.220.220/32')
    for customer in networks_fwrules:
        customer_result = {
            'organizationId': customer['organizationId'],
            'organizationName': customer['organizationName']
        }
        for network in customer['networks']:
            # BUG FIX: the original compared these flags with ``is 'True'``
            # (identity against a string literal) — use real booleans.
            umbrella_allow = False
            dns_deny = False
            for rule in network.get('l3FirewallRules', []):
                # Only DNS rules (tcp/udp port 53) are relevant.
                if rule['protocol'] not in ('tcp', 'udp') or rule['destPort'] != '53':
                    continue
                if rule['policy'] == 'allow':
                    destcidr = rule['destCidr'].split(",")
                    if all(host in destcidr for host in umbrella_hosts):
                        umbrella_allow = True
                elif rule['policy'] == 'deny' and rule['destCidr'] == 'Any':
                    dns_deny = True
            # Keep the original string-valued output for callers.
            if umbrella_allow and dns_deny:
                customer_result['umbrellaDns'] = 'True'
            else:
                customer_result['umbrellaDns'] = 'False'
        umbrelladns_audit.append(customer_result)
    return umbrelladns_audit
|
26c01011dee998ba398db03603c61c00845055ea
| 3,644,514
|
from typing import Tuple
import itertools
def parse_element_container(elem: ET.Element) -> Tuple[Types.FlexElement, ...]:
    """Parse XML element container into FlexElement subclass instances.
    """
    if elem.tag == "FxPositions":
        # <FxPositions> nests an <FxLots> wrapper per currency:
        #   <FxPositions><FxLots><FxLot /></FxLots></FxPositions>
        # Recurse into each wrapper and flatten the results into one tuple.
        nested = (parse_element_container(child) for child in elem)
        return tuple(itertools.chain.from_iterable(nested))
    # Ordinary container: parse each child as a data element.
    return tuple(parse_data_element(child) for child in elem)
|
477776ff49e47fb0ca45767c5a74ff6941d0abb0
| 3,644,515
|
def _is_smooth_across_dateline(mid_lat, transform, rtransform, eps):
    """
    test whether the CRS is smooth over the dateline
    idea borrowed from IsAntimeridianProjToWGS84 with minor mods...

    Takes two points eps degrees either side of +/-180 longitude at
    mid_lat, projects them (rtransform), and requires (a) the projected
    points be close together and (b) unprojecting them (transform) lands
    back near where they started.
    """
    # Project points just left and right of the dateline into the CRS.
    left_of_dt_x, left_of_dt_y, _ = rtransform.TransformPoint(180-eps, mid_lat)
    right_of_dt_x, right_of_dt_y, _ = rtransform.TransformPoint(-180+eps, mid_lat)
    # If the projected images are far apart (>1 in projected units, presumably
    # meters — confirm), the CRS splits at the dateline: not smooth.
    if _dist(right_of_dt_x-left_of_dt_x, right_of_dt_y-left_of_dt_y) > 1:
        return False
    # Round-trip back to lon/lat; each point must land within ~2*eps of its
    # original location, otherwise the mapping is discontinuous there.
    left_of_dt_lon, left_of_dt_lat, _ = transform.TransformPoint(left_of_dt_x, left_of_dt_y)
    right_of_dt_lon, right_of_dt_lat, _ = transform.TransformPoint(right_of_dt_x, right_of_dt_y)
    if (_dist(left_of_dt_lon - 180 + eps, left_of_dt_lat - mid_lat) > 2 * eps or
            _dist(right_of_dt_lon + 180 - eps, right_of_dt_lat - mid_lat) > 2 * eps):
        return False
    return True
|
c1058bb24f254ce7158ec69872cfec1081a3027c
| 3,644,516
|
def reverse_args(func: Func) -> fn:
    """
    Creates a function that invokes func with the positional arguments order
    reversed.
    Examples:
        >>> concat = sk.reverse_args(lambda x, y, z: x + y + z)
        >>> concat("a", "b", "c")
        'cba'
    """
    wrapped = to_callable(func)

    def _call_reversed(*args, **kwargs):
        # Positional arguments are flipped; keyword arguments pass through.
        return wrapped(*args[::-1], **kwargs)

    return fn(_call_reversed)
|
e1e734f767fb187f9563f51d1f106ebfc17ebbfb
| 3,644,517
|
def ar(x, y, z):
    """Return ``np.arange(x, y, z)`` offset by half a step (z/2)."""
    grid = np.arange(x, y, z, dtype='float')
    return grid + z / 2
|
0aca14778dd4ba814d9303146d3226f6645f2366
| 3,644,518
|
def dot(inputs, axes, normalize=False, **kwargs):
    """Functional interface to the `Dot` layer.
    Args:
        inputs: A list of input tensors (at least 2).
        axes: Integer or tuple of integers,
            axis or axes along which to take the dot product.
        normalize: Whether to L2-normalize samples along the
            dot product axis before taking the dot product.
            If set to True, then the output of the dot product
            is the cosine proximity between the two samples.
        **kwargs: Standard layer keyword arguments.
    Returns:
        A tensor, the dot product of the samples from the inputs.
    """
    # Construct a Dot layer and immediately apply it to the inputs.
    return Dot(axes=axes, normalize=normalize, **kwargs)(inputs)
|
2e5d83aad376e82b7938ebfec8cef3074bec3c58
| 3,644,519
|
import inspect
def get_all_methods(klass):
    """Get all method members (regular, static, class method).

    Dunder methods are excluded. Raises ValueError when *klass* is not a
    class. Returns (name, value) pairs sorted by name (getmembers order).
    """
    if not inspect.isclass(klass):
        raise ValueError
    members = inspect.getmembers(klass, inspect.isroutine)
    return [(name, value)
            for name, value in members
            if not name.startswith("__") and not name.endswith("__")]
|
ada4f47c750455ddd1300f26eb3e296b046acefe
| 3,644,521
|
import pathlib
def _suffix_directory(key: pathlib.Path):
"""Converts '/folder/.../folder/folder/folder' into 'folder/folder'"""
key = pathlib.Path(key)
shapenet_folder = key.parent.parent
key = key.relative_to(shapenet_folder)
return key
|
147539065c3d21ee351b23f2d563c662fe55f04a
| 3,644,522
|
def setDesktop( studyID ):
    """This method sets and returns TRUST_PLOT2D desktop.

    Lazily creates (and initializes) one DynamicDesktop per study in the
    module-level moduleDesktop cache, then points the module-level
    ``desktop`` at the one for *studyID* and returns it.
    """
    global moduleDesktop, desktop
    # Create-on-first-use: each study gets exactly one desktop instance.
    if studyID not in moduleDesktop:
        moduleDesktop[studyID] = DynamicDesktop( sgPyQt )
        moduleDesktop[studyID].initialize()
    desktop = moduleDesktop[studyID]
    return desktop
|
fd7ad5b57a832e4d6d4adbf2b5fbf973cc1b9e3e
| 3,644,523
|
def load(file_path: str):
    """Used for loading dataset files that have been downloaded.
    Args:
        file_path: Path to an .npz archive containing 'x' and 'y' arrays.
    Returns:
        x: Data used to train models.
        y: Dataset labels.
    Example:
        >>> data, labels = load("model/mnist.npz")
        >>> print(data[0]); print(labels[0])
    """
    with np.load(file_path) as archive:
        return archive['x'], archive['y']
|
47e045d343509322cf9f845f454a99bf6f34cde7
| 3,644,524
|
def xrefchar(*args):
    """
    xrefchar(xrtype) -> char
    Get character describing the xref type.
    @param xrtype: combination of Cross-Reference type flags and a
                   cref_t of dref_t value (C++: char)
    """
    # Thin SWIG wrapper: all work happens in the native _ida_xref module.
    return _ida_xref.xrefchar(*args)
|
a6991e0a56710359804d21b79b86ed3ead852769
| 3,644,525
|
def problem_5_14_8(scalars, vectors):
    """
    Return the linear combination sum(scalars[i] * vectors[i]).

    >>> u = list2vec([1,1,0,0])
    >>> v = list2vec([0,1,1,0])
    >>> w = list2vec([0,0,1,1])
    >>> x = list2vec([1,0,0,1])
    >>> problem_5_14_8([1, -1, 1], [u, v, w]) == x
    True
    >>> problem_5_14_8([-1, 1, 1], [u, v, x]) == w
    True
    >>> problem_5_14_8([1, 1, -1], [u, w, x]) == v
    True
    >>> problem_5_14_8([1, -1, 1], [v, w, x]) == u
    True
    """
    # Delegates entirely to the course-provided helper.
    return lin_comb_sum(scalars, vectors)
|
e8456cbf7a0e47519003c3b3a414560c1d1ee5ac
| 3,644,526
|
def atomic(fn, self, *args, **kwargs):
    """
    Atomic method.

    Delegates to ``self._atom`` so *fn* is invoked with (args, kwargs)
    under whatever atomicity guarantee _atom provides — presumably a lock
    or transaction; confirm against the owning class.
    """
    return self._atom(fn, args, kwargs)
|
96fdd8451bb534deefb2ffbe101526838d75fa6e
| 3,644,527
|
def text_to_string(filename, useEncoding):
    """Return the full contents of *filename* decoded with *useEncoding*.

    Undecodable bytes are silently ignored rather than raising.
    """
    with open(filename, encoding=useEncoding, errors='ignore') as handle:
        contents = handle.read()
    return contents
|
f879bb747699496204820b74944fd563658a7117
| 3,644,528
|
def forward_propagation(x, paras, bn_paras, decay=0.9):
    """ forward propagation function
    Paras
    ------------------------------------
    x: input dataset, of shape (input size, number of examples)
    W: weight matrix of shape (size of current layer, size of previous layer)
    b: bias vector of shape (size of current layer,1)
    gamma: scale vector of shape (size of current layer ,1)
    beta: offset vector of shape (size of current layer ,1)
    decay: the parameter of exponential weight average
        moving_mean: decay * moving_mean + (1 - decay) * current_mean
        moving_var: decay * moving_var + (1 - decay) * moving_var
    Returns
    ------------------------------------
    y: the output of the last Layer(y_predict)
    caches: list, every element is a tuple:(W,b,z,A_pre)
    """
    # paras holds W/b/gamma/beta per hidden layer, hence the division by 4.
    # NOTE(review): this assumes the output layer's W/b are counted in the
    # same dict — confirm the exact key layout against the initializer.
    L = len(paras) // 4  # number of layer
    caches = []
    # calculate from 1 to L-1 layer: linear -> batch-norm -> relu
    for l in range(1, L):
        W = paras["W" + str(l)]
        b = paras["b" + str(l)]
        gamma = paras["gamma" + str(l)]
        beta = paras["beta" + str(l)]
        # linear forward -> relu forward ->linear forward....
        z = linear(x, W, b)
        mean, var, sqrt_var, normalized, out = batch_norm(z, gamma, beta)
        # Cache everything the backward pass needs for this layer.
        caches.append((x, W, b, gamma, sqrt_var, normalized, out))
        x = relu(out)
        # Exponential moving averages of the batch statistics, used at
        # inference time in place of per-batch mean/var.
        bn_paras["moving_mean" + str(l)] = decay * bn_paras["moving_mean" + str(l)] + (1 - decay) * mean
        bn_paras["moving_var" + str(l)] = decay * bn_paras["moving_var" + str(l)] + (1 - decay) * var
    # calculate Lth layer: plain linear + sigmoid, no batch norm.
    W = paras["W" + str(L)]
    b = paras["b" + str(L)]
    z = linear(x, W, b)
    caches.append((x, W, b, None, None, None, None))
    y = sigmoid(z)
    return y, caches, bn_paras
|
eb2955f9bff056ad1639d2b63a39a6ff40293400
| 3,644,529
|
def sentence_indexes_for_fragment(fragment: Fragment, sentences: list) -> list:
    """Get the start and end indexes in the whole article for the sentences encompassing a fragment."""
    first = sentence_index_for_fragment_index(fragment.start, sentences)
    last = sentence_index_for_fragment_index(fragment.end, sentences)
    # Inclusive span: the sentence containing the fragment's end is included.
    return list(range(first, last + 1))
|
08ec8df9c9e7e06f20dd6554a9da3a0ca89e4f53
| 3,644,530
|
def train_and_eval(trial: optuna.Trial, ex_dir: str, seed: [int, None]):
    """
    Objective function for the Optuna `Study` to maximize.
    .. note::
        Optuna expects only the `trial` argument, thus we use `functools.partial` to sneak in custom arguments.
    :param trial: Optuna Trial object for hyper-parameter optimization
    :param ex_dir: experiment's directory, i.e. the parent directory for all trials in this study
    :param seed: seed value for the random number generators, pass `None` for no seeding
    :return: objective function value
    """
    # Synchronize seeds between Optuna trials
    pyrado.set_seed(seed)
    # Environment: Quanser Qube sim at 250 Hz, 6 s episodes, normalized actions.
    env_hparams = dict(dt=1/250., max_steps=1500)
    env = QQubeSim(**env_hparams)
    env = ActNormWrapper(env)
    # Policy: linear in a fixed, hand-picked feature stack.
    policy_hparam = dict(
        feats=FeatureStack([identity_feat, sign_feat, abs_feat, squared_feat, bell_feat, MultFeat([4, 5])])
    )
    policy = LinearPolicy(spec=env.spec, **policy_hparam)
    # Algorithm: PoWER with the search space defined by the trial suggestions.
    algo_hparam = dict(
        num_sampler_envs=1,  # parallelize via optuna n_jobs
        max_iter=150,
        pop_size=trial.suggest_categorical('pop_size', [100, 150, 200, 250]),
        num_rollouts=trial.suggest_categorical('num_rollouts', [4, 6, 8, 10, 12]),
        num_is_samples=trial.suggest_categorical('num_is_samples', [50, 100, 150, 200]),
        expl_std_init=trial.suggest_uniform('expl_std_init', 0.2, 1.5),
        expl_std_min=0.02,
        symm_sampling=trial.suggest_categorical('symm_sampling', [True, False]),
    )
    # Each trial logs into its own sub-directory of ex_dir.
    csv_logger = create_csv_step_logger(osp.join(ex_dir, f'trial_{trial.number}'))
    algo = PoWER(osp.join(ex_dir, f'trial_{trial.number}'), env, policy, **algo_hparam, logger=csv_logger)
    # Train without saving the results
    algo.train(snapshot_mode='latest', seed=seed)
    # Evaluate: mean undiscounted return over 1000 fresh rollouts.
    min_rollouts = 1000
    sampler = ParallelSampler(env, policy, num_envs=1, min_rollouts=min_rollouts)  # parallelize via optuna n_jobs
    ros = sampler.sample()
    mean_ret = sum([r.undiscounted_return() for r in ros])/min_rollouts
    return mean_ret
|
128b94452d3a398efe5b754e4e3dacf25bd5e165
| 3,644,532
|
def alert_query(alert, authz):
    """Construct a search query to find new matching entities and documents
    for a particular alert. Update handling is done via a timestamp of the
    latest known result."""
    # Many users have bookmarked complex queries, otherwise we'd use a
    # precise match query.
    text_query = {
        'simple_query_string': {
            'query': alert.query,
            'fields': ['text'],
            'default_operator': 'AND',
            'minimum_should_match': '90%'
        }
    }
    # Only results created after the last notification count as "new".
    since_filter = {
        'range': {
            'created_at': {'gt': alert.notified_at}
        }
    }
    body = {
        'size': MAX_PAGE,
        'query': {
            'bool': {
                'should': [text_query],
                'filter': [since_filter, authz_query(authz)],
                'minimum_should_match': 1
            }
        }
    }
    return body
|
c7181e174613ea61fe67d6165f1022a10ab5862e
| 3,644,533
|
def iscomment(s):
    """Return True if *s* is a comment line in a MontePython chain file.

    Comment lines are those beginning with the '#' character.
    """
    return s[:1] == '#'
|
ab3a9d240e423c562c9e83cdd9599fddf144b7c3
| 3,644,534
|
def fix_bayes_factor(bayes_factor):
    """Normalise a Bayes-factor value that may arrive as a string.

    If one of the Bayes factors is 'inf' the upstream code yields a
    comma-separated string instead of a tuple. In that case, parse the
    string, cap each value at 1e12, and return the first entry.
    Non-string inputs are returned unchanged.
    """
    # Upper cut-off so that 'inf' entries become a large finite number.
    cap = 1e12
    if isinstance(bayes_factor, str):
        parts = bayes_factor.split(",")
        capped = tuple(min(float(part), cap) for part in parts)
        bayes_factor = capped[0]
    return bayes_factor
|
7e7912ea9b0c90f0945f486aa397a2df2d13d5cc
| 3,644,536
|
def fiebelkorn_binning(x_trial, t_trial):
    """
    Given accuracy and time-points, find the time-smoothed average accuracy

    Parameters
    ----------
    x_trial : np.ndarray
        Accuracy (Hit: 1, Miss: 0) of each trial
    t_trial : np.ndarray
        The time-stamp of each trial

    Returns
    -------
    x_bin : np.ndarray
        The average accuracy within each time bin
    t_bin : np.ndarray
        The centers of each time bin
    """
    details = behav_details['fiebelkorn']
    # Centers of the (possibly overlapping) time bins; the tiny epsilon
    # keeps the end point inclusive despite float rounding.
    t_bin = np.arange(details['t_start'],
                      details['t_end'] + 1e-10,
                      details['bin_step'])
    half_width = details['bin_width'] / 2
    accuracy_per_bin = []
    for center in t_bin:
        # Trials whose time-stamp falls within this bin (inclusive edges).
        in_bin = (center - half_width <= t_trial) & (t_trial <= center + half_width)
        accuracy_per_bin.append(np.mean(x_trial[in_bin]))
    return np.array(accuracy_per_bin), t_bin
|
29651c03dba351475c881d77a08da618ba89aa6a
| 3,644,537
|
def get_fastest_while_jump(condition: str, jump_tag: str, verdicts: list) -> list:
    """Emit jump instruction(s) for a while-loop head.

    Verdicts look like ``["while", "a", "<", "10"]``.
    """
    # Operators the target can jump on directly.
    directly_jumpable = ("===", ) + tuple(INVERT_TABLE.keys())
    if len(verdicts) == 2:
        # Bare truthiness test, e.g. ["while", "flag"].
        return [F"jump-if {jump_tag} {verdicts[1]} != false"]
    if verdicts[2] in directly_jumpable:
        # Comparison supported natively by jump-if.
        return [F"jump-if {jump_tag} " + (" ".join(verdicts[1:]))]
    # Otherwise evaluate the condition into a temporary, then jump on it.
    return [
        create_temporary_xlet(condition, verdicts[1:]),
        F"jump-if {jump_tag} {condition} != false",
    ]
|
16f4b8ba1e180dbad22e93f6bf08ab52eecb0086
| 3,644,538
|
import torch
def create_hcp_sets(skeleton, side, directory, batch_size, handedness=0):
    """
    Creates datasets from HCP data
    IN: skeleton: boolean, True if input is skeleton, False otherwise,
        side: str, 'right' or 'left'
        handedness: int, 0 if mixed ind, 1 if right handed, 2 if left handed
        directory: str, folder in which save the results
        batch_size: int, size of training batches
        weights: list, list of weights to apply to skeleton values
    OUT: root_dir: created directory where results will be stored
         dataset_train_loader, dataset_val_loader, dataset_test_loader: loaders
         that will be used for training and testing
    """
    # NOTE(review): requires a CUDA device; data tensors are moved to 'cuda'
    # unconditionally below.
    print(torch.cuda.current_device())
    # Date stamp used to build a unique results folder name.
    date_exp = date.today().strftime("%d%m%y")
    if skeleton == True:
        skel = 'skeleton'
        loss_type = 'CrossEnt'
        root_dir = directory + side + '_hemi_' + skel + '_' + date_exp + '_' +loss_type + '_' + str(handedness) + '_2classes/'
    else:
        skel = 'norm_spm'
        loss_type = 'L2'
        root_dir = directory + side + '_hemi_' + skel + '_' + date_exp + '_' +loss_type + '_' + str(handedness) +'/'
    #print("Parameters : skeleton: {}, side: {}, weights: {}, loss_type: {}".format(skeleton, side, weights, loss_type))
    print(root_dir)
    save_results.create_folder(root_dir)
    # Load the pickled crop data. The on-disk layout differs between the
    # mixed-handedness file (subjects as columns) and the per-handedness
    # files (subjects as rows with a 'Subject' column), hence the two
    # different tensor-building expressions.
    if skeleton:
        data_dir = '/neurospin/dico/lguillon/skeleton/sts_crop/'
        #data_dir = '/home_local/lg261972/data/'
        if handedness == 0:
            input_data = 'sts_crop_skeleton_' + side
            tmp = pd.read_pickle(data_dir + input_data +'.pkl')
            filenames = list(tmp.columns)
            tmp = torch.from_numpy(np.array([tmp.loc[0].values[k] for k in range(len(tmp))]))
        else:
            if handedness == 1:
                input_data = side + '_hemi_rightH_sts_crop_skeleton'
            else:
                input_data = side + '_hemi_leftH_sts_crop_skeleton'
            print(input_data)
            tmp = pd.read_pickle(data_dir + input_data +'.pkl')
            filenames = tmp.Subject.values
            print(len(filenames))
            tmp = torch.from_numpy(np.array([tmp.loc[k].values[0] for k in range(len(tmp))]))
    else:
        data_dir = '/neurospin/dico/lguillon/hcp_cs_crop/sts_crop/'+ side + '_hemi/'
        # NOTE(review): this immediately overwrites the line above — the
        # first assignment is dead; confirm which location is intended.
        data_dir = '/home_local/lg261972/data/'
        if handedness == 0:
            input_data = 'sts_crop_' + side
            tmp = pd.read_pickle(data_dir + input_data +'.pkl')
            filenames = list(tmp.columns)
            tmp = torch.from_numpy(np.array([tmp.loc[0].values[k] for k in range(len(tmp))]))
        else:
            if handedness == 1:
                input_data = side + '_hemi_rightH_sts_crop'
            else:
                input_data = side + '_hemi_leftH_sts_crop'
            print(input_data)
            tmp = pd.read_pickle(data_dir + input_data +'.pkl')
            filenames = tmp.Subject.values
            print(len(filenames))
            tmp = torch.from_numpy(np.array([tmp.loc[k].values[0] for k in range(len(tmp))]))
    tmp = tmp.to('cuda')
    hcp_dataset = TensorDataset(filenames=filenames, data_tensor=tmp,
                                skeleton=skeleton, vae=False)
    # Split training set into train, val and test (70/20/10).
    partition = [0.7, 0.2, 0.1]
    print([round(i*(len(hcp_dataset))) for i in partition])
    train_set, val_set, test_set = torch.utils.data.random_split(hcp_dataset,
                                    [round(i*(len(hcp_dataset))) for i in partition])
    #train_set = AugDatasetTransformer(train_set)
    #val_set = AugDatasetTransformer(val_set)
    #test_set = AugDatasetTransformer(test_set)
    # Only the training loader is batched; val/test loaders yield one
    # sample at a time (default batch_size=1).
    dataset_train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size,
                                                       shuffle=True, num_workers=0)
    dataset_val_loader = torch.utils.data.DataLoader(val_set, shuffle=True,
                                                     num_workers=0)
    dataset_test_loader = torch.utils.data.DataLoader(test_set, shuffle=True,
                                                      num_workers=0)
    print("Dataset generated \n Size of training dataset :", len(dataset_train_loader))
    return root_dir, dataset_train_loader, dataset_val_loader, dataset_test_loader
|
32615f19b70b8d78240adc1c2f60c5191f4c93fb
| 3,644,542
|
def rtrim(n):
    """Return a transform that removes the rightmost *n* points.

    The returned callable takes x/y sequences plus any extra positional
    payloads, trims the last *n* entries from x and y, and passes the
    extras through unchanged.
    """
    def t(xarr, yarr, *args):
        return (xarr[:-n], yarr[:-n]) + args
    # Give the transform a readable name for debugging. The original used a
    # bytes literal here, but bytes has no .format() and __name__ must be a
    # str — it raised TypeError as soon as rtrim() was called.
    t.__name__ = 'rtrim({})'.format(n)
    return t
|
583e4e2b9eef8281002760ccb1d336b9fdff36af
| 3,644,543
|
def analyze_avg_prof_quality_by_department(dict_cursor, departmentID, campus):
    """
    Print average professor-quality statistics for one department/campus.

    Thin wrapper that dispatches to the shared two-argument analysis helper
    with the DEPARTMENT_QUALITY_BY_DID query method.

    >>> analyze_avg_prof_quality_by_department(dict_cursor, 'CSC', 'St. George')
    CSC
    enthusiasm  3.95
    course_atmosphere  3.90
    ...
    (This is not complete)
    """
    return __analyze_data_by_DB_GETMETHOD_WITH_TWO_ARGS(DEPARTMENT_QUALITY_BY_DID, dict_cursor, departmentID, campus)
|
c44b74181e223c4a543575dd42f1db73b57e48b9
| 3,644,544
|
import re
import json
def parse_to_json(data_str):
    """Convert a '%'-separated, comma-delimited string into JSON object strings.

    Each record such as ``"a:1,b:2"`` becomes the string
    ``'{"a":"1", "b":"2"}'``. Returns one string per record.
    """
    json_obj_list = []
    for record in data_str.split('%'):
        fields = record.split(',')
        encoded = json.dumps(fields)
        # Turn "a:1" items into "a":"1" pairs, then swap the JSON list
        # brackets for object braces.
        encoded = encoded.replace(':', '":"').replace('[', '{').replace(']', '}')
        json_obj_list.append(encoded)
    return json_obj_list
|
288911694548fd603a3a261ac9c51c5c971599e0
| 3,644,546
|
def calculate_elbo(model, X, recon_X):
"""
Compute the ELBO of the model with reconstruction error and KL divergence..
"""
rec_loss = - np.sum(X * np.log(1e-8 + recon_X)
+ (1 - X) * np.log(1e-8 + 1 - recon_X), 1)
mu, logvar = model.transform(X)
kl = -0.5 * np.sum(1 + logvar - mu ** 2 - np.exp(logvar), 1)
elbo = np.mean(rec_loss + kl)
return elbo
|
aa3f2123bcc8ed0ee62b0b28a4fb3aeb0c1c886c
| 3,644,547
|
def dice_loss(pred, target):
    """
    Dice Loss based on Dice Similarity Coefficient (DSC)
    @param pred: torch.tensor, model prediction
    @param target: torch.tensor, ground truth label
    @return: 1 - DSC, so perfect overlap yields zero loss
    """
    # dice_coeff is the module's DSC implementation (defined elsewhere).
    return 1 - dice_coeff(pred, target)
|
9f940c09c4dac7477c6f77f2ecf632b95107f04f
| 3,644,548
|
import struct
def parse(transaction):
    """ Parses Bitcoin Transaction into it's component parts

    Takes a hex-encoded transaction string and returns a tuple of
    (version, inputs, outputs, blockLockTime), where inputs is a list of
    [inHash, inIndex, script, sequence] and outputs a list of
    [value, script].

    NOTE(review): this is Python 2 code — it relies on str.decode("hex") /
    str.encode("hex"), which do not exist on Python 3 str.
    """
    # Each payload byte occupies two hex characters in the input string.
    byteStringLength = 2
    # Version: 4-byte little-endian unsigned int.
    version = struct.unpack('<L', transaction[0:4*byteStringLength].decode("hex"))[0]
    offset = 4*byteStringLength
    # print "Version is: " + str(version)
    # Inputs: var-int count, then one record per input.
    varLength, inputCount = translationUtil.unformatVarInt(transaction[offset:offset+9*byteStringLength].decode("hex"))
    # print "Input Count is: " + str(inputCount)
    offset += varLength*byteStringLength
    inputs = []
    for i in range(0, inputCount):
        # Hash of input (previous output) transaction; stored little-endian
        # on the wire, reversed here into display order.
        inHash = (transaction[offset:offset+64].decode("hex"))[::-1].encode("hex")
        offset += 64
        # Index of reference within input (previous output) transaction
        inIndex = struct.unpack('<L', transaction[offset:offset+4*byteStringLength].decode("hex"))[0]
        offset += 4*byteStringLength
        # Script signature length
        varLength, scriptLen = translationUtil.unformatVarInt(transaction[offset:offset+9*byteStringLength].decode("hex"))
        offset += varLength*byteStringLength
        # Script
        script = transaction[offset:offset+scriptLen*byteStringLength].decode("hex")
        offset += scriptLen*byteStringLength
        # Sequence
        sequence = struct.unpack('<L', transaction[offset:offset+4*byteStringLength].decode("hex"))[0]
        offset += 4*byteStringLength
        # Append
        # print "Input {0} is: {1}, {2}, {3}, {4}".format(i, inHash, inIndex, script, sequence)
        inputs.append([inHash, inIndex, script, sequence])
    # Outputs: var-int count, then one record per output.
    varLength, outputCount = translationUtil.unformatVarInt(transaction[offset:offset+9*byteStringLength].decode("hex"))
    # print "Output Count is: {0}".format(outputCount)
    offset += varLength*byteStringLength
    outputs = []
    for i in range(0, outputCount):
        # Output value: 8-byte little-endian unsigned int (satoshis).
        value = struct.unpack('<Q', transaction[offset:offset+8*byteStringLength].decode("hex"))[0]
        offset += 8*byteStringLength
        # Script signature length
        varLength, scriptLen = translationUtil.unformatVarInt(transaction[offset:offset+9*byteStringLength].decode("hex"))
        # NOTE(review): the literal 2 below is byteStringLength spelled out.
        offset += varLength*2
        # Script
        script = transaction[offset:offset+scriptLen*byteStringLength].decode("hex")
        offset += scriptLen*byteStringLength
        # Append
        # print "Output {0} is: {1}, {2}".format(i, value, script)
        outputs.append([value, script])
    # Block Lock Time: 4-byte little-endian unsigned int.
    blockLockTime = struct.unpack('<L', transaction[offset:offset+4*byteStringLength].decode("hex"))[0]
    # print "Block Lock Time is: " + str(blockLockTime)
    return (version, inputs, outputs, blockLockTime)
|
fcf9eede33b3dda8026a00a8a3a57ab2cd84ef22
| 3,644,549
|
def get_related_items_by_type(parser, token):
    """Gets list of relations from object identified by a content type.

    Syntax::

        {% get_related_items_by_type [content_type_app_label.content_type_model] for [object] as [varname] [direction] %}

    ``direction`` is optional and defaults to ``'forward'``.
    """
    tokens = token.contents.split()
    # tokens[0] is the tag name itself: 5 mandatory arguments follow, plus
    # an optional direction — 6 or 7 tokens in total.
    if len(tokens) not in (6, 7):
        # Fixed: the old message claimed "6 arguments" although both the
        # 5- and 6-argument forms are accepted.
        raise template.TemplateSyntaxError(
            "%r tag requires 5 or 6 arguments" % tokens[0]
        )
    if tokens[2] != 'for':
        raise template.TemplateSyntaxError(
            "Third argument in %r tag must be 'for'" % tokens[0]
        )
    if tokens[4] != 'as':
        raise template.TemplateSyntaxError(
            "Fifth argument in %r tag must be 'as'" % tokens[0]
        )
    direction = tokens[6] if len(tokens) == 7 else 'forward'
    return GetRelatedItemsByTypeNode(
        name=tokens[1], obj=tokens[3], as_var=tokens[5], direction=direction
    )
|
a774830f92b6e2abc1df9d172c6b696b87dc83d0
| 3,644,551
|
def stitch_valleys(valley_list):
    """Returns a stitched list of valleys to extract seq from."""
    stitched = utils.LocusCollection(valley_list, 1).stitch_collection()
    loci = []
    seen_regions = set()
    # Keep the first locus seen for each distinct (chr, start, end) region.
    for valley in stitched.get_loci():
        region = (valley.chr, valley.start, valley.end)
        if region not in seen_regions:
            seen_regions.add(region)
            loci.append(valley)
    return loci
|
d5b4e35d66c9c5ff05a027569454d2ec1b612e45
| 3,644,552
|
def no_gcab_namespace(name, *args):
    """
    Mock gi.require_version() to raise a ValueError, simulating that the
    GCab bindings are unavailable.

    The whole 'gi' import is mocked so this test can run even when the
    'gi' package itself is not installed.
    """
    if not name.startswith("gi"):
        # Anything outside the gi namespace imports normally.
        return orig_import(name, *args)
    fake_gi = mock.Mock()
    fake_gi.require_version.side_effect = ValueError
    return fake_gi
|
7952d944aa1fb512874874870a2d9bfaa31c5834
| 3,644,553
|
import logging
def logger(module_name: str):
    """Initialise basic logging configuration and return a module logger."""
    log_format = '[%(levelname)s][%(asctime)s] %(name)s: %(message)s'
    # basicConfig is a no-op if the root logger is already configured.
    logging.basicConfig(level=logging.INFO, format=log_format)
    return logging.getLogger(module_name)
|
0a436b50d16c752404d31e3f34b38239391236d5
| 3,644,555
|
def __generation_dec(n: int, m: int, x_min: np.ndarray, x_max: np.ndarray) -> np.ndarray:
    """
    :param n: num rows in returned matrix
    :param m: num cols in returned matrix
    :param x_min: float array, min possible nums in cols of returned matrix
    :param x_max: float array, max possible nums in cols of returned matrix
    :return: n times m float matrix with nums in col number i in [x_min[i], x_max[i])
    """
    # NOTE: asserts are stripped under `python -O`; callers must supply
    # valid shapes themselves in optimized runs.
    assert n > 0, "n should be positive"
    assert m > 0, "m should be positive"
    assert x_min.shape == (m, ), "x_min should be of shape (m, )"
    assert x_max.shape == (m, ), "x_max should be of shape (m, )"
    # np.random.uniform broadcasts the per-column bounds across the n rows.
    return np.random.uniform(low=x_min, high=x_max, size=(n, m))
|
d76970858faacc8757c0bfa5b8840f4b5ab200d0
| 3,644,557
|
def apply_tariff(kwh, hour):
    """Calculates cost of electricity for given hour."""
    # (start hour inclusive, end hour exclusive, rate)
    tariff_bands = ((0, 7, 12), (7, 17, 20), (17, 24, 28))
    for start, end, rate in tariff_bands:
        if start <= hour < end:
            return rate * kwh
    raise ValueError(f'Invalid hour: {hour}')
|
fb2c5b458c13456a39612720b6e80e0cd707391e
| 3,644,558
|
def _compound_smiles(compound: reaction_pb2.Compound) -> str:
    """Return the compound's SMILES identifier, or "" if none is defined."""
    smiles_values = (i.value for i in compound.identifiers if i.type == i.SMILES)
    return next(smiles_values, "")
|
44c9f8169442b9a116a4d77ea6be74ec4cc27a31
| 3,644,559
|
def cross_correlizer(sample_rate, max_itd, max_frequency):
    """
    Convenience function for creating a CrossCorrelizer with appropriate
    parameters.
    sample_rate : the sample rate of the wav files to expect.
    max_itd : the maximum interaural time difference to test.
    max_frequency : the highest frequency to test.
    """
    # Largest sample shift needed to cover the maximum ITD (rounded up).
    shift_max = int(np.ceil(max_itd * sample_rate))
    # Step between tested shifts: half a period of the highest frequency,
    # expressed in samples.
    shift_steps = int(float(sample_rate) / max_frequency / 2.)
    return CrossCorrelizer(sample_rate, shift_max, shift_steps)
|
747c42c3db2ad1f7642e575a35e3ce6d3c84b4b2
| 3,644,561
|
import altair as alt
def plot_precision_recall_at_k(
    predicate_df, idx_flip, max_k=100, give_random=True, give_ensemble=True
):
    """
    Plot precision/recall at `k` values for flipped-label experiments.

    Returns an interactive altair visualisation; make sure altair is
    installed beforehand.

    Arguments:
        predicate_df: the dataframe with predicates from `ensemble.get_predicates`
        idx_flip: array that indicates if labels are wrong
        max_k: the maximum value for `k` to consider
        give_random: plot the "at k" statistics for the randomly selected lower bound
        give_ensemble: plot the "at k" statistics from the reason ensemble
    """
    # Altair caps dataframe size by default; lift that cap.
    alt.data_transformers.disable_max_rows()
    # Compute the "at k" statistics to plot.
    stats_df = calculate_precision_recall_at_k(
        predicate_df=predicate_df,
        idx_flip=idx_flip,
        max_k=max_k,
        give_random=give_random,
        give_ensemble=give_ensemble,
    )
    chart = alt.Chart(stats_df).mark_line()
    chart = chart.encode(x="k", y="value", color="variable", strokeDash="setting")
    return chart.interactive()
|
e2edc16d8648f9dd913df41dfc39e0b48140cfe7
| 3,644,562
|
from typing import List
from typing import Union
def has_permissions(
    permissions: int, required: List[Union[int, BasePermission]]
) -> bool:
    """Return True if *permissions* contains every required permission bit.

    Administrators implicitly hold all permissions.
    """
    if permissions & Administrator().value:
        return True
    # Fold the required permissions into a single bitmask.
    needed = 0
    for perm in required:
        needed |= perm if isinstance(perm, int) else perm.value
    return permissions & needed == needed
|
db32fe9d1a53cd5b14b71522d08901172bbad8f7
| 3,644,563
|
def mat_to_xyz(mat: NDArrayFloat) -> NDArrayFloat:
    """Convert a 3D rotation matrix to a sequence of _extrinsic_ rotations.

    In other words, takes a 3D rotation matrix and returns a sequence of
    Tait-Bryan angles representing the transformation.

    Reference: https://en.wikipedia.org/wiki/Euler_angles#Rotation_matrix
    Reference: https://en.wikipedia.org/wiki/Euler_angles#Tait%E2%80%93Bryan_angles_2

    Args:
        mat: (...,3,3) Rotation matrix.

    Returns:
        (...,3) Tait-Bryan angles (in radians) formulated for a sequence of extrinsic rotations.
    """
    # Lowercase "xyz" in scipy denotes extrinsic rotations (uppercase would
    # be intrinsic), matching the docstring's contract.
    xyz_rad: NDArrayFloat = Rotation.from_matrix(mat).as_euler("xyz", degrees=False)
    return xyz_rad
|
1f27e503b28f9a932bc4aa703de8a210968f64f6
| 3,644,564
|
import hashlib
def get_user_gravatar(user_id):
    """
    Gets link to user's gravatar from serializer.

    Usage::

        {% get_user_gravatar user_id %}

    Examples::

        {% get_user_gravatar 1 %}
        {% get_user_gravatar user.id %}
    """
    try:
        user = User.objects.get(pk=user_id)
    except User.DoesNotExist:
        user = None
    # Unknown users and users without an email get the anonymous avatar.
    if user is None or not user.email:
        return static('img/anonymous.png')
    digest = hashlib.md5(user.email.lower().encode('utf-8')).hexdigest()
    return 'https://www.gravatar.com/avatar/{}?d=mp'.format(digest)
|
b8cd883c3ca76a3dc45253457715ac011c04785d
| 3,644,565
|
def natural_key(s):
    """Converts string ``s`` into a tuple that will sort "naturally" (i.e.,
    ``name5`` will come before ``name10`` and ``1`` will come before ``A``).

    This function is designed to be used as the ``key`` argument to sorting
    functions.

    :param s: the str/unicode string to convert.
    :rtype: tuple
    """
    # _nkre splits the input into alternating digit and non-digit runs;
    # _nkconv turns digit runs into ints and lowercases the rest.
    return tuple(map(_nkconv, _nkre.findall(s)))
|
7eda87824ac9ad952c911d8b1946cc0af43fa4aa
| 3,644,566
|
def read_ValidationSets_Sources():
    """Read and return ValidationSets_Sources.csv file as a DataFrame."""
    # Year is read as str so pandas does not coerce it to int/float.
    df = pd.read_csv(data_dir + 'ValidationSets_Sources.csv',header=0,
                     dtype={"Year":"str"})
    return df
|
ea653e91ab37abd91297783caf8ea1fa6bd14545
| 3,644,567
|
def regular_polygon_area_equivalent_radius(n, radius=1.0):
    """Compute the circumradius a regular n-gon needs to match the area of a
    circle of the given radius.

    With segment angle \\theta = 2\\pi / n, the result is
    r_eq = sqrt(\\theta r^2 / sin(\\theta)).

    :param radius: circle radius
    :param n: number of regular polygon segments
    :return: equivalent regular polygon circumradius
    """
    segment_angle = 2 * np.pi / n
    return np.sqrt((segment_angle * radius ** 2) / np.sin(segment_angle))
|
4aacc8c2ab57516bef15167e5a22485c9f55bc2d
| 3,644,569
|
def get_dashboard_list(project_id=None, page=1, page_size=25, token_info=None, user=None):
    """Get a paginated list of dashboards

    :param project_id: Filter dashboards by project ID
    :type project_id: str
    :param page: 1-based page number to return
    :type page: int
    :param page_size: number of dashboards per page
    :type page_size: int
    :param token_info: decoded auth token payload (supplied by connexion)
    :param user: current user, used for the project membership check
    :rtype: DashboardList
    """
    query = Dashboard.query
    project = None
    # NOTE(review): the filter reads project_id from the function argument
    # but the lookup uses the raw request arg — confirm they always agree.
    if "project_id" in connexion.request.args:
        project = Project.query.get(connexion.request.args["project_id"])
    if project:
        # Only members of the project may list its dashboards.
        if not project_has_user(project, user):
            return "Forbidden", 403
        query = query.filter(Dashboard.project_id == project_id)
    offset = (page * page_size) - page_size
    total_items = query.count()
    # Ceiling division for the page count.
    total_pages = (total_items // page_size) + (1 if total_items % page_size > 0 else 0)
    dashboards = query.offset(offset).limit(page_size).all()
    return {
        "dashboards": [dashboard.to_dict() for dashboard in dashboards],
        "pagination": {
            "page": page,
            "pageSize": page_size,
            "totalItems": total_items,
            "totalPages": total_pages,
        },
    }
|
9a15c87b081dcdb87e1a5c4778b0114309365a2b
| 3,644,570
|
import torch
def _ssim(X, Y, filter, K=(0.01, 0.03)):
    """Compute the per-channel SSIM index and contrast-sensitivity for X and Y."""
    K1, K2 = K
    # Stabilising constants from the SSIM formulation.
    C1 = K1 ** 2
    C2 = K2 ** 2
    filter = filter.to(X.device, dtype=X.dtype)
    # Local means via the Gaussian window.
    mu_x = gaussian_filter(X, filter)
    mu_y = gaussian_filter(Y, filter)
    mu_x_sq = mu_x.pow(2)
    mu_y_sq = mu_y.pow(2)
    mu_x_mu_y = mu_x * mu_y
    # Local (co)variances.
    sigma_x_sq = gaussian_filter(X * X, filter) - mu_x_sq
    sigma_y_sq = gaussian_filter(Y * Y, filter) - mu_y_sq
    sigma_xy = gaussian_filter(X * Y, filter) - mu_x_mu_y
    # Contrast-sensitivity term with alpha = beta = gamma = 1.
    cs_map = (2 * sigma_xy + C2) / (sigma_x_sq + sigma_y_sq + C2)
    # Luminance term times contrast-sensitivity gives the SSIM map.
    ssim_map = ((2 * mu_x_mu_y + C1) / (mu_x_sq + mu_y_sq + C1)) * cs_map
    ssim_per_channel = torch.flatten(ssim_map, 2).mean(-1)
    cs = torch.flatten(cs_map, 2).mean(-1)
    return ssim_per_channel, cs
|
49deca478e06c35f06436f16ad34fb8154ba0cfd
| 3,644,571
|
def get_all_messages(notification_queue, **kwargs):
    """
    Get all messages on the specified notification queue

    Variables:
    notification_queue   => Queue to get the messages from

    Arguments:
    None

    Data Block:
    None

    Result example:
    []  # List of messages
    """
    queue = NamedQueue("nq-%s" % notification_queue,
                       host=config.core.redis.persistent.host,
                       port=config.core.redis.persistent.port,
                       db=config.core.redis.persistent.db)
    messages = []
    # Drain the queue without blocking; pop() yields None once empty.
    while (msg := queue.pop(blocking=False)) is not None:
        messages.append(msg)
    return make_api_response(messages)
|
c0a61d50cc3e6373bc007f8978278d49f66544e9
| 3,644,572
|
def ssa_reconstruct(pc, v, k):
    """
    from Vimal
    Series reconstruction for given SSA decomposition using vector of components

    :param pc: matrix with the principal components from SSA, shape (t, dim)
    :param v: matrix of the singular vectors from SSA, shape (dim, dim)
    :param k: index or vector of indices of the components to reconstruct
    :return: the reconstructed time series of length t + dim - 1
    :raises ValueError: if pc/v are not 2-D or k contains an out-of-range index
    """
    if np.isscalar(k):
        k = [k]
    if pc.ndim != 2:
        raise ValueError('pc must be a 2-dimensional matrix')
    if v.ndim != 2:
        raise ValueError('v must be a 2-dimensional matrix')
    t, dim = pc.shape
    n_points = t + (dim - 1)
    # Valid column indices are 0..dim-1. The old check used `dim < x`,
    # which let k == dim through and produced an IndexError below.
    if any(x >= dim or x < 0 for x in k):
        raise ValueError('k must be vector of indexes from range 0..%d' % (dim - 1))
    # Project the selected components back; plain matmul replaces the
    # deprecated np.matrix round-trip.
    pc_comp = pc[:, k] @ v[:, k].T
    xr = np.zeros(n_points)
    times = np.zeros(n_points)
    # Diagonal averaging: each anti-diagonal of the trajectory matrix
    # contributes to one output point.
    for i in range(dim):
        xr[i: t + i] = xr[i: t + i] + pc_comp[:, i]
        times[i: t + i] = times[i: t + i] + 1
    xr = (xr / times) * np.sqrt(t)
    return xr
|
1ac054f2d31ab6f883a369e682a33235305df604
| 3,644,573
|
def get_theo_joints_pm(W, b, beta):
    """Calculate the theoretical state distribution for a Boltzmann machine.

    States from get_states are in {0, 1}; they are mapped to {-1, +1}
    before the energy is evaluated. Returns normalised Boltzmann weights.
    """
    states = get_states(len(b))
    weights = np.array([np.exp(-1. * get_energy(W, b, (2. * s - 1.), beta))
                        for s in states])
    # Normalise into a probability distribution.
    return weights / np.sum(weights)
|
c84c518fac47d139f951d4973907dceec1d9c825
| 3,644,575
|
from typing import BinaryIO
def tail(the_file: BinaryIO, lines_2find: int = 20) -> list[bytes]:
    """Return the last *lines_2find* lines of a binary file.

    Scans backwards in blocks of up to 1024 bytes until at least
    ``lines_2find + 1`` newlines have been seen (one extra so the oldest
    returned line is never truncated) or the whole file has been scanned.

    Adapted from
    http://stackoverflow.com/questions/136168/get-last-n-lines-of-a-file-with-python-similar-to-tail
    """
    lines_found: int = 0
    total_bytes_scanned: int = 0
    the_file.seek(0, 2)
    bytes_in_file: int = the_file.tell()
    while lines_2find + 1 > lines_found and bytes_in_file > total_bytes_scanned:
        byte_block: int = min(1024, bytes_in_file - total_bytes_scanned)
        the_file.seek(-(byte_block + total_bytes_scanned), 2)
        total_bytes_scanned += byte_block
        # Count newlines only in the newly scanned block. The old code read
        # a fixed 1024 bytes here, re-counting bytes from previously scanned
        # blocks on the final partial block and stopping the scan too early.
        lines_found += the_file.read(byte_block).count(b"\n")
    the_file.seek(-total_bytes_scanned, 2)
    line_list: list[bytes] = list(the_file.readlines())
    return line_list[-lines_2find:]
|
094917839d4b26e284244715452982eaf6e8c08a
| 3,644,576
|
def add_device_tag_command(client, args):
    """Command to add tag to an existing admin devices entry."""
    site, concentrator, device_map = get_site_params()
    result = client.add_device_tag(site=site, concentrator=concentrator, map=device_map,
                                   transmitter_id=args.get('transmitter_id'),
                                   tag=args.get('tag'))
    # A missing 'status' key means the backend rejected the request.
    if 'status' not in result:
        return_error('Failed to add device tag')
    return result['status'], {}, result
|
9aeaff1110515215bb7f2d3aa1a6ab5123cd31b2
| 3,644,577
|
def CommaSeparatedFloats(sFloatsCSV):
    """Read comma-separated floats from string.

    [sFloatsCSV]: string, contains comma-separated floats.
    <retval>: list, floats parsed from string.
    """
    # Strip all spaces first so entries like "1.0, 2.0" parse cleanly.
    cleaned = sFloatsCSV.replace(" ", "")
    return [float(token) for token in cleaned.split(",")]
|
1aa12ca7297aa3bd809f6d2ffaf155233a826b49
| 3,644,578
|
def merge_channels(image_list):
    """
    Merge channels of multiple scalar ANTsImage types into one
    multi-channel ANTsImage

    ANTsR function: `mergeChannels`

    Arguments
    ---------
    image_list : list/tuple of ANTsImage types
        scalar images to merge; must be non-empty and share one pixeltype

    Returns
    -------
    ANTsImage

    Raises
    ------
    ValueError
        if the list is empty, contains non-ANTsImage entries, or mixes
        pixeltypes

    Example
    -------
    >>> import ants
    >>> image = ants.image_read(ants.get_ants_data('r16'), 'float')
    >>> image2 = ants.image_read(ants.get_ants_data('r16'), 'float')
    >>> image3 = ants.merge_channels([image,image2])
    >>> image3.components == 2
    """
    # Validate the whole list before dereferencing image_list[0], so bad
    # input raises ValueError instead of IndexError/AttributeError.
    if len(image_list) == 0:
        raise ValueError('image_list must contain at least one image')
    for image in image_list:
        if not isinstance(image, iio.ANTsImage):
            raise ValueError('list may only contain ANTsImage objects')
    inpixeltype = image_list[0].pixeltype
    dimension = image_list[0].dimension
    components = len(image_list)
    for image in image_list:
        if image.pixeltype != inpixeltype:
            raise ValueError('all images must have the same pixeltype')
    libfn = utils.get_lib_fn('mergeChannels%s' % image_list[0]._libsuffix)
    image_ptr = libfn([image.pointer for image in image_list])
    return iio.ANTsImage(pixeltype=inpixeltype,
                         dimension=dimension,
                         components=components,
                         pointer=image_ptr)
|
33b5588d6ad4d128ed6206652919408e32520c80
| 3,644,579
|
def get_var(name: str, options: dict) -> str:
    """
    Returns the value from the given dict with key 'INPUT_$key', or — if
    that key is missing or its value is falsy — the value under 'key'.
    """
    prefixed = options.get(f'INPUT_{name}')
    # Falsy (e.g. empty-string) INPUT_ values fall through to the bare key.
    return prefixed if prefixed else options.get(name)
|
9df0e3ec92af83b5719b88ca34f323bdfc7d1d84
| 3,644,580
|
def create_txt_response(name, txt_records):
    """
    Returns an RRSet containing the 'txt_records' as the result of a DNS
    query for 'name'.

    This takes advantage of the fact that an Answer object mostly behaves
    like an RRset.
    """
    # TTL 60s, class IN, type TXT — one rdata entry per element of txt_records.
    return dns.rrset.from_text_list(name, 60, "IN", "TXT", txt_records)
|
1f649719576b810a40ed7042b9b254653fe1364a
| 3,644,582
|
import ast
def bit_xor(*arguments):
    """
    Bitwise XOR function.

    Returns an ``ast.BitXor`` operator node.

    NOTE(review): ``ast.BitXor`` defines no fields, so calling this with
    any positional arguments likely raises TypeError on modern CPython —
    confirm the intended usage is ``bit_xor()`` with no arguments.
    """
    return ast.BitXor(*arguments)
|
07af3232a18796b4122e3ac6a4279ec00032c31d
| 3,644,583
|
def get_chromiumdir(platform, release):
    """Locate the Chromium User Data Directory for a platform.

    http://www.chromium.org/user-experience/user-data-directory

    Args:
        platform (str): a sys.platform str
        release (str): Windows release name ('XP' selects the legacy path)

    Returns:
        list: single-element list with the path to the Chromium
        User Data Directory

    Raises:
        NotImplementedError: for unrecognised platforms
    """
    if platform == 'darwin':
        chromedir = os.path.expanduser(
            '~/Library/Application Support/Chromium')
    elif platform.startswith('linux'):
        chromedir = os.path.expanduser(
            '~/.config/chromium')
    elif platform == 'win32':
        # Raw strings are required here: the old plain literals contained
        # '\U...' which Python 3 rejects as an invalid unicode escape.
        if release == 'XP':
            chromedir = os.path.expanduser(
                r'~\Local Settings\Application Data\Chromium\User Data')
        else:
            chromedir = os.path.expanduser(
                r'~\AppData\Local\Chromium\User Data')
    else:
        raise NotImplementedError("Unknown platform: %r" % platform)
    return [chromedir]
|
4ed1a9d70dfd3430911d26ac47322e9612bfdb06
| 3,644,584
|
def make_ts_scorer(
    score_func, greater_is_better=True, needs_proba=False, needs_threshold=False, **kwargs,
):
    """Make a scorer from a performance metric or loss function.

    Wraps scoring functions such as ``accuracy_score`` or
    ``mean_squared_error`` for use in
    `~sklearn.model_selection.GridSearchCV` and
    `~sklearn.model_selection.cross_validate`, returning a callable that
    scores an estimator's output. See the :ref:`User Guide <scoring>`.

    Parameters
    ----------
    score_func : callable
        Score (or loss) function with signature ``score_func(y, y_pred, **kwargs)``.
    greater_is_better : boolean
        Whether score_func is a score function (default, high is good) or a
        loss function (low is good); in the latter case the scorer
        sign-flips the outcome.
    needs_proba : boolean
        Not yet implemented; kept only for scikit-learn API compatibility.
    needs_threshold : boolean
        Not yet implemented; kept only for scikit-learn API compatibility.
    **kwargs : additional arguments
        Additional parameters passed to score_func.

    Returns
    -------
    callable
        scorer object that returns a scalar score
    """
    sign = 1 if greater_is_better else -1
    # Guard clauses: the probability / threshold paths are unimplemented.
    if needs_proba and needs_threshold:
        raise ValueError("Set either needs_proba or needs_threshold to True, but not both.")
    if needs_proba:
        raise NotImplementedError("Usage/evaluation of prediction probabilities are not yet implemented.")
    if needs_threshold:
        raise NotImplementedError("Evaluation of decision function output is not yet implemented.")
    return _TSPredictScorer(score_func, sign, kwargs)
|
9003f82b52a1e915111bd46002fda8f61f6c9b9e
| 3,644,585
|
def pfas(x):
    """Parse a JSON array of PFA expressions as PFA abstract syntax trees.

    :type x: open JSON file, JSON string, or Pythonized JSON
    :param x: PFA expressions in a JSON array
    :rtype: list of titus.pfaast.Expression
    :return: parsed expressions as a list of abstract syntax trees
    """
    # Delegates all parsing/validation to the shared jsonToAst converter.
    return jsonToAst.exprs(x)
|
e3544a89f16c908752a55ea58bfc3360abbe4121
| 3,644,586
|
def overlap(x, y, a, b):
    """Finds the overlap of (x, y) and (a, b).

    Assumes an overlap exists, i.e. y >= a and b >= x. Both endpoints are
    clamped into [a, b].
    """
    return clamp(x, a, b), clamp(y, a, b)
|
c26b2f32ba9c12f72108c756ca4c1b4993fe8d55
| 3,644,587
|
def topological_sort(g):
    """
    Returns a list of vertices in directed acyclic graph g in topological
    order.
    """
    # Remaining unmet constraints (incoming edges) per vertex.
    in_count = {v: g.degree(v, outgoing=False) for v in g.vertices()}
    # Vertices with no constraints may be emitted immediately.
    ready = [v for v, count in in_count.items() if count == 0]
    topo = []
    while ready:
        u = ready.pop()
        topo.append(u)
        # Emitting u releases one constraint on each of its successors.
        for e in g.incident_edges(u):
            w = e.opposite(u)
            in_count[w] -= 1
            if in_count[w] == 0:
                ready.append(w)
    return topo
|
5ac6261bf1b6fa92280abdc3fc95679ad9294e80
| 3,644,588
|
def probability_of_failure_in_any_period(p, n):
    """Return the probability that a failure of per-period probability *p*
    occurs at least once in *n* periods, i.e. 1 - (1 - p) ** n.

    For small p the direct formula loses precision (1 - p rounds the tail
    away), so the integer part of n is expanded binomially:

        sum_i (-1)**(i+1) * C(n_int, i) * p**i

    and the fractional part f = n - n_int is folded back in exactly via

        1 - (1 - result) * (1 - p)**f  ==  1 - (1-p)**f + result * (1-p)**f
    """
    from math import comb  # stdlib binomial coefficient (Python 3.8+)

    if p < 0.01:
        n_int = int(n)
        result = 0.0
        sign = 1
        for i in range(1, n_int + 1):
            p_exp_i = p ** i
            if p_exp_i == 0:
                # All subsequent powers underflow to zero as well.
                break
            result += sign * comb(n_int, i) * p_exp_i
            sign = -sign
        # Fold in the fractional part of n without cancelling precision.
        frac_factor = (1.0 - p) ** (n - n_int)
        return 1.0 - frac_factor + result * frac_factor
    else:
        # For larger p the powers of p don't shrink faster than the
        # coefficients grow, and the direct expression is stable anyway.
        return 1.0 - (1.0 - p) ** n
|
92439161b6b1e3288fc665c72c145282c6c09bb2
| 3,644,589
|
def stage_1(transformed_token_list):
    """Checks tokens against ngram to unigram dictionary

    For each input sentence, every known n-gram (read from the stage-1
    Excel dictionary) found in the sentence is replaced by its unigram.
    The transformed sentences are written to the stage-1 output file and
    returned.
    """
    # Load the ngram -> unigram replacement table from the configured sheet.
    dict_data = pd.read_excel(v.stage_1_input_path, sheet_name=v.input_file_sheet_name)
    selected_correct_token_data = pd.DataFrame(dict_data, columns=v.stage_1_input_file_columns)
    transformed_state_1 = []
    for sentence in transformed_token_list:
        for row in selected_correct_token_data.itertuples():
            # row.ngram is stored as a stringified list, e.g. "['a', 'b']".
            b = list(literal_eval(row.ngram))
            ngram = ''
            for word in b: ngram += (' ' + word)
            split_bigram = ngram.strip().split(' ')
            split_sentence = sentence.strip().split(' ')
            # Replace only when the full phrase AND both component words
            # occur as whole tokens in the sentence.
            if ngram.strip() in sentence and split_bigram[0] in split_sentence and split_bigram[1] in split_sentence:
                sentence = sentence.replace(ngram.strip(), row.unigram)
        transformed_state_1.append(sentence)
    print_to_file(v.stage_1_output_path, transformed_state_1, v.input_file_columns)
    return transformed_state_1
|
6dea5bb1e1e04d183ade142f50c36aea00933ff1
| 3,644,590
|
def _perform_sanity_checks(config, extra_metadata):
    """
    Method to perform sanity checks on current classification run.

    Verifies that the conditions, operators and amnesty settings of the
    current config match those recorded in a previous job's metadata.

    :param config: dirbs config instance
    :param extra_metadata: job extra metadata dict obj
    :return: True if all three sections match, False otherwise
    """
    curr_conditions = [c.as_dict() for c in config.conditions]
    curr_operators = [op.as_dict() for op in config.region_config.operators]
    curr_amnesty = config.amnesty_config.as_dict()
    # Return the comparison directly instead of the
    # `if <expr>: return True / return False` anti-pattern.
    return (curr_conditions == extra_metadata['conditions'] and
            curr_operators == extra_metadata['operators'] and
            curr_amnesty == extra_metadata['amnesty'])
|
fa5fa39bae91393c4f91ab6aa3b595f8a0db2e4f
| 3,644,591
|
def get_key_from_id(id : str) -> str:
    """
    Gets the key from an id.

    :param id: identifier to look up in ``KEYMAP``
    :return: the key mapped to ``id``
    :raises KeyError: if ``id`` is not present in ``KEYMAP``
    """
    # Raise explicitly instead of `assert`: assertions are stripped when
    # Python runs with -O, which would turn a missing id into a raw KeyError
    # with no context (or mask the validation entirely).
    try:
        return KEYMAP[id]
    except KeyError:
        raise KeyError(f"ID not found: {id!r}") from None
|
7fbf00bbd905382888b993bbee5564c42edf4e73
| 3,644,592
|
import string
def CreateFromDict(registration_dict):
  """Returns the content of the header file."""
  # Nothing to register -> emit nothing (check first; the template is only
  # needed on the non-empty path).
  if len(registration_dict['FORWARD_DECLARATIONS']) == 0:
    return ''
  header_template = string.Template("""\
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is autogenerated by
// base/android/jni_generator/jni_registration_generator.py
// Please do not change its content.
#ifndef HEADER_GUARD
#define HEADER_GUARD
#include <jni.h>
#include "base/android/jni_generator/jni_generator_helper.h"
#include "base/android/jni_int_wrapper.h"
// Step 1: Forward declaration.
${FORWARD_DECLARATIONS}
// Step 2: Main dex and non-main dex registration functions.
bool RegisterMainDexNatives(JNIEnv* env) {
${REGISTER_MAIN_DEX_NATIVES}
  return true;
}
bool RegisterNonMainDexNatives(JNIEnv* env) {
${REGISTER_NON_MAIN_DEX_NATIVES}
  return true;
}
#endif  // HEADER_GUARD
""")
  return jni_generator.WrapOutput(header_template.substitute(registration_dict))
|
08d49b8cbb1275104b4498b98aed00747163e874
| 3,644,593
|
def static_message_fixture(tmpdir_factory, prefix, message, suffix):
    """Write prefix+message+suffix to a temp file and return its path."""
    target_path = tmpdir_factory.mktemp('data').join('static_message.txt').strpath
    with open(target_path, 'w') as out_file:
        out_file.write(f"{prefix}{message}{suffix}")
    return target_path
|
a9a11508eb10760452cad557e792df30b068e8bc
| 3,644,595
|
def entropy_image(filename, bins=30):
    """
    Extract the Renyi (collision, order-2) entropy of the image stored
    under ``filename``.

    :param filename: path to an image readable by OpenCV
    :param bins: number of histogram bins over the normalized [0, 1] range
    :return: -log(sum(p_i^2)) computed from the normalized gray-level histogram
    :raises FileNotFoundError: if the file is missing or not a readable image
    """
    # cv2.imread returns None (instead of raising) when the file cannot be
    # read; fail fast with a clear error rather than a TypeError below.
    img = cv2.imread(filename, 0)  # flag 0 -> load as grayscale
    if img is None:
        raise FileNotFoundError(f"Could not read image: {filename}")
    img = img / 255.0  # normalize 8-bit gray levels into [0, 1]
    p, _ = np.histogram(img, range=[0.0, 1.0], bins=bins)
    # Renyi-2 entropy: -log(sum(q_i^2)) with q = p / sum(p).
    return -np.log(np.dot(p, p) / (np.sum(p) ** 2.0))
|
b9686647601cb8850a6b03a1c52f4ad0a4218553
| 3,644,596
|
def satisfies_constraint(kel: dict, constraint: dict) -> bool:
    """Determine whether knowledge graph element satisfies constraint.
    If the constrained attribute is missing, returns False.
    """
    # Find the first attribute whose type matches the constraint's id.
    matched_attribute = None
    for candidate in kel.get("attributes") or []:
        if candidate["attribute_type_id"] == constraint["id"]:
            matched_attribute = candidate
            break
    if matched_attribute is None:
        return False
    # Apply the operator, then flip the result if the constraint is negated.
    holds = operator_map[constraint["operator"]](
        matched_attribute["value"],
        constraint["value"],
    )
    return constraint.get("not", False) != holds
|
39ae764e03c77dcb0145b9091d21df092894850d
| 3,644,597
|
def static_unroll(core, input_sequence, initial_state, time_major=True):
  """Performs a static unroll of an RNN.
  An *unroll* corresponds to calling the core on each element of the
  input sequence in a loop, carrying the state through::
      state = initial_state
      for t in range(len(input_sequence)):
        outputs, state = core(input_sequence[t], state)
  A *static* unroll replaces a loop with its body repeated multiple
  times when executed inside :func:`jax.jit`::
      state = initial_state
      outputs0, state = core(input_sequence[0], state)
      outputs1, state = core(input_sequence[1], state)
      outputs2, state = core(input_sequence[2], state)
      ...
  See :func:`dynamic_unroll` for a loop-preserving unroll function.
  Args:
    core: An :class:`RNNCore` to unroll.
    input_sequence: An arbitrarily nested structure of tensors of shape
      ``[T, ...]`` if time-major=True, or ``[B, T, ...]`` if time_major=False,
      where ``T`` is the number of time steps.
    initial_state: An initial state of the given core.
    time_major: If True, inputs are expected time-major, otherwise they are
      expected batch-major.
  Returns:
    A tuple with two elements:
      * **output_sequence** - An arbitrarily nested structure of tensors
        of shape ``[T, ...]`` if time-major, otherwise ``[B, T, ...]``.
      * **final_state** - Core state at time step ``T``.
  """
  output_sequence = []
  time_axis = 0 if time_major else 1
  # Use jax.tree_util explicitly: the top-level jax.tree_leaves /
  # jax.tree_map / jax.tree_multimap aliases were deprecated and removed
  # in newer JAX releases.
  num_steps = jax.tree_util.tree_leaves(input_sequence)[0].shape[time_axis]
  state = initial_state
  for t in range(num_steps):
    # Bind t as a lambda default to avoid the late-binding closure pitfall.
    if time_major:
      inputs = jax.tree_util.tree_map(lambda x, _t=t: x[_t], input_sequence)
    else:
      inputs = jax.tree_util.tree_map(lambda x, _t=t: x[:, _t], input_sequence)
    outputs, state = core(inputs, state)
    output_sequence.append(outputs)
  # Stack outputs along the time axis. tree_map over multiple trees
  # replaces the removed jax.tree_multimap.
  output_sequence = jax.tree_util.tree_map(
      lambda *args: jnp.stack(args, axis=time_axis),
      *output_sequence)
  return output_sequence, state
|
f61c9de5b90a0757617f9db588ab54e69918bc4b
| 3,644,598
|
from typing import List
from typing import Tuple
def getElementByClass(className: str, fileName: str) -> List[Tuple[int, str]]:
    """Returns first matching tag from an HTML/XML document.

    Scans ``fileName`` line by line and returns a list containing at most one
    ``(line_number, line_text)`` tuple for the first line that contains
    class="className" (double- or single-quoted); line numbers are 1-based
    and trailing newlines are stripped. Returns an empty list if no line
    matches.
    """
    double_quoted = f'class="{className}"'
    single_quoted = f"class='{className}'"
    matches: List[Tuple[int, str]] = []
    # Stream the file instead of materializing readlines(); "r" suffices
    # since nothing is written ("r+" was unnecessary). enumerate() yields
    # correct line numbers even when identical lines repeat, unlike
    # list.index(), which always reports the first occurrence.
    with open(fileName, "r") as handle:
        for line_no, raw_line in enumerate(handle, start=1):
            line = raw_line.replace("\n", "")
            if double_quoted in line or single_quoted in line:
                matches.append((line_no, line))
                break
    return matches
|
969e4070e16dec2e10e26e97cbaaab9d95e7b904
| 3,644,599
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.