content
stringlengths 35
762k
| sha1
stringlengths 40
40
| id
int64 0
3.66M
|
|---|---|---|
def is_valid_ip(ip_addr):
    """Validate a dotted-quad IPv4 address string.

    :param ip_addr: Candidate IPv4 address, e.g. "192.168.0.1".
    :return: True when `ip_addr` has exactly four octets, each in 0-255.
        On an invalid address this prints a message and exits the process
        (preserving the original contract -- it never returns False).
    """
    octets = ip_addr.split(".")
    try:
        int_octets = [int(octet) for octet in octets]
    except ValueError:
        # BUG FIX: a non-numeric octet (e.g. "a.b.c.d") previously escaped
        # as an unhandled ValueError; treat it as an invalid address.
        int_octets = []
    if len(int_octets) == 4 and all(0 <= octet <= 255 for octet in int_octets):
        return True
    print("Invalid IP, closing program... \n")
    exit(0)
|
7d776107f54e3c27a2a918570cbb267b0e9f419e
| 3,646,480
|
def make_replay_buffer(env: gym.Env, size: int) -> ReplayBuffer:
    """Make a replay buffer.

    If not ShinEnv:
        Returns a ReplayBuffer with ("rew", "done", "obs", "act", "log_prob", "timeout").
    If ShinEnv:
        Returns a ReplayBuffer with ("rew", "done", "obs", "act", "log_prob", "timeout", "state").

    Args:
        env: Environment whose observation/action spaces define the layout.
        size: Maximum number of transitions to store.

    Raises:
        ValueError: If the action space is neither Discrete nor Box.
            (Previously this case fell through and crashed later with an
            unrelated NameError on `act_type`.)
    """
    # ShinEnv is identified by its exposed `mdp` attribute; its buffer also
    # tracks the discrete "state" index.
    is_shin_env = hasattr(env, "mdp")
    if isinstance(env.action_space, gym.spaces.Discrete):
        act_type, act_shape = int, 1
    elif isinstance(env.action_space, gym.spaces.Box):
        act_type, act_shape = float, env.action_space.shape
    else:
        raise ValueError(
            "Unsupported action space type: {}".format(type(env.action_space)))
    env_dict = {
        "rew": {"dtype": float, "shape": 1},
        "done": {"dtype": bool, "shape": 1},
        "obs": {"dtype": float, "shape": env.observation_space.shape},
        "act": {"dtype": act_type, "shape": act_shape},
        "log_prob": {"dtype": float, "shape": act_shape},
        "timeout": {"dtype": bool, "shape": 1},
    }
    if is_shin_env:
        env_dict["state"] = {"dtype": int, "shape": 1}
        return ReplayBuffer(size, env_dict, next_of=("obs", "state"))
    return ReplayBuffer(size, env_dict, next_of=("obs",))
|
27f7c0bae37fc1963f4f7c72b42e8da424ab313e
| 3,646,481
|
from typing import Callable
import decimal
def scale_places(places: int) -> Callable[[decimal.Decimal], decimal.Decimal]:
    """
    Returns a function that shifts the decimal point of decimal values to the
    right by ``places`` places.
    """
    if not isinstance(places, int):
        raise ValueError(
            'Argument `places` must be int. Got value {} of type {}.'.
            format(places, type(places)),
        )
    # Pre-compute the scaling factor once, under the ABI decimal context.
    with decimal.localcontext(abi_decimal_context):
        scaling_factor = TEN ** -places

    def scaler(x: decimal.Decimal) -> decimal.Decimal:
        with decimal.localcontext(abi_decimal_context):
            return x * scaling_factor

    # Give the returned closure a descriptive name for debugging/repr.
    if places > 0:
        places_repr = 'Eneg{}'.format(places)
    else:
        places_repr = 'Epos{}'.format(-places)
    func_name = 'scale_by_{}'.format(places_repr)
    scaler.__name__ = func_name
    scaler.__qualname__ = func_name
    return scaler
|
aaf2d9eb14d7a1b28d169d971011b456e2164000
| 3,646,482
|
def create_model(params : model_params):
    """
    Create ReasoNet model
    Args:
        params (class:`model_params`): The parameters used to create the model
    Returns:
        The attention model built over the context and query memories.
    """
    logger.log("Create model: dropout_rate: {0}, init:{1}, embedding_init: {2}".format(params.dropout_rate, params.init, params.embedding_init))
    # Query and Doc/Context/Paragraph inputs to the model
    query_seq_axis = Axis('sourceAxis')
    context_seq_axis = Axis('contextAxis')
    # Sparse one-hot word inputs over the vocabulary, each on its own dynamic axis.
    query_sequence = sequence.input(shape=(params.vocab_dim), is_sparse=True, sequence_axis=query_seq_axis, name='query')
    context_sequence = sequence.input(shape=(params.vocab_dim), is_sparse=True, sequence_axis=context_seq_axis, name='context')
    # Dense per-token 0/1 mask marking entity positions in the context.
    entity_ids_mask = sequence.input(shape=(1,), is_sparse=False, sequence_axis=context_seq_axis, name='entity_ids_mask')
    # embedding
    if params.embedding_init is None:
        embedding_init = create_random_matrix(params.vocab_dim, params.embedding_dim)
    else:
        embedding_init = params.embedding_init
    # Learnable embedding, seeded with the (possibly pre-trained) init matrix.
    embedding = parameter(shape=(params.vocab_dim, params.embedding_dim), init=None)
    embedding.value = embedding_init
    # A frozen copy of the same matrix, used only for entity tokens (see note below).
    embedding_matrix = constant(embedding_init, shape=(params.vocab_dim, params.embedding_dim))
    if params.dropout_rate is not None:
        query_embedding = ops.dropout(times(query_sequence , embedding), params.dropout_rate, name='query_embedding')
        context_embedding = ops.dropout(times(context_sequence, embedding), params.dropout_rate, name='context_embedding')
    else:
        query_embedding = times(query_sequence , embedding, name='query_embedding')
        context_embedding = times(context_sequence, embedding, name='context_embedding')
    # GRU weights for the bidirectional context/query encoders.
    contextGruW = Parameter(_INFERRED + _as_tuple(params.hidden_dim), init=glorot_uniform(), name='gru_params')
    queryGruW = Parameter(_INFERRED + _as_tuple(params.hidden_dim), init=glorot_uniform(), name='gru_params')
    entity_embedding = ops.times(context_sequence, embedding_matrix, name='constant_entity_embedding')
    # Unlike other words in the context, we keep the entity vectors fixed as a random vector so that each vector just means an identifier of different entities in the context and it has no semantic meaning
    full_context_embedding = ops.element_select(entity_ids_mask, entity_embedding, context_embedding)
    # Bidirectional single-layer GRU encoders over context and query.
    context_memory = ops.optimized_rnnstack(full_context_embedding, contextGruW, params.hidden_dim, 1, True, recurrent_op='gru', name='context_mem')
    query_memory = ops.optimized_rnnstack(query_embedding, queryGruW, params.hidden_dim, 1, True, recurrent_op='gru', name='query_mem')
    qfwd = ops.slice(sequence.last(query_memory), -1, 0, params.hidden_dim, name='fwd')
    qbwd = ops.slice(sequence.first(query_memory), -1, params.hidden_dim, params.hidden_dim*2, name='bwd')
    init_status = ops.splice(qfwd, qbwd, name='Init_Status') # get last fwd status and first bwd status
    return attention_model(context_memory, query_memory, init_status, params.hidden_dim, params.attention_dim, max_steps = params.max_rl_steps)
|
b175adef530dbbbdb132fed0a6653945ec02fef9
| 3,646,483
|
def _process_voucher_data_for_order(cart):
    """Fetch, process and return voucher/discount data from cart."""
    # Lock the active voucher rows so usage counts stay consistent.
    active_vouchers = Voucher.objects.active(date=date.today()).select_for_update()
    voucher = get_voucher_for_cart(cart, active_vouchers)
    if not voucher:
        if cart.voucher_code:
            # A code is attached to the cart but no longer resolves to an
            # applicable voucher -- abort the order.
            msg = pgettext(
                'Voucher not applicable',
                'Voucher expired in meantime. Order placement aborted.')
            raise NotApplicable(msg)
        return {}
    increase_voucher_usage(voucher)
    return {
        'voucher': voucher,
        'discount_amount': cart.discount_amount,
        'discount_name': cart.discount_name,
        'translated_discount_name': cart.translated_discount_name}
|
d89816fc24192d7d2d4ce7d8edaf11ae94e3f171
| 3,646,484
|
def prep_seven_zip_path(path, talkative=False):
    """
    Print p7zip path on POSIX, or notify if not there.
    :param path: Path to use.
    :type path: str
    :param talkative: Whether to output to screen. False by default.
    :type talkative: bool
    """
    found = path is not None
    if found:
        talkaprint("7ZIP FOUND AT {0}".format(path), talkative)
    else:
        talkaprint("NO 7ZIP\nPLEASE INSTALL p7zip", talkative)
    return found
|
c9d4cc77111c8fc9768c713556fb16e5b8f69ec2
| 3,646,486
|
def overlapping_community(G, community):
    """Return True if community partitions G into overlapping sets.
    """
    total_members = sum(len(block) for block in community)
    # Overlap requires strictly more total memberships than nodes in G.
    if total_members <= len(G):
        return False
    # Every node of G must be covered, and nothing outside G may appear.
    return set.union(*community) == set(G)
|
da9e3465c6351df0efd19863e579c49bbc6b9d67
| 3,646,488
|
import json
def validate_credential(zone, credential):
    """
    Token is already calculated
    """
    # Ask the database-backed source whether this credential may access the zone.
    data_source = DataSource(DataSource.TYPE_DATABASE, CONNECTION_FILE_PATH)
    access_granted = data_source.get_or_create_client_access_rights(credential, zone)
    if not access_granted:
        return json.dumps({'success':False}), 403, {'ContentType':'application/json'}
    return json.dumps({'success':True}), 200, {'ContentType':'application/json'}
|
083ecc977b53e6f5c5df64b0ed52ad9ebeeee821
| 3,646,489
|
def gm(data, g1=0.0, g2=0.0, g3=0.0, inv=False):
    """
    Lorentz-to-Gauss Apodization
    Functional form:
        gm(x_i) = exp(e - g*g)
    Where:  e = pi*i*g1
            g = 0.6*pi*g2*(g3*(size-1)-i)
    Parameters:
    * data  Array of spectral data.
    * g1    Inverse exponential width.
    * g2    Gaussian broaden width.
    * g3    Location of gauss maximum.
    * inv   Set True for inverse apodization.
    """
    size = data.shape[-1]
    e = pi * np.arange(size) * g1
    g = 0.6 * pi * g2 * (g3 * (size - 1) - np.arange(size))
    # BUG FIX: np.exp takes no `sig` keyword (TypeError at runtime);
    # cast the result to the input dtype with astype instead.
    apod = np.exp(e - g * g).astype(data.dtype)
    if inv:
        apod = 1 / apod
    return apod * data
|
7c6aec6d9a21f9c5b2800aa742e5aaa3ead1ac63
| 3,646,490
|
import torch
def exp_t(u, t):
    """Compute the tempered exponential exp_t of `u` with temperature `t`."""
    if t == 1.0:
        # Temperature 1 recovers the ordinary exponential.
        return torch.exp(u)
    one_minus_t = 1.0 - t
    base = torch.relu(1.0 + one_minus_t * u)
    return base ** (1.0 / one_minus_t)
|
8b1a8773b8a5159d9332332d6f77d65cacc68d7c
| 3,646,491
|
def decode_json_dict(data):
    # type: (Dict) -> Dict
    """Converts str to python 2 unicodes in JSON data."""
    converted = _strify(data)
    return converted
|
d2512ea50bf5cfca059ca706adc403bea5af1753
| 3,646,492
|
from typing import Any
def linear_search(lst: list, x: Any) -> int:
    """Return the index of the first element of `lst` equal to `x`, or -1 if no
    elements of `lst` are equal to `x`.
    Design idea: Scan the list from start to finish.
    Complexity: O(n) time, O(1) space.
    For an improvement on linear search for sorted lists, see the binary search
    function in the decrease_and_conquer module.
    """
    index = 0
    for element in lst:
        if x == element:
            return index
        index += 1
    return -1
|
47e73d53ff68954aadc6d0e9e293643717a807d8
| 3,646,493
|
def get_color_cmap(name, n_colors=6):
    """
    Return discrete colors from a matplotlib palette.
    :param name: Name of the palette. This should be a named matplotlib colormap.
    :type: str
    :param n_colors: Number of discrete colors in the palette.
    :type: int
    :return: List-like object of colors as hexadecimal tuples
    :type: list
    """
    # Qualitative palettes and the number of distinct colors each provides.
    brewer_qual_pals = {"Accent": 8, "Dark2": 8, "Paired": 12,
                        "Pastel1": 9, "Pastel2": 8,
                        "Set1": 9, "Set2": 8, "Set3": 12, 'tab20':20, 'tab20b':20}
    # 'tab20' yields only 19 usable colors here (one bin is dropped below),
    # so requests beyond 19 spill over into 'tab20b'.
    if name == 'tab20' and n_colors > 19:
        second = 'tab20b'
        ncolor2 = n_colors - 19
        n_colors = 19
    else :
        second = False
    cmap = getattr(cm, name)
    if name in brewer_qual_pals:
        # Qualitative map: sample exactly its native number of bins.
        bins = np.linspace(0, 1, brewer_qual_pals[name])
        if 'tab20' == name :
            # Bin 14 is excluded from tab20 -- presumably an unwanted color;
            # TODO confirm the intent with the original author.
            len_bins = len(bins)
            bins = [bins[i] for i in range(len_bins) if i != 14][:n_colors]
        else :
            bins = bins[:n_colors]
    else:
        # Continuous map: sample interior points, avoiding both extremes.
        bins = np.linspace(0, 1, n_colors + 2)[1:-1]
    palette = list(map(tuple, cmap(bins)[:, :3]))
    if second :
        # Append the extra colors drawn from the secondary palette.
        cmap = getattr(cm, second)
        bins = np.linspace(0, 1, brewer_qual_pals[second])[:ncolor2]
        palette += list(map(tuple, cmap(bins)[:, :3]))
        pal_cycle = cycle(palette)
        palette = [next(pal_cycle) for _ in range(n_colors+ncolor2)]
    else :
        # Cycle so the result always has n_colors entries even if the
        # palette provided fewer distinct colors.
        pal_cycle = cycle(palette)
        palette = [next(pal_cycle) for _ in range(n_colors)]
    return [colors.rgb2hex(rgb) for rgb in palette]
|
90550127196bb1841f48d37ed1f304462d165037
| 3,646,494
|
def logkde2entropy(vects, logkde):
    """
    computes the entropy of the kde
    incorporates vects so that kde is properly normalized (transforms into a truly discrete distribution)
    """
    # FIX: `vol` was computed but unused -- the original called vects2vol a
    # second time in the return expression; reuse the stored value.
    vol = vects2vol(vects)
    # Only bins with finite log-density contribute; exp(-inf)*(-inf) is NaN.
    finite = logkde > -np.inf
    return -vol * np.sum(np.exp(logkde[finite]) * logkde[finite])
|
5ce96636607bc3b2160791cda28ef586cb0f29c2
| 3,646,495
|
from typing import Optional
from typing import Dict
import json
def get_deployment_json(
    runner: Runner,
    deployment_name: str,
    context: str,
    namespace: str,
    deployment_type: str,
    run_id: Optional[str] = None,
) -> Dict:
    """Get the decoded JSON for a deployment.
    If this is a Deployment we created, the run_id is also passed in - this is
    the uuid we set for the telepresence label. Otherwise run_id is None and
    the Deployment name must be used to locate the Deployment.
    """
    assert context is not None
    assert namespace is not None
    span = runner.span()
    try:
        base_cmd = [
            "get",
            deployment_type,
            "-o",
            "json",
            "--export",
        ]
        if run_id is None:
            # Locate the Deployment directly by name.
            target = [deployment_name]
        else:
            # Our own Deployments are found via the telepresence label; a
            # selector query returns a list of objects rather than one.
            target = ["--selector=telepresence=" + run_id]
        decoded = json.loads(
            runner.get_kubectl(
                context,
                namespace,
                base_cmd + target,
                stderr=STDOUT
            )
        )
        return decoded if run_id is None else decoded["items"][0]
    except CalledProcessError as e:
        raise SystemExit(
            "Failed to find Deployment '{}': {}".format(
                deployment_name, str(e.stdout, "utf-8")
            )
        )
    finally:
        span.end()
|
b9cb4cabea6a506cc33c18803bbe45699cf2b222
| 3,646,496
|
import ctypes
def is_admin() -> bool:
    """Check whether the script has Windows admin privileges.

    :return: True if running elevated on Windows, otherwise False.

    FIX: the signature declares ``bool`` but the original returned ``None``
    on non-Windows platforms (where ``ctypes.windll`` does not exist).
    Returning False keeps the result falsy for existing callers while
    honouring the annotation; the raw Windows API int is coerced to bool.
    """
    try:
        return bool(ctypes.windll.shell32.IsUserAnAdmin())
    except AttributeError:  # `windll` exists on Windows only
        return False
|
000fdc8034bf026045af0a5264936c6847489063
| 3,646,497
|
import urllib
def get_firewall_status(gwMgmtIp, api_key):
    """
    Returns the status of the firewall. Calls the op command show chassis status
    Requires an apikey and the IP address of the interface we send the api request
    :param gwMgmtIp: management IP address of the firewall
    :param api_key: PAN-OS API key used to authenticate the request
    :return: 'running' if the chassis reports ready, 'down' otherwise.
        NOTE(review): falls through (implicitly returns None) if the response
        status is neither 'error' nor 'success' -- confirm callers handle that.
    """
    global gcontext
    # cmd = urllib.request.Request('https://google.com')
    cmd = urllib.request.Request(
        "https://" + gwMgmtIp + "/api/?type=op&cmd=<show><chassis-ready></chassis-ready></show>&key=" + api_key)
    # Send command to fw and see if it times out or we get a response
    logger.info('[INFO]: Sending command: {}'.format(cmd))
    try:
        # gcontext is presumably a pre-built ssl.SSLContext -- TODO confirm.
        response = urllib.request.urlopen(cmd, data=None, context=gcontext, timeout=5).read()
        logger.info(
            "[INFO]:Got http 200 response from FW with address {}. So need to check the response".format(gwMgmtIp))
        # Now we do stuff to the gw
    except urllib.error.URLError:
        # No HTTP response within the timeout: treat the firewall as down.
        logger.info("[INFO]: No response from FW with address {}. So maybe not up!".format(gwMgmtIp))
        return 'down'
        # sleep and check again?
    else:
        logger.info("[INFO]: FW is responding!!")
        logger.info("[RESPONSE]: {}".format(response))
        # Parse the XML API reply; `et` is expected to be ElementTree.
        resp_header = et.fromstring(response)
        if resp_header.tag != 'response':
            logger.info("[ERROR]: didn't get a valid response from firewall...maybe a timeout")
            return 'down'
        if resp_header.attrib['status'] == 'error':
            logger.info("[ERROR]: Got response header error for the command")
            return 'down'
        if resp_header.attrib['status'] == 'success':
            # The fw responded with a successful command execution
            for element in resp_header:
                # First child decides: 'yes' means the chassis is ready.
                if element.text.rstrip() == 'yes':
                    # Call config gw command?
                    logger.info("[INFO]: FW with ip {} is ready ".format(gwMgmtIp))
                    return 'running'
                else:
                    return 'down'
|
16d06a5659e98b3d420ab90b21d720367ecde97a
| 3,646,498
|
import logging
def create_logger(name, logfile, level):
    """
    Sets up file logger.
    :param name: Logger name
    :param logfile: Location of log file
    :param level: logging level
    :return: Initiated logger
    """
    log = logging.getLogger(name)
    file_handler = logging.FileHandler(logfile)
    file_handler.setFormatter(logging.Formatter(
        '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'))
    log.addHandler(file_handler)
    log.setLevel(level)
    return log
|
83a0614053c558682588c47e641eceee368f88e0
| 3,646,499
|
def checksum(number):
    """Calculate the checksum. A valid number should have a checksum of 1."""
    total = 0
    for ch in number:
        # 'X' stands for the value 10 (as in ISBN-style check digits).
        digit = 10 if ch == 'X' else int(ch)
        total = (2 * total + digit) % 11
    return total
|
8ada40ca46bc62bbe8f96d69528f2cd88021ad6a
| 3,646,502
|
def instanceof(value, type_):
    """Check if `value` is an instance of `type_`.
    :param value: an object
    :param type_: a type
    """
    result = isinstance(value, type_)
    return result
|
3de366c64cd2b4fe065f15de10b1e6ac9132468e
| 3,646,503
|
def step(y, t, dt):
    """RK2 (Heun) method integration step.

    :param y: state array, first axis indexes the ensemble members
    :param t: current time (unused; `tendencies` is autonomous here)
    :param dt: timestep
    :return: state advanced by one RK2 step

    FIX: the original pre-allocated three np.zeros buffers that were
    immediately overwritten -- dead stores (and a dependence on a module
    global `ndim`); they are removed. The arithmetic is unchanged.
    """
    k1 = tendencies(y)
    k2 = tendencies(y + dt * k1)
    return y + 0.5 * (k1 + k2) * dt
|
e3c946b37d96ad0083fc5cc7a8d84b2f03ca897b
| 3,646,504
|
import torch
import gc
def sample_deletes(graph_, rgb_img_features, xyz,
                   delete_scores, num_deletes, threshold,
                   gc_neighbor_dist, padding_config,
                   **kwargs):
    """Sample Deletes.
    Args:
        graph_: a torch_geometric.data.Batch instance with attributes:
            - rgb: a [N x C_app] torch.FloatTensor of rgb features
            - depth: a [N x 3 x H' x W'] torch.FloatTensor
            - mask: a [N x 1 x H' x W'] torch.FloatTensor
            - orig_masks: a [N x H x W] torch.FloatTensor of original masks
            - crop_indices: a [N, 4] torch.LongTensor. xmin, ymin, xmax, ymax.
        rgb_img_features: an OrderedDict of image features. Output of gc.extract_rgb_img_features()
        xyz_img: a [3, H, W] torch.FloatTensor. 3D point cloud from camera frame of reference
        delete_scores: a [N] torch.FloatTensor with values in [0, 1]. Output of
            DeleteNetWrapper.delete_scores().
        num_deletes: Maximum number of deletes allowed.
        threshold: Minimum delete score required to consider the delete.
        gc_neighbor_dist: Distance threshold for connecting nodes in new graph
        padding_config: a Python dictionary with padding parameters.
    Returns:
        boolean of whether merge operation was successful.
        a torch_geometric.data.Data instance.
    """
    # Sort scores, consider only the ones above a certain threshold
    sorted_scores, score_indices = torch.sort(delete_scores, descending=True)
    num_potential_deletes = torch.sum(sorted_scores > threshold)
    if num_potential_deletes == 0 and torch.all(~graph_.added):  # Nothing to delete
        return False, None
    score_indices = score_indices[:num_potential_deletes]
    delete_inds = torch.zeros(graph_.orig_masks.shape[0]).bool()
    # Sample some masks to delete
    leftover_delete_scores = delete_scores[score_indices]
    leftover_delete_indices = score_indices
    # Sample without replacement, weighted by delete score, until the delete
    # budget is exhausted or no candidates remain above the threshold.
    while torch.sum(delete_inds) < num_deletes and leftover_delete_indices.shape[0] > 0:
        # Sample delete index
        sample_idx = torch.multinomial(leftover_delete_scores, 1)
        delete_idx = leftover_delete_indices[sample_idx][0]
        delete_inds[delete_idx] = True
        # Get leftover potential deletes
        temp = torch.ones(leftover_delete_scores.shape[0]).bool()
        temp[sample_idx] = False
        leftover_delete_indices = leftover_delete_indices[temp]
        leftover_delete_scores = leftover_delete_scores[temp]
    # If the deleting only undoes the potential adds, consider the sampling to be a failure
    if torch.all(delete_inds == graph_.added):
        return False, None
    # Keep the un-deleted masks
    new_masks = graph_.orig_masks[~delete_inds]
    # Create new graph
    new_masks = new_masks[1:]  # Get rid of BG mask
    new_masks = util_.convert_mask_NHW_to_HW(new_masks.float(), start_label=constants.OBJECTS_LABEL)
    new_graph = gc.construct_segmentation_graph(rgb_img_features, xyz, new_masks,
                                                neighbor_dist=gc_neighbor_dist,
                                                padding_config=padding_config)
    return True, new_graph
|
3a24d3806e3e7aebf5ae6d2c7141149358d21607
| 3,646,505
|
def make_char(hex_val):
    """
    Create a unicode character from a hex value
    :param hex_val: Hex value of the character.
    :return: Unicode character corresponding to the value.
    """
    # Python 2 exposes `unichr`; on Python 3 the name is gone (NameError)
    # and the built-in `chr` already covers the full Unicode range.
    try:
        convert = unichr
    except NameError:
        convert = chr
    return convert(hex_val)
|
edbbad92c56ec74ff28295c46dca4f2976768d0a
| 3,646,506
|
def normalize(features):
    """
    Normalizes data using means and stddevs
    """
    means, stddevs = compute_moments(features)
    # Scale raw pixel values to [0, 1] before standardizing.
    scaled = np.divide(features, 255)
    return (scaled - means) / stddevs
|
3b4c07bf80e68ec3d6c807a9293aa5b4f4203401
| 3,646,507
|
def get_args_from_str(input: str) -> list:
    """
    Get arguments from an input string.
    Args:
        input (`str`): The string to process.
    Returns:
        A list of arguments.
    """
    matches = ARG_PARSE_REGEX.findall(input)
    return matches
|
50de69e4ee60da31a219842ce09833a92218ea14
| 3,646,508
|
from typing import List
def simulate(school: List[int], days: int) -> int:
    """Simulates a school of fish for ``days`` and returns the number of fish."""
    school = flatten_school(school)
    # Advance the simulation one day at a time.
    for _ in range(days):
        school = simulate_day(school)
    return sum(school)
|
efcfbfdde9c3fc941a40028459ddc35db0653296
| 3,646,510
|
import torch
def SPTU(input_a, input_b, n_channels: int):
    """Softplus Tanh Unit (SPTU).

    Sums the inputs, then gates the tanh of the first `n_channels` channels
    with the softplus of the remaining channels.
    """
    combined = input_a + input_b
    tanh_part = torch.tanh(combined[:, :n_channels, :])
    softplus_part = torch.nn.functional.softplus(combined[:, n_channels:, :])
    return tanh_part * softplus_part
|
a03cc114cf960af750b13cd61db8f4d2e6c064ad
| 3,646,511
|
def is_fouling_team_in_penalty(event):
    """Returns True if fouling team over the limit, else False"""
    # The team is in the penalty when it had no fouls to give entering this event.
    remaining_fouls = event.previous_event.fouls_to_give[event.team_id]
    return remaining_fouls == 0
|
ac1578af1092586a30b8fc9cdb3e5814da1f1544
| 3,646,512
|
import re
def is_img_id_valid(img_id):
    """
    Checks if img_id is valid.

    A valid id contains only [a-zA-Z0-9_:.-], no repeated dots, exactly one
    ':' separating a non-empty profile from a non-empty base name, and the
    profile must be known to get_profile_configs.
    """
    # BUG FIX: the 4th positional argument of re.sub is `count`, not `flags`,
    # so the original passed re.IGNORECASE (== 2) as a substitution limit of
    # two replacements. Pass it via the `flags` keyword instead.
    t = re.sub(r'[^a-z0-9_:\-\.]', '', img_id, flags=re.IGNORECASE)
    t = re.sub(r'\.+', '.', t)
    if img_id != t or img_id.count(':') != 1:
        return False
    profile, base_name = img_id.split(':', 1)
    if not profile or not base_name:
        return False
    try:
        get_profile_configs(profile)
    except ValueError:
        return False
    return True
|
749a8830d1a932465ca0c9c8c3a18032e2dc357e
| 3,646,513
|
import warnings
def lmc(wave, tau_v=1, **kwargs):
    """ Pei 1992 LMC extinction curve.
    :param wave:
        The wavelengths at which optical depth estimates are desired.
    :param tau_v: (default: 1)
        The optical depth at 5500\AA, used to normalize the
        attenuation curve.
    :returns tau:
        The optical depth at each wavelength.
    """
    if (wave < 1e3).any():
        warnings.warn('LMC: extinction extrapolation below 1000AA is poor')
    mic = wave * 1e-4
    # Pei (1992) term coefficients: amplitude, center (microns), b, n.
    aa = [175., 19., 0.023, 0.005, 0.006, 0.020]
    ll = [0.046, 0.08, 0.22, 9.7, 18., 25.]
    bb = [90., 5.50, -1.95, -1.95, -1.80, 0.00]
    nn = [2.0, 4.5, 2.0, 2.0, 2.0, 2.0]
    abs_ab = mic * 0.
    norm_v = 0  # hack to go from tau_b to tau_v
    mic_5500 = 5500 * 1e-4
    # Accumulate each Drude-like term for both the target wavelengths and
    # the 5500AA normalization point.
    for a, wl, b, n in zip(aa, ll, bb, nn):
        norm_v += a / ((mic_5500 / wl)**n +
                       (wl / mic_5500)**n + b)
        abs_ab += a / ((mic / wl)**n + (wl / mic)**n + b)
    return tau_v * (abs_ab / norm_v)
|
04c89605e8ad4188c62b631e173a9c8fe714958a
| 3,646,514
|
def minMax(xs):
    """Return the minimum and maximum of a non-empty sequence `xs`."""
    lo = hi = xs[0]
    for value in xs[1:]:
        if value < lo:
            lo = value
        elif value > hi:
            hi = value
    return lo, hi
|
8453b71e5b62592f38f4be84f4366fb02bd0171b
| 3,646,515
|
def events(request):
    """Events"""
    profile = request.user.profile
    # Only this user's events.
    user_events = Event.objects.filter(user=request.user)
    paginator = Paginator(user_events, profile.entries_per_page)
    # Coerce the requested page to an int, defaulting to the first page.
    try:
        page = int(request.GET.get('page', '1'))
    except ValueError:
        page = 1
    # Out-of-range page requests deliver the last page instead of erroring.
    try:
        page_of_events = paginator.page(page)
    except (EmptyPage, InvalidPage):
        page_of_events = paginator.page(paginator.num_pages)
    return render_page(
        'thing/events.html',
        {
            'events': page_of_events,
            'user': request.user
        },
        request,
    )
|
3561856af65d2e54eb4f00a13ca85ece4c939b7a
| 3,646,516
|
from typing import Tuple
from typing import Callable
def latent_posterior_factory(x: np.ndarray, y: np.ndarray) -> Tuple[Callable]:
    """Factory function that yields further functions to compute the log-posterior
    of the stochastic volatility model given parameters `x`. The factory also
    constructs functions for the gradient of the log-posterior and the Fisher
    information metric.
    Args:
        x: The stochastic volatilities.
        y: Observations from the stochastic volatility model.
    Returns:
        log_posterior: Function to compute the log-posterior.
        grad_log_posterior: Function to compute the gradient of the log-posterior.
        metric: Function to compute the Fisher information metric.
        grad_metric: Function to compute the gradient of the Fisher information
            metric.
    """
    # Number of time steps in the latent volatility series.
    T = x.size
    def _log_posterior(sigma: float, phi: float, beta: float) -> float:
        """The log-posterior of the stochastic volatility model given the stochastic
        volatilities. The inference is over the model parameters `sigma`, `phi`,
        and `beta`.
        Args:
            sigma: Parameter of the stochastic volatility model.
            phi: Parameter of the stochastic volatility model.
            beta: Parameter of the stochastic volatility model.
        Returns:
            lp: The log-posterior of the stochastic volatility model.
        """
        phisq = np.square(phi)
        # Observation likelihood: y_t ~ Normal(0, beta * exp(x_t / 2)).
        ly = spst.norm.logpdf(y, 0.0, beta*np.exp(0.5 * x)).sum()
        # Stationary AR(1) prior for the initial latent state.
        lxo = spst.norm.logpdf(x[0], 0.0, sigma / np.sqrt(1.0 - phisq))
        # AR(1) transition density for the remaining latent states.
        lx = spst.norm.logpdf(x[1:], phi*x[:-1], sigma).sum()
        lp = ly + lx + lxo + log_prior(sigma, phi, beta)
        return lp
    def _grad_log_posterior_helper(gamma, alpha, beta, sigmasq, phi, phisq):
        # Gradient in the transformed coordinates; the pre-computed
        # sigmasq/phi/phisq are passed in so callers can share the work.
        dpgamma, dpalpha, dpbeta = grad_log_prior(gamma, alpha, beta)
        dbeta = (-T / beta
                 + np.sum(np.square(y) / np.exp(x)) / np.power(beta, 3.0)
                 + dpbeta)
        dgamma = (
            -T + np.square(x[0])*(1.0 - phisq) / sigmasq
            + np.sum(np.square(x[1:] - phi*x[:-1])) / sigmasq
            + dpgamma)
        dalpha = (
            -phi + phi*np.square(x[0])*(1.0 - phisq) / sigmasq
            + np.sum(x[:-1] * (x[1:] - phi*x[:-1])) * (1.0 - phisq) / sigmasq
            + dpalpha)
        return np.array([dgamma, dalpha, dbeta])
    def _metric_helper(gamma, alpha, beta, sigmasq, phi, phisq):
        # Note that this ordering of the variables differs from that presented
        # in the Riemannian manifold HMC paper.
        G = np.array([
            #  gamma                                   alpha                     beta
            [  2.0*T,                                2.0*phi,                     0.0],  # gamma
            [2.0*phi, 2.0*phisq + (T - 1.0)*(1.0 - phisq),                       0.0],  # alpha
            [    0.0,                                    0.0, 2.0 * T / np.square(beta)]  # beta
        ])
        # Add in the negative Hessian of the log-prior.
        H = hess_log_prior(gamma, alpha, beta)
        G -= H
        return G
    def _grad_metric_helper(gamma, alpha, beta, sigmasq, phi, phisq):
        # Per-coordinate derivatives of the metric; stacked along the last axis.
        dGbeta = np.array([
            [0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0],
            [0.0, 0.0, -4.0 * T / np.power(beta, 3.0)]
        ])
        dGgamma = np.zeros((3, 3))
        a = 2.0*(1.0 - phisq)
        b = 2.0*phi*(3.0 - T)*(1.0 - phisq)
        dGalpha = np.array([
            [0.0,   a, 0.0],
            [  a,   b, 0.0],
            [0.0, 0.0, 0.0]
        ])
        dG = np.array([dGgamma, dGalpha, dGbeta]).swapaxes(0, -1)
        dH = grad_hess_log_prior(gamma, alpha, beta)
        return dG - dH
    def _grad_log_posterior(gamma: float, alpha: float, beta: float) -> np.ndarray:
        """The gradient log-posterior of the stochastic volatility model given the
        stochastic volatilities with respect to the (transformed) parameters
        `gamma`, `alpha`, and `beta`.
        Args:
            gamma: Transformed parameter `sigma` of the stochastic volatility model.
            alpha: Transformed parameter `phi` of the stochastic volatility model.
            beta: Parameter of the stochastic volatility model.
        Returns:
            dgamma: The gradient of the log-posterior with respect to the
                transformed parameter `sigma`.
            dalpha: The gradient of the log-posterior with respect to the
                transformed parameter `phi`.
            dbeta: The gradient of the log-posterior with respect to `beta`.
        """
        # gamma = log(sigma) and alpha = arctanh(phi); undo both transforms.
        sigma = np.exp(gamma)
        sigmasq = np.square(sigma)
        phi = np.tanh(alpha)
        phisq = np.square(phi)
        return _grad_log_posterior_helper(gamma, alpha, beta, sigmasq, phi, phisq)
    def _metric(gamma: float, alpha: float, beta: float) -> np.ndarray:
        """The Fisher information metric of the stochastic volatility model given the
        stochastic volatilities.
        Args:
            gamma: Transformed parameter of the stochastic volatility model.
            alpha: Transformed parameter of the stochastic volatility model.
            beta: Parameter of the stochastic volatility model.
        Returns:
            G: The Fisher information metric.
        """
        sigma = np.exp(gamma)
        sigmasq = np.square(sigma)
        phi = np.tanh(alpha)
        phisq = np.square(phi)
        return _metric_helper(gamma, alpha, beta, sigmasq, phi, phisq)
    def _grad_metric(gamma: float, alpha: float, beta: float) -> np.ndarray:
        """The gradient of the Fisher information metric of the stochastic volatility
        model given the stochastic volatilities with respect to the `sigma`,
        `alpha`, and `beta` parameters of the stochastic volatility model.
        Args:
            gamma: Transformed parameter of the stochastic volatility model.
            alpha: Transformed parameter of the stochastic volatility model.
            beta: Parameter of the stochastic volatility model.
        Returns:
            dG: The gradient of the Fisher information metric.
        """
        sigma = np.exp(gamma)
        sigmasq = np.square(sigma)
        phi = np.tanh(alpha)
        phisq = np.square(phi)
        return _grad_metric_helper(gamma, alpha, beta, sigmasq, phi, phisq)
    def grad_log_posterior_and_metric_and_grad_metric(q):
        # Fused variant: computes the shared transforms once and returns the
        # gradient, metric, and metric gradient together.
        gamma, alpha, beta = q
        sigma = np.exp(gamma)
        sigmasq = np.square(sigma)
        phi = np.tanh(alpha)
        phisq = np.square(phi)
        glp = _grad_log_posterior_helper(gamma, alpha, beta, sigmasq, phi, phisq)
        G = _metric_helper(gamma, alpha, beta, sigmasq, phi, phisq)
        dG = _grad_metric_helper(gamma, alpha, beta, sigmasq, phi, phisq)
        return glp, G, dG
    # Convert functions defined for separate arguments to take a vector
    # concatenation of the parameter.
    log_posterior = lambda q: _log_posterior(*inverse_transform(q)[0])
    grad_log_posterior = lambda q: _grad_log_posterior(q[0], q[1], q[2])
    metric = lambda q: _metric(q[0], q[1], q[2])
    grad_metric = lambda q: _grad_metric(q[0], q[1], q[2])
    return (
        log_posterior, grad_log_posterior, metric, grad_metric,
        grad_log_posterior_and_metric_and_grad_metric)
|
0fe2ec7a7fab480fbe19a374e71ac3ab5232d8e0
| 3,646,517
|
def update_build_configuration_set(id, **kwargs):
    """
    Update a BuildConfigurationSet
    """
    raw = update_build_configuration_set_raw(id, **kwargs)
    # Implicitly yields None when the raw update produced no data.
    if not raw:
        return None
    return utils.format_json(raw)
|
ee02faf0d683e271747d6e30a3ef8ffd9c271e6c
| 3,646,518
|
from typing import Optional
def create_app(settings_override: Optional[dict]=None) -> Flask:
    """
    Create a Flask app
    :param settings_override: any settings to override
    :return: flask app
    """
    app = Flask(__name__, instance_relative_config=True)
    # Layered configuration: package defaults, then instance settings,
    # then any explicit overrides.
    app.config.from_object('config.settings')
    app.config.from_pyfile('settings.py', silent=True)
    if settings_override:
        app.config.update(settings_override)
    configure_logging(app)
    initialize_extensions(app)
    db.app = app
    register_blueprints(app)
    initialize_jinja2(app)
    load_models()
    return app
|
2a4ee3b8f4f67db1966a678b6059b53aa21ac73f
| 3,646,519
|
def compute_prefix_function(pattern):
    """
    Computes the prefix (failure) array for KMP.
    :param pattern: pattern string
    :type pattern: str
    :return: list where entry q is the length of the longest proper prefix
        of pattern[:q+1] that is also a suffix of it
    """
    m = len(pattern)
    # Index 0 is a sentinel; the useful entries live at 1..m.
    table = [0] * (m + 1)
    matched = 0
    for q in range(2, m + 1):
        # Fall back through shorter borders until a match (or zero).
        while matched > 0 and pattern[matched] != pattern[q - 1]:
            matched = table[matched]
        if pattern[matched] == pattern[q - 1]:
            matched += 1
        table[q] = matched
    return table[1:]
|
7933cc33eba53247e858ae40b9691d101c7030e6
| 3,646,520
|
def binary_indicator(states,
                     actions,
                     rewards,
                     next_states,
                     contexts,
                     termination_epsilon=1e-4,
                     offset=0,
                     epsilon=1e-10,
                     state_indices=None,
                     summarize=False):
    """Returns 0/1 by checking if next_states and contexts overlap.
    Args:
        states: A [batch_size, num_state_dims] Tensor representing a batch
            of states.
        actions: A [batch_size, num_action_dims] Tensor representing a batch
            of actions.
        rewards: A [batch_size] Tensor representing a batch of rewards.
        next_states: A [batch_size, num_state_dims] Tensor representing a batch
            of next states.
        contexts: A list of [batch_size, num_context_dims] Tensor representing
            a batch of contexts.
        termination_epsilon: terminate if dist is less than this quantity.
        offset: Offset the rewards.
        epsilon: small offset to ensure non-negative/zero distance.
        state_indices: Optional indices selecting which state dimensions are
            compared (passed to index_states).
        summarize: Unused in this function body -- presumably kept for
            signature compatibility with sibling reward functions; confirm.
    Returns:
        A new tf.float32 [batch_size] rewards Tensor, and
            tf.float32 [batch_size] discounts tensor.
    """
    del states, actions  # unused args
    # Optionally restrict the comparison to a subset of state dimensions.
    next_states = index_states(next_states, state_indices)
    # Euclidean distance between next state and the (first) context goal;
    # epsilon keeps the sqrt argument strictly positive.
    dist = tf.reduce_sum(tf.squared_difference(next_states, contexts[0]), -1)
    dist = tf.sqrt(dist + epsilon)
    # Within termination_epsilon of the goal => reward 1 (+offset), else 0 (+offset).
    discounts = dist > termination_epsilon
    rewards = tf.logical_not(discounts)
    rewards = tf.to_float(rewards) + offset
    # NOTE(review): discounts are returned as all-ones regardless of the
    # comparison above -- the boolean is only used to derive rewards.
    return tf.to_float(rewards), tf.ones_like(tf.to_float(discounts))
|
68531010c695e4bb8d49d05f5b0ba8799e1e3cf5
| 3,646,521
|
import math
def sigmoid(num):
    """
    Find the sigmoid of a number.
    :type num: number
    :param num: The number to find the sigmoid of
    :return: The result of the sigmoid
    :rtype: number
    >>> sigmoid(1)
    0.7310585786300049
    """
    # FIX: math.exp(-num) overflows (OverflowError) for num < ~-709; use the
    # algebraically equivalent form exp(num)/(1+exp(num)) on that side, which
    # only underflows gracefully to 0.0.
    if num >= 0:
        return 1 / (1 + math.exp(-num))
    e = math.exp(num)
    return e / (1 + e)
|
73730a39627317011d5625ab85c146b6bd7793d8
| 3,646,522
|
def list_lattices(device_name: str = None, num_qubits: int = None,
                  connection: ForestConnection = None):
    """
    Query the Forest 2.0 server for its knowledge of lattices. Optionally filters by underlying
    device name and lattice qubit count.
    :param device_name: Optional device name to filter by.
    :param num_qubits: Optional qubit count to filter by.
    :param connection: Optional ForestConnection supplying the session and endpoint.
    :return: A dictionary keyed on lattice names and valued in dictionaries of the form
        {
            "device_name": device_name,
            "qubits": num_qubits
        }
    """
    # Prefer the session attached to the supplied connection; fall back to a
    # freshly configured default session.
    if connection and connection.session:
        session = connection.session
    else:
        session = get_session()
    # Resolve the lattices endpoint from the connection or the local config.
    if connection:
        url = connection.sync_endpoint + "/lattices"
    else:
        config = PyquilConfig()
        try:
            url = config.forest_url + "/lattices"
        except TypeError:
            # forest_url is None when no endpoint is configured; string
            # concatenation then raises TypeError.
            raise ValueError("""Encountered an error when querying the Forest 2.0 endpoint.
 Most likely, you're missing an address for the Forest 2.0 server endpoint. This can
 be set through the environment variable FOREST_URL or by changing the following lines
 in the QCS config file:
     [Rigetti Forest]
     url = https://rigetti.com/valid/forest/url""")
    try:
        response = get_json(session, url,
                            params={"device_name": device_name,
                                    "num_qubits": num_qubits})
        return response["lattices"]
    except Exception as e:
        # Broad catch is deliberate: any failure is rewrapped with guidance
        # for the user, preserving the original exception text.
        raise ValueError("""
    list_lattices encountered an error when querying the Forest 2.0 endpoint.
    Some common causes for this error include:
    * You don't have valid user authentication information. Very likely this is because you
      haven't yet been invited to try QCS. We plan on making our device information publicly
      accessible soon, but in the meanwhile, you'll have to use default QVM configurations and
      to use `list_quantum_computers` with `qpus = False`.
    * You do have user authentication information, but it is missing or modified. You can find
      this either in the environment variables FOREST_API_KEY and FOREST_USER_ID or in the
      config file (stored by default at ~/.qcs_config, but with location settable through the
      environment variable QCS_CONFIG), which contains the subsection
          [Rigetti Forest]
          user_id = your_user_id
          key = your_api_key
    * You're missing an address for the Forest 2.0 server endpoint, or the address is invalid.
      This too can be set through the environment variable FOREST_URL or by changing the
      following lines in the QCS config file:
          [Rigetti Forest]
          url = https://rigetti.com/valid/forest/url
    For the record, here's the original exception: {}
    """.format(repr(e)))
|
a6fb4754f3f76135ed2083441782924f03160994
| 3,646,523
|
def inflate_tilegrid(
    bmp_path=None,
    target_size=(3, 3),
    tile_size=None,
    transparent_index=None,
    bmp_obj=None,
    bmp_palette=None,
):
    """
    Inflate a TileGrid of ``target_size`` in tiles from a 3x3 spritesheet by duplicating
    the center rows and columns.

    :param Optional[str] bmp_path: filepath to the 3x3 spritesheet bitmap file
    :param Optional[tuple] target_size: desired size in tiles (target_width, target_height)
    :param Optional[tuple] tile_size: size of the tiles in the 3x3 spritesheet. If
        None is used it will equally divide the width and height of the Bitmap by 3.
    :param Optional[Union[tuple, int]] transparent_index: a single index within the palette to
        make transparent, or a tuple of multiple indexes to make transparent
    :param Optional[OnDiskBitmap] bmp_obj: Already loaded 3x3 spritesheet in an OnDiskBitmap
    :param Optional[Palette] bmp_palette: Already loaded spritesheet Palette
    :raises AttributeError: if neither ``bmp_path`` nor the pair ``bmp_obj`` and
        ``bmp_palette`` is provided
    """
    # pylint: disable=too-many-arguments, too-many-locals, too-many-branches
    # Bug fix: the original only raised when ALL THREE inputs were None, so
    # passing bmp_obj without bmp_palette (or vice versa) slipped through and
    # failed later with a confusing error. Require bmp_path OR the full pair.
    if bmp_path is None and (bmp_obj is None or bmp_palette is None):
        raise AttributeError("Must pass either bmp_path or bmp_obj and bmp_palette")
    if bmp_path is not None:
        image, palette = adafruit_imageload.load(bmp_path)
    else:
        image = bmp_obj
        palette = bmp_palette

    # Make the requested palette index (or indexes) transparent.
    if transparent_index is not None:
        if isinstance(transparent_index, tuple):
            for index in transparent_index:
                palette.make_transparent(index)
        elif isinstance(transparent_index, int):
            palette.make_transparent(transparent_index)

    # Default tile size: the spritesheet is a 3x3 grid of equal tiles.
    if tile_size is None:
        tile_width = image.width // 3
        tile_height = image.height // 3
    else:
        tile_width = tile_size[0]
        tile_height = tile_size[1]

    target_width = target_size[0]
    target_height = target_size[1]

    tile_grid = displayio.TileGrid(
        image,
        pixel_shader=palette,
        height=target_height,
        width=target_width,
        tile_width=tile_width,
        tile_height=tile_height,
    )

    # Spritesheet tile indexes (row-major 3x3):
    #   0 1 2
    #   3 4 5
    #   6 7 8
    # corners
    tile_grid[0, 0] = 0  # upper left
    tile_grid[tile_grid.width - 1, 0] = 2  # upper right
    tile_grid[0, tile_grid.height - 1] = 6  # lower left
    tile_grid[tile_grid.width - 1, tile_grid.height - 1] = 8  # lower right

    # edges: repeat the center-edge tiles along the top/bottom rows
    for x in range(target_size[0] - 2):
        tile_grid[x + 1, 0] = 1
        tile_grid[x + 1, tile_grid.height - 1] = 7
    # and down the left/right columns
    for y in range(target_size[1] - 2):
        tile_grid[0, y + 1] = 3
        tile_grid[tile_grid.width - 1, y + 1] = 5

    # interior: fill with the center tile
    for y in range(target_size[1] - 2):
        for x in range(target_size[0] - 2):
            tile_grid[x + 1, y + 1] = 4

    return tile_grid
|
b3c67c9aaa38cc77208f6fc7cafe91814a0fdbb4
| 3,646,524
|
def get_name_and_version(requirements_line: str) -> tuple[str, ...]:
    """Extract the package name (without extras) and the pinned version from a
    ``pip freeze``-style requirement line such as ``"pkg[extra]==1.2.3 ; marker"``."""
    spec = requirements_line.split(" ", 1)[0]
    full_name, version = spec.split("==")
    base_name = full_name.partition("[")[0]
    return base_name, version
|
424b3c3138ba223610fdfa1cfa6d415b8e31aff3
| 3,646,525
|
def _compute_eval_stats(params, batch,
model,
pad_id):
"""Computes pre-training task predictions and stats.
Args:
params: Model state (parameters).
batch: Current batch of examples.
model: The model itself. Flax separates model state and architecture.
pad_id: Token ID representing padding. A mask is used to distinguish padding
from actual inputs.
Returns:
Model predictions and metrics.
"""
inputs = {
"input_ids": batch["input_ids"],
"input_mask": (batch["input_ids"] != pad_id).astype(np.int32),
"type_ids": batch["type_ids"],
"masked_lm_positions": batch["masked_lm_positions"],
"masked_lm_labels": batch["masked_lm_ids"],
"masked_lm_weights": batch["masked_lm_weights"],
"next_sentence_labels": batch["next_sentence_labels"],
"deterministic": True
}
return model.apply({"params": params}, **inputs)
|
cc7e9b48d6255c8f82ae2bff978c54631d246bda
| 3,646,526
|
import locale
import itertools
def validateTextFile(fileWithPath):
    """
    Check whether a file can be read as plain text and determine a working
    text encoding for it.

    The system's preferred encoding is tried first, followed by a list of
    common fallback encodings.

    :param fileWithPath: path to the file to test
    :return: the name of the first encoding able to read the file, or
        ``None`` if the file is unreadable with every candidate encoding
    """
    # Preferred encoding first, then common fallbacks (original behavior).
    candidate_encodings = [locale.getpreferredencoding(),
                           "utf-8", "ascii", "utf-16", "utf-32",
                           "iso-8859-1", "latin-1"]
    for encoding in candidate_encodings:
        try:
            # Fixes from the original: the file handle is now always closed
            # (the old code leaked it when decoding raised mid-read), the
            # bare `except:` is narrowed, and the first attempt no longer
            # reads the entire file via readlines() — sampling 10 lines is
            # the documented intent and matches the fallback path.
            with open(fileWithPath, "r", encoding=encoding, errors="strict") as file:
                for _ in itertools.islice(file, 10):
                    pass
        except (OSError, UnicodeError, LookupError):
            # Unreadable file or failed decode: try the next encoding.
            continue
        return encoding
    # Error, no encoding was correct
    return None
|
22167a4501ca584061f1bddcc7738f00d4390085
| 3,646,527
|
from bs4 import BeautifulSoup
def get_title(filename="test.html"):
    """Read the specified file, parse it with BeautifulSoup, and return the
    list of ``<title>`` elements found in the document.

    :param filename: path of the HTML file to read (default ``"test.html"``)
    :return: list of matching ``<title>`` tags from ``soup.select``
    """
    with open(filename, "r") as handle:
        markup = handle.read()
    soup = BeautifulSoup(markup, 'html.parser')
    # Select every <title> element in the parsed document.
    return soup.select("title")
|
31c35588bb10132509a0d35b49a9b7eeed902018
| 3,646,528
|
import re
def is_valid_dump_key(dump_key):
    """
    Truthy (the match object) if the `dump_key` is in the valid format of
    "database_name/timestamp.dump", otherwise ``None``.
    """
    pattern = (
        r'^[\w-]+/'                                  # database name
        r'\d{4}_\d{2}_\d{2}_\d{2}_\d{2}_\d{2}_\d+'   # timestamp parts
        r'\.\w+\.dump$'                              # extension
    )
    return re.match(pattern, dump_key)
|
66fd7d465f641a96bd8b22e95918a6dcbefef658
| 3,646,529
|
import math
def GetProfileAtAngle( imdata, xc,yc, angle, radius, width=1 ):
    """
    Returns a 1D profile cut through an image at specified angle, extending to
    specified radius.
    Note: this is designed to imitate pvect, so angles are measured CCW from +x axis!
    This function uses IRAF coordinates (1-based, x = column number)

    Parameters
    ----------
    imdata : 2D ndarray of float
        image data array
    xc : int or float
        x-coordinate of center to extract profile from (IRAF ordering, 1-based)
    yc : int or float
        y-coordinate of center to extract profile from (IRAF ordering, 1-based)
    angle : float
        angle measured CCW from +x axis, in degrees
    radius : int
        length of profile, in pixels
    width : int, optional
        width of profile (perpendicular to profile) in pixels

    Returns
    -------
    rr,ii : tuple of 1D ndarray of float
        rr = array of radius values (= 0 at (xc,yc))
        ii = data pixel values along profile [= Nan if all pixels for that bin
        were masked]
    """
    theta = math.radians(angle)
    dx = math.cos(theta) * radius
    dy = math.sin(theta) * radius
    # The cut runs from (xc - dx, yc - dy) through the center to (xc + dx, yc + dy).
    rr, ii = ExtractProfile(imdata, xc - dx, yc - dy, xc + dx, yc + dy,
                            width=width)
    # Recenter radii so r = 0 falls at (xc, yc).
    return rr - radius, ii
|
5c20ae064989251a807690e8f90f7156a6dbe642
| 3,646,530
|
def extract_axon_and_myelin_masks_from_image_data(image_data):
    """
    Return the binary axon and myelin masks from 8-bit greyscale image data.

    Pixels above 200 (usually 255 under the ADS convention) are axons, pixels
    strictly between 100 and 200 (usually 127) are myelin, and everything
    else (including 0) is background.

    :param image_data: array-like of 8-bit greyscale values
    :return: tuple ``(axon_mask, myelin_mask)`` of binary uint8 arrays
    """
    pixels = np.array(image_data)
    axon_mask = (pixels > 200).astype(np.uint8)
    myelin_mask = ((pixels > 100) & (pixels < 200)).astype(np.uint8)
    return axon_mask, myelin_mask
|
087f80d4c55b7bbba7e60720be26ff3e3ca1648a
| 3,646,532
|
def expand_advanced(var, vars_, nounset, indirect, environ, var_symbol):
    """Expand substitution.

    Dispatches on the modifier character following the variable name:
    ``-``/``=`` default substitution, ``+`` alternate substitution,
    ``?`` strict expansion, anything else offset/slice expansion.
    """
    if not vars_:
        # Reached end of input while a '${' was still open.
        raise MissingClosingBrace(var)
    modifier, rest = vars_[0], vars_[1:]
    if modifier in ("-", "="):
        # '=' additionally assigns the default back into the environment.
        return expand_default(
            var,
            rest,
            set_=(modifier == "="),
            nounset=nounset,
            indirect=indirect,
            environ=environ,
            var_symbol=var_symbol,
        )
    if modifier == "+":
        return expand_substitute(
            var, rest, nounset=nounset, environ=environ, var_symbol=var_symbol
        )
    if modifier == "?":
        return expand_strict(
            var, rest, nounset=nounset, environ=environ, var_symbol=var_symbol
        )
    return expand_offset(
        var, vars_, nounset=nounset, environ=environ, var_symbol=var_symbol
    )
|
1be5d66c18775bca8669d97ccf8ccd439f154ff2
| 3,646,534
|
def overlap(n2, lamda_g, gama):
    """ Calculates the 1/Aeff (M) from the gamma given.

    The gamma is supposed to be measured at lamda_g (in many cases we assume
    that is the same as where the dispersion is measured at).
    """
    wavenumber = 2 * pi / lamda_g
    return gama / (n2 * wavenumber)
|
00e1d59a6a8e5b908acfa3097cfb9818edaf608f
| 3,646,535
|
def fill_none(pre_made_replays_list):
    """Fill none and reformat some fields in a pre-made replays list.

    :param pre_made_replays_list: pre-made replays list from ballchasing.com
        (list of dicts, mutated in place).
    :return: the same list, with missing/odd fields normalized.
    """
    for entry in pre_made_replays_list:
        if entry["region"] is None:
            # A missing region marks the NA qualifier tiebreaker finals.
            entry["region"] = "North America"
            entry["phase"] = "Qualifier"
            entry["stage"] = "Tiebreaker"
            entry["round"] = "Finals"
        if entry['region'] == 'Main Event':
            entry['region'] = 'World'
        elif entry['region'] == 'Europe' and entry['phase'] == 'Tiebreaker':
            entry["phase"] = "Qualifier"
            entry["stage"] = "Tiebreaker"
            # One specific match is the lower-bracket final.
            entry["round"] = "Lower Finals" if entry["match"] == "EG vs 00" else "Upper Finals"
    return pre_made_replays_list
|
ee900227a8afcba71e6a00ef475892da4fdc3e3b
| 3,646,536
|
def parse_args_from_str(arg_str, arg_defs):  # , context=None):
    """
    Parse a comma-separated argument string against a list of argument
    definitions and return the values as a keyword-argument dict.

    Args:
        args_str (str): argument string, optionally comma-separated
        arg_defs (tuple): list of argument definitions; each is a 2-tuple
            ``(name, type)`` for a mandatory argument or a 3-tuple
            ``(name, type, default)`` for an optional one. Supported types
            are ``float``, ``int``, and ``str``.
        context (dict, optional):
            When passed, the arguments are parsed for ``$(var_name)`` macros,
            to lookup values from that dict.
    Returns:
        (dict) keyword args
    Raises:
        TypeError: if `argument` is of an unexpected type
        ValueError: if `argument` does not fulfill the optional condition
        AssertionError:
            if `parse_args_from_str` was called with a wrong syntax, i.e.
            `arg_defs` is not well-formed.
    Examples::
        arg_defs = (
            ("name", str),
            ("amount", float),
            ("options", dict, {}),
        )
        def order(raw_arg_string):
            kwargs = parse_args_from_str(arg_defs)
            assert isinstance(kwargs["name"], str)
            assert type(kwargs["amount"]) is float
            assert isinstance(kwargs["options"], dict)
    """
    check_arg(arg_str, str)
    check_arg(arg_defs, (list, tuple))
    res = {}
    # Special case: '$name()' should not be interpreted as having one "" arg
    # if arg_defs defines a default for the first arg
    if arg_str.strip() == "" and len(arg_defs[0]) == 3:
        arg_str = str(arg_defs[0][2])
    arg_list = [a.strip() for a in arg_str.split(",")]
    # Flips to True at the first optional (3-tuple) definition; mandatory
    # definitions are not allowed after that point.
    optional_mode = False
    for arg_def in arg_defs:
        check_arg(arg_def, (list, tuple))
        if len(arg_def) == 2:
            # Mandatory argument: (name, type).
            arg_name, arg_type = arg_def
            arg_default = NO_DEFAULT
            if optional_mode:
                raise AssertionError(
                    "Mandatory arg definition must not follow optional args: `{}`".format(
                        arg_def
                    )
                )
        elif len(arg_def) == 3:
            # Optional argument: (name, type, default).
            arg_name, arg_type, arg_default = arg_def
            optional_mode = True
        else:
            raise AssertionError("Expected 2- or 3-tuple: {}".format(arg_def))
        if arg_type not in (float, int, str):
            raise AssertionError(
                "Unsupported argument definition type: {}".format(arg_def)
            )
        try:
            # Get next arg
            arg_val = arg_list.pop(0)
            # Allow quotes
            is_quoted = (arg_val.startswith('"') and arg_val.endswith('"')) or (
                arg_val.startswith("'") and arg_val.endswith("'")
            )
            if is_quoted:
                # Strip quotes and return as string (don't cast to other types)
                arg_val = arg_val[1:-1]
            elif "$(" in arg_val:
                # The arg seems to be a macro: don't try to cast.
                pass
            else:
                # Raises ValueError:
                arg_val = arg_type(arg_val)
        except IndexError:
            # arg_list exhausted: fall back to the default, if one exists.
            if arg_default is NO_DEFAULT:
                raise ValueError(
                    "Missing mandatory arg `{}` in '{}'.".format(arg_name, arg_str)
                )
            arg_val = arg_default
        res[arg_name] = arg_val
    if arg_list:
        # More values were supplied than definitions exist.
        raise ValueError("Extra args `{}`.".format(", ".join(arg_list)))
    return res
|
1c21cf170c360c7b429b1303bd19e1a23ea5cd3c
| 3,646,537
|
import torch
def model_evaluation(
    data_loader,
    ml_model_name,
    ml_model,
    smiles_dictionary,
    max_length_smiles,
    device_to_use,
):
    """
    Evaluation per batch of a pytorch machine learning model.

    Parameters
    ----------
    data_loader : torch.utils.data
        The training data as seen by Pytorch for mini-batches.
    ml_model_name : str
        Name of the machine learning model. It can be either "CONV1D", "CONV2D", or "RNN".
    ml_model : nn.Module
        Instance of the pytorch machine learning model.
    smiles_dictionary : dict
        The dictionary of SMILES characters.
    max_length_smiles : int
        The length of the longest SMILES.
    device_to_use : torch.device
        The device to use for model instance, "cpu" or "cuda".

    Returns
    -------
    tuple of dict:
        Dictionary of the predicted, true output values, respectively, in the data loader, with SMILES as keys.
    """
    # Inference mode: freeze dropout/batch-norm and disable gradient tracking.
    ml_model.eval()
    with torch.no_grad():
        all_output_pred = {}
        all_output_true = {}
        for _, data in enumerate(data_loader):
            # SMILES and target
            smiles, target = data
            input_true, output_true = data_to_pytorch_format(
                smiles,
                target,
                smiles_dictionary,
                max_length_smiles,
                ml_model_name,
                device_to_use,
            )
            # Prediction
            output_pred = ml_model(input_true)
            # Convert to numpy arrays
            output_pred = output_pred.cpu().detach().numpy()
            output_true = output_true.cpu().detach().numpy()
            for smile in smiles:
                # NOTE(review): the *whole batch's* prediction/target arrays
                # (not a per-SMILES slice) are stored under every SMILES key
                # of this batch — confirm callers expect batch-level values.
                all_output_pred[smile] = output_pred
                all_output_true[smile] = output_true
    return (all_output_pred, all_output_true)
|
8c381eee394e989f8920cc52ad4b94ca4b502741
| 3,646,538
|
def reverse_dict2(d):
    """Reverses direction of dependence dict
    >>> d = {'a': (1, 2), 'b': (2, 3), 'c':()}
    >>> reverse_dict(d) # doctest: +SKIP
    {1: ('a',), 2: ('a', 'b'), 3: ('b',)}
    :note: dict order are not deterministic. As we iterate on the
        input dict, it make the output of this function depend on the
        dict order. So this function output order should be considered
        as undeterministic.
    """
    reversed_map = {}
    for key, values in d.items():
        for value in values:
            # Append this key to the tuple collected for the value so far.
            reversed_map[value] = reversed_map.get(value, ()) + (key,)
    return reversed_map
|
2419538a13699015f8fefa156e89cf9b1960e358
| 3,646,539
|
import random
def Flip(p, y='Y', n='N'):
    """Returns y with probability p; otherwise n."""
    if random.random() <= p:
        return y
    return n
|
072e170e3f37508a04f8bdbed22470b178f05ab9
| 3,646,540
|
def sub_to_db(sub,
              add_area=True,
              area_srid=3005,
              wkt=True,
              wkb=False,
              as_multi=True,
              to_disk=False,
              procs=1,
              engine=None):
    """
    Convert the object to a SQLite database. Returns the |db| module exposing
    the database ORM and additional SQLAlchemy objects. Note that |procs|
    greater than one results in the database being written to disk (if the
    desired database is SQLite).

    sub (SubOcgDataset) -- The object to convert to the database.
    add_area=True -- Insert the geometric area.
    area_srid=3005 -- SRID to use for geometric transformation.
    wkt=True -- Insert the geomtry's WKT representation.
    wkb=False -- Insert the geometry's WKB representation.
    as_multi=True -- Convert geometries to shapely.MultiPolygon.
    to_disk=False -- Write the database to disk (applicable for SQLite).
    procs=1 -- Number of processes to use when loading data.
    engine=None (sqlalchemy.Engine) -- An optional engine to pass overloading
        the creation of other backends. Useful to use PostGRES instead of
        SQLite for example.

    NOTE(review): ``add_area`` and ``as_multi`` are accepted but never read in
    this body (area is always computed; Polygons are always promoted to
    MultiPolygon) — confirm whether they should gate those behaviors.
    """
    # Only use the internal lock with the default (SQLite) engine; a
    # caller-supplied engine is assumed to manage its own concurrency.
    if engine is None:
        use_lock = True
    else:
        use_lock = False
    ## initialize the db
    db = init_db(engine=engine,to_disk=to_disk,procs=procs)
    print(' loading geometry...')
    ## spatial reference for area calculation
    sr = get_sr(4326)
    sr2 = get_sr(area_srid)
    # Row factory for the parallel loader: builds one db.Geometry record per
    # geometry index. Defaults capture loop-invariant state for subprocesses.
    def f(idx,geometry=sub.geometry,gid=sub.gid,wkt=wkt,wkb=wkb,sr=sr,sr2=sr2,get_area=get_area):
        geom = geometry[idx]
        # Normalize single polygons to MultiPolygon for a uniform schema.
        if isinstance(geom,Polygon):
            geom = MultiPolygon([geom])
        if wkt:
            wkt = str(geom.wkt)
        else:
            wkt = None
        if wkb:
            wkb = str(geom.wkb)
        else:
            wkb = None
        return(dict(gid=int(gid[idx]),
                    wkt=wkt,
                    wkb=wkb,
                    area_m2=get_area(geom,sr,sr2)))
    fkwds = dict(geometry=sub.geometry,gid=sub.gid,wkt=wkt,wkb=wkb,sr=sr,sr2=sr2,get_area=get_area)
    gen = pl.ParallelGenerator(db.Geometry,
                               sub.dim_data,
                               f,
                               fkwds=fkwds,
                               procs=procs,
                               use_lock=use_lock)
    gen.load()
    print(' loading time...')
    ## load the time data
    data = dict([[key,list()] for key in ['tid','time','day','month','year']])
    for dt in sub.dim_time:
        data['tid'].append(int(sub.tid[dt]))
        data['time'].append(sub.timevec[dt])
        data['day'].append(sub.timevec[dt].day)
        data['month'].append(sub.timevec[dt].month)
        data['year'].append(sub.timevec[dt].year)
    load_parallel(db.Time,data,procs,use_lock=use_lock)
    print(' loading value...')
    ## set up parallel loading data
    data = dict([key,list()] for key in ['gid','level','tid','value'])
    # One value row per (time, level, geometry) combination.
    for dt in sub.dim_time:
        for dl in sub.dim_level:
            for dd in sub.dim_data:
                data['gid'].append(int(sub.gid[dd]))
                data['level'].append(int(sub.levelvec[dl]))
                data['tid'].append(int(sub.tid[dt]))
                data['value'].append(float(sub.value[dt,dl,dd]))
    load_parallel(db.Value,data,procs,use_lock=use_lock)
    return(db)
|
6f3d3763a129a4235c0e5c0e884f7ab62bdfc391
| 3,646,542
|
def T_autoignition_methods(CASRN):
    """Return all methods available to obtain T_autoignition for the desired
    chemical.

    Parameters
    ----------
    CASRN : str
        CASRN, [-]

    Returns
    -------
    methods : list[str]
        Methods which can be used to obtain T_autoignition with the given inputs.

    See Also
    --------
    T_autoignition
    """
    # Safety data tables are loaded lazily, on first use.
    if not _safety_data_loaded: _load_safety_data()
    return list_available_methods_from_df_dict(Tautoignition_sources, CASRN, 'T_autoignition')
|
ab194547a1cc7b5eeb2032b1decad366bc4b43c2
| 3,646,543
|
def masa(jd, place):
    """Returns lunar month and if it is adhika or not.
    1 = Chaitra, 2 = Vaisakha, ..., 12 = Phalguna"""
    lunar_day = tithi(jd, place)[0]
    critical = sunrise(jd, place)[0]  # - tz/24 ?
    last_new_moon = new_moon(critical, lunar_day, -1)
    next_new_moon = new_moon(critical, lunar_day, +1)
    # Two consecutive new moons inside the same solar sign mark an
    # adhika (leap) month.
    sign_at_last = raasi(last_new_moon)
    sign_at_next = raasi(next_new_moon)
    is_leap_month = sign_at_last == sign_at_next
    month = sign_at_last + 1
    if month > 12:
        month %= 12
    return [int(month), is_leap_month]
|
b8b7572f4b5dc597d844683e30c92be618e32c43
| 3,646,545
|
from scipy.special import erf
def sigma(s):
    """The probablity a normal variate will be `<s` sigma from the mean.

    Parameters
    ----------
    s : float
        The number of sigma from the mean.

    Returns
    -------
    p : float
        The probability that a value within +/-s would occur.
    """
    scaled = s / np.sqrt(2.0)
    # erf is odd, so this is the two-sided probability mass within +/-s.
    return 0.5 * (erf(scaled) - erf(-scaled))
|
88727617b1cca678613818be8fdb90e114b25438
| 3,646,546
|
def addneq_parse_residualline(line: str) -> dict:
    """
    Parse a line of daily-solution residuals from an ADDNEQ file.

    Extracts the station name, the direction (N/E/U), the standard deviation,
    and then an arbitrary number of daily residuals.

    A series of lines can look like this:
              GESR                  N      0.07      0.02     -0.06
              GESR                  E      0.10     -0.00     -0.10
              GESR                  U      0.23     -0.10      0.20
    """
    fields = line.split()
    residuals = [float(value) for value in fields[3:]]
    return {
        "STATION NAME": fields[0],
        "DIRECTION": fields[1],
        "STDDEV": float(fields[2]),
        "RES": residuals,
    }
|
6d1556cbd01f3fe4cd66dcad231e41fa6b1b9470
| 3,646,547
|
def get_xsd_schema(url):
    """Request the XSD schema from DOV webservices and return it.

    Parameters
    ----------
    url : str
        URL of the XSD schema to download.

    Returns
    -------
    xml : bytes
        The raw XML data of this XSD schema as bytes.
    """
    # Give registered hooks a chance to inject a response (e.g. a cache or a
    # test stub) before going to the network.
    response = HookRunner.execute_inject_meta_response(url)
    if response is None:
        response = get_remote_url(url)
    # Notify hooks of the response that will actually be used.
    HookRunner.execute_meta_received(url, response)
    return response
|
12f5088fea1b9268d75ee90d60b729c8a9577dd0
| 3,646,549
|
def get_char_pmi(data):
    """
    Compute character-level PMI (pointwise mutual information) tags for each
    line of whitespace-tokenized text, using a kenlm language model.

    Each word's characters are emitted as ``<char>@@B#<pmi>`` for the first
    character and ``<char>@@I#<pmi>`` for the following ones, where ``<pmi>``
    is the PMI of the word with its right-hand neighbour.

    :param data: iterable of whitespace-tokenized text lines
    :return: list of tagged output strings, one per input line
    """
    print('get_char_pmi')
    model = kenlm.LanguageModel('../software/kenlm/test.bin')
    res = []
    for line in data:
        words = line.strip().split()
        length = len(words)
        # Append a sentinel so words[i + 1] stays valid for the last word.
        words.append('\n')
        i = 0
        pmi_out = ""
        while i < length:
            # PMI of the bigram (words[i], words[i+1]) from LM probabilities.
            p_union = get_proba(model, words[i] + " " + words[i + 1])
            p1 = get_proba(model, words[i])
            p2 = get_proba(model, words[i + 1])
            p = pmi(p_union, p1, p2)
            # Break the word down to char level
            word = words[i]
            if len(word) > 0:
                # First character gets the B (begin) tag...
                j = 0
                char = word[j]
                pmi_out += char + "@@"
                pmi_out += "B#" + str(p) + " "
                j += 1
                # ...remaining characters get the I (inside) tag.
                while j < len(word):
                    pmi_out += word[j] + '@@'
                    pmi_out += 'I#' + str(p) + " "
                    j += 1
            i += 1
        res.append(pmi_out.strip())
    return res
|
2cb28e7671561a52efbbf98431e3c938700f691a
| 3,646,550
|
def fahrenheit_to_celsius(fahrenheit):
    """Convert a Fahrenheit temperature to Celsius."""
    offset_from_freezing = fahrenheit - 32.0
    return offset_from_freezing / 1.8
|
4aee3dd0b54450fabf7a3a01d340b45a89caeaa3
| 3,646,551
|
import random
import itertools
def sample_blocks(num_layers, num_approx):
    """Generate approx block permutations by sampling w/o replacement.

    NOTE(review): the original docstring claimed the first and last blocks
    are left as ReLU, but sampling draws from range(0, num_layers) — confirm
    against callers.
    """
    draws = [sorted(random.sample(list(range(0, num_layers)), num_approx))
             for _ in range(1000)]
    # Sorting makes duplicate draws adjacent so groupby can collapse them.
    draws.sort()
    return [perm for perm, _ in itertools.groupby(draws) if len(perm) == num_approx]
|
b4b75e77b3749bc7766c709d86bf1f694898fc0d
| 3,646,552
|
def adjacent_values(vals, q1, q3):
    """Helper function for violinplot visualisation (courtesy of
    https://matplotlib.org/gallery/statistics/customized_violin.html#sphx-glr-gallery-statistics-customized-violin-py)

    Given sorted values and their quartiles, return the lower and upper
    adjacent values (whisker ends), clipped to the data range.
    """
    whisker_reach = (q3 - q1) * 1.5
    upper = np.clip(q3 + whisker_reach, q3, vals[-1])
    lower = np.clip(q1 - whisker_reach, vals[0], q1)
    return lower, upper
|
a596ed82a1d66213dbdd3f19b29d58b36979c60d
| 3,646,553
|
def l2_first_moment(freq, n_trials, weights):
    """Return the first raw moment of the squared l2-norm of a vector (f-p),
    where `f` is an MLE estimate of the `p` parameter of the multinomial
    distribution with `n_trials`."""
    diagonal_term = np.einsum("aiai,ai->", weights, freq)
    cross_term = np.einsum("aiaj,ai,aj->", weights, freq, freq)
    return (diagonal_term - cross_term) / n_trials
|
bf597aaa57759dc6d4f0ee1f5ed4f99f49ea271b
| 3,646,554
|
def sigmoid(x: float, a: float = 1, b: float = 1, shift: float = 0) -> float:
    """
    Sigmoid function represented by b * \frac{1}{1 + e^{-a * (x - shift)}}}

    Args:
        x (float): Input x
        a (float, optional): Rate of inflection. Defaults to 1.
        b (float, optional): Difference of lowest to highest value. Defaults to 1.
        shift (float, optional): Horizontal shift. Defaults to 0.

    Returns:
        float: sigmoid function at x
    """
    exponent = -a * (x - shift)
    return b * (1 / (1 + np.exp(exponent)))
|
761497db712619008c1261d2388cea997ae3fff8
| 3,646,555
|
def db_credentials():
    """Load creds and returns dict of postgres keyword arguments."""
    creds = load_json('creds.json')
    # Map postgres kwarg names to the keys used inside the creds file.
    key_map = {
        'host': 'db_host',
        'user': 'db_username',
        'password': 'db_password',
        'database': 'db_database',
    }
    return {kwarg: creds[source] for kwarg, source in key_map.items()}
|
4248452ffb5a9c05b14449972c1db7a18d906b73
| 3,646,556
|
import logging
def generate_corpus_output( cfg, docL, tfidfL ):
    """ Generate a list of OutputRecords where the number of key words
    is limited to the cfg.corpusKeyWordCount highest scoring terms.
    (i.e. cfg.usePerDocWordCount == False)
    """
    records = []
    # Take only the top-scoring terms.
    top_terms = tfidfL[0:min(cfg.corpusKeyWordCount, len(tfidfL))]
    for i, (wordL, tfidf, termNL) in enumerate(top_terms):
        record = OutputRecord(wordL, tfidf, termNL)
        logging.info("%i %f %s", i, tfidf, wordL)
        # Attach the matching sentences from every document to this record.
        for doc in docL:
            doc.find_sentences_in_doc(record)
        records.append(record)
    return records
|
2296d319fd00022df73da9e7d8484adfd5ab16ad
| 3,646,557
|
# Bug fix: `import tqdm` binds the *module*, but the loop below calls
# `tqdm(...)` as a progress-bar wrapper, which raises
# "TypeError: 'module' object is not callable". Import the callable class.
from tqdm import tqdm
def harmonic_fitter(progressions, J_thres=0.01):
    """
    Function that will sequentially fit every progression
    with a simple harmonic model defined by B and D. The
    "B" value here actually corresponds to B+C for a near-prolate,
    or 2B for a prolate top.

    There are a number of filters applied in order to minimize
    calculations that won't be meaningful - these parameters
    may have to be tuned for different test cases.

    Because the model is not actually quantized, J is
    represented as a float. To our advantage, this will
    actually separate real (molecular) progressions from
    fake news; at least half of the J values must be
    close to being an integer for us to consider fitting.

    parameters:
    ---------------
    progressions - iterable containing arrays of progressions
    J_thres - optional argument corresponding to how close a
              value must be to an integer
    returns:
    ---------------
    pandas dataframe containing the fit results; columns
    are B, D, fit RMS, and pairs of columns corresponding
    to the fitted frequency and approximate J value.
    """
    BJ_fit_model = lmfit.models.Model(calc_harmonic_transition)
    params = BJ_fit_model.make_params()
    data = list()
    fit_objs = list()
    failed = list()
    for index, progression in tqdm(enumerate(progressions)):
        # Determine the approximate value of B based on
        # the differences between observed transitions
        approx_B = np.average(np.diff(progression))
        # Calculate the values of J that are assigned based on B
        J = (progression / approx_B) / 2.0
        # We want at least half of the lines to be close to being integer
        if len(progression) >= 2:
            if np.sum(quant_check(J, J_thres)) >= len(progression) / 1.5:
                # Let B vary a bit
                params["B"].set(approx_B, min=approx_B * 0.9, max=approx_B * 1.1)
                # Constrain D to be less than 5 MHz
                params["D"].set(0.001, min=0.0, max=1.0)
                fit = BJ_fit_model.fit(
                    data=progression, J=J, params=params, fit_kws={"maxfev": 100}
                )
                # Only include progressions that can be fit successfully
                if fit.success is True:
                    # Calculate fit RMS
                    rms = np.sqrt(np.average(np.square(fit.residual)))
                    # Only add it to the list of the RMS is
                    # sufficiently low
                    return_dict = dict()
                    return_dict["RMS"] = rms
                    return_dict.update(fit.best_values)
                    # Make columns for frequency and J
                    for i, frequency in enumerate(progression):
                        return_dict[i] = frequency
                        return_dict["J{}".format(i)] = J[i]
                    data.append(return_dict)
                    fit_objs.append(fit)
                else:
                    failed.append([index, fit.fit_report()])
            else:
                failed.append(index)
        else:
            # Too few lines to fit: record the raw estimate with RMS 0.
            return_dict = dict()
            return_dict["RMS"] = 0.0
            return_dict["B"] = approx_B
            # reformat the frequencies and approximate J values
            for i, frequency in enumerate(progression):
                return_dict[i] = frequency
                return_dict["J{}".format(i)] = J[i]
            data.append(return_dict)
    full_df = pd.DataFrame(data=data)
    full_df.sort_values(["RMS", "B", "D"], ascending=False, inplace=True)
    return full_df, fit_objs
|
55a2c4080938c947501ed830f4236ca8f87608e8
| 3,646,560
|
def print_KruskalWallisH(div_calc):
    """
    Compute the Kruskal-Wallis H-test for independent samples. A typical rule is that
    each group must have at least 5 measurements.

    :param div_calc: mapping of group name -> mapping of sample name -> value.
    :return: None on success, or an error-message string if the input could
        not be arranged into per-group arrays.
    """
    calc = defaultdict(list)
    try:
        # Flatten each group's inner mapping into one list of values.
        # Ported from Python 2: iteritems() -> items().
        for k1, v1 in div_calc.items():
            for k2, v2 in v1.items():
                calc[k1].append(v2)
    except Exception:
        return "Error setting up input arrays for Kruskal-Wallis H-Test. Skipping "\
               "significance testing."
    h, p = stats.kruskal(*calc.values())
    # Ported from Python 2: print statements -> print() calls.
    print("\nKruskal-Wallis H-test statistic for {} groups: {}".format(str(len(div_calc)), h))
    print("p-value: {}".format(p))
|
74579ad2f9ee4336ab33f099982a9419d723774e
| 3,646,561
|
import random
import string
def _random_exptname():
"""Generate randome expt name NNNNNNNN_NNNNNN, where N is any number 0..9"""
r = ''.join(random.choice(string.digits) for _ in range(8))
r = r + '_' + ''.join(random.choice(string.digits) for _ in range(6))
return r
|
d9c72ed4bf742adf50e1fdad4f6acb1cc0046167
| 3,646,562
|
def remove_store(store_name):
    """ Deletes the named data store.
    :param store_name: name of the data store to delete.
    :return: result of the data engine's remove operation.
    """
    # Delegate to the active data engine.
    return get_data_engine().remove_store(store_name)
|
ea8ada276095c2ceb85b339b2a925fa53fd93a1e
| 3,646,563
|
import random
def limit_checkins_per_user(checkins: list, num_checkins_per_user: int, random_seed=1):
    """
    Limit for each user a maximum number of check-ins by randomly select check-ins.

    Parameters
    ----------
    checkins: list
        list of check-ins
    num_checkins_per_user: int
        max number of check-ins per user, -1 for unlimited
    random_seed: int
        a random seed for random check-ins selection

    Returns
    -------
    list
        limited check-ins
    """
    if num_checkins_per_user < 0:
        # Negative cap means unlimited: pass the input through unchanged.
        return checkins
    # Group check-ins by their owner.
    per_user = defaultdict(list)
    for checkin in checkins:
        per_user[checkin.user_id].append(checkin)
    kept = list()
    for user_checkins in per_user.values():
        if len(user_checkins) <= num_checkins_per_user:
            # Under the cap: keep everything for this user.
            kept.extend(user_checkins)
        else:
            # Over the cap: re-seed then sample so the choice is reproducible.
            random.seed(random_seed)
            kept.extend(random.sample(user_checkins, k=num_checkins_per_user))
    return kept
|
286760c3630162b78c314f9f8be0943350f47859
| 3,646,565
|
import warnings
import warnings
def getCharacterFilmography(characterID, charIF, charDF, movieIF, movieKF,
                            personIF, personKF, limit=None):
    """Build a filmography list for the specified characterID.

    Looks up the character's record offset in the index file (charIF),
    decodes its (personID, movieID) pairs from the data file (charDF) and
    resolves them to names/titles via the person and movie key files.

    Returns a list of Movie objects, or None when the data files cannot be
    read or the record is missing.  NOTE: Python 2 source (``except E, e``,
    ``4L`` literal, ``xrange``).
    """
    try:
        ifptr = open(charIF, 'rb')
    except IOError, e:
        warnings.warn('Unable to access characters information, '
                    'please run the characters4local.py script: %s' % e)
        return None
    # Index file layout: one 4-byte offset per characterID.
    ifptr.seek(4L*characterID)
    piddata = ifptr.read(4)
    ifptr.close()
    if len(piddata) != 4:
        # Truncated read: characterID beyond the end of the index.
        return None
    idx = convBin(piddata, 'fulloffset')
    try:
        dfptr = open(charDF, 'rb')
    except IOError, e:
        warnings.warn('Unable to access characters information, '
                    'please run the characters4local.py script: %s' % e)
        return None
    dfptr.seek(idx)
    # Check characterID.
    chID = dfptr.read(3)
    if characterID != convBin(chID, 'characterID'):
        # The record at this offset belongs to someone else: stale index.
        dfptr.close()
        return None
    length = convBin(dfptr.read(2), 'longlength')
    # Skip character name.
    latin2utf(dfptr.read(length))
    nrItems = convBin(dfptr.read(3), 'nrCharacterItems')
    # Each filmography entry is a (personID, movieID) pair, i.e. 2 items.
    if limit is not None and nrItems/2 > limit:
        nrItems = limit*2
    filmography = []
    for i in xrange(nrItems/2):
        personID = convBin(dfptr.read(3), 'personID')
        name = getLabel(personID, personIF, personKF)
        movieID = convBin(dfptr.read(3), 'movieID')
        title = getLabel(movieID, movieIF, movieKF)
        # XXX: notes are not retrieved: they can be found scanning
        # actors.list and acresses.list, but it will slow down everything.
        m = Movie(title=title, movieID=movieID, currentRole=name,
                  roleID=personID, roleIsPerson=True, accessSystem='local')
        filmography.append(m)
    dfptr.close()
    return filmography
|
ddf7f1da3e95441a2da9d3fe2f16065e0a13f634
| 3,646,566
|
def sqrt_fixed_full(x, config, is_training=True, causal=True):
  """Full attention matrix with sqrt decomposition.

  The sequence (config.max_seq_len tokens) is split into num_seg segments of
  config.max_seg_len tokens. Every query attends (a) locally within its own
  segment and (b) to one pooled summary per segment, which together
  approximate the full attention matrix.

  Args:
    x: input tensor; assumed shape [batch, seq_len, model_size] —
      TODO(review): confirm against callers.
    config: model configuration providing model_size, num_heads, max_seq_len,
      max_seg_len, local_summary, dropatt and dense_use_bias.
    is_training: when True, attention dropout is applied.
    causal: when True, causal masks are applied to both attention branches.

  Returns:
    Output tensor projected back to config.model_size.
  """
  bsize = x.shape[0]
  query, key, value = attention.get_qkv(x, x, x, hidden_size=config.model_size,
                                        num_heads=config.num_heads,
                                        bias=config.dense_use_bias)
  head_dim = config.model_size // config.num_heads
  # The sequence must split evenly into segments.
  assert config.max_seq_len % config.max_seg_len == 0
  num_seg = config.max_seq_len // config.max_seg_len
  # Reshape to [batch, num_seg, seg_len, heads, head_dim].
  cur_query = tf.reshape(query, [-1,
                                 num_seg,
                                 config.max_seg_len,
                                 config.num_heads,
                                 head_dim])
  with tf.variable_scope('pooling_query'):
    merged_query = pooling_summary(cur_query, axis=2,
                                   local_summary=config.local_summary,
                                   keepdims=True)
  cur_key = tf.reshape(key, cur_query.shape)
  cur_val = tf.reshape(value, cur_query.shape)
  # Segment summary values: one pooled query attends over its own segment.
  span_val = attention.dot_product_attention(merged_query,
                                             cur_key,
                                             cur_val,
                                             is_training=is_training,
                                             attn_axis=1,
                                             dropatt=config.dropatt)
  span_val = tf.squeeze(span_val, axis=2)
  with tf.variable_scope('pooling_key'):
    span_key = pooling_summary(cur_key, axis=2,
                               local_summary=config.local_summary,
                               keepdims=False)
  # Local (within-segment) attention logits.
  local_logits = tf.einsum('bsqhd,bskhd->bsqhk', cur_query, cur_key)
  if causal:
    # Non-strict mask: a position may attend to itself.
    local_mask = get_causal_mask(cur_query, axis=2, is_strict=False)
    local_mask = tf.expand_dims(local_mask, axis=-2)
    local_logits += local_mask
  # Logits from every position to the pooled per-segment keys.
  prev_logits = tf.einsum('bqhd,bkhd->bqhk', query, span_key)
  if causal:
    # Strict mask: only summaries of strictly earlier segments are visible.
    prev_mask = get_causal_mask(cur_query, axis=1, is_strict=True)
    prev_mask = tf.repeat(prev_mask, [config.max_seg_len] * num_seg, axis=0)
    prev_logits += tf.expand_dims(prev_mask, axis=1)
  # Softmax jointly over local keys and segment summaries.
  joint_logits = tf.concat([tf.reshape(local_logits,
                                       [bsize, config.max_seq_len,
                                        config.num_heads, -1]),
                            prev_logits], axis=-1)
  attn_weights = attention.float32_softmax(joint_logits, axis=-1)
  local_att, prev_att = tf.split(attn_weights, [config.max_seg_len, num_seg],
                                 axis=-1)
  if is_training:
    local_att = tf.nn.dropout(local_att, rate=config.dropatt)
  local_att = tf.reshape(local_att, [bsize, num_seg,
                                     config.max_seg_len,
                                     config.num_heads,
                                     config.max_seg_len])
  # Combine the two attention branches and project back to model size.
  local_merged = tf.einsum('bsqhk,bskhd->bsqhd', local_att, cur_val)
  prev_merged = tf.einsum('bqhk,bkhd->bqhd', prev_att, span_val)
  joint_merged = prev_merged + tf.reshape(local_merged, prev_merged.shape)
  output = ops.trail_dense(joint_merged, config.model_size, begin_axis=-2)
  return output
|
3ee88f2adf767c6fb6e0f1c006ff301c45ffc322
| 3,646,567
|
def mcf_from_row(row, gene_to_dcid_list):
    """Build the data MCF text for one dataframe row.

    Returns the generated MCF nodes (one per protein dcid mapped to the
    row's gene) joined by blank lines, or None when the gene has no
    dcid mapping.
    """
    gene_symbol = row['Gene name']
    tissue_name = get_class_name(row['Tissue'])
    cell_name = get_class_name(row['Cell type'])
    expression_level = EXPRESSION_MAP[row['Level']]
    reliability_level = RELIABILITY_MAP[row['Reliability']]
    if gene_symbol not in gene_to_dcid_list:
        # No gene -> dcid mapping available: nothing to emit for this row.
        return None
    nodes = [
        generate_mcf(protein_dcid, tissue_name, cell_name, expression_level,
                     reliability_level)
        for protein_dcid in gene_to_dcid_list[gene_symbol]
    ]
    return '\n\n'.join(nodes)
|
ee78c68bb89a100fa4e0b972d0907e14dcb6d289
| 3,646,568
|
def loads(json_str, target=None):
    """Decode *json_str* with the module-level :class:`JSONDecoder`.

    Convenience shortcut for :func:`JSONDecoder.from_json_str`.
    .. seealso::
        For more information you can look at the doc of :func:`JSONDecoder.from_json_str`.
    """
    decoded = _decoder.from_json_str(json_str, target)
    return decoded
|
76eab90dd544d695f55967969d81ef9cccb1c2fd
| 3,646,569
|
def discover(discover_system: bool = True) -> Discovery:
    """
    Discover capabilities offered by this extension.
    """
    logger.info("Discovering capabilities from aws-az-failure-chaostoolkit")
    result = initialize_discovery_result(
        "aws-az-failure-chaostoolkit", __version__, "aws"
    )
    # Advertise every activity exported by this package.
    for activity in __load_exported_activities():
        result["activities"].append(activity)
    return result
|
ad9b7674f8f8f7cc06ce21dacba2138231b7e69c
| 3,646,570
|
def getter_nofancy(a, b, asarray=True, lock=None):
    """Slice ``a`` by ``b`` via :func:`getter`.

    A thin wrapper whose distinct name signals to the optimization
    passes that the backend doesn't support fancy indexing.
    """
    result = getter(a, b, asarray=asarray, lock=lock)
    return result
|
63e355eb3245c8f394c345fb2ebd4e469fcd7500
| 3,646,571
|
def xy_to_array_origin(image):
    """Return view of image transformed from Cartesian to array origin."""
    # Flip the column axis, then transpose the RGB layout.
    flipped = image[:, ::-1]
    return rgb_transpose(flipped)
|
e2e47f95093e1808cfbe7c2ba28af8c3e5b40307
| 3,646,572
|
import csv
def read_csv(infile, delimiter=',', encoding='utf-8', named=False):
    """Reads a csv as a list of lists (unnamed) or a list of named tuples (named)
    Args:
        string infile: the file to read in
        OPTIONAL:
        string delimiter: the delimiter used (default ',')
        encoding encoding: the encoding of the file (default 'utf-8')
        boolean named: if true, loads rows as named tuples
            (default lists), (default False)
    Returns list of lists or named tuples"""
    # BUG FIX: `namedtuple` was used without being imported, so named=True
    # raised NameError. Local import keeps the module surface unchanged.
    from collections import namedtuple

    with open(infile, encoding=encoding) as f:
        reader = csv.reader(f, delimiter=delimiter)
        if named:
            headers = next(reader)
            # Strip characters that are invalid in Python identifiers so
            # each header can serve as a namedtuple field name.
            names = [identifier.replace('-', '_').replace(' ', '_').lower()
                     for identifier in headers]
            Data = namedtuple("Data", names)
            return [Data._make(row) for row in reader]
        else:
            # csv rows are already lists; a single pass is enough
            # (the original wrapped the reader in a redundant double list()).
            return [row for row in reader]
|
7318293d884fa80a7d93d8046f66b3801d809f42
| 3,646,573
|
def get_directions_id(destination):
    """Get place ID for directions, which is place ID for associated destination, if an event"""
    if not hasattr(destination, 'destination'):
        # Not an event: its own id is the directions id.
        return destination.id
    # Event: use the id of its related destination when one is set,
    # otherwise there is nowhere to route to.
    related = destination.destination
    return related.id if related else None
|
f7cd182cb5ea344c341bf9bfaa7a4389335ae353
| 3,646,575
|
def decode_token(params, token_field=None):
    """
    Decode a JWT token back into the payload it was generated from.
    Args:
        params: json data received with request
        token_field: name of the field that token can be found in
            (falls back to the module-level TOKEN_FIELD when not given)
    Return:
        resulting data from the token decode process
    Raises:
        exc.LoggedOutError: when the token lookup finds no matching row.
    """
    try:
        # Pick the configured default field unless the caller names one.
        field = token_field if token_field else TOKEN_FIELD
        account_details = jwt.decode(params[field], _SECRET,
                                     algorithms=ALGORITHM)
        return account_details
    except orm_exc.NoResultFound:
        raise exc.LoggedOutError()
|
8adac31df7d5659c06f5c4d66fc86ae556531aae
| 3,646,576
|
def find_storage_pool_type(apiclient, storagetype='NetworkFileSystem'):
    """
    @name : find_storage_pool_type
    @Desc : Returns true if the given storage pool type exists
    @Input : type : type of the storage pool[NFS, RBD, etc.,]
    @Output : True : if the type of storage is found
              False : if the type of storage is not found
              FAILED In case the cmd failed
    """
    cmd = listStoragePools.listStoragePoolsCmd()
    cmd_out = apiclient.listStoragePools(cmd)
    if validateList(cmd_out)[0] != PASS:
        return FAILED
    for storage_pool in cmd_out:
        # BUG FIX: compare case-insensitively on BOTH sides. The original
        # compared a lower-cased pool type against the raw argument, so the
        # mixed-case default 'NetworkFileSystem' could never match.
        if storage_pool.type.lower() == storagetype.lower():
            return True
    return False
|
1d3e64185e0361f02a8cc7e2e4316895e22e517e
| 3,646,579
|
from typing import Any
from typing import cast
def parse_year(candidate: Any) -> int:
    """Parses the given candidate as a year literal.

    Raises a TypeError when the candidate is neither an int nor None.
    Note: None is passed through unchanged rather than rejected
    (the docstring previously claimed ValueError, which was never raised).
    """
    if candidate is not None and not isinstance(candidate, int):
        raise TypeError("Argument year is expected to be an int, "
                        "but is {}".format(type(candidate)))
    return cast(int, candidate)
|
337cc3be16e1e1246d1d1f02b55665c655fe131f
| 3,646,580
|
def dropout2d(tensor: Tensor, p: float = 0.2) -> Tensor:
    """
    Method performs 2D channel-wise dropout with a autograd tensor.
    :param tensor: (Tensor) Input tensor, assumed shape (N, C, H, W) — TODO confirm
    :param p: (float) Probability that a whole channel is set to zero
    :return: (Tensor) Output tensor
    """
    # Check argument
    assert 0.0 <= p <= 1.0, 'Parameter p must be in the range of [0, 1].'
    # BUG FIX: the mask was built from np.random.randint(0, 2) (only 0 or 1),
    # which dropped channels with probability 0.5 for any 0 < p < 1, and it was
    # sized by the batch axis (shape[0]) even though reshape(1, -1, 1, 1)
    # broadcasts it over the channel axis. Draw one uniform sample per channel
    # and keep the channel when the sample exceeds p.
    # NOTE(review): no 1/(1-p) rescaling is applied, matching the original.
    mask = (np.random.rand(tensor.shape[1]) > p).astype(float).reshape(1, -1, 1, 1)
    output = tensor.data * mask
    # Check if grad is needed
    requires_grad = tensor.requires_grad
    # Backward pass masks the gradient the same way as the forward pass.
    dependencies = [Dependency(tensor, lambda grad: grad * mask)] if requires_grad else None
    return Tensor(data=output, requires_grad=requires_grad, dependencies=dependencies)
|
6719fa5a3e55665770faf1034677642d78561f83
| 3,646,581
|
def svn_repos_finish_report(*args):
    """svn_repos_finish_report(void * report_baton, apr_pool_t pool) -> svn_error_t"""
    # Auto-generated SWIG binding: forwards directly to the C implementation
    # in the _repos extension module; see the signature in the docstring.
    return _repos.svn_repos_finish_report(*args)
|
19b42660beb7fa5995a8c5e6e0cb5df39116ddb5
| 3,646,582
|
import array
import itertools
def problem451():
    """
    Consider the number 15.
    There are eight positive numbers less than 15 which are coprime to 15: 1,
    2, 4, 7, 8, 11, 13, 14.
    The modular inverses of these numbers modulo 15 are: 1, 8, 4, 13, 2, 11,
    7, 14
    because
    1*1 mod 15=1
    2*8=16 mod 15=1
    4*4=16 mod 15=1
    7*13=91 mod 15=1
    11*11=121 mod 15=1
    14*14=196 mod 15=1
    Let I(n) be the largest positive number m smaller than n-1 such that the
    modular inverse of m modulo n equals m itself.
    So I(15)=11.
    Also I(100)=51 and I(7)=1.
    Find ∑I(n) for 3≤n≤2·10^7
    """
    LIMIT = 20000000
    # Build table of smallest prime factors
    # (sieve: smallestprimefactor[k] is the smallest prime dividing k,
    # and equals k exactly when k is prime).
    smallestprimefactor = array.array("L", itertools.repeat(0, LIMIT + 1))
    end = eulerlib.sqrt(len(smallestprimefactor) - 1)
    for i in range(2, len(smallestprimefactor)):
        if smallestprimefactor[i] == 0:
            # i was never marked by a smaller prime, so i itself is prime.
            smallestprimefactor[i] = i
            if i <= end:
                for j in range(i * i, len(smallestprimefactor), i):
                    if smallestprimefactor[j] == 0:
                        smallestprimefactor[j] = i
    # Returns all the solutions (in ascending order) such that
    # for each k, 1 <= k < n and k^2 = 1 mod n.
    def get_solutions(n):
        if smallestprimefactor[n] == n: # n is prime
            return (1, n - 1)
        else:
            temp = []
            p = smallestprimefactor[n]
            # Lift cached solutions for n // p up to modulus n: any solution
            # mod n must reduce to a solution mod (n // p).
            sols = solutions[n // p]
            for i in range(0, n, n // p):
                for j in sols:
                    k = i + j
                    if k * k % n == 1:
                        temp.append(k)
            return tuple(temp)
    # Process every integer in range
    # solutions[n] caches get_solutions(n); seeds cover n = 0, 1, 2.
    solutions = [(), (), (1,)]
    ans = 0
    for i in range(3, LIMIT + 1):
        sols = get_solutions(i)
        if i <= LIMIT // 2:
            # Only n // p <= LIMIT // 2 is ever looked up later, so caching
            # beyond that would waste memory.
            solutions.append(sols)
        ans += sols[-2] # Second-largest solution
    return ans
|
efb000a8f367cf13e7aec2117efed092e3d5a5f3
| 3,646,583
|
import torch
def collate_molgraphs(data):
    """Batching a list of datapoints for dataloader.
    Parameters
    ----------
    data : list of 3-tuples or 4-tuples.
        Each tuple is for a single datapoint, consisting of
        a SMILES, a DGLGraph, all-task labels and optionally
        a binary mask indicating the existence of labels.
    Returns
    -------
    smiles : list
        List of smiles
    bg : BatchedDGLGraph
        Batched DGLGraphs
    labels : Tensor of dtype float32 and shape (B, T)
        Batched datapoint labels. B is len(data) and
        T is the number of total tasks.
    masks : Tensor of dtype float32 and shape (B, T)
        Batched datapoint binary mask, indicating the
        existence of labels. If binary masks are not
        provided, return a tensor with ones.
    """
    tuple_len = len(data[0])
    assert tuple_len in [3, 4], \
        'Expect the tuple to be of length 3 or 4, got {:d}'.format(tuple_len)
    # Transpose the list of tuples into per-field lists.
    if tuple_len == 3:
        smiles, graphs, labels = map(list, zip(*data))
        masks = None
    else:
        smiles, graphs, labels, masks = map(list, zip(*data))
    bg = dgl.batch(graphs)
    bg.set_n_initializer(dgl.init.zero_initializer)
    bg.set_e_initializer(dgl.init.zero_initializer)
    labels = torch.stack(labels, dim=0)
    # Without explicit masks, every label counts as present.
    masks = torch.ones(labels.shape) if masks is None else torch.stack(masks, dim=0)
    return smiles, bg, labels, masks
|
3ff726fca71ab64ec1e2e665babd8f46b027e819
| 3,646,584
|
def recouvrement_view(request, id):
    """
    Detail view for a single Recouvrement owned by the current user.
    """
    # Scope the lookup to the requesting user so users cannot view
    # each other's records.
    instance = Recouvrement.objects.filter(user=request.user).get(id=id)
    return render(
        request,
        'pages/recouvrement/recouvrement_view.html',
        {'recouvrement': instance},
    )
|
f0e26257a39ef385b9dfaa51bff68b0fec51a263
| 3,646,586
|
def getFileServicesNames(fileServices=None, verbose=True):
    """
    Returns the names and description of the fileServices available to the user.
    :param fileServices: a list of FileService objects (dictionaries), as returned by Files.getFileServices(). If not set, then an extra internal call to Jobs.getFileServices() is made.
    :param verbose: boolean parameter defining whether warnings will be printed (set to True) or not (set to False).
    :return: an array of dicts, where each dict has the name and description of a file service available to the user.
    :raises: Throws an exception if the user is not logged into SciServer (use Authentication.login for that purpose). Throws an exception if the HTTP request to the RACM API returns an error.
    :example: fileServiceNames = Files.getFileServicesNames();
    .. seealso:: Files.getFileServices
    """
    if fileServices is None:
        fileServices = getFileServices(verbose)
    # Project each service down to just its name and description
    # (comprehension replaces the original append loop; stray semicolons removed).
    return [
        {"name": fileService.get('name'),
         "description": fileService.get('description')}
        for fileService in fileServices
    ]
|
ef476f2c661dadebee8e8a16863ff2f4c286d99e
| 3,646,587
|
def username_in_path(username, path_):
    """Checks if a username is contained in URL"""
    # `in` already evaluates to a bool; no explicit branching needed.
    return username in path_
|
131a8fa102fd0a0f036da81030b005f92ea9aab0
| 3,646,588
|
def str_parse_as_utf8(content) -> str:
    """Returns the provided content decoded as utf-8."""
    return str(content, encoding='utf-8')
|
75b8d5f1f8867c50b08146cc3edc1d0ab630280a
| 3,646,589
|
def TypeProviderClient(version):
    """Return a Type Provider client specially suited for listing types.
    Listing types requires many API calls, some of which may fail due to bad
    user configurations which show up as errors that are retryable. We can
    alleviate some of the latency and usability issues this causes by tuning
    the client.
    Args:
      version: DM API version used for the client.
    Returns:
      A Type Provider API client.
    """
    client = apis.GetClientInstance('deploymentmanager', version.id)
    # Cap retries low so retryable per-type failures don't dominate latency.
    client.num_retries = 2
    return client.typeProviders
|
2e735b37d01b9a9a0b44d5cf04acd89d2a8d9b90
| 3,646,590
|
from typing import Optional
from datetime import datetime
from typing import List
from typing import Dict
from typing import Any
def create_indicator(
    pattern: str,
    pattern_type: str,
    created_by: Optional[Identity] = None,
    name: Optional[str] = None,
    description: Optional[str] = None,
    valid_from: Optional[datetime] = None,
    kill_chain_phases: Optional[List[KillChainPhase]] = None,
    labels: Optional[List[str]] = None,
    confidence: Optional[int] = None,
    object_markings: Optional[List[MarkingDefinition]] = None,
    x_opencti_main_observable_type: Optional[str] = None,
    x_opencti_score: Optional[int] = None,
) -> STIXIndicator:
    """Create an indicator."""
    # Start from the default score; an explicit score overrides it.
    score = DEFAULT_X_OPENCTI_SCORE if x_opencti_score is None else x_opencti_score
    custom_properties: Dict[str, Any] = {X_OPENCTI_SCORE: score}
    if x_opencti_main_observable_type is not None:
        custom_properties[X_OPENCTI_MAIN_OBSERVABLE_TYPE] = (
            x_opencti_main_observable_type
        )
    return STIXIndicator(
        id=_create_random_identifier("indicator"),
        created_by_ref=created_by,
        name=name,
        description=description,
        pattern=pattern,
        pattern_type=pattern_type,
        valid_from=valid_from,
        kill_chain_phases=kill_chain_phases,
        labels=labels,
        confidence=confidence,
        object_marking_refs=object_markings,
        custom_properties=custom_properties,
    )
|
421e9d1d060709facb9a8b8d6831b6a45ef479c9
| 3,646,591
|
def import_data(
    path_to_csv: str,
    response_colname: str,
    standards_colname: str,
    header: int = 0,
    nrows: int = None,
    skip_rows: int = None,
) -> pd.DataFrame:
    """Import standard curve data from a csv file.
    Args:
        path_to_csv: Refer to pd.read_csv docs.
        response_colname: Name of column with response data.
        standards_colname: Name of column with standard concentrations.
        header: Refer to pd.read_csv().
        nrows: Refer to pd.read_csv().
        skip_rows: Skips the first n rows after reading data.
    Returns:
        Formatted data as a dataframe with columns
        ``standard_concentrations`` and ``response``.
    Raises:
        ValueError: If response_colname or standards_colname not in data.columns
    """
    raw = pd.read_csv(path_to_csv, header=header, nrows=nrows)
    if skip_rows:
        raw = raw.iloc[skip_rows:, :]
    # Drop fully-empty columns, then any row with a missing value.
    cleaned = raw.dropna(axis=1, how="all").dropna()
    renamed = cleaned.rename(
        columns={response_colname: "response",
                 standards_colname: "standard_concentrations"}
    )
    try:
        return renamed.loc[:, ["standard_concentrations", "response"]]
    except KeyError:
        raise ValueError("Check `response_colname` and `standards_colname` values are valid column names.")
|
fffc650ac7b672e0585b0dc307977c4adf9a0a69
| 3,646,592
|
def plus(x: np.ndarray, y: np.ndarray) -> np.ndarray:
    """Element-wise matrix addition.

    Returns ``x + y`` when both arrays have identical shapes; returns
    None when the shapes differ (callers must check for it).
    """
    if x.shape == y.shape:
        return x + y
    # Shapes differ: keep the original silent behavior, but make the
    # implicit None return explicit and documented.
    return None
|
9d042d90c8d3ca9588c02ddd9ed53ec725785d13
| 3,646,593
|
def add_noise(wave, noise, fs, snr, start_time, duration, wave_power):
    """Mix a scaled noise segment into ``wave`` in place and return it.

    The noise is scaled so its power relative to ``wave_power`` matches
    the requested SNR (in dB), then added starting at ``start_time``
    seconds for at most ``duration`` seconds.
    """
    # Mean power of the noise signal.
    noise_power = np.dot(noise, noise) / noise.shape[0]
    # Gain that yields the requested signal-to-noise ratio.
    gain = np.sqrt(10**(-snr/10.0) * wave_power / noise_power)
    scaled = noise * gain
    offset = int(start_time * fs)
    # Clip to the wave tail, the requested duration, and the noise length.
    n = min(wave.shape[0] - offset, int(duration * fs), scaled.shape[0])
    if n > 0:
        wave[offset: offset + n] += scaled[0: n]
    return wave
|
3f8df3098751b081f93b61da16682bdac2bf6a02
| 3,646,594
|
def _sort_factors(factors, **args):
    """Sort low-level factors in increasing 'complexity' order."""
    multiple = args.get('multiple', True)

    # Factors carrying a multiplicity are (f, n) pairs; order by size,
    # then multiplicity, then canonical sort key.
    def key_with_multiplicity(factor):
        f, n = factor
        return len(f), n, default_sort_key(f)

    # Plain factors: order by size, then canonical sort key.
    def key_without_multiplicity(f):
        return len(f), default_sort_key(f)

    key = key_with_multiplicity if multiple else key_without_multiplicity
    return sorted(factors, key=key)
|
60be823e0f12b0e33d6a9567458cc98d95d1f900
| 3,646,595
|
def get_affix(text):
    """
    This method gets the affix information (last 4 characters of each word).
    :param str text: Input text.
    :return str: Space-joined suffixes, one per whitespace-separated word.
    """
    # Slicing already caps at the word itself for short words
    # (word[-4:] == word when len(word) < 4), so the original length
    # check was redundant.
    return " ".join(word[-4:] for word in text.split())
|
eb0aa68e803ce6c0ae218f4e0e2fd1855936b50f
| 3,646,596
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.