| content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M) |
|---|---|---|
from datetime import timedelta
def round_time(t, to=timedelta(seconds=1)):
    """cftime introduces noise when decoding values into date objects.
    This rounds the time in the date object to the nearest second, assuming the init time
    is at most 1 sec away from a round minute. This is used when merging datasets so
    their time dims match up.
    Args:
        t: datetime or cftime object
        to: size of increment to round off to. By default, round to the closest integer
            second.
    Returns:
        datetime or cftime object rounded to the nearest multiple of `to`
    """
midnight = t.replace(hour=0, minute=0, second=0, microsecond=0)
time_since_midnight = exact_cftime_datetime_difference(midnight, t)
remainder = time_since_midnight % to
quotient = time_since_midnight // to
if remainder <= to / 2:
closest_multiple_of_to = quotient
else:
closest_multiple_of_to = quotient + 1
rounded_time_since_midnight = closest_multiple_of_to * to
return midnight + rounded_time_since_midnight
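# Usage sketch with plain datetimes; for non-cftime objects the external
# exact_cftime_datetime_difference helper is assumed to reduce to ordinary
# subtraction (the stand-in below is illustrative, not the real helper):
from datetime import datetime

def exact_cftime_datetime_difference(a, b):  # stand-in for the real helper
    return b - a

t = datetime(2020, 1, 1, 0, 59, 59, 700000)
assert round_time(t) == datetime(2020, 1, 1, 1, 0, 0)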
|
dcc7d0caa4e4787f710a386968d8967661e662ca
| 3,647,765
|
import numpy as np
import pandas as pd
def decide_end(match_list, return_whole_match_object=False):
    """
    Among all the match objects, return the match string closest to the end of the text.
    Return: a string. If return_whole_match_object is True, return a match object.
    """
if len(match_list) == 0:
return pd.NA
ends = np.array(list(map(lambda match_object : match_object.span()[1], match_list)))
closest_index = np.argmax(ends)
if return_whole_match_object:
return match_list[closest_index]
else:
return match_list[closest_index].group()
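# Usage sketch with re.finditer: pick the match ending closest to the end of the text.
import re

matches = list(re.finditer(r"\d+", "a1 b22 c333"))
assert decide_end(matches) == "333"
assert decide_end([]) is pd.NA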
|
72e9a4f63c9c7b95e5728b798bc1cd508d1911e6
| 3,647,766
|
def get_level_refactorings_count(level: int, dataset: str = "") -> str:
"""
Get the count of all refactorings for the given level
Parameter:
level (int): get the refactoring instances for this level
dataset (str) (optional): filter for these specific projects
"""
return f"SELECT refactoring, count(*) FROM (" + \
get_instance_fields(refactoringCommits, [(refactoringCommits, ["refactoring"])],
f"{refactoringCommits}.level = {str(level)}", dataset) + \
f" AND {valid_refactorings_filter(refactoringCommits)} AND {file_type_filter(refactoringCommits)}) t group by refactoring order by count(*) desc"
|
8150537a35161541d7eb4b483d06ef8096611d37
| 3,647,767
|
def repeat_batch(t, K, dim=0):
"""Repeat a tensor while keeping the concept of a batch.
:param t: `torch.Tensor`: The tensor to repeat.
:param K: `int`: The number of times to repeat the tensor.
:param dim: `int`: The dimension to repeat in. This should be the
batch dimension.
:returns: `torch.Tensor`: The repeated tensor. The new shape will be
batch size * K at dim, the rest of the shapes will be the same.
Example::
>>> a = torch.arange(10).view(2, -1)
>>> a
tensor([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
>>> a.repeat(2, 1)
tensor([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
>>> repeat_batch(a, 2)
tensor([[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[5, 6, 7, 8, 9]])
"""
shape = t.shape
tiling = [1] * (len(shape) + 1)
tiling[dim + 1] = K
tiled = t.unsqueeze(dim + 1).repeat(tiling)
old_bsz = shape[dim]
new_bsz = old_bsz * K
new_shape = list(shape[:dim]) + [new_bsz] + list(shape[dim + 1 :])
return tiled.view(new_shape)
|
31ae6e02bd23c56049a4f8e5ea9f36e5b6186678
| 3,647,768
|
def ifte(s, g_cond, g_true, g_false):
"""goal that succeeds if g_cond and g_true succeed or g_cond fails and g_false succeeds"""
def loop(s_inf=g_cond(s)):
try:
first_cond = next(s_inf)
except StopIteration:
yield from g_false(s)
return
except SuspendIteration as suspension:
raise SuspendIteration(loop(suspension.stream))
yield from append_inf(g_true(first_cond),
append_map_inf(g_true, s_inf))
return loop()
|
899ed78b53e056804e9515e2f01125831ae0dfba
| 3,647,769
|
def filter_by_continue_threshold_variance_threshold(peak_info, acc, cont_win_size=3, cont_thres=4, var_thres=0.001):
    """
    Calculate the continuity over a given window length, then calculate the variance and filter
    the data by a given threshold
    :param peak_info: a 5D matrix
    :param acc: accelerometer data
    :param cont_win_size: continuity window length
    :param cont_thres: continuity threshold
    :param var_thres: variance threshold
    :return: peak_info: the first column of the peaks that pass the variance filter
    """
end_for = len(peak_info[:,2])-1
for i in np.arange(cont_thres-1, end_for):
v_count = 0
for x in np.arange(1, cont_thres+1):
if np.var(acc[int(peak_info[i-x+1, 0]):int(peak_info[i-x+2, 0]+1)], ddof=1) > var_thres:
v_count = v_count + 1
if v_count >= cont_win_size:
peak_info[i, 4] = 1
else:
peak_info[i, 4] = 0
peak_info = peak_info[peak_info[:, 4] == 1, 0]
return peak_info
|
7cdbe81b8c0931d315a9d928b6a32105e6da56fb
| 3,647,770
|
from datetime import datetime
def send_update(*args: str) -> bool:
""" Updates the path endpoint to contain the current UTC timestamp """
assert args, "Firebase path cannot be empty"
endpoint = args[-1]
value = {endpoint: datetime.utcnow().isoformat()}
return send_message(value, *args[:-1])
|
b9b9b7a277bc2a0ffd9ae0c4d658eb5f3d017d20
| 3,647,771
|
def execute_custom(datatype, runtype, driver, data_repository, step_list):
"""
Execute a custom testcase
"""
print_info("{0} {1}".format(datatype, runtype))
tc_status = False
    if "suite_exectype" in data_repository and \
            data_repository["suite_exectype"].upper() == "ITERATIVE":
        print_info("Testsuite execute type=iterative but the testcase datatype=custom. "
                   "All testcases in an iterative testsuite should have datatype=iterative, "
                   "hence this testcase will be marked as failure.")
elif runtype.upper() == 'SEQUENTIAL_KEYWORDS' or runtype.upper() == 'PARALLEL_KEYWORDS':
tc_status = driver.main(step_list, data_repository, tc_status, system_name=None)
else:
print_error("Unsuppored runtype found, please check testcase file")
return tc_status
|
884ab4ff7f66f1ad969b03ec406513b301739169
| 3,647,772
|
def parseSolFile(filename):
"""Parses SOL file and extract soil profiles."""
data = {}
profile = None
lat = None
lon = None
with open(filename) as fin:
for line in fin:
if line.startswith("*"):
if profile is not None:
data[(lat, lon)] = "{0}\r\n".format(profile)
profile = line[1:].strip()
elif not line.startswith("@") and len(line.strip()) > 0:
toks = line.split()
if len(toks) == 5:
lat = float(toks[2])
lon = float(toks[3])
profile += "\r\n{0}".format(line.strip())
else:
try:
float(toks[0])
line = line.replace(toks[1], "".join([" "]*len(toks[1])))
profile += "\r\n{0}".format(line.rstrip())
                    except (ValueError, IndexError):
profile += "\r\n {0}".format(line.rstrip())
        if profile is not None:  # flush the final profile, which no "*" line follows
            data[(lat, lon)] = "{0}\r\n".format(profile)
    return data
|
7c3876f1e4899eff5b0036045df4348903a11306
| 3,647,773
|
import collections
def get_deps_info(projects, configs):
"""Calculates dependency information (forward and backwards) given configs."""
deps = {p: configs[p].get('deps', {}) for p in projects}
# Figure out the backwards version of the deps graph. This allows us to figure
# out which projects we need to test given a project. So, given
#
# A
# / \
# B C
#
# We want to test B and C, if A changes. Recipe projects only know about the
# B-> A and C-> A dependencies, so we have to reverse this to get the
# information we want.
downstream_projects = collections.defaultdict(set)
for proj, targets in deps.items():
for target in targets:
downstream_projects[target].add(proj)
return deps, downstream_projects
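# Worked example for the diamond in the comment above: B and C both depend on
# A, so a change to A must trigger tests for B and C.
configs = {'A': {'deps': {}},
           'B': {'deps': {'A': '...'}},
           'C': {'deps': {'A': '...'}}}
deps, downstream = get_deps_info(['A', 'B', 'C'], configs)
assert downstream['A'] == {'B', 'C'}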
|
10215dfb623b8ebaaabdb2d1bcffd876d37f9f66
| 3,647,774
|
def write_cflags():
"""Adds C-Flags. C++ version is defined at the beginning of this file"""
text = f"""CFLAGS = ${{TF_CFLAGS}} ${{OMP_CFLAGS}} -fPIC -O2 -std={CPPVERSION}
LDFLAGS = -shared ${{TF_LFLAGS}}
"""
text += write_cflags_cuda()
return text
|
1348c70b5bdbe168760dba677f9bcc4507957510
| 3,647,775
|
def get_coverage(inputs):
"""Get edge coverage.
Returns:
A dictionary of inputs and corresponding coverage
"""
    cov_dict = dict()
    for test_input in inputs:
        # Get coverage by running the program
        cov = coverage(test_input)
        # Update the coverage dictionary for this test input
        cov_dict[test_input] = cov
    return cov_dict
|
5a80399b7877d968654e8c6fc069ff0f70d10a62
| 3,647,777
|
# NOTE: `add` here is this module's own mode-aware sum function, not operator.add
def average(arr, mode="mixed"):
    """
    average(arr, mode) takes the average of a given array.
    Once again, the modes of add() can be used here to denote the type of the array.
    The companion function determine_mode(arr) can be used to determine the correct mode for your array.
    """
if len(arr) == 0: return 0.0
return add(arr, mode)/len(arr)
|
74d0b836e6877d1f7d23b69a191e653bcffd6f00
| 3,647,779
|
def non_halting(p):
"""Return a non-halting part of parser `p` or `None`."""
return left_recursive(p) or non_halting_many(p)
|
d9d8b87cad15c5416041c40396bd3e51b0c28051
| 3,647,780
|
def _isValidWord(word):
"""Determine whether a word is valid. A valid word is a valid english
non-stop word."""
if word in _englishStopWords:
return False
elif word in _englishWords:
return True
elif wordnet.synsets(word):
return True
else:
return False
|
aa0dd1ceecc807b3aa6ecf740d5ec547bf748e7c
| 3,647,781
|
def compare_floats(value1: float, value2: float) -> bool:
    """Compare two floats for approximate equality (absolute tolerance 1e-6)."""
    return abs(value1 - value2) <= 10**-6
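# For values far from 1.0 an absolute tolerance can be too strict or too loose.
# A sketch using the standard library's relative comparison instead; the name
# compare_floats_relative is hypothetical, and it assumes the 1e-6 above was
# meant as roughly "six significant digits":
import math

def compare_floats_relative(value1: float, value2: float) -> bool:
    # math.isclose applies a relative tolerance, with abs_tol as a floor near zero
    return math.isclose(value1, value2, rel_tol=1e-6, abs_tol=1e-6)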
|
225a8fd4d472fe630efe32c506cb1ac3f7ff4b5f
| 3,647,782
|
import os
import tempfile
from cuml.utils.import_utils import has_treelite, has_xgboost
import treelite
import treelite.runtime
import xgboost as xgb
def _build_treelite_classifier(m, data, arg={}):
    """Setup function for treelite classification benchmarking"""
    if not has_treelite():
        raise ImportError("No treelite package found")
    if not has_xgboost():
        raise ImportError("No XGBoost package found")
# use maximum 1e5 rows to train the model
train_size = min(data[0].shape[0], 100000)
dtrain = xgb.DMatrix(data[0][:train_size, :], label=data[1][:train_size])
params = {
"silent": 1, "eval_metric": "error", "objective": "binary:logistic"
}
params.update(arg)
max_depth = arg["max_depth"]
num_rounds = arg["num_rounds"]
n_feature = data[0].shape[1]
tmpdir = tempfile.mkdtemp()
model_name = f"xgb_{max_depth}_{num_rounds}_{n_feature}_{train_size}.model"
model_path = os.path.join(tmpdir, model_name)
bst = xgb.train(params, dtrain, num_rounds)
tl_model = treelite.Model.from_xgboost(bst)
tl_model.export_lib(
toolchain="gcc", libpath=model_path+"treelite.so",
params={'parallel_comp': 40}, verbose=False
)
return treelite.runtime.Predictor(model_path+"treelite.so", verbose=False)
|
095d9748988d55d2b578c0cb74fc4a662aa660c3
| 3,647,784
|
def _pkq(pk):
"""
Returns a query based on pk.
Note that these are designed to integrate with cells and how they are saved in the database
:Parameters:
----------------
pk : list
list of primary keys
:Returns:
-------
dict
mongo query filtering for table
:Examples:
----------
>>> import datetime
>>> assert _pkq(None) == {}
>>> assert dict(_pkq(['world', 'hello'])) == {"_pk": {"$eq": ["hello", "world"]}}
"""
    if pk is None or len(pk) == 0:
        return {}
    else:
        # sort so the stored key order matches the doctest above
        return q[_pk] == sorted(pk)
|
d17527132c26c7e3504471f8456baccea295c71e
| 3,647,785
|
import numpy as np
import torch
from PIL import Image
def inspect_decode_labels(pred, num_images=1, num_classes=NUM_CLASSES,
inspect_split=[0.9, 0.8, 0.7, 0.5, 0.0], inspect_ratio=[1.0, 0.8, 0.6, 0.3]):
"""Decode batch of segmentation masks accroding to the prediction probability.
Args:
pred: result of inference.
num_images: number of images to decode from the batch.
num_classes: number of classes to predict (including background).
inspect_split: probability between different split has different brightness.
Returns:
A batch with num_images RGB images of the same size as the input.
"""
if isinstance(pred, torch.Tensor):
pred = pred.data.cpu().numpy()
n, c, h, w = pred.shape
pred = pred.transpose([0, 2, 3, 1])
if n < num_images:
num_images = n
outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8)
for i in range(num_images):
img = Image.new('RGB', (w, h))
pixels = img.load()
for j_, j in enumerate(pred[i, :, :, :]):
for k_, k in enumerate(j):
assert k.shape[0] == num_classes
k_value = np.max(softmax(k))
k_class = np.argmax(k)
for it, iv in enumerate(inspect_split):
if k_value > iv: break
if iv > 0:
pixels[k_,j_] = tuple(map(lambda x: int(inspect_ratio[it]*x), label_colours[k_class]))
outputs[i] = np.array(img)
return torch.from_numpy(outputs.transpose([0, 3, 1, 2]).astype('float32')).div_(255.0)
|
d8ee386e2088428b7bfe5579cc5558cf4d6890f1
| 3,647,786
|
from typing import Dict
from typing import Union
def set_default_values(
**attributes: Dict[str, Union[float, int, str]],
) -> Dict[str, Union[float, int, str]]:
"""Set the default value of various parameters.
:param attributes: the attribute dict for the electronic filter being calculated.
:return: attributes; the updated attribute dict.
:rtype: dict
"""
if attributes["quality_id"] <= 0:
attributes["quality_id"] = 1
return attributes
|
3c8871706446b2bd0aec1879b06e443a57898a96
| 3,647,787
|
import inspect
def validate_function(fn: FunctionType, config: Configuration, module_type: ModuleType) -> FunctionValidationResult:
"""Validates the docstring of a function against its signature.
Args:
fn (FunctionType): The function to validate.
config (Configuration): The configuration to use while validating.
module_type (ModuleType): The module from which the function was extracted.
Returns:
FunctionValidationResult: The result of validating this function.
"""
log(f"Validating function: {fn}")
result = FunctionValidationResult(fn)
doc = inspect.getdoc(fn)
if not doc:
if config.fail_on_missing_docstring:
result.result = ResultType.FAILED
result.fail_reason = f"Function does not have a docstring"
_, line_number = inspect.getsourcelines(fn)
result.range = Range(line_number, line_number, 0, 0)
else:
result.result = ResultType.NO_DOC
return result
parser = config.get_parser()
summary = parser.get_summary(doc, module_type)
if not summary and config.fail_on_missing_summary:
result.result = ResultType.FAILED
result.fail_reason = f"Function does not have a summary"
result.range = __get_docstring_range(fn, module_type, doc)
return result
sig = inspect.signature(fn)
sig_parameters = [Parameter(name, proxy.annotation) for name, proxy in sig.parameters.items() if name != "self"]
sig_return_type = type(None) if sig.return_annotation is None else sig.return_annotation
try:
doc_parameters = parser.get_parameters(doc, module_type)
doc_return_type = parser.get_return_type(doc, module_type)
except ParseException as e:
result.result = ResultType.FAILED
result.fail_reason = f"Unable to parse docstring: {str(e)}"
result.range = __get_docstring_range(fn, module_type, doc)
return result
# Validate return type
if sig_return_type != doc_return_type:
result.result = ResultType.FAILED
result.fail_reason = f"Return type differ. Expected (from signature) {sig_return_type}, but got (in docs) {doc_return_type}."
result.range = __get_docstring_range(fn, module_type, doc)
return result
# Validate equal number of parameters
if len(sig_parameters) != len(doc_parameters):
result.result = ResultType.FAILED
result.fail_reason = f"Number of arguments differ. Expected (from signature) {len(sig_parameters)} arguments, but found (in docs) {len(doc_parameters)}."
result.range = __get_docstring_range(fn, module_type, doc)
return result
# Validate name and type of function parameters
for sigparam, docparam in zip(sig_parameters, doc_parameters):
if sigparam.name != docparam.name:
result.result = ResultType.FAILED
result.fail_reason = f"Argument name differ. Expected (from signature) '{sigparam.name}', but got (in docs) '{docparam.name}'"
result.range = __get_docstring_range(fn, module_type, doc)
return result
# NOTE: Optional[str] == Union[str, None] # True
if sigparam.type != docparam.type:
result.result = ResultType.FAILED
result.fail_reason = f"Argument type differ. Argument '{sigparam.name}' was expected (from signature) to have type '{sigparam.type}', but has (in docs) type '{docparam.type}'"
result.range = __get_docstring_range(fn, module_type, doc)
return result
# Validate exceptions raised
if config.fail_on_raises_section:
sig_exceptions = get_exceptions_raised(fn, module_type)
doc_exceptions = parser.get_exceptions_raised(doc)
if len(sig_exceptions) != len(doc_exceptions):
result.result = ResultType.FAILED
result.fail_reason = f"Number of listed raised exceptions does not match actual. Doc: {doc_exceptions}, expected: {sig_exceptions}"
result.range = __get_docstring_range(fn, module_type, doc)
return result
        missing = set(sig_exceptions) - set(doc_exceptions)  # set difference, not intersection
        if len(missing) > 0:
            result.result = ResultType.FAILED
            result.fail_reason = f"Listed raised exceptions do not match actual. Docstring: {doc_exceptions}, expected: {sig_exceptions}"
result.range = __get_docstring_range(fn, module_type, doc)
return result
result.result = ResultType.OK
return result
|
cc9c858f8ade844b89d944dc149c0233ed5741e7
| 3,647,788
|
def say(l, b, i):
"""
!d Repeat a word or phrase
!a <message...>
!r moderator
"""
try:
        print('Saying the phrase:', ' '.join(i.args))
b.l_say(' '.join(i.args), i, 1)
return True
except TypeError:
return False
|
260867612cd468babd42654c6d823649cbc73d41
| 3,647,790
|
import re
from tabulate import tabulate
def rSanderSelect(dbItem,index=0,interactive=False):
"""
rSanderSelect(dbItem,index=0,interactive=False)
select which rSander henry data to use in dbItem
Parameters:
dbItem, db[key] dictionary object with keys = ['hbpSIP','hbpSIPL',
'hbpSI_index']
index, positive integer index for list item in hbpSIPL to move into
hbpSIP. Use interactive=True to display choices and ask for
user input for an index.
interactive, True to display choices and ask user input for an index,
False to make the change silently
Returns:
        Nothing on success (dbItem is successfully changed) or error messages
if there is an issue
"""
keys = ['hbpSIP','hbpSIPL','hbpSI_index']
for key in keys: #test if dbItem has valid dictionary keys
if key not in dbItem.keys():
return print("Henry data (%s) not found in dbItem[%s]\n" %
(key,dbItem['name']))
nHbpSIPL =len(dbItem['hbpSIPL'])
if not interactive:
invalIndex = "Invalid index: %s\n0 <= index <= %s\n" % (index,nHbpSIPL-1)
if re.match(r'^[0-9]+$',str(index)): #make sure index is positive integer
if index > nHbpSIPL-1: #check for valid index
return print(invalIndex)
dbItem['hbpSI_index'] = index
dbItem['hbpSIP'] = [float(dbItem['hbpSIPL'][index][0]),
float(dbItem['hbpSIPL'][index][1])]
else:
return print(invalIndex)
else:
header = ['Index','Ho /mol/kg/Pa','dln(H)/d(1/T) /K','Code','Ref.']
inStr = "Select an index (%s to %s) or e(x)it: " % (0,nHbpSIPL-1)
choice = ''
while choice != 'x':
table = []
for idx in range(nHbpSIPL):
table.append([idx])
table[idx].extend(dbItem['hbpSIPL'][idx])
print('\n'+tabulate(table,headers=header,numalign='center',
stralign='center')+'\n')
choice = input(inStr)
inStr = "Select an index (%s to %s) or e(x)it: " % (0,nHbpSIPL-1)
invalStr = "Invalid input: %s\n0 <= index <= %s or \'x\' to exit\n" % (choice,nHbpSIPL-1)
if re.match(r'^[0-9]+$',choice):
index = int(choice)
if index > nHbpSIPL-1: #check for valid index
inStr = invalStr + inStr
else:
dbItem['hbpSI_index'] = index
dbItem['hbpSIP'] = [float(dbItem['hbpSIPL'][index][0]),
float(dbItem['hbpSIPL'][index][1])]
else:
inStr = invalStr + inStr
|
54e6a79a2095810e10032c2da59972e89ca186eb
| 3,647,791
|
def dataset_w_pedigree_field():
"""
:return: Return model Dataset example with `pedigree_field` defined.
"""
search_pattern = SearchPattern(left="*/*/*_R1.fastq.gz", right="*/*/*_R2.fastq.gz")
dataset = DataSet(
sheet_file="sheet.tsv",
sheet_type="germline_variants",
search_paths=("/path",),
search_patterns=(search_pattern,),
naming_scheme="only_secondary_id",
sodar_uuid="99999999-aaaa-bbbb-cccc-999999999999",
pedigree_field="familyId",
)
return dataset
|
2fce0d1391e234a7bb4f2a0bcab5ba24fc27abe0
| 3,647,792
|
import requests
def get_new_access_token(client_id, client_secret, refresh_token):
"""Use long-lived refresh token to get short-lived access token."""
response = requests.post(
'https://www.googleapis.com/oauth2/v4/token',
data={
'client_id': client_id,
'client_secret': client_secret,
'refresh_token': refresh_token,
'grant_type': 'refresh_token',
},
timeout=TIMEOUT,
)
response.raise_for_status()
access_token = response.json()['access_token']
return access_token
|
a8f79511f8f0078121cf291752c2b315023df6de
| 3,647,793
|
def prettify_seconds(seconds):
"""
Prettifies seconds.
Takes number of seconds (int) as input and returns a prettified string.
Example:
>>> prettify_seconds(342543)
'3 days, 23 hours, 9 minutes and 3 seconds'
"""
if seconds < 0:
raise ValueError("negative input not allowed")
signs = {"s": {"singular": "second", "plural": "seconds", },
"h": {"singular": "hour", "plural": "hours"},
"min": {"singular": "minute", "plural": "minutes"},
"d": {"singular": "day", "plural": "days"}
}
seperator = ", "
last_seperator = " and "
def get_sign(unit, value):
if value == 1 or value == -1:
return signs[unit]["singular"]
else:
return signs[unit]["plural"]
days, remainder = divmod(seconds, 86400)
hours, remainder = divmod(remainder, 3600)
minutes, seconds = divmod(remainder, 60)
daystext = "{} {}".format(days, get_sign("d", days)) if days else ""
hourstext = "{} {}".format(hours, get_sign("h", hours)) if hours else ""
minutestext = "{} {}".format(minutes, get_sign("min", minutes)) if minutes else ""
if (not seconds) and (days or hours or minutes):
secondstext = ""
else:
secondstext = "{} {}".format(seconds, get_sign("s", seconds))
output_list = [daystext, hourstext, minutestext, secondstext]
filtered = [item for item in output_list if item]
    if len(filtered) <= 2:
        output = last_separator.join(filtered)
    else:
        output = separator.join(filtered[:-1]) + last_separator + filtered[-1]
return output
|
4b77f9ed3d2085895ef15c6be30b7bfe83d1f49d
| 3,647,794
|
import re
def get_regions_prodigal(fn):
"""Parse prodigal output"""
regions = {}
with open(fn, 'r') as f:
for line in f:
if line[:12] == '# Model Data':
continue
if line[:15] == '# Sequence Data':
                m = re.search(r'seqhdr="(\S+)"', line)
if m:
id = m.group(1)
regions[id] = {}
regions[id]['+'] = []
regions[id]['-'] = []
else:
                r = line[1:].rstrip().split('_')
                # also store the index of the fragment - prodigal uses these
                # (rather than coords) to identify sequences in the fasta output
                n = int(r[0])
                s = int(r[1])
                e = int(r[2])
                regions[id][r[3]].append(NumberedRegion(s, e, n))
return regions
|
d69f7b6d9dfc6802ad4dab3472f90a2d68b95bdd
| 3,647,795
|
from typing import Optional
def get_transform(account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
transform_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTransformResult:
"""
Use this data source to access information about an existing resource.
:param str account_name: The Media Services account name.
:param str resource_group_name: The name of the resource group within the Azure subscription.
:param str transform_name: The Transform name.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['resourceGroupName'] = resource_group_name
__args__['transformName'] = transform_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:media/v20180701:getTransform', __args__, opts=opts, typ=GetTransformResult).value
return AwaitableGetTransformResult(
created=__ret__.created,
description=__ret__.description,
last_modified=__ret__.last_modified,
name=__ret__.name,
outputs=__ret__.outputs,
type=__ret__.type)
|
533ff2c95303c25b0a9741c36b34a755e18948e5
| 3,647,796
|
def default_preprocessing(df):
"""Perform the same preprocessing as the original analysis:
https://github.com/propublica/compas-analysis/blob/master/Compas%20Analysis.ipynb
"""
return df[(df.days_b_screening_arrest <= 30)
& (df.days_b_screening_arrest >= -30)
& (df.is_recid != -1)
& (df.c_charge_degree != 'O')
& (df.score_text != 'N/A')]
|
e6f4d8ceaa09fe71657e7936db886c3eabfb7aa0
| 3,647,797
|
def get_step_type_udfs(
step_type: str,
workflow: str,
adapter: ArnoldAdapter = Depends(get_arnold_adapter),
):
"""Get available artifact udfs for a step type"""
artifact_udfs = find_step_type_artifact_udfs(
adapter=adapter, step_type=step_type, workflow=workflow
)
process_udfs = find_step_type_process_udfs(
adapter=adapter, step_type=step_type, workflow=workflow
)
return artifact_udfs + process_udfs
|
f3ad3ad96d3f33e343afbb2ffcfa176fd4c6e654
| 3,647,798
|
def decode_base58(s: str) -> bytes:
"""
Decode base58.
:param s: base58 encoded string
:return: decoded data
"""
num = 0
for c in s:
if c not in BASE58_ALPHABET:
raise ValueError(
"character {} is not valid base58 character".format(c)
)
num *= 58
num += BASE58_ALPHABET.index(c)
h = hex(num)[2:]
h = '0' + h if len(h) % 2 else h
res = bytes.fromhex(h)
# Add padding back.
pad = 0
for c in s[:-1]:
if c == BASE58_ALPHABET[0]:
pad += 1
else:
break
return b'\x00' * pad + res
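# Usage sketch with the Bitcoin alphabet (BASE58_ALPHABET is a module-level
# constant in the original; it is defined here so the example runs standalone):
BASE58_ALPHABET = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
assert decode_base58("Cn8eVZg") == b"hello"
assert decode_base58("111Cn8eVZg") == b"\x00\x00\x00hello"  # leading '1's become zero-byte padding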
|
ee56c73e4fd22f25cd0caf63651abc13a4ba147d
| 3,647,799
|
import random
def ports_info(ptfadapter, duthost, setup, tx_dut_ports):
"""
Return:
dut_iface - DUT interface name expected to receive packtes from PTF
ptf_tx_port_id - Port ID used by PTF for sending packets from expected PTF interface
dst_mac - DUT interface destination MAC address
src_mac - PTF interface source MAC address
"""
data = {}
data["dut_iface"] = random.choice(tx_dut_ports.keys())
data["ptf_tx_port_id"] = setup["dut_to_ptf_port_map"][data["dut_iface"]]
data["dst_mac"] = get_dut_iface_mac(duthost, data["dut_iface"])
data["src_mac"] = ptfadapter.dataplane.ports[(0, data["ptf_tx_port_id"])].mac()
return data
|
14aef7e68386872a1d960329f2f8bee452aa9e29
| 3,647,800
|
def test_text_single_line_of_text(region, projection):
"""
    Place a single line of text at some x, y location.
"""
fig = Figure()
fig.text(
region=region,
projection=projection,
x=1.2,
y=2.4,
text="This is a line of text",
)
return fig
|
0e82165a2717fe9279015d3823b717a870b94e05
| 3,647,801
|
def safely_get_form(request, domain, instance_id):
"""Fetches a form and verifies that the user can access it."""
form = get_form_or_404(domain, instance_id)
if not can_edit_form_location(domain, request.couch_user, form):
raise location_restricted_exception(request)
return form
|
b3ba8da253a6455f5aeb65f828f8c28c826ac2d2
| 3,647,802
|
def generate_hazard_rates(n, d, timelines, constant=False, independent=0, n_binary=0, model="aalen"):
"""
n: the number of instances
d: the number of covariates
    timelines: the observational times
    constant: make the coefficients constant (not time dependent)
    n_binary: the number of binary covariates
    model: one of ["aalen", "cox"]
    Returns:
        hazard rates: (t, n) dataframe,
        coefficients: (t, d+1) dataframe of coefficients,
        covariates: (n, d) dataframe
"""
covariates = generate_covariates(n, d, n_binary=n_binary)
if model == "aalen":
coefficients = time_varying_coefficients(d + 1, timelines, independent=independent, constant=constant)
hazard_rates = np.dot(covariates, coefficients.T)
return pd.DataFrame(hazard_rates.T, index=timelines), coefficients, pd.DataFrame(covariates)
elif model == "cox":
covariates = covariates[:, :-1]
coefficients = constant_coefficients(d, timelines, independent)
baseline = time_varying_coefficients(1, timelines)
hazard_rates = np.exp(np.dot(covariates, coefficients.T)) * baseline[baseline.columns[0]].values
coefficients["baseline: " + baseline.columns[0]] = baseline.values
return pd.DataFrame(hazard_rates.T, index=timelines), coefficients, pd.DataFrame(covariates)
else:
        raise ValueError("model must be one of 'aalen' or 'cox'")
|
9c0da64f5796f57d474822121e1af5ca8ebb25e2
| 3,647,803
|
import urllib.request
def load_graph(graph_url):
    """
    Function that loads a graph given the URL
    for a text representation of the graph
    Returns a dictionary that models a graph
    """
    graph_file = urllib.request.urlopen(graph_url)
    graph_text = graph_file.read().decode('utf-8')
    graph_lines = graph_text.split('\n')
    graph_lines = graph_lines[:-1]
    print("Loaded graph with", len(graph_lines), "nodes")
answer_graph = {}
for line in graph_lines:
neighbors = line.split(' ')
node = int(neighbors[0])
answer_graph[node] = set([])
for neighbor in neighbors[1 : -1]:
answer_graph[node].add(int(neighbor))
return answer_graph
|
d346fb75f5ff872147a166948af65bb52bab739c
| 3,647,804
|
import torch
def calculate_regularization_term(means, n_objects, norm):
"""means: bs, n_instances, n_filters"""
bs, n_instances, n_filters = means.size()
reg_term = 0.0
for i in range(bs):
if n_objects[i]:
_mean_sample = means[i, : n_objects[i], :] # n_objects, n_filters
_norm = torch.norm(_mean_sample, norm, 1)
reg_term += torch.mean(_norm)
reg_term = reg_term / bs
return reg_term
|
b6eb43a8915449c7e86d01a08b3ea2e77ae51064
| 3,647,805
|
def mode(x):
""" Find most frequent element in array.
Args:
x (List or Array)
Returns:
Input array element type: Most frequent element
"""
vals, counts = np.unique(x, return_counts=True)
return vals[np.argmax(counts)]
|
b73bf301ca9ebf45f3a6698f8b6d45a5640cb301
| 3,647,807
|
def has_path(matrix, path: str) -> bool:
"""
    Given a character matrix, determine whether the given string can be traced as a path through it.
Parameters
----------
path: str
A given path, like "abcd"
Returns
-------
out: bool
Whether the given path can be found in the matrix
"""
if not path:
return True
    if not matrix or not matrix[0]:
return False
rows, cols = len(matrix), len(matrix[0])
    visited = [[False] * cols for _ in range(rows)]
plen = 0
for row in range(rows):
for col in range(cols):
hasp = has_path_core(matrix, row, col, rows, cols,
path, plen, visited)
if hasp:
return True
return False
|
bbde72992b762dd73c44c60da675da829255000d
| 3,647,808
|
def gensim_processing(data):
"""
    Here we use gensim to define bi-grams and tri-grams, which enable us to create a dictionary and corpus.
    We then process the data by calling the process_words function from our utils folder.
    """
    #build the models first
    bigram = gensim.models.Phrases(data, min_count=3, threshold=15) #We're lowering the threshold as we don't have a lot of data
trigram = gensim.models.Phrases(bigram[data], threshold=15)
#Then fit them to the data
bigram_mod = gensim.models.phrases.Phraser(bigram)
trigram_mod = gensim.models.phrases.Phraser(trigram)
#We further process the data using spacy and allow Nouns and Adjectives to pass (not verbs or adverbs!)
data_processed = lda_utils.process_words(data,nlp, bigram_mod, trigram_mod, allowed_postags=["NOUN","ADJ"])
#We now have a list of words which can be used to train the LDA model
return data_processed
|
67a4d9a90c8ea9809980d9871b769288915fe3cc
| 3,647,809
|
def _distances(value_domain, distance_metric, n_v):
"""Distances of the different possible values.
Parameters
----------
value_domain : array_like, with shape (V,)
Possible values V the units can take.
If the level of measurement is not nominal, it must be ordered.
distance_metric : callable
Callable that return the distance of two given values.
n_v : ndarray, with shape (V,)
Number of pairable elements for each value.
Returns
-------
d : ndarray, with shape (V, V)
Distance matrix for each value pair.
"""
return np.array([[distance_metric(v1, v2, i1=i1, i2=i2, n_v=n_v)
for i2, v2 in enumerate(value_domain)]
for i1, v1 in enumerate(value_domain)])
|
90c362db28497569a50475d7f6040755b1cfffea
| 3,647,812
|
import torch
import math
def log_mvn_likelihood(mean: torch.FloatTensor, covariance: torch.FloatTensor, observation: torch.FloatTensor) -> torch.FloatTensor:
"""
all torch primitives
all non-diagonal elements of covariance matrix are assumed to be zero
"""
k = mean.shape[0]
variances = covariance.diag()
log_likelihood = 0
    for i in range(k):
        # per-dimension contribution to the diagonal-Gaussian log density:
        # -0.5*log(var_i) - 0.5*log(2*pi) - 0.5*(x_i - mu_i)^2 / var_i
        log_likelihood += - 0.5 * torch.log(variances[i]) \
            - 0.5 * math.log(2 * math.pi) \
            - 0.5 * ((observation[i] - mean[i])**2 / variances[i])
return log_likelihood
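# A quick sanity check of the 2*pi term against scipy's reference
# implementation (illustrative; assumes scipy is available):
from scipy.stats import multivariate_normal

mean, cov, obs = torch.zeros(2), torch.eye(2), torch.zeros(2)
ours = log_mvn_likelihood(mean, cov, obs)                                 # ~ -1.8379
ref = multivariate_normal(mean.numpy(), cov.numpy()).logpdf(obs.numpy())  # -1.8379
assert abs(float(ours) - ref) < 1e-6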
|
6333ea91ddff9ac685f18954c5b7344846810ec3
| 3,647,813
|
def M_Mobs(H0, M_obs):
"""
    Converts an absolute magnitude quoted for H0 = 100 km/s/Mpc into the absolute magnitude for the given H0
"""
return M_obs + 5.*np.log10(H0/100.)
|
e7f817eaf281f2dd64f33ea4af44cd1cf9da31fa
| 3,647,814
|
def generate_proctoring_requirements_email_context(user, course_id):
"""
Constructs a dictionary for use in proctoring requirements email context
Arguments:
user: Currently logged-in user
course_id: ID of the proctoring-enabled course the user is enrolled in
"""
course_module = modulestore().get_course(course_id)
return {
'user': user,
'course_name': course_module.display_name,
'proctoring_provider': capwords(course_module.proctoring_provider.replace('_', ' ')),
'proctoring_requirements_url': settings.PROCTORING_SETTINGS.get('LINK_URLS', {}).get('faq', ''),
'id_verification_url': IDVerificationService.get_verify_location(),
}
|
fc594882b68b7f1f554fa1681943d49b722ae229
| 3,647,815
|
import random
def mutate_strings(s):
"""Return s with a random mutation applied"""
mutators = [
delete_random_character,
insert_random_character,
flip_random_character
]
mutator = random.choice(mutators)
# print(mutator)
return mutator(s)
|
0ba9dd533da44bc2051a7076b775177f29f4aaa6
| 3,647,816
|
import numpy as np
def get_one_hot(inputs, num_classes):
"""Get one hot tensor.
Parameters
----------
inputs: 3d numpy array (a x b x 1)
Input array.
num_classes: integer
Number of classes.
Returns
-------
One hot tensor.
3d numpy array (a x b x n).
"""
onehots = np.zeros(shape=tuple(list(inputs.shape[:-1]) + [num_classes]))
for i in range(inputs.shape[0]):
for j in range(inputs.shape[1]):
try:
onehots[i, j, inputs[i, j, 0]] = 1.0
except IndexError:
onehots[i, j, 0] = 1.0
return onehots
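# A small usage sketch: integer labels of shape (a, b, 1) become (a, b, n) one-hots.
labels = np.array([[[0], [2]],
                   [[1], [1]]])  # shape (2, 2, 1)
onehots = get_one_hot(labels, num_classes=3)
assert onehots.shape == (2, 2, 3)
assert onehots[0, 1, 2] == 1.0  # label 2 -> third channel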
|
2f4a8b3a60a90a8f81579dd5938a1bab91cb5537
| 3,647,817
|
def one_hot_encoder(batch_inds, num_categories):
"""Applies one-hot encoding from jax.nn."""
one_hots = jax.nn.one_hot(batch_inds, num_classes=num_categories)
return one_hots
|
85c15859555ee1bdec64adc627f34cc161c7e66c
| 3,647,818
|
def part1(entries: defaultdict) -> int:
"""part1 solver take the entries and return the part1 solution"""
return calculate(entries, 80)
|
a35a559395f0c53eeac4600aaa28bc04d3e1766f
| 3,647,819
|
def ceki_filter(data, bound):
""" Check if convergence checks ceki are within bounds"""
ceki = data["ceki"].abs() < bound
return ceki
|
09cd53f44241b13cf77eb2299c802ed238580259
| 3,647,820
|
def get_middleware(folder, request_name, middlewares=None):
""" Gets the middleware for the given folder + request """
middlewares = middlewares or MW
if folder:
middleware = middlewares[folder.META.folder_name + "_" + request_name]
else:
middleware = middlewares[request_name]
if middleware is None:
def default_middleware(run, kwargs, env):
return run(kwargs)
middleware = default_middleware
return middleware
|
720aafa5a3d0ef265eeaa8fe40a68c7024b0adc3
| 3,647,821
|
def tf_repeat_2d(a, repeats):
"""Tensorflow version of np.repeat for 2D"""
assert len(a.get_shape()) == 2
a = tf.expand_dims(a, 0)
a = tf.tile(a, [repeats, 1, 1])
return a
|
8337cbef8459a1403fc6a681f89c14d6ae3a00a5
| 3,647,823
|
import torch
def accuracy(output, target, topk=(1,), output_has_class_ids=False):
"""Computes the accuracy over the k top predictions for the specified values of k"""
if not output_has_class_ids:
output = torch.Tensor(output)
else:
output = torch.LongTensor(output)
target = torch.LongTensor(target)
with torch.no_grad():
maxk = max(topk)
batch_size = output.shape[0]
if not output_has_class_ids:
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
else:
pred = output[:, :maxk].t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
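# Usage sketch: top-1 and top-2 accuracy on a tiny batch of raw scores
# (both samples have the right class ranked first):
scores = [[0.1, 0.7, 0.2],
          [0.8, 0.1, 0.1]]
targets = [1, 0]
top1, top2 = accuracy(scores, targets, topk=(1, 2))
assert float(top1) == 100.0 and float(top2) == 100.0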
|
f702000a64db1bb6f53b7686f1143656f9864e8d
| 3,647,824
|
def masked_residual_block(c, k, nonlinearity, init, scope):
"""
Residual Block for PixelCNN. See https://arxiv.org/abs/1601.06759
"""
with tf.variable_scope(scope):
n_ch = c.get_shape()[3].value
half_ch = n_ch // 2
c1 = nonlinearity(c)
c1 = conv(c1, k=1, out_ch=half_ch, stride=False, mask_type='B', init=init, scope='1x1_a')
c1 = nonlinearity(c1)
c1 = conv(c1, k=k, out_ch=half_ch, stride=False, mask_type='B', init=init, scope='conv')
c1 = nonlinearity(c1)
c1 = conv(c1, k=1, out_ch=n_ch, stride=False, mask_type='B', init=init, scope='1x1_b')
c = c1 + c
return c
|
ffd4bb042affc0250472d50b6b824be66f808878
| 3,647,825
|
def calculate_lookup(src_cdf: np.ndarray, ref_cdf: np.ndarray) -> np.ndarray:
"""
This method creates the lookup table
:param array src_cdf: The cdf for the source image
:param array ref_cdf: The cdf for the reference image
:return: lookup_table: The lookup table
:rtype: array
"""
lookup_table = np.zeros(256)
lookup_val = 0
for src_pixel_val in range(len(src_cdf)):
for ref_pixel_val in range(len(ref_cdf)):
if ref_cdf[ref_pixel_val] >= src_cdf[src_pixel_val]:
lookup_val = ref_pixel_val
break
lookup_table[src_pixel_val] = lookup_val
return lookup_table
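# Usage sketch for histogram matching: build normalized CDFs from 8-bit images,
# derive the lookup table, and remap the source pixels (illustrative only;
# the _cdf helper is an assumption, not part of the original module):
import numpy as np

def _cdf(img: np.ndarray) -> np.ndarray:
    # normalized cumulative histogram over the 256 intensity levels
    hist, _ = np.histogram(img.flatten(), bins=256, range=(0, 256))
    cdf = hist.cumsum()
    return cdf / cdf[-1]

rng = np.random.default_rng(0)
src = rng.integers(0, 256, (64, 64), dtype=np.uint8)
ref = rng.integers(0, 256, (64, 64), dtype=np.uint8)
lookup = calculate_lookup(_cdf(src), _cdf(ref))
matched = lookup[src].astype(np.uint8)  # apply the table pixel-wise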
|
f1433e6af001ddcda44c740dabfb1ee643cd2260
| 3,647,826
|
def measureInTransitAndDiffCentroidForOneImg(prfObj, ccdMod, ccdOut, cube, rin, bbox, rollPhase, flags, hdr=None, plot=False):
"""Measure image centroid of in-transit and difference images
Inputs:
-----------
prfObj
An object of the class prf.KeplerPrf()
ccdMod, ccdOut
(int) CCD module and output of image. Needed to
create the correct PRF model
cube
(3d np array) A TPF data cube as returned by
dave.fileio.getTargetPixelArrayFromFits()
rin
(int) Which image to process. rin should be in the range 0..len(cube)
bbox
[c1, c2, r1, r2]. Define the range of columns (c1..c2)
and rows (r1..r2) defined by the image.
An exception raised if the following equality not true
img.shape = (c2-c1), (r2-r1)
rollPhase
(1d np array) An array of roll phases for each row
of cube. len(rollPhase) == len(cube). Units of this
array don't matter, so long as cadences with similar
roll angles have similar values of rollPhase
flags
(1d array) flag values indicating bad cadences.
Currently a non-zero value of flags indicates a bad
cadence.
Optional Inputs:
---------------
hdr
Fits header object for TPF file. Useful if you want to plot
plot
(bool) Request plots.
Returns:
-------------
A two element tuple
A 4 element numpy array
ic In transit centroid column
ir In transit centroid row
dc Difference image centroid column
dr Difference image centroid row
A dictionary containing some diagnostics describing the cadences used
then creating the difference image.
"""
diff, oot, diagnostics = diffimg.constructK2DifferenceImage(cube, rin, \
rollPhase, flags)
if np.max(np.fabs(oot)) == 0:
return np.array([-1,-1,-1,-1]), diagnostics
ootRes = fitPrfCentroidForImage(oot, ccdMod, ccdOut, bbox, prfObj)
diffRes = fitPrfCentroidForImage(diff, ccdMod, ccdOut, bbox, prfObj)
#Fit the difference image. I don't think this is the right thing to do
# snr = diff / np.sqrt(cube[rin])
# snr[ np.isnan(snr) ] = 0
# diffRes = fitPrfCentroidForImage(snr, ccdMod, ccdOut, bbox, prfObj)
# print rin, diffRes.x
return np.array([ootRes.x[0], ootRes.x[1], diffRes.x[0], diffRes.x[1]]), diagnostics
|
655477460e5841736f07106d5e6afd666d95f450
| 3,647,827
|
def readGlobalFileWithoutCache(fileStore, jobStoreID):
"""Reads a jobStoreID into a file and returns it, without touching
the cache.
Works around toil issue #1532.
"""
f = fileStore.getLocalTempFile()
fileStore.jobStore.readFile(jobStoreID, f)
return f
|
8c784e809acdc1a7fb3d8c108f85ce61bd1ad11c
| 3,647,828
|
def get_user_granted_assets_direct(user):
"""Return assets granted of the user directly
:param user: Instance of :class: ``User``
:return: {asset1: {system_user1, system_user2}, asset2: {...}}
"""
assets = {}
asset_permissions_direct = user.asset_permissions.all()
for asset_permission in asset_permissions_direct:
if not asset_permission.is_valid:
continue
for asset in asset_permission.get_granted_assets():
if not asset.is_active:
continue
if asset in assets:
assets[asset] |= set(asset_permission.system_users.all())
else:
setattr(asset, 'inherited', False)
assets[asset] = set(asset_permission.system_users.all())
return assets
|
602bd104835cc85dcf59339c8b4b2e2e2b5f747b
| 3,647,829
|
def nullColumns(fileHeaders, allKeys):
"""
Return a set of column names that don't exist in the file.
"""
s1 = set(fileHeaders)
s2 = set(allKeys)
return s2.difference(s1)
|
17a0bb80414fe88f213399958b217ccf6fb5d1e9
| 3,647,830
|
def listable_attachment_tags(obj, joiner=" "):
"""
Return an html string containing links for each of the attachments for
input object. Images will be shown as hover images and other attachments will be
shown as paperclip icons.
"""
items = []
attachments = obj.attachment_set.all()
label = mark_safe('<i class="fa fa-paperclip fa-fw" aria-hidden="true"></i>')
img_label = mark_safe('<i class="fa fa-photo fa-fw" aria-hidden="true"></i>')
for a in attachments:
if a.is_image:
img = attachment_img(a, klass="listable-image")
items.append(
'<div class="hover-img"><a href="%s" target="_blank">%s<span>%s</span></a></div>' %
(a.attachment.url, img_label, img)
)
else:
items.append(attachment_link(a, label=label))
return joiner.join(items)
|
b2fa3fd249469334e42616f0e4392ce16d4076d1
| 3,647,831
|
import math
def distance_km(lat1, lon1, lat2, lon2):
""" return distance between two points in km using haversine
http://en.wikipedia.org/wiki/Haversine_formula
http://www.platoscave.net/blog/2009/oct/5/calculate-distance-latitude-longitude-python/
Author: Wayne Dyck
"""
ret_val = 0
radius = 6371 # km
lat1 = float(lat1)
lon1 = float(lon1)
lat2 = float(lat2)
lon2 = float(lon2)
dlat = math.radians(lat2-lat1)
dlon = math.radians(lon2-lon1)
a = math.sin(dlat/2) * math.sin(dlat/2) + math.cos(math.radians(lat1)) \
* math.cos(math.radians(lat2)) * math.sin(dlon/2) * math.sin(dlon/2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
ret_val = radius * c
return ret_val
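# Worked example: one degree of longitude along the equator is
# 6371 km * pi / 180 ~= 111.19 km.
assert abs(distance_km(0, 0, 0, 1) - 111.19) < 0.01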
|
f50d444b5769b1d00045429e3d577ec22f922774
| 3,647,832
|
def _flip(r, u):
"""Negate `r` if `u` is negated, else identity."""
return ~ r if u.negated else r
|
18ddcf5132867f5646c729bdadcb2c5077df8c03
| 3,647,833
|
def get_arguments():
"""Defines command-line arguments, and parses them."""
parser = ArgumentParser()
# Execution mode
parser.add_argument(
"--mode",
"-m",
choices=['train', 'test', 'full'],
default='train',
help=(
"train: performs training and validation; test: tests the model "
"found in \"--checkpoint-dir\" with name "
"\"--name\" on \"--dataset\"; "
"full: combines train and test modes. Default: train"
)
)
parser.add_argument(
"--resume",
action='store_true',
help=(
"The model found in \"--checkpoint-dir/--name/\" and filename "
"\"--name.h5\" is loaded."
)
)
parser.add_argument(
"--initial-epoch",
type=int,
default=0,
help="Epoch at which to start training. Default: 0"
)
parser.add_argument(
"--no-pretrained-encoder",
dest='pretrained_encoder',
action='store_false',
help=(
"Pretrained encoder weights are not loaded."
)
)
parser.add_argument(
"--weights-path",
type=str,
default="./checkpoints/linknet_encoder_weights.h5",
help=(
"HDF5 file where the weights are stored. This setting is ignored "
"if \"--no-pretrained-encoder\" is set. Default: "
"/checkpoints/linknet_encoder_weights.h5"
)
)
# Hyperparameters
parser.add_argument(
"--batch-size",
"-b",
type=int,
default=16,
help="The batch size. Default: 10"
)
parser.add_argument(
"--epochs",
type=int,
default=200,
help="Number of training epochs. Default: 300"
)
parser.add_argument(
"--learning-rate",
"-lr",
type=float,
default=5e-4,
help="The learning rate. Default: 5e-4"
)
parser.add_argument(
"--lr-decay",
type=float,
default=0.1,
help="The learning rate decay factor. Default: 0.1"
)
parser.add_argument(
"--lr-decay-epochs",
type=int,
default=200,
help=(
"The number of epochs before adjusting the learning rate. "
"Default: 100"
)
)
parser.add_argument(
"--dataset-dir",
type=str,
default="../data/ForDataGenTrainTestVal/",
help=(
"Path to the root directory of the selected dataset. "
"Default: data/CamVid"
)
)
# Settings
parser.add_argument(
"--workers",
type=int,
default=24,
help="Number of subprocesses to use for data loading. Default: 4"
)
parser.add_argument(
"--verbose",
choices=[0, 1, 2],
default=1,
help=(
"Verbosity mode: 0 - silent, 1 - progress bar, 2 - one line per "
"epoch. Default: 1"
)
)
# Storage settings
parser.add_argument(
"--name",
type=str,
default='LinkNet',
help="Name given to the model when saving. Default: LinkNet"
)
parser.add_argument(
"--checkpoint-dir",
type=str,
default='edge_point',
help="The directory where models are saved. Default: checkpoints"
)
return parser.parse_args()
|
5385c75524460ed4968def0ab98fc29112d72434
| 3,647,834
|
def twoThreeMove(tri, angle, face_num, perform = True, return_edge = False):
"""Apply a 2-3 move to a taut triangulation, if possible.
If perform = False, returns if the move is possible.
If perform = True, modifies tri, returns (tri, angle) for the performed move"""
face = tri.triangle(face_num)
embed0 = face.embedding(0)
tet0 = embed0.simplex()
tet_num0 = tet0.index()
tet_0_face_num = embed0.face()
vertices0 = embed0.vertices() # Maps vertices (0,1,2) of face to the corresponding vertex numbers of tet0
embed1 = face.embedding(1)
tet1 = embed1.simplex()
tet_num1 = tet1.index()
tet_1_face_num = embed1.face()
vertices1 = embed1.vertices() # Maps vertices (0,1,2) of face to the corresponding vertex numbers of tet1
if tet0 == tet1: ### Cannot perform a 2-3 move across a self-gluing
return False
### taut 2-3 move is valid if the pis are on different edges of face
### this never happens if we start with a veering triangulation.
### for veering, the two-tetrahedron ball is always a continent.
for i in range(3):
j = (i+1) % 3
k = (i+2) % 3
if angle[tet_num0] == unsorted_vert_pair_to_edge_pair[(vertices0[j], vertices0[k])]:
pi_num_0 = i
if angle[tet_num1] == unsorted_vert_pair_to_edge_pair[(vertices1[j], vertices1[k])]:
pi_num_1 = i
if pi_num_0 == pi_num_1:
return False
if perform == False:
return True
### check we do the same as regina...
tri2 = regina.Triangulation3(tri) ## make a copy
tri2.pachner(tri2.triangle(face_num))
### We have to implement twoThreeMove ourselves. e.g. we do a 2-3 move to canonical fig 8 knot complement triangulation.
### All of the original tetrahedra are removed. I don't see any way to carry the angle structure through without knowing
### exactly how Ben's implementation works.
## record the tetrahedra and gluings adjacent to tet0 and tet1
tets = [tet0, tet1]
vertices = [vertices0, vertices1]
# print('2-3 vertices signs')
# print([v.sign() for v in vertices])
gluings = []
for i in range(2):
tet_gluings = []
for j in range(3):
tet_gluings.append( [ tets[i].adjacentTetrahedron(vertices[i][j]), tets[i].adjacentGluing(vertices[i][j])] )
# if tets[i].adjacentTetrahedron(vertices[i][j]) in tets:
# print('self gluing')
gluings.append(tet_gluings)
### add new tetrahedra
new_tets = []
for i in range(3):
new_tets.append(tri.newTetrahedron())
### glue around degree 3 edge
for i in range(3):
new_tets[i].join(2, new_tets[(i+1)%3], regina.Perm4(0,1,3,2))
### replace mapping info with corresponding info for the 3 tet. Self gluings will be annoying...
### write verticesi[j] as vij
### tet0 new_tet0
### _________ _________
### ,'\ /`. ,'\`. ,'/`.
### ,' \ v03 / `. ,' \ `0' / `.
### ,' \ / `. ,' \ | / `.
### / \ \ / / \ /|\ \|/ /|\
### /v02\ * /v01\ / | \ * / | \
### / _\..... | ...../_ \ / | 3\..... | ...../2 | \
### /_--"" / * \ ""--_\ /2 ,' / * \ `. 3\
### \`.v12/ / \ \v11,'/ `. \`.| / /|\ \ |,'/
### \ `./ / \ \,' / ----} \ `./ / | \ \,' /
### \ /`. / v00 \ ,'\ / ,' \|/`. / | \ ,'\|/
### \ `. / \ ,' / \ `. / | \ ,' /
### \ `---------' / \ * 3 | 2 * /
### \ \ / / \ \ | / /
### \ \ v10 / / new_tet1 \ \ | / / new_tet2
### \ \ / / \ \ | / /
### \ \ / / \ \|/ /
### \ * / \ * /
### tet1 \...|.../ \...|.../
### \ | / \`.|.'/
### \v13/ \ 1 /
### \|/ \|/
### * *
# permutations taking the vertices for a face of the 3-tet ball to the
# vertices of the same face for the 2-tet ball
# these should be even in order to preserve orientability.
# exactly one of vertices[0] and vertices[1] is even, but it seems to depend on the face.
# perms = [[regina.Perm4( vertices[0][3], vertices[0][0], vertices[0][1], vertices[0][2] ), ### opposite v00
# regina.Perm4( vertices[0][3], vertices[0][1], vertices[0][2], vertices[0][0] ), ### opposite v01
# regina.Perm4( vertices[0][3], vertices[0][2], vertices[0][0], vertices[0][1] ) ### opposite v02
# ],
# [regina.Perm4( vertices[1][0], vertices[1][3], vertices[1][1], vertices[1][2] ), ### opposite v10
# regina.Perm4( vertices[1][1], vertices[1][3], vertices[1][2], vertices[1][0] ), ### opposite v11
# regina.Perm4( vertices[1][2], vertices[1][3], vertices[1][0], vertices[1][1] ) ### opposite v12
# ]
# ]
perms = [[vertices[0] * regina.Perm4( 3,0,1,2 ), ### opposite v00
vertices[0] * regina.Perm4( 3,1,2,0 ), ### opposite v01
vertices[0] * regina.Perm4( 3,2,0,1 ) ### opposite v02
],
[vertices[1] * regina.Perm4( 0,3,1,2 ), ### opposite v10
vertices[1] * regina.Perm4( 1,3,2,0 ), ### opposite v11
vertices[1] * regina.Perm4( 2,3,0,1 ) ### opposite v12
]
]
flip = perms[0][0].sign() == -1
if flip: #then all of the signs are wrong, switch 0 and 1 on input
perms = [[p * regina.Perm4( 1,0,2,3 ) for p in a] for a in perms]
# print('2-3 perms signs')
# print([[p.sign() for p in a] for a in perms])
for i in range(2):
for j in range(3):
gluing = gluings[i][j]
if gluing != None:
if gluing[0] not in tets: ### not a self gluing
gluing[1] = gluing[1] * perms[i][j]
else:
i_other = tets.index( gluing[0] )
otherfacenum = gluing[1][vertices[i][j]]
j_other = [vertices[i_other][k] for k in range(4)].index(otherfacenum)
assert gluings[i_other][j_other][0] == tets[i]
assert gluings[i_other][j_other][1].inverse() == gluings[i][j][1]
gluings[i_other][j_other] = None ### only do a self gluing from one side
gluing[0] = new_tets[j_other]
gluing[1] = perms[i_other][j_other].inverse() * gluing[1] * perms[i][j]
### unglue two tetrahedra
tet0.isolate()
tet1.isolate()
### remove the tetrahedra
tri.removeSimplex(tet0)
tri.removeSimplex(tet1)
### make the gluings on the boundary of the new ball
for i in range(2):
for j in range(3):
if gluings[i][j] != None:
if flip:
new_tets[j].join(i, gluings[i][j][0], gluings[i][j][1])
else:
new_tets[j].join(1 - i, gluings[i][j][0], gluings[i][j][1])
assert tri.isIsomorphicTo(tri2)
assert tri.isOriented()
### update the angle structure
tet_indices = [tet_num0, tet_num1]
tet_indices.sort()
angle.pop(tet_indices[1])
angle.pop(tet_indices[0]) ## remove from the list in the correct order!
new_angle = [None, None, None]
new_angle[pi_num_0] = 0
new_angle[pi_num_1] = 0 ### these two tetrahedra have their pi's on the new degree three edge
third_index = 3 - (pi_num_0 + pi_num_1)
if (pi_num_0 - third_index) % 3 == 1:
new_angle[third_index] = 1
else:
assert (pi_num_0 - third_index) % 3 == 2
new_angle[third_index] = 2
if flip:
new_angle[third_index] = 3 - new_angle[third_index]
angle.extend(new_angle)
assert is_taut(tri, angle)
if not return_edge:
return [ tri, angle ]
else:
return [ tri, angle, new_tets[0].edge(0).index() ]
|
18abe14b2b8446d39e285f1facda82568b808b60
| 3,647,835
|
import csv
def obterUFEstadoPorNome(estado):
"""
    Returns the state's UF code given the state name
    :param estado: Name of the state
    :return codigoDoEstado: The state's UF code
"""
try:
with open("./recursos/estados.csv", newline="") as csvfile:
reader = csv.DictReader(csvfile, delimiter=";")
for state in reader:
if state["Unidade_Federativa"].lower() == estado.strip().lower():
return state["UF"]
except Exception as exc:
print("[ERROR]{0}".format(exc))
|
9b136fe8c557e5f75bca235cf66168f92244a4e6
| 3,647,836
|
import random
def get_random_byte_string(byte_length):
""" Use this function to generate random byte string
"""
    # build one random 8-bit character per requested byte
    byte_list = [chr(random.getrandbits(8)) for _ in range(byte_length)]
    # Make into a string
    byte_string = ''.join(byte_list)
    return byte_string
|
0ea923a045beb476501dc3d8983f3fe89efef008
| 3,647,837
|
def find_all_indexes(text, pattern):
"""Return a list of starting indexes of all occurrences of pattern in text,
or an empty list if not found.
Complexity Analysis:
Best case: O(t)
Worst Case: O(t)
In the best case the pattern is the empty string(''). In that scenario
this implementation returns a list of all the index positions present
in the text string, using a list comprehension which requires t
iterations, where t is the length of the text. One iteration is required
for each character in the text string.
    In the worst case, there are no occurrences of the pattern present in
    the text. In that scenario this implementation would be most impacted by
    the worst case scenario of the find_next_index function, which would be
    O(t) as well. The find_next_index function would require iterating through
    the entire length of the text string to discover there are no pattern
    matches.
    In the average case, there are occurrences of the pattern string in the
    text string. Since there is no difference between the best and worst
    case, the complexity of this function asymptotically approaches O(t) as
    well, on the average case.
"""
assert isinstance(text, str), 'text is not a string: {}'.format(text)
assert isinstance(pattern, str), 'pattern is not a string: {}'.format(text)
# edge case: return every index if the pattern is an empty str
if len(pattern) == 0:
return [i for i in range(len(text))]
# otherwise find all indices
else:
# set indices to an empty list on the first pass
indices = list()
# now find all the indices
return find_next_index(text, pattern, indices)
|
0101efe77570b5d027928495dc25cb4e02d5c2f5
| 3,647,838
|
def is_igb(request):
"""
Checks the headers for IGB headers.
"""
    return 'HTTP_EVE_TRUSTED' in request.META
|
1e6485614063a9f4eec36407b60154300d38db76
| 3,647,839
|
from collections import OrderedDict
import numpy as np
import theano
import theano.tensor as T
def compile_ADAM_train_function(model, gparams, learning_rate=0.001, b1=0.9, b2=0.999, e=1e-8,
                                gamma=1 - 1e-8):
"""
ADAM update rules
Default values are taken from [Kingma2014]
References:
[Kingma2014] Kingma, Diederik, and Jimmy Ba.
"Adam: A Method for Stochastic Optimization."
arXiv preprint arXiv:1412.6980 (2014).
http://arxiv.org/pdf/1412.6980v4.pdf
"""
updates = OrderedDict()
all_params = model.params
all_grads = gparams
alpha = learning_rate
t = theano.shared(np.float32(1))
b1_t = b1 * gamma ** (t - 1) # (Decay the first moment running average coefficient)
for theta_previous, g in zip(all_params, all_grads):
m_previous = theano.shared(np.zeros(theta_previous.get_value().shape,
dtype=theano.config.floatX))
v_previous = theano.shared(np.zeros(theta_previous.get_value().shape,
dtype=theano.config.floatX))
m = b1_t * m_previous + (1 - b1_t) * g # (Update biased first moment estimate)
v = b2 * v_previous + (1 - b2) * g ** 2 # (Update biased second raw moment estimate)
m_hat = m / (1 - b1 ** t) # (Compute bias-corrected first moment estimate)
v_hat = v / (1 - b2 ** t) # (Compute bias-corrected second raw moment estimate)
theta = theta_previous - (alpha * m_hat) / (T.sqrt(v_hat) + e) # (Update parameters)
# updates.append((m_previous, m))
# updates.append((v_previous, v))
# updates.append((theta_previous, theta) )
updates[m_previous] = m
updates[v_previous] = v
updates[theta_previous] = theta
updates[t] = t + 1.
return updates
|
a60f27c3b314d3adc2ec2f7bb0f8c92875d7625b
| 3,647,840
|
def linear_svr_pred(X_train, Y_train):
"""
Train a linear model with Support Vector Regression
"""
svr_model = LinearSVR(random_state=RANDOM_STATE)
svr_model.fit(X_train, Y_train)
Y_pred = svr_model.predict(X_train)
return Y_pred
|
336325ec53da4d4008c3219aa737365a40263bdf
| 3,647,843
|
import math
def area(rad: float = 1.0) -> float:
"""
    return the area of a circle, treating the argument as its diameter (pi * d**2 / 4),
    as the doctests below show
>>> area(2.0)
3.141592653589793
>>> area(3.0)
7.0685834705770345
>>> area(4.0)
12.566370614359172
"""
return rad * rad * math.pi / 4
|
702fc4a9fa370804d88d1182f966890bc0634466
| 3,647,844
|
import requests
import json
def check_coverage_running(url, coverage_name):
"""
Check if Navitia coverage is up and running
:param url: Navitia server coverage url
:param coverage_name: the name of the coverage to check
:return: Whether a Navitia coverage is up and running
"""
_log.info("checking if %s is up", coverage_name)
response = requests.get(url)
# Get the status of the coverage as Json
json_data = json.loads(response.text)
if "regions" not in json_data or "running" not in json_data["regions"][0]['status']:
_log.info("%s coverage is down", coverage_name)
return False
else:
_log.info("%s coverage is up", coverage_name)
return True
|
3d3d9b1403c541aa0cdb8867845b21bf387431fb
| 3,647,845
|
import random
def make_random_board(row_count, col_count, density=0.5):
"""create a random chess board with given size and density"""
board = {}
for row_num in range(row_count):
for col_num in range(col_count):
factor = random.random() / density
if factor >= 1:
continue
index = int(factor * len(ChessPiece.class_list))
board[(row_num, col_num)] = ChessPiece.class_list[index].symbol
return board
|
ea40883989675c99aa70af0b180957aa677233a5
| 3,647,846
|
def create_roots(batch_data):
"""
Create root nodes for use in MCTS simulation. Takes as a parameter a list of tuples,
containing data for each game. This data consist of: gametype, state, type of player 1
and type of player 2
"""
root_nodes = []
for data in batch_data:
game = data[0]
state = data[1]
player_1 = data[2]
player_2 = data[3]
player = player_1 if game.player(state) else player_2
root_nodes.append(player.create_root_node(state))
return root_nodes
|
d07b0781605b01d08c9ef78f30dad9254ade9907
| 3,647,847
|
import pyproj
from rasterio.crs import CRS
from rasterio.errors import CRSError


def _parse_crs(crs):
"""Parse a coordinate reference system from a variety of representations.
Parameters
----------
crs : {str, dict, int, CRS}
Must be either a rasterio CRS object, a proj-string, rasterio supported
dictionary, WKT string, or EPSG integer.
Returns
-------
rasterio.crs.CRS
The parsed CRS.
Raises
------
CRSError
Raises an error if the input cannot be parsed.
"""
#
# NOTE: This doesn't currently throw an error if the EPSG code is invalid.
#
parsed = None
if isinstance(crs, CRS):
parsed = crs
elif isinstance(crs, str):
        try:
            # proj-string or 'EPSG:xxxx'-style authority string
            parsed = CRS.from_string(crs)
        except CRSError:
            # fall back to WKT
            parsed = CRS.from_wkt(crs)
elif isinstance(crs, dict):
parsed = CRS(crs)
elif isinstance(crs, int):
parsed = CRS.from_epsg(crs)
elif isinstance(crs, pyproj.Proj):
parsed = CRS.from_proj4(crs.proj4_init)
if parsed is None or not parsed.is_valid:
raise CRSError('Could not parse CRS: {}'.format(crs))
return parsed
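# Usage sketch: each representation below should parse to the same WGS84 CRS
# (assuming rasterio is installed; EPSG:4326 is an illustrative choice):
#
#   _parse_crs(4326)
#   _parse_crs("EPSG:4326")
#   _parse_crs({"init": "epsg:4326"})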
|
559692b146ec99a9fe5407c8bca340c72dddf0a5
| 3,647,848
|
def hs_instance_get_all(context):
"""Get a list of hyperstash instances."""
return IMPL.hs_instance_get_all(context)
|
e09991f71e3713eea96956306a1ab4813bfb8b1a
| 3,647,849
|
import importlib.util
def import_from_file(module_name: str, filepath: str):
"""
Imports a module from file.
Args:
module_name (str): Assigned to the module's __name__ parameter (does not
influence how the module is named outside of this function)
filepath (str): Path to the .py file
Returns:
The module
"""
spec = importlib.util.spec_from_file_location(module_name, filepath)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
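# Usage sketch (the module name and path are hypothetical):
#
#   helpers = import_from_file("helpers", "/path/to/helpers.py")
#   helpers.do_something()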
|
89ac082cbc7d3dd5d9158a8cc8eb5ef061c444e6
| 3,647,850
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns


def plot_chirp(stim_inten, spike_bins, smooth=True, ax=None):
"""
Plot the response to a chirp stimulus (but could be any repeated stimulus, non-shuffled).
The response is plotted with seaborn's lineplot.
params:
- stim_inten: The whole stimulus intensity
- spike_bins: The cell's response to the whole stimulus
- smooth: Flag to smooth or not the cell's response
- ax: The axis for the plot. If None, a new plot is created
return:
- The axis of the plot
"""
if ax is None:
fig, ax = plt.subplots()
    # Get the number of repeats by convolving a segment of the stimulus with the full trace
conv_res = np.convolve(stim_inten[360:600].astype(float), stim_inten.astype(float), mode="full")
n_repeats = np.sum(conv_res.max()==conv_res)
trace = spike_bins.reshape(n_repeats,-1)
len_ = trace.shape[1]
    repeat_dfs = []
    for i, repeat_am in enumerate(trace):
        if smooth:
            repeat_am = np.convolve([.333]*3, repeat_am, mode="same")
        repeat_dfs.append(pd.DataFrame(list(zip(np.linspace(0, len_/60, len_),
                                                [str(i)]*len_,
                                                repeat_am)),
                                       columns=["timepoint", "repeat", "signal"]))
    df = pd.concat(repeat_dfs, ignore_index=True)  # DataFrame.append was removed in pandas 2.0
    g = sns.lineplot(x="timepoint", y="signal", data=df, ax=ax, n_boot=100)  # small n_boot to speed up plotting
    # (default n_boot=10000)
    min_val, max_val = ax.get_ylim()
    ax.set_ylim(min_val, (max_val-min_val)*6/5)
ax.set(xlabel='', ylabel='')
ax.imshow([stim_inten.reshape(n_repeats,-1)[0]], aspect='auto', cmap="gray", extent=(0,len_/60,(max_val-min_val)*6/5,max_val))
return ax
|
75fe6defcb23a2c59e2241c9a68bf753dc6828b7
| 3,647,851
|
from urllib import parse

from flask import g, render_template, request
from markupsafe import Markup


def init_context_processor(app):
    """Register HTML template helper functions on the app."""
@app.context_processor
def pjax_processor():
"""
pjax处理器
"""
def get_template(base, pjax=None):
pjax = pjax or 'pjax.html'
if 'X-PJAX' in request.headers:
return pjax
else:
return base
return dict(pjax=get_template)
@app.context_processor
def pagination_processor():
"""
分页处理器
"""
def pagination(url, pager, template=None, params={}):
template = template or 'common/pagination.html'
pager._dict['current'] = (pager.offset + pager.limit - 1) // pager.limit
pager._dict['total_page'] = (pager.rows_found + pager.limit - 1) // pager.limit
prev_offset = pager.offset - 2 * pager.limit
pager._dict['prev_offset'] = prev_offset if prev_offset >= 0 else 0
pager._dict['params'] = params
pager._dict['url'] = url
return Markup(render_template(template, data=pager))
return dict(pagination=pagination)
@app.context_processor
def column_order_processor():
"""
获取排序字段的css
"""
def column_order(column, order, active):
column = 'sorttable-column-%s' % column
if active:
order = 'sorttable-sorted-reverse' if order == 'desc' else 'sorttable-sorted'
return '%s %s' % (column, order)
else:
return column
return dict(column_order=column_order)
@app.context_processor
def try_active_processor():
"""
尝试激活导航栏项目
"""
def try_active(page_type):
if g.page_type == page_type:
return 'curr'
else:
return ''
return dict(try_active=try_active)
@app.context_processor
def if_else_processor():
"""
gives t if condition evaluates to True, and f if it evaluates to False
"""
def if_else(condition, t, f):
return t if condition else f
return dict(ifelse=if_else)
@app.context_processor
def present_processor():
u"""
present enum to it's name
eg:
>> present(1, {1: 'Android', 2: 'iOS'}
Android
>> present(2, {1: 'Android', 2: 'iOS'}
iOs
"""
def present(enum, dict):
return dict.get(enum, enum)
return dict(present=present)
@app.context_processor
def hostname_processor():
"""
get hostname of url
ex: http://ng.d.cn/xianbian2/news/detail_402586_1.html => ng.d.cn
"""
def hostname(url):
return parse.urlparse(url).netloc
return dict(hostname=hostname)
@app.context_processor
def utility_processor():
        def permission(per):
            # An empty module list means no restrictions
            if g.modules == []:
                return True
            return per in g.modules
return dict(permission=permission)
    @app.context_processor
    def active_menu_processor():
        """Mark the current module's entry in the left sidebar as active."""
        def active_cur_menu(per):
            return g.uri_path.startswith(per)
return dict(active_cur_menu=active_cur_menu)
|
6b5cf03ec48a1b1324a158388098da5e4884286f
| 3,647,854
|
from functools import partial


def tiered(backup_tier, R):
"""Returns a tier aware checker.
The returned checker ensures that it's possible to construct a set
(of length R) including given set s that will contain exactly one
node from the backup tier.
`backup_tier` is a list of node ids that count as backups.
A typical invocation looks something like:
build_copysets(primary_tier + backup_tier, 6, 2,
checker=tiered(backup_tier, 6))
"""
def _checker(backup_tier, R, copysets, copyset):
num_backups = len(copyset.intersection(set(backup_tier)))
if len(copyset) < R:
return num_backups <= 1
else:
return num_backups == 1
return partial(_checker, backup_tier, R)
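# Worked example (node ids are illustrative): a copyset is acceptable while
# growing if it holds at most one backup node, and must hold exactly one
# backup once it reaches the full replication factor R.
if __name__ == "__main__":
    checker = tiered(backup_tier=[5, 6], R=3)
    print(checker(set(), {1, 5}))      # True: partial set, one backup
    print(checker(set(), {1, 2, 5}))   # True: full set, exactly one backup
    print(checker(set(), {1, 5, 6}))   # False: two backups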
|
ecde647738fad88ea806948a0df7bee22a73abfa
| 3,647,855
|
import numpy as np
from scipy.sparse.linalg import aslinearoperator


def ls_chebyshev(A, b, s_max, s_min, tol=1e-8, iter_lim=None):
    """
    Chebyshev (semi-)iteration for linear least squares problems, given
    bounds s_max >= s_min > 0 on the singular values of A.
    """
A = aslinearoperator(A)
m, n = A.shape
d = (s_max*s_max+s_min*s_min)/2.0
c = (s_max*s_max-s_min*s_min)/2.0
theta = (1.0-s_min/s_max)/(1.0+s_min/s_max) # convergence rate
    itn_est = np.ceil((np.log(tol) - np.log(2)) / np.log(theta))
    if (iter_lim is None) or (iter_lim < itn_est):
        iter_lim = itn_est
alpha = 0.0
beta = 0.0
r = b.copy()
    x = np.zeros(n)
    v = np.zeros(n)
    for k in range(int(iter_lim)):
if k == 0:
beta = 0.0
alpha = 1.0/d
        elif k == 1:
            beta = -1.0/2.0*(c*c)/(d*d)
            # The reciprocal here is assumed to be the intent: it matches the
            # k >= 2 branch and the standard Chebyshev recurrence. The original
            # `alpha = 1.0*(d-c*c/(2.0*d))` has the wrong scale (units of
            # sigma^2 instead of 1/sigma^2).
            alpha = 1.0/(d - c*c/(2.0*d))
else:
beta = -(c*c)/4.0*(alpha*alpha)
alpha = 1.0/(d-(c*c)/4.0*alpha)
v = A.rmatvec(r) - beta*v
x += alpha*v
r -= alpha*A.matvec(v)
return x
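# Usage sketch on a small synthetic problem (the singular-value bounds are
# computed exactly here for illustration; in practice they would be estimated):
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    A_demo = rng.standard_normal((100, 5))
    b_demo = rng.standard_normal(100)
    s = np.linalg.svd(A_demo, compute_uv=False)
    x_cheb = ls_chebyshev(A_demo, b_demo, s_max=s[0], s_min=s[-1])
    x_ref = np.linalg.lstsq(A_demo, b_demo, rcond=None)[0]
    print(np.allclose(x_cheb, x_ref, atol=1e-6))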
|
05e50ac0167d1ed03ae3e9fa6876c94a50db7893
| 3,647,856
|
import numpy as np
from sklearn.metrics import confusion_matrix

# `get_closest_class_center_index` and `bins_centers` are assumed to be
# defined elsewhere in the source module.


def compute_confusion_matrix(args, df_inference, strata):
    """From a prediction summary (as produced by get_cloud_prediction_summary), compute a confusion matrix."""
y_true = df_inference["vt_" + strata].values
y_predicted = df_inference["pred_" + strata].values
y_true = np.vectorize(get_closest_class_center_index)(y_true)
y_predicted = np.vectorize(get_closest_class_center_index)(y_predicted)
cm = confusion_matrix(
y_true,
y_predicted,
labels=range(len(bins_centers)),
normalize=args.normalize_cm,
)
return cm
|
0662638c4db5ee9e1d94b1e582d9b0824eefd3ff
| 3,647,857
|
def get_metadata(**kwargs):
"""Metadata
Get account metadata
Reference: https://iexcloud.io/docs/api/#metadata
Data Weighting: ``Free``
.. warning:: This endpoint is only available using IEX Cloud. See
:ref:`Migrating` for more information.
"""
return Metadata(**kwargs).fetch()
|
9f4b506bdf978f525e26d7f976a0fdc2f483ae0f
| 3,647,858
|
import numpy as np
import pandas as pd


def load_data():
    """
    Load the Iris dataset
    :return: the loaded data as a matrix
    """
    data = pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data", header=None)
    # use only the first two classes
    data = data[:100]
    # encode the classes as 0 and 1
    data[4] = np.where(data.iloc[:, -1] == 'Iris-setosa', 0, 1)
data = np.asmatrix(data, dtype='float64')
return data
|
fe2a1a999406f23676e58f75f1d5999e9f0697e8
| 3,647,863
|
from datetime import datetime

from fastapi import Depends
from sqlmodel import Session, select

# `Client` (the SQLModel table) and `get_session` are assumed to be defined
# elsewhere in the application. Note: the stdlib `select` module imported by
# the original would not provide the query builder used below.
async def activate_clients(
*,
client_id: int,
session: Session = Depends(get_session),
):
"""
Activate a client using its id as a key.
Parameters
----------
client_id : int
ID of the client to be activated.
session : Session
SQL session that is to be used to activate a client.
Defaults to creating a dependency on the running SQL model session.
"""
statement = select(Client).where(Client.id == client_id)
client_to_update = session.exec(statement).one()
client_to_update.is_active = True
client_to_update.updated_at = datetime.now()
session.add(client_to_update)
session.commit()
session.refresh(client_to_update)
return client_to_update
|
bdd679d94fc68d4c4c75f410d1ed3eec193f868b
| 3,647,864
|
def simulate_until_target_substate_or_max_t(
_simulate_until_attractor_or_target_substate_or_max_t, initial_state, perturbed_nodes_by_t,
predecessor_node_lists, truth_tables):
"""
    Perform a simulation to determine whether it reaches the target substate.
    Does not return states of simulations that don't reach the target substate.
Target substate is not considered as reached until all the
perturbations are carried out. Initial state can be considered as
reached target substate if no perturbations are present.
:param _simulate_until_attractor_or_target_substate_or_max_t: [function] to perform simulation
:param initial_state: initial state of the network
:param perturbed_nodes_by_t: dict (by time steps) of dicts (by nodes) of node states
:param predecessor_node_lists: list of predecessor node lists
:param truth_tables: list of dicts (key: tuple of predecessor node states, value: resulting node state)
:return: list of states where last state contains target substate,
or None if target substate was not reached
"""
states, *_, target_substate_is_reached, _ = _simulate_until_attractor_or_target_substate_or_max_t(
initial_state, perturbed_nodes_by_t, predecessor_node_lists, truth_tables)
return states if target_substate_is_reached else None
|
526ef8085dcbe4bcbc112c3bd4626ec5247e2f97
| 3,647,866
|
import requests
from bs4 import BeautifulSoup
def query_snpedia_online(rsid):
"""
@param soup:
@param rsid:
"""
rsid = rsid.capitalize()
url = "https://bots.snpedia.com/index.php"
rsid_url = f"{url}/{rsid}"
page = requests.get(rsid_url)
soup = BeautifulSoup(page.content, "html.parser")
columns, genotypes = parse_snpedia_online(soup, rsid)
return columns, genotypes
|
138b252917b027564826212cfe96abafef3071b3
| 3,647,867
|
def lower(value: str): # Only one argument.
"""Converts a string into all lowercase"""
return value.lower()
|
59da46b7df5a2afdb106703568635b94174ea57c
| 3,647,869
|
import pprint

from flask import jsonify, request
"""Demonstrates how an access token is validated"""
token = request.headers['Authorization'].split(' ')[1]
message = check_oidc_token(token)
pprint.pprint(message)
return jsonify({
'success': message['success']
})
|
d76d510d1b53a10e12ac9a5c085c0650bc8fb965
| 3,647,870
|
def merge(a, b, path=None):
"""From https://stackoverflow.com/a/7205107"""
if path is None:
path = []
for key in b:
if key in a:
if isinstance(a[key], dict) and isinstance(b[key], dict):
merge(a[key], b[key], path + [str(key)])
elif a[key] == b[key]:
pass # same leaf value
else:
pass # ignore conflicts, left dict wins.
else:
a[key] = b[key]
return a
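# Worked example: nested keys from `b` are folded into `a`; on conflicting
# leaves the value already in `a` wins:
if __name__ == "__main__":
    a = {"x": {"y": 1}, "k": 0}
    b = {"x": {"z": 2}, "k": 9}
    print(merge(a, b))  # {'x': {'y': 1, 'z': 2}, 'k': 0}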
|
8f7990f28168fe0e3eaca790baddc0088baedf65
| 3,647,871
|
import numpy as np


def norm_sq(f, alpha, n, L_mat_long, step):
    r"""The penalized negative log-likelihood functional, with the squared
    L2 norm of $\hat{f_\beta}$ as the regularization term.
    """
    L_mat = L_mat_long.reshape(n, len(f))
    f[f <= 0] = 1e-6  # note: clips `f` in place so the log stays well-defined
    val = np.log(np.dot(L_mat, f))
    return -sum(val)/n + alpha*step**2*sum(f**2)
|
11a2b0fbd296b344b94cd3d5509bb0d4a12ab5fc
| 3,647,872
|
from typing import Optional

# `InstrumentServer` and `init` are assumed to be provided by the surrounding
# iOS-instruments RPC module.


def get_applications(device_id: Optional[str] = None, rpc_channel: Optional[InstrumentServer] = None):
    """
    Get the list of applications installed on the device
    :param device_id: UDID of the target device
    :param rpc_channel: an existing InstrumentServer channel; a temporary one
        is created (and stopped) when omitted
    :return: the installed application list
    """
if not rpc_channel:
_rpc_channel = init(device_id)
else:
_rpc_channel = rpc_channel
    application_list = _rpc_channel.call(
        # "applictionListing" is misspelled in Apple's actual service name
        "com.apple.instruments.server.services.device.applictionListing",
        "installedApplicationsMatching:registerUpdateToken:",
        {}, "").parsed
if not rpc_channel:
_rpc_channel.stop()
return application_list
|
150884e18349003e33011477603e2a6462bd8492
| 3,647,873
|
import xarray as xr


def open_mfdataset(files, use_cftime=True, parallel=True, data_vars='minimal', chunks={'time': 1},
                   coords='minimal', compat='override', drop=None, **kwargs):
    """optimized function for opening large cf datasets.
    based on https://github.com/pydata/xarray/issues/1385#issuecomment-561920115
    note: the `drop` argument is accepted but currently unused.
    """
def drop_all_coords(ds):
return ds.reset_coords(drop=True)
ds = xr.open_mfdataset(files, parallel=parallel, decode_times=False, combine='by_coords',
preprocess=drop_all_coords, decode_cf=False, chunks=chunks,
data_vars=data_vars, coords=coords, compat=compat, **kwargs)
return xr.decode_cf(ds, use_cftime=use_cftime)
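# Usage sketch (the glob pattern, chunking, and variable name below are
# hypothetical examples):
#
#   ds = open_mfdataset("output/ocean_*.nc", chunks={"time": 10})
#   ds["temperature"].mean("time")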
|
ef31c732919f6b3cda0c6e5d9114fac7c39f40f7
| 3,647,874
|
import numpy as np
import scipy.sparse as sp
from scipy.sparse import linalg as ln


def wls_sparse(X, y, w=1., calc_cov=False, verbose=False, **kwargs):
    """Weighted least squares for (possibly sparse) systems, solved with LSQR.
    Parameters
    ----------
    X : array or sparse matrix
        Regressor matrix.
    y : array
        Observations.
    w : array or float
        Weights; defaults to 1 (unweighted).
    calc_cov : bool
        If True, also compute and return the parameter covariance matrix.
    verbose : bool
        Passed to scipy's lsqr as `show`.
    kwargs
        Additional keyword arguments passed to scipy's lsqr.
    Returns
    -------
    p_sol, p_var[, p_cov] : the solution, its variance, and (if requested)
    its covariance matrix.
    """
# The var returned by ln.lsqr is normalized by the variance of the error. To
# obtain the correct variance, it needs to be scaled by the variance of the error.
if w is None: # gracefully default to unweighted
w = 1.
w_std = np.asarray(np.sqrt(w))
wy = np.asarray(w_std * y)
w_std = np.broadcast_to(
np.atleast_2d(np.squeeze(w_std)).T, (X.shape[0], 1))
if not sp.issparse(X):
wX = w_std * X
else:
wX = X.multiply(w_std)
# noinspection PyTypeChecker
out_sol = ln.lsqr(wX, wy, show=verbose, calc_var=True, **kwargs)
p_sol = out_sol[0]
# The residual degree of freedom, defined as the number of observations
# minus the rank of the regressor matrix.
nobs = len(y)
npar = X.shape[1] # ==rank
degrees_of_freedom_err = nobs - npar
# wresid = np.exp(wy) - np.exp(wX.dot(p_sol)) # this option is better.
# difference is small
wresid = wy - wX.dot(p_sol) # this option is done by statsmodel
err_var = np.dot(wresid, wresid) / degrees_of_freedom_err
if calc_cov:
arg = wX.T.dot(wX)
if sp.issparse(arg):
# arg is square of size double: 1 + nt + no; single: 2 : nt
# arg_inv = np.linalg.inv(arg.toarray())
arg_inv = np.linalg.lstsq(
arg.todense(), np.eye(npar), rcond=None)[0]
else:
# arg_inv = np.linalg.inv(arg)
arg_inv = np.linalg.lstsq(
arg, np.eye(npar), rcond=None)[0]
# for tall systems pinv (approximate) is recommended above inv
# https://vene.ro/blog/inverses-pseudoinverses-numerical-issues-spee
# d-symmetry.html
# but better to solve with eye
p_cov = np.array(arg_inv * err_var)
p_var = np.diagonal(p_cov)
assert np.all(p_var >= 0), 'Unable to invert the matrix' + str(p_var)
return p_sol, p_var, p_cov
else:
p_var = out_sol[-1] * err_var # normalized covariance
return p_sol, p_var
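# Usage sketch on a small dense system (synthetic data, not from the source):
if __name__ == "__main__":
    X_demo = np.vander(np.linspace(0, 1, 20), 3)
    y_demo = X_demo @ np.array([1.0, -2.0, 0.5])
    p_sol, p_var = wls_sparse(X_demo, y_demo, w=np.ones(20))
    print(p_sol)  # approximately [1.0, -2.0, 0.5]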
|
ff0bec6d6cdcee85506514348e8a812926427dee
| 3,647,875
|
from typing import Tuple

import numpy as np


def sobel_gradients(source: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """
    Computes partial derivatives to detect gradient magnitudes and angles.
    """
    # `generic_filter` is assumed to be a 2-D convolution helper defined
    # elsewhere in the source module (taking an image and a kernel).
    grad_x = generic_filter(source, np.array([
        [1, 0, -1],
        [2, 0, -2],
        [1, 0, -1]]
    ))
    grad_y = generic_filter(source, np.array([
        [1, 2, 1],
        [0, 0, 0],
        [-1, -2, -1]]
    ))
def normalize_angle(x: float) -> int:
x = round(x % 180)
if x >= 0 and x <= 22.5:
return 0
elif x > 22.5 and x <= 67.5:
return 45
elif x > 67.5 and x <= 112.5:
return 90
elif x > 112.5 and x <= 157.5:
return 135
elif x > 157.5 and x <= 180:
return 0
    # arctan2 returns radians; convert to degrees before snapping to the
    # nearest of {0, 45, 90, 135} (the thresholds above are in degrees, so
    # applying `% 180` to raw radians would collapse every angle to 0).
    thetas = np.degrees(np.arctan2(grad_y, grad_x))
    thetas = np.vectorize(normalize_angle)(thetas)
grads = np.hypot(grad_y, grad_x)
return grads, thetas
|
19c3e3eec46bee738b1e80dd73c5477f72dcf73c
| 3,647,877
|
from typing import Mapping
def flat_dict(d, prefix=""):
"""
Loop through dictionary d
Append any key, val pairs to the return list ret
Add the prefix to any key param
Recurse if encountered value is a nested dictionary.
"""
if not isinstance(d, Mapping):
return d
ret = {}
for key, val in d.items():
if isinstance(val, Mapping):
ret = {**ret, **flat_dict(val, prefix=prefix + str(key) + "_")}
else:
ret[prefix + str(key)] = val
return ret
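# Worked example: nested mappings collapse into underscore-joined keys:
if __name__ == "__main__":
    nested = {"a": {"b": 1, "c": {"d": 2}}, "e": 3}
    print(flat_dict(nested))  # {'a_b': 1, 'a_c_d': 2, 'e': 3}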
|
f0c1f519126dea89c25ee38a9b0dd788c40d2088
| 3,647,878
|
import logging
def _get_filehandler_with_formatter(logname, formatter=None):
""" Return a logging FileHandler for given logname using a given
logging formatter
    :param logname: Path of the file where logs will be stored; used as-is
        (no extension is appended)
:param formatter: An instance of logging.Formatter or None if the default
should be used
:return:
"""
handler = logging.FileHandler(logname)
if formatter is not None:
handler.setFormatter(formatter)
return handler
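# Usage sketch (the file name and format string are illustrative):
if __name__ == "__main__":
    fmt = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
    handler = _get_filehandler_with_formatter("app.log", fmt)
    logger = logging.getLogger("demo")
    logger.addHandler(handler)
    logger.warning("written to app.log")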
|
1cc6f83480e691c4c54c359deabd6364da65f320
| 3,647,879
|
from typing import Tuple

import pandas as pd
import torch
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, TensorDataset

# `generate_supervised` is assumed to be defined elsewhere in the module.


def gen_data_tensors(
        df: pd.DataFrame,
        lag: int = 6,
        batch_size: int = 32,
        validation_ratio: float = 0.2
) -> Tuple[DataLoader, DataLoader, TensorDataset, TensorDataset]:
    """
    Primary goal: create DataLoader objects for training and validation.
    """
x_train, y_train = generate_supervised(df, lag=lag)
# Transform DataFrame to NumpyArray.
x_train, y_train = map(lambda x: x.values, (x_train, y_train))
# Generating Validation Set.
x_train, x_val, y_train, y_val = train_test_split(
x_train, y_train, test_size=validation_ratio, shuffle=True
)
# Transform to Tensor
x_train, y_train, x_val, y_val = map(
torch.tensor, (x_train, y_train, x_val, y_val)
)
assert batch_size <= x_train.shape[0] and batch_size <= x_val.shape[0],\
"Batch size cannot be greater than number of training instances."
train_ds = TensorDataset(x_train, y_train)
train_dl = DataLoader(train_ds, batch_size=batch_size, shuffle=True)
val_ds = TensorDataset(x_val, y_val)
val_dl = DataLoader(val_ds, batch_size=batch_size, shuffle=True)
return train_dl, val_dl, train_ds, val_ds
|
1451d38bd695163d84784f5a6b9b791c3987d56b
| 3,647,880
|
import json
def staff_for_site():
"""
Used by the Req/Req/Create page
- note that this returns Person IDs
"""
try:
site_id = request.args[0]
    except Exception:
result = current.xml.json_message(False, 400, "No Site provided!")
else:
table = s3db.hrm_human_resource
ptable = db.pr_person
query = (table.site_id == site_id) & \
(table.deleted == False) & \
(table.status == 1) & \
((table.end_date == None) | \
(table.end_date > request.utcnow)) & \
(ptable.id == table.person_id)
rows = db(query).select(ptable.id,
ptable.first_name,
ptable.middle_name,
ptable.last_name,
orderby=ptable.first_name)
result = []
append = result.append
for row in rows:
append({"id" : row.id,
"name" : s3_fullname(row)
})
result = json.dumps(result)
response.headers["Content-Type"] = "application/json"
return result
|
d8890f31ae67abf72cdfbd14dd2af08762131e90
| 3,647,881
|
def element_z(sym_or_name):
"""Convert element symbol or name into a valid element atomic number Z.
Args:
sym_or_name: string type representing an element symbol or name.
Returns:
Integer z that is a valid atomic number matching the symbol or name.
Raises:
ElementZError: if the symbol or name cannot be converted.
"""
try:
return _Z_FROM_SYMBOL[validated_symbol(sym_or_name)]
except ElementSymbolError:
pass
try:
return _Z_FROM_NAME[validated_name(sym_or_name)]
except ElementNameError:
raise ElementZError("Must supply either the element symbol or name")
|
b79fec9062539f98ad8c96cdc41a52f7e9c67fd9
| 3,647,882
|
from typing import Tuple
def to_int(s: str) -> Tuple[bool, int]:
"""Convert a string s to an int, if possible."""
try:
n = int(s)
return True, n
except Exception:
return False, 0
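# Examples: the boolean flags whether the conversion succeeded:
if __name__ == "__main__":
    print(to_int("42"))    # (True, 42)
    print(to_int("4.2"))   # (False, 0)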
|
27d24b881f5987037f750a1cee022f7b1daa7c33
| 3,647,883