| content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M) |
|---|---|---|
def url_path_join(*items):
    """
    Make it easier to build a URL path by joining all arguments with a '/'
    character.
    Args:
        items (list): Path elements
    """
    return "/".join([item.strip("/") for item in items])
|
d864c870f9d52bad1268c843098a9f7e1fa69158
| 3,639,604
|
def f_match(pattern, string, flags=None):
    """ Match function
    Args:
        pattern (string): regexp (pattern|/pattern/flags)
        string (string): tested string
        flags (int): regexp flags
    Return:
        boolean
    """
    return bool(build_regexp(pattern, flags).search(to_string(string)))
|
31871f35568ca71c86535cfda5d434a57008f981
| 3,639,605
|
def validate_epoch(val_loader, model, criterion, epoch, args):
    """Perform validation on the validation set"""
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    data_time = AverageMeter()
    # switch to evaluate mode
    model.eval()
    end = time.time()
    bar = Bar('Processing', max=len(val_loader))
    for batch_idx, (input, target) in enumerate(val_loader):
        data_time.update(time.time() - end)
        target = target.cuda(non_blocking=True)  # `async` is a reserved word in Python 3.7+
        input = input.cuda()
        # compute output without tracking gradients (replaces the deprecated volatile=True Variables)
        with torch.no_grad():
            output = model(input)
            loss = criterion(output, target)
        # measure accuracy and record loss
        prec1 = accuracy(output, target, topk=(1,))[0]
        losses.update(loss.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        # plot progress
        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} '.format(
            batch=batch_idx + 1,
            size=len(val_loader),
            data=data_time.avg,
            bt=batch_time.avg,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
            top1=top1.avg,
        )
        bar.next()
    bar.finish()
    return losses.avg, top1.avg
|
084d3c5b200470cd9b3a3d905c83c1046df0b96e
| 3,639,606
|
def utf8_german_fix(uglystring):
    """
    If your string contains ugly characters (like ü, ö, ä or ÃŸ) in your source file, run this string through here.
    This restores the German "Umlaute" in your string, making (ÄÖÜäöü߀) compatible for processing.
    \tprint( utf8_german_fix("üÃŸâ‚¬") ) == ü߀
    """
    uglystring = uglystring.replace('ü', 'ü')
    uglystring = uglystring.replace('ö', 'ö')
    uglystring = uglystring.replace('ä', 'ä')
    uglystring = uglystring.replace('Ã„', 'Ä')
    uglystring = uglystring.replace('Ã–', 'Ö')
    uglystring = uglystring.replace('Ãœ', 'Ü')
    uglystring = uglystring.replace('ÃŸ', 'ß')
    # This was born out of necessity, as there were some issues with a certain API not processing German properly.
    # I am always looking for a smarter way to do this.
    nicestring = uglystring.replace('â‚¬', '€')
    return nicestring
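# The "smarter way" the comment above asks for, sketched under the assumption that
# the breakage is UTF-8 text mistakenly decoded as CP1252 (classic mojibake);
# `utf8_german_fix_roundtrip` is a hypothetical name, not from the original record:
def utf8_german_fix_roundtrip(uglystring):
    # re-encode with the wrong codec, then decode with the right one
    return uglystring.encode('cp1252').decode('utf-8')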
|
7ed12d819b384e3bb5cb019ce7b7afe3d6bb8b86
| 3,639,607
|
def subtends(a1, b1, a2, b2, units='radians'):
    """ Calculate the angle subtended by 2 positions on a sphere """
    if units.lower() == 'degrees':
        a1 = radians(a1)
        b1 = radians(b1)
        a2 = radians(a2)
        b2 = radians(b2)
    x1 = cos(a1) * cos(b1)
    y1 = sin(a1) * cos(b1)
    z1 = sin(b1)
    x2 = cos(a2) * cos(b2)
    y2 = sin(a2) * cos(b2)
    z2 = sin(b2)
    theta = Angle.fromDegrees(degrees(acos(x1 * x2 + y1 * y2 + z1 * z2)))
    return theta
|
f9e99119666fba375240111668229d400f1e37e5
| 3,639,608
|
import inspect
def get_args(obj):
    """Get a list of argument names for a callable."""
    # getfullargspec replaces getargspec, which was removed in Python 3.11
    if inspect.isfunction(obj):
        return inspect.getfullargspec(obj).args
    elif inspect.ismethod(obj):
        return inspect.getfullargspec(obj).args[1:]
    elif inspect.isclass(obj):
        return inspect.getfullargspec(obj.__init__).args[1:]
    elif hasattr(obj, '__call__'):
        return inspect.getfullargspec(obj.__call__).args[1:]
    else:
        raise TypeError("Can't inspect signature of '%s' object." % obj)
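# A minimal usage sketch (hypothetical callables, not part of the original record):
# def f(a, b, c=1): ...
# get_args(f) == ['a', 'b', 'c']
# class K:
#     def __init__(self, x, y): ...
# get_args(K) == ['x', 'y']  # the implicit 'self' is stripped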
|
e9fb13c155a8d8589a619491d44be1c9194c29bc
| 3,639,610
|
def name_value(obj):
    """
    Convert (key, value) pairs to HAR format.
    """
    return [{"name": k, "value": v} for k, v in obj.items()]
|
d9a5bef186b259401302f3b489033325e32af1f5
| 3,639,612
|
import pandas as pd
def get_ids(id_type):
    """Get unique article identifiers from the dataset.
    Parameters
    ----------
    id_type : str
        Dataframe column name, e.g. 'pubmed_id', 'pmcid', 'doi'.
    Returns
    -------
    list of str
        List of unique identifiers in the dataset, e.g. all unique PMCIDs.
    """
    global doc_df
    if doc_df is None:
        doc_df = get_metadata_df()
    unique_ids = list(doc_df[~pd.isna(doc_df[id_type])][id_type].unique())
    return unique_ids
|
6b70d74d79ce7dcdd3654c09f1413ab468514eaa
| 3,639,613
|
def get_items_info(request):
    """Get a collection of item objects"""
    result = request.dbsession.query(Item).all()
    results = []
    for c in result:
        results.append({'id': c.id, 'markup': c.markup})
    return results
|
29265a41ffba7cda211fc86b8c60cae872167b12
| 3,639,614
|
def _test(value, *args, **keywargs):
    """
    A function that exists for test purposes.
    >>> checks = [
    ...     '3, 6, min=1, max=3, test=list(a, b, c)',
    ...     '3',
    ...     '3, 6',
    ...     '3,',
    ...     'min=1, test="a b c"',
    ...     'min=5, test="a, b, c"',
    ...     'min=1, max=3, test="a, b, c"',
    ...     'min=-100, test=-99',
    ...     'min=1, max=3',
    ...     '3, 6, test="36"',
    ...     '3, 6, test="a, b, c"',
    ...     '3, max=3, test=list("a", "b", "c")',
    ...     '''3, max=3, test=list("'a'", 'b', "x=(c)")''',
    ...     "test='x=fish(3)'",
    ... ]
    >>> v = Validator({'test': _test})
    >>> for entry in checks:
    ...     print(v.check(('test(%s)' % entry), 3))
    (3, ('3', '6'), {'test': ['a', 'b', 'c'], 'max': '3', 'min': '1'})
    (3, ('3',), {})
    (3, ('3', '6'), {})
    (3, ('3',), {})
    (3, (), {'test': 'a b c', 'min': '1'})
    (3, (), {'test': 'a, b, c', 'min': '5'})
    (3, (), {'test': 'a, b, c', 'max': '3', 'min': '1'})
    (3, (), {'test': '-99', 'min': '-100'})
    (3, (), {'max': '3', 'min': '1'})
    (3, ('3', '6'), {'test': '36'})
    (3, ('3', '6'), {'test': 'a, b, c'})
    (3, ('3',), {'test': ['a', 'b', 'c'], 'max': '3'})
    (3, ('3',), {'test': ["'a'", 'b', 'x=(c)'], 'max': '3'})
    (3, (), {'test': 'x=fish(3)'})
    """
    return (value, args, keywargs)
|
c011c9386392c4b8dc8034fee33bfcfdec9845ed
| 3,639,615
|
from typing import Union
from typing import Sequence
from typing import Any
from typing import Tuple
def plot_chromaticity_diagram_CIE1976UCS(
    cmfs: Union[
        MultiSpectralDistributions,
        str,
        Sequence[Union[MultiSpectralDistributions, str]],
    ] = "CIE 1931 2 Degree Standard Observer",
    show_diagram_colours: bool = True,
    show_spectral_locus: bool = True,
    **kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
    """
    Plot the *CIE 1976 UCS Chromaticity Diagram*.
    Parameters
    ----------
    cmfs
        Standard observer colour matching functions used for computing the
        spectral locus boundaries. ``cmfs`` can be of any type or form
        supported by the :func:`colour.plotting.filter_cmfs` definition.
    show_diagram_colours
        Whether to display the *Chromaticity Diagram* background colours.
    show_spectral_locus
        Whether to display the *Spectral Locus*.
    Other Parameters
    ----------------
    kwargs
        {:func:`colour.plotting.artist`,
        :func:`colour.plotting.diagrams.plot_chromaticity_diagram`,
        :func:`colour.plotting.render`},
        See the documentation of the previously listed definitions.
    Returns
    -------
    :class:`tuple`
        Current figure and axes.
    Examples
    --------
    >>> plot_chromaticity_diagram_CIE1976UCS()  # doctest: +ELLIPSIS
    (<Figure size ... with 1 Axes>, <...AxesSubplot...>)
    .. image:: ../_static/Plotting_Plot_Chromaticity_Diagram_CIE1976UCS.png
        :align: center
        :alt: plot_chromaticity_diagram_CIE1976UCS
    """
    settings = dict(kwargs)
    settings.update({"method": "CIE 1976 UCS"})
    return plot_chromaticity_diagram(
        cmfs, show_diagram_colours, show_spectral_locus, **settings
    )
|
e9621c2e94dc7a43401905e9a633692a28a1a4d1
| 3,639,616
|
def generateFilter(targetType, left=False):
    """Generate filter function for loaded plugins"""
    def filter(plugins):
        # iterate over a copy: removing from a list while iterating over it skips elements
        for pi in list(plugins):
            if left:
                if not pi.isThisType(targetType):
                    plugins.remove(pi)
                    logger.info("Plugin: {} is filtered out by predefined filter"
                                .format(pi.namePlugin()))
            else:
                if pi.isThisType(targetType):
                    plugins.remove(pi)
                    logger.info("Plugin: {} is filtered out by predefined filter"
                                .format(pi.namePlugin()))
    return filter
|
db97ecd3700bd3c7b56a26cc3d49d4825fb9dc61
| 3,639,617
|
def format_sample_case(s: str) -> str:
    """format_sample_case converts a string s to a good form as a sample case.
    A good form means that it uses LF instead of CRLF, has a trailing newline, and has no superfluous whitespace.
    """
    if not s.strip():
        return ''
    lines = s.strip().splitlines()
    lines = [line.strip() + '\n' for line in lines]
    return ''.join(lines)
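# Example of the normalization (hypothetical input):
# format_sample_case("  1 2 \r\n 3 ") == "1 2\n3\n"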
|
cd691f2bfc8cc56db85f2a55ff3bf4b5afd5f30e
| 3,639,620
|
from typing import Any
from decimal import Decimal
import json
def replace_floats_with_decimals(obj: Any, round_digits: int = 9) -> Any:
    """Convert all instances in `obj` of `float` to `Decimal`.
    Args:
        obj: Input object.
        round_digits: Rounding precision of `Decimal` values.
    Returns:
        Input `obj` with all `float` types replaced by `Decimal`s rounded to
        `round_digits` decimal places.
    """
    def _float_to_rounded_decimal(s: str) -> Decimal:
        return Decimal(s).quantize(Decimal(10) ** -round_digits)
    return json.loads(json.dumps(obj), parse_float=_float_to_rounded_decimal)
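# Example of the round-trip (hypothetical input):
# replace_floats_with_decimals({"a": 0.1234567891, "b": [1.5]}, round_digits=3)
# == {"a": Decimal("0.123"), "b": [Decimal("1.500")]}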
|
60529b4542a3b969b6b6fbe67fd6f26b3b7f3c25
| 3,639,621
|
import numpy as np
def _calc_range_mixed_data_columns(data, observation, dtypes):
    """ Return range for each numeric column, 0 for categorical variables """
    _, cols = data.shape
    result = np.zeros(cols)
    for col in range(cols):
        if np.issubdtype(dtypes[col], np.number):
            result[col] = max(max(data[:, col]), observation[col]) - min(min(data[:, col]), observation[col])
    return result
|
c135227d50b5dd7c6fb1a047ed959ef6c22733f4
| 3,639,622
|
def search(request):
    """
    Display search form/results for events (using distance-based search).
    Template: events/search.html
    Context:
        form - ``anthill.events.forms.SearchForm``
        event_list - events in the near future
        searched - True/False based on if a search was done
        search_results - list of results (may be empty)
    """
    upcoming_events = Event.objects.future().select_related()[0:5]
    if request.GET:
        form = SearchForm(request.GET)
        # only touch cleaned_data if validation succeeded
        if form.is_valid():
            name = form.cleaned_data['name']
            location = form.cleaned_data['location']
            location_range = form.cleaned_data['location_range']
            # only events that haven't happened
            events = Event.objects.future().select_related()
            if name:
                events = events.filter(title__icontains=name)
            if location:
                events = events.search_by_distance(location, location_range)
        else:
            events = []
        context = {'form': form, 'searched': True, 'search_results': events,
                   'event_list': upcoming_events}
    else:
        context = {'form': SearchForm(), 'event_list': upcoming_events}
    return render_to_response('events/search.html', context,
                              context_instance=RequestContext(request))
|
adeb3f509854ab9dcd2a50aa6833d96714d8603b
| 3,639,624
|
def logout() -> Response:
    """Logout route. Logs the current user out.
    :return: A redirect to the landing page.
    """
    name: str = current_user.name
    logout_user()
    flash(f'User "{name}" logged out.', 'info')
    url: str = url_for('root')
    output: Response = redirect(url)
    return output
|
26577da8f5a4bf5feb884c493043877e7c9bd5e7
| 3,639,625
|
def load_room(name):
    """
    There is a potential security problem here.
    Who gets to set name? Can that expose a variable?
    """
    return globals().get(name)
|
14034adf76b8fd086b798cd312977930d42b6e07
| 3,639,626
|
def call_ipt_func(ipt_id: str, function_name: str, source, **kwargs):
    """Processes an image/wrapper with an IPT using a function-like syntax
    :param ipt_id:
    :param function_name:
    :param source:
    :param kwargs:
    :return:
    """
    cls_ = get_ipt_class(ipt_id)
    if cls_ is not None:
        item = cls_(**kwargs)
        func = getattr(item, function_name, None)
        if callable(func):
            return func(wrapper=source)
    return None
|
08645a857981088f6fbde79c8a2aa7057c67445f
| 3,639,627
|
def is_running(service):
    """
    Checks if a service is running using the sysdmanager library.
    :param service: Service to be checked.
    :return: 1 if the service is running, 0 otherwise.
    """
    manager = get_manager()
    if manager.is_active(service + ".service"):
        return 1
    return 0
|
55cef1df395c2082fa5e0243704a0804807a0b22
| 3,639,628
|
import numpy as np
def smoothen(data, kernel):
    """Convolve data with odd-size kernel, with boundary handling."""
    n, = kernel.shape
    assert n % 2 == 1
    m = (n - 1) // 2
    # pad input data
    k = m // 2 + 1
    data_padded = np.concatenate([
        np.full(m, data[:k].mean()),
        data,
        np.full(m, data[-k:].mean())
    ])
    smooth = np.convolve(data_padded, kernel, mode='same')[m:-m]
    assert smooth.shape == data.shape
    return smooth
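# A quick sanity check of the boundary handling (hypothetical data):
# data = np.arange(10.0); kernel = np.ones(3) / 3
# smoothen(data, kernel).shape == (10,)  # output length matches the input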
|
06381249118dc54524ad1617f7e0c01a273cf4a8
| 3,639,629
|
def find_node_name(node_id, g):
    """Look up the node with the given id in the graph's attributes and return its label"""
    return g.node[node_id]["label"]
|
a4656659aeef0427a74822991c2594064b1a9411
| 3,639,630
|
from math import ceil
def xor(data1, data2):
    # element-wise XOR of two byte lists (operator.xor cannot be applied to lists)
    return [x ^ y for x, y in zip(data1, data2)]
def aes_cbc_decrypt(data, key, iv):
    """
    Decrypt with aes in CBC mode
    @param {int[]} data cipher
    @param {int[]} key 16/24/32-Byte cipher key
    @param {int[]} iv 16-Byte IV
    @returns {int[]} decrypted data
    """
    expanded_key = key_expansion(key)
    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
    decrypted_data = []
    previous_cipher_block = iv
    for i in range(block_count):
        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
        block += [0] * (BLOCK_SIZE_BYTES - len(block))
        decrypted_block = aes_decrypt(block, expanded_key)
        decrypted_data += xor(decrypted_block, previous_cipher_block)
        previous_cipher_block = block
    decrypted_data = decrypted_data[: len(data)]
    return decrypted_data
|
37b685f9e497456e75e3a3e83de9b3b4572da328
| 3,639,631
|
from typing import Dict
from collections import Counter  # collections.Counter is instantiable and also works in the annotation
import numpy as np
def score_concepts(merged_graph: AMR, counts: tuple, concept_alignments: Dict[str, str]) -> Counter:
    """
    Calculate TF-IDF counts for each node(concept) in `merged_graph` according to their aligned words.
    Parameters:
        merged_graph(AMR): Graph which contains the concept to be scored.
        counts(tuple): A tuple returned by the DohareEtAl2018.get_tf_idf() function.
        concept_alignments(dict): A dictionary that maps concepts into a list of words.
    Returns:
        Counter: All TF-IDF scores for each concept. If the concept does not exist, the score is 0.
    """
    tf_idf, tf_counts, df_counts, num_docs = counts
    # Get score for each node
    concept_scores = dict()
    for c in merged_graph.get_concept_nodes():
        concept = merged_graph.nodes[c]['label']
        if concept in concept_alignments:
            tf = 0
            df = 0
            for w in concept_alignments[concept]:
                try:
                    tf += tf_counts[0, tf_idf.vocabulary_[w]]
                    df += df_counts[0, tf_idf.vocabulary_[w]]
                except KeyError:
                    pass
            concept_scores[concept] = tf * np.log((num_docs / (df + 1)))  # TF-IDF
    concept_scores = Counter(concept_scores)
    return concept_scores
|
73739ede67ddbc74a3f7c17740b6f31929215e11
| 3,639,632
|
def timestamp_to_double(sparkdf):
    """
    Utility function to cast columns of type 'timestamp' to type 'double.'
    """
    for dtype in sparkdf.dtypes:
        if dtype[1] == 'timestamp':
            sparkdf = sparkdf.withColumn(dtype[0], col(dtype[0]).cast(DoubleType()))
    return sparkdf
|
5ee647dd5452c3c1f51140db944170698e81d7be
| 3,639,633
|
from typing import List
import time
def get_entrez_id_from_organism_full_name_batch(organism_full_names: List[str]) -> List[str]:
    """Retrieves the Entrez numeric ID of the given organisms.
    This numeric identifier is necessary for BLAST and NCBI TAXONOMY
    searches.
    This function uses Biopython functions. Returns BLAST-compatible ID as
    txid + NCBI ID + [ORGN].
    Arguments:
    >organism_full_names: List[str] ~ A list of full names of organisms, e.g. "Xanthomonas
    campestris pv. campestris B100"
    """
    batch_start = 0
    organism_ncbi_ids_result: List[str] = []
    # Go through each organism :D
    while batch_start < len(organism_full_names):
        organism_full_names_slice = organism_full_names[batch_start:batch_start + NCBI_BATCH_SIZE]
        query_names = " OR ".join(organism_full_names_slice)
        # An e-mail has to be set, you may change it to yours if you want to
        # be notified if any problems occur.
        Entrez.email = "x@x.x"
        # Set the Entrez search to the NCBI TAXONOMY database.
        handle = Entrez.esearch(db="Taxonomy", term=query_names)
        # Wait in order to not overload the NCBI's server
        time.sleep(WAIT_TIME)
        # Reformat the Entrez search result in order to extract the Entrez ID
        record = Entrez.read(handle)
        organism_ncbi_ids = record["IdList"][::-1]
        # txid+NUMBER+[ORGN] is the form that is used for NCBI BLASTP searches to restrict a search
        # to an organism using the Entrez query constraint input.
        organism_ncbi_ids_result += ["txid" + x + "[ORGN]" for x in organism_ncbi_ids]
        batch_start += NCBI_BATCH_SIZE
        time.sleep(WAIT_TIME)
    # Return the retrieved IDs :D
    return organism_ncbi_ids_result
|
e0a84006a6646633c4462a1e68dcefe78d3b3bb1
| 3,639,634
|
def gunzip(content):
    """
    Decompression is applied if the first two bytes match
    the gzip magic numbers.
    There is one chance in 65536 that a file that is not gzipped will
    be ungzipped.
    """
    if len(content) == 0:
        raise DecompressionError('File contains zero bytes.')
    gzip_magic_numbers = [0x1f, 0x8b]
    first_two_bytes = [byte for byte in bytearray(content)[:2]]
    if first_two_bytes != gzip_magic_numbers:
        raise DecompressionError('File is not in gzip format. Magic numbers {}, {} did not match {}, {}.'.format(
            hex(first_two_bytes[0]), hex(first_two_bytes[1]), hex(gzip_magic_numbers[0]), hex(gzip_magic_numbers[1])
        ))
    return deflate.gzip_decompress(content)
|
8a74d6ce4d34589bb04a9ba48d32d6e8d6b6e530
| 3,639,635
|
def get_rigid_elements_with_node_ids(model: BDF, node_ids):
    """
    Gets the series of rigid elements that use specific nodes
    Parameters
    ----------
    node_ids : List[int]
        the node ids to check
    Returns
    -------
    rbes : List[int]
        the set of self.rigid_elements
    """
    try:
        nids = set(node_ids)
    except TypeError:
        print(node_ids)
        raise
    rbes = []
    for eid, rigid_element in model.rigid_elements.items():
        if rigid_element.type in ['RBE3', 'RBE2', 'RBE1', 'RBAR', 'RSPLINE', 'RROD', 'RBAR1']:
            independent_nodes = set(rigid_element.independent_nodes)
            dependent_nodes = set(rigid_element.dependent_nodes)
            rbe_nids = independent_nodes | dependent_nodes
            if nids.intersection(rbe_nids):
                rbes.append(eid)
        elif rigid_element.type == 'RSSCON':
            msg = 'skipping card in get_rigid_elements_with_node_ids\n%s' % str(rigid_element)
            model.log.warning(msg)
        else:
            raise RuntimeError(rigid_element.type)
    return rbes
|
58f264bff7a4fe71a5cd57b719762eaf06aa6120
| 3,639,636
|
def genFileBase(f):
    """ Given a filename, generate a safe 'base' name for
    HTML and PNG filenames """
    baseName = w2res.getBaseMulti(f)
    baseName = "R" + w2res.removeGDBCharacters(baseName)
    return baseName
|
ce0e5b8e9261eb0410d8a912e1b77cbe5e25bde3
| 3,639,637
|
import base64
import numpy as np
def _json_custom_hook(d):
    """JSON object hook that deserializes NumPy arrays (and QByteArrays) encoded as dicts."""
    if isinstance(d, dict) and '__ndarray__' in d:
        data = base64.b64decode(d['__ndarray__'])
        return np.frombuffer(data, d['dtype']).reshape(d['shape'])
    elif isinstance(d, dict) and '__qbytearray__' in d:
        return _decode_qbytearray(d['__qbytearray__'])
    return d
|
f5fb62ad38b8822ae304ea00e537b66b7e3b75ee
| 3,639,639
|
def basic_pyxll_function_3(x):
    """docstrings appear as help text in Excel"""
    return x
|
3709d1bce92456b1456ed90d81002f71b7d9e754
| 3,639,640
|
import torch
def log_mean_exp(x, dim=1):
    """
    log(1/k * sum(exp(x))), computed in a numerically stable way.
    @param x: PyTorch.Tensor
        samples from gaussian
    @param dim: integer (default: 1)
        which dimension to take the mean over
    @return: PyTorch.Tensor
        log-mean-exp of x along `dim`
    """
    m = torch.max(x, dim=dim, keepdim=True)[0]
    return m + torch.log(torch.mean(torch.exp(x - m),
                                    dim=dim, keepdim=True))
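# Why the max-shift matters (hypothetical values): a direct
# torch.log(torch.mean(torch.exp(x), dim=1)) overflows to inf for large x,
# while the shifted form stays finite:
# x = torch.tensor([[1000.0, 1000.0]])
# log_mean_exp(x) -> tensor([[1000.]])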
|
7f6476ba3a7ec7873ddb9f66754728bb77452721
| 3,639,641
|
def get_shot_end_frame(shot_node):
    """
    Returns the end frame of the given shot
    :param shot_node: str
    :return: int
    """
    return maya.cmds.getAttr('{}.endFrame'.format(shot_node))
|
efb67eb44afc807202ed46b0096627e8794d2bac
| 3,639,642
|
def is_integer():
    """ Generates a validator to validate if the value
    of a property is an integer.
    """
    def wrapper(obj, prop):
        value = getattr(obj, prop)
        if value is None:
            return (True, None)
        try:
            int(value)
        except (ValueError, TypeError):
            return (False, (msg.INVALID_VALUE,))
        return (True, None)
    return wrapper
|
0f8a5a48c7b9c45666f20f6feede58fa4fc2ff5a
| 3,639,643
|
def int_inputs(n):
    """An error handling function to get integer inputs from the user"""
    while True:
        try:
            option = int(input(Fore.LIGHTCYAN_EX + "\n >>> "))
            if option not in range(1, n + 1):
                i_print_r("Invalid Entry :( Please Try Again.")
                continue
            else:
                return option
        except ValueError:
            i_print_r("Invalid Entry :( Please Try again")
            continue
|
b3554bc13a2c8a43d0279b6e800ed2f6409e755a
| 3,639,644
|
def gen_binder_rst(fname, binder_conf):
    """Generate the RST + link for the Binder badge.
    Parameters
    ----------
    fname: str
        The path to the `.py` file for which a Binder badge will be generated.
    binder_conf: dict | None
        If a dictionary it must have the following keys:
        'url': The URL of the BinderHub instance that's running a Binder
            service.
        'org': The GitHub organization to which the documentation will be
            pushed.
        'repo': The GitHub repository to which the documentation will be
            pushed.
        'branch': The Git branch on which the documentation exists (e.g.,
            gh-pages).
        'dependencies': A list of paths to dependency files that match the
            Binderspec.
    Returns
    -------
    rst : str
        The reStructuredText for the Binder badge that links to this file.
    """
    binder_url = gen_binder_url(fname, binder_conf)
    rst = (
        "\n"
        "  .. container:: binder-badge\n\n"
        "    .. image:: https://static.mybinder.org/badge.svg\n"
        "      :target: {}\n"
        "      :width: 150 px\n").format(binder_url)
    return rst
|
65f8cfc04a11d6660c37cce669a85a133083517e
| 3,639,645
|
from datetime import datetime
def downgrade():
    """Make refresh token field not nullable."""
    bind = op.get_bind()
    session = Session(bind=bind)
    class CRUDMixin(object):
        """Mixin that adds convenience methods for CRUD (create, read, update, delete) ops."""
        @classmethod
        def create_as(cls, current_user, **kwargs):
            """Create a new record and save it to the database as 'current_user'."""
            assert hasattr(cls, 'modified_by') and hasattr(cls, 'created_by')
            instance = cls(**kwargs)
            return instance.save_as(current_user)
        @classmethod
        def create(cls, **kwargs):
            """Create a new record and save it to the database."""
            instance = cls(**kwargs)
            return instance.save()
        def update_as(self, current_user, commit=True, preserve_modified=False, **kwargs):
            """Update specific fields of the record and save as 'current_user'."""
            for attr, value in kwargs.items():
                setattr(self, attr, value)
            return self.save_as(current_user, commit=commit, preserve_modified=preserve_modified)
        def update(self, commit=True, preserve_modified=False, **kwargs):
            """Update specific fields of a record."""
            for attr, value in kwargs.items():
                setattr(self, attr, value)
            return self.save(commit=commit, preserve_modified=preserve_modified)
        def save_as(self, current_user, commit=True, preserve_modified=False):
            """Save instance as 'current_user'."""
            assert hasattr(self, 'modified_by') and hasattr(self, 'created_by')
            # noinspection PyUnresolvedReferences
            if current_user and not self.created_at:
                # noinspection PyAttributeOutsideInit
                self.created_by = current_user
            if current_user and not preserve_modified:
                # noinspection PyAttributeOutsideInit
                self.modified_by = current_user
            return self.save(commit=commit, preserve_modified=preserve_modified)
        def save(self, commit=True, preserve_modified=False):
            """Save the record."""
            session.add(self)
            if commit:
                if preserve_modified and hasattr(self, 'modified_at'):
                    modified_dt = self.modified_at
                    session.commit()
                    self.modified_at = modified_dt
                session.commit()
            return self
        def delete(self, commit=True):
            """Remove the record from the database."""
            session.delete(self)
            return commit and session.commit()
    class Model(CRUDMixin, Base):
        """Base model class that includes CRUD convenience methods."""
        __abstract__ = True
        @staticmethod
        def _get_rand_hex_str(length=32):
            """Create random hex string."""
            return getencoder('hex')(urandom(length // 2))[0].decode('utf-8')
    class SurrogatePK(object):
        """A mixin that adds a surrogate integer primary key column to declarative-mapped class."""
        __table_args__ = {'extend_existing': True}
        id = Column(Integer, primary_key=True)
        @classmethod
        def get_by_id(cls, record_id):
            """Get record by ID."""
            if any((isinstance(record_id, basestring) and record_id.isdigit(),
                    isinstance(record_id, (int, float))),):
                # noinspection PyUnresolvedReferences
                return cls.query.get(int(record_id))
            else:
                return None
    def reference_col(tablename, nullable=False, pk_name='id', ondelete=None, **kwargs):
        """Column that adds primary key foreign key reference.
        Usage ::
            category_id = reference_col('category')
            category = relationship('Category', backref='categories')
        """
        return Column(
            ForeignKey('{0}.{1}'.format(tablename, pk_name), ondelete=ondelete),
            nullable=nullable, **kwargs)
    class Client(Model):
        """An OAuth2 Client."""
        __tablename__ = 'clients'
        client_id = Column(String(32), primary_key=True)
        client_secret = Column(String(256), unique=True, nullable=False)
        is_confidential = Column(Boolean(), default=True, nullable=False)
        _redirect_uris = Column(Text(), nullable=False)
        _default_scopes = Column(Text(), nullable=False)
        # Human readable info fields
        name = Column(String(64), nullable=False)
        description = Column(String(400), nullable=False)
        modified_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow,
                             nullable=False)
        modified_by_id = reference_col('users', nullable=False)
        modified_by = relationship('User', foreign_keys=modified_by_id)
        created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
        created_by_id = reference_col('users', nullable=False)
        created_by = relationship('User', foreign_keys=created_by_id)
        def __init__(self, redirect_uris=None, default_scopes=None, **kwargs):
            """Create instance."""
            client_id = Client._get_rand_hex_str(32)
            client_secret = Client._get_rand_hex_str(256)
            Model.__init__(self, client_id=client_id, client_secret=client_secret, **kwargs)
            self.redirect_uris = redirect_uris
            self.default_scopes = default_scopes
        def __repr__(self):
            """Represent instance as a unique string."""
            return '<Client({name!r})>'.format(name=self.name)
    class Collection(SurrogatePK, Model):
        """A collection of library stuff, a.k.a. 'a sigel'."""
        __tablename__ = 'collections'
        code = Column(String(255), unique=True, nullable=False)
        friendly_name = Column(String(255), unique=False, nullable=False)
        category = Column(String(255), nullable=False)
        is_active = Column(Boolean(), default=True)
        permissions = relationship('Permission', back_populates='collection', lazy='joined')
        replaces = Column(String(255))
        replaced_by = Column(String(255))
        modified_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow,
                             nullable=False)
        modified_by_id = reference_col('users', nullable=False)
        modified_by = relationship('User', foreign_keys=modified_by_id)
        created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
        created_by_id = reference_col('users', nullable=False)
        created_by = relationship('User', foreign_keys=created_by_id)
        def __init__(self, code, friendly_name, category, **kwargs):
            """Create instance."""
            Model.__init__(self, code=code, friendly_name=friendly_name, category=category,
                           **kwargs)
        def __repr__(self):
            """Represent instance as a unique string."""
            return '<Collection({code!r})>'.format(code=self.code)
    class Role(SurrogatePK, Model):
        """A role for a user."""
        __tablename__ = 'roles'
        name = Column(String(80), unique=True, nullable=False)
        user_id = reference_col('users', nullable=True)
        user = relationship('User', back_populates='roles')
        def __init__(self, name, **kwargs):
            """Create instance."""
            Model.__init__(self, name=name, **kwargs)
        def __repr__(self):
            """Represent instance as a unique string."""
            return '<Role({name})>'.format(name=self.name)
    class PasswordReset(SurrogatePK, Model):
        """Password reset token for a user."""
        __tablename__ = 'password_resets'
        user_id = reference_col('users', nullable=True)
        user = relationship('User', back_populates='password_resets')
        code = Column(String(32), unique=True, nullable=False)
        is_active = Column(Boolean(), default=True, nullable=False)
        expires_at = Column(DateTime, nullable=False,
                            default=lambda: datetime.utcnow() + timedelta(hours=7 * 24))
        modified_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
        created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
        def __init__(self, user, **kwargs):
            """Create instance."""
            Model.__init__(self, user=user, code=self._get_rand_hex_str(32), **kwargs)
        def __repr__(self):
            """Represent instance as a unique string."""
            return '<PasswordReset({email!r})>'.format(email=self.user.email)
    class User(UserMixin, SurrogatePK, Model):
        """A user of the app."""
        __tablename__ = 'users'
        id = Column(Integer, primary_key=True)
        email = Column(String(255), unique=True, nullable=False)
        full_name = Column(String(255), unique=False, nullable=False)
        password = Column(LargeBinary(128), nullable=False)
        last_login_at = Column(DateTime, default=None)
        tos_approved_at = Column(DateTime, default=None)
        is_active = Column(Boolean(), default=False, nullable=False)
        is_admin = Column(Boolean(), default=False, nullable=False)
        permissions = relationship('Permission', back_populates='user',
                                   foreign_keys='Permission.user_id', lazy='joined')
        roles = relationship('Role', back_populates='user')
        password_resets = relationship('PasswordReset', back_populates='user')
        modified_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow,
                             nullable=False)
        modified_by_id = reference_col('users', nullable=False)
        modified_by = relationship('User', remote_side=id, foreign_keys=modified_by_id)
        created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
        created_by_id = reference_col('users', nullable=False)
        created_by = relationship('User', remote_side=id, foreign_keys=created_by_id)
        def __init__(self, email, full_name, password=None, **kwargs):
            """Create instance."""
            Model.__init__(self, email=email, full_name=full_name, **kwargs)
            if password:
                self.set_password(password)
            else:
                self.set_password(hexlify(urandom(16)))
        def __repr__(self):
            """Represent instance as a unique string."""
            return '<User({email!r})>'.format(email=self.email)
    class Permission(SurrogatePK, Model):
        """A permission on a Collection, granted to a User."""
        __table_args__ = (UniqueConstraint('user_id', 'collection_id'), SurrogatePK.__table_args__)
        __tablename__ = 'permissions'
        user_id = reference_col('users', nullable=False)
        user = relationship('User', back_populates='permissions',
                            foreign_keys=user_id, lazy='joined')
        collection_id = reference_col('collections', nullable=False)
        collection = relationship('Collection', back_populates='permissions', lazy='joined')
        registrant = Column(Boolean(), default=False, nullable=False)
        cataloger = Column(Boolean(), default=False, nullable=False)
        cataloging_admin = Column(Boolean(), default=False, nullable=False)
        modified_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow,
                             nullable=False)
        modified_by_id = reference_col('users', nullable=False)
        modified_by = relationship('User', foreign_keys=modified_by_id)
        created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
        created_by_id = reference_col('users', nullable=False)
        created_by = relationship('User', foreign_keys=created_by_id)
        def __init__(self, **kwargs):
            """Create instance."""
            Model.__init__(self, **kwargs)
        def __repr__(self):
            """Represent instance as a unique string."""
            return '<Permission({user!r}@{collection!r})>'.format(user=self.user,
                                                                  collection=self.collection)
    class Token(SurrogatePK, Model):
        """An OAuth2 Bearer token."""
        __tablename__ = 'tokens'
        user_id = reference_col('users', nullable=False, ondelete='CASCADE')
        user = relationship('User')
        client_id = reference_col('clients', pk_name='client_id',
                                  nullable=False, ondelete='CASCADE')
        client = relationship('Client')
        token_type = Column(String(40), nullable=False, default='Bearer')
        access_token = Column(String(256), nullable=False, unique=True)
        refresh_token = Column(String(256), unique=True)
        expires_at = Column(DateTime, nullable=False,
                            default=lambda: datetime.utcnow() + timedelta(seconds=3600))
        _scopes = Column(Text, nullable=False)
        def __init__(self, scopes=None, **kwargs):
            """Create instance."""
            Model.__init__(self, **kwargs)
            self.scopes = scopes
        @staticmethod
        def get_all_by_user(user):
            """Get all tokens for specified user."""
            return Token.query.filter_by(user=user).all()
        @staticmethod
        def delete_all_by_user(user):
            """Delete all tokens for specified user."""
            Token.query.filter_by(user=user).delete()
        @hybrid_property
        def expires(self):
            """Return 'expires_at'."""
            return self.expires_at
        @hybrid_property
        def is_active(self):
            """Return still active (now < expires_at)."""
            return self.expires_at > datetime.utcnow()
        @hybrid_property
        def scopes(self):
            """Return scopes list."""
            return self._scopes.split(' ')
        @scopes.setter
        def scopes(self, value):
            """Store scopes list as string."""
            if isinstance(value, string_types):
                self._scopes = value
            elif isinstance(value, list):
                self._scopes = ' '.join(value)
            else:
                self._scopes = value
        def __repr__(self):
            """Represent instance as a unique string."""
            return '<Token({user!r},{client!r})>'.format(user=self.user.email,
                                                         client=self.client.name)
    # ensure all tokens have a refresh_token
    for token in session.query(Token).filter(Token.refresh_token == None).all():  # noqa: E711
        token.refresh_token = Model._get_rand_hex_str()
        token.save(commit=True, preserve_modified=True)
    with op.batch_alter_table('tokens', schema=None) as batch_op:
        batch_op.alter_column('refresh_token',
                              existing_type=sa.VARCHAR(length=256),
                              nullable=False)
|
ce9f1e8665d126b08fde6f0b4652b431b111f34c
| 3,639,646
|
def height(grid):
    """Gets the height of the grid (stored in row-major order)."""
    return len(grid)
|
b90bdb029518cfdaaa4bf93dd77b8996e646b322
| 3,639,647
|
import uuid
import json
from unittest import mock  # assumed import; the original fence test module also provides `config`
def test_blank_index_upload_missing_indexd_credentials_unable_to_load_json(
        app, client, auth_client, encoded_creds_jwt, user_client
):
    """
    test BlankIndex upload call but unable to load json with a ValueError
    """
    class MockArboristResponse:
        """
        Mock response for requests lib for Arborist
        """
        def __init__(self, data, status_code=200):
            """
            Set up mock response
            """
            self.data = data
            self.status_code = status_code
        def json(self):
            """
            Mock json() call
            """
            return self.data
    class MockResponse:
        """
        Mock response for requests lib
        """
        def __init__(self, data, status_code=200):
            """
            Set up mock response
            """
            self.data = data
            self.status_code = status_code
        def json(self):
            """
            Mock json() call
            """
            raise ValueError("unable to get json")
        def text(self):
            """
            Mock text() call
            """
            return self.data
    data_requests_mocker = mock.patch(
        "fence.blueprints.data.indexd.requests", new_callable=mock.Mock
    )
    arborist_requests_mocker = mock.patch(
        "gen3authz.client.arborist.client.httpx.Client.request", new_callable=mock.Mock
    )
    with data_requests_mocker as data_requests, arborist_requests_mocker as arborist_requests:
        data_requests.post.return_value = MockResponse(
            {
                "did": str(uuid.uuid4()),
                "rev": str(uuid.uuid4())[:8],
                "baseid": str(uuid.uuid4()),
            }
        )
        data_requests.post.return_value.status_code = 401
        arborist_requests.return_value = MockArboristResponse({"auth": True})
        arborist_requests.return_value.status_code = 200
        headers = {
            "Authorization": "Bearer " + encoded_creds_jwt.jwt,
            "Content-Type": "application/json",
        }
        file_name = "asdf"
        data = json.dumps({"file_name": file_name})
        response = client.post("/data/upload", headers=headers, data=data)
        indexd_url = app.config.get("INDEXD") or app.config.get("BASE_URL") + "/index"
        endpoint = indexd_url + "/index/blank/"
        indexd_auth = (config["INDEXD_USERNAME"], config["INDEXD_PASSWORD"])
        data_requests.post.assert_called_once_with(
            endpoint,
            auth=indexd_auth,
            json={"file_name": file_name, "uploader": user_client.username},
            headers={},
        )
        assert response.status_code == 500, response
        assert not response.json
|
b91b921893d2d6c672a313d20fe3820b2027fbcd
| 3,639,648
|
import urllib.parse
def parameterize(url):
    """Encode input URL as POST parameter.
    url: a string which is the URL to be passed to the ur1.ca service.
    Returns the POST parameter constructed from the URL.
    """
    # urllib.urlencode moved to urllib.parse.urlencode in Python 3
    return urllib.parse.urlencode({"longurl": url})
|
f665b67d3637074dcf419a1ebfb153dd7f69acb7
| 3,639,649
|
def sum_ints(*args, **kwargs):
    """ This function is contrived to illustrate args in a function.
    """
    print(args)
    return sum(args)
|
4eb1f78d2e26c63b7e9d6086e55e9588d0257534
| 3,639,650
|
def set_have_mods(have_mods: bool) -> None:
    """set_have_mods(have_mods: bool) -> None
    (internal)
    """
    return None
|
a8a504e19450887e473fa607fb7a33253d3de4f3
| 3,639,651
|
import logging
def user_required(handler):
    """
    Decorator for checking if there's a user associated
    with the current session.
    Will also fail if there's no session present.
    """
    def check_login(self, *args, **kwargs):
        """
        If handler has no login_url specified invoke a 403 error
        """
        if self.request.query_string != '':
            query_string = '?' + self.request.query_string
        else:
            query_string = ''
        continue_url = self.request.path_url + query_string
        login_url = self.uri_for('login', **{'continue': continue_url})
        try:
            auth = self.auth.get_user_by_session()
            if not auth:
                try:
                    self.redirect(login_url, abort=True)
                except (AttributeError, KeyError) as e:
                    self.abort(403)
        except AttributeError as e:
            # avoid AttributeError when the session was deleted from the server
            logging.error(e)
            self.auth.unset_session()
            self.redirect(login_url)
        return handler(self, *args, **kwargs)
    return check_login
|
4bc794d08989729aa0e8cd8100fa66166083917a
| 3,639,652
|
def student_editapplication(request):
    """View allowing a student to edit and/or submit their saved application"""
    FSJ_user = get_FSJ_user(request.user.username)
    award_id = request.GET.get('award_id', '')
    try:
        award = Award.objects.get(awardid=award_id)
        application = Application.objects.get(award=award, student=FSJ_user)
        if (not application.award.is_active) or (not application.award.is_open()):
            return redirect('/awards/')
        if application.is_submitted:
            return redirect('/awards/')
        if request.method == "POST":
            form = ApplicationRestrictedForm(request.POST, request.FILES, instance=application)
            if form.is_valid():
                application = form.save(commit=False)
                if '_save' in request.POST:
                    application.is_submitted = False
                    application.save()
                    return redirect('/awards/')
                elif '_submit' in request.POST:
                    if not award.is_open():
                        return redirect('/awards/')
                    application.is_submitted = True
                    if award.documents_needed == True and not application.application_file:
                        messages.warning(request, 'Please upload a document.')
                    else:
                        application.save()
                        return redirect('/awards/')
                elif '_delete' in request.POST:
                    try:
                        application = Application.objects.get(award=award, student=FSJ_user)
                        if (not award.is_active) or (not award.is_open()):
                            return redirect('/awards/')
                        else:
                            application.delete()
                    except Application.DoesNotExist:
                        # a bare `except: pass` here would hide unrelated errors
                        pass
                    return redirect('/awards/')
        else:
            form = ApplicationRestrictedForm(instance=application)
        context = get_standard_context(FSJ_user)
        template = loader.get_template("FSJ/student_apply.html")
        context["form"] = form
        context['award'] = award
        url = "/awards/edit/?award_id=" + str(award.awardid)
        context["url"] = url
        return HttpResponse(template.render(context, request))
    except Application.DoesNotExist:
        return redirect('/awards/')
|
ebfda9d2ac12c3d75e4ffe0dd8a7d2a170e6f80c
| 3,639,653
|
def get_haps_from_variants(translation_table_path: str, vcf_data: str,
                           sample_id: str, solver: str = "CBC",
                           config_path: str = None, phased=False) -> tuple:
    """
    Same as get_haps_from_vcf, but bypasses the VCF file so that you can provide formatted variants from another input.
    Get called haplotypes and additional information.
    Args:
        translation_table_path (str): path to the translation table
        vcf_data (str): pre-formatted variant data
        sample_id (str): sample identifier
        config_path ([type], optional): [description]. Defaults to None.
    Returns:
        tuple: translation_table_version, called_haplotypes, variants_associated_with_haplotype, matched_translation_table
    """
    config = get_config(config_path)
    gene = AbstractGene(translation_table_path, variants=vcf_data, solver=solver, config=config, phased=phased)
    haplotype = Haplotype(gene, sample_id, config=config)
    haplotype.table_matcher()
    return haplotype.optimize_hap()
|
018e623532de1d414157610a9e63a3657dfdc061
| 3,639,655
|
import torch
def _populate_number_fields(data_dict):
    """Returns a dict with the number fields N_NODE, N_EDGE filled in.
    The N_NODE field is filled if the graph contains a non-`None` NODES field;
    otherwise, it is set to 0.
    The N_EDGE field is filled if the graph contains a non-`None` RECEIVERS field;
    otherwise, it is set to 0.
    Args:
        data_dict: An input `dict`.
    Returns:
        The data `dict` with number fields.
    """
    dct = data_dict.copy()
    for number_field, data_field in [[N_NODE, NODES], [N_EDGE, RECEIVERS]]:
        if dct.get(number_field) is None:
            if dct[data_field] is not None:
                dct[number_field] = torch.tensor(dct[data_field].size()[0], dtype=torch.int64)
            else:
                dct[number_field] = torch.tensor(0, dtype=torch.int64)
    return dct
|
999eee8573d3a11d889a361905f65ce5b996a3c0
| 3,639,656
|
import datetime as dt
import numpy as np
import pandas as pd
import matplotlib.dates as mdates
from netCDF4 import num2date  # assumed source; cftime.num2date has the same signature
def to_frame(nc):
    """
    Convert netCDF4 dataset to pandas frames
    """
    s_params = ["time", "bmnum", "noise.sky", "tfreq", "scan", "nrang", "intt.sc", "intt.us", "mppul", "scnum"]
    v_params = ["v", "w_l", "gflg", "p_l", "slist", "gflg_conv", "gflg_kde", "v_mad", "cluster_tag", "ribiero_gflg"]
    _dict_ = {k: [] for k in s_params + v_params}
    tparam = {"units": nc.variables["time"].units, "calendar": nc.variables["time"].calendar,
              "only_use_cftime_datetimes": False}
    for i in range(nc.variables["slist"].shape[0]):
        sl = nc.variables["slist"][:][i, :]
        idx = np.isnan(sl)
        L = len(sl[~idx])
        for k in s_params:
            _dict_[k].extend(L * [nc.variables[k][i]])
        for k in v_params:
            _dict_[k].extend(nc.variables[k][i, ~idx])
    o = pd.DataFrame.from_dict(_dict_)
    time = o.time.apply(lambda x: num2date(x, tparam["units"], tparam["calendar"],
                                           only_use_cftime_datetimes=tparam["only_use_cftime_datetimes"])).tolist()
    time = np.array([x._to_real_datetime() for x in time]).astype("datetime64[ns]")
    time = [dt.datetime.utcfromtimestamp(x.astype(int) * 1e-9) for x in time]
    o["dates"] = time
    o["mdates"] = o.dates.apply(lambda x: mdates.date2num(x)).tolist()
    o = o.sort_values(by=["dates"])
    return o
|
5db0e24b113c0b19dba45df66ec8e42dee3e4b1a
| 3,639,658
|
def gather_grade_info(fctx, flow_session, answer_visits):
    """
    :returns: a :class:`GradeInfo`
    """
    all_page_data = (FlowPageData.objects
                     .filter(
                         flow_session=flow_session,
                         ordinal__isnull=False)
                     .order_by("ordinal"))
    points = 0
    provisional_points = 0
    max_points = 0
    max_reachable_points = 0
    fully_correct_count = 0
    partially_correct_count = 0
    incorrect_count = 0
    unknown_count = 0
    for i, page_data in enumerate(all_page_data):
        page = instantiate_flow_page_with_ctx(fctx, page_data)
        assert i == page_data.ordinal
        if answer_visits[i] is None:
            # This is true in principle, but early code to deal with survey questions
            # didn't generate synthetic answer visits for survey questions, so this
            # can't actually be enforced.
            # assert not page.expects_answer()
            continue
        if not page.is_answer_gradable():
            continue
        grade = answer_visits[i].get_most_recent_grade()
        assert grade is not None
        feedback = get_feedback_for_grade(grade)
        max_points += grade.max_points
        if feedback is None or feedback.correctness is None:
            unknown_count += 1
            points = None
            continue
        max_reachable_points += grade.max_points
        page_points = grade.max_points*feedback.correctness
        if points is not None:
            points += page_points
        provisional_points += page_points
        if grade.max_points > 0:
            if feedback.correctness == 1:
                fully_correct_count += 1
            elif feedback.correctness == 0:
                incorrect_count += 1
            else:
                partially_correct_count += 1
    return GradeInfo(
        points=points,
        provisional_points=provisional_points,
        max_points=max_points,
        max_reachable_points=max_reachable_points,
        fully_correct_count=fully_correct_count,
        partially_correct_count=partially_correct_count,
        incorrect_count=incorrect_count,
        unknown_count=unknown_count)
|
516beddad0b9d58239e1d3c9ef675d2b078dd141
| 3,639,659
|
import re
def numericalSort(value):
    """
    Sort file names into ascending numerical order when reading multiple files.
    Input
    ------
    value : path to the file to be read
    Output
    ------
    parts : the numeric parts of the file name
    """
    numbers = re.compile(r'(\d+)')
    parts = numbers.split(value)
    parts[1::2] = map(int, parts[1::2])
    return parts
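# Typical use as a sort key (hypothetical file names):
# sorted(["img10.png", "img2.png"], key=numericalSort) == ["img2.png", "img10.png"]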
|
1fc8c748b37a89fe9ea3fb0283b5ec8012781028
| 3,639,660
|
def add_markings(obj, marking, selectors):
    """
    Append a granular marking to the granular_markings collection. The method
    makes a best-effort attempt to distinguish between a marking-definition
    or language granular marking.
    Args:
        obj: An SDO or SRO object.
        marking: identifier or list of marking identifiers that apply to the
            properties selected by `selectors`.
        selectors: list of type string, selectors must be relative to the TLO
            in which the properties appear.
    Raises:
        InvalidSelectorError: If `selectors` fail validation.
    Returns:
        A new version of the given SDO or SRO with specified markings added.
    """
    selectors = utils.convert_to_list(selectors)
    marking = utils.convert_to_marking_list(marking)
    utils.validate(obj, selectors)
    granular_marking = []
    for m in marking:
        if is_marking(m):
            granular_marking.append({'marking_ref': m, 'selectors': sorted(selectors)})
        else:
            granular_marking.append({'lang': m, 'selectors': sorted(selectors)})
    if obj.get('granular_markings'):
        granular_marking.extend(obj.get('granular_markings'))
    granular_marking = utils.expand_markings(granular_marking)
    granular_marking = utils.compress_markings(granular_marking)
    return new_version(obj, granular_markings=granular_marking, allow_custom=True)
|
b7ede77fac6524cba906fd736edb9d43fe41676b
| 3,639,661
|
def add_to_list(str_to_add, dns_names):
    """
    This will add a string to the dns_names array if it does not exist.
    It will then return the index of the string within the array.
    """
    if str_to_add not in dns_names:
        dns_names.append(str_to_add)
    return dns_names.index(str_to_add)
|
4720708778fccc7a16dc66ad52ec911a5acb1f94
| 3,639,662
|
def check_icmp_path(sniffer, path, nodes, icmp_type=ipv6.ICMP_ECHO_REQUEST):
    """Verify icmp message is forwarded along the path.
    """
    len_path = len(path)
    # Verify icmp message is forwarded to the next node of the path.
    for i in range(0, len_path):
        node_msg = sniffer.get_messages_sent_by(path[i])
        node_icmp_msg = node_msg.get_icmp_message(icmp_type)
        if i < len_path - 1:
            next_node = nodes[path[i + 1]]
            next_node_rloc16 = next_node.get_addr16()
            assert next_node_rloc16 == node_icmp_msg.mac_header.dest_address.rloc, "Error: The path is unexpected."
        else:
            return True
    return False
|
0080837e5f79435396d9cf6566c60bdf40d736c9
| 3,639,663
|
import flask
def ping():
    """Determine if the container is working and healthy. In this sample container, we declare
    it healthy if we can load the model successfully."""
    health = scoring_service.get_model() is not None  # You can insert a health check here
    status = 200 if health else 404
    return flask.Response(response="\n", status=status, mimetype="application/json")
|
8e3cde6098db42be1f93ee04ad4092bef1aec36f
| 3,639,664
|
def cyber_pose_to_carla_transform(cyber_pose):
    """
    Convert a Cyber pose to a carla transform.
    """
    return carla.Transform(
        cyber_point_to_carla_location(cyber_pose.position),
        cyber_quaternion_to_carla_rotation(cyber_pose.orientation))
|
3bd700c8a3f31cadedcaea798f611d97b379115d
| 3,639,665
|
def _is_predator_testcase(testcase):
    """Return bool and error message for whether this testcase is applicable to
    predator or not."""
    if build_manager.is_custom_binary():
        return False, 'Not applicable to custom binaries.'
    if testcase.regression != 'NA':
        if not testcase.regression:
            return False, 'No regression range, wait for regression task to finish.'
        if ':' not in testcase.regression:
            return False, 'Invalid regression range %s.' % testcase.regression
    return True, None
|
4f9975801bf878522b729035a31685bef170f2dd
| 3,639,666
|
def _a_ij_Aij_Dij2(A):
    """A term that appears in the ASE of Kendall's tau and Somers' D."""
    # See `somersd` References [2] section 4: Modified ASEs to test the null hypothesis...
    m, n = A.shape
    count = 0
    for i in range(m):
        for j in range(n):
            count += A[i, j]*(_Aij(A, i, j) - _Dij(A, i, j))**2
    return count
|
5deb884310984d23b70d3364d75d0795e847dcb3
| 3,639,667
|
import requests
from bs4 import BeautifulSoup
def getWeekHouseMsg():
    """
    Fetch the week's housing-market figures.
    :return:
    """
    response = requests.get(url=week_host, headers=headers).text
    soup = BeautifulSoup(response, 'lxml')
    house_raw = soup.select('div[class=xfjj]')
    # average price of second-hand homes
    second_hand_price = house_raw[0].select('.f36')[0].string
    # number of second-hand home sales
    second_hand_num = house_raw[1].select('.f36')[0].string
    # average price of new homes
    new_house_price = house_raw[2].select('.f36')[0].string
    # number of new home sales
    new_house_num = house_raw[3].select('.f36')[0].string
    # print(second_hand_price, second_hand_num, new_house_price, new_house_num)
    return new_house_price, new_house_num, second_hand_price, second_hand_num
|
775fc1b2fa26c1f48890206d5a278f842c5aeaac
| 3,639,668
|
import numpy as np
def _match(x, y):
    """Returns an array of the positions of (first) matches of y in x
    This is similar to R's `match` or Matlab's `[Lia, Locb] = ismember`
    See https://stackoverflow.com/a/8251757
    This assumes that all values in y are in x, but no check is made
    Parameters
    ----------
    x : 1-d array
    y : 1-d array
    Returns
    -------
    yindex : 1-d array
        np.all(x[yindex] == y) should be True
    """
    index = np.argsort(x)
    sorted_index = np.searchsorted(x, y, sorter=index)
    yindex = index[sorted_index]
    return yindex
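# A quick check of the documented contract (hypothetical arrays):
# x = np.array([30, 10, 20]); y = np.array([20, 30])
# np.all(x[_match(x, y)] == y)  # True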
|
e36b5ad1dce2b7ed18039da16aa6de7a741ecb14
| 3,639,669
|
import numpy as np
def buildMeanAndCovMatFromRow(row):
    """
    Build a covariance matrix from a row
    Parameters
    ---------
    row : astropy Table row
        Entries: {X, Y, Z, U, V, W, dX, dY, ..., cXY, cXZ, ...}
    Return
    ------
    cov_mat : [6,6] numpy array
        Diagonal elements are dX^2, dY^2, ...
        Off-diagonal elements are cXY*dX*dY, cXZ*dX*dZ, ...
    """
    dim = 6
    # CART_COL_NAMES = ['X', 'Y', 'Z', 'U', 'V', 'W',
    #                   'dX', 'dY', 'dZ', 'dU', 'dV', 'dW',
    #                   'c_XY', 'c_XZ', 'c_XU', 'c_XV', 'c_XW',
    #                   'c_YZ', 'c_YU', 'c_YV', 'c_YW',
    #                   'c_ZU', 'c_ZV', 'c_ZW',
    #                   'c_UV', 'c_UW',
    #                   'c_VW']
    mean = np.zeros(dim)
    for i, col_name in enumerate(CART_COL_NAMES[:6]):
        mean[i] = row[col_name]
    std_vec = np.zeros(dim)
    for i, col_name in enumerate(CART_COL_NAMES[6:12]):
        std_vec[i] = row[col_name]
    corr_tri = np.zeros((dim, dim))
    # Insert upper triangle (top right) correlations
    for i, col_name in enumerate(CART_COL_NAMES[12:]):
        corr_tri[np.triu_indices(dim, 1)[0][i], np.triu_indices(dim, 1)[1][i]] = row[col_name]
    # Build correlation matrix
    corr_mat = np.eye(6) + corr_tri + corr_tri.T
    # Multiply through by standard deviations
    cov_mat = corr_mat * std_vec * std_vec.reshape(6, 1)
    return mean, cov_mat
|
f680035a39e72c9685cd563fb092109d3beb3add
| 3,639,671
|
import inspect
def getNumArgs(obj):
    """Return the number of "normal" arguments a callable object takes."""
    sig = inspect.signature(obj)
    return sum(1 for p in sig.parameters.values()
               if p.kind == inspect.Parameter.POSITIONAL_ONLY or
               p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD)
|
c2e9edef0b2d8c18a0f9e2af90a6a1573705d590
| 3,639,672
|
import numpy as np
def min_distance_from_point(vec, p):
    """
    Minimal distance between a single point and each point along a vector (in N dimensions)
    """
    return np.apply_along_axis(np.linalg.norm, 1, vec - p).min()
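# Example (hypothetical points): the closer of two points to p
# vec = np.array([[3.0, 4.0], [6.0, 8.0]]); p = np.array([0.0, 0.0])
# min_distance_from_point(vec, p) == 5.0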
|
2b21dec14dcb4026d97d6321d4549f49a9520218
| 3,639,673
|
def create_environment(env_config):
    """Creates a simple sequential testing environment."""
    if env_config['num_candidates'] < 4:
        raise ValueError('num_candidates must be at least 4.')
    SimpleSequentialResponse.MAX_DOC_ID = env_config['num_candidates'] - 1
    user_model = SimpleSequentialUserModel(
        env_config['slate_size'],
        seed=env_config['seed'],
        starting_probs=env_config['starting_probs'])
    document_sampler = SimpleSequentialDocumentSampler(seed=env_config['seed'])
    simple_seq_env = environment.Environment(
        user_model,
        document_sampler,
        env_config['num_candidates'],
        env_config['slate_size'],
        resample_documents=env_config['resample_documents'])
    return recsim_gym.RecSimGymEnv(simple_seq_env, total_reward,
                                   lambda _, __, ___: None, lambda _, __: None)
|
eef78ba1f134b492126b51dd13357ea8687df319
| 3,639,674
|
def nb_to_python(nb_path):
    """convert notebook to python script"""
    exporter = python.PythonExporter()
    output, resources = exporter.from_filename(nb_path)
    return output
|
4a918102fc9e6c35e3c7db89f33dc5c081a17df1
| 3,639,675
|
def add(data_source: DataSource) -> DataSource:
    """
    Add a new data source to AuroraX
    Args:
        data_source: the data source to add (note: it must be a fully-defined
            DataSource object)
    Returns:
        the newly created data source
    Raises:
        pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error
        pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error
        pyaurorax.exceptions.AuroraXDuplicateException: duplicate data source, already exists
    """
    # set up request
    request_data = {
        "program": data_source.program,
        "platform": data_source.platform,
        "instrument_type": data_source.instrument_type,
        "source_type": data_source.source_type,
        "display_name": data_source.display_name,
        "ephemeris_metadata_schema": data_source.ephemeris_metadata_schema,
        "data_product_metadata_schema": data_source.data_product_metadata_schema,
        "metadata": data_source.metadata
    }
    if (data_source.identifier is not None):
        request_data["identifier"] = data_source.identifier
    # make request
    req = AuroraXRequest(method="post",
                         url=urls.data_sources_url,
                         body=request_data)
    res = req.execute()
    # evaluate response
    if (res.status_code == 409):
        raise AuroraXDuplicateException("%s - %s" % (res.data["error_code"],
                                                     res.data["error_message"]))
    # return
    try:
        return DataSource(**res.data)
    except Exception:
        raise AuroraXException("Could not create data source")
|
4a1d39c9280308b6dda8835663a57ba62aca7f21
| 3,639,676
|
def read_text(file, num=False):
    """ Read from txt [file].
    If [num], then data is numerical data and will need to convert each
    string to an int.
    """
    with open(file, 'r') as f:
        data = f.read().splitlines()
    if num:
        data = [int(i) for i in data]
    return data
|
f9b61d254b1c2188ae6be3b9260f94f0657bcd3a
| 3,639,678
|
from scipy.interpolate import Rbf
def interpolate_rbf(x, y, z, x_val, y_val, z_val):
    """Radial basis function interpolation.
    Parameters
    ----------
    x : np.ndarray
        x-faces or x-edges of a mesh
    y : np.ndarray
        y-faces or y-edges of a mesh
    z : np.ndarray
        z-faces or z-edges of a mesh
    x_val : np.ndarray
        curl values or electric field values in the x-direction
    y_val : np.ndarray
        curl values or electric field values in the y-direction
    z_val : np.ndarray
        curl values or electric field values in the z-direction
    Returns
    -------
    tuple of scipy.interpolate.Rbf
        radial basis function interpolation objects for the x, y and z
        components
    """
    x_interpolated = Rbf(x[:, 0], x[:, 1], x[:, 2], x_val)
    y_interpolated = Rbf(y[:, 0], y[:, 1], y[:, 2], y_val)
    z_interpolated = Rbf(z[:, 0], z[:, 1], z[:, 2], z_val)
    return x_interpolated, y_interpolated, z_interpolated
|
35f833a620fabbfa786b1d8e829e378b24d202ad
| 3,639,679
|
import numpy as np
def get_batch(image_files, width, height, mode='RGB'):
    """
    Get a single batch of data as a NumPy array
    """
    data_batch = np.array(
        [get_image(sample_file, width, height, mode) for sample_file in image_files]).astype(np.float32)
    # Make sure the images are in 4 dimensions
    if len(data_batch.shape) < 4:
        data_batch = data_batch.reshape(data_batch.shape + (1,))
    return data_batch
|
b94d095712c14bee2d856b1dba7a6e7286f5f16e
| 3,639,681
|
def ScriptProvenanceConst_get_decorator_type_name():
    """ScriptProvenanceConst_get_decorator_type_name() -> std::string"""
    return _RMF.ScriptProvenanceConst_get_decorator_type_name()
|
a15d001dea73333e16c21697c95a8c11d6567264
| 3,639,682
|
def parse_annotation(parameter):
    """
    Tries to parse an internal annotation referencing ``Client`` or ``InteractionEvent``.
    Parameters
    ----------
    parameter : ``Parameter``
        The respective parameter's representation.
    Returns
    -------
    choices : `None` or `dict` of (`str` or `int`, `str`) items
        Parameter's choices.
    description : `None` or `str`
        Parameter's description.
        > Returned as `None` for internal parameters or if `description` could not be detected.
    name : `str`
        The parameter's name.
    type_ : `int`
        The parameter's internal type identifier.
    channel_types : `None` or `tuple` of `int`
        The accepted channel types.
    max_value : `None`, `int`, `float`
        The maximal accepted value.
    min_value : `None`, `int`, `float`
        The minimal accepted value.
    Raises
    ------
    ValueError
        - If `parameter` annotation tuple's length is out of range [2:3].
        - If `parameter` annotation tuple refers to an internal type.
    TypeError
        Parameter's type refers to an unknown type or string value.
    """
    if parameter.has_annotation:
        annotation_value = parameter.annotation
        if isinstance(annotation_value, tuple):
            if len(annotation_value) == 0:
                annotation_value = parameter.name
            else:
                return parse_annotation_tuple(parameter)
        elif isinstance(annotation_value, SlashParameter):
            return parse_annotation_slash_parameter(annotation_value, parameter.name)
    else:
        annotation_value = parameter.name
    if not isinstance(annotation_value, (str, type)):
        raise TypeError(f'Parameter `{parameter.name}` is neither `tuple`, `str`, nor `type` instance; got '
                        f'{annotation_value.__class__.__name__}; {annotation_value!r}.')
    else:
        annotation_type = parse_annotation_internal(annotation_value)
        if annotation_type is None:
            annotation_type, choices, channel_types = parse_annotation_type_and_choice(annotation_value, parameter.name)
        else:
            choices = None
            channel_types = None
    return choices, None, parameter.name, annotation_type, channel_types, None, None
|
076e0cf5dd60eec8624310bac96dccf53d11d441
| 3,639,684
|
def search(request):
"""
Search results
"""
query = request.GET.get('query')
res = MsVerse.objects.filter(raw_text__icontains=query).order_by(
'verse__chapter__book__num',
'verse__chapter__num',
'verse__num',
'hand__manuscript__liste_id')
return default_response(request,
'search.html',
{'results': res,
'query': query})
|
4d5fafad400018981de68006540f4d990a1ebcea
| 3,639,685
|
from typing import Tuple
from typing import List
def _compare(pair: Tuple[List[int], List[int]]) -> float:
"""Just a wrapper for fingerprints.compare, that unpack its first argument"""
return fingerprints.compare(*pair)
|
9b7947898e2cbf5579a7e31dc385b54a0a1bdd62
| 3,639,686
|
import operator
import re
def output_onto(conll_tokens, markstart_dict, markend_dict, file_name):
"""
Outputs analysis results in OntoNotes .coref XML format
:param conll_tokens: List of all processed ParsedToken objects in the document
:param markstart_dict: Dictionary from markable starting token ids to Markable objects
    :param markend_dict: Dictionary from markable ending token ids to Markable objects
    :param file_name: Document name used in the DOCNO attribute
    :return: serialized XML
"""
output_string = '<DOC DOCNO="' + file_name + '">\n<TEXT PARTNO="000">\n'
for out_tok in conll_tokens:
if int(out_tok.id) in markstart_dict:
for out_mark in sorted(markstart_dict[int(out_tok.id)], key=operator.attrgetter('end'), reverse=True):
output_string += '<COREF ID="' + str(out_mark.group) + '" ENTITY="' + out_mark.entity + '" INFSTAT="' + out_mark.infstat
if not out_mark.antecedent == "none":
output_string += '" TYPE="' + out_mark.coref_type
output_string += '">'
if int(out_tok.id) > 0:
output_string += re.sub("&","&",out_tok.text) if ";" not in out_tok.text else out_tok.text
if int(out_tok.id) in markend_dict:
for out_mark in markend_dict[int(out_tok.id)]:
output_string += "</COREF>"
if int(out_tok.id) > 0:
output_string += ' '
return output_string + "\n</TEXT>\n</DOC>\n"
|
f1a917e85735e9581326e60e3add94176e4f84cc
| 3,639,687
|
import numpy as np
def vertical() -> np.ndarray:
    """Returns the Jones matrix for a vertical linear polarizer."""
    return np.asarray([[0, 0], [0, 1]])
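A quick check of the matrix's action on Jones vectors (my addition, assuming the [Ex, Ey] convention):
import numpy as np
J = vertical()
print(J @ np.array([0, 1]))  # [0 1]: vertically polarized light passes
print(J @ np.array([1, 0]))  # [0 0]: horizontally polarized light is blocked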
|
692653446e0e7f96bf2970353f7de702b9e502ca
| 3,639,688
|
def resource_id(d, i, r):
"""Get resource id from meter reading.
:param d: Report definition
    :type d: Dict
:param i: Item definition
:type i: Dict
:param r: Meter reading
:type r: usage.reading.Reading
"""
return _get_reading_attr(r, 'resource_id')
|
73700abbbf34f634435e1f95d52d2730cc3d532b
| 3,639,689
|
import logging
def create_provider_router(neutron_client, project_id):
"""Create the provider router.
:param neutron_client: Authenticated neutronclient
:type neutron_client: neutronclient.Client object
:param project_id: Project ID
:type project_id: string
:returns: Router object
:rtype: dict
"""
routers = neutron_client.list_routers(name='provider-router')
if len(routers['routers']) == 0:
logging.info('Creating provider router for external network access')
router_info = {
'router': {
'name': 'provider-router',
'tenant_id': project_id
}
}
router = neutron_client.create_router(router_info)['router']
logging.info('New router created: %s', (router['id']))
else:
logging.warning('Router provider-router already exists.')
router = routers['routers'][0]
return router
|
c9eb1de728d141d73c9f7b169df87c01829892f6
| 3,639,690
|
from typing import List
import shlex
def split(string: str) -> List[str]:
"""
Split string (which represents a command) into a list.
This allows us to just copy/paste command prefixes without having to define a full list.
"""
return shlex.split(string)
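Example (my addition): shlex keeps quoted arguments together, unlike a plain str.split:
print(split("git commit -m 'initial commit'"))
# ['git', 'commit', '-m', 'initial commit']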
|
360fceeba7d6280e27068f61d2420cfd9fbfbcc2
| 3,639,691
|
def compute_prevalence_percentage(df, groupby_fields):
    """
    Compute each group's topic weight as a proportional share, e.g. with
    groupby_fields = ['topic_id', 'year'].
    Relies on a module-level ``labels`` dataframe for the final merge.
    """
    pdf = df.groupby(groupby_fields).agg({'norm_topic_weight': 'sum'})
    pdf2 = pdf.groupby(level=0).apply(lambda x: x / x.sum()).reset_index()
    groupby_fields.append('proportional_weight')
    pdf2.columns = groupby_fields
    pdf2 = pdf2.merge(labels, on=groupby_fields[1])
    return pdf2
|
72bc8f04c6cf05d64ddd36b93a73a81136dfedf9
| 3,639,692
|
from datetime import datetime
import pandas as pd
def get_token_history(address) -> pd.DataFrame:
"""Get info about token historical transactions. [Source: Ethplorer]
Parameters
----------
address: str
Token e.g. 0xf3db5fa2c66b7af3eb0c0b782510816cbe4813b8
Returns
-------
pd.DataFrame:
DataFrame with token historical transactions.
"""
response = make_request("getTokenHistory", address, limit=1000)
all_operations = []
operations = response["operations"]
try:
first_row = operations[0]["tokenInfo"]
name, symbol, _ = (
first_row.get("name"),
first_row.get("symbol"),
first_row.get("balance"),
)
decimals = first_row.get("decimals")
except Exception:
name, symbol = "", ""
decimals = None
for operation in operations:
operation.pop("type")
operation.pop("tokenInfo")
operation["timestamp"] = datetime.fromtimestamp(operation["timestamp"])
all_operations.append(operation)
df = pd.DataFrame(all_operations)
if df.empty:
return df
df[["name", "symbol"]] = name, symbol
df["value"] = df["value"].astype(float) / (10 ** int(decimals))
return df[["timestamp", "name", "symbol", "value", "from", "to", "transactionHash"]]
|
941d02b3ef4a4525e376c1b90519391c97e128eb
| 3,639,693
|
def top1_accuracy(pred, y):
"""Main evaluation metric."""
return sum(pred.argmax(axis=1) == y) / float(len(y))
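A quick numeric check (my addition), assuming pred is an (n_samples, n_classes) score array:
import numpy as np
pred = np.array([[0.9, 0.1], [0.2, 0.8], [0.7, 0.3]])
y = np.array([0, 1, 1])
print(top1_accuracy(pred, y))  # 2 of 3 argmax predictions match -> 0.666...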
|
d011b432c7c04331ff09d16ba8151c8c4f056ead
| 3,639,694
|
import requests
from pandas import json_normalize
def dividend_history(symbol):
    """
    Returns the dividend payment history of the given stock symbol.
    Args:
        symbol (:obj:`str`, required): 3-character ticker of the desired stock.
    """
    data = requests.get('https://apipubaws.tcbs.com.vn/tcanalysis/v1/company/{}/dividend-payment-histories?page=0&size=20'.format(symbol)).json()
df = json_normalize(data['listDividendPaymentHis']).drop(columns=['no', 'ticker'])
return df
|
0775deaeaa4a6a574af62821273cbd052625c889
| 3,639,696
|
def reftype_to_pipelines(reftype, cal_ver=None, context=None):
"""Given `exp_type` and `cal_ver` and `context`, locate the appropriate SYSTEM CRDSCFG
reference file and determine the sequence of pipeline .cfgs required to process that
exp_type.
"""
context = _get_missing_context(context)
cal_ver = _get_missing_calver(cal_ver)
with log.augment_exception("Failed determining required pipeline .cfgs for",
"EXP_TYPE", srepr(reftype), "CAL_VER", srepr(cal_ver)):
config_manager = _get_config_manager(context, cal_ver)
return config_manager.reftype_to_pipelines(reftype)
|
a8443ae6e762322681272bb4b348f535aa4b954b
| 3,639,697
|
import numpy as np
def levy(x: np.ndarray):
"""
The function is usually evaluated on the hypercube xi ∈ [-10, 10], for all i = 1, …, d.
:param x: c(x1, x2, ..., xd)
:return: the y-value (float)
"""
w = 1 + (x - 1) / 4 # same shape as x
term1 = (np.sin(np.pi * w.T[0])) ** 2
    term3 = (w.T[-1] - 1) ** 2 * (1 + (np.sin(2 * np.pi * w.T[-1])) ** 2)
    wi = w.T[:-1]
    term2 = np.sum((wi - 1) ** 2 * (1 + 10 * (np.sin(np.pi * wi + 1)) ** 2))
    return term1 + term2 + term3
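Sanity check (my addition): the Levy function attains its global minimum f(x*) = 0 at x* = (1, ..., 1):
import numpy as np
print(levy(np.ones(5)))            # ~0.0, up to floating-point noise
print(levy(np.array([0.0, 0.0])))  # a positive value away from the minimum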
|
e24744982def1509548dd269be596bf310ff6eb6
| 3,639,698
|
import warnings
def _select_programme(state, audio_programme=None):
"""Select an audioProgramme to render.
    If audio_programme is provided, use that to make the selection,
otherwise select the only audioProgramme, or the one with the lowest id.
Parameters:
state (_ItemSelectionState): 'adm' must be set.
audio_programme (AudioProgramme): audioProgramme to select if there are
multiple programmes.
Returns:
_ItemSelectionState: state with audioProgramme set if one is found, None otherwise.
"""
if audio_programme is None:
if len(state.adm.audioProgrammes) > 1:
warnings.warn("more than one audioProgramme; selecting the one with the lowest id")
return evolve(state,
audioProgramme=min(state.adm.audioProgrammes, key=lambda programme: programme.id))
elif len(state.adm.audioProgrammes) == 1:
return evolve(state, audioProgramme=state.adm.audioProgrammes[0])
else:
return evolve(state, audioProgramme=None)
else:
assert in_by_id(audio_programme, state.adm.audioProgrammes), "selected audioProgramme not in ADM."
return evolve(state, audioProgramme=audio_programme)
|
a7e5cbc9ad2be80b7bfd5f3651b610c83b3f15fe
| 3,639,699
|
def puzzles():
"""
    Return the TOP95 puzzle strings, one per non-empty line
"""
return [l for l in TOP95.split("\n") if l]
|
def2fefe114fe2867f2d465dbe4b55ae74287e09
| 3,639,700
|
from datetime import datetime, timedelta
def test_declarative_sfc_obs_full(ccrs):
"""Test making a full surface observation plot."""
data = pd.read_csv(get_test_data('SFC_obs.csv', as_file_obj=False),
infer_datetime_format=True, parse_dates=['valid'])
obs = PlotObs()
obs.data = data
obs.time = datetime(1993, 3, 12, 13)
obs.time_window = timedelta(minutes=15)
obs.level = None
obs.fields = ['tmpf', 'dwpf', 'emsl', 'cloud_cover', 'wxsym']
obs.locations = ['NW', 'SW', 'NE', 'C', 'W']
obs.colors = ['red', 'green', 'black', 'black', 'blue']
obs.formats = [None, None, lambda v: format(10 * v, '.0f')[-3:], 'sky_cover',
'current_weather']
obs.vector_field = ('uwind', 'vwind')
obs.reduce_points = 1
# Panel for plot with Map features
panel = MapPanel()
panel.layout = (1, 1, 1)
    panel.area = 'il'
panel.projection = ccrs.PlateCarree()
panel.layers = ['coastline', 'borders', 'states']
panel.plots = [obs]
# Bringing it all together
pc = PanelContainer()
pc.size = (10, 10)
pc.panels = [panel]
pc.draw()
return pc.figure
|
780b4462ba01ddcd20a1e87ef8637ca174293af8
| 3,639,701
|
def standardize_ants_data(ants_data, subject_ID_col):
""" Takes df from ANTs output and stadardizes column names for both left and right hemi
"""
ants_useful_cols = ['Structure Name']
ants_to_std_naming_dict = {}
ants_to_std_naming_dict['Structure Name'] = subject_ID_col #'SubjID'
for roi in ants_data.columns:
prefix = None
name_split = roi.split(' ')
if name_split[0] == 'left':
prefix = 'L'
if name_split[0] == 'right':
prefix = 'R'
if prefix is not None:
ants_useful_cols.append(roi)
std_name = prefix + '_' + ''.join(name_split[1:])
ants_to_std_naming_dict[roi] = std_name
ants_data_std = ants_data[ants_useful_cols].copy()
ants_data_std = ants_data_std.rename(columns=ants_to_std_naming_dict)
# Splitting SubjID column to ignore site name
    ants_data_std[subject_ID_col] = ants_data_std[subject_ID_col].str.rsplit('_', n=1).str[-1]
return ants_data_std
|
0f5216fd75244b0b9b60fdcdf05d63bfd02a2ed9
| 3,639,702
|
from math import ceil
from numpy import meshgrid, linspace, vstack
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point
def make_gridpoints(bbox, resolution=1, return_coords=False):
"""It constructs a grid of points regularly spaced.
Parameters
----------
bbox : str, GeoDataFrame or dict.
Corresponds to the boundary box in which the grid will be formed.
If a str is provided, it should be in '(S,W,N,E)' format. With a
GeoDataFrame, we will use the coordinates of the extremities. Also
one can provide a dict with 'south', 'north', 'east', 'west'.
resolution : float, default is 1.
        Spacing, in kilometres, between the points of the resulting grid.
return_coords : bool
If it is wanted to return the coordinate sequences.
"""
bbox_ = parse_bbox(bbox)
b_s, b_w, b_n, b_e = map(float, bbox_[1:-1].split(','))
nlon = int(ceil((b_e-b_w) / (resolution/111.32)))
nlat = int(ceil((b_n-b_s) / (resolution/110.57)))
lonv, latv = meshgrid(linspace(b_w, b_e, nlon), linspace(b_s, b_n, nlat))
gridpoints = pd.DataFrame(vstack([lonv.ravel(), latv.ravel()]).T,
columns=['lon', 'lat'])
gridpoints['geometry'] = gridpoints.apply(lambda x: Point([x['lon'], x['lat']]),
axis=1)
gridpoints = gpd.GeoDataFrame(gridpoints, crs={'init': 'epsg:4326'})
if isinstance(bbox, gpd.GeoDataFrame):
grid_ix = gpd.sjoin(gridpoints, bbox, op='intersects').index.unique()
gridpoints = gridpoints.loc[grid_ix]
if return_coords:
return gridpoints, lonv, latv
return gridpoints
|
8ccc5b257666cb8bd3a87662c6b021b4ed49ccb9
| 3,639,703
|
def removeElement_2(nums, val):
    """
    Using one loop and two pointers.
    Doesn't preserve order.
    Returns the new logical length of nums after removing val in-place.
    """
    i = 0
    j = len(nums)
    while i < j:
        if nums[i] == val:
            # Overwrite the current slot with the last unchecked element
            # and shrink the logical length of the list
            nums[i] = nums[j - 1]
            j -= 1
        else:
            i += 1
    return j
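Quick check of the in-place removal (my addition):
nums = [3, 2, 2, 3]
k = removeElement_2(nums, 3)
print(k, sorted(nums[:k]))  # 2 [2, 2]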
|
e1a836514a09fc925a49b144880960b057dfff80
| 3,639,704
|
def decrypt_ballot_shares(
request: DecryptBallotSharesRequest = Body(...),
scheduler: Scheduler = Depends(get_scheduler),
) -> DecryptBallotSharesResponse:
"""
Decrypt this guardian's share of one or more ballots
"""
ballots = [
SubmittedBallot.from_json_object(ballot) for ballot in request.encrypted_ballots
]
context = CiphertextElectionContext.from_json_object(request.context)
election_key_pair = read_json_object(
request.guardian.election_keys, ElectionKeyPair
)
shares = [
compute_decryption_share_for_ballot(
election_key_pair, ballot, context, scheduler
)
for ballot in ballots
]
response = DecryptBallotSharesResponse(
shares=[write_json_object(share) for share in shares]
)
return response
|
c10b9961c2f86e9d9cf26d75e276cd65b3dcfdc4
| 3,639,705
|
def returnHumidity(dd):
""" Returns humidity data if it exists in the dictionary"""
rh = []
if 'RH' in dd:
rh = dd['RH']
elif 'RH1' in dd:
rh = dd['RH1']
else:
# Convert the dew point temperature to relative humidity
Pmb = dd['airpres']/10 # hPa to mb
rh = airsea.relHumFromTdew(dd['airtemp'],dd['airdewpoint'],Pmb)
return rh
|
b51d5d23247780683d9d644f59e442b1c77210e8
| 3,639,706
|
def fixture_penn_chime_raw_df_no_beta(penn_chime_setup) -> DataFrame:
"""Runs penn_chime SIR model for no social policies
"""
p, simsir = penn_chime_setup
n_days = simsir.raw_df.day.max() - simsir.raw_df.day.min()
policies = [(simsir.beta, n_days)]
raw = sim_sir(
simsir.susceptible,
simsir.infected,
p.recovered,
simsir.gamma,
-simsir.i_day,
policies,
)
calculate_dispositions(raw, simsir.rates, market_share=p.market_share)
calculate_admits(raw, simsir.rates)
calculate_census(raw, simsir.days)
raw_df = DataFrame(raw)
return raw_df
|
8e1d654b4e171e8ab55023bfb55135d5067d7052
| 3,639,707
|
import json
import hashlib
def _verify_manifest_signature(manifest, text, digest):
"""
Verify the manifest digest and signature
"""
format_length = None
format_tail = None
if 'signatures' in manifest:
for sig in manifest['signatures']:
protected_json = _jose_decode_base64(sig['protected'])
protected = json.loads(protected_json)
curr_tail = _jose_decode_base64(protected['formatTail'])
if format_tail is None:
format_tail = curr_tail
elif format_tail != curr_tail:
msg = 'formatTail did not match between signature blocks'
raise ValueError(msg)
if format_length is None:
format_length = protected['formatLength']
elif format_length != protected['formatLength']:
msg = 'formatLen did not match between signature blocks'
raise ValueError(msg)
    if format_length is None or format_tail is None:
        raise ValueError('Manifest contained no signature blocks')
    message = text[0:format_length] + format_tail
if hashlib.sha256(message).hexdigest() != digest:
msg = 'Failed to match manifest digest to downloaded content'
raise ValueError(msg)
return True
|
d3c5cebcb6f63723d7356be8def0824bb3cd2726
| 3,639,708
|
def ETL_work():
""" ETL page"""
return render_template("ETL_work.html")
|
08806ed7154f4820db961b54a5c852bd0c275532
| 3,639,710
|
def advanced_perm_check_function(*rules_sets, restrictions=None):
"""
Check channels and permissions, use -s -sudo or -a -admin to run it.
Args:
*rules_sets: list of rules, 1d or 2d,
restrictions: Restrictions must be always met
Returns:
        output of the wrapped coroutine if the permission check passes
"""
def decorator(coro):
async def f(ctx, *args, **kwargs):
valid = _check_advanced_perm(ctx,
*args,
**kwargs,
rule_sets=[*rules_sets], restrictions=restrictions)
if valid:
output = await coro(ctx, *args, **kwargs)
return output
else:
# logger.error(f"Permission check failed! Exceptions should be raised earlier!")
raise CommandError("Permission check failed.")
f.__name__ = coro.__name__
f.__doc__ = coro.__doc__
return f
return decorator
|
acf6f5494fcc632fec3bb665778a6bed3e58f19d
| 3,639,711
|
def worker_id():
"""Return a predefined worker ID.
Returns:
        int: The static worker id
"""
return 123
|
8c8e9c570a2355a15fd9a4d1d03d0159a33ffba0
| 3,639,712
|
from urllib.parse import urljoin
def get_urls(spec):
"""Small convenience method to construct the URLs of the Jupyter server."""
host_url = f"http{'s' if spec['routing']['tls']['enabled'] else ''}://{spec['routing']['host']}"
full_url = urljoin(
host_url,
spec["routing"]["path"].rstrip("/"),
)
return host_url, full_url
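A hedged example (my addition) with a hypothetical spec dict shaped like the fields accessed above:
spec = {"routing": {"tls": {"enabled": True},
                    "host": "hub.example.org",
                    "path": "/user/alice/"}}
print(get_urls(spec))
# ('https://hub.example.org', 'https://hub.example.org/user/alice')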
|
059913ed12b021fce5964d54bf8b9b22132f914f
| 3,639,713
|
def resubs(resubpairs, target):
    """Takes several compiled-regex find/replace pairs [(find1, replace1), (find2, replace2), ...]
    and applies them to a target in the order given"""
    for pattern, replacement in resubpairs:
        target = pattern.sub(replacement, target)
    return target
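Example (my addition), assuming the pairs hold pre-compiled patterns as the original attribute access suggests:
import re
pairs = [(re.compile(r'\s+'), ' '), (re.compile(r'foo'), 'bar')]
print(resubs(pairs, 'foo   baz'))  # 'bar baz'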
|
49b371211de991c323fdec313801aba0ded8ff93
| 3,639,714
|
import functools
import time
def time_profile(func):
"""Time Profiled for optimisation
Notes:
* Do not use this in production
"""
@functools.wraps(func)
def profile(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
print(f"{func.__name__} : {time.time() - start}")
return result
return profile
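Usage sketch (my addition); slow_add is a hypothetical function:
import time
@time_profile
def slow_add(a, b):
    time.sleep(0.1)
    return a + b
slow_add(1, 2)  # prints e.g. "slow_add : 0.100..."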
|
a4dde1d66f5987b4be1e9179da1570c252540363
| 3,639,715
|
from typing import Type
import uuid
from datetime import datetime, timezone
async def create_guid(data: GuidIn) -> Type[GuidOut]:
"""
Create a record w/o specifying a guid.
Also cleans up expired records & caches the new record.
"""
guid = uuid.uuid4().hex
validated = data.dict()
try:
await create_guid_record(guid, validated['name'], validated['expire'])
except Exception as detail:
raise HTTPException(status_code=400, detail=f'{detail}')
# Build serialized response
out = GuidOut(
id=guid,
expire=validated['expire'],
name=validated['name'],
)
# Cache stuff
ttl = validated['expire'] - datetime.now(timezone.utc)
await cache.set(guid, out, ttl=ttl.seconds)
return out
|
0ff0f95acc6268e5ccd081156cc9355c8db520db
| 3,639,716
|
import os
def exp_for(iterable, filename, display=False):
"""
Run an experiment for words in given iterable and save its
results to PATH_RESULTS/filename. If display is set to True, also
print output to screen.
The output is formated as follows:
[word]RESULT_SEP[size]RESULT_SEP[n+x]RESULT_SEP[diff with upperbound]
"""
fp = open(os.path.join(PATH_RESULTS, 'mdfa_'+filename), 'w')
i = 0
for word, size in results_for(iterable):
output = '%s%s%d%s%d+%d%s%d' % (word, RESULT_SEP,
size, RESULT_SEP, len(word), size - len(word),
            RESULT_SEP, size - (len(word) + len(word) // 2))
fp.write(output+'\n')
if display:
            print(output)
i += 1
fp.close()
return i
|
11933d4b4c77dcae354b307cb27ab27af3e49311
| 3,639,717
|