content
stringlengths 35
762k
| sha1
stringlengths 40
40
| id
int64 0
3.66M
|
|---|---|---|
import torch
from typing import Tuple
def get_median_and_stdev(arr: torch.Tensor) -> Tuple[float, float]:
    """Return the median and (sample) standard deviation of *arr* as floats."""
    median_value = torch.median(arr).item()
    std_value = torch.std(arr).item()
    return median_value, std_value
|
d8fca5a97f00d14beecaa4b508442bc7a3637f86
| 3,640,180
|
def connect(user, host, port):
    """Create and return a new SSHClient connected to the given host.

    Loops until a connection succeeds, re-prompting for a password (or key
    passphrase) on authentication failures; aborts on fatal errors.

    :param user: remote username to authenticate as
    :param host: hostname or IP address of the remote machine
    :param port: TCP port of the remote SSH daemon (int or str)
    :return: a connected ``ssh.SSHClient`` instance
    """
    client = ssh.SSHClient()
    # Honor the env flags controlling known-hosts behavior.
    if not env.disable_known_hosts:
        client.load_system_host_keys()
    if not env.reject_unknown_hosts:
        client.set_missing_host_key_policy(ssh.AutoAddPolicy())
    connected = False
    password = get_password()
    while not connected:
        try:
            client.connect(
                hostname=host,
                port=int(port),
                username=user,
                password=password,
                key_filename=env.key_filename,
                timeout=10,
                allow_agent=not env.no_agent,
                look_for_keys=not env.no_keys
            )
            connected = True
            return client
        # BadHostKeyException corresponds to key mismatch, i.e. what on the
        # command line results in the big banner error about man-in-the-middle
        # attacks.
        except ssh.BadHostKeyException:
            abort("Host key for %s did not match pre-existing key! Server's key was changed recently, or possible man-in-the-middle attack." % env.host)
        # Prompt for new password to try on auth failure.
        # FIX: converted the Python 2 ``except (...), e`` syntax (a
        # SyntaxError on Python 3) to ``except (...) as e``.
        except (
            ssh.AuthenticationException,
            ssh.PasswordRequiredException,
            ssh.SSHException
        ) as e:
            # For whatever reason, empty password + no ssh key or agent results
            # in an SSHException instead of an AuthenticationException. Since
            # it's difficult to do otherwise, we must assume empty password +
            # SSHException == auth exception. Conversely: if we get
            # SSHException and there *was* a password -- it is probably
            # something non auth related, and should be sent upwards.
            if e.__class__ is ssh.SSHException and password:
                abort(str(e))
            # Otherwise, assume an auth exception, and prompt for new/better
            # password.
            #
            # Paramiko doesn't handle prompting for locked private keys (i.e.
            # keys with a passphrase and not loaded into an agent) so we have
            # to detect this and tweak our prompt slightly. (Otherwise,
            # however, the logic flow is the same, because Paramiko's connect()
            # method overrides the password argument to be either the login
            # password OR the private key passphrase. Meh.)
            #
            # NOTE: This will come up if you normally use a
            # passphrase-protected private key with ssh-agent, and enter an
            # incorrect remote username, because Paramiko:
            #
            # * Tries the agent first, which will fail as you gave the wrong
            #   username, so obviously any loaded keys aren't gonna work for a
            #   nonexistent remote account;
            # * Then tries the on-disk key file, which is passphrased;
            # * Realizes there's no password to try unlocking that key with,
            #   because you didn't enter a password, because you're using
            #   ssh-agent;
            # * In this condition (trying a key file, password is None)
            #   Paramiko raises PasswordRequiredException.
            text = None
            if e.__class__ is ssh.PasswordRequiredException:
                # NOTE: we can't easily say WHICH key's passphrase is needed,
                # because Paramiko doesn't provide us with that info, and
                # env.key_filename may be a list of keys, so we can't know
                # which one raised the exception. Best not to try.
                prompt = "[%s] Passphrase for private key"
                text = prompt % env.host_string
            password = prompt_for_password(text, user=user)
            # Update env.password, env.passwords if empty
            set_password(password)
        # Ctrl-D / Ctrl-C for exit
        except (EOFError, TypeError):
            # Print a newline (in case user was sitting at prompt)
            print('')
            sys.exit(0)
        # Handle timeouts
        except timeout:
            abort('Timed out trying to connect to %s' % host)
        # Handle DNS error / name lookup failure
        except gaierror:
            abort('Name lookup failed for %s' % host)
        # Handle generic network-related errors
        # NOTE: In 2.6, socket.error subclasses IOError
        except socketerror as e:
            # FIX: exception objects are not indexable on Python 3; use
            # e.args[1] (the strerror component of socket errors).
            abort('Low level socket error connecting to host %s: %s' % (
                host, e.args[1])
            )
|
a13a3ce5e80f603f21933c9e6ad48b073368b97e
| 3,640,181
|
import scipy
def _chf_to_pdf(t, x, chf, **chf_args):
"""
Estimate by numerical integration, using ``scipy.integrate.quad``,
of the probability distribution described by the given characteristic
function. Integration errors are not reported/checked.
Either ``t`` or ``x`` must be a scalar.
"""
t = np.asarray(t)
x = np.asarray(x)
def f(u, t, x):
return np.real(
exp(-1j*u*x) / (2*np.pi) * chf(t, u, **chf_args))
if t.shape != ():
pdf = np.empty(t.shape)
for i in np.ndindex(t.shape):
pdf[i] = scipy.integrate.quad(
lambda u: f(u, t[i], x), -np.inf, np.inf)[0]
else:
pdf = np.empty(x.shape)
for i in np.ndindex(x.shape):
pdf[i] = scipy.integrate.quad(
lambda u: f(u, t, x[i]), -np.inf, np.inf)[0]
return pdf
|
7022d335d39c25b73203b63b40b1ebb8c178154b
| 3,640,182
|
import re
import logging
def ParseTraceLocationLine(msg):
    """Parse the location line of a stack trace.

    Returns a (filename, line, method) tuple on success, None otherwise.
    """
    match = re.match(kCodeLocationLine, msg)
    if match is None:
        return None
    try:
        location = (match.group(1), match.group(2), match.group(3))
    except IndexError as e:
        logging.warning('RE matched "%s", but extracted wrong number of items: %r' % (msg, e))
        return None
    return location
|
15e74bb26a7c213cf24171ffdfa32b8d4e6d818a
| 3,640,183
|
import functools
def return_arg_type(at_position):
    """
    Decorator factory: cast the wrapped function's return value to the type
    of the positional argument found at index *at_position*.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            raw = func(*args, **kwargs)
            target_type = type(args[at_position])
            return target_type(raw)
        return wrapper
    return decorator
|
30bf4e4a46b0b64b6cb5752286a13c0e6f7618df
| 3,640,185
|
def extend(s, var, val):
    """Copy dict s and extend it by setting var to val; return copy.

    The original never mutates ``s``.
    """
    # FIX: the ``{**s}`` unpacking syntax has been valid since Python 3.5,
    # so the old eval()-based Python 3.4 fallback (an eval of a constant
    # string at every call) is no longer needed.
    return {**s, var: val}
|
919e7102bf7f8766d9ddb9ea61a07ddd020d1bb8
| 3,640,186
|
from typing import Optional
from typing import Any
def rx_reduce(observable: Observable, accumulator: AccumulatorOperator, seed: Optional[Any] = None) -> Observable:
    """Create an observable which reduces the source with an accumulator and seed value.

    Nothing is emitted per source item; the single accumulated value is
    emitted only when the source completes.

    Args:
        observable (Observable): source observable
        accumulator (AccumulatorOperator): accumulator function (two arguments, one result), async or sync.
        seed (Optional[Any]): optional initial accumulator value (default None)
    Returns:
        (Observable): a new observable emitting the final accumulated value
    """
    # Decide once (not per item) whether the accumulator must be awaited.
    is_awaitable = iscoroutinefunction(accumulator)
    async def _subscribe(an_observer: Observer) -> Subscription:
        nonlocal is_awaitable
        # Per-subscription accumulator state, starting from the seed.
        _buffer = seed
        async def _on_next(item: Any):
            nonlocal _buffer
            # Fold the item into the running value; await only if async.
            _buffer = await accumulator(_buffer, item) if is_awaitable else accumulator(_buffer, item)
        async def _on_completed():
            nonlocal _buffer
            # Emit the final accumulated value, then complete downstream.
            await an_observer.on_next(_buffer)
            await an_observer.on_completed()
        # Other events (e.g. on_error) pass through rx_observer_from unchanged.
        return await observable.subscribe(an_observer=rx_observer_from(observer=an_observer, on_next=_on_next, on_completed=_on_completed))
    return rx_create(subscribe=_subscribe)
|
600d5c47fd7b29ead5293c7a172c8ebdb026706a
| 3,640,187
|
def minimum_filter(
    input,
    size=None,
    footprint=None,
    output=None,
    mode="reflect",
    cval=0.0,
    origin=0,
):
    """Multi-dimensional minimum filter.

    Thin wrapper delegating to ``_min_or_max_filter`` in "min" mode.

    Args:
        input (cupy.ndarray): The input array.
        size (int or sequence of int): One of ``size`` or ``footprint`` must
            be provided; ``size`` is ignored when ``footprint`` is given.
        footprint (cupy.ndarray): Boolean array selecting which neighborhood
            elements feed the filter.
        output (cupy.ndarray, dtype or None): Where to place the output;
            defaults to the input's dtype.
        mode (str): Border handling: ``'reflect'`` (default), ``'constant'``,
            ``'nearest'``, ``'mirror'`` or ``'wrap'``.
        cval (scalar): Fill value past edges when ``mode='constant'``.
        origin (int or sequence of int): Filter placement relative to the
            current element; 0 means centered.

    Returns:
        cupy.ndarray: The result of the filtering.

    .. seealso:: :func:`scipy.ndimage.minimum_filter`
    """
    # The fourth positional slot (a structuring value) is unused for min/max.
    filter_args = (input, size, footprint, None, output, mode, cval, origin, "min")
    return _min_or_max_filter(*filter_args)
|
fbbda2abbd470b98cb03158377256c7397c17da6
| 3,640,189
|
import copy
def lowpass(data, cutoff=0.25, fs=30, order=2, nyq=0.75):
    """
    Butterworth low-pass filter for a single spectrum or a list of them.

    :param data: vector (or list of vectors, one per line) to filter
    :param cutoff: desired cutoff frequency of the filter (default 0.25)
    :param fs: sample rate in Hz (default 30)
    :param order: filter order (default 2)
    :param nyq: Nyquist factor; the cutoff is normalized by ``nyq * fs``
    :returns: filtered data, same nesting as the input
    """
    # Work on a deep copy so the caller's list is never mutated.
    filtered = copy.deepcopy(data)
    normalized_cutoff = cutoff / (nyq * fs)
    b, a = butter(order, normalized_cutoff, btype='low', analog=False)
    if len(np.array(filtered).shape) > 1:
        # 2-D input: filter each row independently, in place.
        for idx in range(len(filtered)):
            filtered[idx] = filtfilt(b, a, filtered[idx])
    else:
        filtered = filtfilt(b, a, filtered)
    return filtered
|
ac42a32c406b1c5a182a1af805e86bf0b0c0606f
| 3,640,190
|
from re import M
def format(value, limit=LIMIT, code=True, offset=0, hard_stop=None, hard_end=0):
    """
    Recursively dereferences an address into string representation, or convert the list representation
    of address dereferences into string representation.
    Arguments:
        value(int|list): Either the starting address to be sent to get, or the result of get (a list)
        limit(int): Number of valid pointers
        code(bool): Hint that indicates the value may be an instruction
        offset(int): Offset into the address to get the next pointer
        hard_stop(int): Value to stop on
        hard_end: Value to append when hard_stop is reached: null, value of hard stop, a string.
    Returns:
        A string representing pointers of each address and reference
        Strings format: 0x0804a10 —▸ 0x08061000 ◂— 0x41414141

    NOTE(review): shadows the builtin ``format``.  ``M`` below is used as a
    colorizer with a ``.get(link, symbol)`` API -- the visible
    ``from re import M`` import looks mis-reconstructed (``re.M`` is an int
    flag); confirm the real import (presumably pwndbg's memory color module).
    """
    limit = int(limit)
    # Allow results from get function to be passed to format
    if isinstance(value, list):
        chain = value
    else:
        chain = get(value, limit, offset, hard_stop, hard_end)
    arrow_left = C.arrow(' %s ' % config_arrow_left)
    arrow_right = C.arrow(' %s ' % config_arrow_right)
    # Colorize the chain
    rest = []
    for link in chain:
        symbol = pwndbg.symbol.get(link) or None
        if symbol:
            symbol = '%#x (%s)' % (link, symbol)
        rest.append(M.get(link, symbol))
    # If the dereference limit is zero, skip any enhancements.
    # NOTE(review): an empty chain would raise IndexError here -- presumably
    # `get` always returns at least one element; confirm.
    if limit == 0:
        return rest[0]
    # Otherwise replace last element with the enhanced information.
    rest = rest[:-1]
    # Enhance the last entry
    # If there are no pointers (e.g. eax = 0x41414141), then enhance
    # the only element there is.
    if len(chain) == 1:
        enhanced = pwndbg.enhance.enhance(chain[-1], code=code)
    # Otherwise, the last element in the chain is the non-pointer value.
    # We want to enhance the last pointer value. If an offset was used
    # chain failed at that offset, so display that offset.
    elif len(chain) < limit + 1:
        enhanced = pwndbg.enhance.enhance(chain[-2] + offset, code=code)
    else:
        # Chain hit the dereference limit: mark it as contiguous/truncated.
        enhanced = C.contiguous('%s' % config_contiguous)
    if len(chain) == 1:
        return enhanced
    return arrow_right.join(rest) + arrow_left + enhanced
|
d8eae5b2cc8dbab9a26d7248faf17fe638f5e603
| 3,640,191
|
def sogs_put(client, url, json, user):
    """
    PUTs a test `client` request to `url` with the given `json` as body and
    X-SOGS-* signature headers signing the request for `user`.
    """
    body = dumps(json).encode()
    signature_headers = x_sogs_for(user, "PUT", url, body)
    return client.put(
        url,
        data=body,
        content_type='application/json',
        headers=signature_headers,
    )
|
7bb3f34d7aff75f422b898ba6eee2908c8bc4ca4
| 3,640,192
|
import tqdm
import requests
def get_results(heading):
    """Get all records under a given record heading from PubChem.

    Pages through the PUG-View annotations endpoint, feeding each page of
    records to ``update_results`` and showing progress with a tqdm bar.

    NOTE(review): ``with tqdm(total=100)`` assumes ``tqdm`` is the class
    (``from tqdm import tqdm``); the visible ``import tqdm`` would make this a
    TypeError -- confirm the real import.
    """
    page = 1
    results = {}
    # total=100 is a placeholder; the bar is reset to the real page count
    # after the first response below.
    with tqdm(total=100) as pbar:
        while True:
            url = (f"https://pubchem.ncbi.nlm.nih.gov/rest/pug_view/annotations/heading/"
                   f"JSON?heading_type=Compound&heading={heading}&page={page}")
            response = requests.get(url)
            records = response.json()
            # Merge this page's records into the accumulated results dict.
            update_results(records, results)
            totalPages = records['Annotations']['TotalPages']
            if page==1:
                pbar.reset(total=totalPages)
            pbar.set_description("%d CIDs described" % len(results))
            pbar.update()
            page += 1
            if page > totalPages:
                break
    return results
|
fba50023290dfde12a54f6d7792f578ecc66e3d9
| 3,640,193
|
def create_dictionary(documents):
    """Build a compacted gensim word dictionary for the given corpus.

    Parameters:
        documents (list of str): set of documents
    Returns:
        gensim.corpora.Dictionary: dictionary of words from the dataset
    """
    word_dictionary = Dictionary(documents)
    # Drop gaps in id numbering left by filtered-out tokens.
    word_dictionary.compactify()
    return word_dictionary
|
bba8e6af363da3fcdde983c6ebf52432323ccf96
| 3,640,194
|
def clone_bitarray(other, src=None):
    """
    Fast clone of a bit array.

    When the fast implementation is available and a destination ``src`` is
    supplied, copy in place and return ``src``; otherwise build a fresh
    bitarray from ``other``.
    """
    if src is not None and FAST_IMPL_PH4:
        src.fast_copy(other)
        return src
    return to_bitarray(other)
|
9196474dff0e6c1b79f9307409c5f351f2c015d7
| 3,640,195
|
from typing import List
from typing import Optional
from typing import Dict
def _create_graph(
    expressions: List[expression.Expression],
    options: calculate_options.Options,
    feed_dict: Optional[Dict[expression.Expression, prensor.Prensor]] = None
) -> "ExpressionGraph":
    """Build the canonical expression graph and calculate its values."""
    original_graph = OriginalExpressionGraph(expressions)
    canonical = CanonicalExpressionGraph(original_graph)
    canonical.calculate_values(options, feed_dict=feed_dict)
    return canonical
|
e7f5a9dbaf3c34a3c925c5d390d6ee44fac57062
| 3,640,196
|
def generate_arn(service, arn_suffix, region=None):
    """Build a formatted AWS ARN string.

    Keyword arguments:
    service -- the AWS service
    arn_suffix -- the majority of the ARN after the initial common data
    region -- the region (may be None for region-free ARNs, yielding an
              empty region segment)
    """
    segments = ("arn", "aws", service, region or "", arn_suffix)
    return ":".join(segments)
|
53dcf55c3fb15784770d1c2d62375d1e750469f8
| 3,640,198
|
def prod_list(lst):
    """Return the product of all numbers in a list.

    Raises ValueError when the list is empty.
    """
    if not lst:
        raise ValueError("List cannot be empty.")
    product = 1
    for value in lst:
        product *= value
    return product
|
8179e2906fb4b517d02972fd4647095d37caf6cd
| 3,640,199
|
def create_attribute(representation_uuid, attribute_name):
    """Create a representation of an attribute of a representation.

    Returns a JSON response carrying the new uuid, or an error message with
    an appropriate HTTP status on failure.
    """
    try:
        new_uuid = get_bus().create_attribute(representation_uuid, attribute_name, public=True)
        payload = {'type': 'uuid', 'uuid': new_uuid}
        return JsonResponse(payload)
    except Exception as exception:
        # Boundary handler: translate any bus failure into (message, status).
        message, status = handle_exception(exception)
        return JsonResponse({'message': message}, status=status)
|
f1ddfbc634b459a7e3f179125fd323beb871957d
| 3,640,200
|
def check_shift(start_time, end_time, final_minute, starting_minute, record):
    """
    Determine the call's shift (daytime 6h-22h vs. nighttime) and compute
    its price.

    :param start_time: hour the call started
    :param end_time: hour the call ended
    :param final_minute: minute component of the end time
    :param starting_minute: minute component of the start time
    :param record: dict with 'start'/'end' timestamps in seconds
    :return value: computed price of the call
    """
    begin_hour = start_time + (starting_minute / 60)
    finish_hour = end_time + (final_minute / 60)
    call_minutes = (record['end'] - record['start']) // 60
    started_in_day = 6 < begin_hour < 22
    ended_in_day = 6 < finish_hour < 22
    if started_in_day and ended_in_day:
        # Entirely within the daytime tariff.
        return 0.36 + call_minutes * 0.09
    if started_in_day:
        # Started in daytime, ended at night: charge per-minute only for the
        # daytime portion, plus both fixed connection fees.
        return 0.36 + ((22 - begin_hour) * 60) * 0.09 + 0.36
    if not ended_in_day:
        # Entirely within the nighttime tariff: flat fee only.
        return 0.36
    # Started at night, ended in daytime: per-minute for the daytime portion.
    return 0.36 + ((finish_hour - 6) * 60) * 0.09 + 0.36
|
666883348347e8408b087ac63acd8608ff589a1c
| 3,640,202
|
import hashlib
def s3_avatar_represent(user_id, tablename="auth_user", gravatar=False, **attr):
    """
    Represent a User as their profile picture or Gravatar

    @param user_id: record id in the given table (falsy => blank avatar)
    @param tablename: either "auth_user" or "pr_person" depending on which
                      table the 'user_id' refers to
    @param gravatar: fall back to Gravatar when no profile image is uploaded
    @param attr: additional HTML attributes for the IMG(), such as _class
    @return: an IMG() helper pointing at the chosen avatar URL
    """
    size = (50, 50)
    if user_id:
        db = current.db
        s3db = current.s3db
        cache = s3db.cache
        table = s3db[tablename]
        email = None
        image = None
        if tablename == "auth_user":
            # Email is on the user record; the profile image is linked via
            # pr_person_user -> pr_image.
            user = db(table.id == user_id).select(table.email,
                                                  cache = cache,
                                                  limitby = (0, 1),
                                                  ).first()
            if user:
                email = user.email.strip().lower()
            ltable = s3db.pr_person_user
            itable = s3db.pr_image
            query = (ltable.user_id == user_id) & \
                    (ltable.pe_id == itable.pe_id) & \
                    (itable.profile == True)
            image = db(query).select(itable.image,
                                     limitby = (0, 1),
                                     ).first()
            if image:
                image = image.image
        elif tablename == "pr_person":
            # Resolve the person entity first, then find the email contact
            # and profile image by pe_id.
            user = db(table.id == user_id).select(table.pe_id,
                                                  cache = cache,
                                                  limitby = (0, 1),
                                                  ).first()
            if user:
                ctable = s3db.pr_contact
                query = (ctable.pe_id == user.pe_id) & \
                        (ctable.contact_method == "EMAIL")
                email = db(query).select(ctable.value,
                                         cache = cache,
                                         limitby = (0, 1),
                                         ).first()
                if email:
                    email = email.value
                itable = s3db.pr_image
                query = (itable.pe_id == user.pe_id) & \
                        (itable.profile == True)
                image = db(query).select(itable.image,
                                         limitby = (0, 1),
                                         ).first()
                if image:
                    image = image.image
        if image:
            # Uploaded profile image wins; scale via the image library.
            image = s3db.pr_image_library_represent(image, size=size)
            size = s3db.pr_image_size(image, size)
            url = URL(c="default", f="download",
                      args=image)
        elif gravatar:
            if email:
                # If no Image uploaded, try Gravatar, which also provides a
                # nice fallback identicon.
                # FIX: hashlib.md5() requires bytes on Python 3 -- hashing
                # the str raised TypeError.
                email_hash = hashlib.md5(email.encode("utf-8")).hexdigest()
                url = "//www.gravatar.com/avatar/%s?s=50&d=identicon" % email_hash
            else:
                url = "//www.gravatar.com/avatar/00000000000000000000000000000000?d=mm"
        else:
            url = URL(c="static", f="img", args="blank-user.gif")
    else:
        url = URL(c="static", f="img", args="blank-user.gif")
    # Sensible defaults, overridable through **attr.
    if "_class" not in attr:
        attr["_class"] = "avatar"
    if "_width" not in attr:
        attr["_width"] = size[0]
    if "_height" not in attr:
        attr["_height"] = size[1]
    return IMG(_src=url, **attr)
|
2c537a57a5d20ed8b4329338883f209fa9678fc4
| 3,640,203
|
from typing import Dict
from typing import List
import pathlib
import json
def json_loader(path_to_json_file: str) -> Dict[str, List[str]]:
    """Reads a JSON file and converts its content into a dictionary.

    Parameters
    ----------
    path_to_json_file: str
        The path to the JSON file.

    Returns
    -------
    Dict[str, List[str]]
        A dictionary of source codes with the corresponding lists of
        instrument symbols of interest for each source.
    """
    # json.load streams straight from the file handle (no intermediate str),
    # and an explicit encoding avoids platform-dependent defaults.
    with pathlib.Path(path_to_json_file).open('r', encoding='utf-8') as infile:
        return json.load(infile)
|
d3f26504078e72e1522981a4d8ca5b60c3b8cf23
| 3,640,204
|
def get_region_solution_attribute(data, region_id, attribute, func, intervention):
    """Extract a region solution attribute, converted via ``func``.

    Raises CasefileLookupError when no matching region/intervention exists.
    """
    solutions = (data.get('NEMSPDCaseFile')
                     .get('NemSpdOutputs')
                     .get('RegionSolution'))
    for entry in solutions:
        matches = (entry['@RegionID'] == region_id
                   and entry['@Intervention'] == intervention)
        if matches:
            return func(entry[attribute])
    raise CasefileLookupError(f'Attribute not found: {region_id} {attribute} {intervention}')
|
0dc26e54ae5f16f8b3158ec00a5dc0bc58776408
| 3,640,205
|
def conv1x1(in_planes: int, out_planes: int) -> nn.Conv2d:
    """Return a 1x1 2-D convolution (with bias) mapping in_planes -> out_planes."""
    return nn.Conv2d(
        in_channels=in_planes,
        out_channels=out_planes,
        kernel_size=1,
        bias=True,
    )
|
2c8fcb8e04084ce35a2ae595b457fdf68fd27723
| 3,640,206
|
def make_ratio_map(amap, bmap):
    """Return the elementwise ratio of two PISA 2 style maps (amap/bmap) as
    another PISA 2 style map, keeping amap's binning."""
    validate_maps(amap, bmap)
    # Suppress divide-by-zero / 0-over-0 warnings; inf/nan pass through.
    with np.errstate(divide='ignore', invalid='ignore'):
        ratio = amap['map'] / bmap['map']
    return {
        'ebins': amap['ebins'],
        'czbins': amap['czbins'],
        'map': ratio,
    }
|
9497ddadd6d983b3094107aceadbdc1e2f1fb0a7
| 3,640,207
|
from typing import Collection
from typing import Mapping
from typing import Sequence
import math
def _compose_duration(
    components_tags: Collection[Tags]) -> Mapping[str, Sequence[str]]:
    """Returns summed duration tags, or {} when any component lacks one."""
    values = [
        tags.one_or_none(DURATION_SECONDS)
        for tags in components_tags
    ]
    # Bail out when there are no components or any is missing a duration.
    if not values or None in values:
        return {}
    try:
        total_seconds = math.fsum(float(v) for v in values)
    except ValueError:
        # A non-numeric duration string: emit nothing rather than fail.
        return {}
    return {DURATION_SECONDS.name: (str(total_seconds),)}
|
99bebed06628627211a117c738d89790d11adc1b
| 3,640,208
|
def feature_spatial(fslDir, tempDir, aromaDir, melIC):
    """ This function extracts the spatial feature scores. For each IC it determines the fraction of the mixture modeled thresholded Z-maps respectively located within the CSF or at the brain edges, using predefined standardized masks.

    NOTE(review): Python 2 only -- relies on the `commands` module and a
    print statement, neither of which exists on Python 3.
    NOTE(review): `aromaDir` is documented as holding the mask files, but the
    body references mask_csf/mask_edge/mask_out by bare relative path --
    presumably the caller chdirs into aromaDir first; confirm.

    Parameters
    ---------------------------------------------------------------------------------
    fslDir:     Full path of the bin-directory of FSL
    tempDir:    Full path of a directory where temporary files can be stored (called 'temp_IC.nii.gz')
    aromaDir:   Full path of the ICA-AROMA directory, containing the mask-files (mask_edge.nii.gz, mask_csf.nii.gz & mask_out.nii.gz)
    melIC:      Full path of the nii.gz file containing mixture-modeled thresholded (p>0.5) Z-maps, registered to the MNI152 2mm template

    Returns
    ---------------------------------------------------------------------------------
    edgeFract:  Array of the edge fraction feature scores for the components of the melIC file
    csfFract:   Array of the CSF fraction feature scores for the components of the melIC file"""
    # Get the number of ICs
    numICs = int(commands.getoutput('%sfslinfo %s | grep dim4 | head -n1 | awk \'{print $2}\'' % (fslDir, melIC) ))
    # Loop over ICs
    edgeFract=np.zeros(numICs)
    csfFract=np.zeros(numICs)
    for i in range(0,numICs):
        # Define temporary IC-file
        tempIC = os.path.join(tempDir,'temp_IC.nii.gz')
        # Extract IC from the merged melodic_IC_thr2MNI2mm file
        os.system(' '.join([os.path.join(fslDir,'fslroi'),
                            melIC,
                            tempIC,
                            str(i),
                            '1']))
        # Change to absolute Z-values
        os.system(' '.join([os.path.join(fslDir,'fslmaths'),
                            tempIC,
                            '-abs',
                            tempIC]))
        # Get sum of Z-values within the total Z-map (calculate via the mean and number of non-zero voxels)
        totVox = int(commands.getoutput(' '.join([os.path.join(fslDir,'fslstats'),
                                                  tempIC,
                                                  '-V | awk \'{print $1}\''])))
        if not (totVox == 0):
            totMean = float(commands.getoutput(' '.join([os.path.join(fslDir,'fslstats'),
                                                         tempIC,
                                                         '-M'])))
        else:
            print ' - The spatial map of component ' + str(i+1) + ' is empty. Please check!'
            totMean = 0
        totSum = totMean * totVox
        # Get sum of Z-values of the voxels located within the CSF (calculate via the mean and number of non-zero voxels)
        csfVox = int(commands.getoutput(' '.join([os.path.join(fslDir,'fslstats'),
                                                  tempIC,
                                                  '-k mask_csf.nii.gz',
                                                  '-V | awk \'{print $1}\''])))
        if not (csfVox == 0):
            csfMean = float(commands.getoutput(' '.join([os.path.join(fslDir,'fslstats'),
                                                         tempIC,
                                                         '-k mask_csf.nii.gz',
                                                         '-M'])))
        else:
            csfMean = 0
        csfSum = csfMean * csfVox
        # Get sum of Z-values of the voxels located within the Edge (calculate via the mean and number of non-zero voxels)
        edgeVox = int(commands.getoutput(' '.join([os.path.join(fslDir,'fslstats'),
                                                   tempIC,
                                                   '-k mask_edge.nii.gz',
                                                   '-V | awk \'{print $1}\''])))
        if not (edgeVox == 0):
            edgeMean = float(commands.getoutput(' '.join([os.path.join(fslDir,'fslstats'),
                                                          tempIC,
                                                          '-k mask_edge.nii.gz',
                                                          '-M'])))
        else:
            edgeMean = 0
        edgeSum = edgeMean * edgeVox
        # Get sum of Z-values of the voxels located outside the brain (calculate via the mean and number of non-zero voxels)
        outVox = int(commands.getoutput(' '.join([os.path.join(fslDir,'fslstats'),
                                                  tempIC,
                                                  '-k mask_out.nii.gz',
                                                  '-V | awk \'{print $1}\''])))
        if not (outVox == 0):
            outMean = float(commands.getoutput(' '.join([os.path.join(fslDir,'fslstats'),
                                                         tempIC,
                                                         '-k mask_out.nii.gz',
                                                         '-M'])))
        else:
            outMean = 0
        outSum = outMean * outVox
        # Determine edge and CSF fraction
        # Edge fraction is taken relative to the non-CSF mass (totSum - csfSum).
        if not (totSum == 0):
            edgeFract[i] = (outSum + edgeSum)/(totSum - csfSum)
            csfFract[i] = csfSum / totSum
        else:
            edgeFract[i]=0
            csfFract[i]=0
        # Remove the temporary IC-file
        os.remove(tempIC)
    # Return feature scores
    return edgeFract, csfFract
|
fb515a61bd81533b3b79f9cd500ad5b77723b527
| 3,640,209
|
from datetime import datetime
def str_to_date(date_str, fmt=DATE_STR_FMT):
    """Convert a formatted date string to a ``datetime.date`` object.

    :param date_str: date string to parse
    :param fmt: strptime format string (defaults to DATE_STR_FMT)
    :return: the parsed calendar date
    """
    # FIX: with `from datetime import datetime` in scope, `datetime` IS the
    # class -- `datetime.datetime.strptime` raised AttributeError.
    return datetime.strptime(date_str, fmt).date()
|
102f384b479b217259c9bbfe36c8b66909daee50
| 3,640,210
|
import time
import json
def create_elasticsearch_domain(name, account_id, boto_session, lambda_role, cidr):
    """
    Create Elastic Search Domain

    Creates an AWS Elasticsearch (v2.3, single t2.micro node, 20GB gp2 EBS)
    domain called *name*, applies an access policy granting es:* to
    *lambda_role* and to requests from the *cidr* range, then polls until the
    domain finishes processing.  Exits the process on unrecoverable errors.

    name: domain name to create
    account_id: AWS account id used to build the domain's resource ARN
    boto_session: boto3 session used to create the 'es' client
    lambda_role: IAM role granted full access to the domain
    cidr: source-IP range granted full access to the domain
    Returns the domain endpoint hostname (str).

    NOTE(review): the region 'ap-southeast-2' is hard-coded into the ARN --
    confirm it matches the session's region.
    """
    boto_elasticsearch = boto_session.client('es')
    total_time = 0
    resource = "arn:aws:es:ap-southeast-2:{0}:domain/{1}/*".format(account_id, name)
    # Two statements: full access for the lambda role, and IP-restricted
    # access for everyone inside the given CIDR.
    access_policy = {"Version": "2012-10-17", "Statement": [
        {"Effect": "Allow", "Principal": {"AWS": str(lambda_role)}, "Action": "es:*", "Resource": resource},
        {"Effect": "Allow", "Principal": {"AWS": "*"}, "Action": "es:*", "Resource": resource,
         "Condition": {"IpAddress": {"aws:SourceIp": "{0}".format(cidr)}}}
    ]}
    endpoint = None
    time.sleep(5)
    try:
        print('Creating elasticsearch domain: {0}'.format(name))
        boto_elasticsearch.create_elasticsearch_domain(
            DomainName=name,
            ElasticsearchVersion='2.3',
            ElasticsearchClusterConfig={
                'InstanceType': 't2.micro.elasticsearch',
                'InstanceCount': 1,
                'DedicatedMasterEnabled': False,
                'ZoneAwarenessEnabled': False
            },
            EBSOptions={
                'EBSEnabled': True,
                'VolumeType': 'gp2',
                'VolumeSize': 20
            }
        )
        time.sleep(10)
        attempts = 1
        # The freshly-created domain may not accept config updates right
        # away: retry the policy update up to 3 times.
        while True:
            print('Trying to apply access policies to elasticsearch domain: {0} (attempt: {1})'.format(name, attempts))
            try:
                boto_elasticsearch.update_elasticsearch_domain_config(
                    DomainName=name,
                    AccessPolicies=json.dumps(access_policy)
                )
                break
            except Exception as e:
                attempts += 1
                if attempts > 3:
                    print('Failed to apply access policies. Please run this script again with `-a delete -n {0}`'
                          'and wait approx 20 minutes before trying again'.format(name))
                    print('Full error was: {0}'.format(e))
                    exit(1)
                else:
                    time.sleep(2)
    except Exception as e:
        print('Could not create elasticsearch domain: {0}.'.format(name))
        print('Error was: {0}'.format(e))
        exit(1)
    # Poll every 2 minutes until the domain leaves the 'Processing' state.
    # NOTE(review): total_time only advances in the except branch below, so
    # the 30-minute guard is not reached while describe calls succeed.
    while True:
        try:
            es_status = boto_elasticsearch.describe_elasticsearch_domain(DomainName=name)
            processing = es_status['DomainStatus']['Processing']
            if not processing:
                endpoint = es_status['DomainStatus']['Endpoint']
                print('Domain: {0} has been created!'.format(name))
                break
            else:
                print('Domain: {0} is still processing. Waiting for 120 seconds before checking again'.format(name))
                time.sleep(120)
        except Exception:
            print('Domain: {0} is still processing. Waiting for 120 seconds before checking again'.format(name))
            total_time += 120
            if total_time > 1800:
                print('Script has been running for over 30 minutes... This likely means that your elastic search domain'
                      ' has not created successfully. Please check the Elasticsearch Service dashboard in AWS console'
                      ' and delete the domain named {0} if it exists before trying again'.format(name))
                exit(1)
            time.sleep(120)
    return endpoint
|
5e33bd1454a2b3d1ce3bc1cc181b44497ce6035a
| 3,640,211
|
def get_user(email):
    """
    Fetch a single user row by e-mail address.

    :param email: e-mail address to look up (treated as untrusted input)
    :return: tuple ``(result, e)`` -- ``result`` is the fetched row (or
        ``None``/``{}`` when nothing matched) and ``e`` is ``'none'`` on
        success or the MySQL error code captured by the try/except.
    """
    result = {}
    e = 'none'
    connection = _connect_to_db()
    try:
        with connection.cursor() as cursor:
            # FIX: parameterized query -- the previous version interpolated
            # the raw email into the SQL via an f-string, which was
            # SQL-injectable. The driver now escapes the value itself.
            sql = (
                "SELECT `id`, `name`, `last_name`, `email`, `password`, "
                "`phone_number`, `address`, `profile_image_url`, `city_id`, "
                "`account_type_id`, `lat_location`, `long_location`, "
                "`created_at`, `updated_at`, `active` "
                "FROM users WHERE users.email=%s"
            )
            cursor.execute(sql, (email,))
            result = cursor.fetchone()
    except Exception as ex:
        # Keep only the MySQL error code; callers inspect it alongside result.
        e = ex.args[0]
    finally:
        connection.close()
    return result, e
|
f05a59dc95ade5157d09d26a7d899fd9b19d3526
| 3,640,212
|
def index(request):
    """
    Example of a function-based view.

    method:
        get
    request:
        None
    response:
        type: html
    """
    greeting = "Hello, world. You're at the polls index."
    return HttpResponse(greeting)
|
1fe400e5f08728eef5834268d219a3f109325114
| 3,640,213
|
import requests
def make_https_request(logger, url, jobs_manager, download=False, timeout_attempt=0):
    """
    Utility function for making HTTPs requests.

    :param logger: logger used for warning/error reporting
    :param url: URL to fetch (120s request timeout, with retry session)
    :param jobs_manager: job tracker; ``record_job_error`` is called before
        any fatal ``exit(1)``
    :param download: when True return raw bytes (``req.content``), else text
    :param timeout_attempt: internal counter -- one recursive retry is
        attempted after a timeout
    :return: response body (bytes or str), or None on HTTP error / non-200
    """
    try:
        req = requests_retry_session().get(url, timeout=120)
        req.raise_for_status()
    except requests.exceptions.ConnectionError as c_err:
        logger.error("Connection Error while fetching the cert list")
        logger.error(str(c_err))
        jobs_manager.record_job_error()
        exit(1)
    except requests.exceptions.HTTPError as h_err:
        logger.warning("HTTP Error while fetching the cert list")
        logger.warning(str(h_err))
        return None
    # FIX: Timeout must be caught BEFORE RequestException -- it is a subclass,
    # so the original ordering made the retry branch unreachable (and the bare
    # name `Timeout` was undefined in this module).
    except requests.exceptions.Timeout:
        if timeout_attempt == 0:
            logger.warning("Timeout occurred. Attempting again...")
            result = make_https_request(
                logger, url, jobs_manager, download, timeout_attempt=1
            )
            return result
        else:
            logger.error("Too many timeouts. Exiting")
            jobs_manager.record_job_error()
            exit(1)
    except requests.exceptions.RequestException as err:
        logger.error("Request exception while fetching the cert list")
        logger.error(str(err))
        jobs_manager.record_job_error()
        exit(1)
    except Exception as e:
        logger.error("UNKNOWN ERROR with the HTTP Request: " + str(e))
        jobs_manager.record_job_error()
        exit(1)
    if req.status_code != 200:
        logger.error("ERROR: Status code " + str(req.status_code))
        return None
    if download:
        return req.content
    return req.text
|
825d1d79dc44e571fed4437fb1fbebc60bfef669
| 3,640,214
|
def falshsort(arr=None):
    """
    Flashsort, from https://en.wikipedia.org/wiki/Flashsort

    Classifies values into ~0.43*n buckets by linear interpolation between
    min and max, then finishes each bucket with insertion sort.

    :param arr: iterable of mutually comparable numbers. Defaults to None so
        the original stub's ``falshsort() -> None`` behavior is preserved.
    :return: a new sorted list, or None when no input is given
    """
    if arr is None:
        # Backward-compatible with the original no-argument stub.
        return None
    values = list(arr)
    n = len(values)
    if n < 2:
        return values
    lo = min(values)
    hi = max(values)
    if lo == hi:
        # All elements equal: already sorted.
        return values
    # Classic flashsort uses m ~= 0.43 * n classes.
    m = max(2, int(0.43 * n))
    scale = (m - 1) / (hi - lo)
    buckets = [[] for _ in range(m)]
    for v in values:
        buckets[int(scale * (v - lo))].append(v)
    # Buckets cover increasing value ranges, so an insertion sort appended
    # bucket-by-bucket only ever shifts within the current bucket.
    result = []
    for bucket in buckets:
        for v in bucket:
            i = len(result)
            result.append(v)
            while i > 0 and result[i - 1] > v:
                result[i] = result[i - 1]
                i -= 1
            result[i] = v
    return result
|
359dc737a6611ebd6a73dd7761a6ade97b44b7ab
| 3,640,216
|
import random
def random_word(text, label, label_map, tokenizer, sel_prob):
    """
    Randomly select tokens whose labels participate in the LM loss, with
    probabilities as in the original BERT paper.

    :param text: whitespace-separated sentence (one word per label)
    :param label: per-word labels such as ["D","O","O","D"]
    :param label_map: per-word numeric label ids
    :param tokenizer: object with a ``tokenize(word)`` method (wordpieces)
    :param sel_prob: probability of keeping a "D" word's label; other labels
        are kept with ``sel_prob / 5``
    :return: (wordpiece tokens, per-wordpiece label ids; -1 marks ignored)
    """
    words = text.strip().split(" ")
    mapped_labels = []
    mapped_tokens = []
    assert len(words) == len(label_map)
    assert len(words) == len(label)
    for word, word_label, word_label_id in zip(words, label, label_map):
        pieces = tokenizer.tokenize(word)
        mapped_tokens.extend(pieces)
        # One random draw per word, compared against a label-dependent
        # threshold (full sel_prob for "D", a fifth of it otherwise).
        draw = random.random()
        threshold = sel_prob if word_label == "D" else sel_prob / 5.0
        if draw < threshold:
            mapped_labels.append(word_label_id)
        else:
            mapped_labels.append(-1)
        # Continuation wordpieces never carry a label.
        mapped_labels.extend([-1] * (len(pieces) - 1))
    assert len(mapped_labels) == len(mapped_tokens)
    return mapped_tokens, mapped_labels
|
46e7ac7d9fd0d82bbfef97a9a88efd0599bbc3b3
| 3,640,217
|
import numpy
def _match_storm_objects(first_prediction_dict, second_prediction_dict,
                         top_match_dir_name):
    """Matches storm objects between first and second prediction files.
    F = number of storm objects in first prediction file
    :param first_prediction_dict: Dictionary returned by
        `prediction_io.read_ungridded_predictions` for first prediction file.
    :param second_prediction_dict: Same but for second prediction file.
    :param top_match_dir_name: See documentation at top of file.
    :return: first_prediction_dict: Same as input, but containing only storm
        objects matched with one in the second file.
    :return: second_prediction_dict: Same as input, but containing only storm
        objects matched with one in the first file.  Both dictionaries have
        storm objects in the same order.
    """
    first_storm_times_unix_sec = first_prediction_dict[
        prediction_io.STORM_TIMES_KEY]
    # Match files are stored per valid time, so iterate over unique times.
    first_unique_times_unix_sec = numpy.unique(first_storm_times_unix_sec)
    # Accumulate aligned index pairs (first_indices[k] <-> second_indices[k]).
    first_indices = numpy.array([], dtype=int)
    second_indices = numpy.array([], dtype=int)
    for i in range(len(first_unique_times_unix_sec)):
        this_match_file_name = tracking_io.find_match_file(
            top_directory_name=top_match_dir_name,
            valid_time_unix_sec=first_unique_times_unix_sec[i],
            raise_error_if_missing=True)
        print('Reading data from: "{0:s}"...'.format(this_match_file_name))
        this_match_dict = tracking_io.read_matches(this_match_file_name)[0]
        these_first_indices, these_second_indices = (
            _match_storm_objects_one_time(
                first_prediction_dict=first_prediction_dict,
                second_prediction_dict=second_prediction_dict,
                match_dict=this_match_dict)
        )
        first_indices = numpy.concatenate((first_indices, these_first_indices))
        second_indices = numpy.concatenate((
            second_indices, these_second_indices
        ))
    # Enforce one-to-one matching: drop duplicate entries on each side while
    # keeping the pairs aligned (numpy.unique returns first occurrences).
    _, unique_subindices = numpy.unique(first_indices, return_index=True)
    first_indices = first_indices[unique_subindices]
    second_indices = second_indices[unique_subindices]
    _, unique_subindices = numpy.unique(second_indices, return_index=True)
    first_indices = first_indices[unique_subindices]
    second_indices = second_indices[unique_subindices]
    # Subset both dictionaries to matched storm objects, in matching order.
    first_prediction_dict = prediction_io.subset_ungridded_predictions(
        prediction_dict=first_prediction_dict,
        desired_storm_indices=first_indices)
    second_prediction_dict = prediction_io.subset_ungridded_predictions(
        prediction_dict=second_prediction_dict,
        desired_storm_indices=second_indices)
    return first_prediction_dict, second_prediction_dict
|
9c3bc60e99dc3d07cbd661b7187833e26f18d6f7
| 3,640,219
|
from typing import Optional
from typing import cast
def get_default_branch(base_url: str, auth: Optional[AuthBase], ssl_verify: bool = True) -> dict:
    """Fetch the default branch ("tree") from a Nessie server.

    :param base_url: base Nessie url
    :param auth: Authentication settings
    :param ssl_verify: ignore ssl errors if False
    :return: json Nessie branch
    """
    response = _get(base_url + "/trees/tree", auth, ssl_verify=ssl_verify)
    return cast(dict, response)
|
e2ac587705c82c95edeb415b79ca15746e9e9b78
| 3,640,220
|
from typing import Tuple
from typing import Union
def sigmoid(
        x,
        sigmoid_type: str = "tanh",
        normalization_range: Tuple[Union[float, int], Union[float, int]] = (0, 1)
):
    """
    A sigmoid function. From Wikipedia (https://en.wikipedia.org/wiki/Sigmoid_function):
        A sigmoid function is a mathematical function having a characteristic "S"-shaped curve
        or sigmoid curve.
    Args:
        x: The input
        sigmoid_type: Type of sigmoid function to use [str]. Can be one of:
            * "tanh" or "logistic" (same thing after normalization)
            * "arctan"
            * "polynomial"
        normalization_range: Range in which to normalize the sigmoid, shorthanded here in the
            documentation as "N". This parameter is given as a two-element tuple (min, max).
            After normalization:
            >>> sigmoid(-Inf) == normalization_range[0]
            >>> sigmoid(Inf) == normalization_range[1]
            * In the special case of N = (0, 1): sigmoid(0) == 0.5
            * In the special case of N = (-1, 1): sigmoid(0) == 0
    Returns: The value of the sigmoid.
    Raises:
        ValueError: if `sigmoid_type` is not one of the supported names.
    """
    ### Sigmoid equations given here under the (-1, 1) normalization:
    # Bug fix: the original test was `sigmoid_type == ("tanh" or "logistic")`,
    # which evaluates to `sigmoid_type == "tanh"` and rejected "logistic".
    if sigmoid_type in ("tanh", "logistic"):
        # Note: tanh(x) is simply a scaled and shifted version of a logistic curve; after
        # normalization these functions are identical.
        s = np.tanh(x)
    elif sigmoid_type == "arctan":
        s = 2 / pi * np.arctan(pi / 2 * x)
    elif sigmoid_type == "polynomial":
        s = x / (1 + x ** 2) ** 0.5
    else:
        raise ValueError("Bad value of parameter 'sigmoid_type'!")
    ### Normalize from (-1, 1) to the requested range.
    lo = normalization_range[0]  # renamed from `min`/`max` to avoid shadowing builtins
    hi = normalization_range[1]
    return s * (hi - lo) / 2 + (hi + lo) / 2
|
b9d660d20f7e398a2e57ad0b907ab52c4a88cc36
| 3,640,223
|
def interp(x, x1, y1, x2, y2):
    """Linearly interpolate the y-value at x on the line through (x1, y1) and (x2, y2)."""
    weight_left = x2 - x
    weight_right = x - x1
    return (weight_left * y1 + weight_right * y2) / (x2 - x1)
|
3af4575c017a32619a5bb2866a7faea5ff5c760d
| 3,640,224
|
def gen_cpmfgp_test_data_from_config_file(config_file_name, raw_func,
                                          num_tr_data, num_te_data):
    """ Generates train/test datasets for CP Multi-fidelity GP fitting. """
    # Load the config and wrap the raw function so it accepts processed points.
    config = load_config_file(config_file_name)
    proc_func = get_processed_func_from_raw_func_via_config(raw_func, config)
    def _sample(num_data):
        """ Draws num_data (z, x) points and evaluates proc_func on them. """
        ZX = sample_from_config_space(config, num_data)
        YY = [proc_func(z, x) for (z, x) in ZX]
        ZZ = get_idxs_from_list_of_lists(ZX, 0)
        XX = get_idxs_from_list_of_lists(ZX, 1)
        return ZZ, XX, YY, ZX
    ZZ_train, XX_train, YY_train, ZX_train = _sample(num_tr_data)
    ZZ_test, XX_test, YY_test, ZX_test = _sample(num_te_data)
    return Namespace(config_file_name=config_file_name, config=config, raw_func=raw_func,
                     ZZ_train=ZZ_train, XX_train=XX_train, YY_train=YY_train,
                     ZX_train=ZX_train, ZZ_test=ZZ_test, XX_test=XX_test,
                     YY_test=YY_test, ZX_test=ZX_test)
|
c6efaf34601f5b02153fca3ea0926115d1adb918
| 3,640,226
|
def csv_to_postgres(engine,
                    file: str,
                    table_name: str):
    """
    Load a *.csv file into a database table, replacing any existing table.

    :param engine: SQLAlchemy connection/engine for the target database
    :param file: Full filepath of the *.csv file
    :param table_name: Name of the table to be created
    :return: None
    """
    frame = pd.read_csv(file, index_col=False)
    # Postgres columns are case-sensitive; normalise headers to lowercase.
    frame.columns = frame.columns.str.lower()
    # A leftover pandas index column becomes the "id" column.
    frame = frame.rename(columns={'unnamed: 0': 'id'})
    frame.to_sql(con=engine,
                 name=table_name,
                 if_exists='replace',
                 index=False)
    return None
|
e8a913a32a3b0f7d9d617fa000f0b232e9824736
| 3,640,227
|
def _custom_padd(a, min_power_of_2=1024, min_zero_padd=50,
                 zero_padd_ratio=0.5):
    """ Private helper to make a zeros-mirror-zeros padd to the next power of
    two of a.
    Parameters
    ----------
    a : np.ndarray,
        array to padd.
    min_power_of_2 : int (default=1024),
        min length (power of two) for the padded array.
    zero_padd_ratio : float (default=0.5),
        determine the ratio of the length of zero padds (either for the first
        or the second zero-padd) w.r.t the array length.
    min_zero_padd : int (default=50)
        min zero padd, either for the first or the second zero-padd.
    Note:
    -----
    Having a signal close to ~200 can make trouble.
    Results
    -------
    a : np.ndarray
        the padded array.
    p_total : tuple of int (or 0 when no padding was needed),
        the applied padd.
    """
    if not np.log2(min_power_of_2).is_integer():
        raise ValueError("min_power_of_2 should be a power of two, "
                         "got {0}".format(min_power_of_2))
    # Target length: next power of two >= len(a), at least min_power_of_2.
    nextpow2 = int(np.power(2, np.ceil(np.log2(len(a)))))
    nextpow2 = min_power_of_2 if nextpow2 < min_power_of_2 else nextpow2
    diff = nextpow2 - len(a)
    # define the three possible padding
    # Length of each zero run, floored at min_zero_padd.
    zero_padd_len = int(zero_padd_ratio * len(a))
    too_short = zero_padd_len < min_zero_padd
    zero_padd_len = min_zero_padd if too_short else zero_padd_len
    p_zeros = (zero_padd_len, zero_padd_len)
    # Split the total deficit evenly; the odd remainder goes to the right.
    len_padd_left = int(diff / 2)
    len_padd_right = int(diff / 2) + (len(a) % 2)
    p_total = (len_padd_left, len_padd_right)
    # NOTE(review): the exact boundaries diff == 2*zero_padd_len and
    # diff == 4*zero_padd_len fall through to the *next* (larger) scheme,
    # giving zero-length reflect padds there — confirm this is intended.
    if diff == 0:
        # [ s ]  — already a power of two, nothing to do.
        p_total = 0
        return a, p_total
    elif (0 < diff) and (diff < 2 * zero_padd_len):
        # [ /zeros | s | zeros/ ]  — deficit too small for mirrors.
        a = padd(a, p_total)
        return a, p_total
    elif (2 * zero_padd_len < diff) and (diff < 4 * zero_padd_len):
        # [ zeros | mirror-signal | s | mirror-signal | zeros ]
        len_reflect_padd_left = len_padd_left - zero_padd_len
        len_reflect_padd_right = len_padd_right - zero_padd_len
        p_reflect = (len_reflect_padd_left, len_reflect_padd_right)
        # padding
        a = np.pad(a, p_reflect, mode='reflect')
        a = padd(a, p_zeros)
        return a, p_total
    else:
        # [ zeros | mirror-signal | zeros | s | zeros | mirror-signal | zeros ]
        len_reflect_padd_left = len_padd_left - 2 * zero_padd_len
        len_reflect_padd_right = len_padd_right - 2 * zero_padd_len
        p_reflect = (len_reflect_padd_left, len_reflect_padd_right)
        # padding
        a = padd(a, p_zeros)
        a = np.pad(a, p_reflect, mode='reflect')
        a = padd(a, p_zeros)
        return a, p_total
|
eb7f9d675113b8f53558d911462209c2f72c3ce3
| 3,640,228
|
def encode_multipart_formdata(fields, files):
    """
    Encode multipart data to be used in data import
    adapted from: http://code.activestate.com/recipes/146306/
    :param fields: sequence of (name, value) elements for regular form fields.
    :param files: sequence of (name, filename, value) elements for data to be uploaded as files
    :return: (content_type, body) ready for httplib.HTTP instance
    """
    boundary = '-------tHISiSsoMeMulTIFoRMbOUNDaRY---'
    segments = []
    for (name, value) in fields:
        segments.extend([
            '--' + boundary,
            'Content-Disposition: form-data; name="%s"' % name,
            '',
            value,
        ])
    for (name, filename, value) in files:
        segments.extend([
            '--' + boundary,
            'Content-Disposition: form-data; name="%s"; filename="%s"' % (name, filename),
            'Content-Type: %s' % get_content_type(filename),
            '',
            value,
        ])
    segments.extend(['--' + boundary + '--', ''])
    buf = BytesIO()
    for segment in segments:
        # File payloads may already be bytes; everything else is encoded.
        data = segment if isinstance(segment, bytes) else segment.encode()
        buf.write(data)
        buf.write(b"\r\n")
    content_type = 'multipart/form-data; boundary=%s' % boundary
    return content_type, buf.getvalue()
|
17cd1ce08d7aac005a07b77517c4644565083cb8
| 3,640,229
|
def is_recording():
    """Report whether the key-macro recorder is currently active."""
    # Read-only access to the module-level flag; a ``global`` declaration is
    # only needed for writes.
    return recording
|
73f3a28d5d37bdc300768d48a6cc8f2ac81c2cf0
| 3,640,230
|
from typing import Union
from pathlib import Path
from typing import List
from typing import Dict
from typing import Optional
import asyncio
def download_image_urls(
    urls_filename: Union[Path, str],
    synsets: List[str],
    max_concurrent: int = 50,
    rewrite: bool = False
) -> Dict[str, Optional[List[str]]]:
    """Download image urls per synset and save them as JSON.

    Args:
        urls_filename: a path to the file where to save the urls.
        synsets: a list of synsets for which to download urls.
        max_concurrent (optional): a maximum number of concurrent requests.
        rewrite (optional): if True, will download new urls even if file exists.

    Returns:
        Mapping from synset id to its list of urls (or None on failure).
    """
    print("Downloading image urls.")
    download_coro = _download_image_urls(urls_filename, synsets, max_concurrent, rewrite)
    return asyncio.run(download_coro)
|
36bc1e2993cfd01ef9fca91354f970ffb980a919
| 3,640,231
|
def generate_new_split(lines1, lines2, rng, cutoff=14937):
    """Pool lines1 and lines2, shuffle with rng, and re-split at cutoff.

    Useful for working with random splits of data; also recomputes the
    normalized embedding matrix for each new half.
    """
    pooled = list(lines1)  # lines1 may be any iterable, not just a list
    pooled.extend(lines2)
    order = rng.permutation(len(pooled))
    shuffled = [pooled[idx] for idx in order]
    first_half = shuffled[:cutoff]
    second_half = shuffled[cutoff:]
    emb1 = confusion_matrix.get_embedding_matrix(first_half, normalize=True)
    emb2 = confusion_matrix.get_embedding_matrix(second_half, normalize=True)
    return (first_half, emb1, second_half, emb2)
|
cac058f44bd5cb729517a1aeb67295a30dac2eb5
| 3,640,232
|
import time
def train_classification(base_iter,
                         model,
                         dataloader,
                         epoch,
                         criterion,
                         optimizer,
                         cfg,
                         writer=None):
    """Task of training video classification.

    Runs one epoch of training, logging progress every
    cfg.CONFIG.LOG.DISPLAY_FREQ steps on the rank-0 worker.

    :param base_iter: global iteration counter carried across epochs.
    :param model: the network being trained (must already be on GPU).
    :param dataloader: yields (clip_batch, label_batch) pairs.
    :param epoch: current epoch number (for log messages only).
    :param criterion: loss function.
    :param optimizer: optimizer updating the model parameters.
    :param cfg: experiment configuration namespace.
    :param writer: TensorBoard SummaryWriter; only used on rank 0 at log
        steps — NOTE(review): assumed non-None on rank 0, confirm callers.
    :return: the updated global iteration counter.
    """
    batch_time = AverageMeter()  # per-step wall-clock time
    data_time = AverageMeter()   # time spent waiting on the dataloader
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    model.train()
    end = time.time()
    for step, data in enumerate(dataloader):
        base_iter = base_iter + 1
        train_batch = data[0].cuda()
        train_label = data[1].cuda()
        data_time.update(time.time() - end)
        # Forward, loss, accuracy, then standard backward/step.
        outputs = model(train_batch)
        loss = criterion(outputs, train_label)
        prec1, prec5 = accuracy(outputs.data, train_label, topk=(1, 5))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Meters are weighted by batch size so averages are per-sample.
        losses.update(loss.item(), train_label.size(0))
        top1.update(prec1.item(), train_label.size(0))
        top5.update(prec5.item(), train_label.size(0))
        batch_time.update(time.time() - end)
        end = time.time()
        # Periodic console + TensorBoard logging, rank-0 only.
        if step % cfg.CONFIG.LOG.DISPLAY_FREQ == 0 and cfg.DDP_CONFIG.GPU_WORLD_RANK == 0:
            print('-------------------------------------------------------')
            # Reports the lr of the *last* param group if there are several.
            for param in optimizer.param_groups:
                lr = param['lr']
            print('lr: ', lr)
            print_string = 'Epoch: [{0}][{1}/{2}]'.format(
                epoch, step + 1, len(dataloader))
            print(print_string)
            print_string = 'data_time: {data_time:.3f}, batch time: {batch_time:.3f}'.format(
                data_time=data_time.val, batch_time=batch_time.val)
            print(print_string)
            print_string = 'loss: {loss:.5f}'.format(loss=losses.avg)
            print(print_string)
            print_string = 'Top-1 accuracy: {top1_acc:.2f}%, Top-5 accuracy: {top5_acc:.2f}%'.format(
                top1_acc=top1.avg, top5_acc=top5.avg)
            print(print_string)
            iteration = base_iter
            writer.add_scalar('train_loss_iteration', losses.avg, iteration)
            writer.add_scalar('train_top1_acc_iteration', top1.avg, iteration)
            writer.add_scalar('train_top5_acc_iteration', top5.avg, iteration)
            writer.add_scalar('train_batch_size_iteration',
                              train_label.size(0), iteration)
            writer.add_scalar('learning_rate', lr, iteration)
    return base_iter
|
8564fe2d520d54c81165adb5025aebb9916ab9b5
| 3,640,233
|
import logging
def get_top5(prediction):
    """Log and return the top-5 (index, value) pairs of a prediction array.

    The array is flattened, its five largest entries are found, and each is
    logged and appended (largest first) to the returned "index:value" string.
    """
    length = np.prod(prediction.size)
    flat = np.reshape(prediction, [length])
    order = np.argsort(flat)
    top_idx = order[length - 5:][::-1]          # largest first
    top_val = flat[order[length - 5:]][::-1]
    res_str = ""
    logging.info("============ top5 ===========")
    for (i, v) in zip(top_idx, top_val):
        logging.info("{}:{}".format(i, v))
        res_str += "{}:{}".format(i, v) + "\n"
    return res_str
|
607ad0b9eee550e1bb389a9a92a76472155bea16
| 3,640,234
|
def calculate_parentheses(cases):
    """Calculate all cases in parameter 'cases'
    return : case that calculate and it's 24 else return 'No Solutions'
    Example and Doctest :
    >>> nums = [5, 5, 9, 5]
    >>> cases = generate_all_combinations(nums, '+-*/')
    >>> calculate_parentheses(cases)
    '( ( 5 + 5 ) + 5 ) + 9 = 24'
    >>> nums = [13, 2, 13, 13]
    >>> cases = generate_all_combinations(nums, '+-*/')
    >>> calculate_parentheses(cases)
    '2 * ( 13 - ( 13 / 13 ) ) = 24'
    >>> nums = [1, 1, 2, 7]
    >>> cases = generate_all_combinations(nums, '+-*/')
    >>> calculate_parentheses(cases)
    '( 1 + 2 ) * ( 1 + 7 ) = 24'
    >>> nums = [200, -120, 10, 3]
    >>> cases = generate_all_combinations(nums, '+-*/')
    >>> calculate_parentheses(cases)
    '( ( -120 + 200 ) * 3 ) / 10 = 24'
    >>> nums = [1, 1, 1, 9]
    >>> cases = generate_all_combinations(nums, '+-*/')
    >>> calculate_parentheses(cases)
    'No Solutions'
    >>> # Check case that can occured 'divided by zero' problem
    >>> nums = [13, 13, 13, 13]
    >>> cases = generate_all_combinations(nums, '+-*/')
    >>> calculate_parentheses(cases)
    'No Solutions'
    """
    for case in cases:
        num1, operation1, num2 = case[0], case[1], case[2]
        operation2, num3 = case[3], case[4]
        operation3, num4 = case[5], case[6]
        # The five distinct parenthesizations of four operands.  Each entry
        # pairs the display template (spacing kept identical to the original
        # output, including the missing space in the second template) with a
        # thunk that evaluates calc() in the corresponding order.
        candidates = (
            # ( ( a . b ) . c ) . d
            ("( ( {0} {1} {2} ) {3} {4} ) {5} {6} = 24",
             lambda: calc(calc(calc(num1, operation1, num2),
                               operation2, num3), operation3, num4)),
            # ( a . ( b . c ) ) . d
            ("( {0} {1} ( {2} {3} {4} ) ){5} {6} = 24",
             lambda: calc(calc(num1, operation1,
                               calc(num2, operation2, num3)),
                          operation3, num4)),
            # ( a . b ) . ( c . d )
            ("( {0} {1} {2} ) {3} ( {4} {5} {6} ) = 24",
             lambda: calc(calc(num1, operation1, num2), operation2,
                          calc(num3, operation3, num4))),
            # a . ( ( b . c ) . d )
            ("{0} {1} ( ( {2} {3} {4} ) {5} {6} ) = 24",
             lambda: calc(num1, operation1,
                          calc(calc(num2, operation2, num3),
                               operation3, num4))),
            # a . ( b . ( c . d ) )
            ("{0} {1} ( {2} {3} ( {4} {5} {6} ) ) = 24",
             lambda: calc(num1, operation1,
                          calc(num2, operation2,
                               calc(num3, operation3, num4)))),
        )
        for template, evaluate in candidates:
            # Division by zero means this parenthesization has no value;
            # skip it (the original mapped it to 0, never equal to 24).
            try:
                result = evaluate()
            except ZeroDivisionError:
                continue
            if result == 24:
                return template.format(num1, operation1, num2, operation2,
                                       num3, operation3, num4)
    return 'No Solutions'
|
b8d51d677e16ed27c0b46ad0f4fe98751ea9759e
| 3,640,235
|
import hashlib
import hmac
import struct
def pbkdf2(hash_algorithm, password, salt, iterations, key_length):
    """
    PBKDF2 from PKCS#5
    :param hash_algorithm:
        The string name of the hash algorithm to use: "sha1", "sha224", "sha256", "sha384", "sha512"
    :param password:
        A byte string of the password to use an input to the KDF
    :param salt:
        A cryptographic random byte string
    :param iterations:
        The numbers of iterations to use when deriving the key
    :param key_length:
        The length of the desired key in bytes
    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
    :return:
        The derived key as a byte string
    """
    # --- Input validation (error text rendered by project helper pretty_message) ---
    if not isinstance(password, byte_cls):
        raise TypeError(pretty_message(
            '''
            password must be a byte string, not %s
            ''',
            type_name(password)
        ))
    if not isinstance(salt, byte_cls):
        raise TypeError(pretty_message(
            '''
            salt must be a byte string, not %s
            ''',
            type_name(salt)
        ))
    if not isinstance(iterations, int_types):
        raise TypeError(pretty_message(
            '''
            iterations must be an integer, not %s
            ''',
            type_name(iterations)
        ))
    if iterations < 1:
        raise ValueError('iterations must be greater than 0')
    if not isinstance(key_length, int_types):
        raise TypeError(pretty_message(
            '''
            key_length must be an integer, not %s
            ''',
            type_name(key_length)
        ))
    if key_length < 1:
        raise ValueError('key_length must be greater than 0')
    if hash_algorithm not in set(['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512']):
        raise ValueError(pretty_message(
            '''
            hash_algorithm must be one of "md5", "sha1", "sha224", "sha256", "sha384",
            "sha512", not %s
            ''',
            repr(hash_algorithm)
        ))
    # Map the algorithm name to its hashlib constructor.
    ld = {
        'md5' : hashlib.md5,
        'sha1' : hashlib.sha1,
        'sha224': hashlib.sha224,
        'sha256': hashlib.sha256,
        'sha384': hashlib.sha384,
        'sha512': hashlib.sha512,
    }
    # Key the HMAC once; each PRF call copies this pre-keyed state, which is
    # cheaper than re-keying HMAC for every invocation.
    h = hmac.new(password, digestmod=ld[hash_algorithm])
    def prf(data):
        # PRF(password, data) = HMAC(password, data).
        hm = h.copy()
        hm.update(data)
        return bytearray(hm.digest())
    key = bytearray()
    i = 1
    while len(key) < key_length:
        # Block T_i = U_1 xor U_2 xor ... xor U_c (RFC 8018 section 5.2),
        # where U_1 = PRF(salt || INT(i)) and U_j = PRF(U_{j-1}).
        # NOTE(review): '>i' packs a *signed* 4-byte counter; the spec's INT
        # is unsigned, but this only diverges after 2**31 blocks.
        T = U = prf(salt + struct.pack('>i', i))
        for _ in range(iterations - 1):
            U = prf(U)
            T = bytearray(x ^ y for x, y in zip(T, U))
        key += T
        i += 1
    # The last block may overshoot; truncate to the requested length.
    return key[:key_length]
|
8a3799a2c73b3b3be96f67252f210bc5d114d334
| 3,640,236
|
import time
def getLocalUtcTimeStamp():
    """
    Get the universal timestamp for this machine.

    NOTE(review): mktime() interprets gmtime()'s struct as *local* time, so
    this effectively encodes the local/UTC offset — confirm intent.
    """
    stamp = time.mktime(time.gmtime())
    dst_active = time.localtime().tm_isdst
    return stamp - dst_active * 3600
|
54eed0893d03f3b6a76de0d36fc3f1ff5b35f64f
| 3,640,237
|
def melt_then_pivot_query(df, inspect_result, semiology_term):
    """
    If happy all rows are the same semiology (after inspection of
    QUERY_SEMIOLOGY), melt the localisation columns then pivot_table into a
    single row of summed localisation counts for `semiology_term`.
    ---
    inspect_result is a df
    Ali Alim-Marvasti July 2019
    """
    # find all localisation columns present:
    localisation_labels = anatomical_regions(df)
    relevant_localisations = [
        cols for cols in inspect_result.columns if cols in localisation_labels]
    # MELT
    # first determine id_vars: in this case we don't use lateralisation add that too
    full_id_cols = full_id_vars() + lateralisation_vars()
    id_vars_present_in_query = [
        cols for cols in inspect_result.columns if cols in full_id_cols]
    # Wide -> long: one row per (id_vars, localisation) pair.
    inspect_result_melted = inspect_result.melt(id_vars=id_vars_present_in_query, value_vars=relevant_localisations,
                                                var_name='melted_variable', value_name='melted_numbers')
    # replace NaNs with 0s as melting creates many:
    inspect_result_melted.fillna(value=0, inplace=True)
    # PIVOT_TABLE
    # All rows collapse onto a single index value (the semiology term),
    # summing counts per localisation column.
    inspect_result_melted['pivot_by_column'] = semiology_term
    pivot_result = inspect_result_melted.pivot_table(
        index='pivot_by_column', columns='melted_variable', values='melted_numbers', aggfunc='sum')
    # sort the columns of the pivot_table by ascending value:
    pivot_result.sort_values(by=semiology_term, axis=1,
                             inplace=True, ascending=False)
    return pivot_result
|
97ebd8f30d4b031a6b12412421ad0e4e2458c003
| 3,640,238
|
from typing import List
from typing import Tuple
def maze_solver(maze: List[List[int]]) -> List[Tuple[int, int]]:
    """
    Trace the path a light ray takes through a mirror maze.

    :param maze: 2D grid of cells, where 0 = empty cell, -1 = mirror at -45 degrees, 1 = mirror at 45 degrees
    :return: The coordinates that the light passed, ordered by time
    """
    validate_maze(maze)
    position = (0, 0)
    heading = DIRECTION_RIGHT
    visited: List[Tuple[int, int]] = []
    # The ray enters at (0, 0) moving right and leaves when it exits the grid.
    while 0 <= position[0] < len(maze) and 0 <= position[1] < len(maze[0]):
        visited.append(position)
        heading = next_direction(heading, maze[position[0]][position[1]])
        position = next_coordinate(position, heading)
    return visited
|
4ea26e1dec318c9a41babf617d26662d35fe54c1
| 3,640,239
|
def looks_like_fasta(test_text):
    """Determine if text looks like FASTA formatted data.

    Delegates to the module-level FASTA_START pattern, which requires a first
    line starting with '>' followed by a non-'>' line.  Leading/trailing
    whitespace is ignored.
    """
    stripped = test_text.strip()
    match = FASTA_START.match(stripped)
    return match is not None
|
352cde2d0d4de692e0598a96d19107ac04a66f53
| 3,640,240
|
import signal
def firwin_kaiser_bsf(f_stop1, f_pass1, f_pass2, f_stop2, d_stop,
                      fs=1.0, N_bump=0, status=True):
    """
    Design an FIR bandstop filter using the sinc() kernel and
    a Kaiser window. The filter order is determined based on
    f_stop1 Hz, f_pass1 Hz, f_pass2 Hz, f_stop2 Hz, and the
    desired stopband attenuation d_stop in dB for both stopbands,
    all relative to a sampling rate of fs Hz.
    Note: The passband ripple cannot be set independent of the
    stopband attenuation.
    Note: The filter order is forced to be even (odd number of taps)
    so there is a center tap that can be used to form 1 - H_BPF.
    Mark Wickert October 2016
    """
    # First design a BPF starting from simple LPF equivalent
    # The upper and lower stopbands are assumed to have
    # the same attenuation level. The LPF equivalent critical
    # frequencies:
    f_pass = (f_pass2 - f_pass1) / 2
    f_stop = (f_stop2 - f_stop1) / 2
    # Continue to design equivalent LPF
    wc = 2 * np.pi * (f_pass + f_stop) / 2 / fs
    delta_w = 2 * np.pi * (f_stop - f_pass) / fs
    # Find the filter order (Kaiser's empirical formula)
    M = np.ceil((d_stop - 8) / (2.285 * delta_w))
    # Adjust filter order up or down as needed
    M += N_bump
    # Make filter order even (odd number of taps)
    if ((M + 1) / 2.0 - int((M + 1) / 2.0)) == 0:
        M += 1
    N_taps = M + 1
    # Obtain the Kaiser window
    # NOTE(review): scipy.signal.kaiser was removed in SciPy 1.13 in favor of
    # scipy.signal.windows.kaiser — confirm the pinned SciPy version.
    beta = signal.kaiser_beta(d_stop)
    w_k = signal.kaiser(N_taps, beta)
    n = np.arange(N_taps)
    # Windowed-sinc LPF prototype, normalized to unity DC gain.
    b_k = wc / np.pi * np.sinc(wc / np.pi * (n - M / 2)) * w_k
    b_k /= np.sum(b_k)
    # Transform LPF to BPF by modulating to the band center f0.
    f0 = (f_pass2 + f_pass1) / 2
    w0 = 2 * np.pi * f0 / fs
    n = np.arange(len(b_k))
    b_k_bs = 2 * b_k * np.cos(w0 * (n - M / 2))
    # Transform BPF to BSF via 1 - BPF for odd N_taps
    b_k_bs = -b_k_bs
    b_k_bs[int(M / 2)] += 1
    if status:
        log.info('Kaiser Win filter taps = %d.' % N_taps)
    return b_k_bs
|
d5fccbcb9721707f0653065ad7c27d904cc05b97
| 3,640,241
|
from typing import List
def extract_text(html_text) -> List[List[str]]:
    """
    Collect whitespace-split token lists from each non-empty <h3> inside the
    "contentus" <div> of a parsed HTML document.

    :param html_text: parsed document exposing find()/findAll() (BeautifulSoup-style).
    :return: one list of tokens per non-empty heading.
    """
    content = html_text.find("div", attrs={"class": "contentus"})
    headings = []
    for node in content.findAll("h3"):
        headings.append(node.text.replace("\xa0", ""))
    return [entry.split(" ") for entry in headings if entry]
|
19dfdd513e48f2662dc51661bfeca4b1155762a3
| 3,640,242
|
from typing import Iterable
from typing import Tuple
from re import X
from typing import Mapping
from typing import List
def multidict(pairs: Iterable[Tuple[X, Y]]) -> Mapping[X, List[Y]]:
    """Group the second elements of *pairs* into lists keyed by the first."""
    grouped = {}
    for key, value in pairs:
        grouped.setdefault(key, []).append(value)
    return grouped
|
c94567169a8ea4e3d7fd9a8e5c2a990625181be8
| 3,640,243
|
import glob
def read_images(pathname):
    """
    Read the images to a list given a path like 'images/cropped/*'
    :param pathname: glob pattern for the image files
    :return: a list of color images and a list of corresponding file names
             (base names without directory or extension)
    """
    import os  # local import: only needed here for name extraction
    images_path = sorted(glob.glob(pathname))
    images = []
    names = []
    for path in images_path:
        images.append(cv2.imread(path, cv2.IMREAD_COLOR))
        # Bug fix: the old code sliced the last 10 characters of the full
        # path (path[-10:]) before stripping the extension, silently
        # truncating any longer file name. Use the real base name instead.
        name = os.path.splitext(os.path.basename(path))[0]
        names.append(name)
    return images, names
|
9fd4644baa2ca5db204cd7ce442e85bc0d9e3166
| 3,640,244
|
def visualize_camera_movement(image1, image1_points, image2, image2_points, is_show_img_after_move=False):
    """
    Draw the apparent motion of tracked features between consecutive frames.

    :param image1: frame at time t
    :param image1_points: feature coordinates in image1
    :param image2: frame at time t + 1
    :param image2_points: feature coordinates in image2 (same order/length)
    :param is_show_img_after_move: if True, return frame t+1 annotated with
        the landing points instead of frame t with motion arrows
    """
    frame_t = image1.copy()
    frame_t1 = image2.copy()
    for idx in range(len(image1_points)):
        src = (int(image1_points[idx][0]), int(image1_points[idx][1]))
        dst = (int(image2_points[idx][0]), int(image2_points[idx][1]))
        # Green: the point at time t and its motion vector; blue: where it lands.
        cv2.circle(frame_t, src, 5, (0, 255, 0), 1)
        cv2.arrowedLine(frame_t, src, dst, (0, 255, 0), 1)
        cv2.circle(frame_t, dst, 5, (255, 0, 0), 1)
        if is_show_img_after_move:
            cv2.circle(frame_t1, dst, 5, (255, 0, 0), 1)
    if is_show_img_after_move:
        return frame_t1
    else:
        return frame_t
|
5f92bf44885e62ebdc502e8c98ba1466cb8d5279
| 3,640,246
|
def get_rgb_masks(data, separate_green=False):
    """Get the RGGB Bayer pattern for the given data.
    See `get_rgb_data` for description of data.
    Args:
        data (`numpy.array`): An array of data representing an image
            (2D, or 3D for a stack of images).
        separate_green (bool, optional): If the two green channels should be separated,
            default False.
    Returns:
        numpy.ndarray: stacked boolean masks — (r, g, b) or
        (r, g1, g2, b) when ``separate_green`` is True.  Each mask is False
        at that channel's Bayer positions, True elsewhere.
    Raises:
        ValueError: if ``data`` is not 2- or 3-dimensional.
    """
    # Validate up front instead of after building the masks; ValueError is a
    # subclass of the bare Exception raised previously, so callers still work.
    if data.ndim not in (2, 3):
        raise ValueError('Only 2D and 3D data allowed')
    r_mask = np.ones_like(data).astype(bool)
    g1_mask = np.ones_like(data).astype(bool)
    b_mask = np.ones_like(data).astype(bool)
    # When the greens are combined, g2 aliases g1 so both patterns land in
    # the same mask.
    g2_mask = np.ones_like(data).astype(bool) if separate_green else g1_mask
    # Ellipsis indexing covers both the 2D and the stacked 3D cases.
    r_mask[..., 1::2, 0::2] = False
    g1_mask[..., 1::2, 1::2] = False
    g2_mask[..., 0::2, 0::2] = False
    b_mask[..., 0::2, 1::2] = False
    if separate_green:
        return np.array([r_mask, g1_mask, g2_mask, b_mask])
    return np.array([r_mask, g1_mask, b_mask])
|
cf31103ca4248ccd96fb89181d0e48d5b71201c5
| 3,640,247
|
def _parse_ax(*args, **kwargs):
    """ Parse plotting *args, **kwargs for an AxesSubplot. This allows for
    axes and colormap to be passed as keyword or position.
    Returns AxesSubplot, colormap, kwargs with *args removed"""
    axes = kwargs.pop('axes', None)
    cmap = kwargs.get('cmap', None)
    # If axes was not a keyword, look for a positional Subplot argument and
    # remove it from args.
    if not axes:
        indicies = [idx for (idx, arg) in enumerate(args) if isinstance(arg, Subplot)]
        if len(indicies) < 1:
            axes = None
        elif len(indicies) > 1:
            raise UtilsError("Multiple axes not understood")
        else:
            args = list(args)
            axes = args.pop(indicies[0])
    # Any remaining positional argument must be a single colormap (and only
    # if one was not already passed by keyword).
    if args and not cmap:
        if len(args) > 1:
            raise UtilsError("Please only pass a colormap and/or Axes"
                             " subplot to Canvas plotting")
        elif len(args) == 1:
            kwargs['cmap'] = args[0]
    # If string, replace cmap with true cmap instance (used by show())
    if 'cmap' in kwargs:
        cmap = kwargs['cmap']
        if isinstance(cmap, str):
            if cmap != 'pbinary' and cmap != 'pbinary_r': #special canvas word
                kwargs['cmap'] = cm.get_cmap(cmap)
    return axes, kwargs
|
78658e2cf66fad184c057b9392b918ffb48406be
| 3,640,248
|
import google.colab # noqa: F401
import IPython
import IPython
def _get_context():
    """Determine the most specific context that we're in.
    Implementation from TensorBoard: https://git.io/JvObD.
    Returns:
        _CONTEXT_COLAB: If in Colab with an IPython notebook context.
        _CONTEXT_IPYTHON: If not in Colab, but we are in an IPython notebook
            context (e.g., from running `jupyter notebook` at the command
            line).
        _CONTEXT_NONE: Otherwise (e.g., by running a Python script at the
            command-line or using the `ipython` interactive shell).
    """
    # In Colab, the `google.colab` module is available, but the shell
    # returned by `IPython.get_ipython` does not have a `get_trait`
    # method.
    # Bug fix: the try blocks below were empty (the imports had been hoisted
    # to module level), which is a SyntaxError.  The imports must live inside
    # the try blocks so the ImportError handlers can do their job.
    try:
        import google.colab  # noqa: F401
        import IPython
    except ImportError:
        pass
    else:
        if IPython.get_ipython() is not None:
            # We'll assume that we're in a Colab notebook context.
            return _CONTEXT_COLAB
    # In an IPython command line shell or Jupyter notebook, we can
    # directly query whether we're in a notebook context.
    try:
        import IPython
    except ImportError:
        pass
    else:
        ipython = IPython.get_ipython()
        if ipython is not None and ipython.has_trait("kernel"):
            return _CONTEXT_IPYTHON
    # Otherwise, we're not in a known notebook context.
    return _CONTEXT_NONE
|
b81205aedbe2222019fa6b7e9dc5fb638536869f
| 3,640,249
|
def loadSHSMFCCs(IDs):
    """
    Load all of the 12-dim MFCC features for the requested songs.

    :param IDs: collection of song IDs (post-mapping) to keep.
    :return: dict mapping song ID -> (n_frames, 12) numpy array of MFCCs.
    """
    IDDict = getSHSIDDict()
    mfccs = {}
    count = 0
    # File format: alternating lines of <raw id> then <comma-separated floats>.
    with open("SHSDataset/MFCC/bt_aligned_mfccs_shs.txt") as fin:
        while True:
            ID = fin.readline().rstrip()
            if not ID:
                break
            ID = IDDict[ID]
            if count % 1000 == 0:
                print("Loaded mfccs for %i songs..."%count)
            if ID not in IDs:
                # Skip this song's feature line without parsing it.
                fin.readline()
                count += 1
                continue
            x = fin.readline().rstrip().split(",")
            # Drop a trailing empty field from a trailing comma.
            if len(x[-1]) == 0:
                x = x[0:-1]
            x = np.array([float(a) for a in x])
            # Bug fix: len(x) / 12 yields a float in Python 3 and
            # np.reshape rejects float dimensions; use integer division.
            x = np.reshape(x, (len(x) // 12, 12))
            mfccs[ID] = x
            count += 1
    return mfccs
|
04d82be79d6e89c5f9b6304be82d049bf6af63f5
| 3,640,250
|
def __createTransactionElement(doc,tran):
    """
    Return a DOM element representing the given transaction (tran).
    Child elements are appended in a fixed order: symbol, buy, quantity,
    price, net_amount, time, account_type.
    """
    tranEle = doc.createElement("transaction")
    children = [
        __createSimpleNodeWithText(doc, "symbol", tran.symbol),
        __createSimpleNodeWithText(doc, "buy", "true" if tran.buy else "false"),
        __createSimpleNodeWithText(doc, "quantity", str(tran.num_of_shares)),
        __createSimpleNodeWithText(doc, "price", str(tran.price_per_share)),
        __createSimpleNodeWithText(doc, "net_amount", str(tran.net_amount)),
        __createSimpleNodeWithText(doc, "time", tran.getTimeStr()),
        __createSimpleNodeWithText(doc, "account_type", str(tran.account_type)),
    ]
    for child in children:
        tranEle.appendChild(child)
    return tranEle
|
c09fc6abf4cb9599be23bcf6c91c6cc60330df0a
| 3,640,251
|
def received_information(update: Update, context: CallbackContext) -> int:
    """Store the answer the user just sent, then prompt for more input."""
    answer = update.message.text
    # 'choice' holds the category the user picked in the previous step;
    # pop() both reads and removes it in one go.
    chosen_category = context.user_data.pop('choice')
    context.user_data[chosen_category] = answer.lower()
    update.message.reply_text(
        "Neat! Just so you know, this is what you already told me:"
        f"{facts_to_str(context.user_data)}"
        "You can tell me more, or change your opinion on something.",
        reply_markup=markup,
    )
    return CHOOSING
|
e7b93516975a497f6da11383969a14aeb31e6278
| 3,640,252
|
def set_payout_amount():
    """
    Return the insurance payout amount.

    NB: must match what was defined in the contract constructor at deployment.
    """
    payout_wei = 500000e18  # 500,000 tokens scaled by 18 decimals
    return payout_wei
|
30ff7b07cbbe28b3150be2f1f470236875c8d0e3
| 3,640,253
|
def process_season_data(*args) -> pd.DataFrame:
    """
    Clean each of the given season data frames and stack them into one frame.
    """
    cleaned_frames = [
        basketball_reference.process_df_season_summary(
            df=season_df, url_type="season_summary_per_game"
        )
        for season_df in args
    ]
    return pd.concat(cleaned_frames, axis=0)
|
9c2d46ba2b491382e91613e0dc0a35b68e4188cf
| 3,640,254
|
def build_frame(station_num: int, snapshots_num: int):
    """Function to build citi_bike Frame.
    Args:
        station_num (int): Number of stations.
        snapshots_num (int): Number of in-memory snapshots.
    Returns:
        CitibikeFrame: Frame instance for citi-bike scenario.
    """
    # Matrix node definition is generated dynamically because its layout
    # depends on the number of stations.
    matrices_cls = gen_matrices_node_definition(station_num)
    # Class is defined inside the function so FrameNode counts can capture
    # the station_num / snapshots_num arguments.
    class CitibikeFrame(FrameBase):
        stations = FrameNode(Station, station_num)
        # for adj frame, we only need 1 node to hold the data
        matrices = FrameNode(matrices_cls, 1)
        def __init__(self):
            super().__init__(enable_snapshot=True, total_snapshot=snapshots_num)
    return CitibikeFrame()
|
2fd09885f488a4b42f9a2a4a19dfdd5c10743ef9
| 3,640,255
|
def rucklidge(XYZ, t, k=2, a=6.7):
    """
    Time derivatives of the Rucklidge attractor at state XYZ (t is unused,
    kept for ODE-solver call signatures).
    x0 = (0.1,0,0)
    """
    x, y, z = XYZ
    return (
        y * (a - z) - k * x,
        x,
        y * y - z,
    )
|
9d10aa89fb684a95474d45399ae09a38b507913c
| 3,640,256
|
def multiext(prefix, *extensions):
    """Expand a given prefix with multiple extensions (e.g. .txt, .csv, _peaks.bed, ...).

    Raises:
        WorkflowError: if any extension contains a path delimiter ('/' or '\\').
    """
    # FIX: the original used r"\\" which, being a raw string, is TWO literal
    # backslashes — a single backslash delimiter slipped through the check.
    if any(("/" in ext or "\\" in ext) for ext in extensions):
        raise WorkflowError(
            r"Extensions for multiext may not contain path delimiters " r"(/,\)."
        )
    return [flag(prefix + ext, "multiext", flag_value=prefix) for ext in extensions]
|
39bda078a856cb14fc65174ff48be81909b9034a
| 3,640,258
|
def sum_range(n, total=0):
    """Sum the integers from 1 to n via a trampoline-style tail call.

    Mathematically n(n+1)/2; ``total`` accumulates across TailCall hops.
    """
    if n:
        # Hand the next step back to the trampoline instead of recursing.
        raise TailCall(sum_range, n - 1, n + total)
    return total
|
6126dd1012346a388ddc37c5a8965f3662b8ad7d
| 3,640,259
|
from tcrsampler.sampler import TCRsampler
def _default_tcrsampler_human_beta(default_background = None, default_background_if_missing=None):
    """
    Provide the default human beta sampler 'britanova_human_beta_t_cb.tsv.sampler.tsv'.

    Returns
    -------
    t : tcrsampler.sampler.TCRsampler
    """
    if default_background is None:
        default_background = 'britanova_human_beta_t_cb.tsv.sampler.tsv'
    if default_background_if_missing is None:
        default_background_if_missing = 'britanova_human_beta_t_cb.tsv.sampler.tsv.zip'
    print(default_background)
    try:
        return TCRsampler(default_background=default_background)
    except OSError:
        # Background file not present locally: download it, then retry.
        sampler = TCRsampler()
        sampler.download_background_file(default_background_if_missing)
        return TCRsampler(default_background=default_background)
|
15b682b3e14e9496514efaf287b10ae6acb12441
| 3,640,261
|
def _get_positive_mask(positive_selection, cls_softmax, cls_gt):
    """Build the positive mask from ground-truth box classifications.

    Args:
        positive_selection: positive selection method
            (e.g. 'corr_cls', 'not_bkg')
        cls_softmax: prediction classification softmax scores
        cls_gt: ground truth classification one-hot vector

    Returns:
        positive_mask: positive mask
    """
    # Predicted class index and ground-truth class index (argmax of one-hot).
    pred_classes = tf.argmax(cls_softmax, axis=1)
    gt_classes = tf.argmax(cls_gt, axis=1)
    # Class index 0 is background.
    not_background_mask = tf.greater(gt_classes, 0)
    if positive_selection == 'corr_cls':
        # Keep only non-background predictions matching the ground truth.
        matches_gt = tf.equal(pred_classes, gt_classes)
        return tf.logical_and(matches_gt, not_background_mask)
    if positive_selection == 'not_bkg':
        return not_background_mask
    raise ValueError('Invalid positive selection', positive_selection)
|
8a8e4317f99f691c038b40d7656656f532eba884
| 3,640,263
|
def max_min_index(name_index):
    """Return [max value, country] and [min value, country] for a df column."""
    # Sort ascending on the column and drop missing rows, so the first row
    # holds the minimum and the last row the maximum.
    subset = df_copy[["country", name_index]].sort_values(name_index).dropna()
    values = list(subset[name_index])
    countries = list(subset["country"])
    min_value = [values[0], countries[0]]
    max_value = [values[-1], countries[-1]]
    return max_value, min_value
|
7bce42a4d05b52b8e6f0a6d91cecf7775a9484a4
| 3,640,264
|
def add_bold_line(latex: str, index: int) -> str:
    """Return *latex* with every cell of row *index* prefixed by \\bfseries."""
    rows = latex.splitlines()
    cells = rows[index].split("&")
    rows[index] = "& ".join(r"\bfseries " + cell for cell in cells)
    return "\n".join(rows)
|
637338ea9ec576c780ccfa0b37d47a670465cdbb
| 3,640,265
|
def square(V,resp,weight):
    """Compute expansion coefficients via (optionally weighted) least squares."""
    if not np.any(weight):
        # No (nonzero) weights: plain unweighted least-squares fit.
        return np.linalg.lstsq(V, resp, rcond=None)[0]
    # Weighted normal equations: (V^T W V) coef = V^T W resp.
    Vt = V.T
    gram = Vt.dot(np.transpose(weight * Vt))
    rhs = Vt.dot(np.transpose(weight * resp.T))
    return np.linalg.solve(gram, rhs)
|
076f580a16c233a087edafb1efd57b9fa194a666
| 3,640,266
|
def _unwrap_function(func):
"""Unwrap decorated functions to allow fetching types from them."""
while hasattr(func, "__wrapped__"):
func = func.__wrapped__
return func
|
8c7a9f5b08dc91b9ae2c8387fbd4860bb554d561
| 3,640,267
|
def get_data(exception: bool = True, key_form: str = "data") -> "dict or None":
    """Capture the data of a request route.

    The payload is read from a `JSON` body or from a `Multipart-form`
    field (under ``key_form``); otherwise an exception may be raised.
    Images are captured by the `get_files` function.

    `exception` is an optional boolean that controls whether exceptions
    are raised. Defaults to True.
    NOTE(review): `exception` is not referenced in this body — confirm
    whether callers rely on it.

    Exceptions:
        `from app.errors.JSONNotFound` - empty body.
    """
    data = {}
    if request.get_json():
        data: dict = request.get_json()
    elif request.form.get(key_form):
        data: dict = loads(request.form.get(key_form))
    # Uploaded files are handled separately (get_files); drop the marker key.
    if data.get("file"):
        data.pop("file")
    return data
|
15cd4257e7849d4400231d7e5c2a025b16ad7db5
| 3,640,268
|
def get_dec_log(uid):
    """Convenience method to look up dec_log for a uid."""
    # query_db returns the matching row (or None); dec_log is its only column.
    rv = query_db('select dec_log from user where uid = ?',
                  [uid], one=True)
    return rv[0] if rv else None
|
8927ce657cbabfd1e0f4e852e6163702ab5b3841
| 3,640,269
|
def removeItem(request):
    """
    Remove an item from the logged-in customer's basket.
    """
    if request.method != 'POST':
        return HttpResponseBadRequest(None)
    cust = User.objects.get(username=request.user.username)
    item = Item.objects.get(id=request.POST["item_id"])
    basket_entry = Basket.objects.get(customer=cust, item=item)
    if basket_entry:
        basket_entry.delete()
        return HttpResponse(None)
    return HttpResponseBadRequest(None)
|
80514a322f727478311b7a1b49bd9da8ac7b0d28
| 3,640,270
|
def read_file(repo, name):
    """Read ``<repo>/<name>.txt`` and return its lines with trailing
    whitespace stripped.

    Note: despite the original docstring, this reads plain text lines,
    not JSON.
    """
    # FIX: the explicit file.close() inside `with` was redundant — the
    # context manager already closes the file.
    with open(repo + '/' + name + '.txt') as file:
        return [line.rstrip() for line in file]
|
4d91e4c68a4f132dc6ebb41cc51df66bd555107a
| 3,640,271
|
def add_end_slash(value: str):
    """Return ``value`` with a trailing slash appended if it is a string.

    Non-string inputs are returned unchanged.
    """
    # FIX: isinstance() is the idiomatic type check (and also accepts
    # str subclasses, which `type(...) != str` rejected).
    if not isinstance(value, str):
        return value
    return value if value.endswith("/") else value + "/"
|
bc8f41898c50120ad7ca8b814ff03d19c1c64c27
| 3,640,272
|
def shift_fill(a, n, axis=0, fill=0.0, reverse=False):
    """Shift ``a`` by ``n`` positions along ``axis``, filling vacated slots.

    Positive ``n`` shifts toward higher indices; negative ``n`` shifts
    toward lower indices. Elements wrapped around by the roll are
    replaced with ``fill``.

    ``reverse`` is unused; kept for backward compatibility with callers.
    """
    shifted = np.roll(a, n, axis=axis)
    # FIX: the original always wrote shifted[:n] = fill, which (a) filled
    # the wrong end for negative n and (b) ignored `axis`.
    index = [slice(None)] * shifted.ndim
    index[axis] = slice(0, n) if n >= 0 else slice(n, None)
    shifted[tuple(index)] = fill
    return shifted
|
5287eefe7491442e3192069bce4faf975e54344a
| 3,640,273
|
from typing import Union
def str2bool(v:Union[str, bool]) -> bool:
    """Convert a "boolean" value, possibly given as a string, to a bool.

    Parameters
    ----------
    v: str or bool,
        the "boolean" value

    Returns
    -------
    bool:
        `v` as a real bool

    Raises
    ------
    ValueError: if the string is not a recognised boolean spelling.

    References
    ----------
    https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ("yes", "true", "t", "y", "1"):
        return True
    if lowered in ("no", "false", "f", "n", "0"):
        return False
    raise ValueError("Boolean value expected.")
|
2f102239ce395ece25022320443ffc6d7183968e
| 3,640,274
|
def _none_or_int_or_list(val):
"""Input conversion - expecting None, int, or a list of ints"""
if val is None:
return None
elif isinstance(val, list):
return list(map(int, val))
else:
return int(val)
|
1958c64175a1cd63f8a42044b40b84d7cf8baed2
| 3,640,275
|
def hour_of_day(datetime_col):
    """Extract the hour component from a datetime Series."""
    hours = datetime_col.dt.hour
    return hours
|
18b2f6e16ccbcb488f3863968466fda14f669d8b
| 3,640,276
|
def pad_lists(lists, pad_token, seq_lens_idx=[]):
    """
    Pad unordered lists of different lengths to the max length and order them
    by descending length.

    Arguments:
        lists : list of 1d lists with different lengths (list[list[int]])
        pad_token : padding value (int)
        seq_lens_idx : list of sorted indices (list[int]); computed from the
            lengths when empty

    Returns:
        ordered_lists (list[list[int]]) : padded 1d lists of equal length,
            ordered by descending original length
        ordered_seq_lens (list[int]) : original length of each ordered list
        seq_lens_idx (list[int]) : original indices in descending-length order
    """
    lengths = [len(lst) for lst in lists]
    longest = max(lengths)
    if len(seq_lens_idx) == 0:
        # Descending indices based on sequence lengths.
        seq_lens_idx = np.flip(np.argsort(lengths), axis=0).tolist()
    ordered_lists = [lists[i] + [pad_token] * (longest - lengths[i])
                     for i in seq_lens_idx]
    ordered_seq_lens = [lengths[i] for i in seq_lens_idx]
    return ordered_lists, ordered_seq_lens, seq_lens_idx
|
76a2a9934dfca478e7db43a93db7e56c181a3b3f
| 3,640,277
|
def sum_naturals(n):
    """Sum the first n natural numbers.

    >>> sum_naturals(5)
    15
    """
    return sum(range(1, n + 1))
|
4c59057cd82083d615c72a59f682dd218a657ea0
| 3,640,278
|
def _gaussian_log_sf(x, mu, sigma):
    """Log survival function of a Normal(mu, sigma) distribution at x."""
    x_var = x if isinstance(x, chainer.Variable) else chainer.Variable(x)
    # SF(x) = 1 - CDF(x) = ndtr(-(x - mu) / sigma)
    return _log_ndtr(-(x_var - mu) / sigma)
|
3f33918bf78fc3ab4064f05d038232df218416f6
| 3,640,279
|
def sort_list_files(list_patches, list_masks):
    """
    Sort patch and mask name lists by the id digit at character position 1.

    :param list_patches: names of patches in the folder to sort.
    :param list_masks: names of masks in the folder to sort.
    :return: tuple of sorted lists (patches, masks).
    """
    by_id = lambda name: int(name[1])
    return sorted(list_patches, key=by_id), sorted(list_masks, key=by_id)
|
91557475bf145862ea88ad9f86cef82135eddd6c
| 3,640,280
|
def get_model(name):
    """Return the registered model factory for ``name``."""
    if name in __factory:
        return __factory[name]
    raise KeyError("unknown model:", name)
|
00fff4e3596aec487b16fd1114b7d026d6790568
| 3,640,281
|
def check_sentence_for_coins(sentence: str) -> str:
    """Return the Binance pair if the sentence mentions a followed coin.

    Args:
        sentence (str): the sentence

    Returns:
        str: the Binance pair for the first matching coin, else 'NO_PAIR'
    """
    lowered = sentence.lower()
    for coin_word in TRACKED_COINS:
        if coin_word in lowered:
            return TRACKED_COINS[coin_word]
    return 'NO_PAIR'
|
a9d041ff6c2bcca1c424b059956c33016f93300d
| 3,640,283
|
def to24Bit(color8Bit):
    """Convert an 8-bit index created by color.to8Bit to a 24-bit color value."""
    # The palette's first entry is skipped, so indices are offset by one.
    return palette[1 + color8Bit]
|
b84d2262883a7b3415ae46f174418dc79ef800dc
| 3,640,284
|
def make_bag():
    """Create a bag."""
    bag = from_sequence([1, 2], npartitions=2)
    summed = bag.map(allocate_50mb).sum()
    return summed.apply(no_allocate)
|
8c939dd389ab09811dbbf77aaf479832ab7463d0
| 3,640,286
|
def get_testdata_files(pattern="*"):
    """Return test data files from the dicom3d data root directory."""
    found = walk_data(
        base=join(DATA_ROOT, 'test_files'), pattern=pattern,
        search_files=True, search_dirs=False)
    # Python sources that live alongside the test data are not data files.
    return [f for f in found if not f.endswith('.py')]
|
9739db355914b288b2fcb29874fcc50d6a2b4487
| 3,640,287
|
def safe_text(obj):
    """Safely turn an object into a textual representation.

    Calls str(), then on Python 2 decodes the result as UTF-8.
    """
    result = qcore.safe_str(obj)
    if not isinstance(result, bytes):
        return result
    try:
        return result.decode("utf-8")
    except Exception as e:
        return "<n/a: .decode() raised %r>" % e
|
4d20d5c42b79b6dbb6f8282d74bfd461ffd1dc75
| 3,640,288
|
from typing import List
def evaluate_blueprints(blueprint_q: mp.Queue,
                        input_size: List[int]) -> List[BlueprintGenome]:
    """
    Consumes blueprints off the blueprints queue, evaluates them and adds them back to the queue if all of their
    evaluations have not been completed for the current generation. If all their evaluations have been completed, add
    them to the completed_blueprints list.
    :param blueprint_q: A thread safe queue of blueprints
    :param input_size: The shape of the input to each network
    :return: A list of evaluated blueprints
    """
    completed_blueprints: List[BlueprintGenome] = []
    print(f'Process {mp.current_process().name} - epochs: {config.epochs_in_evolution}')
    # NOTE(review): qsize() is documented as approximate for multiprocessing
    # queues, so this loop condition is racy with multiple consumers —
    # confirm only cooperating workers drain this queue concurrently.
    while blueprint_q.qsize() != 0:
        blueprint = blueprint_q.get()
        blueprint = evaluate_blueprint(blueprint, input_size)
        if blueprint.n_evaluations == config.n_evals_per_bp:
            completed_blueprints.append(blueprint)
        else:
            # Not yet evaluated enough times this generation: requeue it.
            blueprint_q.put(blueprint)
    return completed_blueprints
|
c87d0a37fe32d2af6594564afc44d16adf616737
| 3,640,289
|
import tempfile
import shlex
def run_noble_coder(text, noble_coder):
    """
    Run Noble Coder on ``text`` and extract the perturbation agent.

    Args:
        text: the text to feed into Noble Coder
        noble_coder: the execution path of the Noble Coder jar

    Returns:
        The perturbation agent, or None if no suitable result row exists.

    Raises:
        RuntimeError: if the Noble Coder subprocess fails.
    """
    pert_agent = None
    with tempfile.TemporaryDirectory() as dirname:
        with open("{}/tmp.txt".format(dirname), "w") as f:
            f.write(text)
        command = "java -jar {noble_coder} -terminology NCI_Thesaurus " \
                  "-input {dirname} -output {dirname}".format(noble_coder=noble_coder, dirname=dirname)
        proc = Popen(shlex.split(command), stdout=PIPE, stderr=PIPE)
        out, err = [x.decode("utf-8") for x in proc.communicate()]
        if proc.returncode != 0 or "[Errno" in err:
            raise RuntimeError("Noble Coder failed to complete\nstdout: {}\n stderr {}".format(out, err))
        # FIX: close the results file deterministically instead of leaking
        # the file object returned by a bare open(...).read().
        with open("{}/RESULTS.tsv".format(dirname)) as results:
            data = tablib.Dataset().load(results.read())
        for row in data:
            # Keep the first Amino Acid row that is not a pharmacologic substance.
            if "Amino Acid" in row[4] and "Pharmacologic Substance" not in row[4]:
                pert_agent = row[3]
                break
    return pert_agent
|
20bcee89ddda7c8abc108a42621867017a7e4d63
| 3,640,290
|
def xor(*args):
    """True if exactly one of the arguments is truthy.

    >>> xor(0,1,0,)
    True
    >>> xor(1,2,3,)
    False
    >>> xor(False, False, False)
    False
    >>> xor("kalimera", "kalinuxta")
    False
    >>> xor("", "a", "")
    True
    >>> xor("", "", "")
    False
    """
    truthy_count = sum(1 for arg in args if arg)
    return truthy_count == 1
|
86bbe0350dd18a2508120cec9672661e1aa56ce0
| 3,640,291
|
def zjitter(jitter=0.0, radius=5):
    """
    Simulate scan jitter: the fractional pixel difference when moving the
    laser in the z-direction.

    Returns:
        (state, finalimage, position): a fresh reference state, the
        jitter-averaged image, and the mean particle position.
    """
    psfsize = np.array([2.0, 1.0, 3.0])
    # create a base image of one particle
    s0 = init.create_single_particle_state(imsize=4*radius,
            radius=radius, psfargs={'params': psfsize, 'error': 1e-6})
    sl = np.s_[s0.pad:-s0.pad,s0.pad:-s0.pad,s0.pad:-s0.pad]
    # add up a bunch of trajectories, one z-slice per jittered position
    finalimage = 0*s0.get_model_image()[sl]
    position = 0*s0.obj.pos[0]
    # FIX: `xrange` is Python-2-only; `range` is the Python 3 equivalent.
    for i in range(finalimage.shape[0]):
        # jitter only along the first (z) axis
        offset = jitter*np.random.randn(3)*np.array([1,0,0])
        s0.obj.pos[0] = np.array(s0.image.shape)/2 + offset
        s0.reset()
        finalimage[i] = s0.get_model_image()[sl][i]
        position += s0.obj.pos[0]
    position /= float(finalimage.shape[0])
    # place that into a new image at the expected parameters
    s = init.create_single_particle_state(imsize=4*radius, sigma=0.05,
            radius=radius, psfargs={'params': psfsize, 'error': 1e-6})
    s.reset()
    # measure the true inferred parameters
    return s, finalimage, position
|
7fae2f750cd80708e7cd881a05d535a00b4ecb38
| 3,640,293
|
def append_column(rec, col, name=None, format=None):
    """
    Append a column to the end of a records array.

    Parameters
    ----------
    rec : recarray
        Records array.
    col : array_like
        Array or similar object converted into the new column.
    name : str, optional
        Column name; if None, col.dtypes.name is used.
    format : dtype, optional
        Data type for the new column. Required if col is not an ndarray.

    Returns
    -------
    new_rec : recarray
        New records array with the column appended.
    """
    # Appending is just inserting at the position one past the last column.
    ncols = len(rec.dtype.descr)
    return insert_column(rec, ncols, col, name, format)
|
f851ef69946937cbb100d424ddb8502b906940bd
| 3,640,294
|
import copy
def dfa2nfa(dfa):
    """Deep-copy a DFA into an NFA, lifting the determinism restriction."""
    nfa = copy.deepcopy(dfa)
    # The copy keeps states/transitions; only the determinism flag and the
    # descriptive type label change.
    nfa.transitions._deterministic = False
    nfa.automaton_type = 'Non-Deterministic Finite Automaton'
    return nfa
|
eed8e651a51e71599a38288665604add3d8a0a3d
| 3,640,295
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.