| content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M) |
|---|---|---|
import redis
def migrate_data(conn: redis.StrictRedis, data: dict) -> list:
"""
Uploads the given data to the given redis database connection
"""
pipe = conn.pipeline()
for key, value in data.items():
command_and_formatter = TYPE_TO_PUT_COMMAND[value["type"]]
command = command_and_formatter[0]
formatter = command_and_formatter[1]
redis_method = getattr(pipe, command)
formatted_values = formatter(value["value"])
arguments = [key] + formatted_values
redis_method(*arguments)
return pipe.execute()
|
7e54d3bd0c5e302d3e2f173a53bf6cd6a9a6f6fb
| 3,637,381
|
def dummy_location(db, create_location):
"""Give you a dummy default location."""
loc = create_location(u'Test')
db.session.flush()
return loc
|
ec6ffa3b42e07c88b8224ee2aaaf000853a4169f
| 3,637,382
|
import alpyne  # presumed source of _ROOT_PATH used below
from pathlib import Path
def get_resources_path() -> Path:
""" Convenience method to return the `resources` directory in this project """
return alpyne._ROOT_PATH.joinpath("resources")
|
35b90856e00fbee8aeb373350cef77596a5f2a71
| 3,637,383
|
import math
import numpy as np
def sk_rot_mx(rot_vec):
"""
use Rodrigues' rotation formula to transform the rotation vector into rotation matrix
:param rot_vec:
:return:
"""
theta = np.linalg.norm(rot_vec)
vector = np.array(rot_vec) * math.sin(theta / 2.0) / theta
a = math.cos(theta / 2.0)
b = -vector[0]
c = -vector[1]
d = -vector[2]
return np.array(
[
[
a * a + b * b - c * c - d * d,
2 * (b * c + a * d),
2 * (b * d - a * c)
],
[
2 * (b * c - a * d),
a * a + c * c - b * b - d * d,
2 * (c * d + a * b)
],
[
2 * (b * d + a * c),
2 * (c * d - a * b),
a * a + d * d - b * b - c * c
]
]
)
|
9ba2abfd877d87423db02b224fed30ec59dc90f7
| 3,637,386
|
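A quick sanity check of sk_rot_mx above (a hedged sketch, assuming numpy is available as np): a rotation vector of magnitude pi/2 along z should map the x unit vector onto the y unit vector, and the result should be a proper orthogonal matrix.

import numpy as np

R = sk_rot_mx([0.0, 0.0, np.pi / 2])                                 # rotate by pi/2 about z
print(np.allclose(R @ np.array([1.0, 0.0, 0.0]), [0.0, 1.0, 0.0]))   # True
print(np.allclose(R @ R.T, np.eye(3)))                               # orthogonal
print(np.isclose(np.linalg.det(R), 1.0))                             # determinant +1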
def split_line(line, points, tolerance=1e-9):
"""Split line at point or multipoint, within some tolerance
"""
to_split = snap_line(line, points, tolerance)
return list(split(to_split, points))
|
46a4ae55ff655c864154d37108689d81ad77daf1
| 3,637,387
|
import copy
def cross_validation(docs, values, k):
"""
    docs: Dict mapping each target value to its list of texts
    values: Target values of the texts
    k: Number of cross-validation folds
"""
group_size = {}
confusion_matrix = []
m = {'true':{}, 'false':{}}
for value in values:
group_size[value] = len(docs[value])/k
m['true'][value] = 0
m['false'][value] = 0
for i in xrange(0,k):
training = copy.deepcopy(docs)
confusion_matrix.insert(i, copy.deepcopy(m))
for value in values:
begin = i * group_size[value]
end = (i + 1) * group_size[value]
test = training[value][begin:end]
del training[value][begin:end]
probabilities, vocabulary = learn(training, values)
for doc in test:
prob_value = classify(doc, probabilities, vocabulary, values)
if value == prob_value:
confusion_matrix[i]['true'][value] += 1
else:
confusion_matrix[i]['false'][prob_value] += 1
return confusion_matrix
|
625a45edf45dc88db6e8c3b5342604891f899ebc
| 3,637,389
|
def quote_plus(url, safe='/', encoding=None, errors=None):
"""Wrapper for urllib.parse.quote_plus"""
return uquote_plus(url, safe=safe, encoding=encoding, errors=errors)
|
159a5e1e25bf35ee08b14f6dca871a4d0bb7f411
| 3,637,390
|
def averageSeriesWithWildcards(requestContext, seriesList, *position): #XXX
"""
Call averageSeries after inserting wildcards at the given position(s).
Example:
.. code-block:: none
&target=averageSeriesWithWildcards(host.cpu-[0-7].cpu-{user,system}.value, 1)
This would be the equivalent of
``target=averageSeries(host.*.cpu-user.value)&target=averageSeries(host.*.cpu-system.value)``
"""
if isinstance(position, int):
positions = [position]
else:
positions = position
result = []
matchedList = {}
for series in seriesList:
newname = '.'.join(map(lambda x: x[1], filter(lambda i: i[0] not in positions, enumerate(series.name.split('.')))))
if newname not in matchedList:
matchedList[newname] = []
matchedList[newname].append(series)
for name in matchedList.keys():
result.append( averageSeries(requestContext, (matchedList[name]))[0] )
result[-1].name = name
return result
|
479be75db3498a1882c8a27d1a13de85102c52a6
| 3,637,391
|
def errore_ddp_digitale(V):
"""
    Computes the error of the voltage (ddp) measurement from the digital multimeter,
    assuming the correct scale has been selected.
    The voltage must be given in Volts.
"""
V=absolute(V)
if V<0.2: return sqrt(V**2*25e-6+1e-8)
if V<2: return sqrt(V**2*25e-6+1e-6)
if V<20: return sqrt(V**2*25e-6+1e-4)
if V<200: return sqrt(V**2*25e-6+1e-2)
print("Tollerati valori minori di 200V")
return
|
d6504dfce600f5d9af2af33115c2e07b8033cf03
| 3,637,392
|
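The piecewise formula in errore_ddp_digitale above adds a 0.5 %-of-reading term (V**2 * 25e-6) in quadrature with a fixed, scale-dependent resolution term. A minimal worked example of the arithmetic, assuming numpy provides the math:

import numpy as np

# On the 2 V scale, a 1.5 V reading gives
# sqrt(1.5**2 * 25e-6 + 1e-6) ≈ 0.0076 V
print(np.sqrt(1.5 ** 2 * 25e-6 + 1e-6))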
def extractLine(shape, z = 0):
"""
Extracts a line from a shape line.
"""
x = shape.exteriorpoints()[0][0] - shape.exteriorpoints()[1][0]
y = shape.exteriorpoints()[0][1] - shape.exteriorpoints()[1][1]
return (x, y, z)
|
c61021b1e3dc6372d9d7554a7033bbd3ab128343
| 3,637,395
|
import logging
def get_fragility_model_04(fmodel, fname):
"""
:param fmodel:
a fragilityModel node
:param fname:
path of the fragility file
:returns:
an :class:`openquake.risklib.scientific.FragilityModel` instance
"""
logging.warn('Please upgrade %s to NRML 0.5', fname)
node05 = convert_fragility_model_04(fmodel, fname)
node05.limitStates.text = node05.limitStates.text.split()
return get_fragility_model(node05, fname)
|
62633b156f18c6e722321cf937ea06741aa7a65f
| 3,637,396
|
def substring_in_list(substr_to_find, list_to_search):
"""
Returns a boolean value to indicate whether or not a given substring
is located within the strings of a list.
"""
result = [s for s in list_to_search if substr_to_find in s]
return len(result) > 0
|
77521a1c5d487fa110d5adecb884dd298d2515e5
| 3,637,397
|
import cv2
import numpy as np
def downsample_image(image: np.ndarray, scale: int) -> np.ndarray:
"""Downsamples the image by an integer factor to prevent artifacts."""
if scale == 1:
return image
height, width = image.shape[:2]
if height % scale > 0 or width % scale > 0:
raise ValueError(f'Image shape ({height},{width}) must be divisible by the'
f' scale ({scale}).')
out_height, out_width = height // scale, width // scale
    resized = cv2.resize(image, (out_width, out_height), interpolation=cv2.INTER_AREA)
return resized
|
7d011bda8dc2fccc9782e621bb61d7ab68992640
| 3,637,398
|
def getQueryString( bindings, variableName ):
""" Columns a bunch of data about the bindings. Will return properly formatted strings for
updating, inserting, and querying the SQLite table specified in the bindings dictionary. Will also
return the table name and a string that lists the columns (properly formatted for use in an SQLite
query).
variableName is the name to use for the SQLiteC++ Statement variable in the generated methods.
"""
table = ''
columns = []
queryData = []
insertData = []
updateData = []
whereClaus = []
bindData = []
index = 0
for b in bindings:
# Process table
if (b['type'] == 'table'):
table = b['table']
# Process column
elif (b['type'] == 'column'):
columns.append( b['column'] )
# Process query data
if (b['variableType'] == 'string'):
text = '{variable} = std::string( {query}.getColumn({index}).getText() );'
text = text.format(variable = b['variable'], index = index, query = variableName)
queryData.append( text )
else:
text = '{variable} = {query}.getColumn({index});'
text = text.format(variable = b['variable'], index = index, query = variableName)
queryData.append( text )
index = index + 1
# Process insert data
if (b['variableType'] == 'string' or b['variableType'] == 'char*'):
insertData.append( "\"'\" << " + b['variable'] + " << \"'\"" )
else:
insertData.append( b['variable'] )
# Process id
if (b.get('id')):
whereClaus.append( b['column'] + ' = ?' )
text = 'query.bind({index}, {variableName});'
text = text.format(index = len(whereClaus), variableName = b['variable'])
bindData.append( text )
# Process update data
for i in range(0, len(columns)):
t = columns[i] + '=" << ' + insertData[i]
updateData.append(t)
columns = ', '.join( columns )
updateData = ' << ", '.join( updateData )
insertData = ' << \", " << '.join( insertData )
queryData = '\n'.join( queryData )
whereClaus = ' AND '.join( whereClaus )
bindData = '\n\t'.join( bindData )
return {
'table': table,
'updateData': updateData,
'columns': columns,
'insertData': insertData,
'queryData': queryData,
'whereClaus': whereClaus,
'bindData': bindData
}
|
9cc81601cde229cc5f5bf53ef73997efc515ed2b
| 3,637,399
|
def multiplicities(pattern):
""" Return a dictionary keyed by the geodesics in the given pattern, with values equal to the number of times the geodesic occurs."""
g = geodesics(pattern)
ans = {}
    for i in g:
        ans[i] = ans.get(i, 0) + 1
return ans
|
24eee8bcb3ce39927d1f60e89216b583e9eb12db
| 3,637,400
|
def show_menu():
""" Shows a menu """
print '================== ' + util.HEADER + 'WORKFLOW MENU' + util.ENDC + ' =================='
print '1) Development - Create a git branch off of staging'
print '2) Merge - Merge your development branch to staging (GitHub)'
print '3) Build - Builds project, and attempts to reseed your database for you'
print '4) Setup - Setups project'
print '5) Exit - Exits workflow'
print '==================================================='
choice = raw_input('Enter in a number (1-5): ')
while choice not in ['1', '2', '3', '4', '5']:
choice = raw_input(util.FAIL + 'Invalid Input! ' + util.ENDC + 'Please enter in a number (1-5): ')
return choice
|
27d9733342a4fbbf64bf6635a06329043c843c9d
| 3,637,401
|
def imap4_utf7_decode(data):
"""Decode a folder name from IMAP modified UTF-7 encoding to unicode.
Input is bytes (Python 3) or str (Python 2); output is always
unicode. If non-bytes/str input is provided, the input is returned
unchanged.
"""
if not isinstance(data, bytes):
        return data
return imap_utf7_codec.imap4_utf7_decode(data)
|
9d0acbc22ce3079cff7849f992e924d9610f1154
| 3,637,402
|
def get_characters(character_path, character_dim):
"""Reads list of characters .txt file and returns embedding matrix and mappings from characters to character ids.
Input:
character_path: path to characters.txt
character_dim: integer
Returns:
emb_matrix: Numpy array shape (len(characters)+2, character_dim) containing glove embeddings
(plus PAD and UNK embeddings in first two rows).
The rows of emb_matrix correspond to the word ids given in char2id and id2char
char2id: dictionary mapping char (string) to char id (int)
id2char: dictionary mapping char id (int) to char (string)
"""
print "Loading characters from file: %s" % character_path
vocab_size = 932
char2id = {}
id2char = {}
# put start tokens in the dictionaries
idx = 0
for word in _START_VOCAB:
char2id[word] = idx
id2char[idx] = word
idx += 1
# go through glove vecs
with open(character_path, 'r') as fh:
for line in tqdm(fh, total=vocab_size):
char = line.strip()
char2id[char] = idx
id2char[idx] = char
idx += 1
final_vocab_size = vocab_size + len(_START_VOCAB)
assert len(char2id) == final_vocab_size
assert len(id2char) == final_vocab_size
assert idx == final_vocab_size
emb_matrix = np.random.randn(final_vocab_size, character_dim)
return emb_matrix, char2id, id2char
|
5f523aa15ea03f79cf2fa1a313e6196dbcb1f650
| 3,637,403
|
def french_to_english(french_text: str) -> str:
"""This function translates from french to english
Parameters
----------
french_text : str
french text to translate
Returns
-------
str
translated text
"""
language_translator = translator_instance()
response = language_translator.translate(
text=french_text, model_id="fr-en"
).get_result()
english_text = response["translations"][0]["translation"]
return english_text
|
1c2b4d3526394e22251bf68b2d2f035db9b3e5f6
| 3,637,405
|
from pathlib import Path
from typing import Iterable
def prefit_histograms(
rex_dir: str | Path,
samples: Iterable[str],
region: str,
fit_name: str = "tW",
) -> dict[str, TH1]:
"""Retrieve sample prefit histograms for a region.
Parameters
----------
rex_dir : str or pathlib.Path
Path of the TRExFitter result directory
samples : Iterable(str)
Physics samples of the desired histograms
region : str
Region to get histograms for
fit_name : str
Name of the Fit
Returns
-------
dict(str, tdub.root.TH1)
Prefit histograms.
"""
root_path = Path(rex_dir) / "Histograms" / f"{fit_name}_{region}_histos.root"
root_file = uproot.open(root_path)
histograms = {}
for samp in samples:
h = prefit_histogram(root_file, samp, region)
if h is None:
log.warn(f"Histogram for sample {samp} in region: {region} not found")
histograms[samp] = h
return histograms
|
372a9d223b58473e3a90633b88a8800f7eadeabb
| 3,637,406
|
def ymstring2mjd( ymstr ):
"""
The `ymstring2mjd` function enables array input.
Documentation see the `_ymstring2mjd` function.
"""
ymstr = np.array(ymstr,ndmin=1)
ymstr_count = np.size(ymstr)
mjd = np.zeros(ymstr_count,dtype=np.float_)
for ix in range(ymstr_count):
try:
mjd[ix] = _ymstring2mjd( ymstr[ix] )
except:
mjd[ix] = np.nan
return mjd
|
6f20be92833729ac712fe1a89c545d01015d5af2
| 3,637,407
|
def average(l):
""" Computes average of 2-D list """
llen = len(l)
def divide(x):
return x / float(llen)
return list(map(divide, map(sum, zip(*l))))
|
67395ce4417022a673565a8227c684b7649a5e6a
| 3,637,408
|
def slugify3(text, delim=u'-'):
"""Generates an ASCII-only slug."""
result = []
for word in _punct_re.split(text.lower()):
result.extend(unidecode(word).split())
return unicode(delim.join(result))
|
dbabf44a4681d613d7f7a1a096a32c66640e5185
| 3,637,409
|
from datetime import datetime
def teacher_registeration(request):
"""
    Info: Registration for the teacher.
Request-Body: email_id -> str
password -> str
image -> file
name -> str
date_of_birth -> str
education_qualification -> JSON
Response: message -> str
"""
email = request.data.get("email")
password = request.data.get("password")
image = request.data.get("image")
gender = request.data.get("gender")
name = request.data.get("name")
date_of_birth = request.data.get("date_of_birth")
education_qualification = request.data.get("education_qualification")
check_for_empty(
email, gender, password, date_of_birth, education_qualification, name
)
if Teacher.objects.filter(email=email).exists():
raise AlreadyExistsException("Email-Id already exists")
password = cryptocode.encrypt(password, CRYPTO_SECRET_KEY)
date_of_birth = datetime.strptime(date_of_birth, "%d-%m-%Y").date()
teacher = Teacher(
email=email,
gender=gender,
password=password,
date_of_birth=date_of_birth,
name=name,
education_qualification=education_qualification,
)
image_url = get_image_url_and_upload(image, teacher)
teacher.image_url = image_url
teacher.save()
send_verfication_mail(teacher)
response = {"message": "Verfication Mail Sent"}
return JsonResponse(response, status=201)
|
96152e3c628360d498d497c23c6cd4c6a39a743b
| 3,637,410
|
def check_response(response):
""" Checks that a response is successful, raising the appropriate Exceptions otherwise. """
status_code = response.status_code
if 100 < status_code < 299:
return True
elif status_code == 401 or status_code == 403:
message = get_response_data(response)
raise AuthError('Access Token Error, Received ' + str(status_code) +
' from Outlook REST Endpoint with the message: {}'.format(message))
elif status_code == 400:
message = get_response_data(response)
raise RequestError('The request made to the Outlook API was invalid. Received the following message: {}'.
format(message))
else:
message = get_response_data(response)
raise APIError('Encountered an unknown error from the Outlook API: {}'.format(message))
|
4afd0003619cc90759778f513e2a692a7b81309d
| 3,637,411
|
from typing import Iterator
from typing import Tuple
def walk_storage_from_command(command: instances.FilesRelatedCommand,
filesystem: Filesystem
) -> Iterator[Tuple[str, str, str]]:
"""Typical iteration by command settings."""
return walk(command.storage_folder, filesystem,
command.branch, command.leaf)
|
675e476fb2dcd2253181b7b4eeedbf43b58db54f
| 3,637,412
|
import logging
def parse_csv_data(csv_filename: str) -> list[str]:
    """Returns contents of 'csv_filename' as list of strings by row"""
    try:
        with open(csv_filename) as csv_file:
            return csv_file.readlines()
except FileNotFoundError:
logging.warning("File with path '%s' not found", csv_filename)
return []
|
daf826d97a983b8ab3b1ebeb1b063b890c256236
| 3,637,413
|
import math
def _interpolate_sym(y0, Tkk, f_Tkk, y_half, f_yj, hs, H, k, atol, rtol,
seq=(lambda t: 4*t-2)):
"""
Symmetric dense output formula; used for example with the midpoint method.
It calculates a polynomial to interpolate any value from t0 (time at y0) to
t0+H (time at Tkk). Based on Dense Output for the GBS Method, II.9 pg
237-239.
    Returns a polynomial that fulfills the conditions at II.9.40 (step 3). Note
    that these coefficients were calculated for the shifted
    polynomial with x -> x-1/2.
Parameters
----------
y0 : float
solution of ODE at the previous step, at t0
Tkk : float
solution of ODE once the step was taken, at t0+H
f_Tkk : float
function evaluation at Tkk, t0+H
y_half : 2D array
array containing for each extrapolation value (1...k) an array with the
intermediate (at half the integration interval) solution value.
f_yj : 3D array
array containing for each extrapolation value (1...k) an array with all
the function evaluations done at the intermediate solution values.
hs : array
array containing for each extrapolation value (1...k) the inner step
taken, H/nj (II.9.1 ref I)
H : float
integration step to take (the output, without interpolation, will be
calculated at t_curr+h) This value matches with the value H in ref I
and ref II.
k : int
number of extrapolation steps to take in this step (determines the
number of extrapolations performed to achieve a better integration
output, equivalent to the size of the extrapolation tableau).
rtol, atol : float
the input parameters rtol (relative tolerance) and atol (absolute
tolerance) determine the error control performed by the solver. See
function _error_norm(y1, y2, atol, rtol).
seq : callable(i), int i>=1
the step-number sequence (examples II.9.1 , 9.6, 9.35 ref I).
Returns
-------
    poly : callable(t)
interpolation polynomial (see definition of poly(t) function in
_interpolation_poly())
"""
u = 2*k-3
u_1 = u - 1
ds = _compute_ds(y_half, f_yj, hs, k, seq=seq)
a_u = (u+5)*[None]
a_u_1 = (u_1+5)*[None]
for i in range(u+1):
a_u[i] = (H**i)*ds[i]/math.factorial(i)
a_u_1[0:u_1+1] = 1*a_u[0:u_1+1]
def A_inv(u):
return (2**(u-2))*np.matrix(
[[(-2*(3 + u))*(-1)**u, -(-1)**u, 2*(3 + u), -1],
[(4*(4 + u))*(-1)**u, 2*(-1)**u, 4*(4 + u), -2],
[(8*(1 + u))*(-1)**u, 4*(-1)**u, -8*(1 + u), 4],
[(-16*(2 + u))*(-1)**u, -8*(-1)**u, -16*(2 + u), 8]]
)
A_inv_u = A_inv(u)
A_inv_u_1 = A_inv(u_1)
b1_u = 1*y0
b1_u_1 = 1*y0
for i in range(u_1+1):
b1_u -= a_u[i]/(-2)**i
b1_u_1 -= a_u_1[i]/(-2)**i
b1_u -= a_u[u]/(-2)**u
b2_u = H*f_yj[1][0]
b2_u_1 = H*f_yj[1][0]
for i in range(1, u_1+1):
b2_u -= i* a_u[i]/(-2)**(i-1)
b2_u_1 -= i*a_u_1[i]/(-2)**(i-1)
b2_u -= u*a_u[u]/(-2)**(u-1)
b3_u = 1*Tkk
b3_u_1 = 1*Tkk
for i in range(u_1+1):
b3_u -= a_u[i]/(2**i)
b3_u_1 -= a_u_1[i]/(2**i)
b3_u -= a_u[u]/(2**u)
b4_u = H*f_Tkk
b4_u_1 = H*f_Tkk
for i in range(1, u_1+1):
b4_u -= i* a_u[i]/(2**(i-1))
b4_u_1 -= i*a_u_1[i]/(2**(i-1))
b4_u -= u*a_u[u]/(2**(u-1))
b_u = np.array([b1_u,b2_u,b3_u,b4_u])
b_u_1 = np.array([b1_u_1,b2_u_1,b3_u_1,b4_u_1])
x = A_inv_u*b_u
x = np.array(x)
x_1 = A_inv_u_1*b_u_1
x_1 = np.array(x_1)
a_u[u+1] = x[0]
a_u[u+2] = x[1]
a_u[u+3] = x[2]
a_u[u+4] = x[3]
a_u_1[u_1+1] = x_1[0]
a_u_1[u_1+2] = x_1[1]
a_u_1[u_1+3] = x_1[2]
a_u_1[u_1+4] = x_1[3]
return _interpolation_poly(a_u, a_u_1, H, 0.5, atol, rtol)
|
a8ab6765a2eff3c3a847c79f3524059cca831796
| 3,637,415
|
def _gtin_fails_checksum(gtin: str) -> bool:
"""Determines if the provided gtin violates the check digit calculation.
Args:
gtin: a string representing the product's GTIN
Returns:
True if the gtin fails check digit validation, otherwise False.
"""
padded_gtin = gtin.zfill(14)
existing_check_digit = int(padded_gtin[-1])
target_check_digit = _calculate_check_digit(padded_gtin[:-1])
return target_check_digit != existing_check_digit
|
04d3857fd66e592938fa352be7a59f1784e3e00f
| 3,637,416
|
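_gtin_fails_checksum above depends on a _calculate_check_digit helper that is not shown. A hedged sketch of such a helper, assuming the standard GS1 mod-10 rule (weights alternate 3 and 1, starting with 3 on the digit adjacent to the check digit):

def _calculate_check_digit(partial_gtin: str) -> int:
    """Standard GS1 mod-10 check digit for the 13 leading digits of a GTIN-14."""
    total = sum(
        int(digit) * (3 if position % 2 == 0 else 1)
        for position, digit in enumerate(reversed(partial_gtin))
    )
    return (10 - total % 10) % 10

# Example: EAN-13 4006381333931 zero-padded to GTIN-14 has check digit 1.
print(_calculate_check_digit("0400638133393"))  # 1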
import operator
def mergeGuideInfo(seq, startDict, pamPat, otMatches, inputPos, effScores, sortBy=None):
"""
merges guide information from the sequence, the efficiency scores and the off-targets.
creates rows with too many fields. Probably needs refactoring.
for each pam in startDict, retrieve the guide sequence next to it and score it
sortBy can be "effScore", "mhScore", "oofScore" or "pos"
"""
allEnzymes = readEnzymes()
guideData = []
guideScores = {}
hasNotFound = False
pamIdToSeq = {}
pamSeqs = list(flankSeqIter(seq.upper(), startDict, len(pamPat), True))
for pamId, pamStart, guideStart, strand, guideSeq, pamSeq, pamPlusSeq in pamSeqs:
# matches in genome
# one desc in last column per OT seq
if pamId in otMatches:
pamMatches = otMatches[pamId]
guideSeqFull = concatGuideAndPam(guideSeq, pamSeq)
mutEnzymes = matchRestrEnz(allEnzymes, guideSeq, pamSeq, pamPlusSeq)
posList, otDesc, guideScore, guideCfdScore, last12Desc, ontargetDesc, \
subOptMatchCount = \
makePosList(pamMatches, guideSeqFull, pamPat, inputPos)
# no off-targets found?
else:
posList, otDesc, guideScore = None, "Not found", None
guideCfdScore = None
last12Desc = ""
hasNotFound = True
mutEnzymes = []
ontargetDesc = ""
subOptMatchCount = False
seq34Mer = None
guideRow = [guideScore, guideCfdScore, effScores.get(pamId, {}), pamStart, guideStart, strand, pamId, guideSeq, pamSeq, posList, otDesc, last12Desc, mutEnzymes, ontargetDesc, subOptMatchCount]
guideData.append( guideRow )
guideScores[pamId] = guideScore
pamIdToSeq[pamId] = guideSeq
if sortBy == "pos":
sortFunc = (lambda row: row[3])
reverse = False
elif sortBy is not None and sortBy!="spec":
sortFunc = (lambda row: row[2].get(sortBy, 0))
reverse = True
else:
sortFunc = operator.itemgetter(0)
reverse = True
guideData.sort(reverse=reverse, key=sortFunc)
return guideData, guideScores, hasNotFound, pamIdToSeq
|
39ea084851f6ba6c1726ed181ac4f3ab10b71472
| 3,637,417
|
def search_orf(seq:str, min_orf:int) -> list:
"""Search full orf over ceration length in 6 frames"""
scod = "M"
send = "*"
orf_regions = {}
# Load 6 reading frames
seq1 = seq
seq2 = seq1[1: ]
seq3 = seq1[2: ]
seq4 = rc_seq(seq1)
seq5 = seq4[1: ]
seq6 = seq4[2: ]
# Shrink to times of 3
seq1 = seq1[: len(seq1)//3*3]
seq2 = seq2[: len(seq2)//3*3]
seq3 = seq3[: len(seq3)//3*3]
seq4 = seq4[: len(seq4)//3*3]
seq5 = seq5[: len(seq5)//3*3]
seq6 = seq6[: len(seq6)//3*3]
# Translate 6 frames
trans1 = translate_exon(seq1)
trans2 = translate_exon(seq2)
trans3 = translate_exon(seq3)
trans4 = translate_exon(seq4)
trans5 = translate_exon(seq5)
trans6 = translate_exon(seq6)
# All the start and stop codons
start1 = [id_ for id_, cod in enumerate(trans1) if cod == scod]
start2 = [id_ for id_, cod in enumerate(trans2) if cod == scod]
start3 = [id_ for id_, cod in enumerate(trans3) if cod == scod]
start4 = [id_ for id_, cod in enumerate(trans4) if cod == scod]
start5 = [id_ for id_, cod in enumerate(trans5) if cod == scod]
start6 = [id_ for id_, cod in enumerate(trans6) if cod == scod]
end1 = [id_ for id_, cod in enumerate(trans1) if cod == send]
end2 = [id_ for id_, cod in enumerate(trans2) if cod == send]
end3 = [id_ for id_, cod in enumerate(trans3) if cod == send]
end4 = [id_ for id_, cod in enumerate(trans4) if cod == send]
end5 = [id_ for id_, cod in enumerate(trans5) if cod == send]
end6 = [id_ for id_, cod in enumerate(trans6) if cod == send]
if start1 and end1:
pos1 = start1[0]
pos2 = end1[0]
s_i = 0
e_i = 0
while s_i < len(start1):
# search for stop codon
pos1 = start1[s_i]
while e_i < len(end1) -1:
if pos2 < pos1:
e_i += 1
pos2 = end1[e_i]
else:
break
# No full orf
if pos2 < pos1:
break
# Identify orf
else:
if pos2 - pos1 >min_orf:
orf_regions[(pos1*3 + 1, pos2*3 + 3)] = trans1[pos1: pos2 +1]
s_i += 1
while s_i < len(start1):
pos1 = start1[s_i]
if pos1 < pos2:
s_i += 1
continue
break
if start2 and end2:
pos1 = start2[0]
pos2 = end2[0]
s_i = 0
e_i = 0
while s_i < len(start2):
# search for stop codon
pos1 = start2[s_i]
while e_i < len(end2) -1:
if pos2 < pos1:
e_i += 1
pos2 = end2[e_i]
else:
break
# No full orf
if pos2 < pos1:
break
# Identify orf
else:
if pos2 - pos1 >min_orf:
orf_regions[(pos1*3 + 2, pos2*3 + 4)] = trans2[pos1: pos2 +1]
s_i += 1
while s_i < len(start2):
pos1 = start2[s_i]
if pos1 < pos2:
s_i += 1
continue
break
if start3 and end3:
pos1 = start3[0]
pos2 = end3[0]
s_i = 0
e_i = 0
while s_i < len(start3):
# search for stop codon
pos1 = start3[s_i]
while e_i < len(end3) -1:
if pos2 < pos1:
e_i += 1
pos2 = end3[e_i]
else:
break
# No full orf
if pos2 < pos1:
break
# Identify orf
else:
if pos2 - pos1 >min_orf:
orf_regions[(pos1*3 + 3, pos2*3 + 5)] = trans3[pos1: pos2 +1]
s_i += 1
while s_i < len(start3):
pos1 = start3[s_i]
if pos1 < pos2:
s_i += 1
continue
break
if start4 and end4:
pos1 = start4[0]
pos2 = end4[0]
s_i = 0
e_i = 0
while s_i < len(start4):
# search for stop codon
pos1 = start4[s_i]
while e_i < len(end4) -1:
if pos2 < pos1:
e_i += 1
pos2 = end4[e_i]
else:
break
# No full orf
if pos2 < pos1:
break
# Identify orf
else:
if pos2 - pos1 >min_orf:
orf_regions[(len(seq) - pos1*3, len(seq) - pos2*3 -2)] = trans4[pos1: pos2 +1]
s_i += 1
while s_i < len(start4):
pos1 = start4[s_i]
if pos1 < pos2:
s_i += 1
continue
break
if start5 and end5:
pos1 = start5[0]
pos2 = end5[0]
s_i = 0
e_i = 0
while s_i < len(start5):
# search for stop codon
pos1 = start5[s_i]
while e_i < len(end5) -1:
if pos2 < pos1:
e_i += 1
pos2 = end5[e_i]
else:
break
# No full orf
if pos2 < pos1:
break
# Identify orf
else:
if pos2 - pos1 >min_orf:
orf_regions[(len(seq) - pos1*3 -1, len(seq) - pos2*3 -3,)] = trans5[pos1: pos2 +1]
s_i += 1
while s_i < len(start5):
pos1 = start5[s_i]
if pos1 < pos2:
s_i += 1
continue
break
if start6 and end6:
pos1 = start6[0]
pos2 = end6[0]
s_i = 0
e_i = 0
while s_i < len(start6):
# search for stop codon
pos1 = start6[s_i]
while e_i < len(end6) -1:
if pos2 < pos1:
e_i += 1
pos2 = end6[e_i]
else:
break
# No full orf
if pos2 < pos1:
break
# Identify orf
else:
if pos2 - pos1 >min_orf:
orf_regions[(len(seq) - pos1*3 -2, len(seq) - pos2*3 -4)] = trans6[pos1: pos2 +1]
s_i += 1
while s_i < len(start6):
pos1 = start6[s_i]
if pos1 < pos2:
s_i += 1
continue
break
return orf_regions
def prot_dotplot(seq1: str, seq2: str, length=10, score="BLOSUM62.dat"):
"""Generate protein dotplot."""
score_dict = load_blastp_score(score)
seq1 = seq1.upper()
seq2 = seq2.upper()
# Generate score
score_m1 = np.zeros((len(seq2), len(seq1)), dtype=int)
for id_1, aa_1 in enumerate(seq1):
for id_2, aa_2 in enumerate(seq2):
score_m1[id_2][id_1] = score_dict[(aa_1, aa_2)]
dot_score = np.zeros((len(seq2) - length + 1, len(seq1) - length +1), dtype=int)
for id_1 in range(len(seq2) - length + 1):
for id_2 in range(len(seq1) - length + 1):
score_ = 0
for id_ in range(length):
                score_ += score_m1[id_1 + id_][id_2 + id_]
            dot_score[id_1][id_2] = score_
    return dot_score
|
8d4e4c5d18e12aec5511bc06e5f0f532aec6e532
| 3,637,418
|
import json
def getPath():
"""
    Gets the path from ./metadata.json
"""
with open('metadata.json', 'r') as openfile:
global path
json_object = json.load(openfile)
pairs = json_object.items()
path = json_object["renamer"]["path"]
return path
|
03047172e653b4b4aee7f096a67291ad460969c9
| 3,637,419
|
from typing import Optional
def read_ann_h5ad(file_path, spatial_key: Optional[str] = None):
"""
read the h5ad file in Anndata format, and generate the object of StereoExpData.
:param file_path: h5ad file path.
:param spatial_key: use .obsm[`'spatial_key'`] as position. If spatial data, must set.
:return: StereoExpData obj.
"""
data = StereoExpData(file_path=file_path)
# basic
# attributes = ["obsm", "varm", "obsp", "varp", "uns", "layers"]
# df_attributes = ["obs", "var"]
with h5py.File(data.file, mode='r') as f:
for k in f.keys():
if k == "raw" or k.startswith("raw."):
continue
if k == "X":
if isinstance(f[k], h5py.Group):
data.exp_matrix = h5ad.read_group(f[k])
else:
data.exp_matrix = h5ad.read_dataset(f[k])
elif k == "raw":
assert False, "unexpected raw format"
elif k == "obs":
cells_df = h5ad.read_dataframe(f[k])
data.cells.cell_name = cells_df.index.values
data.cells.total_counts = cells_df['total_counts'] if 'total_counts' in cells_df.keys() else None
data.cells.pct_counts_mt = cells_df['pct_counts_mt'] if 'pct_counts_mt' in cells_df.keys() else None
data.cells.n_genes_by_counts = cells_df['n_genes_by_counts'] if 'n_genes_by_counts' in cells_df.keys() else None
elif k == "var":
genes_df = h5ad.read_dataframe(f[k])
data.genes.gene_name = genes_df.index.values
# data.genes.n_cells = genes_df['n_cells']
# data.genes.n_counts = genes_df['n_counts']
elif k == 'obsm':
if spatial_key is not None:
if isinstance(f[k], h5py.Group):
data.position = h5ad.read_group(f[k])[spatial_key]
else:
data.position = h5ad.read_dataset(f[k])[spatial_key]
else: # Base case
pass
return data
|
fbe003cd011833b5dad215fd2ae7eea59a58aa8d
| 3,637,420
|
def get_token(token_method, acc=None, vo=None, idt=None, pwd=None):
"""
Gets a token with the token_method provided.
:param token_method: the method to get the token
:param acc: Rucio account string
:param idt: Rucio identity string
:param pwd: Rucio password string (in case of userpass auth_type)
:returns: None or token string
"""
if not acc:
acc = request.environ.get('HTTP_X_RUCIO_ACCOUNT')
if not vo:
vo = request.environ.get('HTTP_X_RUCIO_VO')
if not idt:
idt = request.environ.get('SSL_CLIENT_S_DN')
if not idt.startswith('/'):
idt = '/%s' % '/'.join(idt.split(',')[::-1])
if not (acc and vo and idt):
return None
try:
if pwd:
token = token_method(acc, idt, pwd, 'webui', request.environ.get('REMOTE_ADDR'), vo=vo).token
else:
token = token_method(acc, idt, 'webui', request.environ.get('REMOTE_ADDR'), vo=vo).token
return token
except:
return None
|
b04a60546e1aefdc2b8a2d2b0b7019f61c11ecc3
| 3,637,421
|
def valid_float_0_to_1(val):
"""
:param val: Object to check, then throw an error if it is invalid
:return: val if it is a float between 0 and 1 (otherwise invalid)
"""
return validate(val, lambda x: 0 <= float(x) <= 1, float,
'Value must be a number between 0 and 1')
|
81b92a1f8d3212905c2080d995da65d84916fae9
| 3,637,422
|
import numpy as np
import torchvision.datasets as dsets            # presumed source of `dsets`
import torchvision.transforms as transforms     # presumed source of `transforms`
def get_usps_data():
    """
    Loads the USPS digit dataset via torchvision and returns train/test data and labels as numpy arrays.
    """
trainset = dsets.USPS(root='./data',
train=True,
transform=transforms.ToTensor(),
download=True)
testset = dsets.USPS(root='./data',
train=False,
transform=transforms.ToTensor(),
download=True)
traindata = np.array(trainset.data)
trainlabels = np.array(trainset.targets)
testdata = np.array(testset.data)
testlabels = np.array(testset.targets)
print('-' * 40)
    print('USPS dataset raw data info (pytorch):')
    print("--->USPS dataset train data's shape :", traindata.shape, 'dim:', traindata.ndim)
    print("--->USPS dataset train label's shape:", trainlabels.shape, 'dim', trainlabels.ndim)
    print("--->USPS dataset test data's shape:", testdata.shape, 'dim:', testset.data.ndim)
    print("--->USPS dataset test label's shape:", testlabels.shape, 'dim', testlabels.ndim)
return traindata,trainlabels,testdata,testlabels
|
2bb47e25b6d15e4b8a2e597320d9adfee6ff3008
| 3,637,423
|
def logout():
"""
Logs out a user
Returns:
        (Response): a JSON response confirming logout, with the JWT cookies unset
"""
res = {}
try:
response = jsonify({"msg": "logout successful"})
unset_jwt_cookies(response)
return make_response(response), 200
except Exception as e:
res["data"] = None
res["msg"] = str(e)
return make_response(jsonify(res)), 400
|
478da4504b2804990f7e2f2c8de7a5578e23b64e
| 3,637,424
|
def bm_reduction(mat):
""" Performs the Bloch-Messiah decomposition of single mode thermal state.
Said decomposition writes a gaussian state as a a thermal squeezed-rotated-displaced state
The function returns the thermal population, rotation angle and squeezing parameters
"""
if mat.shape != (2, 2):
raise ValueError("Covariance matrix mat must be 2x2")
detm = np.linalg.det(mat)
nth = 0.5*(np.sqrt(detm)-1)
mm = mat/np.sqrt(detm)
a = mm[0, 0]
b = mm[0, 1]
r = -0.5*np.arccosh((1+a*a+b*b)/(2*a))
theta = 0.5*np.arctan2((2*a*b), (-1+a*a-b*b))
return nth, theta, r
|
1e69f610e885140442cc81d62969e96c69b294b8
| 3,637,425
|
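A minimal sanity check of bm_reduction above, assuming numpy is imported as np: for a purely thermal single-mode covariance matrix (2n+1)*I the decomposition should return the thermal population n with zero rotation and zero squeezing.

import numpy as np

n = 0.7
mat = (2 * n + 1) * np.eye(2)
nth, theta, r = bm_reduction(mat)
print(np.isclose(nth, n), np.isclose(theta, 0.0), np.isclose(r, 0.0))  # True True True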
def pinlattice_2ring_full():
"""Full, non-test instance of PinLattice object for testing
Subchannel object"""
n_ring = 2
pitch = 1.0
d_pin = 0.5
return dassh.PinLattice(n_ring, pitch, d_pin)
|
8b78274bf7ebe33808885fab24d73aaff6ed17b2
| 3,637,426
|
def dmenu_view_previous_entry(entry, folders):
"""View previous entry
    Args: entry (Item), folders
Returns: entry (Item)
"""
if entry is not None:
text = view_entry(entry, folders)
type_text(text)
return entry
|
d39cbe86be00563bfd77c2d9254e51df726185db
| 3,637,427
|
def unary_to_gast(node):
"""
Takes unary operation such as ! and converts it to generic AST.
javascript makes negative numbers unary expressions. This is our
current workaround.
"""
if node.operator == "-":
return {"type": "num", "value": node.argument.value * -1}
return {
"type": "unaryOp",
"op": node.operator,
"arg": js_router.node_to_gast(node.argument)
}
|
5cf86896008e38510a8d6133e9f795c76b75a608
| 3,637,428
|
def _norm_intensity(spectrum_intensity: np.ndarray) -> np.ndarray:
"""
Normalize spectrum peak intensities.
Parameters
----------
spectrum_intensity : np.ndarray
The spectrum peak intensities to be normalized.
Returns
-------
np.ndarray
The normalized peak intensities.
"""
return spectrum_intensity / np.linalg.norm(spectrum_intensity)
|
daba7d3a33ea4baf630332e9528ede82dabe6691
| 3,637,429
|
def axes_to_list(axes_data: dict) -> list:
"""helper method to convert a dict of sensor axis graphs to a 2d array for graphing
"""
axes_tuples = axes_data.items()
axes_list = [axes[1].tolist() for axes in axes_tuples]
return axes_list
|
fb2e5ef1f2283e2f31e5c8828a3ec7ef94869c5c
| 3,637,430
|
from typing import List
def list(
repo_info: str,
git_host: str = DEFAULT_GIT_HOST,
use_cache: bool = True,
commit: str = None,
protocol: str = DEFAULT_PROTOCOL,
) -> List[str]:
"""Lists all entrypoints available in repo hubconf.
:param repo_info:
a string with format ``"repo_owner/repo_name[:tag_name/:branch_name]"`` with an optional
tag/branch. The default branch is ``master`` if not specified.
Example: ``"brain_sdk/MegBrain[:hub]"``
:param git_host:
host address of git repo.
Example: github.com
:param use_cache:
whether to use locally cached code or completely re-fetch.
:param commit:
commit id on github or gitlab.
:param protocol:
which protocol to use to get the repo, and HTTPS protocol only supports public repo on github.
The value should be one of HTTPS, SSH.
:return:
all entrypoint names of the model.
"""
hubmodule = _init_hub(repo_info, git_host, use_cache, commit, protocol)
return [
_
for _ in dir(hubmodule)
if not _.startswith("__") and callable(getattr(hubmodule, _))
]
|
2b9a635a8ee97ecd46fa47b06b6d872b562d7ceb
| 3,637,431
|
from itertools import repeat, starmap
from typing import Callable, Optional
def repeatfunc(func: Callable, times: Optional[int] = None, *args):
"""Repeat calls to func with specified arguments.
Example: repeatfunc(random.random)
:param func: function to be called
:param times: amount of call times
"""
if times is None:
return starmap(func, repeat(args))
return starmap(func, repeat(args, times))
|
cae37683fd45a66b0ebec6ee0ab5a07d0cb128d7
| 3,637,432
|
from typing import Iterable
import functools
import operator
def prod(values: Iterable[int]) -> int:
"""Compute the product of the integers."""
return functools.reduce(operator.mul, values)
|
3f03200078daf1b0b27f777e7744144ab72ec7af
| 3,637,433
|
def get_stars_dict(stars):
"""
Transform list of stars into dictionary where keys are their names
Parameters
----------
stars : list, iterable
Star objects
Return
------
dict
Stars dictionary
"""
x = {}
for st in stars:
try:
x[st.name] = st
except:
pass
return x
|
6d627be48a96d8ba93bd13511a05c251f3a3f169
| 3,637,434
|
from typing import Tuple
def initialize_molecular_pos(
key: PRNGKey,
nchains: int,
ion_pos: Array,
ion_charges: Array,
nelec_total: int,
init_width: float = 1.0,
dtype=jnp.float32,
) -> Tuple[PRNGKey, Array]:
"""Initialize a set of plausible initial electron positions.
For each chain, each electron is assigned to a random ion and then its position is
sampled from a normal distribution centered at that ion with diagonal covariance
with diagonal entries all equal to init_width.
If there are no more electrons than there are ions, the assignment is done without
replacement. If there are more electrons than ions, the assignment is done with
replacement, and the probability of choosing ion i is its relative charge (as a
fraction of the sum of the ion charges).
"""
nion = len(ion_charges)
replace = True
if nelec_total <= nion:
replace = False
assignments = []
for _ in range(nchains):
key, subkey = jax.random.split(key)
choices = jax.random.choice(
subkey,
nion,
shape=(nelec_total,),
replace=replace,
p=ion_charges / jnp.sum(ion_charges),
)
assignments.append(ion_pos[choices])
elecs_at_ions = jnp.stack(assignments, axis=0)
key, subkey = jax.random.split(key)
return key, elecs_at_ions + init_width * jax.random.normal(
subkey, elecs_at_ions.shape, dtype=dtype
)
|
f9d787bcf24010a81789b3f6f2d65f7ea2582a95
| 3,637,435
|
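A hedged usage sketch of initialize_molecular_pos above, with hypothetical ion positions and charges (assumes jax and jax.numpy are installed): with 4 electrons and 2 ions, electrons are assigned to ions with replacement, weighted by charge.

import jax
import jax.numpy as jnp

key = jax.random.PRNGKey(0)
ion_pos = jnp.array([[0.0, 0.0, 0.0], [1.5, 0.0, 0.0]])   # hypothetical geometry
ion_charges = jnp.array([1.0, 3.0])
key, elec_pos = initialize_molecular_pos(
    key, nchains=8, ion_pos=ion_pos, ion_charges=ion_charges, nelec_total=4
)
print(elec_pos.shape)  # (8, 4, 3): chains x electrons x spatial dimensions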
def urlencode(query, *args, **kwargs):
"""Handle nested form-data queries and serialize them appropriately.
There are times when a website expects a nested form data query to be sent
but, the standard library's urlencode function does not appropriately
handle the nested structures. In that case, you need this function which
will flatten the structure first and then properly encode it for you.
When using this to send data in the body of a request, make sure you
specify the appropriate Content-Type header for the request.
.. code-block:: python
import requests
from requests_toolbelt.utils import formdata
query = {
'my_dict': {
'foo': 'bar',
            'biz': 'baz',
},
'a': 'b',
}
resp = requests.get(url, params=formdata.urlencode(query))
# or
resp = requests.post(
url,
data=formdata.urlencode(query),
headers={
'Content-Type': 'application/x-www-form-urlencoded'
},
)
Similarly, you can specify a list of nested tuples, e.g.,
.. code-block:: python
import requests
from requests_toolbelt.utils import formdata
query = [
('my_list', [
('foo', 'bar'),
('biz', 'baz'),
]),
('a', 'b'),
]
resp = requests.get(url, params=formdata.urlencode(query))
# or
resp = requests.post(
url,
data=formdata.urlencode(query),
headers={
'Content-Type': 'application/x-www-form-urlencoded'
},
)
For additional parameter and return information, see the official
`urlencode`_ documentation.
.. _urlencode:
https://docs.python.org/3/library/urllib.parse.html#urllib.parse.urlencode
"""
expand_classes = (dict, list, tuple)
original_query_list = _to_kv_list(query)
if not all(_is_two_tuple(i) for i in original_query_list):
raise ValueError("Expected query to be able to be converted to a "
"list comprised of length 2 tuples.")
query_list = original_query_list
while any(isinstance(v, expand_classes) for _, v in query_list):
query_list = _expand_query_values(query_list)
return _urlencode(query_list, *args, **kwargs)
|
e73d532d7d4bc6b2534ec2e9224e2ef91074b94e
| 3,637,436
|
import scipy
def zca_whiten_np(images, epsilon=1e-6):
"""Whitening the images using numpy/scipy.
Stolen from https://github.com/keras-team/keras-preprocessing/blob/master/keras_preprocessing/image/image_data_generator.py
A good answer on ZCA vs. PCA: https://stats.stackexchange.com/questions/117427/what-is-the-difference-between-zca-whitening-and-pca-whitening
Parameters
----------
images : np.array, shape (B, H, W, C)
Returns
-------
whitened : np.array, same shape as input
"""
B, H, W, C = images.shape
# Make image into vectors
flatten = np.reshape(images, (B, H * W * C))
# Set the batch mean to 0
mean = np.mean(flatten)
flatten = flatten - mean
# Get covariance matrix
# (H * W * C, H * W * C)
sigma = np.dot(flatten.T, flatten) / B
# (H * W * C, H * W * C), (H * W * C,)
# u: Unitary matrix having left singular vectors as columns
u, s, _ = scipy.linalg.svd(sigma)
s_inv = 1. / np.sqrt(s[np.newaxis] + epsilon)
principal_components = (u * s_inv).dot(u.T)
whitex = np.dot(flatten, principal_components)
whitex = np.reshape(whitex, (B, H, W, C))
whitex = whitex.astype(np.float32) # just in case
return whitex
|
7937cfc928e5eb04166d789bbdb58f82bd4ecc35
| 3,637,437
|
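A hedged usage sketch of zca_whiten_np above: when the batch has more images than flattened features, the whitened output should have an (approximately) identity second-moment matrix.

import numpy as np

rng = np.random.RandomState(0)
images = rng.rand(256, 4, 4, 3).astype(np.float32)   # 256 images, 48 features each
white = zca_whiten_np(images)
flat = white.reshape(256, -1)
second_moment = flat.T @ flat / 256
print(white.shape)                                        # (256, 4, 4, 3)
print(np.allclose(second_moment, np.eye(48), atol=1e-2))  # True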
def set_kernel(kernel, **kwargs):
"""kernelsを指定する
Parameters
----------
kernel : str or :obj:`gpytorch.kernels`
使用するカーネル関数を指定する
基本はstrで指定されることを想定しているものの、自作のカーネル関数を入力することも可能
**kwargs : dict
カーネル関数に渡す設定
Returns
-------
out : :obj:`gpytorch.kernels`
カーネル関数のインスタンス
"""
if isinstance(kernel, str):
if kernel in {'CosineKernel'}:
return ScaleKernel(
CosineKernel(**kwargs)
)
elif kernel in {'LinearKernel'}:
return ScaleKernel(
LinearKernel(**kwargs)
)
elif kernel in {'MaternKernel'}:
return ScaleKernel(
MaternKernel(**kwargs)
)
elif kernel in {'PeriodicKernel'}:
return ScaleKernel(
PeriodicKernel(**kwargs)
)
elif kernel in {'RBFKernel'}:
return ScaleKernel(
RBFKernel(**kwargs)
)
elif kernel in {'RQKernel'}:
return ScaleKernel(
RQKernel(**kwargs)
)
elif kernel in {'SpectralMixtureKernel'}:
            # SpectralMixtureKernel cannot be wrapped in ScaleKernel
return SpectralMixtureKernel(**kwargs)
else:
raise ValueError
elif kernels.__name__ in str(type(kernel)):
return kernel
|
6ac8e4655ea775233e4470200893039c53e2d3d6
| 3,637,438
|
def holding_period_return(multivariate_df: pd.DataFrame, lag: int, ending_lag: int = 0, skip_nan: bool = False):
"""
Calculate the rolling holding period return for each column
Holding period return for stock = Price(t - ending_lag) / Price(t - lag) - 1
:param multivariate_df: DataFrame
:param lag: int
:param ending_lag: int used to shift the final observation backwards
:param skip_nan: bool
:return: DataFrame
"""
return _general_dataframe_function(multivariate_df=multivariate_df, func=_holding_period_return, return_lag=None,
skip_nan=skip_nan, lag=lag, ending_lag=ending_lag)
|
d0470a36704d11865323f1b07717f6d433ea59f5
| 3,637,439
|
def _unpack_var(var):
"""
Parses key : value pair from `var`
Parameters
----------
var : str
Entry from HEAD file
Returns
-------
name : str
Name of attribute
value : object
Value of attribute
Examples
--------
>>> var = "type = integer-attribute\\nname = BRICK_TYPES\\ncount = 1\\n1\\n"
>>> name, attr = _unpack_var(var)
>>> print(name, attr)
BRICK_TYPES 1
>>> var = "type = string-attribute\\nname = TEMPLATE_SPACE\\ncount = 5\\n'ORIG~"
>>> name, attr = _unpack_var(var)
>>> print(name, attr)
TEMPLATE_SPACE ORIG
"""
err_msg = ('Please check HEAD file to ensure it is AFNI compliant. '
f'Offending attribute:\n{var}')
atype, aname = TYPE_RE.findall(var), NAME_RE.findall(var)
if len(atype) != 1:
raise AFNIHeaderError(f'Invalid attribute type entry in HEAD file. {err_msg}')
if len(aname) != 1:
raise AFNIHeaderError(f'Invalid attribute name entry in HEAD file. {err_msg}')
atype = _attr_dic.get(atype[0], str)
attr = ' '.join(var.strip().splitlines()[3:])
if atype is not str:
try:
attr = [atype(f) for f in attr.split()]
except ValueError:
raise AFNIHeaderError('Failed to read variable from HEAD file '
f'due to improper type casting. {err_msg}')
else:
# AFNI string attributes will always start with open single quote and
# end with a tilde (NUL). These attributes CANNOT contain tildes (so
# stripping is safe), but can contain single quotes (so we replace)
attr = attr.replace('\'', '', 1).rstrip('~')
return aname[0], attr[0] if len(attr) == 1 else attr
|
ea23bee3bdddda47c3ab608e41b086e5e8796bc8
| 3,637,440
|
import torch
def get_representation(keypoint_coordinates: torch.Tensor,
image: torch.Tensor,
feature_map: torch.Tensor) -> (torch.Tensor, torch.Tensor):
"""
:param keypoint_coordinates: Tensor of key-point coordinates in (N, 2/3)
:param image: Tensor of current image in (N, C, H, W)
:param feature_map: Tensor of feature map for key-point in (N, H', W')
:return:
"""
N, C, H, W = image.shape
# Feature maps are converted to 0-1 masks given a threshold
alpha = 0.5
mask = torch.round(feature_map).unsqueeze(1).to(image.device) # (N, H', W'), rounds to the closest integer
# Use erosion iteratively
intensities = []
erosion_kernel = torch.ones(size=(3, 3)).to(image.device)
_img = mask
count = 0
while True:
_morphed = erosion(_img,
kernel=erosion_kernel,
engine='convolution')
_morphed = F.interpolate(input=_morphed, size=(H, W))
_img = torch.mul(_morphed, image)
if count == 0:
laplacian_img = laplacian(input=_img, kernel_size=3)
laplacian_sum = laplacian_img.sum(dim=(1, 2, 3))
count += 1
intensity = _img.sum(dim=(1, 2, 3))
intensities.append(intensity)
if - 1e-3 <= intensity.mean() <= 1e-3:
break
features = torch.empty(size=(image.shape[0], 5)).to(image.device)
for n in range(image.shape[0]):
features[n, ...] = torch.tensor([
keypoint_coordinates[n, 0],
keypoint_coordinates[n, 1],
intensities[-1][n],
intensities[-2][n] if len(intensities) >= 2 else intensities[-1][n],
intensities[-3][n] if len(intensities) >= 3 else intensities[-1][n]
])
return features, laplacian_sum
|
0373f45121020b868dda2bd84dd650f2a398f635
| 3,637,442
|
async def async_setup(opp: OpenPeerPowerType, config: ConfigType):
"""Set up the System Health component."""
opp.components.websocket_api.async_register_command(handle_info)
return True
|
993af39e4f1b59b535f800578df743d1dd27b925
| 3,637,443
|
import requests
def getSoup(url: str, ftrs: str = "html5lib") -> bsp:
"""
Function to extract soup from the url passed in, returns a bsp object.
"""
rspns = requests.get(url)
return bsp(rspns.content, ftrs)
|
ebd1d0914591b71b38e0977a4cdf86c964afb3c5
| 3,637,444
|
def _fit_HoRT(T_ref, HoRT_ref, a_low, a_high, T_mid):
"""Fit a[5] coefficient in a_low and a_high attributes given the
dimensionless enthalpy
Parameters
----------
T_ref : float
Reference temperature in K
HoRT_ref : float
Reference dimensionless enthalpy
T_mid : float
Temperature to fit the offset
Returns
-------
a6_low_out : float
Lower a6 value for NASA polynomial
a6_high_out : float
Higher a6 value for NASA polynomial
"""
a6_low_out = (HoRT_ref - get_nasa_HoRT(a=a_low, T=T_ref)) * T_ref
a6_high = (HoRT_ref - get_nasa_HoRT(a=a_high, T=T_ref)) * T_ref
# Correcting for offset
H_low_last_T = get_nasa_HoRT(a=a_low, T=T_mid) + a6_low_out / T_mid
H_high_first_T = get_nasa_HoRT(a=a_high, T=T_mid) + a6_high / T_mid
H_offset = H_low_last_T - H_high_first_T
a6_high_out = T_mid * (a6_high / T_mid + H_offset)
return a6_low_out, a6_high_out
|
a849ef860dd68f32fef012149eb1fc2a594b2691
| 3,637,445
|
from glob import glob
from netCDF4 import Dataset  # presumed source of Dataset used below
def get_q_k_size(database,elph_save):
"""
Get number of k and q points in the (IBZ) grids
"""
# kpoints
db = Dataset(database+"/SAVE/ns.db1")
Nk = len(db.variables['K-POINTS'][:].T)
db.close()
# qpoints
Nq = len(glob('./elph_dir/s.dbph_0*'))
return Nq,Nk
|
7ed941d40e90ba81e505e8f93e135b7cce256167
| 3,637,446
|
def false(feedback, msg, comment, alias_used="false"):
"""
Marks a post as a false positive
:param feedback:
:param msg:
:return: String
"""
post_data = get_report_data(msg)
if not post_data:
raise CmdException("That message is not a report.")
post_url, owner_url = post_data
feedback_type = FALSE_FEEDBACKS[alias_used]
feedback_type.send(post_url, feedback)
post_id, site, post_type = fetch_post_id_and_site_from_url(post_url)
add_false_positive((post_id, site))
user = get_user_from_url(owner_url)
if user is not None:
if feedback_type.blacklist:
add_whitelisted_user(user)
result = "Registered " + post_type + " as false positive and whitelisted user."
elif is_blacklisted_user(user):
remove_blacklisted_user(user)
result = "Registered " + post_type + " as false positive and removed user from the blacklist."
else:
result = "Registered " + post_type + " as false positive."
else:
result = "Registered " + post_type + " as false positive."
try:
if msg.room.id != 11540:
msg.delete()
except Exception: # I don't want to dig into ChatExchange
pass
if comment:
Tasks.do(Metasmoke.post_auto_comment, comment, feedback.owner, url=post_url)
return result if not feedback_type.always_silent else ""
|
6b894eaa3debbdb8029cd6cd9ab5074f63622b5f
| 3,637,447
|
def so3_to_SO3_(so3mat):
"""
Convert so(3) to SO(3)
Parameters
----------
so3mat (tf.Tensor):
so(3)
N x 3 x 3
Returns
------
ret (tf.Tensor):
SO(3)
N x 3 x 3
"""
omgtheta = so3_to_vec(so3mat)
c_1 = near_zero(tf.norm(omgtheta,axis=1))
c_2 = tf.math.logical_not(c_1)
b_1 = tf.cast(c_1, tf.int32)
b_2 = tf.cast(c_2, tf.int32)
idx_1 = tf.cast(tf.squeeze(tf.where(b_1), axis=1), tf.int32)
idx_2 = tf.cast(tf.squeeze(tf.where(b_2), axis=1), tf.int32)
partitions = b_1*0 + b_2*1
partitioned_inp = tf.dynamic_partition(so3mat, partitions, 2)
inp_1 = partitioned_inp[0]
inp_2 = partitioned_inp[1]
ret_1 = tf.tile( tf.expand_dims(tf.eye(3), axis=0), tf.stack([tf.shape(idx_1)[0], 1, 1], 0))
omgtheta_2 = so3_to_vec(inp_2)
theta_2 = tf.expand_dims(angvel_to_axis_ang(omgtheta_2)[1], axis=1)
omgmat_2 = inp_2 / theta_2
ret_2 = tf.eye(3) + tf.sin(theta_2) * omgmat_2 + (1 - tf.cos(theta_2)) * tf.matmul(omgmat_2,omgmat_2)
rets = [ret_1,ret_2]
ids = [idx_1,idx_2]
return tf.dynamic_stitch(ids,rets)
|
2c8b6e857acb9943e519b0604d9ef47ef80329ab
| 3,637,448
|
from typing import Dict
def find_worst_offenders(
all_resource_type_stats: Dict[str, ResourceTypeStats],
version: str,
) -> Dict[str, ResourceTypeStats]:
"""
Finds the resource types with the worst polymorphing and nesting
"""
# find the resource type with the most number of shapes
most_polymorphic_resource_type = None
deepest_resource_type_by_mean = None
deepest_resource_type_by_max = None
for resource_type, resource_type_stats in all_resource_type_stats.items():
if version not in resource_type_stats.counts:
continue
shapes = resource_type_stats.shapes[version]
depths = resource_type_stats.depths[version]
if most_polymorphic_resource_type is None or len(shapes) > len(
all_resource_type_stats[most_polymorphic_resource_type].shapes[version]
):
most_polymorphic_resource_type = resource_type
if (
deepest_resource_type_by_mean is None
or depths.mean()
> all_resource_type_stats[deepest_resource_type_by_mean]
.depths[version]
.mean()
):
deepest_resource_type_by_mean = resource_type
if (
deepest_resource_type_by_max is None
or depths.max()
> all_resource_type_stats[deepest_resource_type_by_max]
.depths[version]
.max()
):
deepest_resource_type_by_max = resource_type
return {
"version": version,
"most_polymorphic": all_resource_type_stats[most_polymorphic_resource_type],
"deepest_by_mean": all_resource_type_stats[deepest_resource_type_by_mean],
"deepest_by_max": all_resource_type_stats[deepest_resource_type_by_max],
}
|
73a4a8ce2b303f75be544753fde2f74fce8a39b1
| 3,637,449
|
def optimize(nn_last_layer, correct_label, learning_rate, num_classes):
"""
    Build the TensorFlow loss and optimizer operations.
:param nn_last_layer: TF Tensor of the last layer in the neural network
:param correct_label: TF Placeholder for the correct label image
:param learning_rate: TF Placeholder for the learning rate
:param num_classes: Number of classes to classify
:return: Tuple of (logits, train_op, cross_entropy_loss)
"""
#Find logits --> reshape last layer so that rows represents all pixels and
#columns represents classes
logits = tf.reshape(nn_last_layer, (-1, num_classes), name="fcn_logits")
correct_label_reshaped = tf.reshape(correct_label, (-1, num_classes))
#calculate distance from actual labels using cross entropy
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=correct_label_reshaped[:])
#take mean for total loss
loss_op = tf.reduce_mean(cross_entropy, name="fcn_loss")
#optimizer to reduce loss
train_op = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss_op, name="fcn_train_op")
return logits, train_op, loss_op
|
587ed3efcc1eb3e8c910a66590e8afd79f80627e
| 3,637,450
|
async def all_pairs(factory, weth, dai, wbtc, paused_token):
"""all_pairs set up a very specific arbitrage opportunity.
    We want an opportunity that requires less than 2 WETH and provides significant profit so as
to be able to separate profit from gas costs. If the numbers do not make sense it is because
they are crafted to produce a high arbitrage opportunity and high slippage.
"""
p0 = await factory.setup_pair(
[weth, dai],
[
BigNumber(small / 500 / 300.0),
BigNumber(small / 500),
],
)
p1 = await factory.setup_pair(
[wbtc, dai],
[
BigNumber(large / 10000.0),
BigNumber(large / 10),
],
)
p2 = await factory.setup_pair(
[wbtc, weth],
[
BigNumber(large / 10000.0),
BigNumber(large / 300.0 + 31000),
],
)
p3 = await factory.setup_pair(
[paused_token, weth],
[
BigNumber(large / 10000.0),
BigNumber(large / 285.0),
],
)
paused_token.pause()
return await async_map(create_pool, [p0, p1, p2, p3])
|
be03091afbcc95b6f4a46436881525f6cd4fdb63
| 3,637,451
|
def meeting_point(a, b, window=100, start=0):
"""
Determines the point where the moving average of a meets that of b
"""
cva = np.convolve(a, np.ones((window,))/window, mode='valid')
cvb = np.convolve(b, np.ones((window,))/window, mode='valid')
for x, (val_a, val_b) in enumerate(zip(cva, cvb)):
if x > start and val_a > val_b:
return x
return -1
|
aa97ab0baed01f36e44dda4f21f6a1b2de3b86d9
| 3,637,452
|
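A small illustration of meeting_point above, assuming numpy is imported as np: series a ramps up linearly while b stays flat, so the moving average of a overtakes b roughly where the ramp crosses 0.25.

import numpy as np

a = np.linspace(0.0, 1.0, 200)         # ramps from 0 to 1
b = np.full(200, 0.25)                 # flat line
print(meeting_point(a, b, window=10))  # index where the smoothed a first exceeds b (about 46 here)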
from typing import Dict, Optional, TypeVar

T = TypeVar("T")
def fixed_dictionaries(
mapping: Dict[T, SearchStrategy[Ex]],
*,
optional: Optional[Dict[T, SearchStrategy[Ex]]] = None,
) -> SearchStrategy[Dict[T, Ex]]:
"""Generates a dictionary of the same type as mapping with a fixed set of
keys mapping to strategies. ``mapping`` must be a dict subclass.
Generated values have all keys present in mapping, with the
corresponding values drawn from mapping[key]. If mapping is an
instance of OrderedDict the keys will also be in the same order,
otherwise the order is arbitrary.
If ``optional`` is passed, the generated value *may or may not* contain each
key from ``optional`` and a value drawn from the corresponding strategy.
Examples from this strategy shrink by shrinking each individual value in
the generated dictionary, and omitting optional key-value pairs.
"""
check_type(dict, mapping, "mapping")
for k, v in mapping.items():
check_strategy(v, "mapping[%r]" % (k,))
if optional is not None:
check_type(dict, optional, "optional")
for k, v in optional.items():
check_strategy(v, "optional[%r]" % (k,))
if type(mapping) != type(optional):
raise InvalidArgument(
"Got arguments of different types: mapping=%s, optional=%s"
% (nicerepr(type(mapping)), nicerepr(type(optional)))
)
if set(mapping) & set(optional):
raise InvalidArgument(
"The following keys were in both mapping and optional, "
"which is invalid: %r" % (set(mapping) & set(optional))
)
return FixedAndOptionalKeysDictStrategy(mapping, optional)
return FixedKeysDictStrategy(mapping)
|
b37429efc30989eb180dc39849cfcc2d719af6b1
| 3,637,453
|
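A hedged usage sketch of the fixed_dictionaries strategy described above, via the public hypothesis API: the required key is always present, while the optional key may be omitted.

from hypothesis import given, strategies as st

@given(st.fixed_dictionaries(
    {"name": st.text()},
    optional={"age": st.integers(min_value=0)},
))
def test_person(person):
    assert "name" in person                 # required key always present
    assert set(person) <= {"name", "age"}   # optional key may be missing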
def estimate_R0(model, curves: pd.DataFrame, method="OLS", **kwargs) -> ValueStd:
"""
Estimate R0 from epidemic curves and model.
{args}
Returns:
A ValueStd with R0 and its associated standard deviation.
See Also:
naive_R0
OLS_R0
"""
return METHODS_R0[method](model, curves, **kwargs)
|
0b2443a2af250871afba542c77d73632dc005dc1
| 3,637,454
|
def get_bone_list(armature, layer_list):
"""Get Bone name List of selected layers"""
ret = []
for bone in armature.data.bones:
if is_valid_layer(bone.layers, layer_list):
ret.append(bone.name)
return ret
|
1ed2c4030e962c0bc88119c5e5a18e7e12b49f04
| 3,637,455
|
def clean_packages_list(packages):
"""
Remove comments from the package list
"""
lines = []
for line in packages:
if not line.startswith("#"):
lines.append(line)
return lines
|
a6c942f9b90c8f6c610ba0b57728f3da48f35ded
| 3,637,456
|
import random
def IndividualBuilder(size, possList, probList):
"""
Args:
size (int) - the list size to be created
        possList - a list of the possible mutation
            types (mutation, deletion, ...)
        probList - a list of the probabilities of the possible
            mutations occurring.
Returns:
individual (list)
"""
if(len(list(possList)) != len(list(probList))):
raise Exception('len(PossArr) != len(ProbArr)')
individual = [0]*size
random.seed()
for i in range(size):
for j in range(len(possList)):
if(random.random() <= probList[j]):
individual[i] = possList[j]
return individual
|
055d582fffbc2e13a25a17012831c098fc89330d
| 3,637,457
|
def getConfigId(dsn_string, test_data):
"""Returns the integer ID of the configuration name used in this run."""
# If we have not already done so, we query the local DB for the ID
# matching this sqlbench config name. If none is there, we insert
# a new record in the bench_config table and return the newly generated
# identifier.
benchmark_name = test_data['config_name']
query = "SELECT config_id FROM bench_config WHERE name = '%s'" %benchmark_name
retcode, result= execute_query(query, dsn_string=dsn_string)
if len(result) == 0:
# Insert a new record for this config and return the new ID...
query = "INSERT INTO bench_config (config_id, name) VALUES (NULL, '%s')" %benchmark_name
retcode, result= execute_query(query, dsn_string=dsn_string)
return getConfigId(dsn_string, test_data)
else:
config_id= int(result[0][0])
return config_id
|
2f0f4581f36e1e50bf5f006ea950b2611c194aeb
| 3,637,458
|
def invert_contactmap(cmap):
"""Method to invert a contact map
:param :py:obj:`~conkit.core.ContactMap` cmap: the contact map of interest
    :returns: an inverted_cmap: the contact map corresponding to the inverted sequence (1-res_seq) \
(:py:obj:`~conkit.core.ContactMap`)
"""
inverted_cmap = ContactMap('inverted')
highest_residue_number = max([max(contact.id) for contact in cmap])
for contact in cmap:
new_contact = Contact(highest_residue_number + 1 - contact.res1_seq,
highest_residue_number + 1 - contact.res2_seq,
contact.raw_score)
inverted_cmap.add(new_contact)
inverted_cmap.sequence = cmap.sequence
return inverted_cmap
|
6b13bbe75b1184854a2686c12c6ae5f824383cdd
| 3,637,460
|
def indexes_with_respect_to_y(Y):
"""
    Groups the indices of Y by category and returns them.
    Parameters
    ----------
    Y : 1-D numpy array-like containing a single output,
        corresponding to a categorical variable.
Returns
-------
List of indexes corresponding to each group.
"""
categories = np.unique(Y)
n_c = categories.shape[0]
assert n_c >= 2
indexes = []
for cat in list(categories):
i_index = np.where(Y == cat)[0]
indexes.append(i_index)
return indexes
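# Minimal example (not part of the source): one index array per category.
import numpy as np
Y = np.array([0, 1, 0, 1, 1])
indexes_with_respect_to_y(Y)  # -> [array([0, 2]), array([1, 3, 4])]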
|
b0ad9062916c3c7d76acf7e699b6a7e7798ac054
| 3,637,461
|
def rrotate(x, disp):
"""Rotate x's bits to the right by disp."""
if disp == 0:
return x
elif disp < 0:
return lrotate(x, -disp)
disp &= 31
x = trim(x)
return trim((x >> disp) | (x << (32 - disp)))
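# Minimal example (not part of the source), assuming `trim` masks to 32 bits and
# `lrotate` is the matching left rotation defined elsewhere in this module:
hex(rrotate(0x12345678, 8))   # -> '0x78123456'
hex(rrotate(0x12345678, -8))  # delegates to lrotate -> '0x34567812'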
|
9ac6a4504de42a7f9f280ae169d523af6a228ce1
| 3,637,462
|
import logging
import copy
def search_for_start ( r, X, w0, applythreshold, hf0, pm=(.85,.93,1.,1.07,1.15), storeopt=False, modulation=False, doublemodulation=False):
"""Search for starting values
:Parameters:
*d*
data set
*w0*
coarse starting values
*pm*
increments or decrements
"""
logging.info ( "Initial preoptimization" )
Mwh = model.history_model ( r, X[:,:hf0],
applythreshold=applythreshold,
w0=w0,
lm=0.1, hf0=hf0, emiter=100 )
Mnh = model.history_model ( r, X[:,:hf0],
applythreshold=applythreshold,
w0=w0,
lm=0.1, hf0=hf0, emiter=100 )
M0 = model.history_model ( r, X[:,:hf0],
applythreshold=applythreshold,
w0=w0,
lm=0.1, hf0=hf0, emiter=100 )
if modulation:
Mhmod = model.history_model ( r, X[:,:hf0],
applythreshold=applythreshold,
w0=w0,
lm=0.1, hf0=hf0, emiter=100 )
# add the history weights the model with history
Mwh.w = np.concatenate ( (Mwh.w,np.zeros(6,'d')) )
Mwh.X = copy.copy(X)
Mwh.X = Mwh.X[:,:-2] # only the part that has no modulation
# add those same weights + modulatory terms to the hmod model
Mhmod.w = np.concatenate ( (Mhmod.w,np.zeros(15,'d')) )
Mhmod.X = X
# to check that the sizes work
print("X",np.shape(X))
print("Mwh",Mwh.w,np.shape(Mwh.X))
print("Mhmod",Mhmod.w,np.shape(Mhmod.X))
elif doublemodulation:
Mhmod = model.history_model ( r, X[:,:hf0],
applythreshold=applythreshold,
w0=w0,
lm=0.1, hf0=hf0, emiter=100 )
# add the history weights to the model with just history
Mwh.w = np.concatenate ( (Mwh.w,np.zeros(6,'d')) )
Mwh.X = copy.copy(X)
Mwh.X = Mwh.X[:,:-3] # only the part that has no modulation
# add those same weights + modulatory terms to the hmod model
Mhmod.w = np.concatenate ( (Mhmod.w,np.zeros(24,'d')) )
Mhmod.X = X
# to check that the sizes work
print("X",np.shape(X))
print("Mwh",Mwh.w,np.shape(Mwh.X))
print("Mhmod",Mhmod.w,np.shape(Mhmod.X))
else:
Mwh.w = np.concatenate ( (Mwh.w,np.zeros(6,'d')) )
Mwh.X = X
Mhmod = [] # return empty
nhind = 0
whind = 0
i = 1
for al in pm:
for lm in pm:
logging.info ( "::::: Optimizing from starting value %d :::::" % (i,) )
w0 = M0.w.copy()
w0[1:hf0] *= al
p0 = M0.pi.copy()
            p0[0] *= lm
p0[-1] = 1-p0[0]-p0[1]
if modulation or doublemodulation:
M_ = model.history_model ( r, X,
applythreshold=applythreshold,
w0=w0, p0=p0, nu0=M0.nu,
lm=0.1, hf0=hf0, verbose=True, emiter=300, storeopt=storeopt )
if Mhmod.loglikelihood < M_.loglikelihood:
logging.info ( " *model chosen for history + modulation*" )
Mhmod = M_
whind = i
if modulation:
M_ = model.history_model ( r, X[:,:-2],
applythreshold=applythreshold,
w0=w0, p0=p0, nu0=M0.nu,
lm=0.1, hf0=hf0, verbose=True, emiter=300, storeopt=storeopt )
elif doublemodulation:
M_ = model.history_model ( r, X[:,:-3],
applythreshold=applythreshold,
w0=w0, p0=p0, nu0=M0.nu,
lm=0.1, hf0=hf0, verbose=True, emiter=300, storeopt=storeopt )
if Mwh.loglikelihood < M_.loglikelihood:
logging.info ( " *model chosen for history*" )
Mwh = M_
whind = i
else:
M_ = model.history_model ( r, X,
applythreshold=applythreshold,
w0=w0, p0=p0, nu0=M0.nu,
lm=0.1, hf0=hf0, verbose=True, emiter=300, storeopt=storeopt )
if Mwh.loglikelihood < M_.loglikelihood:
logging.info ( " *model chosen for history*" )
Mwh = M_
whind = i
M_ = model.history_model ( r, X[:,:hf0],
applythreshold=applythreshold,
w0=w0, p0=p0, nu0=M0.nu,
lm=0.1, hf0=hf0, verbose=True, emiter=300, storeopt=storeopt )
if Mnh.loglikelihood < M_.loglikelihood:
logging.info ( " *model chosen for independent*" )
Mnh = M_
nhind = i
i += 1
logging.info ( "Mwh.w = %s\nMnh.w = %s" % (str(Mwh.w),str(Mnh.w)) )
logging.info ( "Mwh.ll = %g\nMnh.ll = %s" % (Mwh.loglikelihood,Mnh.loglikelihood) )
logging.info ( "Starting values:\n with history: %d\n without history: %d\n" % (whind,nhind) )
# NOW, THE HISTORY ONLY MODEL HAS SIZE 22!
print("X",np.shape(X))
print("Mwh",Mwh.w,np.shape(Mwh.X))
if modulation:
print("Mhmod",Mhmod.w,np.shape(Mhmod.X))
return Mnh,Mwh,Mhmod
|
d9a28ff06992624a3d60fcbe6e7f483281c7a12d
| 3,637,465
|
def point_sample(input, points, align_corners=False, **kwargs):
"""
A wrapper around :func:`grid_sample` to support 3D point_coords tensors
Unlike :func:`torch.nn.functional.grid_sample` it assumes point_coords to
lie inside ``[0, 1] x [0, 1]`` square.
Args:
input (Tensor): Feature map, shape (N, C, H, W).
points (Tensor): Image based absolute point coordinates (normalized),
range [0, 1] x [0, 1], shape (N, P, 2) or (N, Hgrid, Wgrid, 2).
align_corners (bool): Whether align_corners. Default: False
Returns:
Tensor: Features of `point` on `input`, shape (N, C, P) or
(N, C, Hgrid, Wgrid).
"""
def denormalize(grid):
"""Denormalize input grid from range [0, 1] to [-1, 1]
Args:
grid (Tensor): The grid to be denormalize, range [0, 1].
Returns:
Tensor: Denormalized grid, range [-1, 1].
"""
return grid * 2.0 - 1.0
add_dim = False
if points.dim() == 3:
add_dim = True
points = paddle.unsqueeze(points, axis=2)
output = F.grid_sample(
input, denormalize(points), align_corners=align_corners, **kwargs)
if add_dim:
output = paddle.squeeze(output, axis=3)
return output
|
31e5d19c1eff260d337b4eee65b4eed08a1e070b
| 3,637,466
|
from typing import Any
from time import time
from flask import render_template  # assumed framework providing render_template below
def login_render(auth_url: str) -> Any:
"""Return login page.
Arguments:
auth_url {str} -- Link to last.fm authorization page.
"""
return render_template("login.html", auth_url=auth_url, timestamp=time())
|
a1569ac51d6ce38feba6d263e03404a4af9ee566
| 3,637,468
|
def get_genres_from_games(games, their_games):
"""
From the games we will get the same genres
"""
genres = set()
for d in games:
n = d['id']
if n in their_games:
genres.add(d['Genre'])
return genres
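# Minimal example (not part of the source): only games whose id is in
# `their_games` contribute their genre.
games = [{"id": 1, "Genre": "RPG"}, {"id": 2, "Genre": "Racing"}, {"id": 3, "Genre": "RPG"}]
get_genres_from_games(games, their_games={1, 3})  # -> {'RPG'}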
|
27bbf3c5ba40c6443e12b4119943c40879ceb622
| 3,637,471
|
def webfinger(request):
"""
A thin wrapper around Bridgy Fed's implementation of WebFinger.
In most cases, this view simply redirects to the same endpoint at Bridgy.
However, Bridgy does not support the ``mailto:`` and ``xmpp:`` resource
schemes - quite reasonably, since there's no possible way to discover the
``acct:`` they go with! - so resources with those schemes are translated
locally into an ``https:`` URL representing the same person, and *then*
redirected to Bridgy.
Additionally, WebFinger requests with a missing or malformed resource will
be rejected immediately rather than passed on to Bridgy.
Note that the translation step will only be applied if there exists a
:model:`users.User` with matching email or XMPP address. Otherwise, the
original resource will be preserved in the redirect - and likely fail to
find anything at Bridgy's end either.
"""
if 'resource' not in request.GET:
return HttpResponseBadRequest('resource parameter missing')
resource = request.GET['resource']
try:
res = urlparse(resource)
except ValueError:
return HttpResponseBadRequest('resource parameter malformed')
if res.scheme in ('mailto', 'xmpp'):
try:
resource = https_resource_matching(res)
except User.DoesNotExist:
pass
query = urlencode({'resource': resource})
return HttpResponseRedirect(BRIDGY_FED + '?' + query)
|
86b9f28cc49fd3a253ad916a426385394ae8fed3
| 3,637,472
|
def inflection_points(points, rise_axis, run_axis):
"""
    Find the list of vertices that precede inflection points in a curve. The
curve is differentiated with respect to the coordinate system defined by
`rise_axis` and `run_axis`.
Interestingly, `lambda x: 2*x + 1` should have no inflection points, but
almost every point on the line is detected. It's because a zero or zero
crossing in the second derivative is necessary but not sufficient to
detect an inflection point. You also need a higher derivative of odd
order that's non-zero. But that gets ugly to detect reliably using sparse
finite differences. Just know that if you've got a straight line this
method will go a bit haywire.
rise_axis: A vector representing the vertical axis of the coordinate system.
    run_axis: A vector representing the horizontal axis of the coordinate system.
    returns: a list of points in space corresponding to the vertices that
    immediately precede inflection points in the curve
"""
vg.shape.check(locals(), "points", (-1, 3))
vg.shape.check(locals(), "rise_axis", (3,))
vg.shape.check(locals(), "run_axis", (3,))
coords_on_run_axis = points.dot(run_axis)
coords_on_rise_axis = points.dot(rise_axis)
# Take the second order finite difference of the curve with respect to the
# defined coordinate system
finite_difference_1 = np.gradient(coords_on_rise_axis, coords_on_run_axis)
finite_difference_2 = np.gradient(finite_difference_1, coords_on_run_axis)
# Compare the product of all neighboring pairs of points in the second
# derivative. If a pair of points has a negative product, then the second
# derivative changes sign between those points. These are the inflection
# points.
is_inflection_point = np.concatenate(
[finite_difference_2[:-1] * finite_difference_2[1:] <= 0, [False]]
)
return points[is_inflection_point]
|
c0044d0a46bc286c0b827fd557bdba74a07812a0
| 3,637,475
|
def compile(raw_model):
"""Compile a raw model.
Parameters
----------
raw_model : list of dict
A raw GPTC model.
Returns
-------
dict
A compiled GPTC model.
"""
categories = {}
for portion in raw_model:
text = gptc.tokenizer.tokenize(portion['text'])
category = portion['category']
try:
categories[category] += text
except KeyError:
categories[category] = text
categories_by_count = {}
names = []
for category, text in categories.items():
        if category not in names:
names.append(category)
categories_by_count[category] = {}
for word in text:
try:
categories_by_count[category][word] += 1/len(categories[category])
except KeyError:
categories_by_count[category][word] = 1/len(categories[category])
word_weights = {}
for category, words in categories_by_count.items():
for word, value in words.items():
try:
word_weights[word][category] = value
except KeyError:
word_weights[word] = {category:value}
model = {}
for word, weights in word_weights.items():
total = sum(weights.values())
model[word] = []
for category in names:
model[word].append(weights.get(category, 0)/total)
model['__names__'] = names
model['__version__'] = 2
model['__raw__'] = raw_model
return model
|
87607fdccac51acf367f0d7722b20ee8795f866b
| 3,637,476
|
def get_elements_html_by_attribute(*args, **kwargs):
"""Return the html of the tag with the specified attribute in the passed HTML document"""
return [whole for _, whole in get_elements_text_and_html_by_attribute(*args, **kwargs)]
|
726a3a6b8753fd6513f4860393076c9e3298a390
| 3,637,477
|
def patched_requests_mocker(requests_mock):
"""
This function mocks various PANOS API responses so we can accurately test the instance
"""
base_url = "{}:{}/api/".format(integration_params['server'], integration_params['port'])
# Version information
mock_version_xml = """
<response status = "success">
<result>
<sw-version>9.0.6</sw-version>
<multi-vsys>off</multi-vsys>
<model>Panorama</model>
<serial>FAKESERIALNUMBER</serial>
</result>
</response>
"""
version_path = "{}{}{}".format(base_url, "?type=version&key=", integration_params['key'])
requests_mock.get(version_path, text=mock_version_xml, status_code=200)
mock_response_xml = """
<response status="success" code="20">
<msg>command succeeded</msg>
</response>
"""
requests_mock.post(base_url, text=mock_response_xml, status_code=200)
return requests_mock
|
828425e11e38468ab2aacef397b6375c0ec65d6a
| 3,637,478
|
def validate(model, model_name: str, dataloader_valid, class_weights, epoch: int,
             validations_dir: str, save_oof=True):
    """
    Validate model at the epoch end
    Input:
        model: current model
        model_name: model name used when saving visualisations
        dataloader_valid: dataloader for the validation fold
        class_weights: per-class weights for the cross-entropy losses
        epoch: current epoch
        validations_dir: directory for saving prediction visualisations
        save_oof: boolean flag, if calculate oof predictions and save them in pickle
    Output:
        loss_valid: total validation loss (mean classification + mean segmentation loss)
    """
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    print(device)
    with torch.no_grad():
        model.eval()
        val_cls_losses, val_seg_losses = [], []
        progress_bar = tqdm(dataloader_valid, total=len(dataloader_valid))
        for iter_num, (img, target, sample_ids) in enumerate(progress_bar):
            img = img.to(device)  # [N, 3, H, W]
            target = target.to(device)  # [N, H, W] with class indices (0, 1)
            # Image-level labels derived from the masks (assumption: an image is
            # positive if its mask contains any foreground pixel)
            labels = target.view(target.size(0), -1).max(dim=1).values.long()
            (mask_pred, cls_pred) = model(img)  # [N, 2, H, W] | prediction
            loss_seg = F.cross_entropy(mask_pred, target, weight=class_weights)
            loss_cls = F.cross_entropy(cls_pred, labels, weight=class_weights)
            val_seg_losses.append(loss_seg.detach().cpu().numpy())
            val_cls_losses.append(loss_cls.detach().cpu().numpy())
            # Visualize the first prediction
            if iter_num == 0:
                visualize_predictions(img, mask_pred, target, validations_dir, model_name, epoch)
        print(f"Epoch {epoch}, Valid Classification Loss: {np.mean(val_cls_losses)}, "
              f"Valid Segmentation Loss: {np.mean(val_seg_losses)}")
    return np.mean(val_cls_losses) + np.mean(val_seg_losses)
|
d9e1b172b9c28f30b80e43969cb39b4b1054a4b6
| 3,637,479
|
def shared_empty(dim=2, dtype=None):
"""
Shortcut to create an empty Theano shared variable with
the specified number of dimensions.
"""
if dtype is None:
dtype = theano.config.floatX
shp = tuple([1] * dim)
return theano.shared(np.zeros(shp, dtype=dtype))
|
d199a069b4a47eeb97f2a87b6c35ed797764eb9f
| 3,637,480
|
def add_kwds(dictionary, key, value):
"""
A simple helper function to initialize our dictionary if it is None and then add in a single keyword if
the value is not None.
It doesn't add any keywords at all if passed value==None.
Parameters
----------
dictionary: dict (or None)
A dictionary to copy and update. If none it will instantiate a new dictionary.
key: str
A the key to add to the dictionary
value: object (or None)
A value to add to the dictionary. If None then no key, value pair will be added to the dictionary.
Returns
-------
dictionary
A copy of dictionary with (key,value) added into it or a new dictionary with (key,value) in it.
"""
if dictionary is None:
kwds = {}
else:
kwds = dictionary.copy()
if (value is not None) and (key is not None):
kwds.update({key: value})
return kwds
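# Minimal example (not part of the source): the input dictionary is never mutated.
base = {"color": "red"}
add_kwds(base, "size", 10)    # -> {'color': 'red', 'size': 10}
add_kwds(base, "size", None)  # -> {'color': 'red'} (None values are skipped)
add_kwds(None, "size", 10)    # -> {'size': 10}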
|
96aa104f86e521e419d51096b6c1f86e4b506c57
| 3,637,481
|
def _get_cindex(circ, name, index):
"""
Find the classical bit index.
Args:
circ: The Qiskit QuantumCircuit in question
name: The name of the classical register
index: The qubit's relative index inside the register
Returns:
The classical bit's absolute index if all registers are concatenated.
"""
ret = 0
for reg in circ.cregs:
if name != reg.name:
ret += reg.size
else:
return ret + index
return ret + index
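# Hedged usage sketch (not part of the source), assuming Qiskit's register API:
# with classical registers of sizes 2 and 3, bit 1 of the second register sits
# at absolute index 2 + 1 = 3 once the registers are concatenated.
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
circ = QuantumCircuit(QuantumRegister(1), ClassicalRegister(2, "c0"), ClassicalRegister(3, "c1"))
_get_cindex(circ, "c1", 1)  # -> 3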
|
340105a2ddfe5fb2527171a7592390c9dd2937e5
| 3,637,483
|
def get_bin(pdf: str) -> str:
"""
Get the bins of the pdf, e.g. './00/02/Br_J_Cancer_1977_Jan_35(1)_78-86.tar.gz'
    returns '00/02/'.
"""
parts = pdf.split('/')
return parts[-3] + '/' + parts[-2] + '/'
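# Minimal example (not part of the source) with a made-up path: the bin is always
# the two directory levels immediately above the archive.
get_bin('/data/pmc/07/3f/some_article.tar.gz')  # -> '07/3f/'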
|
a1e25162b8a353f508667ccb4fc750e51fcf611d
| 3,637,484
|
def burkert_density(r, r_s, rho_o):
"""
Burkert dark matter density profile
"""
x = r / r_s
density = rho_o / ( (x) * (1.0 + x)**2)
return density.to('g/cm**3')
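# Hedged usage sketch (not part of the source), assuming unit-aware quantities
# (e.g. astropy.units) so that the final `.to('g/cm**3')` conversion works:
import astropy.units as u
burkert_density(10 * u.kpc, r_s=5 * u.kpc, rho_o=1e-24 * u.g / u.cm**3)
# -> ~5.6e-26 g / cm3, i.e. rho_o / (x * (1 + x)**2) with x = 2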
|
8293a62b6c52c65e7c5fe7c676fd3807f301e40b
| 3,637,486
|
def send_file(path):
"""
Route for file downloads
"""
    # Expand a leading ~ in the configured document directory path
path_prefix = expanduser(DOCUMENT_DIRECTORY_PATH)
return send_from_directory(path_prefix, path)
|
9c11acd930c7bc3851421e70b4822ac8efbc7c05
| 3,637,487
|
def locations__single(request, location_id: int):
"""
Renders the locations page, when a single location has been selected.
"""
context = {'geolocation': request.session.get('geolocation'),
'location_error': request.session.get('location_error')}
try:
location_id = validators.validate_integer(location_id)
except validators.ValidationError as e:
context['validation_error'] = True
context['invalid_data'] = e.invalid_data
request.session['no_matching_location'] = True
return redirect('/locations')
loc = location_service.get_single_location(location_id)
if loc is None:
request.session['no_matching_location'] = True
return redirect('/locations')
else:
context['current_location'] = loc
context['AQI_colors'] = location_service.get_air_pollution_colors()
return render(request, 'climate/locations.html', context)
|
0b7ab51fe021a677ca87aecde1a9981095ef56ff
| 3,637,488
|
def page_not_found(e):
"""
Application wide 404 error handler
"""
return render_template('404.html',
base_template=appbuilder.base_template,
appbuilder=appbuilder), 404
|
a1c146b4d782a35d45ec0f351d12e09cdff9be1a
| 3,637,489
|
def licols(A, tol=1e-10):
"""
Extracts a linearly independent set of columns from a given matrix A.
Solution found at https://nl.mathworks.com/matlabcentral/answers/108835-how-to-get-only-linearly-independent-rows-in-a-matrix-or-to-remove-linear-dependency-b-w-rows-in-a-m
:param A: matrix
:param tol: Rank estimation tolerance
    :return: indices of a linearly independent set of columns
"""
if np.min(A.shape) == 0:
return []
elif np.min(A.shape) == 1:
return [0]
else:
Q, R, E = qr(A, mode='economic', pivoting=True)
diagr = np.abs(np.diagonal(R))
# Estimate rank
rank = np.where(diagr >= tol * diagr[0])[0][-1] + 1
# Get corresponding columns
col_ids = np.sort(E[:rank])
return col_ids
|
96c15e65c7e12dc86342642c2b6cc1e147430cb4
| 3,637,490
|
def area_description(area,theory_expt):
""" Generate plain-language name of research area from database codes.
"""
area_name_by_area = {
"As" : "Astrophysics",
"BP" : "Biophysics",
"CM" : "Condensed matter",
"HE" : "High energy",
"NS" : "Network science",
"NUC" : "Nuclear",
"" : None
}
area_name = area_name_by_area[area]
if (area=="As" and theory_expt=="Experimental"):
qualifier = "observation"
elif (theory_expt=="Experimental"):
qualifier = "experiment"
elif (theory_expt=="Theory"):
qualifier = "theory"
else:
qualifier = ""
return "{} {}".format(area_name,qualifier)
|
d7743c2d80d9a74dd6a24f735b7c0a389eb36468
| 3,637,492
|
def parse_username_password_hostname(remote_url):
"""
Parse a command line string and return username, password, remote hostname and remote path.
:param remote_url: A command line string.
:return: A tuple, containing username, password, remote hostname and remote path.
"""
assert remote_url
assert ':' in remote_url
if '@' in remote_url:
username, hostname = remote_url.rsplit('@', 1)
else:
username, hostname = None, remote_url
hostname, remote_path = hostname.split(':', 1)
password = None
if username and ':' in username:
username, password = username.split(':', 1)
assert hostname
assert remote_path
return username, password, hostname, remote_path
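# Minimal examples (not part of the source):
parse_username_password_hostname("alice:s3cret@example.com:/srv/data")
# -> ('alice', 's3cret', 'example.com', '/srv/data')
parse_username_password_hostname("example.com:/srv/data")
# -> (None, None, 'example.com', '/srv/data')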
|
50410ad87865559af84b83ab6bdfae19e536791d
| 3,637,493
|
def _random_inverse_gaussian_no_gradient(shape, loc, concentration, seed):
"""Sample from Inverse Gaussian distribution."""
# See https://en.wikipedia.org/wiki/Inverse_Gaussian_distribution or
# https://www.jstor.org/stable/2683801
dtype = dtype_util.common_dtype([loc, concentration], tf.float32)
concentration = tf.convert_to_tensor(concentration)
loc = tf.convert_to_tensor(loc)
chi2_seed, unif_seed = samplers.split_seed(seed, salt='inverse_gaussian')
sampled_chi2 = tf.square(samplers.normal(shape, seed=chi2_seed, dtype=dtype))
sampled_uniform = samplers.uniform(shape, seed=unif_seed, dtype=dtype)
# Wikipedia defines an intermediate x with the formula
# x = loc + loc ** 2 * y / (2 * conc)
# - loc / (2 * conc) * sqrt(4 * loc * conc * y + loc ** 2 * y ** 2)
# where y ~ N(0, 1)**2 (sampled_chi2 above) and conc is the concentration.
# Let us write
# w = loc * y / (2 * conc)
# Then we can extract the common factor in the last two terms to obtain
# x = loc + loc * w * (1 - sqrt(2 / w + 1))
  # Now we see that the Wikipedia formula suffers from catastrophic
# cancellation for large w (e.g., if conc << loc).
#
# Fortunately, we can fix this by multiplying both sides
# by 1 + sqrt(2 / w + 1). We get
# x * (1 + sqrt(2 / w + 1)) =
# = loc * (1 + sqrt(2 / w + 1)) + loc * w * (1 - (2 / w + 1))
# = loc * (sqrt(2 / w + 1) - 1)
# The term sqrt(2 / w + 1) + 1 no longer presents numerical
# difficulties for large w, and sqrt(2 / w + 1) - 1 is just
# sqrt1pm1(2 / w), which we know how to compute accurately.
# This just leaves the matter of small w, where 2 / w may
  # overflow. In the limit as w -> 0, x -> loc, so we just mask
# that case.
sqrt1pm1_arg = 4 * concentration / (loc * sampled_chi2) # 2 / w above
safe_sqrt1pm1_arg = tf.where(sqrt1pm1_arg < np.inf, sqrt1pm1_arg, 1.0)
denominator = 1.0 + tf.sqrt(safe_sqrt1pm1_arg + 1.0)
ratio = tfp_math.sqrt1pm1(safe_sqrt1pm1_arg) / denominator
sampled = loc * tf.where(sqrt1pm1_arg < np.inf, ratio, 1.0) # x above
return tf.where(sampled_uniform <= loc / (loc + sampled),
sampled, tf.square(loc) / sampled)
|
6ce80d87c4350e7816fcd50956639906bdf7244e
| 3,637,494
|
from os.path import expandvars
from pathlib import Path
def resolve(path):
"""
fully resolve a path:
resolve env vars ($HOME etc.) -> expand user (~) -> make absolute
Returns:
pathlib.Path: resolved absolute path
"""
return Path(expandvars(str(path))).expanduser().resolve()
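# Minimal example (not part of the source); the result depends on the
# environment, e.g. with HOME=/home/alice:
resolve("$HOME/projects/../notes.txt")  # -> PosixPath('/home/alice/notes.txt')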
|
cc75751421206450f551617d558ec000d54ba54f
| 3,637,496
|
def sas_2J1x_x(x):
"""return 2*J1(x)/x"""
if np.isscalar(x):
retvalue = 2*sas_J1(x)/x if x != 0 else 1.
else:
with np.errstate(all='ignore'):
retvalue = 2*sas_J1(x)/x
retvalue[x == 0] = 1.
return retvalue
|
286dfb2c4df4120ff232e347f2381023a0bdaf40
| 3,637,497
|
def get_cross_matrix(vec: ndarray) -> ndarray:
"""Get the matrix equivalent of cross product. S() in (10.68)
cross_product_matrix(vec1)@vec2 == np.cross(vec1, vec2)
Hint: see (10.5)
Args:
vec (ndarray[3]): vector
Returns:
S (ndarray[3,3]): cross product matrix equivalent
"""
S = np.array([
[0, -vec[2], vec[1]],
[vec[2], 0, -vec[0]],
[-vec[1], vec[0], 0]
])
return S
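# Minimal example (not part of the source) checking the equivalence stated above:
import numpy as np
a, b = np.array([1.0, 2.0, 3.0]), np.array([4.0, 5.0, 6.0])
np.allclose(get_cross_matrix(a) @ b, np.cross(a, b))  # -> True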
|
2e95611fbe2bbd5ae6a94e490345e0d19c3a5e61
| 3,637,498
|
from typing import Tuple
from typing import Set
def apply_proteomics_elastic_relaxation(
original_model: Model,
objective_rule: Objective_rule = Objective_rule.MIN_ELASTIC_SUM_OBJECTIVE,
) -> Tuple[Model, Set]:
"""Relax the problem by relaxing the protein concentration constraints.
The relaxed problems will be determined via Elastic filtering, returning
the model and a irreducibly inconsistent set of functional constraints (
[Chinnek and Dravnieks, 1990]
(https://pubsonline.informs.org/doi/abs/10.1287/ijoc.3.2.157)).
Parameters
----------
original_model: geckopy.Model
Geckopy model. It won't be modified but copied.
objective_rule: Objective_rule
The IIS is selected by minimizing an objective as defined in
:class:`Objective_rule`.
Returns
-------
tuple: (geckopy.Model, set)
        copy of the model with the relaxed variables applied and the
        irreducibly inconsistent set (IIS) of protein constraints
"""
model = original_model.copy()
    # model is inspected for the IIS
elastics = elastic_upper_relaxation(
model,
[
prot.id
for prot in model.proteins
if isinstance(prot.concentration, float) and not isnan(prot.concentration)
],
objective_rule,
)
# model is modified in place given the elastic candidates found
iis, _ = get_upper_relaxation(model, elastics, objective_rule)
return model, iis
|
d2dd9fa8f179535cf1a8e4dcb9abb8fbf1ce5633
| 3,637,499
|
import ast
def skip_node(node):
"""Whether to skip a step in the traceback based on ast node type."""
return isinstance(node, (ast.If, ast.While, ast.For))
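# Minimal example (not part of the source):
skip_node(ast.parse("for i in range(3):\n    pass").body[0])  # -> True (For is skipped)
skip_node(ast.parse("x = 1").body[0])                         # -> False (Assign is kept)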
|
2406d02190a4dccb3d1f5d743a742f82c97f6541
| 3,637,500
|