content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import re
def parse_report(text):
    """Split a numbered report into its individual cases.

    Cases are delimited by markers like "1. "; each case is returned with
    trailing whitespace and trailing semicolons removed.
    """
    pieces = re.split(r'\d\.\s', text)
    # Drop empty fragments (e.g. when the text starts with a case marker).
    non_empty = (piece for piece in pieces if piece)
    return [case.rstrip().rstrip(';') for case in non_empty]
from typing import Union
def overloaded_func(a: Union[float, str]) -> Union[float, str]:
    """Double *a*: numeric doubling for floats, repetition for strings."""
    doubled = a * 2
    return doubled
from typing import List
def normalize_identity(un_normalized: List[str], verbose: bool = False) -> List[str]:
    """Identity normalizer: return the input unchanged.

    Args:
        un_normalized: the list of input strings
        verbose: accepted for interface compatibility; unused here

    Returns:
        The very same list object that was passed in.
    """
    return un_normalized
import re
def verifyProxyFormat(proxy):
    """Check that *proxy* is an "ip:port" string (e.g. "1.2.3.4:8080").

    :param proxy: proxy address string to validate
    :return: True if the whole string matches the ip:port pattern
    """
    verify_regex = r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5}"
    # fullmatch anchors the pattern to the entire string, which is exactly
    # what the previous findall + "single match equal to input" check was
    # emulating; also drops the `True if ... else False` anti-idiom.
    return re.fullmatch(verify_regex, proxy) is not None
import numpy
def make_bins_ltpFR3(semArray):
    """
    Create similarity bins of WAS scores, identical to those used in ltpFR2:
    four equally spaced boundaries over the off-diagonal similarity range give
    three bins (low / medium / high similarity).

    A coordinate in semRows[i][j] and semCols[i][j] is the index of the jth
    word pair in semArray that falls in the ith similarity bin.
    """
    # Exclude self-similarity entries (value exactly 1).
    off_diagonal = semArray[numpy.where(semArray != 1)]
    lowest = off_diagonal.min()
    highest = off_diagonal.max()
    # Four equally spaced boundaries over the similarity range -> three bins.
    boundaries = list(numpy.linspace(lowest, highest, 4))
    # Pair consecutive boundaries: (b0, b1), (b1, b2), (b2, b3).
    bounds_pairs = zip(boundaries[:-1], boundaries[1:])
    semRows = []
    semCols = []
    # Collect indices of word pairs strictly inside each bin's bounds.
    for lower, upper in bounds_pairs:
        rows, cols = ((semArray > lower) & (semArray < upper)).nonzero()
        semRows.append(rows)
        semCols.append(cols)
    return semRows, semCols
import os
def build_database_url(project: str = os.environ.get('PROJECT_NAME', 'powderbooking')) -> str:
    """
    Build the PostgreSQL database url from environment variables.

    Note: the default for ``project`` is read from the environment once,
    when the module is imported.

    :return: the database url
    """
    user = os.environ.get('POSTGRESQL_USER', 'postgres')
    pwd = os.environ.get('POSTGRESQL_PASSWORD', 'password')
    svc_host = os.environ.get(f'{project}_POSTGRESQL_SERVICE_HOST'.upper(), 'localhost')
    svc_port = os.environ.get(f'{project}_POSTGRESQL_SERVICE_PORT'.upper(), '8001')
    db_name = os.environ.get('POSTGRESQL_DB', 'powderbooking')
    return f'postgresql://{user}:{pwd}@{svc_host}:{svc_port}/{db_name}'
def two_sum(arr, num):
    """
    Two-sum over a set: return all (a, b) pairs from *arr* with
    a + b == num and b >= a (so each pair is reported once).
    """
    assert isinstance(arr, set)
    return [(a, num - a) for a in arr if num - a >= a and num - a in arr]
def getLatLong(bbox):
    """
    Get the tuple of minimum and maximum latitudes and longitudes.

    :param bbox: `geographic_msgs/BoundingBox`_.
    :returns: (min_lat, min_lon, max_lat, max_lon)
    """
    low, high = bbox.min_pt, bbox.max_pt
    return (low.latitude, low.longitude, high.latitude, high.longitude)
import re
def pattern_match(item : str, pattern : str, strict : bool = True) -> bool:
    """
    Check if item matches with the pattern that contains
    "*" wildcards and "?" question marks.

    Note: "*" translates to ".+" (one or more characters), so "foo*" does
    NOT match "foo" — this intentionally differs from shell glob semantics.

    Args:
        item:
            The string that pattern will be applied to.
        pattern:
            A wildcard (glob) pattern.
        strict:
            If `True`, then it will check if matched string equals with the `item` parameter.
            So applying "foo?" pattern on "foobar" will result in `False`. Default is `True`.
    Returns:
        A boolean value.
    """
    # FIX: use raw strings for the regex escapes — "\." and "\+" in plain
    # string literals are invalid escape sequences (DeprecationWarning,
    # SyntaxWarning on newer Pythons).
    _ptn = (pattern.replace(".", r"\.")
                   .replace("+", r"\+")
                   .replace("*", ".+")
                   .replace("?", "."))
    _match = re.match(_ptn, item)
    if strict and bool(_match):
        return _match.group(0) == item
    return bool(_match)
def expected_ar_m_ndp_values():
    """Expected metric values for m_ndp, keyed as "<metric>_<threshold>".

    For each threshold the metrics are, in order: accuracy, precision,
    recall, f1_score, and the confusion counts TP, FP, TN, FN.
    """
    per_threshold = {
        "0.175": (0.6988, 0.49419, 0.86735, 0.62963, 85, 87, 147, 13),
        "0.225": (0.6988, 0.49419, 0.86735, 0.62963, 85, 87, 147, 13),
        "0.3": (0.7259, 0.52318, 0.80612, 0.63454, 79, 72, 162, 19),
        "0.4": (0.73494, 0.53571, 0.76531, 0.63025, 75, 65, 169, 23),
        "0.5": (0.73494, 0.53571, 0.76531, 0.63025, 75, 65, 169, 23),
        "0.6": (0.74096, 0.55556, 0.61224, 0.58252, 60, 48, 186, 38),
        "0.7": (0.72289, 0.53947, 0.41837, 0.47126, 41, 35, 199, 57),
        "0.8": (0.72289, 0.53947, 0.41837, 0.47126, 41, 35, 199, 57),
        "0.9": (0.70482, 0.0, 0.0, 0.0, 0, 0, 234, 98),
    }
    metric_names = ("accuracy", "precision", "recall", "f1_score",
                    "TP", "FP", "TN", "FN")
    return {
        f"{metric}_{threshold}": value
        for threshold, values in per_threshold.items()
        for metric, value in zip(metric_names, values)
    }
def process(result, labels, tensor_name, threshold, top_k):
    """Process an inference result; return formatted labels sorted by confidence."""
    # MobileNet-based classification models return exactly one result vector.
    assert len(result.tensors) == 1
    tensor = result.tensors[tensor_name]
    probs = tensor.data
    assert tensor.shape.depth == len(labels)
    # Keep only entries above the confidence threshold, highest first.
    scored = [(idx, prob) for idx, prob in enumerate(probs) if prob > threshold]
    scored.sort(key=lambda pair: pair[1], reverse=True)
    return [' %s (%.2f)' % (labels[idx], prob) for idx, prob in scored[:top_k]]
import sys
def check_float():
    """Collect float limit information from ``sys.float_info``.

    Returns:
        dict(str): ``{'FLOAT': {...}}`` mapping human-readable labels to
        the respective limits.
    """
    fi = sys.float_info
    # NOTE: the 'Miminim Exp.' key spelling is kept as-is for compatibility
    # with existing consumers of this dictionary.
    details = {
        'Epsilon': fi.epsilon,
        'Digits': fi.dig,
        'Precision': fi.mant_dig,
        'Maximum': fi.max,
        'Maximum Exp.': fi.max_exp,
        'Max. 10 Exp.': fi.max_10_exp,
        'Minimum': fi.min,
        'Miminim Exp.': fi.min_exp,
        'Min. 10 Exp.': fi.min_10_exp,
        'Radix': fi.radix,
        'Rounds': fi.rounds,
    }
    return {'FLOAT': details}
def nocrc(byte_cmd):
    """
    CRC stub: always return an empty two-part checksum (no CRC applied).
    """
    return ['', '']
def find_between(in_str, start='>', end='<'):
    """Return the substring between the first *start* and the next *end*.

    Raises IndexError when *start* does not occur in *in_str*; when *end*
    is missing, everything after *start* is returned.
    """
    after_start = in_str.split(start, 1)[1]
    return after_start.split(end, 1)[0]
def to_32byte_hex(web3, val):
    """Convert *val* to bytes via web3 and left-pad with NUL bytes to 32 bytes.

    :param web3: web3 instance providing ``toBytes``
    :param val: value to convert
    :return: 32-byte left-padded byte string
    """
    raw = web3.toBytes(val)
    return raw.rjust(32, b"\0")
def get_var_profile_inputs_from_database(
    scenario_id, subscenarios, subproblem, stage, conn, op_type
):
    """
    Select only profiles of projects in the portfolio
    Select only profiles of projects with 'op_type' operational type
    Select only profiles for timepoints from the correct temporal scenario
    and the correct subproblem
    Select only timepoints on periods when the project is operational
    (periods with existing project capacity for existing projects or
    with costs specified for new projects)

    :param scenario_id: unused here; kept for call-signature consistency
    :param subscenarios: SubScenarios object with all subscenario info
    :param subproblem:
    :param stage:
    :param conn: database connection
    :param op_type:
    :return: cursor object with query results
    """
    # Empty string means "no subproblem/stage structure"; default both to 1.
    subproblem = 1 if subproblem == "" else subproblem
    stage = 1 if stage == "" else stage
    c = conn.cursor()
    # NOTE: There can be cases where a resource is both in specified capacity
    # table and in new build table, but depending on capacity type you'd only
    # use one of them, so filtering with OR is not 100% correct.
    # NOTE(review): the query is assembled with str.format, not bound
    # parameters — acceptable only because all values come from internal
    # subscenario IDs, never user input.
    sql = """
        SELECT project, timepoint, cap_factor
        -- Select only projects, periods, horizons from the relevant portfolio,
        -- relevant opchar scenario id, operational type,
        -- and temporal scenario id
        FROM
            (SELECT project, stage_id, timepoint,
            variable_generator_profile_scenario_id
            FROM project_operational_timepoints
            WHERE project_portfolio_scenario_id = {}
            AND project_operational_chars_scenario_id = {}
            AND operational_type = '{}'
            AND temporal_scenario_id = {}
            AND (project_specified_capacity_scenario_id = {}
                 OR project_new_cost_scenario_id = {})
            AND subproblem_id = {}
            AND stage_id = {}
            ) as projects_periods_timepoints_tbl
        -- Now that we have the relevant projects and timepoints, get the
        -- respective cap factors (and no others) from
        -- inputs_project_variable_generator_profiles
        LEFT OUTER JOIN
            inputs_project_variable_generator_profiles
        USING (variable_generator_profile_scenario_id, project,
               stage_id, timepoint)
        ;
        """.format(
        subscenarios.PROJECT_PORTFOLIO_SCENARIO_ID,
        subscenarios.PROJECT_OPERATIONAL_CHARS_SCENARIO_ID,
        op_type,
        subscenarios.TEMPORAL_SCENARIO_ID,
        subscenarios.PROJECT_SPECIFIED_CAPACITY_SCENARIO_ID,
        subscenarios.PROJECT_NEW_COST_SCENARIO_ID,
        subproblem,
        stage,
    )
    # cursor.execute returns the cursor itself, which callers iterate.
    variable_profiles = c.execute(sql)
    return variable_profiles
import os
def file_exists(file_name):
    """Return True if *file_name* exists and is a regular file, else False."""
    # os.path.isfile already returns a bool; the if/else was redundant.
    return os.path.isfile(file_name)
def _parse_logline_timestamp(t):
"""Parses a logline timestamp into a tuple.
Args:
t: Timestamp in logline format.
Returns:
An iterable of date and time elements in the order of month, day, hour,
minute, second, microsecond.
"""
date, time = t.split(' ')
month, day = date.split('-')
h, m, s = time.split(':')
s, ms = s.split('.')
return (month, day, h, m, s, ms) | 9b0ea2f6cfe4edef89eec6dbbddbdd258640c210 | 692,957 |
from numpy.linalg import svd, det
from numpy import dot
def fit(X, Y):
    """
    Return the translation vector and the rotation matrix
    minimizing the RMSD between two sets of d-dimensional
    vectors, i.e. if
    >>> R,t = fit(X,Y)
    then
    >>> Y = dot(Y, transpose(R)) + t
    will be the fitted configuration.

    @param X: (n, d) input vector
    @type X: numpy array
    @param Y: (n, d) input vector
    @type Y: numpy array
    @return: (d, d) rotation matrix and (d,) translation vector
    @rtype: tuple
    """
    # Center both configurations on their centroids.
    centroid_x = X.mean(0)
    centroid_y = Y.mean(0)
    # SVD of the d x d correlation matrix of the centered configurations.
    V, _singular, U = svd(dot((X - centroid_x).T, Y - centroid_y))
    R = dot(V, U)
    # A negative determinant means R is a reflection; flip the last row of U
    # to force a proper rotation.
    if det(R) < 0.:
        U[-1] *= -1
        R = dot(V, U)
    t = centroid_x - dot(R, centroid_y)
    return R, t
def valid_location(currBoard, location):
    """
    Return True when *location* is a legal move on *currBoard*: an index in
    the range 0..8 whose square is still empty (" "), otherwise False.
    """
    # Bounds check first (short-circuit) so the board is never indexed out
    # of range; the chained comparison replaces the verbose if/elif ladder.
    return 0 <= location <= 8 and currBoard[location] == " "
def _isnumeric(var) :
"""
Test if var is numeric, only integers are allowed
"""
return type(var) is int | 51c29247a5d4531f565534afe7750a13138c110e | 692,960 |
def get_description_eng(cve):
    """
    Extract the english description from the provided CVE, lowercased.
    Returns "" when no english description is present.

    Parameters
    ----------
    cve : dict
        The dictionary generated from the CVE json.

    Returns
    -------
    str
        The lowercased english description, or a blank string.
    """
    entries = cve.get("description", {}).get("description_data", [])
    for entry in entries:
        # An "eng" entry without a "value" is skipped, not treated as found.
        if entry.get("lang") == "eng" and "value" in entry:
            return entry["value"].lower()
    return ""
import base64
import json
def _read_pubsub_json(event):
"""Extracts the json payload from a pub/sub message.
Args:
event: A Pub/Sub event.
Returns:
The json_payload from a pub/sub message.
"""
pubsub_message = base64.b64decode(event['data']).decode('utf-8')
return json.loads(pubsub_message) | 9bfafe8f36e6bcd0db68f9d4528081c44067b04f | 692,962 |
def lstrip(obj, prefix):
    """Strip *prefix* from the start of a string, or of every string inside a
    (possibly nested) list/tuple. Non-string, non-sequence values pass through.

    Note: tuples come back as lists, mirroring the historical behavior.
    NOTE: shadows the builtin ``str.lstrip`` name; callers rely on it.
    """
    if isinstance(obj, (list, tuple)):
        return [lstrip(obj=item, prefix=prefix) for item in obj]
    if isinstance(obj, str) and obj.startswith(prefix):
        return obj[len(prefix):]
    return obj
def bin(ndarray, new_shape, operation="sum"):
    """
    bin an ndarray

    NOTE: shadows the builtin ``bin``; existing callers rely on this name.

    ndarray: nd-array
    new_shape: shape to bin to. shape of ndarray has to be integer multiple of new_shape along each dimension
    operation: string. sum, mean, max, or min. operation to use
    """
    ops = ["sum", "mean", "max", "min"]
    operation = operation.lower()
    if operation not in ops:
        raise ValueError("Operation not supported.")
    if ndarray.ndim != len(new_shape):
        raise ValueError("Shape mismatch: {} -> {}".format(ndarray.shape, new_shape))
    # For each axis, pair the target size d with the bin width c // d.
    compression_pairs = [(d, c // d) for d, c in zip(new_shape, ndarray.shape)]
    flattened = [i for p in compression_pairs for i in p]
    # Reshape to (d0, w0, d1, w1, ...) so each bin gets its own axis.
    ndarray = ndarray.reshape(flattened)
    # Reduce the bin-width axes from the right; after each reduction the
    # next width axis sits at position -(i + 1).
    for i in range(len(new_shape)):
        op = getattr(ndarray, operation)
        ndarray = op(-1 * (i + 1))
    return ndarray
import sys
import re
def get_illegal_char_regex():
    """
    Return a compiled regex object matching unicode characters illegal in XML.
    """
    illegal_code_ranges = [
        (0x00, 0x08), (0x0B, 0x1F), (0x7F, 0x84), (0x86, 0x9F),
        (0xD800, 0xDFFF), (0xFDD0, 0xFDDF), (0xFFFE, 0xFFFF),
        (0x1FFFE, 0x1FFFF), (0x2FFFE, 0x2FFFF), (0x3FFFE, 0x3FFFF),
        (0x4FFFE, 0x4FFFF), (0x5FFFE, 0x5FFFF), (0x6FFFE, 0x6FFFF),
        (0x7FFFE, 0x7FFFF), (0x8FFFE, 0x8FFFF), (0x9FFFE, 0x9FFFF),
        (0xAFFFE, 0xAFFFF), (0xBFFFE, 0xBFFFF), (0xCFFFE, 0xCFFFF),
        (0xDFFFE, 0xDFFFF), (0xEFFFE, 0xEFFFF), (0xFFFFE, 0xFFFFF),
        (0x10FFFE, 0x10FFFF),
    ]
    # Skip ranges above the platform's maximum code point (narrow builds).
    parts = [u"%s-%s" % (chr(low), chr(high))
             for low, high in illegal_code_ranges if low < sys.maxunicode]
    return re.compile(u"[%s]" % u"".join(parts))
def has_more_than_two_occurence(x):
    """Filter predicate for (word, count) pairs: return the pair when the
    word occurs more than twice, otherwise None (falsy, so it is dropped
    by filters).
    """
    if x[1] > 2:
        return x
    # Explicit None makes the falsy "reject" result obvious instead of the
    # previous implicit fall-through return.
    return None
def _conformal_score_interval(predictions, values):
    """ Compute the non-conformity score of a set of values under some baseline predictor

    Args:
        predictions: array [batch_shape, 2], a batch of interval predictions
        values: array [n_evaluations, batch_shape], note that for values batch_shape is the last dimension while for predictions batch_shape is the first dimension

    Returns:
        score: array [n_evaluations, batch_shape], where score[i, j] is the non-conformity score of values[i, j] under the prediction[j]
    """
    # Distance of each value from the interval's smaller endpoint, normalized
    # by the (absolute) interval width, then shifted by 0.5 so the interval
    # midpoint scores 0. permute(1, 0) transposes the [batch, 1] tensors so
    # they broadcast against values' [n_evaluations, batch] layout.
    # NOTE(review): the lower end is taken with min() but the width uses the
    # signed difference [:, 1] - [:, 0] with abs() — assumes column order may
    # vary; confirm against callers.
    score = (values - predictions.min(dim=1, keepdims=True)[0].permute(1, 0)) / (predictions[:, 1:2] - predictions[:, 0:1]).abs().permute(1, 0) - 0.5
    return score
def client_access(accounts):
    """
    Prompt for a passport number on stdin and return the matching account,
    or False when the input is not a number or no account matches.
    """
    try:
        passport = int(input("Номер паспорта: "))
    except ValueError:
        # Non-numeric input: treat as access denied.
        return False
    wanted = str(passport).strip()
    for account in accounts:
        if wanted == str(account.passport8).strip():
            return account
    return False
def right_to_left_scalar_mul(point, d):
    """Used for precomputation stage

    Right-to-left scalar multiplication of *point* by the non-negative
    integer *d*, using a signed-digit expansion: at each odd step a digit
    u in {+1, -1} is chosen so that (d - u) is divisible by 4.

    Returns None when d < 1 (the identity / point at infinity encoding
    used by the callers' add()).
    """
    result = None
    while d >= 1:
        if d % 2 == 1:
            # u = +1 when d % 4 == 1, u = -1 when d % 4 == 3.
            u = 2 - (d % 4)
            d -= u
            if u == 1:
                result = point.add(result)
            else:
                # Subtract the point by adding its inverse.
                result = point.inverse().add(result)
        d //= 2
        # Double the base point for the next bit position.
        point = point.point_double()
    return result
import os
def format_regexp_matches(name, regexps, unmatched):
    """
    Format regexp match context so it can be rendered via TimeoutException.

    Returns '' when nothing is unmatched; otherwise a report listing the
    matched and unmatched patterns on separate lines.
    """
    if not unmatched:
        return ''
    matched_repr = ["REGEX('{}')".format(rx.pattern)
                    for rx in regexps if rx not in unmatched]
    unmatched_repr = ["REGEX('{}')".format(rx.pattern) for rx in unmatched]
    report = '{newline} {name} matched: {matched}'.format(
        newline=os.linesep, name=name, matched=matched_repr)
    report += '{newline}Unmatched: {unmatched}'.format(
        newline=os.linesep, unmatched=unmatched_repr)
    return report
def calcHedgeRatio(betaHat: float, sigmaHat: float) -> float:
    """Calculate the hedge ratio ``betaHat * (1 + sigmaHat / 2)``.

    Parameters
    ----------
    betaHat : float
        Beta hat of two assets.
    sigmaHat : float
        Sigma hat of two assets.

    Returns
    -------
    float
        The hedge ratio.
    """
    adjustment = 1 + 0.5 * sigmaHat
    return betaHat * adjustment
import requests
def get_kucoin_ticker(symbol='BTC-USDT'):
    """
    Fetch the current buy/sell quotes for *symbol* from the KuCoin open API.

    doc:
        https://kucoinapidocs.docs.apiary.io/#reference/0/market/tick(open)
    limit: UNKNOWN

    Returns:
        (buy, sell) tuple taken from the ticker payload.
    """
    main_path = 'https://api.kucoin.com'
    info_path = '/v1/open/tick?symbol={0}'.format(symbol)
    payload = requests.get(main_path + info_path).json()
    ticker = payload['data']
    return ticker['buy'], ticker['sell']
def categorize_columns(cols, msuffix='_mean', esuffix='_error'):
    """Categorize the column names of a mean dataframe.

    Args:
        cols (list): a list of column names
        msuffix (str): suffix marking mean columns
        esuffix (str): suffix marking error columns
    Return:
        (list, list, list): (rcol, mcol, ecol)
        rcol are columns of exact values with no errorbar (possibly labels),
        mcol are mean columns, ecol are error columns.

    Examples:
        >>> rcol, mcol, ecol = categorize_columns(mdf.columns)
        >>> xyye(df, 'Pressure', 'LocalEnergy', xerr=True)
    """
    rcol, mcol, ecol = [], [], []
    for col in cols:
        is_mean = col.endswith(msuffix)
        is_error = col.endswith(esuffix)
        if is_mean:
            mcol.append(col)
        if is_error:
            ecol.append(col)
        if not is_mean and not is_error:
            rcol.append(col)
    return rcol, mcol, ecol
def nonrepeats(inlist):
    """
    Returns items that are NOT duplicated in the first dim of the passed list,
    in their original order.

    Usage:   nonrepeats (inlist)
    """
    from collections import Counter
    try:
        # Count once up front: O(n) instead of the previous O(n**2)
        # list.count scan per element.
        counts = Counter(inlist)
    except TypeError:
        # Unhashable items (e.g. nested lists): fall back to the quadratic scan.
        return [item for item in inlist if inlist.count(item) == 1]
    return [item for item in inlist if counts[item] == 1]
def get_itag_of_video():
    """Prompt the user (on stdin) for the itag of the desired video format.

    The value is returned as the raw input string, despite the integer prompt.
    """
    print("\nReview list above and pick the itag of the video you want (resolution, codec, etc")
    return input("Enter itag number as an integer: ")
def client(anonymous_client):
    """Client fixture; can be overridden at module/class scope."""
    return anonymous_client
import json
def make_primary_provider_link_for_name(hpo_name):
    """Return serialized FHIR JSON for a provider link based on HPO information.

    The returned JSON represents a list containing the one primary provider.
    """
    provider_link = {
        "primary": True,
        "organization": {"reference": "Organization/%s" % hpo_name},
    }
    return json.dumps([provider_link], sort_keys=True)
import requests
def do_request(url, method="get", **kwargs):
    """Perform a single HTTP request.

    Returns the response object when the request succeeds AND the response
    is truthy (requests.Response is falsy for 4xx/5xx statuses, so error
    responses also yield None); returns None on any failure.

    :param url: the URL to request
    :param method: the HTTP method name (case-insensitive)
    """
    method = method.lower()
    kwargs.setdefault("timeout", 3)  # default request timeout: 3 seconds
    try:
        # Apply per-method default settings.
        if method == "get":
            kwargs.setdefault("params", None)  # default: no query parameters
            kwargs.setdefault("allow_redirects", True)  # follow redirects by default
        elif method == "options" or method == "head":
            kwargs.setdefault("allow_redirects", True)  # follow redirects by default
        elif method == "post":
            kwargs.setdefault("data", None)  # default: no form body
            kwargs.setdefault("json", None)  # default: no JSON body
        elif method == "put" or method == "patch":
            kwargs.setdefault("data", None)  # default: no request body
        elif method == "delete":
            pass
        else:
            # Unsupported HTTP method.
            return None
        # Execute the request.
        if response := requests.request(method=method, url=url, **kwargs):
            return response
        else:
            return None
    except requests.RequestException:
        return None
from typing import Any
def music_player_play(files: Any) -> None:
    """music_player_play(files: Any) -> None

    (internal)

    Starts internal music file playback (for internal use). This binding
    stub performs no action and always returns None.
    """
    return None
def flatten_descr(ndtype):
    """
    Flatten a structured data-type description.

    Examples
    --------
    >>> from numpy.lib import recfunctions as rfn
    >>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
    >>> rfn.flatten_descr(ndtype)
    (('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
    """
    if ndtype.names is None:
        # Plain (unstructured) dtype: nothing to flatten.
        return ndtype.descr
    flat = []
    for field_name in ndtype.names:
        field_dtype = ndtype.fields[field_name][0]
        if field_dtype.names:
            # Nested structured field: recurse into its sub-fields.
            flat.extend(flatten_descr(field_dtype))
        else:
            flat.append((field_name, field_dtype))
    return tuple(flat)
def home_view(request):
    """Home view: renders with an empty context."""
    return dict()
import os
import gzip
def load_gzipped_json_string(file_path: str) -> str:
    """
    Read a gzipped text file (used only for gzipped JSON text files) and
    return its contents; returns "" when *file_path* is not a regular file.
    """
    if not os.path.isfile(file_path):
        return ""
    with gzip.open(file_path, "rt") as fh:
        return fh.read()
import re
def clean_markdown(message_content: str) -> str:
    """Return *message_content* reduced to ASCII alphanumerics and whitespace.

    NOTE: this is broader than "stripping markdown" — ALL punctuation,
    symbols, and non-ASCII characters are removed, not just markdown syntax.
    """
    return re.sub(r"[^a-zA-Z0-9\s]", "", message_content)
def group_by_compatibility(thermodynamic_states):
    """Utility function to split the thermodynamic states by compatibility.

    Parameters
    ----------
    thermodynamic_states : list of ThermodynamicState
        The thermodynamic states to group by compatibility.

    Returns
    -------
    compatible_groups : list of list of ThermodynamicState
        The states grouped by compatibility.
    original_indices : list of list of int
        The indices of the ThermodynamicStates in the original list.
    """
    compatible_groups = []
    original_indices = []
    for position, state in enumerate(thermodynamic_states):
        placed = False
        # NOTE: a state compatible with several groups is appended to each
        # of them, preserving the historical behavior.
        for group, indices in zip(compatible_groups, original_indices):
            if state.is_state_compatible(group[0]):
                placed = True
                group.append(state)
                indices.append(position)
        if not placed:
            # No compatible group found: start a new one.
            compatible_groups.append([state])
            original_indices.append([position])
    return compatible_groups, original_indices
def _parse_node_to_coords(element):
"""
Parse coordinates from a node in the overpass response.
The coords are only used to create LineStrings and Polygons.
Parameters
----------
element : dict
element type "node" from overpass response JSON
Returns
-------
coords : dict
dict of latitude/longitude coordinates
"""
# return the coordinate of a single node element
coords = {"lat": element["lat"], "lon": element["lon"]}
return coords | 6ce67abb5b294ea8458ecdee64d2b49736348372 | 692,988 |
import re
import sys
def _find_requirements():
"""
List all installation requirements
Returns:
list: installation requirements
"""
with open('requirements.txt', 'r') as req_fd:
lines = req_fd.readlines()
req_list = []
for line in lines:
# comment or empty line: skip it
if not line.strip() or re.match('^ *#', line):
continue
# url format: need to extract requirement name
if '://' in line:
egg_index = line.find('#egg=')
# no egg specifier present: requirement cannot be converted to
# setuptools format
if egg_index == -1:
print('warning: excluding requirement {}'.format(line),
file=sys.stderr)
continue
line = line[egg_index+5:]
# normal format: use requirement and drop additional parameters
else:
line = line.split()[0]
req_list.append(line)
return req_list | 87e2df8f1e75d0b0b55dc9398f11514281f5d2fa | 692,989 |
def numerise_params(prop_dict):
    """ Map qualitative drug-property values onto numeric scores.

    Note: mutates the passed-in dict in place and returns it.

    returns: numerically transformed property dictionaries
    rtype: dict
    """
    clearance_dict = {
        'low (< 5.6)': 1,
        'medium (5.6-30.5)': 4,
        'high (> 30.5)': 7,
        'low (< 12)': 1,
        'medium (12-44)': 4
    }
    pampa_dict = {
        'low': 2.5,
        'med2high': 5.5
    }
    props = prop_dict
    for label, score in clearance_dict.items():
        if props['clearance_mouse'] == label:
            props['clearance_mouse'] = score
        if props['clearance_human'] == label:
            props['clearance_human'] = score
    for label, score in pampa_dict.items():
        if props['pampa'] == label:
            props['pampa'] = score
        if props['logd'] == label:
            props['logd'] = score
    return props
def draw_path(
    obj, col, marker_symbol, marker_size, marker_color, line_style, line_width, ax
):
    """Draw the object's path in the given color; return the list of path points."""
    # pylint: disable=protected-access
    path = obj._position
    if len(path) > 1:
        xs, ys, zs = path[:, 0], path[:, 1], path[:, 2]
        ax.plot(
            xs,
            ys,
            zs,
            ls=line_style,
            lw=line_width,
            color=col,
            marker=marker_symbol,
            mfc=marker_color,
            mec=marker_color,
            ms=marker_size,
        )
        # Highlight the starting point of the path.
        ax.plot(
            [xs[0]], [ys[0]], [zs[0]], marker="o", ms=4, mfc=col, mec="k"
        )
    return list(path)
def schema():
    """
    Pytest fixture that returns a set of solr field definitions as
    though from the Solr schema API. Irrelevant elements `stored`,
    `indexed`, and `required` are not included.
    """
    def field(name, type_, multi=False, **extra):
        # Build one Solr field definition; extras (e.g. uniqueKey) are
        # appended after the common keys.
        spec = {'name': name, 'type': type_, 'multiValued': multi}
        spec.update(extra)
        return spec

    return {'fields': [
        field('haystack_id', 'string', uniqueKey=True),
        field('django_id', 'string'),
        field('django_ct', 'string'),
        field('code', 'string'),
        field('label', 'string'),
        field('type', 'string'),
        field('id', 'long'),
        field('creation_date', 'date'),
        field('title', 'text_en'),
        field('notes', 'text_en', multi=True),
        field('status_code', 'string'),
        field('children_ids', 'long', multi=True),
        field('children_codes', 'string', multi=True),
        field('parent_id', 'long'),
        field('parent_title', 'text_en'),
        field('suppressed', 'boolean'),
    ]}
import json
def encode_project_info(long_name, description):
    """Encode a Sumatra project as JSON, omitting empty/falsy fields."""
    payload = {}
    if long_name:
        payload["name"] = long_name
    if description:
        payload["description"] = description
    return json.dumps(payload)
def assign_asset_type_to_province_roads(x):
    """Assign asset types to roads assets in Vietnam.

    The types are assigned based on our understanding of the reported asset
    code in the data.

    Parameters
        x - Pandas DataFrame row with numeric asset code
    Returns
        asset type - one of (Bridge, Dam, Culvert, Tunnel, Spillway, Road)
    """
    code_to_type = {
        12: 'Bridge',
        25: 'Bridge',
        23: 'Dam',
        24: 'Culvert',
        26: 'Tunnel',
        27: 'Spillway',
    }
    # Any unrecognized code falls back to the generic 'Road' type.
    return code_to_type.get(x.code, 'Road')
def parse_csv_results(csv_obj, upper_limit_data):
    """Parse raw CSV benchmark rows into per-story averages and CIs.

    Args:
        csv_obj: An array of rows (dict) describing the CSV results
        upper_limit_data: A dictionary containing the upper limits of each story

    Returns:
        A dictionary keyed by story, each holding lists of valid averages
        and 0.95 confidence intervals.
    """
    values_per_story = {}
    for row in csv_obj:
        # Only frame_times is used for testing representatives' performance.
        if row['name'] != 'frame_times':
            continue
        story = row['stories']
        if story not in upper_limit_data:
            continue
        # Entry is created even when the row itself carries no usable values.
        record = values_per_story.setdefault(story, {'averages': [], 'ci_095': []})
        if row['avg'] == '' or row['count'] == 0:
            continue
        record['ci_095'].append(float(row['ci_095']))
        record['averages'].append(float(row['avg']))
    return values_per_story
def restapi_version():
    """
    Return the REST API version string.
    """
    return "0.1.1235"
def _join_lexemes(lexemes, links):
    """
    Combine linked lexemes to a single lexeme.

    *lexemes* maps string lexeme ids to lists of forms; *links* is an
    iterable of (link_start, link_end, type_id) triples. Each non-excluded
    link merges the end lexeme's forms into the start lexeme (following
    earlier moves transitively) and empties the source list. Returns the
    surviving non-empty lexemes ordered by numeric id.
    """
    # <link_types>
    # <type id="1">ADJF-ADJS</type>
    # <type id="2">ADJF-COMP</type>
    # <type id="3">INFN-VERB</type>
    # <type id="4">INFN-PRTF</type>
    # <type id="5">INFN-GRND</type>
    # <type id="6">PRTF-PRTS</type>
    # <type id="7">NAME-PATR</type>
    # <type id="8">PATR_MASC-PATR_FEMN</type>
    # <type id="9">SURN_MASC-SURN_FEMN</type>
    # <type id="10">SURN_MASC-SURN_PLUR</type>
    # <type id="11">PERF-IMPF</type>
    # <type id="12">ADJF-SUPR_ejsh</type>
    # <type id="13">PATR_MASC_FORM-PATR_MASC_INFR</type>
    # <type id="14">PATR_FEMN_FORM-PATR_FEMN_INFR</type>
    # <type id="15">ADJF_eish-SUPR_nai_eish</type>
    # <type id="16">ADJF-SUPR_ajsh</type>
    # <type id="17">ADJF-SUPR_nai_aish</type>
    # <type id="18">ADJF-SUPR_suppl</type>
    # <type id="19">ADJF-SUPR_nai</type>
    # <type id="20">ADJF-SUPR_slng</type>
    # </link_types>
    # NAME-PATR links (type 7) must not merge name and patronymic lexemes.
    EXCLUDED_LINK_TYPES = set([7, ])
    # ALLOWED_LINK_TYPES = set([3, 4, 5])
    moves = dict()

    def move_lexeme(from_id, to_id):
        # Merge lexeme *from_id* into *to_id*, following any chain of
        # earlier moves so everything lands in the final destination.
        lm = lexemes[str(from_id)]
        while to_id in moves:
            to_id = moves[to_id]
        lexemes[str(to_id)].extend(lm)
        # Empty the source in place (it may be aliased elsewhere).
        del lm[:]
        moves[from_id] = to_id

    for link_start, link_end, type_id in links:
        if type_id in EXCLUDED_LINK_TYPES:
            continue
        # if type_id not in ALLOWED_LINK_TYPES:
        #     continue
        move_lexeme(link_end, link_start)
    # Keep only non-empty lexemes, sorted by numeric id.
    lex_ids = sorted(lexemes.keys(), key=int)
    return [lexemes[lex_id] for lex_id in lex_ids if lexemes[lex_id]]
def lomuto_partition(sorting: list, left: int, right: int) -> int:
    """
    Partition sorting[left:right + 1] in place around the pivot
    sorting[right] (Lomuto scheme) and return the pivot's final index.

    Example:
    >>> lomuto_partition([1,5,7,6], 0, 3)
    2
    """
    pivot = sorting[right]
    boundary = left
    for idx in range(left, right):
        if sorting[idx] < pivot:
            sorting[idx], sorting[boundary] = sorting[boundary], sorting[idx]
            boundary += 1
    # Place the pivot between the two partitions.
    sorting[right], sorting[boundary] = sorting[boundary], sorting[right]
    return boundary
def check(physical_line):
    """Test check to make sure local-checks are working.

    Returns an (offset, message) tuple on the test phrase, otherwise None.
    """
    if physical_line.strip() != "#this-is-the-test-phrase":
        return None
    return (0, "L100: Found local-check test case")
def default_jvp(node, **kwargs):
    """ Calling apl on the jvp inputs.
        This would be appropriate for many occasions.

    Input kwargs ending in '_' are renamed (suffix dropped) before the call;
    on a dict result, keys listed in apl.aout get the '_' suffix re-attached.
    """
    apl = node.operator.apl
    # Strip the trailing '_' convention from input names before calling apl.
    call_kwargs = {
        (name[:-1] if name.endswith('_') else name): value
        for name, value in kwargs.items()
    }
    result = apl.impl(node, **call_kwargs)
    if not isinstance(result, dict):
        # Scalar output: no names to patch.
        return result
    # Re-attach '_' to the declared outputs before returning.
    return {
        (name + '_' if name in apl.aout else name): value
        for name, value in result.items()
    }
def sanitize(*values):
    """Remove quotation marks from a string, or from every string nested
    inside lists/tuples of values.

    A single argument is returned as a bare value; multiple arguments
    (and nested lists/tuples) come back as a tuple.  Calling with no
    arguments returns an empty tuple instead of raising IndexError.
    """
    def remove(value, values_to_remove):
        """ removes characters from a string """
        if isinstance(value, str):
            for char in values_to_remove:
                value = value.replace(char, "")
        return value
    if not values:
        # previously crashed with IndexError on values[0]
        return ()
    if len(values) == 1:
        return remove(values[0], ["\'", "\\\'", "\"", "\\\""])
    sanitized = []
    for value in values:
        if isinstance(value, (list, tuple)):
            value = sanitize(*value)
        elif isinstance(value, str):
            value = sanitize(value)
        sanitized.append(value)
    return tuple(sanitized)
import torch
def BCELoss_labels_weighted(P: torch.Tensor, Y: torch.Tensor, W: torch.Tensor) \
        -> torch.Tensor:
    """
    Binary cross entropy loss which allows for different weights for different labels.
    Parameters
    ----------
    P : torch.Tensor
        The predicted labels.
    Y : torch.Tensor
        The true labels.
    W : torch.Tensor
        The weights per label.
    Returns
    -------
    loss : torch.Tensor
        Tensor object of size (1,1) containing the loss value.
    """
    # clamp away from {0, 1} so the logarithms below stay finite
    probs = torch.clamp(P, min=1e-7, max=1 - 1e-7)
    per_label = -(Y * torch.log(probs) + (1 - Y) * torch.log(1 - probs))
    return torch.mean(W * per_label)
def apply_format(var, format_str):
    """Recursively format every non-iterable inside ``var`` with
    ``format_str`` and return the result as a single string that mimics
    the list/tuple literal syntax.

    Example:
        >>> apply_format([2, [1, 4], 4, 1], '{:.1f}')
        '[2.0, [1.0, 4.0], 4.0, 1.0]'
    """
    if isinstance(var, (list, tuple)):
        formatted = [apply_format(item, format_str) for item in var]
        joined = ', '.join(formatted)
        # reproduce the bracket style of the original container
        return '({})'.format(joined) if isinstance(var, tuple) else '[{}]'.format(joined)
    return format_str.format(var)
def currency_clean_helper(currency, value):
    """Used to validate that a currency value works for a
    give currency. Should be called from a forms clean() method.
    Returns (value, errors)
    """
    whole = value[0]
    frac = str(value[1]) if len(value) == 2 else None
    places = currency.decimal_places
    if frac is not None and len(frac) > places:
        return None, "Too many decimal places (%s) for currency %s" % (
            len(frac), currency)
    if not frac:
        # no fractional part given: assume zero
        frac = '0' * places
    else:
        # right-pad short fractions, e.g. '5' -> '50' for 2 places
        frac = frac.ljust(places, '0')
    return int(str(whole) + frac), None
import logging
import requests
import json
def add_intersight_org(AUTH, RES_MOID, CLAIM_CONFIG):
    """ Add Intersight Organization """
    payload = {
        "Name": CLAIM_CONFIG['partner_id'],
        "Description": "Org for " + CLAIM_CONFIG['partner_id'],
        "ResourceGroups": [
            {
                "ObjectType": "resource.Group",
                "Moid": RES_MOID
            }
        ]
    }
    logging.info(payload)
    url = CLAIM_CONFIG['intersight_base_url'] + 'organization/Organizations'
    response = requests.post(url, data=json.dumps(payload), auth=AUTH)
    logging.info(response.text)
    org_moid = response.json()["Moid"]
    logging.info("ORGANIZATION: " + org_moid)
    return org_moid
import os
import re
def internet_access():
    """
    Check whether the current system has internet access by pinging
    www.baidu.com and searching the output for a 0%-packet-loss marker.
    :return: bool, True if the network appears reachable
    """
    pipe = os.popen('ping www.baidu.com')
    output = pipe.read()
    matches = re.findall(r'(\(0% 丢失\))', output)
    return bool(matches)
def read_sample_rate(data_dir: str):
    """Read the sample rate from the raw_data.csv file"""
    path = f"{data_dir}/raw_data.csv"
    with open(path) as csvfile:
        next(csvfile)  # skip the name row
        rate_row = next(csvfile)
        # sample rate is the second field of the second row
        return float(rate_row.strip().split(",")[1])
def is_builder_newer(old_component, new_component):
    """
    Return True if the given builder has been modified with respect to its
    state when the given component_meta was created.
    :param old_component: a dict of metadata describing a component ring
    :param new_component: a dict of metadata describing a component ring
    :return: True if the builder has been modified, False otherwise.
    :raises ValueError: if the version of the new_component is older than the
        version of the existing component.
    """
    old_version = old_component['version']
    new_version = new_component['version']
    if new_version < old_version:
        raise ValueError('Older builder version: %s < %s' %
                         (new_version, old_version))
    return old_version < new_version
def ints(int_list):
    """coerce a list of strings that represent integers into a list of integers"""
    return list(map(int, int_list))
def upsample_weights(name, data=None):
    """
    Placeholder: always returns an empty list of weight names.

    :param name: identifier of the upsample layer (currently unused)
    :param data: optional weight data (currently unused)
    :return: list of weight names (always empty in this stub)
    """
    weights_name = []
    return weights_name | d885895b1f44d5cdd8400b00ac4f06307c24dc7c | 693,011
def regrid_get_section_shape(src, axis_sizes, axis_indices):
    """Get the shape of each regridded section.
    :Parameters:
        src: `Field`
            The source field.
        axis_sizes: sequence
            A sequence of the sizes of each axis along which the
            section. will be taken
        axis_indices: sequence
            A sequence of the same length giving the axis index of
            each axis.
    :Returns:
        `list`
            A list of integers defining the shape of each section.
    """
    # every axis defaults to length 1; the sectioned axes keep their size
    shape = [1] * src.ndim
    for size, index in zip(axis_sizes, axis_indices):
        shape[index] = size
    return shape
def average_profile(profiles):
    """ Computes the average profile from the given ISI- or SPIKE-profiles.
    :param profiles: list of :class:`PieceWiseConstFunc` or
                     :class:`PieceWiseLinFunc` representing ISI- or
                     SPIKE-profiles to be averaged.
    :returns: the averages profile :math:`<S_{isi}>` or :math:`<S_{spike}>`.
    :rtype: :class:`PieceWiseConstFunc` or :class:`PieceWiseLinFunc`
    """
    assert len(profiles) > 1
    # accumulate into a copy so the inputs stay untouched
    accumulator = profiles[0].copy()
    for profile in profiles[1:]:
        accumulator.add(profile)
    accumulator.mul_scalar(1.0 / len(profiles))  # normalize
    return accumulator
import os
import logging
import sys
def pmdk_public_headers(include_path):
    """
    Returns list of header files acquired from given path
    """
    headers = [
        os.path.join(root, name)
        for root, _, files in os.walk(include_path)
        for name in files
        if name.endswith(".h")
    ]
    if not headers:
        # an include directory with no headers is a setup error
        logging.error("Wrong or empty includes directory")
        sys.exit(1)
    return headers
def pad_bytes32(instr):
    """Encode *instr* and right-pad it with NUL bytes to a bytes32 value.

    :param instr: string to encode (UTF-8)
    :return: 32-byte value
    :raises ValueError: if the encoded string is longer than 32 bytes
        (previously such input was silently returned over-long and unpadded)
    """
    bstr = instr.encode()
    if len(bstr) > 32:
        raise ValueError('string too long for bytes32: %d bytes' % len(bstr))
    return bstr.ljust(32, b'\x00')
def echo(n):
    """Return the inner_echo function."""
    def inner_echo(word1):
        """Concatenate n copies of word1."""
        return word1 * n
    return inner_echo
def get_bbox(bbox):
    """ Compute square image crop window. """
    y1, x1, y2, x2 = bbox
    img_width = 480
    img_length = 640
    # snap the window to the next multiple of 40, capped at 440
    window_size = min((max(y2 - y1, x2 - x1) // 40 + 1) * 40, 440)
    cy = (y1 + y2) // 2
    cx = (x1 + x2) // 2
    half = int(window_size / 2)
    rmin, rmax = cy - half, cy + half
    cmin, cmax = cx - half, cx + half
    # shift the window back inside the image when it spills over an edge
    if rmin < 0:
        rmax -= rmin
        rmin = 0
    if cmin < 0:
        cmax -= cmin
        cmin = 0
    if rmax > img_width:
        rmin -= rmax - img_width
        rmax = img_width
    if cmax > img_length:
        cmin -= cmax - img_length
        cmax = img_length
    return rmin, rmax, cmin, cmax
def is_lambda(fn):
    """Return True if ``fn`` is a lambda expression.

    Testing against LambdaType is unreliable, so compare the function's
    name to the name the interpreter gives every lambda.
    """
    lambda_name = (lambda: None).__name__
    return fn.__name__ == lambda_name
def get_tree_details(nodes):
    """Creates pertinent tree details for the given list of nodes."""
    opts = nodes[0]._meta
    lines = []
    for node in nodes:
        # a missing/None parent id is rendered as '-'
        parent = getattr(node, '%s_id' % opts.parent_attr) or '-'
        lines.append('%s %s %s %s %s %s' % (
            node.pk, parent,
            getattr(node, opts.tree_id_attr), getattr(node, opts.level_attr),
            getattr(node, opts.left_attr), getattr(node, opts.right_attr)))
    return '\n'.join(lines)
def _check_custom_instruction(experiments, optypes=None):
"""Return True if circuits contain instructions that cant be split"""
# Check via optype list if available
if optypes is not None:
# Optypes store class names as strings
return any(
{"SaveData", "Snapshot"}.intersection(optype)
for optype in optypes
)
# Otherwise iterate over instruction names
return any(
"save_" in inst.name or "snapshot" in inst.name
for exp in experiments for inst in exp.instructions
) | 52216e1df590cb2422650c296b7d414330ef4f7b | 693,023 |
import pickle
def load_from_pickle(name="pickle"):
    """load python object from pickle file"""
    with open("{}.p".format(name), "rb") as filehandler:
        return pickle.load(filehandler)
import copy
def reduce_dict_dimension(d: dict):
    """Reduce dictionary dimension."""
    flattened = {}
    # deep-copy so merging never mutates the caller's nested values
    for outer_key, inner in copy.deepcopy(d).items():
        for entities in inner.values():
            if outer_key in flattened:
                flattened[outer_key].update(entities)
            else:
                flattened[outer_key] = entities
    return flattened
def lsmod():
    """ Returns list of names of all loaded modules. """
    # first whitespace-separated field of each /proc/modules row is the name
    with open("/proc/modules") as modules_file:
        return [line.split()[0] for line in modules_file]
import json
import os
def config_buffers_scene(args):
    """Modifies Tungsten scene file to save albedo and normal.

    Reads the JSON scene at ``args.scene_path``, rewires its output
    settings (HDR vs. LDR), appends albedo/normal output buffers,
    optionally updates the camera resolution and the SPP count, then
    writes the modified scene to ``scene_buffers.json`` next to the
    original scene file.

    :param args: namespace with scene_path, hdr_buffers, resolution, spp
    :return: path of the written buffer-scene JSON file
    """
    # Load JSON scene file
    if not args.scene_path.endswith('.json'):
        raise ValueError('Scene file must be in JSON format')
    with open(args.scene_path, 'r') as fp:
        scene = json.load(fp)
    # Save either in low or high dynamic range
    if args.hdr_buffers:
        scene['renderer']['hdr_output_file'] = 'render.exr'
    else:
        scene['camera']['tonemap'] = 'reinhard'
        scene['renderer']['output_file'] = 'render.png'
        # pop() instead of del: don't crash if the scene had no HDR output
        scene['renderer'].pop('hdr_output_file', None)
    # Add output buffers
    scene['renderer']['output_buffers'] = []
    for buffer_type in ['albedo', 'normal']:
        buffer_dict = {'type': buffer_type}
        if args.hdr_buffers:
            buffer_dict['hdr_output_file'] = f'{buffer_type}.exr'
        else:
            buffer_dict['ldr_output_file'] = f'{buffer_type}.png'
        buffer_dict['sample_variance'] = False
        scene['renderer']['output_buffers'].append(buffer_dict)
    # Update resolution, if requested (aspect ratio must match the reference)
    if args.resolution:
        res = scene['camera']['resolution']
        if isinstance(res, int):
            w, h = res, res
        else:
            w, h = res[0], res[1]
        ratio_preserved = w / h == args.resolution[0] / args.resolution[1]
        assert ratio_preserved, 'Resizing image with ratio that doesn\'t match reference'
        scene['camera']['resolution'] = list(args.resolution)
    # Update SPP count
    scene['renderer']['spp'] = args.spp
    # Save buffer scene configuration next to the input scene
    scene_dir = os.path.dirname(args.scene_path)
    buffers_path = os.path.join(scene_dir, 'scene_buffers.json')
    with open(buffers_path, 'w') as fp:
        json.dump(scene, fp, indent=2)
    return buffers_path
import json
import random
def mls_data() -> list:
    """
    Read sample documents from the data directory for testing larger-scale
    data processing functions.  Keeps only documents that have an author
    field and returns a seeded, shuffled sample of at most 50 of them.

    :return: list of document ``_source`` dicts
    """
    # read in docs, one JSON document per line
    with open("data/microwave_limb_sounder.json", 'r') as f:
        docs = [json.loads(line) for line in f.readlines()]
    data = [doc["_source"] for doc in docs if "author" in doc["_source"].keys()]
    # seed so that the sampled subset is reproducible
    random.seed(777)
    random.shuffle(data)
    return data[:50]
def echo0(_, *ignored_):
    """Return the first positional argument, discarding the rest."""
    first = _
    return first
def thresholdOutput(bInt, yInt):
    """
    returns an output of 1 if the intensity is greater than 1% of the sum of the intensities, 0 if not.
    """
    # Where does 0.005 come from???
    threshold = 0.005
    return [1 if bInt > threshold else 0,
            1 if yInt > threshold else 0]
def area():
    """Return the area of the DR6+DR7 sample. See the
    `legacyhalos-sample-selection.ipynb` notebook for this calculation.
    """
    sample_area = 6717.906
    return sample_area
def _set_default(d, k, default):
"""Same behavior as dict.setdefault"""
if k in d:
return d[k]
else:
d[k] = default
return default | 9a86246d35a94042e06100e58085f3afa353a472 | 693,034 |
def SendGetRequest(client, url_map_ref):
    """Send Url Maps get request."""
    if url_map_ref.Collection() == 'compute.regionUrlMaps':
        request = client.messages.ComputeRegionUrlMapsGetRequest(
            **url_map_ref.AsDict())
        return client.apitools_client.regionUrlMaps.Get(request)
    request = client.messages.ComputeUrlMapsGetRequest(**url_map_ref.AsDict())
    return client.apitools_client.urlMaps.Get(request)
import os
def files_by_tag(tag):
    """Extracts files by tag from the configuration file.

    Reads ``pyquiz.cfg`` located next to this module.  A line starting
    with ``<tag>:`` opens a section; the following non-empty lines are
    collected as filenames until a blank line closes the section.

    :param str tag: tag
    :return: list of filenames
    """
    prefix = os.path.dirname(os.path.realpath(__file__))
    filename = os.path.join(prefix, 'pyquiz.cfg')
    files, _tag = [], None
    # NOTE(review): the file handle is never closed explicitly
    for line in open(filename, 'r').read().splitlines():
        line = line.strip()
        if line.startswith(tag+':'):
            # NOTE(review): this drops only the LAST character, so it yields
            # the bare tag only when the header line is exactly "<tag>:" —
            # confirm the config format never puts content after the colon.
            _tag = line[:-1]
        elif not line:
            # a blank line ends the current section
            _tag = None
        elif _tag:
            # inside an open section: the line is a filename
            files.append(line)
        else:
            # outside any section: ignore the line
            continue
    return files | 806224e9a7a0312cd05c53bf0180e7f430189573 | 693,037
import argparse
import os
import sys
def process_args():
    """Handles user-passed parameters"""
    parser = argparse.ArgumentParser()
    parser.add_argument('--username', '-u', type=str, action='store',
                        required=True, help='Your Ubuntu One account email.')
    parser.add_argument('--key', '-k', type=str, action='store',
                        required=True, help='Full path to the ssh privkey'
                        ' matching the pubkey in your Ubuntu One account.')
    args = parser.parse_args()
    if os.path.isfile(args.key):
        return args
    # bail out early when the key file is missing
    print("[!] That key file does not exist. Please try again.")
    sys.exit()
import re
def keyword_split(keywords):
    """
    Return all the keywords in a keyword string.
    Keeps keywords surrounded by quotes together, removing the surrounding quotes:
    >>> keyword_split('Hello I\\'m looking for "something special"')
    ['Hello', "I'm", 'looking', 'for', 'something special']
    Nested quoted strings are returned as is:
    >>> keyword_split("He said \\"I'm looking for 'something special'\\" so I've given him the 'special item'")
    ['He', 'said', "I'm looking for 'something special'", 'so', "I've", 'given', 'him', 'the', 'special item']
    """
    # three alternatives: double-quoted span, single-quoted span, bare token
    pattern = re.compile(r'"([^"]+)"|\'([^\']+)\'|(\S+)')
    return [dq or sq or bare for dq, sq, bare in pattern.findall(keywords)]
import numpy
def wokCurveAPO(r):
    """Curve of the wok at APO at radial position r
    Parameters
    -----------
    r : scalar or 1D array
        radius (cylindrical coords) mm
    Returns:
    ---------
    result : scalar or 1D array
        z (height) of wok surface in mm (0 at vertex)
    """
    # spherical profile: z = A - sqrt(A^2 - r^2), with A the sphere radius
    sphere_radius = 9199.322517101522
    return sphere_radius - numpy.sqrt(sphere_radius ** 2 - r ** 2)
import curses
def mkcolor():
    """
    Build a mapping from ANSI SGR color-code strings (e.g. "31", "1;31",
    "41") to curses attributes.

    Start pairs at 100 so we're less likely to clobber user defined pairs.
    Requires curses color support to be initialised by the caller.
    """
    color = {}
    # i runs over the seven non-black ANSI colors (1-7)
    for i in range(1, 8):
        curses.init_pair(i + 100, i, -1)  # color fg on black bg
        curses.init_pair(i + 107, curses.COLOR_WHITE, i)  # white fg on color bg
        curses.init_pair(i + 114, curses.COLOR_BLACK, i)  # black fg on color bg
        # SGR 31-37: foreground colors; 41-47: background colors
        color[str(i + 30)] = curses.color_pair(i + 100)
        color[str(i + 40)] = curses.color_pair(i + 107)
        # same codes combined with an explicit "0" (reset), either order
        color["0;" + str(i + 30)] = curses.color_pair(i + 100)
        color["0;" + str(i + 40)] = curses.color_pair(i + 107)
        color[str(i + 30) + ";0"] = curses.color_pair(i + 100)
        color[str(i + 40) + ";0"] = curses.color_pair(i + 107)
        # 91-97 (bright fg) and "1;"/";1" variants map to the bold attribute
        color[str(i + 90)] = curses.color_pair(i + 100) | curses.A_BOLD
        color["1;" + str(i + 30)] = curses.color_pair(i + 100) | curses.A_BOLD
        color["1;" + str(i + 40)] = curses.color_pair(i + 107) | curses.A_BOLD
        color[str(i + 30) + ";1"] = curses.color_pair(i + 100) | curses.A_BOLD
        color[str(i + 40) + ";1"] = curses.color_pair(i + 107) | curses.A_BOLD
    # NOTE(review): pairs i + 114 (black-on-color) are initialised but never
    # referenced in the returned mapping — confirm whether they are used
    # elsewhere or are dead setup.
    return color | 372849e8825b7ca7d53d97d6b943b77485943163 | 693,041
from typing import Optional
from typing import List
from typing import Tuple
import pathlib
def verify_request(argv: Optional[List[str]]) -> Tuple[int, str, List[str]]:
    """Fail with grace.

    Validate a (command, input, output, dryrun) argument vector.

    :param argv: the four CLI arguments, or None
    :return: (exit_code, error_message, argv); exit_code 0 means valid
    """
    if not argv or len(argv) != 4:
        return 2, 'received wrong number of arguments', ['']
    command, inp, out, dryrun = argv
    # BUG FIX: ('extract') is just a parenthesised string, so `not in` did a
    # substring test and accepted any substring of "extract" (e.g. "tract").
    # A real one-element tuple gives true membership semantics.
    if command not in ('extract',):
        return 2, 'received unknown command', ['']
    if inp:
        if not pathlib.Path(str(inp)).is_file():
            return 1, 'source is no file', ['']
    if out:
        if pathlib.Path(str(out)).is_file():
            return 1, 'target file exists', ['']
    return 0, '', argv
import os
def _find_pairs_of_matching_filenames(content_images, style_images):
"""
Given a list of content images and a list of style images, find all the
pairs of images that have the same name.
"""
pairs = []
for content_path in content_images:
content_name = os.path.splitext(os.path.split(content_path)[-1])[0]
for style_path in style_images:
style_name = os.path.splitext(os.path.split(style_path)[-1])[0]
if content_name == style_name:
pairs.append((content_path, style_path))
return pairs | 5681f41ecf02018549a54bd7c075cded72389a2e | 693,043 |
def lsm_loop_cond(exercise_index, cashflow):
    """Condition to exit a countdown loop when the exercise date hits zero."""
    # cashflow is unused: it is accepted (and discarded) only so the
    # signature matches the (index, cashflow) loop-state pair.
    del cashflow
    return exercise_index > 0 | e023ac99d03f1118399a1a5b8af5c26a8d81d864 | 693,044
import yaml
def ceph_repository_type_cdn(ansible_dir, installer_node):
    """
    Fetches container image information from all.yml.sample
    Args:
        ansible_dir     ansible directory on installer node
        installer_node  installer node to fetch all.yml.sample
    Returns:
        docker_registry, docker_image, docker_image_tag
    """
    out, err = installer_node.exec_command(
        sudo=True,
        cmd="cat {ansible_dir}/group_vars/all.yml.sample".format(
            ansible_dir=ansible_dir
        ),
    )
    sample_conf = yaml.safe_load(out)
    # missing keys simply come back as None via dict.get
    return (
        sample_conf.get("ceph_docker_registry"),
        sample_conf.get("ceph_docker_image"),
        sample_conf.get("ceph_docker_image_tag"),
    )
def _create_args(objects, mail_data):
"""Create args to call bulk update/create"""
if not objects:
return {}
arg_list = {}
arg_list["objects"] = [
{"type": obj[0], "id": int(obj[1])} for obj in objects
]
arg_list["mail_data"] = mail_data
return arg_list | 7d0e56961139b1848614ed4079ccbb3ed429d234 | 693,046 |
def _row_to_dict(cursor, row):
"""Produce a dict from a database row"""
return {col[0]: row[idx] for idx, col in enumerate(cursor.description)} | 1b6b554ec7d7731d2f9c74131d6cbb4c54cf46bd | 693,047 |
def original_id(individualized_id):
    """
    Gets the original id of an ingredient that has been transformed by individualize_ingredients()
    Args:
        individualized_id (str):
    Returns:
        str:
    Examples:
        >>> original_id('en:water**')
        'en:water'
        >>> original_id('en:sugar')
        'en:sugar'
    """
    # drop the '*' markers added on either end during individualization
    return individualized_id.rstrip('*').lstrip('*')
def _removeInvalidChars(line):
"""Return a copy of line with each ASCII control character (0-31),
and each double quote, removed."""
output = ''
for c in line:
if c >= ' ' and c != '"':
output = output + c
return output | 5a3f0014ec29df0dce12287ab13ba23a8b31f496 | 693,049 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.