content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def drop_irrelevant_practices(df, practice_col):
    """Drop irrelevant practices from the given measure table.

    An irrelevant practice has zero events (no truthy ``value``) during the
    study period.

    Args:
        df: A measure table (pandas DataFrame with a ``value`` column).
        practice_col: column name of the practice-identifier column.

    Returns:
        A copy of the given measure table with irrelevant practices dropped.
    """
    # True per practice when at least one of its values is truthy.
    is_relevant = df.groupby(practice_col)["value"].any()
    # Boolean-index the Series directly instead of comparing `== True`.
    relevant_practices = is_relevant[is_relevant].index
    return df[df[practice_col].isin(relevant_practices)]
import codecs
def encode_endian(text, encoding, errors="strict", le=True):
    """Encode *text* with an explicit byte order.

    Behaves like ``text.encode(encoding)`` except that for the
    endianness-ambiguous codecs (utf-16/utf-32) the BOM emitted is always
    the one requested via *le*, never the platform default.

    Args:
        text (str): text to encode
        encoding (str): target encoding name (aliases accepted)
        errors (str): error handler passed to ``str.encode``
        le (bool): little-endian output when True, big-endian otherwise

    Returns:
        bytes

    Raises:
        UnicodeEncodeError
        LookupError
    """
    # Normalize aliases ("UTF16", "utf_16", ...) to the canonical codec name.
    canonical = codecs.lookup(encoding).name
    if canonical == "utf-16":
        bom, codec = ((codecs.BOM_UTF16_LE, "utf-16-le") if le
                      else (codecs.BOM_UTF16_BE, "utf-16-be"))
    elif canonical == "utf-32":
        bom, codec = ((codecs.BOM_UTF32_LE, "utf-32-le") if le
                      else (codecs.BOM_UTF32_BE, "utf-32-be"))
    else:
        return text.encode(canonical, errors)
    return bom + text.encode(codec, errors)
def _non_present_values_to_zero(test_data):
"""
Adds keys to the test data of one dataframe, if key was not present in that one but in one of the other datasets.
:param test_data:
:return:
"""
for dataset1 in test_data:
for dataset2 in test_data:
for key in dataset1.keys():
if key not in dataset2:
dataset2[key] = 0
return test_data | 4786b953c20ade2570d8cc45ed7d27c790483132 | 690,939 |
def features_axis_is_np(is_np, is_seq=False):
    """Return the index of the features axis for a data container.

    Shape conventions (B=batch, S=sequence, F=features):
      * ``is_np`` True  (np.ndarray, no batch dim): ``(S, F)`` or ``(F,)``
      * ``is_np`` False (torch.Tensor, batched):    ``(B, S, F)`` or ``(B, F)``

    :param is_np: True when the data is a np.ndarray
    :param is_seq: True when the data has a sequence dim
    :return: axis index of the features dimension
    """
    base = 0 if is_np else 1
    return base + int(is_seq)
def parse_args(args):
    """Parses and validates pre-parsed command line arguments.

    Args:
        args (argparse.Namespace): Pre-parsed command line arguments

    Returns:
        tuple: (verbose flag, sandbox flag, image file name, image width,
        image height, mask file, font file name, max tags, horizontal tag
        ratio, tag scaling, tag color scheme, Evernote auth token)

    Raises:
        ValueError: if numeric parameters have an invalid value
    """
    image_width_height = args.imageSize.split("x")
    if len(image_width_height) != 2:
        # Interpolate the value explicitly: ValueError("msg", x) does NOT
        # apply printf-style formatting to the extra argument.
        raise ValueError("invalid imageSize [%s] format, expected format is <width>x<height>" % args.imageSize)
    image_width, image_height = int(image_width_height[0]), int(image_width_height[1])
    if image_width not in range(10, 10000):
        raise ValueError("invalid imageSize.width [%d], imageSize.width must be between 10 and 9999" % image_width)
    if image_height not in range(10, 10000):
        raise ValueError("invalid imageSize.height [%d], imageSize.height must be between 10 and 9999" % image_height)
    if args.maxTags not in range(1, 1000):
        raise ValueError("invalid maxTags [%d], maxTags must be between 1 and 999" % args.maxTags)
    if args.horizontalTags < 0 or args.horizontalTags > 1:
        raise ValueError("invalid horizontalTags [%f], horizontalTags must be between 0 and 1" % args.horizontalTags)
    if args.tagScaling < 0 or args.tagScaling > 1:
        # Bug fix: this message previously interpolated args.horizontalTags.
        raise ValueError("invalid tagScaling [%f], tagScaling must be between 0 and 1" % args.tagScaling)
    font_file = None if args.fontFile is None else args.fontFile.name
    return (args.verbose, args.sandbox, args.imageFile.name, image_width,
            image_height, args.maskFile, font_file, args.maxTags,
            args.horizontalTags, args.tagScaling, args.tagColorScheme,
            args.evernoteAuthToken)
import torch
def _halve(x):
""" computes the point on the geodesic segment from o to x at half the distance """
return x / (1. + torch.sqrt(1 - torch.sum(x ** 2, dim=-1, keepdim=True))) | 24d711ee70bd211ef655676a3c4bf466fb22d254 | 690,942 |
def _extract_prop_set(line):
"""
Extract the (key, value)-tuple from a string like:
>>> 'set foo = "bar"'
:param line:
:return: tuple (key, value)
"""
token = ' = "'
line = line[4:]
pos = line.find(token)
return line[:pos], line[pos + 4:-1] | 2bdb491c14308ca1b634e7d840893af3c698fa60 | 690,946 |
import torch
def depth_map_to_locations(depth, invK, invRt):
    """
    Create a point cloud from a depth map
    Inputs:
        depth       HxW torch.tensor with the depth values
        invK        3x4 torch.tensor with the inverse intrinsics matrix
        invRt       3x4 torch.tensor with the inverse extrinsics matrix
    Outputs:
        points      HxWx3 torch.tensor with the 3D points
    """
    H,W = depth.shape[:2]
    depth = depth.view(H,W,1)
    # Per-pixel homogeneous image coordinates (u, v, 1), built on depth's device.
    xs = torch.arange(0, W).float().reshape(1,W,1).to(depth.device).expand(H,W,1)
    ys = torch.arange(0, H).float().reshape(H,1,1).to(depth.device).expand(H,W,1)
    zs = torch.ones(1,1,1).to(depth.device).expand(H,W,1)
    # Back-project: scale the camera rays (invK applied to pixel coords) by
    # depth, rotate into world space, then translate by invRt's last column.
    world_coords = depth * (torch.cat((xs, ys, zs), dim=2) @ invK.T[None]) @ invRt[:3,:3].T[None] + invRt[:3,3:].T[None]
    return world_coords
def list_to_quoted_delimited(input_list: list, delimiter: str = ',') -> str:
    """Join list items into one string of quoted, delimited values.

    Each item is wrapped in single quotes; items are separated by the
    delimiter followed by a space.

    :param input_list: eg: ['a', 'b', 'c']
    :param delimiter: '|'
    :return: eg: "'a'| 'b'| 'c'"
    """
    quoted_items = [f"'{item}'" for item in input_list]
    separator = f'{delimiter} '
    return separator.join(quoted_items)
def better_no_extreme(listed):
    """Return a copy of ``listed`` with its first and last elements removed.

    Why better? For starters, it does not modify the original list.
    """
    trimmed = listed[1:-1]
    return trimmed
def plugin(plugin_without_server):
    """
    Construct mock plugin with NotebookClient with server registered.
    Use `plugin.client` to access the client.
    """
    fake_server = {
        'notebook_dir': '/path/notebooks',
        'url': 'fake_url',
        'token': 'fake_token',
    }
    plugin_without_server.client.register(fake_server)
    return plugin_without_server
def rec_multiply(a_multiplier, digit, carry):
    """Multiply a decimal number (given as a digit string) by one digit.

    Processes ``a_multiplier`` right-to-left, propagating ``carry``.

    :param a_multiplier: number as a string of digits ('' allowed)
    :param digit: single-digit int multiplier
    :param carry: carry from a less significant position
    :return: product as a string ('' when the input is empty and carry is 0)
    """
    if a_multiplier == '':
        return str(carry) if carry else ''
    digits_out = []
    for ch in reversed(a_multiplier):
        product = int(ch) * digit + carry
        digits_out.append(str(product % 10))
        carry = product // 10
    head = str(carry) if carry else ''
    return head + ''.join(reversed(digits_out))
import string
def lowerFirstCamelWord(word):
    """ puts the first word in a CamelCase Word in lowercase.

    I.e. CustomerID becomes customerID, XMLInfoTest becomes xmlInfoTest
    """
    newstr = ''
    # Swap case so the leading uppercase run of `word` appears lowercase in
    # `swapped` and can be scanned against ascii_lowercase below.
    swapped = word.swapcase()
    idx = 0
    # if it's all-caps, return an all-lowered version
    lowered = word.lower()
    if swapped == lowered:
        return lowered
    # Collect the (lowercased) leading run of originally-uppercase chars.
    for c in swapped:
        if c in string.ascii_lowercase:
            newstr += c
            idx += 1
        else:
            break
    if idx < 2:
        # Single leading capital (e.g. "Customer"): lowercase it, keep the rest.
        newstr += word[idx:]
    else:
        # Acronym prefix (e.g. "XMLInfo"): keep the acronym's last capital as
        # the start of the next word -> "xmlInfo...".
        newstr = newstr[:-1] + word[idx - 1:]
    return newstr
def distance_between_points(first_point, second_point):
    """
    Calculates the Euclidean distance between 2 points: (dx^2 + dy^2) ^ 0.5.

    :param first_point: tuple of x and y value of point 1
    :param second_point: tuple of x and y value of point 2
    :return: Float value of the distance between the 2 points
    :rtype: float
    """
    dx = first_point[0] - second_point[0]
    dy = first_point[1] - second_point[1]
    return (dx ** 2 + dy ** 2) ** 0.5
import typing
import pathlib
import glob
def _get_paths(patterns: typing.Set[pathlib.Path]) -> typing.Set[pathlib.Path]:
"""Convert a set of file/directory patterns into a list of matching files."""
raw = [
pathlib.Path(item)
for pattern in patterns
for item in glob.iglob(str(pattern), recursive=True)
]
files = [p for p in raw if not p.is_dir()]
output = files + [
child
for p in raw
for item in glob.iglob(f"{p}/**/*", recursive=True)
if p.is_dir() and not (child := pathlib.Path(item)).is_dir()
]
return set(output) | 5373f00f18326f4b18fbd5e9d6c7631a67ad7d16 | 690,955 |
import numpy
def epsilon_greedy_policy(env, S, Q, epsilon):
    """Act epsilon-greedily with respect to action-value function Q in state S.

    env is an OpenAI gym environment.
    S is the state the agent is in.
    Q is the action-value function, indexed as Q[S, A].
    epsilon is the probability of taking a random action.
    """
    explore = numpy.random.rand() < epsilon
    if explore:
        return env.action_space.sample()
    # Exploit: choose the action that maximizes Q in S.
    action_values = [Q[S, A] for A in range(env.action_space.n)]
    return numpy.argmax(action_values)
def _attr2obj(element, attribute, convert):
"""
Reads text from attribute in element
:param element: etree element
:param attribute: name of attribute to be read
:param convert: intrinsic function (e.g. int, str, float)
"""
try:
if element.get(attribute) is None:
return None
elif element.get(attribute) == '':
return None
return convert(element.get(attribute))
except Exception:
None | 42391ace71e954ced70ec905deb979fa7bcdb47f | 690,958 |
import math
def get_height(point, biom_size, coords):
    """Compute the height of a map point from the nearby biome points.

    A biome point contributes its weight when the map point lies within
    range of it: inside the diamond |dx| + |dy| <= biom_size, or inside the
    circle dx^2 + dy^2 <= biom_size^2.

    :param point: (x, y) map point being evaluated
    :param biom_size: influence radius of a biome point
    :param coords: list of biome-point dicts with "co_x", "co_y", "weight"
    :return height: sum of the weights of all biome points in range
    """
    height = 0
    for biom in coords:
        dif_x = abs(biom["co_x"] - point[0])
        dif_y = abs(biom["co_y"] - point[1])
        # Cheap bounding-box rejection before the exact tests.
        if dif_x >= biom_size or dif_y >= biom_size:
            continue
        in_diamond = dif_x + dif_y <= biom_size
        in_circle = math.pow(dif_x, 2) + math.pow(dif_y, 2) <= math.pow(biom_size, 2)
        if in_diamond or in_circle:
            height += biom["weight"]
    return height
def rename_column_values_for_comparison(df, sourcename):
    """
    To compare datasets at different geographic scales, rename FlowName and
    Compartment values to those available at the national level.

    :param df: df with FlowName and Compartment columns
    :param sourcename: string, datasource name
    :return: df with renamed values
    """
    # At the national level only 'FlowName' = 'total' and 'Compartment' =
    # 'total' exist, while state/county data distinguishes fresh/saline and
    # ground/surface water. Collapse those categories so subset data can be
    # compared against the national values.
    if sourcename == 'USGS_NWIS_WU':
        for term in ('fresh', 'saline'):
            df['FlowName'] = df['FlowName'].str.replace(term, 'total', regex=True)
        for term in ('ground', 'surface'):
            df['Compartment'] = df['Compartment'].str.replace(term, 'total', regex=True)
    return df
def create_cercauniversita(conn, cercauni):
    """
    Insert a new record into the cercauniversita table.

    :param conn: open sqlite3 connection
    :param cercauni: 13-tuple matching the column order in the INSERT below
    :return: cercauni rowid of the inserted record
    """
    sql = ''' INSERT INTO cercauniversita(id,authorId,anno,settore,ssd,fascia,orcid,cognome,nome,genere,ateneo,facolta,strutturaAfferenza)
              VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?) '''
    cursor = conn.cursor()
    cursor.execute(sql, cercauni)
    # Note: committing the transaction is left to the caller.
    return cursor.lastrowid
def get_item_from_user(message, lst):
    """
    Gets a valid index from a user to get an item from lst.
    The user is shown `message` and the range of valid indices, in the form
    of "Valid indices inclusive between 0 and max_index". The user is asked
    until a valid item was chosen.
    Args:
        message: The message to show the user before asking for index
        lst: the list to choose from
    Returns:
        The chosen index
    """
    idx = -1
    while idx == -1:
        try:
            # get an input from the user
            print(message)
            val = input(f"Valid indices inclusive between 0 and {len(lst) - 1}\n")
            # try to parse it as an int
            idx = int(val)
            # try to extract the action, this will throw if out of bounds
            # NOTE(review): negative inputs below -1 pass this check through
            # Python's negative indexing (e.g. -2 indexes lst[-2]) and are
            # returned as-is — confirm whether callers rely on that or
            # whether 0 <= idx should be enforced here.
            item = lst[idx]
            # if it was no int or is out of bounds, remind the user what is to do
            # and reset idx
        except (ValueError, IndexError):
            idx = -1
            print(
                "Please enter an integer value between inclusive "
                f"0 and {len(lst) - 1}"
            )
    return idx
def cauchy_cooling_sequence(initial_t, it):
    """
    Calculate the temperature for an iteration using a Cauchy progression.

    Parameters
    ----------
    initial_t : float
        initial temperature
    it : int
        actual iteration

    Returns
    -------
    tt : float
        new temperature, initial_t / (1 + it)
    """
    return initial_t / (1 + it)
def is_end(word, separator):
    """Return True if the subword can appear at the end of a word (i.e. the
    subword does not end with separator); False otherwise."""
    ends_with_separator = word.endswith(separator)
    return not ends_with_separator
def shell_escape_str(str):
    """
    Escape single quotes for safe use inside a single-quoted POSIX shell
    string: each ' becomes '\\'' (close quote, escaped quote, reopen quote).

    :param str: text to escape
    :return: escaped text
    """
    # Bug fix: the original wrote "'\''", but in a double-quoted Python
    # literal \' is just ', producing "'''" — which does not escape the
    # quote for the shell. The backslash must survive into the output.
    return str.replace("'", "'\\''")
def _reconstruct_path(target_candidate, scanned):
"""
Reconstruct a path from the scanned table, and return the path
sequence.
"""
path = []
candidate = target_candidate
while candidate is not None:
path.append(candidate)
candidate = scanned[candidate.id]
return path | 18ca897a367eab0d5868a5d933c7f2fa96d661ff | 690,968 |
import string
def query_to_url(query, search_type, size):
    """Convert a user query to a usable arXiv search URL.

    Args:
        query (str): search query by user
        search_type (str): search type ("All Fields", "Title", "Abstract")
        size (int): number of items to query

    Returns:
        (str) the url for the search
    """
    # Strip punctuation, then turn spaces into '+' search keys.
    cleaned = query.translate(str.maketrans('', '', string.punctuation))
    cleaned = cleaned.replace(" ", "+")
    # Map the human-readable choice onto arXiv's searchtype parameter.
    type_param = {"All Fields": "",
                  "Title": "title",
                  "Abstract": "abstract"}[search_type]
    return f"https://arxiv.org/search/?query={cleaned}&searchtype={type_param}&abstracts=show&order=&size={size}"
import os
def navigate_parent_dirs(path: str, levels: int) -> str:
    """
    Navigate to a parent directory relative to a given file path.

    Args:
        path (str): path to navigate from (file or directory; note that the
            parent of a file is a directory, and the parent of a directory
            is its parent directory)
        levels (int): number of levels to navigate (must be >= 0)

    Returns:
        str: absolute path of the parent directory that sits `levels` above
        the given path

    Raises:
        ValueError: if `levels` is negative
    """
    if levels < 0:
        raise ValueError("levels must be >= 0, not {}".format(levels))
    ups = os.path.sep.join(".." for _ in range(levels))
    target = os.path.abspath(os.path.join(path, ups))
    # A file's parent is its containing directory.
    return os.path.dirname(target) if os.path.isfile(target) else target
from pathlib import Path
def read_polymer(filename: Path) -> tuple[dict, str]:
    """Read polymer structure information from a file

    The polymer is represented by a dict of pair-insertion rules and a
    polymer template string.

    Args:
        filename (Path): path to the input file.

    Returns:
        dict: rules
        str: initial polymer structure

    Examples:
        >>> read_polymer(Path("test_data/day_14.data"))[0]['CH']
        'B'
        >>> read_polymer(Path("test_data/day_14.data"))[1]
        'NNCB'
    """
    with filename.open("r") as handle:
        template, rules_block = handle.read().split("\n\n")
    # Each rule line has the form "AB -> C".
    rules = dict(
        line.strip().split(" -> ") for line in rules_block.strip().split("\n")
    )
    return rules, template
def is_google_sheets(user_agent):
    """Return whether the User-Agent identifies Google Sheets / Apps Script.

    Example: Mozilla/5.0 (compatible; GoogleDocs; apps-spreadsheets; +http://docs.google.com)
    """
    markers = ('apps-spreadsheets', 'Google-Apps-Script')
    return any(marker in user_agent for marker in markers)
import os
import sys
def find_project_root_dir():
    """Return the PROJECT_ROOT_DIR environment variable, or exit(1) if unset."""
    root = os.environ.get("PROJECT_ROOT_DIR")
    if root:
        return root
    print('Cannot find PROJECT_ROOT_DIR')
    sys.exit(1)
import os
def get_conda_env_name() -> str:
    """Get the name of the active conda env, or empty string if none."""
    prefix = os.environ.get('CONDA_PREFIX', '')
    # The env name is the last path component of the prefix.
    return os.path.basename(prefix)
def NH_Cluster(a_IDX,cluster_label_X,X,kd_tree,idx_sub):
    """
    Purpose:
        Determines the neighborhood for data points identified
        by their absolute index in a_IDX array. Only neighbors sharing the
        query point's cluster label are kept.
    Args:
        a_IDX: absolute indices of the query points
        cluster_label_X: cluster label per data point in X
        X: data matrix, indexed by absolute index
        kd_tree: KD-tree built over the subset selected by idx_sub
        idx_sub: maps tree-relative indices back to absolute indices
    Return:
        List of neighborhood (in absolute index) for each data point in a_IDX.
    """
    n_data=len(a_IDX)
    # Query the 40 nearest neighbors; returned indices are tree-relative.
    _,nn_r_IDX=kd_tree.query(list(X[a_IDX]), k=40)
    # Translate tree-relative indices to absolute indices.
    nn_a_IDX=idx_sub[nn_r_IDX]
    # Column vector of query labels vs. matrix of neighbor labels, so the
    # comparison broadcasts into a per-neighbor boolean mask.
    cluster_label_a_IDX=cluster_label_X[a_IDX].reshape(-1,1)
    cluster_label_nn_a_IDX=cluster_label_X[nn_a_IDX]
    tmp=(cluster_label_nn_a_IDX == cluster_label_a_IDX)
    # Keep, per query point, only the neighbors in the same cluster.
    return [nn_a_IDX[i][t] for i,t in zip(range(n_data),tmp)]
import os
def getImgs(d):
    """Get the images from the test directory, partitioned by class.

    Every directory visited by os.walk contributes one list of image paths
    (files ending in .jpg/.png, case-insensitive).
    """
    extensions = (".jpg", ".png")
    img_classes = []  # Images, separated by class.
    for subdir, _dirs, files in os.walk(d):
        class_imgs = [
            os.path.join(subdir, name)
            for name in files
            if name.lower().endswith(extensions)
        ]
        img_classes.append(class_imgs)
    return img_classes
def query_phantoms(view, pids):
    """Query the view for the phantoms identified by ``pids``."""
    result = view.query_phantoms(pids)
    return result
def ensure_dictionary(config):
    """
    ensure_dictionary

    Ensure that config is a dictionary. If not, it is probably a list of
    one-element dictionaries (the writer of the configuration file probably
    put hyphens '-' in front of his keys); in that case a single dictionary
    is built by fusing all the elements of the list. This is mostly intended
    to simplify the parsing functions, as the output is always a dictionary.
    """
    if isinstance(config, dict):
        return config
    if not isinstance(config, list):
        raise TypeError("Unforeseen error in configuration file.\n" +
                        str(config))
    merged = {}
    for element in config:
        # Each list entry must be a dict with exactly one key.
        if not isinstance(element, dict) or len(element) != 1:
            raise ValueError("Parsing error in the configuration file.\n" +
                             str(element))
        key, value = next(iter(element.items()))
        # Duplicate keys across entries are a configuration error.
        if key in merged:
            raise ValueError("Parsing error in the configuration file."+
                             "Two elements have the same key : " + str(key))
        merged[key] = value
    return merged
def get_positive_passages(positive_pids, doc_scores, passage_map):
    """
    Get positive passages for a given grounding using BM25 scores from the
    positive passage pool.

    Parameters:
        positive_pids: list
            Positive passage indices
        doc_scores: list
            (passage id, BM25 score) pairs for all passages
        passage_map: dict
            All passages mapped with their ids

    Returns:
        positive passage dicts, sorted by descending score
    """
    positive = set(positive_pids)
    ranked = sorted(
        ((pid, score) for pid, score in doc_scores if pid in positive),
        key=lambda pair: pair[1],
        reverse=True,
    )
    return [
        {
            "psg_id": pid,
            "score": score,
            "title": passage_map[pid]["title"],
            "text": passage_map[pid]["text"],
        }
        for pid, score in ranked
    ]
def _get_ignore_set():
""" """
ignore_env_vars = [
# shell variables
'PWD', 'OLDPWD', 'SHLVL', 'LC_ALL', 'TST_HACK_BASH_SESSION_ID',
# CI variables
'BUILD_URL', 'BUILD_TAG', 'SVN_REVISION',
'BUILD_ID', 'EXECUTOR_NUMBER', 'START_USER',
'EXECUTOR_NUMBER', 'NODE_NAME', 'NODE_LABELS',
'IF_PKG', 'BUILD_NUMBER', 'HUDSON_COOKIE',
# ssh variables
'SSH_CLIENT', 'SSH2_CLIENT',
# vim variables
'VIM', 'MYVIMRC', 'VIMRUNTIME']
for i in range(30):
ignore_env_vars.append('SVN_REVISION_%d' % i)
return frozenset(ignore_env_vars) | 98a89538b5decb97fa488b6f7b53491c63c5480d | 690,984 |
def position_angle(freq, pa_zero, rm, freq_cen):
    """Polarisation position angle as a function of freq.

    PA(f) = pa_zero + RM * (lambda(f)^2 - lambda(freq_cen)^2), with freq in
    Hz and freq_cen in MHz.
    """
    c = 2.998e8  # vacuum speed of light in m/s
    lambda_sq = (c/freq)**2
    lambda_cen_sq = (c/(freq_cen*1e6))**2
    return pa_zero + rm*(lambda_sq - lambda_cen_sq)
def query_range_result_delay_60s(query_range_result_ok):
    """
    Range query data with bad values injected into the first 4 samples
    (first 60s of the measurement) of both metrics.
    """
    data = query_range_result_ok
    # Overwrite the first four samples of each of the two metrics with the
    # bad value "0".
    for metric_idx in range(2):
        samples = data[metric_idx]['values']
        for sample_idx in range(4):
            samples[sample_idx][1] = "0"
    return data
import torch
def ising_potential(hparams):
    """
    Potential for square Ising model with periodic boundary conditions.

    Reads an optional coupling constant `J` from hparams (default 1.0) and
    returns a closure evaluating the potential for a state batch.
    """
    J = getattr(hparams, 'J', 1.0)
    def potential(state, passive_rates):
        x = state
        # Periodic shifts give each site's vertical / horizontal neighbor.
        x_up = torch.roll(x, 1, dims=-2)
        x_right = torch.roll(x, -1, dims=-1)
        # (2*x - 1) maps x to +/-1 spins, assuming x is binary {0,1}
        # occupancy — TODO confirm against the sampler producing `state`.
        pot = (2*x - 1) * (2*x_up - 1)  # vertical interactions
        pot += (2*x - 1) * (2*x_right - 1)  # horizontal interactions
        pot = -J * pot.sum(dim=(-1, -2))  # sum over all sites
        pot -= passive_rates.sum(dim=(-1, -2, -3))  # sum over all sites and directions
        return pot
    return potential
def lire_mots(nom_fichier):
    """Read the words contained in a file, one word per line.

    Parameters:
        nom_fichier (str): name of the file containing the words

    Returns:
        list of str: the lines, stripped of surrounding whitespace
    """
    with open(nom_fichier, encoding="UTF-8") as fichier:
        return [ligne.strip() for ligne in fichier]
def epsilonEffectiveVectorialOtherBuildingMat(receivedListOFepsilons):
    """For each emissivity in the received list, compute the effective
    emissivity when the other side is building material (epsilon = 0.9).
    """
    epsilon2 = 0.9  # fixed building-material emissivity
    return [1.0/(1/epsilon1+1/epsilon2-1) for epsilon1 in receivedListOFepsilons]
from typing import Dict
import argparse
def parse_args() -> Dict[str, str]:
    """
    Define and parse the command-line arguments. Check the option [-h] to
    learn more.

    :return: the parsed arguments as a dictionary
    """
    parser = argparse.ArgumentParser(
        description='Collate TACRED samples including external dependency parses.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('tacred-split', type=str,
                        help='TACRED split (train, dev, or test) in json format.')
    parser.add_argument('dependency-parse', type=str,
                        help='Dependency parses of the split in CONLLU format.')
    parser.add_argument('out', type=str, help='Collated samples.')
    # vars() turns the Namespace into a plain dictionary.
    return vars(parser.parse_args())
from typing import List
def _hyphens_exists_next_line(
line_splitted_list: List[str], next_line_idx: int) -> bool:
"""
Get the boolean value of whether there are multiple hyphen
characters on the next line.
Parameters
----------
line_splitted_list : list of str
A list containing line-by-line strings.
next_line_idx : int
Next line index.
Returns
-------
result_bool : bool
If exists, True will be set.
"""
if len(line_splitted_list) < next_line_idx + 1:
return False
next_line_str: str = line_splitted_list[next_line_idx]
is_in: bool = '----' in next_line_str
if is_in:
return True
return False | fedc408dab1176d43be6aecec59c871dda1821d6 | 690,993 |
import time
def millis():
    """Return the number of milliseconds which have elapsed since the CPU turned on."""
    seconds = time.monotonic()
    return seconds * 1000
def get_data(dataset):
    """Get features and targets from a specific dataset.

    Parameters
    ----------
    dataset : Dataset object
        dataset that can provide features and targets

    Returns
    -------
    features : array
        features in the dataset
    targets : array
        1-d targets in the dataset
    """
    handle = dataset.open()
    # Fetch every example in one request.
    features, targets = dataset.get_data(handle, slice(0, dataset.num_examples))[:2]
    dataset.close(handle)
    return features, targets
def _gen_perm_Numpy(order, mode):
"""
Generate the specified permutation by the given mode.
Parameters
----------
order : int
the length of permutation
mode : int
the mode of specific permutation
Returns
-------
list
the axis order, according to Kolda's unfold
"""
tmp = list(range(order - 1, -1, -1))
tmp.remove(mode)
perm = [mode] + tmp
return perm | df2511a8b9278c3018808a259f202e42c8a5462f | 690,996 |
def parse_bool_or_400(data, key, default=None):
    """
    Parse data[key] into a boolean.

    Returns ``default`` when the key is missing, the value itself when it is
    already a bool, and otherwise treats the strings 'true'/'1'
    (case-insensitive) as True.
    """
    if key not in data:
        return default
    value = data[key]
    if isinstance(value, bool):
        return value
    return value.lower() in ('true', '1')
import argparse
import sys
def parse_cl_args():
    """Parse command-line arguments for the MIMIC-III extraction script."""
    parser = argparse.ArgumentParser(
        description='Extract data from the MIMIC-III CSVs.')
    parser.add_argument('-ip', '--input-path', type=str, default='../mimic',
                        help='Path to MIMIC-III CSV files.')
    parser.add_argument('-op', '--output-path', type=str, default='data',
                        help='Path to desired output directory.')
    parser.add_argument('-v', '--verbose', type=int, default=1,
                        help='Info in console output (0 or 1).')
    return parser.parse_args(sys.argv[1:])
def seq(*sequence):
    """Runs a series of parsers in sequence optionally storing results in a returned dictionary.

    Each element is either a bare callable (run for its side effect) or a
    ``(key, callable)`` pair whose result is stored under ``key``.

    For example:
        seq(whitespace, ('phone', digits), whitespace, ('name', remaining))
    """
    results = {}
    for step in sequence:
        if callable(step):
            step()
        else:
            key, parser = step
            results[key] = parser()
    return results
def contains_duplicate(nums: list[int]) -> bool:
    """Returns True if any value appears at least twice in the array, otherwise False

    Complexity:
        n = len(nums)
        Time: O(n)
        Space: O(n)

    Args:
        nums: array of possibly non-distinct values

    Examples:
        >>> contains_duplicate([1,2,3,1])
        True
        >>> contains_duplicate([1,2,3,4])
        False
        >>> contains_duplicate([1,1,1,3,3,4,3,2,4,2])
        True
    """
    seen = set()
    for value in nums:
        # A revisit of any value means a duplicate exists.
        if value in seen:
            return True
        seen.add(value)
    return False
import subprocess
def get_git_version(abbreviate: bool = False) -> str:
    """Gets the latest Git tag (as a string), e.g. 0.1.2.

    `git describe --tags` finds the most recent tag reachable from the
    current commit. When the tag does not point at the commit it appends
    the number of additional commits and the abbreviated commit hash
    (e.g. 0.1.2-9-g2118b21); passing ``abbreviate`` adds `--abbrev=0`,
    which strips that suffix.

    Returns "Unknown" when git is unavailable or the describe call fails.
    """
    cmd = ["git", "describe", "--tags"]
    if abbreviate:
        cmd.append("--abbrev=0")
    try:
        described = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except (subprocess.SubprocessError, OSError):
        return "Unknown"
    return described.strip().decode("ascii")
def check_for_empty_nests_in_nest_spec(nest_spec):
    """
    Ensures that the values of `nest_spec` are not empty lists.
    Raises a helpful ValueError if they are.

    Parameters
    ----------
    nest_spec : OrderedDict, or None, optional.
        Keys are strings that define the name of the nests. Values are lists of
        alternative ids, denoting which alternatives belong to which nests.
        Each alternative id must only be associated with a single nest!
        Default == None.

    Returns
    -------
    None.
    """
    empty_nests = [name for name in nest_spec if len(nest_spec[name]) == 0]
    if empty_nests != []:
        msg = "The following nests are INCORRECTLY empty: {}"
        raise ValueError(msg.format(empty_nests))
    return None
def depth_bonacci_rule(depth):
    """rule for generating tribonacci or other arbitrary depth words

    Args:
        depth (int): number of consecutive previous words to concatenate

    Returns:
        a function mapping a word list w to w[-1] + w[-2] + ... + w[-(depth+1)].
        For example, if depth is 3, you get the tribonacci words.
        (there is already a tribonacci and quadbonacci words method)
    """
    def rule(words):
        # Most recent `depth` words, newest first.
        recent_newest_first = words[-1:-depth-1:-1]
        return "".join(recent_newest_first)
    return rule
import re
def compile_patterns(patterns):
    """Compile a list of patterns into one big option regex.

    Each pattern is padded with \\b (word boundary) so it matches whole
    words only, unless it is already anchored with '^' (start) / '$' (end).
    """
    padded = []
    for pat in patterns:
        prefix = '' if pat.startswith('^') else r'\b'
        suffix = '' if pat.endswith('$') else r'\b'
        padded.append(prefix + pat + suffix)
    return re.compile('|'.join(padded), re.I | re.UNICODE)
def laplacian(Z, dx):
    """
    Five-point-stencil discrete Laplace operator on the interior of Z.

    :param Z: Combination of two initial matrices (2-D array)
    :param dx: float space step
    :return: Laplacian values for the interior (boundaries excluded)
    """
    center = Z[1:-1, 1:-1]
    # Sum of the four orthogonal neighbors of each interior cell.
    neighbors = Z[0:-2, 1:-1] + Z[1:-1, 0:-2] + Z[2:, 1:-1] + Z[1:-1, 2:]
    return (neighbors - 4 * center) / dx**2
def cli(ctx):
    """Get the list of all data tables.

    Output:

        A list of dicts with details on individual data tables.
        For example::

          [{"model_class": "TabularToolDataTable", "name": "fasta_indexes"},
           {"model_class": "TabularToolDataTable", "name": "bwa_indexes"}]
    """
    tool_data = ctx.gi.tool_data
    return tool_data.get_data_tables()
import os
def is_template_file(path):
    """
    Return True if the given file path is an HTML/XML template file.
    """
    extension = os.path.splitext(path)[1].lower()
    return extension in ('.html', '.htm', '.xml')
def compareLists(list1, list2):
    """returns the union and the disjoint members of two lists.

    Classic two-pointer merge over the sorted copies of both inputs,
    splitting elements into unique/common buckets.

    .. note:: Deprecated
       Use python sets instead.

    Returns
    -------
    unique1 : list
        Elements only in list1 (sorted)
    unique2 : list
        Elements only in list2 (sorted)
    common : list
        Elements in both lists (sorted)
    """
    unique1 = []
    unique2 = []
    common = []
    set1 = sorted(list1)
    set2 = sorted(list2)
    x1 = 0
    x2 = 0
    # Merge walk: advance the pointer holding the smaller element.
    while 1:
        if x1 >= len(set1) or x2 >= len(set2):
            break
        if set2[x2] == set1[x1]:
            common.append(set1[x1])
            x1 += 1
            x2 += 1
            continue
        if set1[x1] < set2[x2]:
            unique1.append(set1[x1])
            x1 += 1
            continue
        if set1[x1] > set2[x2]:
            unique2.append(set2[x2])
            x2 += 1
    # One side is exhausted; the remainder of the other is unique to it.
    if x2 < len(set2):
        unique2 += set2[x2:]
    elif x1 < len(set1):
        unique1 += set1[x1:]
    return unique1, unique2, common
def get_fem_on_colab_installation_cell_code(
    package_name: str, package_version: str, package_url: str, package_import: str
) -> str:
    """Return installation cell code for a FEM on Colab package.

    Args:
        package_name: name of the package, e.g. "mpi4py".
        package_version: either "" or a "==<version>" pin.
        package_url: either "" (use the released installer) or a commit SHA
            pointing at the fem-on-colab.github.io repository.
        package_import: module name used in the generated try/except import.

    Returns:
        The notebook cell source that imports the package, installing it
        first if the import fails.
    """
    if package_version != "":
        assert package_version.startswith("==")
        package_version = package_version.replace("==", "")
    if package_url == "":
        package_url_prefix = "https://fem-on-colab.github.io/releases"
    else:
        # Fixed typo in the assertion message ("instaed" -> "instead").
        assert "https" not in package_url, "Please provide the commit SHA instead of the full URL"
        package_url_prefix = f"https://github.com/fem-on-colab/fem-on-colab.github.io/raw/{package_url}/releases"
    package_url_suffix = ".sh" if package_version == "" else f"-{package_version}.sh"
    package_url = f"{package_url_prefix}/{package_name}-install{package_url_suffix}"
    package_install = f"{package_name}-install.sh"
    return f"""try:
    import {package_import}
except ImportError:
    !wget "{package_url}" -O "/tmp/{package_install}" && bash "/tmp/{package_install}"
    import {package_import}"""
def _get_input_shape(shape_x):
"""_get_input_shape"""
dim_a = shape_x[0]
dim_b = shape_x[1]
res = []
if dim_a % 16 != 0:
dim_a = (dim_a // 16) * 16 + 16
res.append(dim_a)
else:
res.append(dim_a)
if dim_b % 16 != 0:
dim_b = (dim_b // 16) * 16 + 16
res.append(dim_b)
else:
res.append(dim_b)
return res | 8bc1a57a21dcd0a5deeae69ef5fe317c660e1b80 | 691,010 |
import socket
def free_port():
    """
    Bind an ephemeral port, close the socket, and return the port number.
    """
    with socket.socket() as sock:
        sock.bind(("", 0))  # port 0 asks the OS for any free port
        _host, port = sock.getsockname()
        return port
def compute_precision_recall(tp, fp, gt_per_classes_num):
    """
    Compute precision/recall at each detection threshold.

    Note: *tp* and *fp* are turned into cumulative sums IN PLACE, matching
    the historical behaviour callers may rely on.

    :param tp: per-threshold true-positive counts (mutated to cumulative)
    :param fp: per-threshold false-positive counts (mutated to cumulative)
    :param gt_per_classes_num: number of ground-truth instances
    :return: (precision list, recall list)
    """
    running = 0
    for idx, count in enumerate(fp):
        running += count
        fp[idx] = running
    running = 0
    for idx, count in enumerate(tp):
        running += count
        tp[idx] = running
    rec = [float(cum_tp) / gt_per_classes_num for cum_tp in tp]
    prec = [float(cum_tp) / (cum_fp + cum_tp) for cum_fp, cum_tp in zip(fp, tp)]
    return prec, rec
def _function_iterator(graph):
"""Iterate over the functions in a graph.
:rtype: iter[str]
"""
return (
node.function
for node in graph
) | aaff945176f3d5754a4381cb74ad5a660a298556 | 691,013 |
import pandas
import os
def _read_dataframes_20M(path):
    """Load the MovieLens 20M ratings.csv and movies.csv from *path*."""
    ratings_df = pandas.read_csv(os.path.join(path, "ratings.csv"))
    movies_df = pandas.read_csv(os.path.join(path, "movies.csv"))
    return ratings_df, movies_df
def smart_city_event_data(event_dict):
    """
    Build the event payload sent to the smart-city cloud endpoint.

    params:
        event_dict: mapping with EVENT_TYPE, SPEED, TIME and GPS keys
    returns:
        dict containing exactly the fields the endpoint expects
    """
    payload = {}
    payload["event_type"] = event_dict["EVENT_TYPE"]
    payload["speed"] = event_dict["SPEED"]
    payload["event_timestamp"] = event_dict["TIME"]
    payload["gps_coord"] = event_dict["GPS"]
    return payload
import os
def get_file_list(path):
    """
    Recursively collect the full paths of all ``.py`` files under *path*.
    """
    python_files = []
    for root, _dirs, files in os.walk(path):
        for name in files:
            if os.path.splitext(name)[1] == '.py':
                python_files.append(os.path.join(root, name))
    return python_files
def get_aspect_ids(ctx, target):
    """Return all aspect ids attached to ctx/target, filtering out self.

    Prefers ctx.aspect_ids; falls back to target.aspect_ids; returns None
    when neither carries the attribute.
    """
    if hasattr(ctx, "aspect_ids"):
        all_ids = ctx.aspect_ids
    elif hasattr(target, "aspect_ids"):
        all_ids = target.aspect_ids
    else:
        return None
    # Drop our own aspect so callers only see foreign aspects.
    return [one_id for one_id in all_ids if "intellij_info_aspect" not in one_id]
def DES30xx(v):
    """
    Return True when v["platform"] identifies a DES-30xx-series device.

    :param v: mapping with a "platform" string
    :return: bool
    """
    # str.startswith accepts a tuple of prefixes -- one call replaces the
    # original chain of `or`-ed startswith tests.
    return v["platform"].startswith(("DES-3010", "DES-3016", "DES-3018", "DES-3026"))
import re
# Compiled once at import time. The dash is placed LAST inside the class so it
# is a literal '-': the original `[?!.,-;]` made `,-;` a character range that
# accidentally swallowed digits, '/' and ':' as phrase separators.
_PUNC_RE = re.compile(r"([?!.,;-]+)")
def split_phrases(text: str):
    """Split *text* into phrases.

    Punctuation runs are kept as their own items (the pattern is wrapped in a
    capture group, so re.split returns the separators too); empty fragments
    are dropped.
    """
    return [part for part in map(str.strip, _PUNC_RE.split(text)) if part]
def ms_energy_stat(microstates):
    """
    Given an iterable of microstates, find the lowest, (count-weighted)
    average, and highest energy.

    Fixes two defects of the original:
    * the misspelled local ``lowerst_E``;
    * when *microstates* was a one-shot iterator, the peeked first element
      was silently excluded from the count/average (lists were unaffected).

    :param microstates: iterable of objects with ``E`` and ``count``
    :return: (lowest_E, average_E, highest_E)
    :raises StopIteration: on empty input (same as the original).
    """
    iterator = iter(microstates)
    first = next(iterator)  # raises StopIteration on empty input, as before
    lowest_E = highest_E = first.E
    N_ms = first.count
    total_E = first.E * first.count
    for ms in iterator:
        if ms.E < lowest_E:
            lowest_E = ms.E
        elif ms.E > highest_E:
            highest_E = ms.E
        N_ms += ms.count
        total_E += ms.E * ms.count
    return lowest_E, total_E / N_ms, highest_E
def _get_object_id(obj):
"""
Gets the ID for an API object.
:param obj:
The API object
:type obj:
`dict`
:return:
"""
return obj.get('id', None) | 19ebf32e30e9863f89f509cbc59430847f41f979 | 691,021 |
def tokensMatch(expectedTokens, receivedTokens, ignoreErrorOrder):
    """Test whether the received token stream matches the expected one.

    When *ignoreErrorOrder* is true, the relative position of parse errors
    versus other tokens is ignored: each stream is partitioned into
    [non-errors, errors] and the partitions are compared instead.
    """
    if not ignoreErrorOrder:
        return expectedTokens == receivedTokens

    def partition(token_list):
        # [non-parse-error tokens, parse-error tokens], original order kept
        return [[t for t in token_list if t != "ParseError"],
                [t for t in token_list if t == "ParseError"]]

    return partition(expectedTokens) == partition(receivedTokens)
import json
def check_role_in_retrieved_nym(retrieved_nym, role):
    """
    Check whether the role in a GET NYM response is the one we want.

    :param retrieved_nym: JSON string of the GET NYM response (may be None).
    :param role: the role to check against.
    :return: True when the response carries exactly this role,
             False when it is None, lacks "data", lacks "role",
             or carries a different role.
    """
    if retrieved_nym is None:
        return False
    result = json.loads(retrieved_nym)["result"]
    if "data" not in result:
        return False
    # "data" is itself a JSON-encoded string.
    data = json.loads(result["data"])
    if "role" not in data:
        return False
    return data["role"] == role
def tagsoverride(msg, override):
    """
    Apply a series of tag overrides to *msg* in place.

    A value of None deletes the tag; any other value replaces it.
    Returns the mutated *msg* for convenience.
    """
    for key, new_value in override.items():
        if new_value is None:
            del msg[key]
        else:
            msg[key] = new_value
    return msg
def get_possible_standard_coords_dims(name_for: str = 'coords', ndim: int = 1):
    """
    Project-wide standard coordinate / dimension names (used when reading
    NetCDF files into standard DataArrays). The ordering is significant and
    must not be changed.

    :param name_for: 'coords' for coordinate names, 'dims' for dimension names
    :param ndim: 1 or 2 (only used when name_for == 'dims')
    :return: list of names; 0 for an unsupported ndim; None (implicitly)
             for an unrecognised name_for (historical behaviour, preserved)
    """
    if name_for == 'coords':
        return ['time', 'lev', 'lat', 'lon', 'number']
    if name_for == 'dims':
        if ndim == 1:
            return ['time', 'lev', 'y', 'x', 'num']
        if ndim == 2:
            return ['time', 'lev', ['y', 'x'], ['y', 'x'], 'num']
        return 0
def Model(request):
    """All model classes"""
    # pytest fixture body: returns the current parametrized model class via
    # request.param (a @pytest.fixture(params=...) decorator is presumably
    # applied where this is defined -- not visible in this chunk).
    return request.param
def _do_get_features_list(featurestore_metadata):
"""
Gets a list of all features in a featurestore
Args:
:featurestore_metadata: metadata of the featurestore
Returns:
A list of names of the features in this featurestore
"""
features = []
for fg in featurestore_metadata.featuregroups.values():
features.extend(fg.features)
features = list(map(lambda f: f.name, features))
return features | d5d1b99aa77067dc22473d12fb8fec6312d75b53 | 691,030 |
import base64
def encode(value):
    """
    Base64-encode a text string.

    The input is UTF-8 encoded to bytes, base64-encoded, and the result is
    decoded back to a text string.
    """
    raw_bytes = value.encode()
    encoded_bytes = base64.b64encode(raw_bytes)
    return encoded_bytes.decode()
import tempfile
import os
def temporary_file():
    """Unique anonymous temporary file. Delete on close."""
    with tempfile.TemporaryFile() as temp_file:
        # NOTE(review): on POSIX, TemporaryFile().name is typically the
        # integer file descriptor, not a path, so os.path.exists() is being
        # handed an fd -- behaviour is platform-dependent; confirm intent.
        created = os.path.exists(temp_file.name)
    # Returns the string "fileno <name>" when the existence check passed,
    # otherwise False (short-circuiting `and`).
    return created and "fileno {}".format(temp_file.name)
def contains_point(poly, point):
    """
    Ray-casting (crossing-number) point-in-polygon test.

    Given a list of 2-tuples (lat, lng) defining the polygon, return True if
    *point* (a 2-tuple (lat, lng)) lies inside it, False otherwise.
    """
    inside = False
    j = len(poly) - 1
    for i in range(len(poly)):
        lat_i, lng_i = poly[i]
        lat_j, lng_j = poly[j]
        # Edge crosses the horizontal ray through the point, and the point
        # is west of the intersection: toggle the inside flag.
        if (lat_i > point[0]) != (lat_j > point[0]) and \
                point[1] < (lng_j - lng_i) * (point[0] - lat_i) / (lat_j - lat_i) + lng_i:
            inside = not inside
        j = i
    return inside
import os
import re
def get_file_with_basename(path, basename):
    """
    Find a file whose basename (minus extension) matches *basename* in *path*.

    Args:
        path (string): Path to search.
        basename (string): Basename of the file (filename minus extension)

    Returns:
        string of filename (with extension) or None if nothing is found.
    """
    # re.escape guards against regex metacharacters in the basename (the
    # original unescaped interpolation broke for names containing '+', '.',
    # etc.); the raw string avoids the invalid-escape warning for '\.'.
    pattern = re.compile(r'^{}\.\S+'.format(re.escape(basename)))
    for entry in os.listdir(path):
        filepath = os.path.join(path, entry)
        if os.path.isfile(filepath) and pattern.match(entry):
            return entry
    return None
import argparse
def parse_args():
    """Parse command-line arguments for the application."""
    arg_parser = argparse.ArgumentParser(description="Arguments to run the application")
    arg_parser.add_argument(
        "-d", "--dataset",
        type=str,
        required=True,
        help="[Required] Choose PTEN or BRCA1 dataset.",
    )
    arg_parser.add_argument(
        "-i", "--input",
        type=str,
        required=True,
        help="[Required] File path for PTEN or BRCA1 dataset.",
    )
    arg_parser.add_argument(
        "-o", "--output_dir",
        type=str,
        default="src/Results",
        help="[Optional] Folder for output results (ROC_curve and PR_curve).",
    )
    return arg_parser.parse_args()
def vector_dot(xyz, vector):
    """
    Take a dot product between an array of vectors, xyz and a vector [x, y, z]

    **Required**

    :param numpy.ndarray xyz: grid (npoints x 3)
    :param numpy.ndarray vector: vector (1 x 3)

    **Returns**

    :returns: dot product between the grid and the (1 x 3) vector, returns an
              (npoints x 1) array
    :rtype: numpy.ndarray

    :raises ValueError: if *vector* is not length 3. (ValueError subclasses
        Exception, so callers catching the old generic Exception still work.)
    """
    if len(vector) != 3:
        raise ValueError(
            "vector should be length 3, the provided length is {}".format(
                len(vector)
            )
        )
    return vector[0]*xyz[:, 0] + vector[1]*xyz[:, 1] + vector[2]*xyz[:, 2]
# Translation table built once; str.translate does all five substitutions in
# a single C-level pass instead of five chained .replace() scans.
_LEET_TABLE = str.maketrans("aeiou", "4310^")
def filter_leetify(a, **kw):
    """Leetify text ('a' becomes '4', 'e' becomes '3', etc.)"""
    return a.translate(_LEET_TABLE)
import ast
def eval_string(string):
    """automatically evaluate string to corresponding types.

    For example:
        not a string -> return the original input
        '0' -> 0
        '0.2' -> 0.2
        '[0, 1, 2]' -> [0,1,2]
        'eval(1+2)' -> 3
        'eval(range(5))' -> range(0, 5)

    Args:
        string: the value to evaluate.

    Returns: the corresponding type
    """
    if not isinstance(string, str):
        return string
    # SECURITY: eval() executes arbitrary code -- only ever call this on
    # trusted configuration input.
    if len(string) > 1 and string[0] == '[' and string[-1] == ']':
        return eval(string)
    if string[0:5] == 'eval(':
        return eval(string[5:-1])
    try:
        value = ast.literal_eval(string)
    except Exception:
        # literal_eval raises ValueError/SyntaxError/TypeError on non-literal
        # input; fall back to the raw string. (Was a bare `except:`, which
        # also swallowed KeyboardInterrupt/SystemExit.)
        value = string
    return value
def convolution(image, kernel, row, column):
    """
    Apply a 3x3 convolution at a single pixel, clamping the result to [0, 255].

    :param image: 2-D pixel array, indexable as image[r][c].
    :param kernel: 3x3 kernel; indices run opposite to the image offsets,
        i.e. this is true convolution (flipped kernel), not correlation.
    :param row: row index of the central pixel.
    :param column: column index of the central pixel.
    :return: the convolved pixel value as an int in [0, 255].
    """
    total = 0
    for ki in range(3):
        for kj in range(3):
            # kernel index (ki, kj) pairs with image offset (1-ki, 1-kj).
            total += image[row + 1 - ki][column + 1 - kj] * kernel[ki][kj]
    return int(max(0, min(255, total)))
def token_to_char_position(tokens, start_token, end_token):
    """Converts a token positions to char positions within the tokens."""
    start_char = 0
    end_char = 0
    # Accumulate len(token) + 1 per token, i.e. tokens are assumed to be
    # joined by a single separator character.
    for i in range(end_token):
        tok = tokens[i]
        end_char += len(tok) + 1
        # NOTE(review): start_char is captured AFTER adding the start token's
        # own length plus separator, so it points just past that token rather
        # than at its first character -- confirm this offset is what callers
        # expect.
        if i == start_token:
            start_char = end_char
    return start_char, end_char
def flatten_image(image):
    """
    Flatten a 2D image into a 1D array of pixels while preserving channels,
    giving shape (height * width, channels).

    :param image: input 2D image (multi-channel color image or greyscale)
    :type image: numpy.ndarray
    :return: the flattened image with shape (height * width, channels)
    :rtype: numpy.ndarray
    """
    assert image.ndim >= 2, "The input image must be a 2 Dimensional image"
    if image.ndim == 3:
        return image.reshape(-1, image.shape[2])
    if image.ndim == 2:
        # Greyscale: give it a single explicit channel axis.
        return image.reshape(-1, 1)
    return image
def tail(sequence):
    """Return all but the first element of a sequence.

    Parameters
    ----------
    sequence : Sequence[A]
        The sequence to decapitate.

    Returns
    -------
    Sequence[A]
        The decapitated sequence (same type as the input slice).
    """
    return sequence[slice(1, None)]
def split_fqdn(fqdn):
    """
    Unpack a fully qualified domain name into exactly three parts,
    left-padding with None; only the first three labels are kept.
    """
    if not fqdn:
        return [None, None, None]
    parts = fqdn.split(".")
    padding = [None] * (3 - len(parts))  # empty when there are 3+ labels
    return padding + parts[:3]
from typing import Union
from pathlib import Path
import json
def read_json(path: Union[str, Path]):
    """
    Read and parse a JSON file given a string or Path.

    :param path: path to the JSON file
    :return: the deserialized JSON content
    """
    # JSON is defined as UTF-8 text (RFC 8259); be explicit rather than
    # depending on the platform's locale encoding.
    with open(path, encoding="utf-8") as f:
        return json.load(f)
import random
def random_package():
    """Create a structure with random (but realistic) package metadata."""
    # List order is intentionally unchanged so seeded random.choice keeps
    # returning the same element as before.
    name_evr = random.choice([
        "acl-2.2.53-6",
        "add-apt-key-1.0-0.5",
        "usb-modeswitch-2.5.2+repack0",
        "vim-common-2:8.1.2269",
        "vim-gtk-2:8.1.2269",
        "vim-gui-common-2:8.1.2269",
        "vim-runtime-2:8.1.2269",
        "vim-tiny-2:8.1.2269",
        "syslinux-common-3:6.04~git20190206.bf6db5b4+dfsg1-2",
        "syslinux-legacy-2:3.63+dfsg-2ubuntu9",
        "syslinux-utils-3:6.04~git20190206.bf6db5b4+dfsg1-2",
        "syslinux-3:6.04~git20190206.bf6db5b4+dfsg1-2",
        "system-config-printer-common-1.5.12",
        "system-config-printer-gnome-1.5.12",
        "system-config-printer-udev-1.5.12",
        "system-config-printer-1.5.12",
        "system-tools-backends-2.10.2-3",
        "systemd-coredump-245.4",
        "systemd-sysv-245.4",
        "systemd-245.4",
        "openssl-libs-1.1.1g-12"])
    return {
        "rpm_nvra": [f"{name_evr}.el8_3.x86_64"],
        "srpm_nevra": f"{name_evr}.el8_3.src",
    }
def add_arrays(arr1, arr2):
    """
    Element-wise sum of two equal-length lists.

    Returns a new list with the sums, or None when the lengths differ
    (historical sentinel behaviour, preserved).
    """
    if len(arr1) != len(arr2):
        return None
    return [a + b for a, b in zip(arr1, arr2)]
def strip_commands(commands):
    """Strip a sequence of commands.

    Each command has its ';'-comment removed and surrounding whitespace
    trimmed; blank results are dropped. Bytes entries are decoded to str
    first (undecodable bytes are replaced).

    Parameters
    ----------
    commands : iterable of str or bytes
        Iterable of commands to strip.

    Returns
    -------
    list of str
        The stripped commands with blank ones removed.
    """
    cleaned = []
    for command in commands:
        if isinstance(command, bytes):
            command = command.decode(errors='replace')
        command = command.split(';')[0].strip()
        if command:
            cleaned.append(command)
    return cleaned
def model_view():
    """
    Panel factory for the model tab in the GUI.
    Return None if not present.
    """
    # Stub: the model tab is not implemented; the factory contract allows
    # returning None to signal "no panel".
    return None
def __merge2sorted__(arr1, arr2):
"""
Takes two sorted subarrays and returns a sorted array
"""
m, n = len(arr1), len(arr2)
aux_arr = [None] * (m + n)
p1 = 0
p2 = 0
c = 0
while p1 < m and p2 < n:
if arr1[p1] < arr2[p2]:
aux_arr[c] = arr1[p1]
p1 += 1
else:
aux_arr[c] = arr2[p2]
p2 += 1
c += 1
if p1 == m: # arr1 exhausted
while p2 < n:
aux_arr[c] = arr2[p2]
p2 += 1
c += 1
elif p2 == n: # arr2 exhausted
while p1 < m:
aux_arr[c] = arr1[p1]
p1 += 1
c += 1
return aux_arr | 292521702ba5b9f9237ca988c1e3f6091b0f142e | 691,051 |
def args_idx(x):
    """Return the index of the last '?' in *x*, or None when absent."""
    position = x.rfind('?')
    return position if position >= 0 else None
import shelve
def initialize_database(file_name, sample_size):
    """Set up the database connection"""
    # NOTE(review): 'wu' is not a documented shelve/dbm flag ('r', 'w', 'c',
    # 'n' are); backend handling of the extra character varies -- confirm
    # whether plain 'w' (read/write, existing db) was intended.
    database = shelve.open(file_name, flag='wu')
    # Seed the bookkeeping key only on first use.
    if 'index' not in database:
        database['index'] = 0
    # The requested sample size always overwrites any stored value.
    database['sample_size'] = sample_size
    return database
from typing import Dict
from typing import Any
def userinfo_token(
    token_subject: str,
    token_idp: str,
    token_ssn: str,
) -> Dict[str, Any]:
    """
    Mocked userinfo-token from Identity Provider (unencoded).

    The fixed claim values below are static test-fixture data; only the
    subject, IdP and SSN are parameterised.
    """
    return {
        'iss': 'https://pp.netseidbroker.dk/op',
        'nbf': 1632389572,
        'iat': 1632389572,
        'exp': 1632389872,
        'amr': ['code_app'],
        'mitid.uuid': '7ddb51e7-5a04-41f8-9f3c-eec1d9444979',
        'mitid.age': '55',
        'mitid.identity_name': 'Anker Andersen',
        'mitid.date_of_birth': '1966-02-03',
        'loa': 'https://data.gov.dk/concept/core/nsis/Substantial',
        'acr': 'https://data.gov.dk/concept/core/nsis/Substantial',
        'identity_type': 'private',
        'idp': token_idp,
        'dk.cpr': token_ssn,
        'auth_time': '1632387312',
        'sub': token_subject,
        'aud': '0a775a87-878c-4b83-abe3-ee29c720c3e7',
        'transaction_id': 'a805f253-e8ea-4457-9996-c67bf704ab4a',
    }
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.