content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def plot_data(d, field):
    """
    Return the x and y series to be used for plotting.

    Args:
        d (OrderedDict): maps year -> record (a mapping containing ``field``)
        field (str): key to read from each record

    Returns:
        Tuple[list, list]:
            [0] The x-series (years whose value for ``field`` is not None)
            [1] The y-series (the corresponding values)
    """
    x_series = []
    y_series = []
    for year, record in d.items():
        if record[field] is not None:
            x_series.append(year)
            y_series.append(record[field])
    return (x_series, y_series)
from bs4 import BeautifulSoup
from typing import List
def find_data_rows(soup: BeautifulSoup) -> List[BeautifulSoup]:
    """Return every <tr> element whose direct parent is a <tbody>.

    Exploring the data shows that exactly those rows correspond to
    vaccine site data.
    """
    return soup.find_all(
        lambda tag: tag.name == "tr" and tag.parent.name == "tbody"
    )
def keys_from_position(position, width, height=None):
    """
    Generate keys suitable for a map generator to take a horizontal
    rectangular slice of a (t, x, y, z) space centered on ``position``.

    Args:
        position: sequence (t, x, y, z).
        width: extent of the slice along x.
        height: extent along y; defaults to ``width`` (square slice).

    Returns:
        Tuple (float, slice, slice, float).
    """
    height = width if height is None else height
    t, x, y, z = position[0], position[1], position[2], position[3]
    half_w = width / 2
    half_h = height / 2
    return (t,
            slice(x - half_w, x + half_w),
            slice(y - half_h, y + half_h),
            z)
def get_length(node):
    """Return the number of elements in a linked list.

    Input: 'node' — the first node of a list that is either linear or
    circular.  Output: the element count.  Time complexity: O(n) where
    n is the number of elements.
    """
    if node is None:
        return 0
    head = node
    count = 1
    current = node.next
    while current:
        if current is head:
            # Wrapped back to the start: the list is circular.
            return count
        count += 1
        current = current.next
    return count
from pathlib import Path
import os
def relativize_relative_resource(resource, page):
    """
    Return the relative location that, applied from the HTML page ``page``,
    resolves to ``resource``.

    Both arguments are taken relative to the same base location: ``page`` is
    an HTML page and ``resource`` a relative location of one of its resources
    (another page, a picture, a CSS file, etc.), so neither may be empty or
    end with a '/'.  Backslashes are normalized to forward slashes.

    ATTENTION! This method wasn't tested with ABSOLUTE paths as any of the
    arguments.

    Raises:
        ValueError: if either argument is empty or ends with '/'.
    """
    page = (page or '').replace('\\', '/')
    if not page or page.endswith('/'):
        raise ValueError(f"'page' argument is not a relatively located resource: {page}")
    resource = (resource or '').replace('\\', '/')
    if not resource or resource.endswith('/'):
        # Fixed: this message previously blamed the 'page' argument.
        raise ValueError(f"'resource' argument is not a relatively located resource: {resource}")
    base_path = Path(page).resolve()
    if len(base_path.parents) < 1:
        # No parent directory component: resource is already relative to page.
        return resource
    return str(os.path.relpath(resource, base_path.parent)).replace('\\', '/')
def euclidean_ext_gcd(a,b):
    """Extended Euclidean Algorithm to find GCD and Bézout's identity.
    Extended Euclidean algorithm is an extension to the Euclidean algorithm,
    and computes, in addition to the greatest common divisor (GCD) of integers
    a and b, also the coefficients of Bézout's identity, which are integers
    x and y such that ax+by = gcd(a,b) [Wiki].
    Args:
        a (int): The first integer, > 0,
        b (int): The second integer, > 0.
    Returns:
        tuple(int,int,int): the gcd and coefficients x and y.
    """
    # One Euclidean "division" step implemented by repeated subtraction:
    # reduces (a, b) to (b, a mod b) while updating the Bézout coefficient
    # pairs (s, spv) and (t, tpv).  qs/qt accumulate quotient*s and
    # quotient*t as the subtraction loop runs.
    def calc_next_step(a,b,s,spv,t,tpv):
        if a < b:
            a,b = b,a
        r = a
        qs = 0
        qt = 0
        while r >= b:
            r = r - b
            qs += s
            qt += t
        spv, s = s, spv - qs
        tpv, t = t, tpv - qt
        return (b, r, s, spv, t, tpv )
    # Seed coefficients: a*1 + b*0 = a and a*0 + b*1 = b.
    spv = 1
    tpv = 0
    s = 0
    t = 1
    # calc_next_step swaps its operands when a < b, so remember the original
    # ordering and swap the returned coefficients back accordingly.
    flip = 0
    if a < b:
        flip = 1
    while (b != 0):
        #print("a =", a, "b =", b, "s =", s, "t =", t)
        a,b,s,spv,t,tpv = calc_next_step(a,b,s,spv,t,tpv)
    return (a,tpv,spv) if flip else (a,spv,tpv)
def evaluate(env,
             policy,
             num_episodes = 10,
             video_filename = None,
             max_episodes_per_video = 5,
             return_distributions=False,
             return_level_ids=False):
    """Evaluates the policy.

    Args:
        env: Environment to evaluate the policy on.
        policy: Policy to evaluate.
        num_episodes: A number of episodes to average the policy on.
        video_filename: If not None, save num_episodes_per_video to a video file.
        max_episodes_per_video: When saving a video, how many episodes to render.
        return_distributions: Whether to return per-step rewards and episode return
            distributions instead of mean
        return_level_ids: Whether to return level ids to agent in ProcGen.

    Returns:
        Averaged reward and average number of steps; for TfAgentsPolicy
        policies the averaged log-probs are appended, and with
        return_distributions the per-step reward and per-episode return
        lists are returned instead.
    """
    del video_filename  # placeholder
    del max_episodes_per_video
    total_timesteps = 0.
    total_returns = 0.0
    total_log_probs = 0.0
    return_acc = []
    reward_acc = []
    for _ in range(num_episodes):
        episode_return = 0.
        episode_log_prob = 0.
        episode_timesteps = 0.
        timestep = env.reset()
        while not timestep.is_last():
            if type(policy).__name__ == 'TfAgentsPolicy':
                action, log_probs = policy.act(timestep.observation)
                episode_log_prob += log_probs.numpy().item()
            else:
                if return_level_ids:
                    action = policy.act(timestep.observation, env._infos[0]['level_seed'])  # pylint: disable=protected-access
                else:
                    action = policy.act(timestep.observation)
            if hasattr(action, 'numpy'):
                action = action.numpy()
            timestep = env.step(action)
            total_returns += timestep.reward[0]
            episode_return += timestep.reward[0]
            total_timesteps += 1.0
            episode_timesteps += 1.0
            reward_acc.append(timestep.reward[0])
        episode_log_prob /= episode_timesteps
        total_log_probs += episode_log_prob
        return_acc.append(episode_return)
    if return_distributions:
        return (reward_acc, return_acc,
                total_timesteps / num_episodes, total_log_probs / num_episodes)
    # Fixed: this comparison previously used the miscapitalized
    # 'tfAgentsPolicy' and therefore never matched, silently dropping the
    # log-prob average for TfAgentsPolicy evaluations.
    if type(policy).__name__ == 'TfAgentsPolicy':
        return (total_returns / num_episodes,
                total_timesteps / num_episodes, total_log_probs / num_episodes)
    else:
        return total_returns / num_episodes, total_timesteps / num_episodes
def str_upper_lookup(full_match: str, inner_group: str) -> str:
    """Handle placeholders of the form ``${upper:aaAAaa}``.

    ``inner_group`` is e.g. ``"upper:aaAAaa"``; any ``:-default`` suffix is
    discarded before the value after the first ':' is upper-cased.
    ``full_match`` is accepted for interface parity but unused.
    """
    without_default = inner_group.split(":-", 1)[0]
    _, value = without_default.split(":", 1)
    return value.upper()
def _remove_nodes(node, removal_tags):
"""
Parameters
----------
node: NonTerminal or Terminal
removal_tags: list[str]
Returns
-------
NonTerminal or Terminal
"""
if node.is_terminal():
return node
# Children of this node
new_children = []
for c_i in range(len(node.children)):
# Remove (skip) child terminal nodes whose tags are in removal_tags.
if node.children[c_i].is_terminal() and node.children[c_i].label in removal_tags:
continue
new_children.append(node.children[c_i])
node.children = new_children
# Recursive
for c_i in range(len(node.children)):
node.children[c_i] = _remove_nodes(node.children[c_i], removal_tags=removal_tags)
return node | be36302167bb64ad66f847655ccc686d41244e66 | 692,292 |
def _match_exit_function(edge, stack):
"""Exit function."""
if len(stack) > 0:
for index in reversed(range(0, len(stack))):
if stack[index]['enter id'] == edge['return']:
# Exit control function
stack.pop(index)
return True
return False | a5b6b3d12f23093d48200e53fc82714fb0e19084 | 692,293 |
import unicodedata
import re
import hashlib
def path_sanitise(unsafe, maintain_uniqueness=4, forbidden=r'[^a-zA-Z0-9_-]'):
    """Generate a string safe for use in filenames etc. from ``unsafe``.

    The input is NFD-normalised and every character matching ``forbidden``
    becomes '_'.  Unless ``maintain_uniqueness`` is False, a prefix of the
    SHA-256 of the original string is appended to minimise collision risk
    between e.g. 'cafe' and 'café'; ``maintain_uniqueness`` is the number
    of bytes of hash entropy to retain.
    """
    sanitised = re.sub(forbidden, '_', unicodedata.normalize('NFD', unsafe))
    if maintain_uniqueness is False:
        return sanitised
    digest = hashlib.sha256(unsafe.encode()).hexdigest()
    return sanitised + '_' + digest[:int(2 * maintain_uniqueness)]
import pickle as pickle
def loadMirrorDictionary(fileLocation):
    """Returns saved Mirror Dictionary from Pickled file at 'fileLocation'"""
    print('*** Loading %s...' % fileLocation)
    # 'with' guarantees the handle is closed even if unpickling fails; the
    # old code leaked it on error and flushed a read-only handle pointlessly.
    with open(fileLocation, "rb") as f:
        mir = pickle.load(f)
    print("*** Mirror Dictionary Loaded!")
    return mir
import requests
from bs4 import BeautifulSoup
def retrieve_html():
    """Locate the latest InsideAirBnB 'listings.csv.gz' links for US cities.

    Scrapes http://insideairbnb.com/get-the-data.html and returns one
    [country, region, city, date, url] row per US city; only the first
    listing link found per city is kept.
    """
    page = requests.get("http://insideairbnb.com/get-the-data.html")
    if page.status_code != 200:
        # NOTE(review): execution continues after this message, so a failed
        # request most likely yields an empty result rather than an error.
        print("Error: Request to InsideAirBnB is failing")
    soup = BeautifulSoup(page.content, 'html.parser')
    td_tags = soup.find_all('td')
    # To ensure only the latest data for a particular city is used.
    city_set = set()
    # To maintain city level summary for data fetched.
    city_data = []
    for td_tag in td_tags:
        link_list = [a['href'] for a in td_tag.find_all('a', href=True)]
        if len(link_list) > 0 and link_list[0].find('listings.csv.gz') != -1:
            # Summary for each city is got by parsing the url itself.
            # Assumed layout: scheme//host/country/region/city/date/... —
            # indices 3..6 below; TODO confirm against the live site.
            url_split = link_list[0].split('/')
            if len(url_split) != 9:
                print(f"Error: URL not following the " # noqa: E999
                      f"format {link_list[0]}")
            if url_split[3] == 'united-states':
                country = url_split[3]
                region = url_split[4]
                city = url_split[5]
                date = url_split[6]
                url = link_list[0]
                if city not in city_set:
                    city_set.add(city)
                    city_data.append([country, region, city, date, url])
    # Check summary information of each city.
    print(f"Info: Total number of city information fetched: {len(city_data)}")
    print("Info: Start summary information of each city")
    for city in city_data:
        print(city)
    print("Info: Completed summary information of each city ")
    return city_data
from datetime import datetime
def find_period_ranges(input_frame, transition_date="2016-10-01"):
    """
    Read in the input_frame dataframe (as returned by pivot_by_treatment)
    and, based on a transition date, find the start and end dates of the
    pre- and post-transition periods.  When the frame's dates represent
    weeks or months and do not match the transition date exactly, the
    boundary is drawn at whichever dates are closest.

    Args:
        input_frame: the dataframe containing the collision data, indexed
            by datetime.
        transition_date: date when the pre-period ends and the post period
            begins, as a 'YYYY-MM-DD' string.

    Returns:
        A list of 2 lists of 2 date strings: the pre-period date range and
        the post-period date range.

    Raises:
        ValueError: when the transition date is not strictly between the
            min and max dates of the given frame.
    """
    min_date = input_frame.index.min()
    max_date = input_frame.index.max()
    transition_datetime = datetime.strptime(transition_date, '%Y-%m-%d')
    if transition_datetime <= min_date or transition_datetime >= max_date:
        raise ValueError(
            "transition_date {0} must be between the minimum "
            "and maximum frame dates.".format(transition_date))
    # Fixed: `.ix` and `Index.get_loc(..., method=...)` were removed in
    # modern pandas; use get_indexer(method='nearest') + iloc instead.
    nearest_pos = input_frame.index.get_indexer(
        [transition_datetime], method='nearest')[0]
    actual_transition = input_frame.iloc[nearest_pos].name
    if actual_transition < transition_datetime:
        pre_end = actual_transition
        post_start = input_frame.iloc[nearest_pos + 1].name
    else:
        pre_end = input_frame.iloc[nearest_pos - 1].name
        post_start = actual_transition
    pre_period_range = [min_date.strftime('%Y-%m-%d'), pre_end.strftime('%Y-%m-%d')]
    post_period_range = [post_start.strftime('%Y-%m-%d'), max_date.strftime('%Y-%m-%d')]
    return [pre_period_range, post_period_range]
import uuid
def generate_hierarchy():
    """Return four random UUID4 strings joined by double colons."""
    parts = (str(uuid.uuid4()) for _ in range(4))
    return "::".join(parts)
import binascii
def hex2bin(pattern):
    """
    Converts a hex string (\\x??\\x??\\x??\\x??) to real hex bytes.

    Arguments:
        pattern - A string representing the bytes to convert; quotes and
            the literal ``\\x`` markers are stripped first.
    Return:
        list of single-byte ``bytes`` objects
    """
    for junk in ("\\x", "\"", "\'"):
        pattern = pattern.replace(junk, "")
    # Consume the remaining hex digits two at a time (odd trailing digit
    # is dropped, matching the original pairwise zip).
    return [binascii.a2b_hex(pattern[i:i + 2])
            for i in range(0, len(pattern) - 1, 2)]
import math
def make_context_fst(x, weight=True):
    """
    read a Kaldi lexicon format list.
    <word1> <weight> <sub-word1> <sub-word2> <...>
    example:
    ABABA 1.0 ABABA
    ABACHA 1.0 ABACHA
    每日一物 100 每 日 一 物
    每日一物 100 每日 一物
    Returns:
        List with FST format (arcs as [src, dst, in-label, out-label]
        plus '%.10f' % -log(weight) on the word-initial arc when
        ``weight`` is True).
    """
    C = x
    C_fst = []
    # `state` allocates globally-unique intermediate states across ALL
    # multi-sub-word entries; state '0' serves as both start and end.
    state = int(0)
    if weight:
        for i in range(len(C)):
            if len(C[i]) == 3:
                # Single sub-word entry: one weighted self-loop arc on state 0.
                logprob = '%.10f' % (-math.log(float(C[i][1])))
                C_fst.append(['0', '0', C[i][2], C[i][0], logprob])
            else:
                logprob = '%.10f' % (-math.log(float(C[i][1])))
                for j in range(len(C[i]) - 2):
                    if j == 0:
                        # First sub-word carries the word output label and weight.
                        C_fst.append(['0', '%s' % (state + 1), C[i][j+2], C[i][0], logprob])
                        state = state + 1
                    elif j == len(C[i]) - 3:
                        # Last sub-word closes the path back to state 0.
                        C_fst.append(['%s' % state, '0', C[i][j+2], '<eps>'])
                    else:
                        C_fst.append(['%s' % state, '%s' % (state + 1), C[i][j+2], '<eps>'])
                        state = state + 1
        C_fst.append(['0','0']) # add end
    else:
        # Same construction, without the weight column on word-initial arcs.
        for i in range(len(C)):
            if len(C[i]) == 3:
                C_fst.append(['0', '0', C[i][2], C[i][0]])
            else:
                for j in range(len(C[i]) - 2):
                    if j == 0:
                        C_fst.append(['0', '%s' % (state + 1), C[i][j+2], C[i][0]])
                        state = state + 1
                    elif j == len(C[i]) - 3:
                        C_fst.append(['%s' % state, '0', C[i][j+2], '<eps>'])
                    else:
                        C_fst.append(['%s' % state, '%s' % (state + 1), C[i][j+2], '<eps>'])
                        state = state + 1
        C_fst.append(['0','0']) # add end
    return C_fst
def validate_sample_id(value: str, length: int = 15) -> bool:
    """
    Strict validation of a BMH Sample ID such as 'BMH-2018-000001'.

    :param value: sample_id to validate
    :param length: expected length of the string
    :return: True when the ID is valid
    :raises ValueError: describing the first check that failed
    """
    expected_format = "Sample ID must be in the following format: 'BMH-2018-000001'"
    if len(value) != length:
        # Fixed: the message now reports the `length` parameter instead of a
        # hard-coded 15.  ValueError (an Exception subclass) keeps existing
        # `except Exception` callers working.
        raise ValueError(f"Sample ID '{value}' does not meet the expected length of {length} characters. "
                         f"{expected_format}")
    components = value.split("-")
    if len(components) != 3:
        raise ValueError(f"Sample ID '{value}' does not appear to meet expected format. "
                         f"{expected_format}")
    if components[0] != "BMH":
        raise ValueError(f"BMH component of Sample ID ('{components[0]}') does not equal expected 'BMH'")
    if not components[1].isdigit() or len(components[1]) != 4:
        raise ValueError(f"YEAR component of Sample ID ('{components[1]}') does not equal expected 'YYYY' format")
    if not components[2].isdigit() or len(components[2]) != 6:
        raise ValueError(f"ID component of Sample ID ('{components[2]}') does not equal expected 'XXXXXX' format")
    return True
def in_group(user, groups):
    """Returns a boolean if the user is in the given group, or comma-separated
    list of groups.
    Usage::
        {% if user|in_group:"Friends" %}
        ...
        {% endif %}
    or::
        {% if user|in_group:"Friends,Enemies" %}
        ...
        {% endif %}
    """
    group_list = groups.split(',')
    try:
        return bool(user.groups.filter(name__in=group_list).values('name'))
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # propagate; anonymous users (or objects without a .groups manager)
        # are simply in no group.
        return False
def number_of_groups_vi(no_x_names):
    """Determine no of groups for groupwise variable importance measure.

    Parameters
    ----------
    no_x_names : INT. No of variables considered in analysis.

    Returns
    -------
    groups : INT.
    merged_groups : INT.
    """
    # Tiered thresholds: more variables allow more (and more merged) groups.
    if no_x_names >= 100:
        return 20, 19
    if no_x_names >= 20:
        return 10, 9
    if no_x_names >= 10:
        return 5, 4
    if no_x_names >= 4:
        return 2, 0
    return 0, 0
def get_network(vm):
    """
    Returns network information from a virtual machine object as a mapping
    of MAC address -> {'ipv4': [...], 'ipv6': [...]}.

    vm
        Virtual Machine Object whose guest network devices are inspected.
    """
    network = {}
    for device in vm.guest.net:
        addresses = {"ipv4": [], "ipv6": []}
        for address in device.ipAddress:
            # Addresses containing '::' are classified as IPv6 (matches the
            # original heuristic; full-form IPv6 without '::' lands in ipv4).
            family = "ipv6" if "::" in address else "ipv4"
            addresses[family].append(address)
        network[device.macAddress] = addresses
    return network
import math
def atm_pressure(altitude):
    """
    Estimate atmospheric pressure from altitude.

    Calculated using a simplification of the ideal gas law, assuming 20
    degrees Celsius for a standard atmosphere (equation 7, page 62 in
    Allen et al, 1998).

    :param altitude: Elevation/altitude above sea level [m]
    :return: atmospheric pressure [kPa]
    :rtype: float
    """
    temperature_ratio = (293.0 - 0.0065 * altitude) / 293.0
    return 101.3 * math.pow(temperature_ratio, 5.26)
def offer_banner_wrapper(user, block, view, frag, context): # pylint: disable=W0613
    """
    A wrapper that prepends the First Purchase Discount banner if
    the user hasn't upgraded yet.

    NOTE(review): `offer_banner_fragment` is hard-coded to None below, so
    this wrapper currently always returns `frag` unchanged; everything
    after the second early return is unreachable (the banner lookup looks
    like a stubbed-out feature).
    """
    if block.category != 'vertical':
        return frag
    offer_banner_fragment = None
    if not offer_banner_fragment:
        return frag
    # Course content must be escaped to render correctly due to the way the
    # way the XBlock rendering works. Transforming the safe markup to unicode
    # escapes correctly.
    offer_banner_fragment.content = str(offer_banner_fragment.content)
    offer_banner_fragment.add_content(frag.content)
    offer_banner_fragment.add_fragment_resources(frag)
    return offer_banner_fragment
def scale(value):
    """Scale a value from the acceleration range (0..19.6 m/s^2, sign
    ignored) to the 0-255 RGB range."""
    magnitude = min(abs(value), 19.6)
    return int(magnitude / 19.6 * 255)
def translate(bbox, x_offset=0, y_offset=0):
    """Translate bounding boxes by offsets.

    Parameters
    ----------
    bbox : numpy.ndarray
        Shape (N, 4+); the second axis holds
        :math:`(x_{min}, y_{min}, x_{max}, y_{max})` followed by any extra
        attributes, which stay intact during the transformation.
    x_offset : int or float
        Offset along x axis.
    y_offset : int or float
        Offset along y axis.

    Returns
    -------
    numpy.ndarray
        Translated bounding boxes with original shape (input not mutated).
    """
    shifted = bbox.copy()
    offset = (x_offset, y_offset)
    shifted[:, :2] += offset
    shifted[:, 2:4] += offset
    return shifted
def get_width_height(image):
    """Return a tuple (width, height) indicating the image dimensions.

    NOTE(review): the outer sequence length is reported as width, i.e. the
    image is assumed to be indexed [x][y] — confirm against callers.
    """
    return len(image), len(image[0])
def _GetGitKey(obj):
"""Hash the Git specification for the given RepoSync|RootSync object."""
repo = obj['spec']['git']['repo']
branch = 'main'
if 'branch' in obj['spec']['git']:
branch = obj['spec']['git']['branch']
directory = '.'
if 'dir' in obj['spec']['git']:
directory = obj['spec']['git']['dir']
revision = ''
if 'revision' in obj['spec']['git']:
revision = obj['spec']['git']['revision']
if not revision:
return '{repo}//{dir}@{branch}'.format(
repo=repo, dir=directory, branch=branch)
else:
return '{repo}//{dir}@{branch}:{revision}'.format(
repo=repo, dir=directory, branch=branch, revision=revision) | bdf7b08038312f833fa05e1ece41558f52074060 | 692,310 |
import logging
def get_logger(name):
    """
    Return a logger that will contextualize the logs with ``name``.
    """
    return logging.getLogger(name)
import re
def get_eos_np(sm_key, wc):
    """Get the NP contribution to an EOS Wilson coefficient identified
    by the name `sm_key` from a wcxf.WC instance `wc`.

    Returns the real part for plain or ``Re{...}`` names and the
    imaginary part for ``Im{...}`` names; returns 0 when no matching
    coefficient is present in any recognised form.

    Raises:
        ValueError: if lepton-flavour non-universal charged-current
        coefficients are found where a flavour-universal value is needed.
    """
    if sm_key in wc.dict:
        return wc.dict[sm_key].real
    elif r'Re{' in sm_key: # if the EOS name contains "Re"
        # Strip the Re{...} wrapper to recover the basis-file coefficient name.
        k2 = re.sub(r'Re\{([^\}]+)\}', r'\1', sm_key) # name of the coeff in the basis file
        if k2 in wc.dict:
            return wc.dict[k2].real
        elif k2.replace('lnu', 'enue') in wc.dict or k2.replace('lnu', 'munumu') in wc.dict:
            # for charged-current WCs, check if the WCxf file is LFU, else raise an error
            if wc.dict.get(k2.replace('lnu', 'enue'), 0) != wc.dict.get(k2.replace('lnu', 'munumu'), 0):
                raise ValueError("Found lepton flavour non-universal charged-current coefficients")
            return wc.dict.get(k2.replace('lnu', 'enue'), 0).real
    elif r'Im{' in sm_key: # if the EOS name contains "Im"
        k2 = re.sub(r'Im\{([^\}]+)\}', r'\1', sm_key) # name of the coeff in the basis file
        if k2 in wc.dict:
            return wc.dict[k2].imag
        elif k2.replace('lnu', 'enue') in wc.dict or k2.replace('lnu', 'munumu') in wc.dict:
            # for charged-current WCs, check if the WCxf file is LFU, else raise an error
            if wc.dict.get(k2.replace('lnu', 'enue'), 0) != wc.dict.get(k2.replace('lnu', 'munumu'), 0):
                raise ValueError("Found lepton flavour non-universal charged-current coefficients")
            return wc.dict.get(k2.replace('lnu', 'enue'), 0).imag
    # No matching coefficient found: no NP contribution.
    return 0
def get_max_temp(liveness, args):
    """Returns the sum over tiles of the maximum temporary-variable memory
    usage.  ``args`` is accepted for interface compatibility but unused."""
    per_tile_max = liveness["notAlwaysLive"]["maxBytesByTile"]
    return sum(per_tile_max)
def fail(test, msg=None):
    """Mark a test object as failed.

    :param test: test with status to be altered
    :param msg: optional message for failure reason (defaults to
        'Test failed')
    :return: updated test object
    """
    test['status'] = 'FAILURE'
    test['message'] = 'Test failed' if msg is None else msg
    return test
def is_water_pixel(reflectance_factor_2_25):
    """Classify the pixels of an image as whether they are in water.

    Parameters
    ----------
    reflectance_factor_2_25 : ndarray of float
        The reflectance factor of each pixel scanned over the 2.25
        micrometer wavelength (band 6 in the GOES data).

    Returns
    -------
    np.ndarray of bool
    """
    WATER_REFLECTANCE_THRESHOLD = 0.03
    return reflectance_factor_2_25 <= WATER_REFLECTANCE_THRESHOLD
from typing import Dict
from typing import Optional
from typing import cast
def get_function_call_from_source_code(source_code_files: Dict[str, str], line_number: int, file_id: str,
                                       called_function_name: Optional[str] = None) -> str:
    """Extract code snippet from original source code which contains a function call.

    :param source_code_files: File-Mapping dictionary (file id -> path)
    :param line_number: original source code line number (1-based) to start searching at
    :param file_id: file id of the original source code file
    :param called_function_name: optional parameter, if value is set, it needs to be
        included in the returned code snippet.
    :return: source code snippet (with newlines removed)
    """
    # Fixed: read via a context manager so the file handle is closed (the
    # previous implementation leaked it).
    with open(source_code_files[file_id]) as source_code:
        source_code_lines = source_code.readlines()
    offset = -1
    function_call_string = source_code_lines[line_number + offset]
    # If ')' appears before '(', the line starts mid-call: drop the prefix.
    if ")" in function_call_string and "(" in function_call_string:
        if function_call_string.index(")") < function_call_string.index("("):
            function_call_string = function_call_string[function_call_string.index(")") + 1:]
    if ")" in function_call_string and "(" not in function_call_string:
        function_call_string = function_call_string[function_call_string.index(")") + 1:]

    def __get_word_prior_to_bracket(string):
        # Return the identifier immediately before the first '(' (or None).
        if "(" not in string:
            return None
        string = string[:string.index("(")]
        string = string.split(" ")
        string = [e for e in string if len(e) > 0]
        string = string[-1]
        return string

    called_function_name_contained = False
    if called_function_name is None:
        called_function_name_contained = True
    # Grow the snippet line by line until it holds one balanced call that is
    # not a while/for/if header and (if requested) contains the callee name.
    while function_call_string.count("(") > function_call_string.count(")") \
            or function_call_string.count("(") < 1 \
            or function_call_string.count(")") < 1 \
            or __get_word_prior_to_bracket(function_call_string) == "while" \
            or __get_word_prior_to_bracket(function_call_string) == "for" \
            or __get_word_prior_to_bracket(function_call_string) == "if" \
            or not called_function_name_contained:
        # if ) prior to (, cut first part away
        if ")" in function_call_string and "(" in function_call_string:
            if function_call_string.index(")") < function_call_string.index("("):
                function_call_string = function_call_string[function_call_string.index(")") + 1:]
        # if word prior to ( is "while", "for" or "if", cut away until (
        word_prior_to_bracket = __get_word_prior_to_bracket(function_call_string)
        if word_prior_to_bracket is not None:
            if word_prior_to_bracket == "while" or word_prior_to_bracket == "for" or word_prior_to_bracket == "if":
                function_call_string = function_call_string[function_call_string.index("(") + 1:]
        # check if called_function_name is contained in function_call_string
        if not called_function_name_contained:
            called_function_name_str = cast(str, called_function_name)
            if called_function_name_str in function_call_string:
                called_function_name_contained = True
        offset += 1
        function_call_string += source_code_lines[line_number + offset]
    function_call_string = function_call_string.replace("\n", "")
    # if called_function_name is set and contained more than once in
    # function_call_string, trim the tail back to a single occurrence
    if called_function_name is not None:
        called_function_name_str = cast(str, called_function_name)
        while function_call_string.count(called_function_name_str) > 1:
            function_call_string = function_call_string[:function_call_string.rfind(called_function_name_str)]
    return function_call_string
def claims(oauth2_settings):
    """Set up web tokens claims options.

    Accepts:
        oauth2_settings(dict): dictionary of OAuth2 settings
    Returns:
        claims(dict): a dictionary describing json token web claims
    """
    issuer_values = ",".join(oauth2_settings.get("issuers", []))
    audience_values = ",".join(oauth2_settings.get("audience", []))
    return {
        "iss": {"essential": True, "values": issuer_values},
        "aud": {
            "essential": oauth2_settings.get("verify_aud", False),
            "values": audience_values,
        },
        "exp": {"essential": True},
    }
import io
def _recvall(s, n):
"""Reads all requested bytes from socket, raises Exception otherwise"""
with io.BytesIO() as buf:
tot = 0
while tot < n:
data = s.recv(n - tot)
if not data:
break
buf.write(data)
tot += len(data)
ret = buf.getvalue()
if len(ret) != n:
raise IOError('did not get enough bytes')
return ret | a286a868b3808e8c234ca7c0062de43adc8b44c6 | 692,319 |
def transform_job_describe(sm_client, transform_job_name):
    """API call to describe a batch-transform inference job."""
    try:
        return sm_client.describe_transform_job(TransformJobName=transform_job_name)
    except sm_client.exceptions.ResourceNotFound:
        raise Exception(f"Transform job not found: '{transform_job_name}'")
def get_single_label_maodel_accuracy_for_each_label(trained_model, x_test, y_test):
    """Compute per-label accuracy of a single-label model on a test set.

    (The 'maodel' typo is kept in the name for caller compatibility.)

    Inputs:
        trained_model: object with a ``predict`` method, i.e.
            ``trained_model.predict(x_test)`` -> predicted labels.
        x_test: numpy array of test data.
        y_test: numpy array of true test labels.
    Output:
        dict mapping each label present in y_test to the model's accuracy
        on the samples carrying that label.
    """
    predictions = trained_model.predict(x_test)
    correct = {}
    total = {}
    for true_label, predicted_label in zip(y_test, predictions):
        total[true_label] = total.get(true_label, 0) + 1
        if true_label == predicted_label:
            correct[true_label] = correct.get(true_label, 0) + 1
    return {label: correct.get(label, 0) / count
            for label, count in total.items()}
def get_sidebar_app_legend(title):
    """Return sidebar link legend HTML: each word of ``title`` on its own
    line, joined by '<br />'."""
    words = title.split(' ')
    return '<br />'.join(words)
import torch
def augment_edge(data):
    """Taken from OGB Repo.

    Augments ``data.edge_index`` / ``data.edge_attr`` in place with four
    edge sets: AST edges, inverse AST edges, next-token edges between
    consecutive attributed nodes, and inverse next-token edges.  Each edge
    receives a 2-dim attribute encoding (edge type, direction).
    """
    ##### AST edge
    edge_index_ast = data.edge_index
    # AST edges: attribute (0, 0).
    edge_attr_ast = torch.zeros((edge_index_ast.size(1), 2))
    ##### Inverse AST edge
    edge_index_ast_inverse = torch.stack([edge_index_ast[1], edge_index_ast[0]], dim=0)
    # Inverse AST edges: attribute (0, 1).
    edge_attr_ast_inverse = torch.cat(
        [
            torch.zeros(edge_index_ast_inverse.size(1), 1),
            torch.ones(edge_index_ast_inverse.size(1), 1),
        ],
        dim=1,
    )
    ##### Next-token edge
    ## Obtain attributed nodes and get their indices in dfs order
    # attributed_node_idx = torch.where(data.node_is_attributed.view(-1,) == 1)[0]
    # attributed_node_idx_in_dfs_order = attributed_node_idx[torch.argsort(data.node_dfs_order[attributed_node_idx].view(-1,))]
    ## Since the nodes are already sorted in dfs ordering in our case, we can just do the following.
    attributed_node_idx_in_dfs_order = torch.where(
        data.node_is_attributed.view(
            -1,
        )
        == 1
    )[0]
    ## build next token edge
    # Given: attributed_node_idx_in_dfs_order
    #        [1, 3, 4, 5, 8, 9, 12]
    # Output:
    #        [[1, 3, 4, 5, 8, 9]
    #         [3, 4, 5, 8, 9, 12]
    edge_index_nextoken = torch.stack(
        [attributed_node_idx_in_dfs_order[:-1], attributed_node_idx_in_dfs_order[1:]],
        dim=0,
    )
    # Next-token edges: attribute (1, 0).
    edge_attr_nextoken = torch.cat(
        [
            torch.ones(edge_index_nextoken.size(1), 1),
            torch.zeros(edge_index_nextoken.size(1), 1),
        ],
        dim=1,
    )
    ##### Inverse next-token edge
    edge_index_nextoken_inverse = torch.stack(
        [edge_index_nextoken[1], edge_index_nextoken[0]], dim=0
    )
    # Inverse next-token edges: attribute (1, 1).
    edge_attr_nextoken_inverse = torch.ones((edge_index_nextoken.size(1), 2))
    # Concatenate all four edge sets (and their attributes in the same order).
    data.edge_index = torch.cat(
        [
            edge_index_ast,
            edge_index_ast_inverse,
            edge_index_nextoken,
            edge_index_nextoken_inverse,
        ],
        dim=1,
    )
    data.edge_attr = torch.cat(
        [
            edge_attr_ast,
            edge_attr_ast_inverse,
            edge_attr_nextoken,
            edge_attr_nextoken_inverse,
        ],
        dim=0,
    )
    return data
def init_har2case_parser(subparsers):
    """Register the ``har2case`` sub-command and its options.

    ``har2case`` converts HAR (HTTP Archive) files into YAML/JSON
    testcases for HttpRunner.  Returns the configured sub-parser.
    """
    parser = subparsers.add_parser(
        "har2case",
        help="Convert HAR(HTTP Archive) to YAML/JSON testcases for HttpRunner.",
    )
    parser.add_argument("har_source_file", nargs="?", help="Specify HAR source file")
    parser.add_argument(
        "-2y", "--to-yml", "--to-yaml",
        dest="to_yaml", action="store_true",
        help="Convert to YAML format, if not specified, convert to JSON format by default.",
    )
    parser.add_argument(
        "--filter",
        help="Specify filter keyword, only url include filter string will be converted.",
    )
    parser.add_argument(
        "--exclude",
        help="Specify exclude keyword, url that includes exclude string will be ignored, "
             "multiple keywords can be joined with '|'",
    )
    return parser
def numval(token):
    """Return the numerical value of token.value if it is a number;
    otherwise return token.value unchanged."""
    converters = {'INTEGER': int, 'FLOAT': float}
    convert = converters.get(token.type)
    return convert(token.value) if convert else token.value
import textwrap
def _center_wrap(text: str, cwidth: int = 80, **kw) -> str:
"""Centers a text.
Args:
text (str): Text to center.
cwidth (int): Wanted width. Defaults to 80.
**kw: Arguments of textwrap.wrap
Returns:
str: Centered text.
"""
lines = textwrap.wrap(text, **kw)
return "\n".join(line.center(cwidth) for line in lines) | 5dad799732cebbcd3061b711b34e367e46f0d4b0 | 692,326 |
def is_even(n):
    """
    Determines if the given integer or float input is even.

    :param n: a float or integer value.
    :return: True if the value is even, otherwise False.
    """
    return not n % 2
def define_field(key: str, pattern: str = r'[^\s\'"]+') -> str:
    """Return a regex matching ``key value`` / ``key: value`` / ``key=value``
    with a named group for the value; ``pattern`` is the regex for the value,
    which may optionally be single- or double-quoted.
    E.g. key='map', pattern='\\w+' matches "map 'e'" with
    groupdict()['map'] == 'e'.
    """
    field_regex = rf"""^{key}(?:\s+|\s*[:=]\s*)(?:"(?={pattern}")|'(?={pattern}'))?(?P<{key}>{pattern})['"]?$"""
    return field_regex
def north(n, i1, i2, s):
    """
    Performs LS fit to s by
    a degree-two polynomial in an orthogonal basis.
    Function to be run with the Sine multitaper codes.

    **Parameters**

    n : int
        length of the record s
    i1, i2 : int
        1-based bounds of the fitting window (i2 - i1 + 1 samples used)
    s : array-like
        record to be fitted

    **Returns**

    ds : float
        estimate of 1st derivative ds/dn at center of record
    dds : float
        estimate of 2nd derivative
    |
    """
    L = i2 - i1 + 1
    el = float(L)
    # Normalization constants for the discrete orthogonal (Gram) polynomials
    # of degree 0, 1 and 2 over L equally spaced points.
    gamma = (el**2 - 1.0)/12.0
    u0sq = el  # NOTE(review): unused; presumably kept for parity with a Fortran original
    u1sq = el*(el**2 - 1.0)/12.0
    u2sq = (el*(el**2 - 1.0)*(el**2- 4.0))/180.0
    amid= 0.5*(el + 1.0)  # 1-based center of the fitting window
    dot0=0.0
    dot1=0.0
    dot2=0.0
    for kk in range(1,L+1):
        i = kk + i1 - 1 - 1  # convert 1-based window position to 0-based index into s
        # Negative or excessive index uses even function assumption
        if (i < 0):
            i = -i
        if (i > n-1):
            i = 2*(n-1) - i  # mirror reflection past the right edge as well
        # Projections of s onto the degree-0, 1 and 2 basis polynomials.
        dot0 = dot0 + s[i]
        dot1 = dot1 + (kk - amid) * s[i]
        dot2 = dot2 + ((kk - amid)**2 - gamma)*s[i]
    # Convert basis projections into derivative estimates at the window center.
    ds = dot1/u1sq
    dds = 2.0*dot2/u2sq
    return ds, dds
def cgi_escape(text):
    """Percent-encode a fixed set of URL-unsafe characters in *text*.

    Characters not in the table pass through unchanged.
    """
    table = {
        " ": r"%20",
        "&": r"%26",
        '"': r"%22",
        "'": r"%27",
        ">": r"%3e",
        "<": r"%3c",
        ":": r"%3a",
        "/": r"%2f",
        "?": r"%3f",
        "=": r"%3d",
    }
    escaped = [table.get(ch, ch) for ch in text]
    return "".join(escaped)
def true_positive(a, b):
    """Return quantity of TP - True Positives.

    Counts elements of *a* (the set of Positive predictions) that also
    appear in *b* (the set of Actual Positives). Duplicates in *a* are
    each counted.
    """
    return sum(1 for item in a if item in b)
import re
def clean_punctuation(str_text_raw):
    """Replace a fixed set of troublemaker punctuation characters with spaces."""
    pattern = "[$\(\)/|{|\}#~\[\]^#;:!?¿]"
    return re.sub(pattern, " ", str_text_raw)
def hwc2chw(image):
    """
    Reorder image axes from Height-Width-Color to Color-Height-Width.

    Parameters
    -------
    image : numpy.ndarray
        Image with pixels in Height-Width-Color order

    Returns
    -------
    numpy.ndarray
        A transposed view with pixels in Color-Height-Width order
    """
    chw_axes = (2, 0, 1)
    return image.transpose(chw_axes)
def dict_deep_overlay(defaults, params):
    """Recursively merge *params* onto *defaults*.

    When both arguments are dicts, each key in params is deep-overlaid onto
    the matching entry in defaults (defaults is mutated in place and
    returned); in every other case params wins outright.
    """
    if not (isinstance(defaults, dict) and isinstance(params, dict)):
        return params
    for key, value in params.items():
        defaults[key] = dict_deep_overlay(defaults.get(key), value)
    return defaults
def get_normalized_distance(pair, s_pronoun):
    """Normalized distance: if the antecedent and the mention are in the
    same sentence (pair[2] == pair[3]), normalize the token distance by
    len(s_pronoun); otherwise return 0."""
    if pair[2] != pair[3]:
        return 0
    span = abs(int(pair[1][2]) - int(pair[0][2]))
    return round(span / len(s_pronoun), 3)
def check_arg_error(pp, *numargs):
    """Report an illegal number of arguments.

    Returns True (after printing a message) when len(pp) is NOT one of the
    accepted counts in *numargs*; returns False when the count is legal.
    """
    if len(pp) in numargs:
        return False
    allowed = " or ".join(str(n) for n in numargs)
    print("command requires {} argument(s), {} given".format(allowed, len(pp)))
    return True
def isstringlike(item):
    """
    Checks whether a term is a string or not.

    Heuristic: anything float() can parse (e.g. 3, "2.5") is treated as
    numeric (returns 0); everything else is string-like (returns 1).

    Fix: the original only caught ValueError, so non-string, non-numeric
    inputs such as None raised TypeError instead of being classified.
    """
    try:
        float(item)
    except (TypeError, ValueError):
        return 1
    return 0
def is_type_factory(type_):
    """
    Parameters
    ----------
    `type_` - a type to be compared against (e.g. type(x) == `type_`)

    Returns
    -------
    validator - a two-argument callable (self, x) which raises
        ValueError if type(x) is not exactly `type_` (subclasses rejected)
    """
    def inner(self, x):
        if type(x) == type_:
            return
        msg = "Value must have type '{typ!s}'"
        raise ValueError(msg.format(typ=type_))
    return inner
def _split_name(name):
"""Splits a name in two components divided by '.'"""
comp = name.split('.')
if len(comp) > 1:
return (comp[0], '.'.join(comp[1:]))
return (None, name) | 2c4e771a18325dfd518b2f174567adabc773685c | 692,339 |
def flatten_cond_gen_values(gen_eval: dict) -> dict:
    """
    Converts the coherence evaluation results into a flattened dict.

    Keys of gen_eval['cond'] are combined as '<level>_<sub>__<group>'
    (outer level iterated in sorted order); gen_eval['random'] is shallow
    copied under the 'random' key.

    Fix: dropped the enumerate() wrappers whose indices (j, k) were never
    used.
    """
    flattened_dict = {}
    cond = gen_eval['cond']
    for l_key in sorted(cond.keys()):
        for s_key in cond[l_key].keys():
            for g_key in cond[l_key][s_key]:
                key = l_key + '_' + s_key + '__' + g_key
                flattened_dict[key] = cond[l_key][s_key][g_key]
    flattened_dict['random'] = {k: v for k, v in gen_eval['random'].items()}
    return flattened_dict
def findTop(node):
    """
    _findTop_

    Ignoring tree structure, walk parent references upward and return the
    top node that contains the node provided.

    Will work for any ConfigSection, not limited to ConfigSectionTree.

    Fix: iterative walk instead of recursion (no recursion-depth limit on
    deep trees) and `is None` instead of `== None`.
    """
    while node._internal_parent_ref is not None:
        node = node._internal_parent_ref
    return node
import collections
def conflicting_pactrees(flatdepends):
    """Check for conflicts pair-wise in the dependency tree.

    Args (assumed -- confirm against callers):
        flatdepends: mapping of package name -> object exposing ``depends``
            and ``conflicts`` as sets of package names.

    Returns:
        defaultdict mapping a package name to the set of packages it
        conflicts with.
    """
    package_conflicts = collections.defaultdict(set)
    for pkg_i, flatdep_i in flatdepends.items():
        for pkg_j, flatdep_j in flatdepends.items():
            if pkg_i == pkg_j:
                continue  # a package cannot conflict with itself
            deps_i = flatdep_i.depends
            conflicts_j = flatdep_j.conflicts
            # Direct conflict: pkg_j declares a conflict with pkg_i.
            if pkg_i in conflicts_j:
                package_conflicts[pkg_i].add(pkg_j)
            # NOTE(review): when a dependency of pkg_i is in pkg_j's conflict
            # list, BOTH pkg_j and the conflicted dependency get pkg_i added
            # to their conflict sets -- presumably intentional (transitive
            # conflict propagation); verify against the caller's expectations.
            for conflict in deps_i.intersection(conflicts_j):
                package_conflicts[pkg_j].add(pkg_i)
                package_conflicts[conflict].add(pkg_i)
    return package_conflicts
import logging
def get_querystring_filter_function():
    """Returns the function for filtering querystrings.

    The "function" is returned as a list of source-code lines (strings)
    intended to be written into generated code, not as a callable.
    """
    logging.info("Creating querystring filtering function")
    # Source lines of the generated helper; the whitespace inside these
    # strings is part of the emitted code and must be preserved verbatim.
    filter_querystring_list = [
        " def filter_querystring(self, querystring):",
        ' """Removes None value keys from the querystring"""',
        "",
        " querystring_out = {}",
        " for key in querystring:",
        " if querystring[key] != None:",
        " querystring_out[key] = querystring[key]",
        "",
        " return querystring_out",
    ]
    return filter_querystring_list
def _collect_generated_proto_go_sources(target):
    """Returns the proto go source files generated by this target.

    NOTE(review): despite the original docstring saying "depset", this
    returns a plain list (or None when the aspect provider is absent) --
    confirm the intended return type with callers.
    """
    if not hasattr(target, "aspect_proto_go_api_info"):
        return None  # target was not processed by the proto-go aspect
    go_proto_info = target.aspect_proto_go_api_info
    files = getattr(go_proto_info, "files_to_build", [])
    # Keep only generated Go protobuf sources (*.pb.go).
    return [f for f in files if f.basename.endswith(".pb.go")]
import os
def get_home_path():
    """Retrieve the home path.

    Prefers APPDATA (Windows), then HOME; returns '' when neither
    environment variable is set.
    """
    for var in ('APPDATA', 'HOME'):
        if var in os.environ:
            return os.environ[var]
    return ''
import uuid
import os
def uploaded_images_for_products(instance, filepath):
    """Generate a new unique upload path for a product image.

    The original file extension is kept; the basename becomes a UUID.
    """
    extension = filepath.rsplit('.', 1)[-1]
    unique_name = f'{uuid.uuid4()}.{extension}'
    return os.path.join('uploads/products/', unique_name)
def abbreviate_path(path):
    """Abbreviate path (replace with first letters of parts)."""
    initials = [part[0] for part in path.parts]
    return ''.join(initials)
import random
def creer_bon_mot(n):
    """Build a valid (balanced) parenthesis word of length 2n at random."""
    remaining_open = ["("] * n
    remaining_close = [")"] * n
    chars = []
    for _ in range(2 * n):
        if not remaining_open:
            # All opens consumed: only closes remain.
            chars.append(remaining_close.pop())
        elif len(remaining_open) >= len(remaining_close):
            # As many opens as closes are left, so the prefix is balanced:
            # we must open to stay valid.
            chars.append(remaining_open.pop())
        elif random.randint(0, 1):
            chars.append(remaining_open.pop())
        else:
            chars.append(remaining_close.pop())
    return ''.join(chars)
import os
def _gs(x: str):
"""Shorthand for `os.path.getsize`."""
return os.path.getsize(x) | f2845d390ec41b6f409c1d22d4492eb5b70fed78 | 692,349 |
def sum_values(*values):
    """Return the sum of values, considering only elements that are not None.

    Accumulates as ``v + current`` (v on the left), so a custom type only
    needs to define v + 0 and v + w (e.g. VectorCost).
    """
    total = 0
    for value in values:
        if value is None:
            continue
        total = value + total
    return total
def isfunction(f):
    """
    Tests if argument is a function

    NOTE(review): actually checks *callability*, so classes, lambdas,
    bound methods and objects defining __call__ also return True --
    broader than the name suggests.
    """
    return callable(f)
def calculateCost(offers, total_amount_to_buy):
    """Greedy total cost: buy from each offer in order until the target is met.

    Parameters
    ----------
    offers : list[dict]
        Each dict carries 'amountAvailable' and 'precio' (unit price).
    total_amount_to_buy : int

    Returns
    -------
    total_cost : int
    """
    total_cost = 0
    remaining = total_amount_to_buy
    for offer in offers:
        if not remaining:
            break  # target reached; ignore the rest of the offers
        taken = min(offer['amountAvailable'], remaining)
        remaining -= taken
        total_cost += taken * offer['precio']
    return total_cost
import math
def calculate_equal_proportions(num_seats, populations):
    """
    Apportion *num_seats* by the Huntington-Hill method of equal proportions.

    Every state starts with one seat; remaining seats go, one at a time, to
    the state with the highest priority number
    ``population / sqrt(s * (s + 1))``, where s is its current seat count.

    Args:
        num_seats: total number of seats to apportion.
        populations: population per state.

    Returns:
        list of final fair shares (seat counts), parallel to *populations*.

    Raises:
        ValueError: if num_seats < number of states -- every state must get
            at least one seat (the original looped forever in this case).

    Fix: the priority numbers were computed twice (an initial loop whose
    results were immediately recomputed inside the while loop); the
    redundant pass is removed.
    """
    num_states = len(populations)
    if num_seats < num_states:
        raise ValueError("num_seats must be at least the number of states")
    # Every state is constitutionally guaranteed one seat.
    fair_shares = [1] * num_states
    while sum(fair_shares) != num_seats:
        # Recompute priority numbers from the current seat counts.
        priorities = [
            populations[i] / math.sqrt(fair_shares[i] * (fair_shares[i] + 1))
            for i in range(num_states)
        ]
        # Award the next seat to the state with the highest priority
        # (first such state on ties, matching list.index semantics).
        winner = priorities.index(max(priorities))
        fair_shares[winner] += 1
    return fair_shares
import platform
def python_version():
    """Get the current python version as a tuple of ints, e.g. (3, 11, 4)."""
    return tuple(int(part) for part in platform.python_version_tuple())
def _on_args(catalog, row):
"""Given the row of the DR1 catalog, turn into
(lon, lat, radius) tuple
"""
lon, lat, pa, a, b, t, hr = catalog[row]
pa *= -1
return lon, lat, 1.3 * (a + t) | 83a32e69871ecdf629757cbd257f8c739980c415 | 692,355 |
import random
def mutate(gene, threshold):
    """ mutate gene

    Note: *threshold* is the probability of returning the gene UNCHANGED;
    mutation happens only when random() >= threshold, so a HIGHER
    threshold means FEWER mutations (visible in the examples below).

    Arguments:
    ----------
        gene {list[int]} -- gene
        threshold {float} -- threshold for mutating

    Returns:
    --------
        new_gene {list[int]} -- new gene

    Examples (random samples, not reproducible doctests):
    ---------
        >>> gene = [4, 3, 2, 1, 0]
        >>> for _ in range(5):
        ...     new_gene = mutate(gene, 0.5)
        ...     print(new_gene)
        [0, 3, 2, 1, 0]
        [4, 3, 2, 1, 0]
        [4, 3, 2, 1, 0]
        [2, 3, 2, 1, 0]
        [4, 0, 2, 1, 0]
        >>> for _ in range(5):
        ...     new_gene = mutate(gene, 0.1)  # change threshold to 0.1
        ...     print(new_gene)
        [4, 2, 2, 1, 0]
        [4, 3, 0, 1, 0]
        [4, 1, 2, 1, 0]
        [4, 0, 2, 1, 0]
        [2, 3, 2, 1, 0]
        >>> for _ in range(5):
        ...     new_gene = mutate(gene, 0.9)  # change threshold to 0.9
        ...     print(new_gene)
        [2, 3, 2, 1, 0]
        [4, 3, 2, 1, 0]
        [4, 3, 2, 1, 0]
        [4, 3, 2, 1, 0]
        [4, 3, 2, 1, 0]
    """
    mutate_prob = random.random()
    if mutate_prob < threshold:
        # No mutation. NOTE(review): this branch returns the ORIGINAL list
        # (aliased), while the mutated branch returns a copy -- confirm
        # callers do not rely on either behavior.
        new_gene = gene
    else:
        length = len(gene)
        # Only positions 0 .. length-2 may mutate; the last element never does.
        mutate_pos = random.randint(0, length-2)
        new_gene = gene.copy()
        while True:
            # Draw from 0 .. length-mutate_pos-1, retrying until the new
            # value differs from the current one at that position.
            new_value = random.randint(0, length-mutate_pos-1)
            if not new_value == gene[mutate_pos]:
                new_gene[mutate_pos] = new_value
                break
    return new_gene
def getSQLT(timestamp):
    """Make timestamp for SQLite from Python timestamp, meaning a UNIX epoch INTEGER.

    :param timestamp: POSIX timestamp (int or float, seconds since epoch)
    :return: SQLite compatible timestamp in the form of a UNIX epoch INTEGER
        (fractional seconds are truncated toward zero)"""
    # I know this is a very small function, but now it's clear what SQL needs
    return int(timestamp)
import logging
def get_logger(logger_name, propagate=True):
    """Thin wrapper over logging.getLogger that attaches a NullHandler.

    The NullHandler guarantees nothing is dumped to the terminal by the
    default configuration when the library user did not (or chose not to)
    configure logging.

    Args:
        logger_name (str): the logger instance name (usually the module
            name with __name__)
        propagate (bool): whether to propagate the messages up to ancestor
            loggers

    Returns:
        logging.Logger: Logger instance (created on first request)

    Raises:
        None
    """
    log = logging.getLogger(logger_name)
    log.propagate = propagate
    log.addHandler(logging.NullHandler())
    return log
import re
def get_slide_id(url):
    """Extract the slide id from a Google Slides edit URL.

    e.g. https://docs.google.com/presentation/d/<doc>/edit#slide=id.gX_0_0
    returns 'gX_0_0'; returns None when the URL has no slide fragment.
    """
    match = re.search("edit#slide=id.(.+?)$", url)
    return match.group(1) if match else None
import torch
def focal_equal(attn, batch_size, queryL, sourceL):
    """
    consider the confidence g(x) for each fragment as equal
    sigma_{j} (xi - xj) = sigma_{j} xi - sigma_{j} xj
    attn: (batch, queryL, sourceL)

    Returns a 0/1 mask of the same shape: 1 where an attention weight
    exceeds its row mean, 0 elsewhere.
    """
    row_sums = torch.sum(attn, dim=-1, keepdim=True)
    score = attn * sourceL - row_sums
    return torch.where(score > 0, torch.ones_like(attn), torch.zeros_like(attn))
def get_role_assignments(client, user, project):
    """Get role assignments for *user* scoped to *project*.

    Thin delegation to the Keystone v3 role_assignments API.

    :param client:
    :type client: keystoneclient.v3.client.Client
    :param user:
    :type user: keystoneclient.v3.users.User
    :param project:
    :type project: keystoneclient.v3.projects.Project
    :return: list of role assignments, as returned by
        client.role_assignments.list
    """
    return client.role_assignments.list(user=user, project=project)
import warnings
def _determine_cy_weighting_func(weighting_function):
""" Determine cython weight function value. """
if weighting_function.upper() == 'GRIDRAD':
cy_weighting_function = 4
elif weighting_function.upper() == 'BARNES2':
cy_weighting_function = 3
elif weighting_function.upper() == 'NEAREST':
cy_weighting_function = 2
elif weighting_function.upper() == 'CRESSMAN':
cy_weighting_function = 1
elif weighting_function.upper() == 'BARNES':
warnings.warn("Barnes weighting function is deprecated."
" Please use Barnes 2 to be consistent with"
" Pauley and Wu 1990.", DeprecationWarning)
cy_weighting_function = 0
else:
raise ValueError('unknown weighting_function')
return cy_weighting_function | 124f47519402cb07e1d0efc600802d22fd4729c7 | 692,363 |
def process_vpc_subnets(account, region, ec2):
    """
    describes one or more of your subnets

    Pages through ec2.describe_subnets, annotates each subnet record with
    AccountId / Region / QueryType provenance fields (mutating the boto3
    response dicts in place) and prints a discovery line per subnet.

    :param account: AWS account id used for tagging/logging
    :param region: region name used for tagging/logging
    :param ec2 (child session): boto3 EC2 client for the child account
    :return list of subnets: the annotated subnet dicts
    """
    # NOTE(review): `list` and `dict` shadow the builtins of the same name.
    list = []
    result = []
    response = ec2.describe_subnets()
    for item in response['Subnets']:
        list.append(item)
    # Follow pagination until no NextToken is returned.
    while 'NextToken' in response:
        response = ec2.describe_subnets(NextToken=response['NextToken'])
        for item in response['Subnets']:
            list.append(item)
    for item in list:
        # Compact summary used only for the log line below.
        dict = {'AccountId': account, 'SubnetId': item['SubnetId'], 'VpcId': item['VpcId'], 'CIDR': item['CidrBlock'], 'Region': region}
        print(f'Account {account}: New VPC subnet found {dict}')
        # Tag the raw subnet record with provenance metadata before returning.
        item['AccountId'] = account
        item['Region'] = region
        item['QueryType'] = 'subnets'
        result.append(item)
    return result
def get_content_ids_to_page_views_mapper(df):
    """
    Transform BigQuery dataframe to a dictionary keyed by content_id.

    After indexing by 'content_id' and transposing, the first record of
    the transposed frame maps content_id -> pageviews.

    :param df: pandas DataFrame with a 'content_id' column
    :return: dict of content_id -> pageviews
    """
    indexed = df.set_index('content_id')
    records = indexed.T.to_dict('records')
    return records[0]
import os
def new_mask():
    """
    new_mask() -> bytearray

    Create a new 4-byte random mask value as conformant to the protocol.
    """
    raw = os.urandom(4)
    return bytearray(raw)
import re
def checkStr(datafile, string, begin_line=0):
    """Find the first occurrence of a pattern and return its line number.

    Lines are numbered from 1; the first *begin_line* lines are skipped.

    Args:
        datafile (list-like): an iterable of lines
        string (str): regex pattern searched for in each line
        begin_line (int): number of leading lines to skip

    Returns:
        int: 1-based number of the first matching line, or None when the
        pattern is never found.

    Fix: removed the unreachable `break` after `return`, the unused
    `value` list, and made the not-found result an explicit None.
    """
    for count, line in enumerate(datafile, start=1):
        if count <= begin_line:
            continue  # skip the first begin_line lines
        if re.search(string, str(line)):
            return count
    return None
def stationlist2nameext(IDs, ID_list):
    """Creates 1D list with extensions formed by index of IDs in ID_list.

    It needs:
        IDs     ... list with single IDs which can include multiples
        ID_list ... list with single, unique IDs
    It returns:
        name_ext ... list with extensions '_<i>' where i is the 1-based
                     position of each ID in ID_list
    """
    return ['_' + str(ID_list.index(single_id) + 1) for single_id in IDs]
import functools
def create_new_class(cls):
    """
    Create a fake class for auto-bound parameters.

    Returns a fresh subclass of *cls* whose metadata (__name__, __doc__,
    __qualname__, ...) is copied from *cls* via functools.wraps, so the
    new class is indistinguishable by name from the original.
    """
    # updated=() stops wraps() from merging __dict__ entries, which would
    # fail / be meaningless for class objects.
    @functools.wraps(cls, updated=())
    class NewClass(cls):
        pass
    return NewClass
import os
def is_dir_empty(path):
    """
    Returns true if directory is empty

    :param path: Directory path
    :type path: str
    :returns True if directory is empty
    :rtype bool
    """
    return not os.listdir(path)
def net_interface_add_ip_address(client, ifc_index, ip_addr):
    """Add IP address via the 'net_interface_add_ip_address' RPC.

    Args:
        ifc_index: ifc index of the nic device (int)
        ip_addr: ip address will be added

    Returns:
        whatever client.call returns for this RPC -- presumably the
        JSON-RPC result; confirm against the client implementation.
    """
    params = {'ifc_index': ifc_index, 'ip_address': ip_addr}
    return client.call('net_interface_add_ip_address', params)
import numpy as np
def Normalize(X, Type):
    """Normalize a numpy array.

    Type == 'max'      : divide by the global maximum.
    Type == 'mean_sig' : subtract the mean, divide by the std (z-score).
    Any other Type returns None (quirk kept for backward compatibility).
    """
    if Type == 'max':
        return X / np.max(X)
    if Type == 'mean_sig':
        return (X - np.mean(X)) / np.std(X)
    return None
def load_matrix_group_name(value):
    """load matrix_group_name

    Identity pass-through: presumably a loader/deserializer hook that
    other fields override -- confirm against the loader framework.
    """
    return value
def add_tags_to_sentence(df):
    """
    Wraps the entity word of the row's sentence in <ent>...</ent> tags.

    *df* is a row-like object whose .values holds the sentence at index 0
    and the (string/int) token index of the entity word at index 1.
    """
    sentence, raw_idx = df.values[0], df.values[1]
    tokens = sentence.split(' ')
    idx = int(raw_idx)
    tokens[idx] = "<ent>" + tokens[idx] + "</ent>"
    return " ".join(tokens)
def _get_val(row, input_arr):
"""Small function for extracting values from array based on index.
"""
i, j, k = row
return input_arr[i, j, k] | 190fee22f78be9f7d8f26bee7c783a2b225a7513 | 692,377 |
def get_enz_remnant(enz):
    """Get enzyme recognition site remnant sequence.

    Returns a two-element list: the remnant and a bisulfite-style variant
    (G->A when the overhang is positive, C->T otherwise).

    NOTE(review): assumes *enz* is a Biopython restriction enzyme exposing
    ovhg/site/fst3/fst5 attributes -- confirm against callers.
    """
    if enz.ovhg > 0:
        # Positive overhang: remnant starts at the 3'-strand cut position.
        remnant = enz.site[enz.fst3:]
        return [remnant,remnant.replace('G','A')]
    else:
        # Blunt or negative overhang: remnant starts at the 5'-strand cut.
        remnant = enz.site[enz.fst5:]
        return [remnant,remnant.replace('C','T')]
def get_dimorder(dimstring):
    """Get the order of dimensions from dimension string.

    :param dimstring: string containing the dimensions
    :type dimstring: str
    :return: dims_dict - dictionary with the dimensions and its positions
        (-1 for absent dimensions)
    :rtype: dict
    :return: dimindex_list - list with indices of dimensions
    :rtype: list
    :return: numvalid_dims - number of valid (present) dimensions
    :rtype: integer

    Fix: a dimension found at string position 0 is valid; the original
    counted with ``i > 0`` and wrongly discarded the first character's
    dimension from numvalid_dims.
    """
    dims = ['R', 'I', 'M', 'H', 'V', 'B', 'S', 'T', 'C', 'Z', 'Y', 'X', '0']
    dims_dict = {}
    dimindex_list = []
    for d in dims:
        pos = dimstring.find(d)  # -1 when the dimension is absent
        dims_dict[d] = pos
        dimindex_list.append(pos)
    numvalid_dims = sum(i >= 0 for i in dimindex_list)
    return dims_dict, dimindex_list, numvalid_dims
def apply_ants_transform_to_vector(transform, vector):
    """
    Apply transform to a vector

    ANTsR function: `applyAntsrTransformToVector`

    Thin delegation to transform.apply_to_vector.

    Arguments
    ---------
    transform : ANTsTransform
        the transform to apply
    vector : list/tuple
        vector to which the transform will be applied

    Returns
    -------
    tuple : transformed vector
    """
    return transform.apply_to_vector(vector)
def product(numbers):
    """Calculates the product of a series of numbers.

    Raises:
        ValueError: when *numbers* yields no elements.

    Fix: the original tested ``if not product`` -- the function object
    itself, which is always truthy -- so the intended empty-sequence
    check never fired and product([]) silently returned 1.
    """
    result = 1
    empty = True
    for n in numbers:
        empty = False
        result *= n
    if empty:
        raise ValueError("product of empty sequence")
    return result
from typing import List
from typing import Dict
def flatten_dicts(dicts: List[Dict]) -> Dict:
    """Merge a list of dicts into one dict of value lists.

    Args:
        dicts (list): list of dicts

    Returns:
        dict: each key mapped to the list of values it took, in order of
        appearance across *dicts*.
    """
    merged: Dict = {}
    for entry in dicts:
        for key, value in entry.items():
            if key not in merged:
                merged[key] = []
            merged[key].append(value)
    return merged
def readFile(filename, split=False):
    """ Read the file and return it as a string

    Parameters
    ----------
    @param filename - path to the file
    @param split - whether to split at newline or not, default False

    Returns
    ----------
    @param s - either a string (if split==False) or a list of strings (if split==True)
    representing the entire file or its subsequent lines, respectively
    (a trailing newline yields a final empty element, matching the original
    concatenate-then-split behavior)

    Fix: read in a single call instead of quadratic per-line string
    concatenation, and drop the redundant f.close() inside the with-block.
    """
    with open(filename, 'r') as f:
        s = f.read()
    if split:
        s = s.split("\n")
    return s
import glob
import os
def get_cases(cases_root, arm, event, case=None):
    """
    Get a sorted list of case directories from the root dir, optionally
    for a single case. Only cases containing the given arm/event
    subdirectory are returned.
    """
    pattern = case if case else 'NCANDA_S*'
    matches = [
        cpath
        for cpath in glob.glob(os.path.join(cases_root, pattern))
        if os.path.isdir(os.path.join(cpath, arm, event))
    ]
    return sorted(matches)
from typing import List
import re
def _find_cli_command_names(file_contents: str) -> List[str]:
"""Search the given file's contents and see if it specifies commands and retrieve the command names"""
if 'command_re' not in globals():
# Search for similar to:
# root_cli.command('build')
globals()['command_re'] = re.compile(r"""^.*\.command\s*\(\s*['"]([\w_]+)['"].*\).*$""")
# Search for similar to:
# root_cli.add_typer(kernel_tests_cli, name='kernel_tests')
globals()['group_re'] = re.compile(r"""^.*add_typer\s*\(.*name\s*=\s*['"]([\w_]+)['"].*\)$""")
command_re = globals()['command_re']
group_re = globals()['group_re']
# If see if any command groups have been defined and return those
retval = []
for line in file_contents.splitlines():
match = group_re.match(line)
if match:
retval.append(match.group(1))
# Otherwise, see if any command have been defined
if not retval:
for line in file_contents.splitlines():
match = command_re.match(line)
if match:
retval.append(match.group(1))
return retval | 519c80174aded1cdf459a3365f9a179677f04d5f | 692,387 |
def selector(expression):
    """If the expression is true, return the string 'selected'; else None.

    Useful for HTML <option>s.
    """
    return "selected" if expression else None
def fibonacci(number):
    """Return the *number*-th Fibonacci number.

    The Fibonacci numbers are the sequence
        0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, ...
    defined by the recurrence
        F(n) = F(n-1) + F(n-2),  with  F(0) = 0  and  F(1) = 1.

    Examples:
        fibonacci(5)  == 5
        fibonacci(10) == 55
        fibonacci(12) == 144

    Fixes: iterative O(n) implementation replaces the exponential-time
    double recursion; the original docstring misstated the recurrence as
    F(n-1) + F(n-1) and claimed Fibonacci(2) == 2 (it is 1).

    Raises:
        ValueError: for negative input (the original recursed without bound).
    """
    if number < 0:
        raise ValueError("number must be non-negative")
    prev, curr = 0, 1
    for _ in range(number):
        prev, curr = curr, prev + curr
    return prev
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.