seed stringlengths 1 14k | source stringclasses 2
values |
|---|---|
def _generate_anchor_configs(min_level, max_level, num_scales, aspect_ratios):
"""Generates mapping from output level to a list of anchor configurations.
A configuration is a tuple of (num_anchors, scale, aspect_ratio).
Args:
min_level: integer number of minimum level of the output feature pyramid.
max_level: integer number of maximum level of the output feature pyramid.
num_scales: integer number representing intermediate scales added
on each level. For instances, num_scales=2 adds two additional
anchor scales [2^0, 2^0.5] on each level.
aspect_ratios: list of tuples representing the aspect raito anchors added
on each level. For instances, aspect_ratios =
[(1, 1), (1.4, 0.7), (0.7, 1.4)] adds three anchors on each level.
Returns:
anchor_configs: a dictionary with keys as the levels of anchors and
values as a list of anchor configuration.
"""
anchor_configs = {}
for level in range(min_level, max_level + 1):
anchor_configs[level] = []
for scale_octave in range(num_scales):
for aspect in aspect_ratios:
anchor_configs[level].append(
(2**level, scale_octave / float(num_scales), aspect))
return anchor_configs | bigcode/self-oss-instruct-sc2-concepts |
import copy
def sequences_add_end_id_after_pad(sequences, end_id=888, pad_id=0):
    """Replace the first padding token of each sequence with an end token.

    The input is not modified; a deep copy is returned. Sequences that
    contain no padding are returned unchanged.

    Parameters
    -----------
    sequences : list of list of int
        All sequences where each row is a sequence.
    end_id : int
        The end ID.
    pad_id : int
        The pad ID.

    Returns
    ----------
    list of list of int
        The processed sequences.

    Examples
    ---------
    >>> sequences = [[1,2,0,0], [1,2,3,0], [1,2,3,4]]
    >>> print(sequences_add_end_id_after_pad(sequences, end_id=99, pad_id=0))
    [[1, 2, 99, 0], [1, 2, 3, 99], [1, 2, 3, 4]]
    """
    # Deep copy so the caller's nested lists are never mutated.
    sequences_out = copy.deepcopy(sequences)
    for row_idx, row in enumerate(sequences):
        for col_idx, token in enumerate(row):
            if token == pad_id:
                sequences_out[row_idx][col_idx] = end_id
                # Only the first pad becomes the end token.
                break
    return sequences_out
import sqlite3
import typing
def get_robot_types(cursor: sqlite3.Cursor) -> typing.List[dict]:
    """Fetch all robot types from the database.

    :param cursor: open cursor on the robots database
    :type cursor: sqlite3.Cursor
    :return: one row per robot type, each carrying ``id`` and ``name``
    :rtype: typing.List[dict]
    """
    query = "SELECT id, name FROM robot_type"
    return cursor.execute(query).fetchall()
from typing import Sequence
from typing import Dict
def get_tagged_secrets(secrets_manager, secrets_tags: Sequence[str]) -> Dict[str, str]:
    """
    Map AWS Secrets Manager secret names to their ARNs for every secret
    carrying any of the given tag keys.
    """
    name_to_arn: Dict[str, str] = {}
    paginator = secrets_manager.get_paginator("list_secrets")
    for tag_key in secrets_tags:
        tag_filter = [{"Key": "tag-key", "Values": [tag_key]}]
        for page in paginator.paginate(Filters=tag_filter):
            name_to_arn.update(
                (entry["Name"], entry["ARN"]) for entry in page["SecretList"]
            )
    return name_to_arn
def usechangesetcentricalgo(repo):
    """Return True when changeset-centric copy algorithms should be used."""
    # Sidedata storage always implies the changeset-centric algorithm.
    if repo.filecopiesmode == b'changeset-sidedata':
        return True
    # Otherwise honour the experimental read-from configuration.
    changeset_sources = (b'changeset-only', b'compatibility')
    return repo.ui.config(b'experimental', b'copies.read-from') in changeset_sources
def can_zip_response(headers):
    """
    Check whether the client accepts a gzip-compressed response.

    :param headers: mapping of request header names to values
    :return: True if the ACCEPT-ENCODING header lists gzip, else False
    """
    # dict.get with a default replaces the `in headers.keys()` check plus
    # second lookup; substring test matches the original behaviour.
    return 'gzip' in headers.get('ACCEPT-ENCODING', '')
from typing import Optional
from typing import Dict
from typing import List
def get_mock_aws_alb_event(
    method,
    path,
    query_parameters: Optional[Dict[str, List[str]]],
    headers: Optional[Dict[str, List[str]]],
    body,
    body_base64_encoded,
    multi_value_headers: bool,
):
    """Return a mock AWS ELB event.
    The `query_parameters` parameter must be given in the
    `multiValueQueryStringParameters` format - and if `multi_value_headers`
    is disabled, then they are simply transformed in to the
    `queryStringParameters` format.
    Similarly for `headers`.
    If `headers` is None, then some defaults will be used.
    if `query_parameters` is None, then no query parameters will be used.
    """
    # Base event skeleton; the target-group ARN is a fixed sample value.
    resp = {
        "requestContext": {
            "elb": {
                "targetGroupArn": (
                    "arn:aws:elasticloadbalancing:us-east-2:123456789012:"
                    "targetgroup/lambda-279XGJDqGZ5rsrHC2Fjr/49e9d65c45c6791a"
                )
            }
        },
        "httpMethod": method,
        "path": path,
        "body": body,
        "isBase64Encoded": body_base64_encoded,
    }
    if headers is None:
        # Browser-like defaults, expressed in multi-value (list) form.
        headers = {
            "accept": [
                "text/html,application/xhtml+xml,application/xml;"
                "q=0.9,image/webp,image/apng,*/*;q=0.8"
            ],
            "accept-encoding": ["gzip"],
            "accept-language": ["en-US,en;q=0.9"],
            "connection": ["keep-alive"],
            "host": ["lambda-alb-123578498.us-east-2.elb.amazonaws.com"],
            "upgrade-insecure-requests": ["1"],
            "user-agent": [
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
                "(KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
            ],
            "x-amzn-trace-id": ["Root=1-5c536348-3d683b8b04734faae651f476"],
            "x-forwarded-for": ["72.12.164.125"],
            "x-forwarded-port": ["80"],
            "x-forwarded-proto": ["http"],
            "x-imforwards": ["20"],
        }
    query_parameters = {} if query_parameters is None else query_parameters
    # Only set one of `queryStringParameters`/`multiValueQueryStringParameters`
    # and one of `headers`/multiValueHeaders (per AWS docs for ALB/lambda)
    if multi_value_headers:
        resp["multiValueQueryStringParameters"] = query_parameters
        resp["multiValueHeaders"] = headers
    else:
        # Take the last query parameter/cookie (per AWS docs for ALB/lambda)
        # NOTE(review): an empty value list collapses to `[]`, not `""` —
        # confirm that this is the intended single-value representation.
        resp["queryStringParameters"] = {
            k: (v[-1] if len(v) > 0 else []) for k, v in query_parameters.items()
        }
        resp["headers"] = {k: (v[-1] if len(v) > 0 else []) for k, v in headers.items()}
    return resp
def get_artist_names(artist_database, artist_id):
    """
    Return every known name (primary plus alternatives) for an artist.

    :param artist_database: mapping of artist id (str) to artist records
    :param artist_id: artist identifier (any type convertible to str)
    :return: list of names, primary name first; empty list if unknown
    """
    # Single lookup instead of converting/indexing str(artist_id) repeatedly.
    artist = artist_database.get(str(artist_id))
    if artist is None:
        return []
    # `alt_names` may be None or empty; `or []` keeps the concatenation safe.
    return [artist["name"]] + list(artist["alt_names"] or [])
def containsAll(str, set):
    """
    Checks if a given string contains all characters in a given set.

    NOTE(review): the parameter names shadow the built-ins `str` and `set`;
    they are kept only for backward compatibility with keyword callers.

    :param str: input string
    :type: string
    :param set: set of characters
    :type: string
    :rtype: boolean
    """
    # all() short-circuits on the first missing character, exactly like the
    # original explicit loop with its early `return False`.
    return all(c in str for c in set)
def format_artist_rating(__, data):
    """Returns a formatted HTML line describing the artist and its rating."""
    template = (
        "<li><a href='{artist_tag}/index.html'>{artist}</a>"
        " - {rating:.1f}</li>\n"
    )
    return template.format(**data)
import hashlib
def _hash_file(fpath, algorithm="sha256", chunk_size=65535):
"""Calculates a file sha256 or md5 hash.
# Example
```python
>>> from keras.data_utils import _hash_file
>>> _hash_file('/path/to/file.zip')
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
```
# Arguments
fpath: path to the file being validated
algorithm: hash algorithm, one of 'auto', 'sha256', or 'md5'.
The default 'auto' detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
# Returns
The file hash
"""
if algorithm in ("sha256", "auto"):
hasher = hashlib.sha256()
else:
hasher = hashlib.md5()
with open(fpath, "rb") as fpath_file:
for chunk in iter(lambda: fpath_file.read(chunk_size), b""):
hasher.update(chunk)
return hasher.hexdigest() | bigcode/self-oss-instruct-sc2-concepts |
def avoid_keyerror(dictionary, key):
    """ Returns the value associated with key in dictionary. If key
    does not exist in the dictionary, print out 'Avoid Exception' and
    map it to the string 'no value'.
    >>> d = {1: 'one', 3: 'three', 5: 'five'}
    >>> avoid_keyerror(d, 3)
    'three'
    >>> avoid_keyerror(d, 4)
    Avoid Exception
    >>> d[4]
    'no value'
    """
    try:
        value = dictionary[key]
    except KeyError:
        print('Avoid Exception')
        dictionary[key] = 'no value'
        return None
    return value
def string_boolean(value):
    """Determines the boolean value for a specified string.

    The empty string and the case-insensitive literals 'false', 'f' and '0'
    are falsy; every other string is truthy.
    """
    # Direct boolean expression replaces the if/else returning literals.
    return value.lower() not in ('false', 'f', '0', '')
import torch
def cosine_similarity(x, y=None, eps=1e-8):
"""Calculate cosine similarity between two matrices;
Args:
x: N*p tensor
y: M*p tensor or None; if None, set y = x
This function do not broadcast
Returns:
N*M tensor
"""
w1 = torch.norm(x, p=2, dim=1, keepdim=True)
if y is None:
w2 = w1.squeeze(dim=1)
y = x
else:
w2 = torch.norm(y, p=2, dim=1)
w12 = torch.mm(x, y.t())
return w12 / (w1*w2).clamp(min=eps) | bigcode/self-oss-instruct-sc2-concepts |
def all_same(L):
    """Check if all elements in list are equal.

    Parameters
    ----------
    L : array-like, shape (n,)
        List of objects of any type.

    Returns
    -------
    y : bool
        True if all elements are equal (vacuously True when empty).
    """
    if len(L) == 0:
        return True
    first = L[0]
    return all(element == first for element in L)
def _IsDuplicateInterestedUser(url_entity, bots_user_key):
"""Checks if bots_user_key exist in interested_users.
Args:
url_entity: BotsUrl Entity to check for.
bots_user_key: bots_user entity key (db.Key).
Returns:
True if bots_user_key exist in interested_users, else False.
"""
return str(bots_user_key) in [str(x) for x in url_entity.interested_users] | bigcode/self-oss-instruct-sc2-concepts |
def defined_or_defaut_dir(default, directory):
    """
    if given a directory it will return that directory, otherwise it returns the default
    """
    # Truthiness check matches the original `if directory:` branch.
    return directory if directory else default
def display_timedelta(minutes):
    """Converts timedelta in minutes to human friendly format.

    Parameters
    ----------
    minutes: int

    Returns
    -------
    string
        The timedelta in 'x days y hours z minutes' format.

    Raises
    ------
    ValueError
        If the timedelta is negative.
    """
    # Validate before doing any arithmetic; the original raised a bare
    # ValueError with no message.
    if minutes < 0:
        raise ValueError('timedelta must be non-negative, got %r' % (minutes,))

    def plural(num):
        # '1 day' but '2 days'
        return '' if num == 1 else 's'

    days, remainder = divmod(minutes, 1440)
    hours, minutes = divmod(remainder, 60)
    time_elements = []
    if days > 0:
        time_elements.append(f'{days} day{plural(days)}')
    if hours > 0:
        time_elements.append(f'{hours} hour{plural(hours)}')
    # Always show minutes when nothing else was emitted (covers input 0).
    if minutes > 0 or (days == 0 and hours == 0):
        time_elements.append(f'{minutes} minute{plural(minutes)}')
    return ' '.join(time_elements)
def obs_model_parameter_values(values):
    """
    Return a list containing all of the values for a single parameter.

    :param values: The values for the parameter.
    :type values: Union[List[Any], Any]
    """
    # Wrap scalars; pass lists through untouched.
    return values if isinstance(values, list) else [values]
import ast
def subscript_type_to_string(subscript: ast.Subscript) -> str:
    """
    Render a subscript annotation node back to source text.

    For example, an ``Optional[int]`` node yields ``"Optional[int]"``.
    """
    rendered = ast.unparse(subscript)
    return rendered
import random
def makelist(random_int):
    """Return a list of `random_int` uniform random floats in [0.0, 1.0).

    Args:
        random_int: desired length of the returned list.
    """
    # Comprehension replaces the append loop with its unused counter.
    return [random.random() for _ in range(random_int)]
def get_table_headers(table):
    """Given a table soup, returns all the headers"""
    # Headers live in the <th> cells of the first row.
    header_row = table.find("tr")
    return [th.text.strip() for th in header_row.find_all("th")]
import logging
from datetime import datetime
from pathlib import Path
import requests
def download_file(url, file_location=None, overwrite=False):
    """
    Purpose:
        Download file from specified URL and store in a specfied location.
        If no location is provided, the file is downloaded in the current
        directory. If overwrite is false, the file is not downloaded.
    Args:
        url (string): Full URL path to download file from.
        file_location (string): Full path to where file will be stored.
        overwrite (Boolean): Whether or not to overwrite file if it already
            exists
    Return
        file_location (string): Full path to where file was be stored.
    """
    logging.info(f"Fetching file from {url}")
    # Default filename: today's date plus the URL's extension.
    today = str(datetime.now().date())
    extension = url.split(".")[-1]
    if not file_location:
        file_location = f"./{today}.{extension}"
    elif file_location.endswith("/"):
        file_location = f"{file_location}{today}.{extension}"

    if not overwrite and Path(file_location).is_file():
        error_msg = "Cannot Download. File exists and overwrite = False"
        logging.error(error_msg)
        raise Exception(error_msg)

    logging.info(f"Storing File to: {file_location}")
    response = requests.get(url, stream=True)
    response.raise_for_status()
    with open(file_location, "wb") as downloaded_file:
        # Stream to disk in 1 KiB chunks; skip keep-alive chunks.
        for chunk in response.iter_content(chunk_size=1024):
            if chunk:
                downloaded_file.write(chunk)
                downloaded_file.flush()
    return file_location
import re
def md(text):
    """Basic filter for escaping text in Markdown."""
    # Backslash-escape the Markdown emphasis characters in a single pass.
    return text.translate(str.maketrans({'_': r'\_', '*': r'\*'}))
def get_confirmation_block_from_results(*, block_identifier, results):
    """
    Return the confirmation block from results list
    """
    # First match wins; None when no result carries the identifier.
    for item in results:
        if item['message']['block_identifier'] == block_identifier:
            return item
    return None
def add(initial: int=0, number: int=0) -> int:
    """Return sum of *initial* and *number*.

    :param initial: Initial value.
    :type initial: int
    :param number: Value to add to initial.
    :type number: int
    :return: Sum of initial and number.
    :rtype: int
    """
    total = initial + number
    return total
from urllib.parse import urlencode
from typing import OrderedDict
def make_pr_link(url, project, slug, from_branch, to_branch):
    """Generates a URL that can be used to create a PR"""
    if "github.com" in url:
        # GitHub compare URL: base branch first, then the head branch.
        return f"{url}/{project}/{slug}/compare/{to_branch}...{from_branch}"
    # Bitbucket Server style compare URL with explicit query parameters.
    query = urlencode(
        OrderedDict([("sourceBranch", from_branch), ("targetBranch", to_branch)])
    )
    return f"{url}/projects/{project}/repos/{slug}/compare/commits?" + query
from typing import List
from typing import Tuple
def generate_options_for_point(x: int, y: int, z: int) -> List[Tuple[int, int, int]]:
    """
    Generate the 24 axis-aligned orientations of the point (x, y, z).

    Each tuple is the image of the point under one proper rotation of the
    cube (the rotation group of order 24), so for a generic point all 24
    results are distinct.

    Bug fixed: the original list contained (-x, -y, z) twice and was missing
    (-x, -z, -y), yielding only 23 unique orientations.
    """
    # Rows are grouped by which signed axis the original y-axis maps to.
    return [(x, y, z), (z, y, -x), (-x, y, -z), (-z, y, x),
            (-y, x, z), (z, x, y), (y, x, -z), (-z, x, -y),
            (y, -x, z), (z, -x, -y), (-y, -x, -z), (-z, -x, y),
            (x, -z, y), (y, -z, -x), (-x, -z, -y), (-y, -z, x),
            (x, -y, -z), (-z, -y, -x), (-x, -y, z), (z, -y, x),
            (x, z, -y), (-y, z, -x), (-x, z, y), (y, z, x)
            ]
def filter_bin(length, index_pairs):
    """Enumerate integers whose selected binary digits match given values.

    Intended for binary flag scenarios.

    Args:
        length: number of bits considered; candidates range over
            [0, 2**length - 1].
        index_pairs: mapping {index: value} where index is the 1-based bit
            position counted from the least significant bit and value is the
            required bit (0 or 1).

    Returns:
        Sorted list of all ints in range whose specified bits match.

    >>> filter_bin(3, {1: 1})
    [1, 3, 5, 7]
    >>> filter_bin(3, {1: 1, 2: 0})
    [1, 5]
    >>> filter_bin(3, {1: 0, 2: 1})
    [2, 6]
    """
    # Bit shifting replaces the original bin()-string slicing: missing high
    # bits naturally read as 0, so no "not enough digits" special case is
    # needed. Also generalizes length=0, which int('', 2) rejected.
    matches = []
    for number in range(2 ** length):
        if all((number >> (index - 1)) & 1 == value
               for index, value in index_pairs.items()):
            matches.append(number)
    return matches
def switch_month(month: str):
    """
    Translates an english month to a french one. For example: 'Jan' becomes '(01)Janvier'.
    month : the month that will be translated
    """
    english_to_french = {
        "Jan": "(01)Janvier",
        "Feb": "(02)Fevrier",
        "Mar": "(03)Mars",
        "Apr": "(04)Avril",
        "May": "(05)Mai",
        "Jun": "(06)Juin",
        "Jul": "(07)Juillet",
        "Aug": "(08)Aout",
        "Sep": "(09)Septembre",
        "Oct": "(10)Octobre",
        "Nov": "(11)Novembre",
        "Dec": "(12)Decembre",
    }
    # Unknown abbreviations raise KeyError, same as the original lookup.
    return english_to_french[month]
import json
def extract_json(serie, k):
    """Extract a value by a key."""
    def pick(row):
        # Each row is a JSON document; pull out the requested key.
        return json.loads(row)[k]
    return serie.map(pick)
def escape_quotes(s: str) -> str:
    """Replaces double quotes in the input string with either ' or \\".

    If the string already contains single quotes, double quotes are escaped
    as \\" (replacing them with single quotes would be ambiguous); otherwise
    they are simply replaced by single quotes.

    Examples:
        >>> escape_quotes('one "two" three')
        "one 'two' three"
    """
    replacement = '\\"' if "'" in s else "'"
    return s.replace('"', replacement)
def process_icon_emoji(inIconEmoji: str) -> str:
    """Normalise an 'icon_emoji' string into the ':name:' form.

    Args:
        inIconEmoji:
            Single icon emoji string, possibly with stray colons or spaces
            surrounding the name.

    Returns:
        ':name:' for a non-empty name, otherwise an empty string.
    """
    name = inIconEmoji.strip(": ")
    if not name:
        return ""
    return f":{name}:"
def GetLabelsFromDict(metadata):
    """Converts a metadata dictionary to a string of labels.

    Args:
        metadata: a dictionary of string key value pairs.

    Returns:
        A string of labels in the format that Perfkit uses.
    """
    # dict.iteritems() is Python 2 only and raises AttributeError on
    # Python 3 (which this file targets — it uses f-strings elsewhere).
    return ','.join('|%s:%s|' % (k, v) for k, v in metadata.items())
def generate_md_code_str(code_snippet: str, description: str = 'Snippet') -> str:
    """Render a code snippet as an indentation-based Markdown code block.

    The normal ``` fence syntax doesn't seem to get picked up by mdv, which
    relies on indentation-based code blocks, so every line is padded with 4
    additional spaces instead. Hacky? Sure. Effective? Yup.

    :param code_snippet:
    :param description:
    :return: formatted code snippet
    """
    indented = code_snippet.replace('\n', '\n    ')
    return f'\n###{description}:\n    {indented}\n'
def subsample(data, every=5):
    """Subsample to the desired frequency. Use every `every`th sample."""
    step = every
    # Extended slicing keeps the container type (list, str, tuple, ...).
    return data[slice(None, None, step)]
def get_subclasses(klass):
    """Gets the list of direct/indirect subclasses of a class"""
    # Start with the direct children, then append each child's descendants.
    result = list(klass.__subclasses__())
    for child in list(result):
        result.extend(get_subclasses(child))
    return result
def is_vulnerable(source):
    """A simple boolean function that determines whether a page
    is SQL Injection vulnerable from its `response`, by looking for
    well-known database error strings."""
    errors = {
        # MySQL
        "you have an error in your sql syntax;",
        "warning: mysql",
        # SQL Server
        "unclosed quotation mark after the character string",
        # Oracle
        "quoted string not properly terminated",
    }
    # Lowercase once, not once per error string as the original did.
    lowered = source.lower()
    # any() short-circuits on the first matching error signature.
    return any(error in lowered for error in errors)
def skip_material(mtl: int) -> int:
    """Computes the next material index with respect to default avatar skin indices.

    Args:
        mtl (int): The current material index.

    Returns:
        int: The next material index to be used.
    """
    # Indices 1 and 6 are default avatar skins, so they are skipped over.
    step = 2 if mtl in (1, 6) else 1
    return mtl + step
import json
import requests
def getProcedures(host, service):
    """
    Get all the procedures (stations) using the WA-REST API
    """
    proc_url = (
        f"http://{host}/istsos/wa/istsos/services/{service}"
        "/procedures/operations/getlist"
    )
    # The WA-REST response is JSON with the procedures under 'data'.
    response = requests.get(proc_url)
    return json.loads(response.text)['data']
def agg_moy_par_pret(dataframe, group_var, prefix):
    """Aggregates the numeric values in a dataframe. This can
    be used to create features for each instance of the grouping variable.

    Parameters
    --------
        dataframe (dataframe):
            the dataframe to calculate the statistics on
        group_var (string):
            the variable by which to group df
        prefix (string):
            the variable used to rename the columns

    Return
    --------
        agg (dataframe):
            a dataframe with the statistics aggregated for
            all numeric columns. Each instance of the grouping variable will have
            the statistics (mean, min, max, sum; currently supported) calculated.
            The columns are also renamed to keep track of features created.
    """
    # Remove id variables other than grouping variable
    for col in dataframe:
        if col != group_var and 'SK_ID' in col:
            dataframe = dataframe.drop(columns = col)
    group_ids = dataframe[group_var]
    numeric_df = dataframe.select_dtypes('number')
    # Re-attach the grouping column in case select_dtypes dropped it.
    numeric_df[group_var] = group_ids
    # Group by the specified variable and calculate the statistics
    agg = numeric_df.groupby(group_var).agg(['mean']).reset_index()
    # Need to create new column names
    columns = [group_var]
    # Iterate through the variables names
    # NOTE(review): this relies on MultiIndex.levels ordering lining up with
    # the post-reset_index() column order — confirm with the pandas version
    # in use before changing the aggregation list.
    for var in agg.columns.levels[0]:
        # Skip the grouping variable
        if var != group_var:
            # Iterate through the stat names
            for stat in agg.columns.levels[1][:-1]:
                # Make a new column name for the variable and stat
                columns.append('%s_%s_%s' % (prefix, var, stat))
    agg.columns = columns
    return agg
import unicodedata
def lowercase_and_remove_accent(text):
    """
    Lowercase and strips accents from a piece of text based on
    https://github.com/facebookresearch/XLM/blob/master/tools/lowercase_and_remove_accent.py

    `text` is a list of tokens; a list of processed tokens is returned.
    """
    joined = " ".join(text).lower()
    # NFD decomposition splits accented characters into base + combining mark.
    decomposed = unicodedata.normalize("NFD", joined)
    # Drop the combining marks (category 'Mn'), i.e. the accents themselves.
    stripped = "".join(
        ch for ch in decomposed if unicodedata.category(ch) != "Mn"
    )
    return stripped.lower().split(" ")
import random
def blood_pressure_depending_on_age(age):
    """Randomly generate a blood pressure value depending upon the given age
    value.

    It is assumed that for a given age value the blood pressure is normally
    distributed with an average blood pressure of 75 at birth (age 0) and of
    90 at age 100, and standard deviation in blood pressure of 4.

    NOTE(review): the mean formula below is 75 + age/100, which gives 76 at
    age 100, not the 90 the docstring claims — confirm which is intended.

    Raises:
        TypeError: if `age` is not an int or float.
        ValueError: if `age` is outside [0, 130].
    """
    # TypeError/ValueError are subclasses of Exception, so callers catching
    # the original generic Exception keep working.
    if not isinstance(age, (int, float)):
        raise TypeError('Age value given is not a number: %s' % (str(age)))
    if age < 0 or age > 130:
        raise ValueError('Age value below 0 or above 130 given')
    avrg_bp = 75.0 + age / 100.0
    std_dev_bp = 4.0
    bp = random.normalvariate(avrg_bp, std_dev_bp)
    # Clamp physically impossible negative draws to zero.
    if bp < 0.0:
        bp = 0.0
        print('Warning, blood pressure value of 0.0 returned!')
    return bp
def write_content(path, content):
    """Write string content to path name."""
    print(f"- writing {path}")
    with open(path, 'w') as handle:
        handle.write(content)
    return path
import hashlib
def sha1_hash(bytes):
    """
    Compute the SHA-1 hash of a byte stream.

    NOTE: the parameter shadows the built-in `bytes`; the name is kept
    for backward compatibility with keyword callers.
    """
    # hashlib constructors accept the initial data directly.
    return hashlib.sha1(bytes).digest()
def founder_allocation() -> float:
    """How much tokens are allocated to founders, etc."""
    founder_share = 0.2
    return founder_share
import torch
def generate_padding_masks(data, pad_value=0):
    """
    Build an attention mask flagging padding positions.

    Positions whose value equals `pad_value` are marked 1 (True), meaning
    the model must not attend over them.

    :param data: tensor of shape (sequence_len, batch_size).
    :param pad_value: the value that represents padding. Default: 0.
    :return: boolean mask of shape (batch_size, 1, sequence_len).
    """
    with torch.no_grad():
        is_pad = data.eq(pad_value)
        # Transpose to batch-first, add a singleton dim for broadcasting.
        return is_pad.to(data.device).t().unsqueeze(1)
from typing import Dict
def _get_local_logging_config() -> Dict:
"""Create local logging config for running the dummy projects."""
return {
"version": 1,
"formatters": {
"simple": {"format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s"}
},
"root": {"level": "ERROR", "handlers": ["console"]},
"loggers": {
"kedro": {"level": "ERROR", "handlers": ["console"], "propagate": False}
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"level": "ERROR",
"formatter": "simple",
"stream": "ext://sys.stdout",
}
},
"info_file_handler": {
"class": "logging.handlers.RotatingFileHandler",
"level": "ERROR",
"formatter": "simple",
"filename": "logs/info.log",
},
} | bigcode/self-oss-instruct-sc2-concepts |
import warnings
def get_mol_3d_coordinates(mol):
    """Get 3D coordinates of the molecule.

    This function requires that molecular conformation has been initialized.

    Parameters
    ----------
    mol : rdkit.Chem.rdchem.Mol
        RDKit molecule instance.

    Returns
    -------
    numpy.ndarray of shape (N, 3) or None
        The 3D coordinates of atoms in the molecule. N for the number of atoms in
        the molecule. For failures in getting the conformations, None will be returned.

    Examples
    --------
    An error will occur in the example below since the molecule object does not
    carry conformation information.

    >>> from rdkit import Chem
    >>> from dgllife.utils import get_mol_3d_coordinates
    >>> mol = Chem.MolFromSmiles('CCO')

    Below we give a working example based on molecule conformation initialized from calculation.

    >>> from rdkit.Chem import AllChem
    >>> AllChem.EmbedMolecule(mol)
    >>> AllChem.MMFFOptimizeMolecule(mol)
    >>> coords = get_mol_3d_coordinates(mol)
    >>> print(coords)
    array([[ 1.20967478, -0.25802181,  0.        ],
           [-0.05021255,  0.57068079,  0.        ],
           [-1.15946223, -0.31265898,  0.        ]])
    """
    try:
        conf = mol.GetConformer()
        conf_num_atoms = conf.GetNumAtoms()
        mol_num_atoms = mol.GetNumAtoms()
        assert mol_num_atoms == conf_num_atoms, \
            'Expect the number of atoms in the molecule and its conformation ' \
            'to be the same, got {:d} and {:d}'.format(mol_num_atoms, conf_num_atoms)
        return conf.GetPositions()
    # The original bare `except:` also swallowed KeyboardInterrupt and
    # SystemExit; `except Exception` keeps the best-effort behaviour while
    # letting process-control exceptions propagate.
    except Exception:
        warnings.warn('Unable to get conformation of the molecule.')
        return None
def population_render_transparency(x, invert_colours=False, b=None):
    """Render image from patches with transparency.

    Renders patches with transparency, using black as the transparent colour.

    Args:
        x: tensor of transformed image patches of shape [S, B, 5, H, W];
            channels 0-2 are RGB and channel 3 is the alpha mask.
        invert_colours: Invert all RGB values.
        b: optional tensor of background RGB image of shape [S, 3, H, W].

    Returns:
        Tensor of rendered RGB images of shape [S, H, W, 3] (channels last).
        Note: the original docstring claimed [S, 3, H, W], but the final
        permute makes the output channels-last.
    """
    # Alpha-weight the RGB channels, then composite the B patches by
    # summation: [S, B, 3, H, W] -> [S, 3, H, W]. (The original re-sliced
    # [:, :, :3] a second time, which was a no-op.)
    rgb = x[:, :, :3, :, :] * x[:, :, 3:4, :, :]
    y = rgb.sum(1)
    if invert_colours:
        y[:, :3, :, :] = 1.0 - y[:, :3, :, :]
    # Add backgrounds [S, 3, H, W], keeping values in the valid range.
    if b is not None:
        b = b.cuda() if x.is_cuda else b.cpu()
        y = (y + b).clamp(0., 1.)
    return y.clamp(0., 1.).permute(0, 2, 3, 1)
def youngs(vp=None, vs=None, rho=None, mu=None, lam=None, bulk=None, pr=None,
           pmod=None):
    """
    Computes Young's modulus given either Vp, Vs, and rho, or
    any two elastic moduli (e.g. lambda and mu, or bulk and P
    moduli). SI units only.

    Args:
        vp, vs, and rho
        or any 2 from lam, mu, bulk, pr, and pmod
    Returns:
        Young's modulus in pascals, Pa; None when the supplied
        combination of inputs is insufficient.
    """
    def given(*values):
        # True when every listed argument was supplied.
        return all(v is not None for v in values)

    # Combinations are tried in the same priority order as the original.
    if given(vp, vs, rho):
        return rho * vs**2 * (3.*vp**2 - 4.*vs**2) / (vp**2 - vs**2)
    if given(mu, lam):
        return mu * (3.*lam + 2*mu) / (lam + mu)
    if given(bulk, lam):
        return 9.*bulk * (bulk - lam) / (3.*bulk - lam)
    if given(bulk, mu):
        return 9.*bulk*mu / (3.*bulk + mu)
    if given(lam, pr):
        return lam * (1+pr) * (1 - 2*pr) / pr
    if given(pr, mu):
        return 2. * mu * (1+pr)
    if given(pr, bulk):
        return 3. * bulk * (1 - 2*pr)
    return None
def initial_workload_is_ready(ops_test, app_names) -> bool:
    """Checks that the initial workload (ie. x/0) is ready.

    Args:
        ops_test: pytest-operator plugin
        app_names: array of application names to check for

    Returns:
        whether the workloads are active or not (vacuously True when
        app_names is empty)
    """
    applications = ops_test.model.applications
    for name in app_names:
        if applications[name].units[0].workload_status != "active":
            return False
    return True
def duplication_consistency(set_one, set_two):
    """
    Calculates the duplication consistency score for two sets of species.

    :param set_one: set/list of species
    :param set_two: set/list of species
    :return: float with duplication consistency score
    :raises ZeroDivisionError: if both inputs are empty
    """
    # Dead commented-out experimental code removed; behaviour unchanged.
    union_size = len(set(set_one).union(set(set_two)))
    intersection_size = len(set(set_one).intersection(set(set_two)))
    return intersection_size / union_size
def _auto_correlations(n_states):
"""Returns list of autocorrelations
Args:
n_states: number of local states
Returns:
list of tuples for autocorrelations
>>> l = _auto_correlations(np.arange(3))
>>> assert l == [(0, 0), (1, 1), (2, 2)]
"""
local_states = n_states
return [(l, l) for l in local_states] | bigcode/self-oss-instruct-sc2-concepts |
import re
def process_clinical_significance(clin_sig):
    """
    Processes ClinVar clinical significance string into a format suitable for OT JSON schema.

    Splits multiple clinical significance levels into an array and normalises
    names (lowercase, spaces as the only delimiter). Multiple levels are
    separated by either of two delimiters: '/' or ', '. The output array is
    sorted alphabetically.

    Example: 'Benign/Likely benign, risk_factor' → ['benign', 'likely benign', 'risk factor'].
    """
    normalised = clin_sig.lower().replace('_', ' ')
    levels = re.split('/|, ', normalised)
    return sorted(levels)
from typing import Dict
def linear_utility(
    exchange_params_by_currency_id: Dict[str, float],
    balance_by_currency_id: Dict[str, int],
) -> float:
    """
    Compute agent's utility given her utility function params and a good bundle.

    :param exchange_params_by_currency_id: exchange params by currency
    :param balance_by_currency_id: balance by currency
    :return: utility value
    """
    # Summing a generator avoids materialising the intermediate list.
    return sum(
        exchange_params_by_currency_id[currency_id] * balance
        for currency_id, balance in balance_by_currency_id.items()
    )
def sum_of_square_deviations(data, c):
    """Return sum of square deviations of sequence data."""
    squared_deviations = ((float(x) - c) ** 2 for x in data)
    return sum(squared_deviations)
def DataArray_to_dictionary(da, include_data=True):
    """ Convert DataArray to dictionary

    Args:
        da (DataArray): data to convert
        include_data (bool): If True then include the .ndarray field

    Returns:
        dict: dictionary containing the serialized data
    """
    fields = ['label', 'name', 'unit', 'is_setpoint', 'full_name', 'array_id',
              'shape', 'set_arrays']
    if include_data:
        fields.append('ndarray')
    dct = {field: getattr(da, field) for field in fields}
    # Serialize the set arrays by their ids rather than the objects.
    dct['set_arrays'] = tuple(arr.array_id for arr in dct['set_arrays'])
    return dct
from torch.utils.data._utils.collate import default_collate
from typing import Sequence
from typing import Dict
def multi_supervision_collate_fn(batch: Sequence[Dict]) -> Dict:
    """
    Custom collate_fn for K2SpeechRecognitionDataset.

    It merges the items provided by K2SpeechRecognitionDataset into the following structure:

    .. code-block::

        {
            'features': float tensor of shape (B, T, F)
            'supervisions': [
                {
                    'sequence_idx': Tensor[int] of shape (S,)
                    'text': List[str] of len S
                    'start_frame': Tensor[int] of shape (S,)
                    'num_frames': Tensor[int] of shape (S,)
                }
            ]
        }

    Dimension symbols legend:
    * ``B`` - batch size (number of Cuts),
    * ``S`` - number of supervision segments (greater or equal to B, as each Cut may have multiple supervisions),
    * ``T`` - number of frames of the longest Cut
    * ``F`` - number of features
    """
    # Map each example's dataset-level sequence_idx (taken from its first
    # supervision) to its position within this batch, so supervisions can be
    # re-indexed batch-locally below.
    dataset_idx_to_batch_idx = {
        example['supervisions'][0]['sequence_idx']: batch_idx
        for batch_idx, example in enumerate(batch)
    }
    def update(d: Dict, **kwargs) -> Dict:
        # In-place dict update that returns the dict, usable in expressions.
        for key, value in kwargs.items():
            d[key] = value
        return d
    # Flatten all supervisions across examples, rewriting sequence_idx to the
    # batch index; default_collate then stacks the per-key values.
    # NOTE(review): `update` mutates the supervision dicts of the incoming
    # batch — confirm callers do not reuse `batch` afterwards.
    supervisions = default_collate([
        update(sup, sequence_idx=dataset_idx_to_batch_idx[sup['sequence_idx']])
        for example in batch
        for sup in example['supervisions']
    ])
    feats = default_collate([example['features'] for example in batch])
    return {
        'features': feats,
        'supervisions': supervisions
    }
def ebc_to_srm(ebc):
    """
    Convert a beer colour value from EBC to SRM units.

    :param float ebc: EBC Color
    :return: SRM Color
    :rtype: float
    """
    srm = ebc / 1.97
    return srm
def _unique_id_to_host_id(unique_id):
"""Return the chip id unique to the daplink host procesor
Unique ID has the following fomat
Board ID - 4 bytes
Version - 4 bytes
Host ID - Everything else
"""
return unique_id[8:8 + 32] | bigcode/self-oss-instruct-sc2-concepts |
import ntpath
def pathLeaf(path):
    """
    Return the basename of a file/directory path, robust to trailing slashes.

    For example, pathLeaf('/hame/saheel/git_repos/szz/abc.c/') returns 'abc.c'.

    Args
    ----
    path: string
        Path to some file or directory in the system

    Returns
    -------
    string
        Basename of the file or directory
    """
    head, tail = ntpath.split(path)
    if tail:
        return tail
    # Path ended with a separator: take the last component of the head instead.
    return ntpath.basename(head)
import copy
def convertUnits(ww_params, pixel_size):
    """
    Convert parameters to the units used by the analysis pipeline, padding
    the result with zeros up to the length (7) the pipeline expects.
    """
    converted = copy.copy(ww_params)
    converted[0] = converted[0] * pixel_size
    converted[1] = converted[1] * 1.0e+3
    converted[2] = converted[2] * 1.0e+3
    # Pad with zeros so the pipeline always sees 7 parameters.
    while len(converted) < 7:
        converted.append(0.0)
    return converted
import random
def generate_smallest_secret(poly_degree, crc_length, min_size=0, echo=False):
    """ Helper function to generate the smallest random secret which is:
    - divisible by 8, so that it can be encoded to bytes
    - secret length + crc_length is divisible by poly_degree + 1
      to be able to split secret into coefficients for polynomial

    :param poly_degree: polynomial degree as int
    :param crc_length: CRC length as int
    :param min_size: minimum bit size as int
    :param echo: if True, printing intermediate messages to console
    :returns bytes """
    # Round the minimum size up to the next whole byte.
    bit_size = min_size if min_size % 8 == 0 else min_size + (8 - (min_size % 8))
    # Grow byte by byte until the polynomial-splitting constraint holds.
    while (bit_size + crc_length) % (poly_degree + 1) != 0:
        bit_size += 8
    # Draw a random secret of exactly bit_size bits.
    secret_int = random.randint(0, 2 ** bit_size - 1)
    secret_bytes = secret_int.to_bytes(bit_size // 8, byteorder='big')
    if echo:
        print('Secret size is {} bits'.format(bit_size))
        print('Secret bytes are {}'.format(secret_bytes))
    return secret_bytes
def force_key(d, k):
    """Return d[k] if the key exists and its value is not None, else "".

    Bug fix: ``dict.has_key`` was removed in Python 3; the ``in`` operator
    is the portable membership test.
    """
    return d[k] if k in d and d[k] is not None else ""
def foundSolution(solver_result):
    """
    Return True when the solver output indicates a solution was found,
    i.e. it contains neither "Valid" nor "unsat".
    """
    markers = ("Valid", "unsat")
    return all(marker not in solver_result for marker in markers)
import torch
def one_hotify(vec, number_of_classes, dimension):
    """
    Turn a tensor of integers into a matrix of one-hot vectors.

    :param vec: 1-D tensor of class indices to be converted.
    :param number_of_classes: How many possible classes the one hot vectors encode.
    :param dimension: Which dimension stores the elements of vec. If 0, they're stored in the rows. If 1, the columns.
    :return: A matrix of one-hot vectors, each row or column corresponding to one element of vec
    """
    num_vectors = vec.size()[0]
    binary_vec = torch.zeros(num_vectors, number_of_classes)
    # Vectorized scatter replaces the original per-element Python loop
    # (same result, no Python-level iteration).
    binary_vec[torch.arange(num_vectors), vec.long()] = 1
    if dimension == 1:
        binary_vec.t_()
    return binary_vec
def get_fuels(collection, country):
    """
    Accumulate installed capacity per fuel type for a given country.

    :param collection: MongoDB collection to aggregate over
    :param country: country name to search for
    :return: aggregation result with one document per fuel type
    """
    # Full-text search for the requested country.
    search_stage = {
        '$search': {
            'index': 'default',
            'text': {
                'query': country,
                'path': 'country_long'
            }
        }
    }
    # Sum capacity, grouped by primary fuel type.
    group_stage = {
        '$group': {
            '_id': '$primary_fuel',
            'totCapacity': {
                '$sum': {'$toDecimal': '$capacity_mw'}
            }
        }
    }
    return collection.aggregate([search_stage, group_stage])
def extract_category(report, key):
    """
    Return the entry for ``key`` from a classification report, tagged with
    the category name under the 'category' key.

    Note: the returned dict is the report's own entry and is modified in place.

    Args:
        report: The report
        key: The key to a category on the report
    """
    entry = report.get(key)
    entry['category'] = key
    return entry
def get_int_from_little_endian_bytearray(array, offset):
    """ Get an int from a byte array, using little-endian representation,\
        starting at the given offset

    :param array: The byte array to get the int from
    :type array: bytearray
    :param offset: The offset at which to start looking
    :type offset: int
    :return: The decoded integer
    :rtype: int
    :raise IndexError: if fewer than 4 bytes are available at offset
    """
    chunk = array[offset:offset + 4]
    if len(chunk) != 4:
        # Preserve the original's IndexError on out-of-range access
        # (a bare slice would silently decode fewer bytes).
        raise IndexError('bytearray index out of range')
    # int.from_bytes is the idiomatic, C-speed form of the manual shift-and-or.
    return int.from_bytes(chunk, byteorder='little')
def simple_dist(x1: float, x2: float) -> float:
    """Distance between two scalar samples, as used by the dtw distance.

    Parameters
    ----------
    x1:
        first value
    x2:
        second value

    Returns
    -------
    float:
        absolute difference between x1 and x2
    """
    diff = x1 - x2
    return diff if diff >= 0 else -diff
def fitness_bc(agent, environment, raw_fitness, path):
    """Return (raw fitness, behavior characterization) of an agent evaluation.

    When no path was recorded, a placeholder observation [0] is used instead.
    """
    observation = path if len(path) > 0 else [0]
    return raw_fitness, observation
def accuracy(y_pred, y_truth):
    """
    Accuracy: #right / #all

    Parameters
    -------------
    y_pred : numpy array, (m, 1)
        the predicted labels
    y_truth : numpy array, (m, 1)
        the true labels

    Returns
    --------
    accuracy : double
        fraction of correct predictions; larger is better
    """
    num_samples = y_truth.shape[0]
    num_correct = (y_pred == y_truth).sum()
    return num_correct / num_samples
def train_test_split(t, df):
    """
    Assign a timestep to the train/validation/test subset (60-20-20 split).
    Targeted to be used in an apply function to a Series object.

    :param t: current timestep (int)
    :param df: pandas Dataframe containing the data
    :returns: subset categorization as string
    """
    n = len(df)
    if t >= n * 0.8:
        return 'test'
    if t >= n * 0.6:
        return 'val'
    return 'train'
import traceback
def cut_traceback(tb, func_name):
    """
    Cut off a traceback at the function with the given name.
    The func_name's frame is excluded.

    Args:
        tb: traceback object, as returned by sys.exc_info()[2]
        func_name: function name

    Returns:
        Reduced traceback.
    """
    tb_orig = tb
    # Walk the frame summaries in lockstep with the linked traceback objects.
    # tb is advanced *before* the name check, so when fname matches, tb already
    # points past func_name's frame — that frame is thereby excluded.
    for _, _, fname, _ in traceback.extract_tb(tb):
        tb = tb.tb_next
        if fname == func_name:
            break
    if tb is None:
        # We could not find the method, take the full stack trace.
        # This may happen on some Python interpreters/flavors (e.g. PyInstaller).
        return tb_orig
    else:
        return tb
def crop_2d_using_xy_boundaries(mask, boundaries):
    """
    Crop a 2D dataset to the given x/y window.

    :mask: any 2D dataset supporting 2D slicing
    :boundaries: dict{xmin,xmax,ymin,ymax}
    :return: cropped mask
    """
    ymin, ymax = boundaries['ymin'], boundaries['ymax']
    xmin, xmax = boundaries['xmin'], boundaries['xmax']
    return mask[ymin:ymax, xmin:xmax]
def get_unit(a):
    """Extract the time unit from a datetime64 array's dtype.

    :param a: numpy array with a datetime64 dtype
    :return: the unit string, e.g. 'ns' for datetime64[ns]
    :raises TypeError: if the array is not datetime64
    """
    typestr = a.dtype.str
    i = typestr.find('[')
    if i == -1:
        # Bug fix: the original passed the dtype as a *second argument* to
        # TypeError ("%s", a.dtype), leaving the placeholder unformatted;
        # interpolate it properly instead.
        raise TypeError("Expected a datetime64 array, not %s" % a.dtype)
    return typestr[i + 1: -1]
def find_dimension_contexts(instance,context,dimensions):
    """Returns a list of contexts containing the given dimension values and having the same period as the given context.

    Args:
        instance: XBRL-style instance whose ``contexts`` are scanned —
            presumably an XBRL instance object; confirm against callers.
        context: reference context; candidates must match its period and
            entity-identifier aspect values.
        dimensions: mapping from dimension to a collection of accepted values.

    Returns:
        list: matching contexts. Contexts with no dimension aspect values
        at all are skipped.
    """
    contexts = []
    for dimcontext in instance.contexts:
        # Only consider contexts for the same period and the same entity.
        if dimcontext.period_aspect_value == context.period_aspect_value and dimcontext.entity_identifier_aspect_value == context.entity_identifier_aspect_value:
            dim_values = list(dimcontext.dimension_aspect_values)
            if dim_values:
                # Every dimension value of the candidate must be listed in
                # the accepted ``dimensions`` mapping; one miss disqualifies.
                matching_context = True
                for dim in dim_values:
                    if dim.dimension not in dimensions or dim.value not in dimensions[dim.dimension]:
                        matching_context = False
                        break
                if matching_context:
                    contexts.append(dimcontext)
    return contexts
def atoms_to_xyz_file(atoms, filename, title_line='', append=False):
    """
    Write a standard .xyz file from a list of atoms.

    Arguments:
        atoms (list(autode.atoms.Atom)): List of autode atoms to print

        filename (str): Name of the file (with .xyz extension)

    Keyword Arguments:
        title_line (str): Second line of the xyz file, can be blank

        append (bool): Do or don't append to this file. With append=False
                       filename will be overwritten if it already exists
    """
    assert atoms is not None
    assert filename.endswith('.xyz')

    mode = 'a' if append else 'w'
    with open(filename, mode) as xyz_file:
        # Header: atom count, then the (possibly blank) title line.
        print(len(atoms), title_line, sep='\n', file=xyz_file)

        for atom in atoms:
            x, y, z = atom.coord
            print(f'{atom.label:<3}{x:^10.5f}{y:^10.5f}{z:^10.5f}',
                  file=xyz_file)
    return None
from typing import List
from typing import Dict
import torch
def prepare_batch(batch: List) -> Dict[str, torch.Tensor]:
    """
    Collate a list of Dataset samples into a single batch.

    Returns:
        A dictionary of stacked tensors; label positions equal to 0 (padding)
        are replaced with -100 so they are ignored by the loss.
    """
    def _stack(key):
        # Stack the per-example tensors for one field into a batch tensor.
        return torch.stack([example[key] for example in batch])

    labels = _stack('target_ids')
    labels[labels == 0] = -100  # mask padding tokens out of the loss
    return {
        'input_ids': _stack('input_ids'),
        'attention_mask': _stack('attention_mask'),
        'labels': labels,
        'decoder_attention_mask': _stack('target_attention_mask'),
    }
def variants(*strings):
    """Creates three variants of each string:

    - lowercase (e.g. `husky`)
    - title version (e.g. `Husky`)
    - uppercase (e.g. `HUSKY`)

    :return: A list of all variants of all given strings.
    :rtype: list
    """
    return [
        variant
        for string in strings
        for variant in (string.lower(), string.lower().title(), string.upper())
    ]
def get_start_end(title, print_char="-", size=150, nl_str=True, nl_end=True):
    """
    Build a banner pair surrounding *title*.

    :return:
        start: ------------------------------- <title> -------------------------------
        end:   -----------------------------------------------------------------------
    """
    pad_width = (size - len(title) - 2) // 2
    pad = print_char * pad_width
    start = f"{pad} {title} {pad}"
    end = print_char * len(start)
    if nl_str:
        start = f"\n{start}"
    if nl_end:
        end = f"{end}\n"
    return start, end
def has_text(text):
    """
    Return False if the text is empty or has only spaces.
    A true value is returned otherwise.
    """
    if not text:
        # Preserve the original short-circuit: falsy input is returned as-is.
        return text
    return not text.isspace()
import torch
def jacobian(f, x):
    """Computes the Jacobian of f w.r.t x.
    This is according to the reverse mode autodiff rule,
    sum_i v^b_i dy^b_i / dx^b_j = sum_i x^b_j R_ji v^b_i,
    where:
    - b is the batch index from 0 to B - 1
    - i, j are the vector indices from 0 to N-1
    - v^b_i is a "test vector", which is set to 1 column-wise to obtain the correct
      column vectors out ot the above expression.
    :param f: function R^N -> R^N
    :param x: torch.tensor of shape [B, N]
    :return: Jacobian matrix (torch.tensor) of shape [B, N, N]
    """
    B, N = x.shape
    # NOTE(review): setting requires_grad here assumes x is a leaf tensor;
    # a non-leaf input would raise — confirm callers always pass leaves.
    x.requires_grad = True
    # f apparently takes an extra first argument that is zeroed out here
    # (shape [B, 1]) — TODO confirm the intended signature of f.
    in_ = torch.zeros(B, 1)
    y = f(in_, x)
    jacobian = list()
    for i in range(N):
        # Test vector selecting output component i (see docstring).
        v = torch.zeros_like(y)
        v[:, i] = 1.
        # One vector-Jacobian product per output component yields one
        # column of the Jacobian; create_graph keeps it differentiable.
        dy_i_dx = torch.autograd.grad(y,
                                      x,
                                      grad_outputs=v,
                                      retain_graph=True,
                                      create_graph=True,
                                      allow_unused=True)[0]  # shape [B, N]
        jacobian.append(dy_i_dx)
    jacobian = torch.stack(jacobian, dim=2).requires_grad_()
    return jacobian
from typing import List
def mimes_to_codec(mimes: List[str]) -> str:
    """Utility function for turning mutagen's mime types into a single codec string."""
    for mime in mimes:
        if "codecs=opus" in mime:
            return "opus"
    # No opus marker anywhere: derive the codec from the first mime type.
    return mimes[0].replace("audio/", "")
import re
def split_source_id(source_id):
    """Retrieve the source_name and version information from a source_id.

    Standard form: {source_name}_v{search_version}.{submission_version}

    Arguments:
        source_id (str): The source_id to split. If this is not a valid-form source_id,
                the entire string will be assumed to be the source_name and source_id
                and the versions will be 0.

    Returns:
        dict:
            success (bool): True if the versions were extracted, False otherwise.
            source_name (str): The base source_name.
            source_id (str): The assembled source_id.
            search_version (int): The Search version from the source_id.
            submission_version (int): The Connect version from the source_id.
    """
    # A valid source_id ends in "_v<int>.<int>".
    if re.search(r"_v[0-9]+\.[0-9]+$", source_id) is None:
        return {
            "success": False,
            "source_name": source_id,
            "source_id": source_id,
            "search_version": 0,
            "submission_version": 0
        }
    source_name, versions = source_id.rsplit("_v", 1)
    search_version, submission_version = versions.split(".", 1)
    return {
        "success": True,
        "source_name": source_name,
        "source_id": "{}_v{}.{}".format(source_name, search_version, submission_version),
        "search_version": int(search_version),
        "submission_version": int(submission_version)
    }
def pruneListByIndices(lst, indices):
    """Prunes ``lst`` to only keep elements at the given ``indices``.

    ``indices`` is converted to a set once so each membership test is O(1)
    instead of O(len(indices)) per element.
    """
    keep = set(indices)
    return [item for i, item in enumerate(lst) if i in keep]
def get_machine_type(cpu_cores, memory, accelerator_type):
    """Returns the GCP AI Platform machine type.

    TPU accelerators map to "cloud_tpu"; otherwise the (cpu_cores, memory)
    pair selects an n1 machine type (KeyError for unknown combinations).
    """
    if accelerator_type.value in ("TPU_V2", "TPU_V3"):
        return "cloud_tpu"
    machine_type_map = {
        (4, 15): "n1-standard-4",
        (8, 30): "n1-standard-8",
        (16, 60): "n1-standard-16",
        (32, 120): "n1-standard-32",
        (64, 240): "n1-standard-64",
        (96, 360): "n1-standard-96",
        (2, 13): "n1-highmem-2",
        (4, 26): "n1-highmem-4",
        (8, 52): "n1-highmem-8",
        (16, 104): "n1-highmem-16",
        (32, 208): "n1-highmem-32",
        (64, 416): "n1-highmem-64",
        (96, 624): "n1-highmem-96",
        (16, 14.4): "n1-highcpu-16",
        (32, 28.8): "n1-highcpu-32",
        (64, 57.6): "n1-highcpu-64",
        (96, 86.4): "n1-highcpu-96",
    }
    return machine_type_map[(cpu_cores, memory)]
import torch
def unk_init(dim):
    """
    Initialize an out-of-vocabulary word embedding by sampling each
    component uniformly from [0, 1).

    :param dim: word embedding dimension
    :return: randomly initialized tensor of shape (1, dim)
    """
    embedding = torch.rand(1, dim)
    return embedding
def convert_txt_to_inds(txt, char2ind, eos=False, sos=False):
    """
    Map characters to vocabulary indices.

    Args:
        txt: Array of chars to convert to inds.
        char2ind: Lookup dict from chars to inds.
        eos: If True, append the '<EOS>' index.
        sos: If True, prepend the '<SOS>' index.

    Returns: The converted chars, i.e. list of ints.
    """
    inds = []
    if sos:
        inds.append(char2ind['<SOS>'])
    inds.extend(char2ind[ch] for ch in txt)
    if eos:
        inds.append(char2ind['<EOS>'])
    return inds
def pairwise_prefs(A,P,params,distance_bound=1):
    """
    Return a dict pref that gives for each pair (i,j) of alternatives from A
    the sum, over voters that prefer i to j, of how many positions higher i is than
    j in the voter's ballot (but not more than "distance_bound" per ballot).

    If params is None or params["missing_preferred_less"]==True:
        A short ballot is interpreted as listing the top alternatives only;
        the unlisted alternatives are preferred less than any listed alternative.
    else:
        A short ballot contributes nothing for or against the missing candidates.

    This routine also handles equals signs in ballots.
    If distance_bound is >= |A|-1, then we effectively have a Borda-type measure.

    Args:
        A: iterable of alternatives.
        P: mapping from ballot (a tuple of alternatives, possibly containing
           "=") to that ballot's weight/count.
        params: options dict (see above) or None.
        distance_bound: per-ballot cap on the credited position difference
            (values < 1 are clamped to 1).
    """
    distance_bound = max(1,distance_bound)
    pref = { }
    for x in A:
        for y in A:
            pref[(x,y)] = 0
    for ballot in P:
        # compute logical "position" of each alternative on the ballot
        pos = { }        # "position" of each alternative in the ballot (0-indexed)
        for x in A:
            pos[x] = None
        nextpos = 0      # position of next candidate to be seen
        for x in ballot:
            if x == "=":
                # "=" ties the following candidate with the previous one.
                nextpos = max(0,nextpos-1)
            elif pos[x] is None:
                pos[x] = nextpos
                nextpos += 1
        if params==None or params["missing_preferred_less"]:
            # everything mentioned > everything not: unlisted alternatives
            # share the position just after the last listed one.
            for x in A:
                if pos[x] is None:
                    pos[x] = nextpos
        # now compute "preference" matrix contribution for this ballot
        # (a dead local ``bound = 1`` was removed here)
        for x in A:
            for y in A:
                if pos[x] is not None and pos[y] is not None and pos[x]<pos[y]:
                    pref[(x,y)] += P[ballot] * min( (pos[y]-pos[x]), distance_bound)
    return pref
def common_parent(a, b):
    """
    Find the common parent tile of both a and b. The common parent is the tile
    at the highest zoom which both a and b can be transformed into by lowering
    their zoom levels.

    NOTE(review): assumes the tile objects expose ``zoom``, ``row``, ``column``
    and ``zoomTo``/``zoomBy``/``container`` methods — confirm against the tile
    class used by callers.
    """
    # First bring both tiles to the same (lower) zoom level.
    if a.zoom < b.zoom:
        b = b.zoomTo(a.zoom).container()
    elif a.zoom > b.zoom:
        a = a.zoomTo(b.zoom).container()
    # Then step both up one zoom level at a time until they coincide.
    while a.row != b.row or a.column != b.column:
        a = a.zoomBy(-1).container()
        b = b.zoomBy(-1).container()
    # by this point a == b.
    return a
import calendar
import random
def random_day(month):
    """Return a random day-of-month valid for the given month (of 2020)."""
    _, last_day = calendar.monthrange(2020, month)
    return random.randint(1, last_day)
def divide(dataset, target_column):
    """
    Divides the dataset in X and y.
    This function is called inside prepare_data function in order to prepare data.

    :param dataset: pandas DataFrame holding features and target
    :param target_column: name of the target column
    :return: X (all columns but the target) and y (the target column)
    """
    y = dataset[target_column]
    X = dataset.drop(columns=[target_column])
    return X, y
def extract_post_story(div_id_story):
    """
    Extract the post text contents, stripping line breaks and whitespace,
    and truncating everything from the "SHARE /" marker onwards.
    """
    before_keyword = "SHARE /"
    text = div_id_story.get_text().strip()
    text = text.replace('\n', ' ').replace('\r', '')
    return text[:text.find(before_keyword)]
def mean(x):
    """Return the mean of an array taken across the first (sample) dimension."""
    return x.mean(axis=0)
from typing import List
from typing import Set
def _extract_target_labels(targets_in_order: List[dict], target_name: str) -> Set[str]:
"""Collect a set of all the board names from the inherits field in each target in the hierarchy.
Args:
targets_in_order: list of targets in order of inheritance, starting with the target up to its highest ancestor
target_name: the name of the target to find the labels for
Returns:
A set of names of boards that make up the inheritance tree for the target
"""
labels = {target_name}
for target in targets_in_order:
for parent in target.get("inherits", []):
labels.add(parent)
return labels | bigcode/self-oss-instruct-sc2-concepts |
def svm_predict(classifier, test_data):
    """
    Predict labels for the given test data using the given classifier pipeline.

    :param sklearn.pipeline.Pipeline classifier: fitted classifier pipeline used for prediction
    :param pandas.core.series.Series test_data: samples whose labels should be predicted
    :return: the predicted labels
    """
    predictions = classifier.predict(test_data)
    return predictions
def contfractbeta(
    a: float, b: float, x: float, ITMAX: int = 5000, EPS: float = 1.0e-7
) -> float:
    """Continued fraction form of the incomplete Beta function.

    Code translated from: Numerical Recipes in C.

    Example kindly taken from blog:
    https://malishoaib.wordpress.com/2014/04/15/the-beautiful-beta-functions-in-raw-python/

    :param float a: a
    :param float b: b
    :param float x: x
    :param int ITMAX: max number of iterations, default is 5000.
    :param float EPS: epsilon precision parameter, default is 1e-7.
    :returns: continued fraction form
    :rtype: float
    :raises ValueError: if the expansion does not converge within ITMAX steps.
    """
    # Continued-fraction recurrence ("betacf" in Numerical Recipes):
    # az/bz track the current numerator/denominator estimates, am/bm the
    # previous ones; everything is renormalized by bpp each iteration.
    az = 1.0
    bm = 1.0
    am = 1.0
    qab = a + b
    qap = a + 1.0
    qam = a - 1.0
    bz = 1.0 - qab * x / qap
    for i in range(ITMAX + 1):
        em = float(i + 1)
        tem = em + em
        # Even step of the continued fraction.
        d = em * (b - em) * x / ((qam + tem) * (a + tem))
        ap = az + d * am
        bp = bz + d * bm
        # Odd step of the continued fraction.
        d = -(a + em) * (qab + em) * x / ((qap + tem) * (a + tem))
        app = ap + d * az
        bpp = bp + d * bz
        aold = az
        # Renormalize by bpp so the denominator stays at 1.0 (avoids overflow).
        am = ap / bpp
        bm = bp / bpp
        az = app / bpp
        bz = 1.0
        if abs(az - aold) < EPS * abs(az):
            # Converged: az holds the value of the continued fraction.
            return az
    raise ValueError(
        "a={0:f} or b={1:f} too large, or ITMAX={2:d} too small to compute incomplete beta function.".format(
            a, b, ITMAX
        )
    )
def readMetadata(lines):
    """
    Read metadata tags and values from a TNTP file, returning a dictionary whose
    keys are the tags (strings between the <> characters) and corresponding values.
    The last metadata line (reading <END OF METADATA>) is stored with a value giving
    the line number this tag was found in. You can use this to proceed with reading
    the rest of the file after the metadata.
    """
    metadata = dict()
    lineNumber = 0
    for line in lines:
        lineNumber += 1
        # Bug fix: str.strip() returns a new string; the original discarded
        # the result, so trailing newlines were never removed.
        line = line.strip()
        commentPos = line.find("~")
        if commentPos >= 0:  # strip comments
            line = line[:commentPos]
        if len(line) == 0:
            continue
        startTagPos = line.find("<")
        endTagPos = line.find(">")
        if startTagPos < 0 or endTagPos < 0 or startTagPos >= endTagPos:
            print("Error reading this metadata line, ignoring: '%s'" % line)
            # Bug fix: the original fell through and parsed the malformed
            # line anyway, inserting a garbage key into the result.
            continue
        metadataTag = line[startTagPos+1 : endTagPos]
        metadataValue = line[endTagPos+1:]
        if metadataTag == 'END OF METADATA':
            metadata['END OF METADATA'] = lineNumber
            return metadata
        metadata[metadataTag] = metadataValue.strip()
    print("Warning: END OF METADATA not found in file")
    return metadata
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.