seed stringlengths 1 14k | source stringclasses 2
values |
|---|---|
def build_anthology_id(collection_id, volume_id, paper_id=None):
    """
    Transforms collection id, volume id, and paper id to a width-padded
    Anthology ID. e.g., ('P18', '1', '1') -> P18-1001.
    """
    wide_volume = (
        collection_id.startswith("W")
        or collection_id == "C69"
        or (collection_id == "D19" and int(volume_id) >= 5)
    )
    if wide_volume:
        # Workshop-style collections: 2-digit volume + 2-digit paper.
        result = f"{collection_id}-{int(volume_id):02d}"
        if paper_id is not None:
            result += f"{int(paper_id):02d}"
    elif collection_id.startswith("2"):
        # New-style (year-prefixed) IDs keep both parts verbatim, dot-separated.
        result = f"{collection_id}-{volume_id}"
        if paper_id is not None:
            result += f".{paper_id}"
    else:
        # Classic IDs: 1-digit volume + zero-padded 3-digit paper.
        result = f"{collection_id}-{int(volume_id):01d}"
        if paper_id is not None:
            result += f"{int(paper_id):03d}"
    return result
from typing import Any
def assert_key_for_scope(scope: str):
    """Build a checker that verifies a config key exists with the given type.

    The returned callable raises KeyError (mentioning `scope`) when the
    key is absent or its value has the wrong type.
    """
    def _check(config: dict, key: str, instance: Any) -> None:
        value = config.get(key)
        if isinstance(value, instance):
            return
        raise KeyError(f"Missing {key} in {scope}")
    return _check
def BFSUtility(obj, visited, vertex):
    """Breadth-first traversal helper: return the component containing `vertex`.

    `visited` is mutated in place so repeated calls skip explored vertices.
    Graph edges are read from `obj.adjList`.
    """
    component = []
    queue = [vertex]          # plain-list FIFO queue
    visited[vertex] = True
    while queue:
        current = queue.pop(0)
        component.append(current)
        for neighbor in obj.adjList[current]:
            if not visited[neighbor]:
                queue.append(neighbor)
                visited[neighbor] = True
    return component
def get_string_before_delimiter(string, delimiter):
    """
    Returns contents of a string before a given delimiter
    Example: get_string_before_delimiter("banana-kiwi", "-") returns "banana"

    When the delimiter is present the prefix is whitespace-stripped;
    otherwise the string is returned untouched.
    """
    head, found, _tail = string.partition(delimiter)
    if found:
        return head.strip()
    return string
import json
def test_json(filename):
"""Verify that given filename is valid JSON; if not, return None."""
try:
with open(filename, 'r') as f:
data = json.load(f)
except Exception as e:
print(e)
data = None
return data | bigcode/self-oss-instruct-sc2-concepts |
def contains(seq, value):
    """
    Check whether `value` occurs in a sequence or dictionary.

    For dicts, both keys and values are searched; for any other
    sequence (list/tuple/set/str) plain membership is used.

    :param seq: (list or tuple or set or dict or string) - container to search in
    :param value: any - value to search for
    :return: bool - True (value found), False (value not found)

    >>> contains([1, 2, 3, 4, 5], 4)
    True
    >>> contains([1, 2, 3, 4, 5], 10)
    False
    """
    if not isinstance(seq, dict):
        return value in seq
    return value in seq.keys() or value in seq.values()
def stage_title(stage):
    """Compose the two-line title bar text for a chargen stage (1-based)."""
    titles = ("Name", "Vocation", "Character Design and Details",
              "Stats and Skills", "All Done: Thank You!")
    commands = ("{w@add/name <character name>{n", "{w@add/vocation <character's vocation>{n",
                "{w@add/<field name> <value>", "{w@add/<stat or skill> <stat or skill name>=<+ or -><new value>{n",
                "{w@add/submit <application notes>")
    idx = stage - 1
    return "{wStep %s: %s" % (stage, titles[idx]) + "\n%s" % commands[idx]
def add_weight_decay(model, weight_decay=1e-5, skip_list=()):
    """Partition model parameters into weight-decay / no-weight-decay groups.

    Tweaked from https://bit.ly/3dzyqod

    :param model: the torch.nn model
    :param weight_decay: weight decay term
    :param skip_list: extra modules (besides BN/bias) to skip
    :returns: two param-group dicts, no-decay group first
    :rtype: list(dict)
    """
    no_decay, decay = [], []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue  # frozen parameters belong to neither group
        # 1-D tensors (biases, norm scales) and explicitly skipped names
        # are exempt from weight decay.
        exempt = len(param.shape) == 1 or name in skip_list
        (no_decay if exempt else decay).append(param)
    return [
        {'params': no_decay, 'weight_decay': 0, 'ignore': True},
        {'params': decay, 'weight_decay': weight_decay, 'ignore': False}]
from typing import List
def remove_stubs(packages: List[str]) -> List[str]:
    """Remove type stubs (:pep:`561`) from a list of packages.

    >>> remove_stubs(["a", "a.b", "a-stubs", "a-stubs.b.c", "b", "c-stubs"])
    ['a', 'a.b', 'b']
    """
    kept = []
    for pkg in packages:
        top_level = pkg.split(".")[0]
        if not top_level.endswith("-stubs"):
            kept.append(pkg)
    return kept
def to_csv(dictionary, filename=None, headers=None, nm=False):
    """
    Save spectral data as csv.

    Parameters
    ----------
    dictionary : dict
        The dictionary containing spectral data.
        Get using Image.get_spectral_data()
        The expected unit for wavelength is Angstrom.
    filename : str, optional
        The name of the file to save data to.
        If omitted, returns the csv file as a string.
    headers : iterable, optional
        The headers to put on top of the csv.
        Default is ("Wavelength ({unit})", "Intensity"),
        where {unit} is nm or A, depending on the nm parameter.
    nm : bool, optional
        Whether to convert Angstrom to nanometers. The default is False.

    Returns
    -------
    0 if saved to file.
    csv-formatted string if filename is omitted.
    """
    # Validate headers: need an iterable whose first two entries are
    # strings; anything else falls back to the defaults. (The original
    # indexed headers[0] before its try-block, so non-indexable
    # iterables crashed, as did single-element sequences.)
    try:
        headers = list(headers)
        if len(headers) < 2 or not (isinstance(headers[0], str)
                                    and isinstance(headers[1], str)):
            headers = None
    except TypeError:
        headers = None
    if headers is None:
        headers = ["Wavelength (nm)" if nm else "Wavelength (A)", "Intensity"]

    def _rows():
        # One generator used for both the file and the string output.
        yield f"{headers[0]},{headers[1]}\n"
        for key, value in dictionary.items():
            if nm:
                key /= 10  # Angstrom -> nanometers
            yield f"{key},{value}\n"

    if filename:
        # Strip a trailing ".csv" so we don't produce "name.csv.csv".
        if filename[-4:].lower() == ".csv":
            filename = filename[:-4]
        with open(filename + ".csv", "w") as file:
            for line in _rows():
                file.write(line)
        return 0
    return "".join(_rows())
from pathlib import Path
def get_tf_version(tf_path):
    """
    Traverse "up" the given directory tree looking for a
    .terraform-version file and return its contents (stripped); raise an
    exception if we reach the top without having found anything.
    """
    current_path = Path(tf_path)
    while True:
        tf_version_path = current_path / '.terraform-version'
        if tf_version_path.exists():
            with tf_version_path.open('r') as tf_version_fp:
                return tf_version_fp.read().strip()
        parent = current_path.parent
        if parent == current_path:
            # At the top: a path's parent equals itself at the root.
            # (The original compared against Path('/'), which loops
            # forever for relative paths and never matches on Windows.)
            break
        current_path = parent
    raise Exception('Hey, dude! There is no .terraform-version in this tree')
def combine(hi, lo):
    """Join the high and low 64-bit halves into one 128-bit IP address."""
    return lo + (hi << 64)
def assert_equal_length(psg, hyp, sample_rate):
    """Return True if the PSG and HYP cover the same duration in seconds.

    NOTE(review): despite the `assert_` name this returns a bool and
    raises nothing.
    """
    psg_seconds = psg.shape[0] / sample_rate
    return psg_seconds == hyp.total_duration
async def time_event() -> dict:
    """Create a mock time_event object."""
    changelog_entry = {
        "timestamp": "2021-11-08T22:06:30", "user_id": "test", "comment": "hello"
    }
    return dict(
        id="290e70d5-0933-4af0-bb53-1d705ba7eb95",
        bib=1,
        event_id="event_1",
        name="Petter Propell",
        club="Barnehagen",
        race="race_name",
        race_id="race_1",
        timing_point="Finish",
        rank=0,
        registration_time="12:01:02",
        next_race="semi_name1",
        next_race_id="semi_1",
        next_race_position=1,
        status="OK",
        changelog=[changelog_entry],
    )
def convert_8_to_16(value):
    """Scale an 8 bit level into 16 bits by replicating the byte."""
    high = value << 8
    return high | value
def size_of(rect):
    """Return size of list|tuple `rect` (top, left, bottom, right) as tuple (width, height)"""
    top, left, bottom, right = rect[0], rect[1], rect[2], rect[3]
    return (right - left, bottom - top)
import re
def uncolorize(s):
    """
    Remove ANSI color escape codes from the string `s` and return the result.

    Works with text colorized with the `colorama` module.
    """
    # Raw string: the original non-raw pattern contained the invalid
    # escape "\[" (DeprecationWarning today, SyntaxError in future
    # Pythons). In a raw pattern \033 is handled by the regex engine
    # itself as the octal escape for ESC.
    return re.sub(r"\033\[([0-9]+;)*[0-9]*m", "", s, flags=re.UNICODE)
import numbers
def is_number(a):
    """Check that a value `a` is numeric.

    Uses the `numbers.Number` ABC, so int, float, complex, bool and
    registered types like Decimal/Fraction all count.
    """
    return isinstance(a, numbers.Number)
def transform_len(val, default=None):
    """
    Calculate length.

    <dotted>|len              len(val) or raises TypeError
    <dotted>|len:<default>    len(val) or <default>

    :param val: object whose length is taken
    :param default: returned when `val` has no len(); None means re-raise
    """
    try:
        return len(val)
    except TypeError:
        if default is not None:
            return default
        raise
    # NOTE: the original ended with an unreachable `return val` after
    # this try/except; both paths return or raise, so it was dead code.
from typing import Iterable
def nested_depth(iterable: Iterable) -> int:
    """
    Calculate the nested depth of an iterable.

    Only list/tuple nesting counts; scalars and other types are depth 0.

    Args:
        iterable (Iterable): the iterable to calculate the depth of

    Returns:
        int: the depth
    """
    def _depth(obj):
        if not isinstance(obj, (list, tuple)):
            return 0
        # default=0 makes an empty list/tuple depth 1 instead of the
        # ValueError the original lambda raised via max() on nothing.
        return 1 + max(map(_depth, obj), default=0)
    return _depth(iterable)
def post_process_fieldsets(context, fieldset):
    """
    Removes a few fields from FeinCMS admin inlines, those being
    ``id``, ``DELETE`` and ``ORDER`` currently.
    Additionally, it ensures that dynamically added fields (i.e.
    ``ApplicationContent``'s ``admin_fields`` option) are shown.

    Mutates ``fieldset.fields`` in place. Returns ``fieldset`` untouched
    when the admin declares custom fieldsets, otherwise returns "".
    (NOTE(review): the "" return looks like template-tag convention —
    render nothing — confirm against the calling template.)
    """
    # abort if fieldset is customized
    if fieldset.model_admin.fieldsets:
        return fieldset
    # Start from every field the form declares, minus the bookkeeping ones.
    fields_to_include = set(fieldset.form.fields.keys())
    for f in ("id", "DELETE", "ORDER"):
        fields_to_include.discard(f)
    def _filter_recursive(fields):
        # Keep only fields present in fields_to_include, preserving the
        # nested "several fields on one line" list/tuple structure.
        # Also drains fields_to_include so leftovers can be appended below.
        ret = []
        for f in fields:
            if isinstance(f, (list, tuple)):
                # Several fields on one line
                sub = _filter_recursive(f)
                # Only add if there's at least one field left
                if sub:
                    ret.append(sub)
            elif f in fields_to_include:
                ret.append(f)
                fields_to_include.discard(f)
        return ret
    new_fields = _filter_recursive(fieldset.fields)
    # Add all other fields (ApplicationContent's admin_fields) to
    # the end of the fieldset
    for f in fields_to_include:
        new_fields.append(f)
    # Readonly fields are only appended when a request is available,
    # since get_readonly_fields needs request/original.
    if context.get("request"):
        new_fields.extend(
            list(
                fieldset.model_admin.get_readonly_fields(
                    context.get("request"), context.get("original")
                )
            )
        )
    fieldset.fields = new_fields
    return ""
def sos_gradient(params):
    """Gradient of the sum-of-squares function: d/dx sum(x^2) = 2x."""
    values = params["value"].to_numpy()
    return values * 2
import numbers
def commanum(v):
    """
    Makes large numbers readable by adding commas
    E.g. 1000000 -> 1,000,000
    Non-numeric values are returned unchanged.
    """
    if not isinstance(v, numbers.Number):
        return v
    return format(v, ",")
import glob
def createPlotString(postAnalyzerPath, description):
    """ Create the plotFile readable by the PostPlot*.py program.

    Args:
        postAnalyzerPath: Path to the output from PostAnalyzer
        description: description of the results (for instance canonical or submatrix)

    Returns:
        plotFile readable by the PostPlot*.py program
    """
    wanted_suffixes = ('_rigid/', '_medium/', '_difficult/', '_all/')
    script = []
    for folder in glob.glob(postAnalyzerPath + "*/"):
        if not folder.endswith(wanted_suffixes):
            continue
        # Difficulty is the last "_token" of the folder name, capitalized.
        difficulty = folder[folder.rfind("_") + 1:-1]
        difficulty = difficulty[0].upper() + difficulty[1:]
        script.append([
            folder,
            description,
            "AssessmentWhole",  # for now, later this needs to be refactored to made the argument optional
            "Difficulty" + difficulty,
        ])
    # sort to have all, difficult, medium, rigid in this order (a before d before m before r)
    script.sort(key=lambda row: row[3], reverse=False)
    return script
import math
def vector_normalize(vect=()):
    """ Generates a unit vector from the input.

    Generalized to any dimension >= 1; the original handled only 2-D and
    3-D and raised IndexError for shorter input.

    :param vect: input vector
    :type vect: tuple
    :return: unit vector
    :rtype: list
    :raises ValueError: if the input is empty or has zero magnitude
    """
    if not vect:
        raise ValueError("Input argument is empty.")
    # Calculate magnitude of the vector
    magnitude = math.sqrt(sum(component ** 2 for component in vect))
    if magnitude == 0:
        raise ValueError("The magnitude of the vector is zero.")
    # Normalize the vector
    return [component / magnitude for component in vect]
from typing import List
def remove_nulls(obj):
    """Recursively drop None-valued entries from dicts, descending into lists."""
    if isinstance(obj, dict):
        return {key: remove_nulls(val) for key, val in obj.items() if val is not None}
    if isinstance(obj, List):
        return [remove_nulls(item) for item in obj]
    return obj
def are_disjoint(sa, sb):
    """
    :param sa: set 1
    :param sb: set 2
    :return: True if two sets are disjoint, otherwise False
    """
    # set.isdisjoint short-circuits on the first common element and
    # avoids building the intermediate intersection set the original's
    # `sa & sb == set()` created; the `True if ... else False` wrapper
    # was redundant.
    return sa.isdisjoint(sb)
def create_coordinate_matrix(sp, xn, yn, lons, lats):
    """
    Creates xn times yn matrix of GNSS points.

    :param sp: Starting GNSS point (lon, lat).
    :param xn: Number of rectangles (columns).
    :param yn: Number of rectangles (rows).
    :param lons: Longitude step.
    :param lats: Latitude step.
    :return: Matrix of GNSS points for rectangle drawing. Every cell is a
        tuple (lon1, lat1, lon2, lat2, lon3, lat3, lon4, lat4).
    """
    matrix = []
    for row in range(yn):
        row_cells = []
        for col in range(xn):
            lon1 = sp[0] + col * lons          # upper-left corner
            lat1 = sp[1] - row * lats
            lon2 = sp[0] + (col + 1) * lons    # lower-right corner
            lat2 = sp[1] - (row + 1) * lats
            # The last four entries are the two remaining corners;
            # numerically lon3 == lon2, lat3 == lat1, lon4 == lon1, lat4 == lat2.
            row_cells.append((lon1, lat1, lon2, lat2,
                              lon1 + lons, lat1, lon2 - lons, lat2))
        matrix.append(row_cells)
    return matrix
def sanitize_identifier_for_cpp(identifier: str) -> str:
    """
    Convert the provided identifier to a valid C++ identifier.

    Non-alphanumeric, non-underscore characters become '_'. A leading
    digit is prefixed with '_' (the original returned e.g. "9lives"
    unchanged, which is not a valid C++ identifier).

    :param identifier: the name which needs to be sanitized
    :return: str: sanitized identifier
    """
    if not identifier:
        return ''
    sanitized = [ch if (ch.isalnum() or ch == '_') else '_'
                 for ch in identifier]
    if sanitized[0].isdigit():
        sanitized.insert(0, '_')
    return "".join(sanitized)
def readfile(filename, current_dir_path):
    """Return the contents of a file as a string.

    Parameters
    ----------
    filename : str
        Name of the file to read.
    current_dir_path : Path
        pathlib.Path object for the directory containing the file.
    """
    with open(current_dir_path / filename, 'r', encoding='ISO-8859-1') as handle:
        return handle.read()
def find_best_match(path, prefixes):
    """Find the Ingredient that shares the longest prefix with path.

    `prefixes` is an iterable of part-lists; the FIRST matching prefix
    wins, so callers should supply them longest-first.
    """
    parts = path.split('.')
    for prefix in prefixes:
        n = len(prefix)
        if n <= len(parts) and prefix == parts[:n]:
            return '.'.join(prefix), '.'.join(parts[n:])
    return '', path
import time
def wait_for_influxdb(db_client):
    """Block until the influxdb service responds to a ping.

    Retries once per second. Rewritten from self-recursion to a loop so
    a long outage cannot exhaust the interpreter's recursion limit.

    :param db_client: client object exposing a ping() method
    :return: None once connected
    """
    while True:
        try:
            db_client.ping()
            print("connected to db")
            return None
        except ConnectionError:
            # NOTE(review): this catches the builtin ConnectionError;
            # confirm the client raises it (requests-based clients raise
            # requests.exceptions.ConnectionError instead).
            print("not yet")
            time.sleep(1)
def wc1(file_):
    """Takes an absolute file path/name, calculates the number of
    lines/words/chars, and returns a string of these numbers + file, e.g.:
    3 12 60 /tmp/somefile
    (both tabs and spaces are allowed as separator)

    NOTE(review): the line count is the number of newline-separated
    pieces, which is one more than `wc -l` for newline-terminated files.
    """
    with open(file_) as handle:
        text = handle.read()
    line_count = len(text.split("\n"))
    word_count = len(text.split())
    char_count = len(text)
    return "%d\t%d\t%d %s" % (line_count, word_count, char_count, file_)
def rjust(s, width, *args):
    """rjust(s, width[, fillchar]) -> string

    Return a right-justified version of s, in a field of the
    specified width, padded with spaces as needed. The string is
    never truncated. If specified the fillchar is used instead of spaces.
    """
    return str.rjust(s, width, *args)
def fields(d, names, pred=lambda x: True,
           key_transform=lambda x: x, value_transform=lambda x: x):
    """Returns the entries in this dictionary with keys appearing in names.

    :type d: dict
    :type names: [a]
    :param pred: a filter that is applied to the values of the dictionary.
    :type pred: (a -> bool)
    :param key_transform: a transform to apply to the key before returning it
    :type key_transform: a -> a
    :param value_transform: a transform to apply to the value before
        returning it
    :type value_transform: a -> a
    :rtype: dict
    """
    return {key_transform(k): value_transform(v)
            for k, v in d.items()
            if k in names and pred(v)}
def HarmonicPairConstraints(a, b, value, sd):
    """
    Returns properly formatted string for declaring harmonic distance constraints
    between CA carbons of residues a and b, at distance "value", and "sd" standard deviation.
    """
    return "AtomPair CA %i CA %i HARMONIC %3.1f %3.1f\n" % (a, b, value, sd)
def grab_data(h5file, path, col, matching=None):
    """Grabs data in a path matching parameters

    Parameters
    ----------
    h5file : PyTables HDF5 File handle
    path : str
        the path to the appropriate table
    col : str
        the target column name
    matching : tuple, optional
        a tuple of col name and data to match, if no match is given, all column
        values will be returned

    Returns
    -------
    data : list, dict, other
        if a matching is provided, a dictionary from the instance id to the
        data value is returned, otherwise a list of all column values is given
    """
    h5node = h5file.get_node(path)
    if matching is None:
        return [row[col] for row in h5node.iterrows()]
    # (The original assigned a dead `data = []` here before immediately
    # rebinding it to the dict below; removed.)
    scol, search = matching
    return {row['instid']: row[col]
            for row in h5node.iterrows()
            if row[scol] in search}
def get_min(data_frame, field):
    """Calculates the minimum value of a given field in a given DataFrame.

    Args:
        data_frame (DataFrame): the Pandas DataFrame to analyze
        field (string): the field to calculate the values of

    Returns:
        the minimum value of the field
    """
    column = data_frame[field]
    return column.min()
# ------------------------------------------------------------- get_min()
import torch
def kinetic_energy_reg_func(x: torch.Tensor,
                            t: torch.Tensor,
                            dx: torch.Tensor,
                            unused_context) -> torch.Tensor:
    """
    Quadratic cost / kinetic energy regularization: https://arxiv.org/pdf/2002.02798.pdf

    Only `dx` is used; `x` and `t` are accepted for interface
    compatibility with other regularizers.

    :param x: unused
    :param t: unused
    :param dx: time-derivative of the state, shape (B, ...)
    :return: The regularization term, one value per batch element.
    """
    del x, t
    flat = dx.view(dx.shape[0], -1)
    return flat.pow(2).mean(dim=-1) * 0.5
import re
def first_clean_up(lines):
    """
    A function to remove line delimiters, comments, tabs, etc. from the
    .sas dictionary file.

    Mutates `lines` in place and also returns it.

    Parameters
    ----------
    lines : list of strings; the raw file
    """
    #Remove all tabs from the string
    for i in range(0, len(lines)):
        lines[i] = re.sub('\t', ' ', lines[i])
        lines[i] = re.sub('\r', '', lines[i])
        lines[i] = re.sub('\n', '', lines[i])
    #Remove all comments from the code
    # Block comments may span lines: start_comment remembers the line
    # holding the most recent '/*'; when a '*/' is seen, the markers and
    # stray '*'s on both boundary lines are stripped, and every whole
    # line strictly between them is blanked.
    # NOTE(review): a line containing both '/*' and '*/' only updates
    # start_comment (the elif never runs for it) — confirm inputs never
    # have single-line block comments.
    start_comment = 0
    for i in range(0, len(lines)):
        if lines[i].find('/*') >= 0:
            start_comment = i
        elif lines[i].find('*/') >= 0:
            lines[start_comment] = lines[start_comment]\
                                .replace('/*', '')
            lines[start_comment] = lines[start_comment]\
                                .replace('*', '')
            lines[i] = lines[i].replace('*/', '')
            lines[i] = lines[i].replace('*', '')
            if i > start_comment + 1:
                lines[start_comment + 1:i] = ['' for x in
                                              range(start_comment+1, i)]
    # Strip any remaining stray asterisks from every line.
    for i in range(0, len(lines)):
        lines[i] = lines[i].replace('*', '')
    return lines
def verify_request_target(replay_json, request_target):
    """
    Verify that the 'url' element of the first transaction contains the request target.

    Prints a diagnostic and returns False on any mismatch or missing key.
    """
    try:
        first_request = replay_json['sessions'][0]['transactions'][0]['client-request']
        url = first_request['url']
    except KeyError:
        print("The replay file did not have a first transaction with a url element.")
        return False
    if url == request_target:
        return True
    print("Mismatched request target. Expected: {}, received: {}".format(request_target, url))
    return False
from typing import List
def gray_max(colors: List[int]) -> float:
    """
    Convert to grayscale: use the brightest channel value.

    :param colors: [R, G, B] values
    :return: one grayscale color
    """
    brightest = max(colors)
    return brightest
import pickle
def load(ensemble):
    """
    Load a previously pickled ensemble.

    :param ensemble: the pickled bytes (not a file path)
    :return: the unpickled object
    """
    # SECURITY NOTE: pickle.loads executes arbitrary code — only call
    # this on trusted data.
    return pickle.loads(ensemble)
def empty_table(unique_database, request):
    """Create an empty table within the test database before executing test.

    The table will have the same name as the test_function itself. Setup and teardown
    of the database is handled by the unique_database fixture.

    Args:
        unique_database: pytest fixture defined in conftest.py
        request: standard pytest request fixture

    Returns:
        fq_table_name (str): the fully qualified name of the table: dbname.table_name
    """
    # The test function's name doubles as the table name.
    table_name = request.node.name
    fq_table_name = '.'.join([unique_database, table_name])
    stmt = "CREATE TABLE %s (i integer, s string)" % fq_table_name
    # sync_ddl=1 so the new table is visible cluster-wide before the test runs.
    request.instance.execute_query_expect_success(request.instance.client, stmt,
                                                  query_options={'sync_ddl': 1})
    return fq_table_name
def fix_line_breaks(text):
    """Convert Windows CRLF line breaks to Unix LF."""
    return "\n".join(text.split("\r\n"))
def ignition_delay(states, species):
    """
    Compute the ignition delay as the time at which the given species'
    mass fraction peaks.
    """
    peak_index = states(species).Y.argmax()
    return states.t[peak_index]
def average_tweets_per_user(tweets, users_with_freq):
    """
    Return the average number of tweets per user from a list of tweets.

    :param tweets: the list of tweets.
    :param users_with_freq: a Counter of usernames with the number of tweets in 'tweets' from each user.
    :return: float. average number of tweets per user
    """
    return len(tweets) / len(users_with_freq)
import torch
def cross_entropy_soft_targets(predicted_distribution, target_distribution):
    """Cross entropy loss with soft targets.

    B = batch size, D = dimension of target (num classes), N = ensemble size

    Args:
        predicted_distribution (torch.tensor((B, D - 1))): predicted distribution
        target_distribution (torch.tensor((B, D - 1))): target distribution
    """
    log_pred = torch.log(predicted_distribution)
    return torch.mean(-target_distribution * log_pred)
import csv
def loadGrabbers(fname="grabbers.csv"):
    """
    Read a CSV file and return contents as a list of dictionaries.

    Rows are skipped when they are comments (first cell starts with '#')
    or have fewer cells than the expected columns. (The original only
    required 5 cells but then indexed 7, so 5- or 6-cell rows raised
    IndexError.)
    """
    columns = ["ID", "call", "title", "name", "loc", "site", "url"]
    grabbers = []
    with open(fname) as f:
        reader = csv.reader(f, delimiter=",", quotechar='"')
        for row in reader:
            row = [x.strip().strip('"') for x in row]
            if len(row) < len(columns) or row[0].startswith("#"):
                continue
            # zip pairs each expected column with its cell, ignoring extras.
            grabbers.append(dict(zip(columns, row)))
    return grabbers
def make_car(manufacturer, model, **car_info):
    """Build a dictionary describing a car.

    Extra keyword arguments are kept as-is; the required manufacturer and
    model are stored under 'manufacturer_name' and 'model_name'.
    """
    car_info.update(manufacturer_name=manufacturer, model_name=model)
    return car_info
from typing import List
def split_into_lists(input_list: List, target_number_of_lists: int) -> List[List]:
    """
    Evenly splits list into n lists.
    E.g split_into_lists([1,2,3,4], 4) returns [[1], [2], [3], [4]].

    :param input_list: object to split
    :param target_number_of_lists: how many lists to split into
    :returns: list of lists containing original items
    """
    base_size, leftover = divmod(len(input_list), target_number_of_lists)
    chunks = []
    for i in range(target_number_of_lists):
        # The first `leftover` chunks each absorb one extra element.
        start = i * base_size + min(i, leftover)
        end = (i + 1) * base_size + min(i + 1, leftover)
        chunks.append(input_list[start:end])
    return chunks
def get_job(api, job_id):
    """Fetch a Borgy job by id through the given API client."""
    return api.v1_jobs_job_id_get(job_id)
import requests
def get_json_from_query(location):
    """Search metaweather for a city and return the first result's metadata.

    NOTE(review): raises IndexError when the search returns no results.
    """
    url = f"https://www.metaweather.com/api/location/search/?query={location}"
    results = requests.get(url).json()
    return results[0]
def is_punkt(token):
    """
    Return if token consists of only punctuation and whitespace

    Args:
        token: single token

    Returns:
        Boolean

    Examples:
        >>> is_punkt(" ")
        True
        >>> is_punkt(", ,")
        True
        >>> is_punkt("?!!")
        True
        >>> is_punkt("x")
        False
    """
    punkt = '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~'  # same set as string.punctuation
    for letter in token:
        if letter not in punkt and not letter.isspace():
            return False
    return True
def movie_sort(movies):
    """Sorts a list of movies by their release date using bubble sort.

    Args:
        movies: a list of movies, each with a `year` attribute.

    Returns:
        A new sorted list; the input list is left unmodified.
    """
    sorted_movies = movies.copy()
    swapped = True
    while swapped:
        swapped = False
        # The original iterated range(1, len(sorted_movies[1:])), which
        # stops one index short and never compares the final pair, so
        # the last element could be left out of order.
        for i in range(1, len(sorted_movies)):
            if sorted_movies[i - 1].year > sorted_movies[i].year:
                sorted_movies[i], sorted_movies[i - 1] = \
                    sorted_movies[i - 1], sorted_movies[i]
                swapped = True
    return sorted_movies
def xor_hex_strings(bytes_a, bytes_b):
    # type: (str, str) -> str
    """Given two hex strings of equal length, return a hex string with
    the bitwise xor of the two hex strings."""
    assert len(bytes_a) == len(bytes_b)
    digits = []
    for x, y in zip(bytes_a, bytes_b):
        digits.append("%x" % (int(x, 16) ^ int(y, 16)))
    return ''.join(digits)
import re
import json
def read_job_properties(jobscript,
                        prefix="# properties",
                        pattern=re.compile("# properties = (.*)")):
    """Read the job properties defined in a snakemake jobscript.

    This function is a helper for writing custom wrappers for the
    snakemake --cluster functionality. Applying this function to a
    jobscript will return a dict containing information about the job.
    Returns None implicitly when no properties line is found.
    """
    with open(jobscript) as handle:
        for line in handle:
            match = pattern.match(line)
            if match:
                return json.loads(match.group(1))
def solve(input):
    """Solve the puzzle: required fuel is floor(mass / 3) minus 2."""
    thirds = int(input / 3)
    return thirds - 2
def Dic_Extract_By_Subkeylist(indic, keylist):
    """
    Return a new dict holding only the key/value pairs named in keylist.

    Raises KeyError with a descriptive message for any missing key.
    """
    outdic = {}
    for key in keylist:
        try:
            outdic[key] = indic[key]
        except KeyError:
            raise KeyError("input key {0} not present!".format(key))
    return outdic
def canonicalize_tensor_name(name):
    """Canonicalizes tensor names.

    For an op that produces only one output, we may refer to its output
    tensor as either "op_name:0" or simply "op_name". This standardizes
    all internal names as "op_name:0" to simplify the logic.

    Args:
        name: Input name to canonicalize.

    Returns:
        The canonicalized input name.
    """
    return name if ":" in name else f"{name}:0"
def _get_header(request_type):
"""Returns header str for talking with cosmos
:param request_type: name of specified request (ie uninstall-request)
:type request_type: str
:returns: header information
:rtype: str
"""
return ("application/vnd.dcos.package.{}+json;"
"charset=utf-8;version=v1").format(request_type) | bigcode/self-oss-instruct-sc2-concepts |
def strip_html_comments(text):
    """Strip HTML comments from a unicode string.

    Only whole lines that begin with "<!--" are removed — the special
    form this code allows; inline comments are left untouched.
    """
    kept = [line for line in text.splitlines(True)  # keep line endings
            if not line.startswith("<!--")]
    return "".join(kept)
import torch
def matrix_to_cartesian(batch: torch.Tensor, keep_square: bool = False) -> torch.Tensor:
    """
    Transforms a batch of homogeneous transformation matrices back to
    cartesian coordinates.

    Args:
        batch: the batch of matrices to convert back
        keep_square: if False: returns a NDIM x NDIM+1 matrix to keep the
            translation part
            if True: returns a NDIM x NDIM matrix but loses the translation
            part. defaults to False.

    Returns:
        torch.Tensor: the given matrix in cartesian coordinates
    """
    # Drop the last (homogeneous) row; optionally drop the translation column.
    trimmed = batch[:, :-1, ...]
    return trimmed[..., :-1] if keep_square else trimmed
import requests
import ssl
import base64
def get_digest_base64(location):
    """Download the sha256sum.txt message digest file at the given
    `location`.

    :return: A `string` of the base64-encoded message digest
    :raises Exception: with the HTTP reason for any non-200 response
    """
    res = requests.get(location,
                       verify=ssl.get_default_verify_paths().openssl_cafile)
    if res.status_code != 200:
        raise Exception(res.reason)
    # b64encode requires bytes, so encode the str body first; the
    # base64 bytes are decoded back to str so the result can later be
    # serialized with json.
    return base64.b64encode(res.text.encode()).decode()
def _is_inline_update(job):
"""This returns true if the job contains an inline update"""
if not job.metadata.get('update'):
return False
return job.metadata["update"].inline_query | bigcode/self-oss-instruct-sc2-concepts |
def _note(item):
"""Handle secure note entries
Returns: title, username, password, url, notes
"""
return f"{item['name']} - Secure Note", "", "", "", item.get('notes', '') or "" | bigcode/self-oss-instruct-sc2-concepts |
import unicodedata
def remove_accentuated(s):
    """Lower-case `s` and strip accents (Unicode combining marks)."""
    decomposed = unicodedata.normalize('NFD', s.lower())
    return ''.join(c for c in decomposed if unicodedata.category(c) != 'Mn')
def get_filters(filters):
    """Return the rsync options for the given filters.

    Each filter is a one-entry dict: {"exclude": pat} or {"include": pat}.
    """
    arguments = []
    for filter_ in filters:
        if len(filter_) > 1:
            raise Exception(
                "Filter must contain only one entry: {}".format(filter_))
        if "exclude" in filter_:
            arguments.extend(("--exclude", filter_["exclude"]))
        elif "include" in filter_:
            arguments.extend(("--include", filter_["include"]))
        else:
            raise Exception(
                "Filter must contain include or exclude: {}".format(filter_))
    return arguments
def _replace_words(replacements, string):
"""Replace words with corresponding values in replacements dict.
Words must be separated by spaces or newlines.
"""
output_lines = []
for line in string.split('\n'):
output_words = []
for word in line.split(' '):
new_word = replacements.get(word, word)
output_words.append(new_word)
output_lines.append(output_words)
return '\n'.join(' '.join(output_words) for output_words in output_lines) | bigcode/self-oss-instruct-sc2-concepts |
def continue_crawl(search_history, target_url, max_steps=25):
    """
    Determine whether the article crawl should continue.

    search_history: list of Wikipedia article url strings; the last item
        is the most recently found url.
    target_url: the url of the article at which the search should stop
        if it is found.
    max_steps: maximum chain length before giving up (default 25).

    Returns False (printing the reason) when the target is reached, the
    chain exceeds max_steps, or a loop is detected; otherwise True.
    """
    current = search_history[-1]
    if current == target_url:
        print("Congratulations! You reached the target Url. The article chain was {} article(s) long.".format(len(search_history)))
        return False
    if len(search_history) > max_steps:
        # The original message hard-coded "25" even when a different
        # max_steps was passed.
        print("The article chain has exceeded {} articles so we are giving up.".format(max_steps))
        return False
    if current in search_history[:-1]:
        print("Uh oh! You got caught in an endless loop of articles.")
        return False
    return True
def odd(number):
    """ Returns True if number is odd (Python's % keeps this correct for negatives). """
    remainder = number % 2
    return remainder == 1
def reverse(current_block, *args):
    """Return the current block's data in reverse order (extra args ignored)."""
    step_back = slice(None, None, -1)
    return current_block[step_back]
from typing import Optional
def frame_index_to_seconds(
    frame_index: int, fps: int, zero_indexed: Optional[bool] = True
) -> float:
    """Convert a frame index within a video clip to its timestamp in seconds.

    Args:
        frame_index (int): The index of the frame within the video.
        fps (int): The frame rate (frames per second) of the video.
        zero_indexed (Optional[bool]): True when frame 0 is the first frame,
            False when frame 1 is the first frame.
    Returns:
        (float) The point in time within the video, in seconds.
    """
    effective_index = frame_index if zero_indexed else frame_index - 1
    return effective_index / fps
def escape_special(v):
    """Render the literal singletons True/False/None as their string names.

    Identity comparison is used deliberately so truthy/falsy values such
    as 1 or 0 pass through unchanged.
    """
    for singleton in (True, False, None):
        if v is singleton:
            return str(v)
    return v
def tuple_set(base, values, indices):
    """
    Creates a new tuple with the given values put at indices and otherwise the
    same as base. The list of indices must be in sorted order, with one value
    per index.

    An empty indices list returns base unchanged (the earlier slicing
    implementation raised IndexError in that case).
    """
    items = list(base)
    for index, value in zip(indices, values):
        items[index] = value
    return tuple(items)
def alter_context(context):
    """Inject extra template variables into *context* and return it.

    The dict is modified in place as well as returned.
    """
    # An extra variable consumed downstream.
    context['ADD'] = '127'
    return context
def get_all_context_names(context_num):
    """Based on the nucleotide base context number, return
    a list of strings representing each context.

    Parameters
    ----------
    context_num : int
        number representing the amount of nucleotide base context to use.

    Returns
    -------
    a list of strings containing the names of the base contexts, in a
    deterministic order.  (The dinucleotide/trinucleotide lists were
    previously round-tripped through a set, so their order varied from
    run to run; the products are already unique, so the set added nothing.)
    """
    if context_num == 0:
        return ['None']
    elif context_num == 1:
        return ['A', 'C', 'T', 'G']
    elif context_num == 1.5:
        # Special CpG/TpC contexts plus the four single bases.
        return ['C*pG', 'CpG*', 'TpC*', 'G*pA',
                'A', 'C', 'T', 'G']
    elif context_num == 2:
        # All 16 dinucleotides.
        return [d1 + d2 for d1 in 'ACTG' for d2 in 'ACTG']
    elif context_num == 3:
        # All 64 trinucleotides.
        return [t1 + t2 + t3
                for t1 in 'ACTG'
                for t2 in 'ACTG'
                for t3 in 'ACTG']
def is_valid_ip(ip):
    """
    Check if IP address is a valid dotted-quad IPv4 address.

    :param ip: IP address string, e.g. "192.168.0.1"
    :return: True when the string has exactly four octets, each an
             unsigned ASCII-decimal integer in 0-255; False otherwise.
             Non-numeric octets now return False instead of raising
             ValueError from int().
    """
    octets = ip.split(".")
    if len(octets) != 4:
        return False
    # isascii()+isdigit() rejects signs, whitespace, empty strings, and
    # non-ASCII digit characters, so int() below cannot raise.
    return all(
        part.isascii() and part.isdigit() and 0 <= int(part) <= 255
        for part in octets
    )
def split_rows_by_condition(df, mask):
    """Partition a dataframe by a boolean index.

    Args:
        df (pd.DataFrame): Dataframe to split.
        mask (pd.Series): Boolean indexes (typically produced by a
            condition such as ``df["col"] > value``).
    Returns:
        list: Two dataframes — first the rows where the mask is True,
        then the rows where it is False.
    """
    matching = df[mask]
    remaining = df[~mask]
    return matching, remaining
def celcius(temperature):
    """Convert *temperature* from degrees Kelvin to degrees Celsius."""
    zero_celsius_in_kelvin = 273.15
    return temperature - zero_celsius_in_kelvin
def mplt_bars(ax, ticks, values, colors, ylabel=None, title=None):
    """Draw a two-segment stacked bar plot on *ax*.

    values[0] forms the lower segment, values[1] is stacked on top of it;
    colors follows the same order.  Optional ylabel/title are applied only
    when provided.  Returns both bar containers (lower, upper).
    """
    lower = ax.bar(ticks, values[0], color=colors[0])
    upper = ax.bar(ticks, values[1], bottom=values[0], color=colors[1])
    for apply_text, text in ((ax.set_ylabel, ylabel), (ax.set_title, title)):
        if text is not None:
            apply_text(text)
    return lower, upper
def _tag_tuple(revision_string):
    """Convert a revision number or branch number into a tuple of integers.

    Returns an empty tuple for an empty/None string or for a plain
    single-component number.  For even-length numbers of the form
    x.y.0.z the placeholder 0 is dropped, yielding (x, y, z) —
    NOTE(review): this looks like the CVS "magic branch number"
    convention; confirm against the callers' expectations.
    """
    if revision_string:
        # Split on dots and parse every component as an integer.
        t = [int(x) for x in revision_string.split('.')]
        l = len(t)
        if l == 1:
            # A single component carries no tag/branch information.
            return ()
        if l > 2 and t[-2] == 0 and l % 2 == 0:
            # Even-length number with 0 in the next-to-last slot:
            # remove the placeholder component.
            del t[-2]
        return tuple(t)
    return ()
def indentLevel(line):
    """Return the indentation level of a line, defined in Piklisp as the
    number of leading tabs.

    Returns None for an empty line or a line consisting only of tabs.
    """
    without_tabs = line.lstrip("\t")
    if without_tabs:
        return len(line) - len(without_tabs)
    return None
def marker_cell_identifier(marker_region, cells):
    """Return the cell identifier that lies at the marker region's
    convex-hull centroid."""
    centroid = marker_region.convex_hull.centroid
    return cells[centroid]
def get_synapse_data_by_contin(cur,contin):
    """
    Returns synapse data for given contin
    Each row is a single section of the synapse
    Row format: [section_number,preobj,[post_obj]]
    Returns None when a row's post-object field cannot be split
    (e.g. a NULL toObj column).
    Parameters:
    -----------
    cur : MySQLdb cursor
    contin : str
        Contin number
    """
    # Parameterized query: the previous string interpolation of contin
    # into the SQL text was an injection risk.
    sql = ("select IMG_Number,fromObj,toObj "
           "from object "
           "where CON_Number = %s")
    cur.execute(sql, (contin,))
    try:
        return [(a[0], a[1], a[2].split(',')) for a in cur.fetchall()]
    except (AttributeError, TypeError):
        # Narrowed from a bare except: only malformed rows return None;
        # unrelated errors propagate.
        return None
def add_id_to_dict(doc):
    """Return the document's fields as a dict with the document's id
    included under the 'id' key."""
    return {**doc.to_dict(), 'id': doc.id}
def s_to_b(s: str) -> bytes:
    """Encode a string as UTF-8 bytes.

    :param s: input string
    :type s: str
    :return: the UTF-8 encoding of *s*
    :rtype: bytes
    """
    return s.encode('utf8')
def match_to_int(match):
    """Map a trace-line match to an integer sort key.

    Trace line numbers become their integer value; the hard-coded
    non-line matches (which are printed before trace lines, since each
    trace must show the address accessed) map to negative integers so
    they sort first.
    """
    if match in ("use-after-poison", "unknown-crash"):
        return -2
    if match == "READ":
        return -1
    # Strip the single non-digit character on each end of the match.
    return int(match[1:-1])
def to_pascal_case(string):
    """Convert from snake_case to PascalCase.

    The stdlib `title` is unsuitable because it lowercases everything after
    the first letter; we need:
    - API -> API
    - api_key -> ApiKey
    - user -> User
    - paginated_response(account) -> PaginatedResponse(Account)

    :param str string: String to reformat.
    :returns: Reformatted string.
    :rtype: str
    """
    # Normalize separators: spaces become underscores, doubled underscores
    # collapse, and an opening paren introduces a fresh segment boundary.
    normalized = string.replace(" ", "_").replace("__", "_").replace("(", "(_")
    if not normalized:
        return normalized
    segments = (piece for piece in normalized.split("_") if piece)
    return "".join(piece[0].upper() + piece[1:] for piece in segments)
def related_to_hardware(cpes):
    """
    Return True if any CPE marks the CVE item as hardware-related.

    CPEs follow the format cpe:cpe_version:product_type:vendor:product;
    a product_type of "h" denotes hardware.
    """
    return any(
        len(components) > 2 and components[2] == "h"
        for components in (cpe.split(":") for cpe in cpes)
    )
def map_range(x, in_min, in_max, out_min, out_max):
    """Linearly rescale *x* from the range [in_min, in_max] to
    [out_min, out_max]."""
    out_span = out_max - out_min
    in_span = in_max - in_min
    return (x - in_min) * out_span / in_span + out_min
def compare_dicts(file, src_test_dict, infer_dict):
    """
    Check whether *file* is present (with a positive count) in both the
    source/test dict and the infer dict; if so, consume one occurrence
    from each.

    Args:
        file: file potentially not analyzed by infer
        src_test_dict: dictionary containing src/test files
        infer_dict: dictionary containing infer files
    Returns:
        True when the file had a positive count in both dicts (both
        counters are then decremented); False otherwise.
    """
    if file not in src_test_dict or file not in infer_dict:
        return False
    if src_test_dict[file] <= 0 or infer_dict[file] <= 0:
        return False
    src_test_dict[file] -= 1
    infer_dict[file] -= 1
    return True
def parse_requirements(fname):
    """Read requirements from a pip-compatible requirements file.

    Returns the non-empty, non-comment lines with surrounding whitespace
    stripped.  A single context-managed handle owns the file (the file
    was previously opened twice, leaking the second handle).
    """
    with open(fname) as handle:
        stripped = (line.strip() for line in handle)
        return [line for line in stripped if line and not line.startswith("#")]
import math
def actual_pressure(temperature, pressure, height=0.0):
    """
    Convert an absolute pressure reading into sea-level adjusted
    atmospheric pressure using the barometric formula.

    Temperature is in degrees Celsius (it is shifted by 273.15),
    pressure is in hPa (it is scaled by 100 to Pa) — presumably; confirm
    units against the callers.  Returns the mean sea-level pressure in
    hPa as a string with two decimal places.
    """
    kelvin = temperature + 273.15              # Celsius -> Kelvin
    pascals = pressure * 100                   # hPa -> Pa
    gravity = 9.80665                          # standard gravity, m/s^2
    molar_mass = 0.0289644                     # molar mass of dry air, kg/mol
    gas_constant = 8.31432                     # universal gas constant, J/(mol*K)
    lapse_rate = -0.0065                       # temperature lapse rate, K/m
    exponent = (gravity * molar_mass) / (gas_constant * lapse_rate)
    temperature_ratio = kelvin / (kelvin + (lapse_rate * height))
    sea_level_pa = pascals / math.pow(temperature_ratio, exponent)
    return "%0.2f" % (sea_level_pa / 100)
import re
def get_emails(s):
    """Return the first email address matched in string s (lower-cased).

    Also matches obfuscated "a at b dot c" spellings.  Returns None when
    nothing matches or when the match begins with '//' (a URL fragment
    such as 'http://foo@bar.com', not an email).
    """
    # Removing lines that start with '//' because the regular expression
    # mistakenly matches patterns like 'http://foo@bar.com' as '//foo@bar.com'.
    # Adopted from code by Dennis Ideler ideler.dennis@gmail.com
    # Raw-string literals: the pattern contains \s, \/ and \. escapes,
    # which raise invalid-escape-sequence warnings in plain strings.
    regex = re.compile((r"([a-z0-9!#$%&'*+\/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+\/=?^_`"
                        r"{|}~-]+)*(@|\sat\s)(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?(\.|"
                        r"\sdot\s))+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?)"))
    s = s.lower()
    result = re.findall(regex, s)
    if result:
        if not result[0][0].startswith('//'):
            return result[0][0]
    return None
def update_counter(galini, name, amount, initial_value=0.0):
    """Increment the named counter by *amount*, creating the counter with
    *initial_value* if it does not exist yet."""
    telemetry = galini.telemetry
    existing = telemetry.get_counter(name)
    if existing is None:
        existing = telemetry.create_counter(name, initial_value)
    return existing.increment(amount)
def optional(cls):
    """
    Build a Converter for a type whose value may be missing.

    The returned converter passes through None and existing *cls*
    instances unchanged, maps blank/whitespace-only strings to None, and
    otherwise constructs cls(value).
    """
    def converter(value):
        if value is None or isinstance(value, cls):
            return value
        if not value.strip():
            return None
        return cls(value)
    return converter
def extract_name(in_line: str) -> str:
    """
    Extracts the name from a type construct such as
    ``class Foo <<stereotype>> {``.  Information that is not needed is
    removed: the stereotype and the leading type keyword.
    """
    # Ordered tuple, longest-overlapping keyword first, so
    # 'abstract class' is matched before 'abstract' (a set gave an
    # arbitrary, run-dependent match order).
    types = ('abstract class', 'package', 'interface', 'abstract',
             'entity', 'class', 'enum')
    process_type: str = ''
    for item_type in types:
        if in_line.startswith(item_type):
            process_type = item_type
            break
    process_line = in_line.replace(' ', '')
    if '<<' in process_line:
        process_line = process_line.split('<<')[0]
    # Compare like with like: the keyword must also be space-free to match
    # the space-stripped line ('abstract class' -> 'abstractclass'), and
    # only the leading occurrence is removed so names that contain a
    # keyword substring (e.g. 'Myclass') survive intact.
    process_line = process_line.replace(process_type.replace(' ', ''), '', 1)
    found_name = process_line.split('{')[0]
    return found_name
def _info_from_first_line(line):
    """
    Gets the info from the first line of landmarks' txt. The format of the file
    is hardcoded for now, e.g. the expected numbers and fields: the line is
    expected to contain an initial frame name after a ':', then tab-separated
    'n_frames:' and 'n_landmarks:' fields.
    It returns a dictionary, which enables future extensions of what is returned.
    Along with the functions _from_line_to_vec, from_txt_to_numpy_points, and
    access_ln_frame they are the functions to access the single txt (with sparse
    landmarks) per video.
    Unless you know how to call the function, please avoid calling it directly, it is used
    internally by the from_txt_to_numpy_points().
    :param line: First line of the landmarks file (str).
    :return: Dict with meta-data keys 'init_framename', 'n_frames', 'n_landm'.
    """
    info = {}
    # # Frame name of the first frame: text between the first ':' and the
    # # first tab (assumed to parse as an int!).
    info['init_framename'] = int(line[line.find(':') + 1: line.find('\t')])
    # # Number of frames: slice skips the literal 'n_frames:' (9 chars) and
    # # stops at the next tab.
    line1 = line[line.find('n_frames:'):]
    info['n_frames'] = int(line1[9: line1.find('\t')])
    # # Number of landmarks: slice skips 'n_landmarks:' (12 chars) and stops
    # # at the trailing newline.
    line2 = line[line.find('n_landmarks:'):]
    info['n_landm'] = int(line2[12: line2.find('\n')])
    return info
def database_test_url() -> str:
    """
    Build the in-memory sqlite connection url used for tests.

    :return: url string for the shared-cache aiosqlite test database
    """
    url = "sqlite+aiosqlite://?cache=shared"
    return url
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.