seed stringlengths 1 14k | source stringclasses 2
values |
|---|---|
def _give_default_names(list_of_objects, name):
"""Helper function to give default names to objects for error messages."""
return [name + '_' + str(index) for index in range(len(list_of_objects))] | bigcode/self-oss-instruct-sc2-concepts |
def translate(vector, obj):
    """Return an OpenSCAD ``translate`` command wrapping *obj*.

    @param vector: [x, y, z] translation matrix
    @param obj: text object to translate
    """
    body = "{}".format(obj)
    return "translate({})".format(vector) + "{" + body + "}"
import ipaddress
def hops_to_str(hops):
    """Concatenate a list of (host, port) hops into a string such as
    '127.0.0.1:9001|192.168.1.1:9002|[::1]:9000' suitable for printing or for a
    zeromq connect() call (the 'tcp://' prefix is not included)."""
    rendered = []
    for host, port in hops:
        ip = ipaddress.ip_address(host)
        if ip.version == 6:
            # IPv6 literals need brackets in host:port notation.
            host = '[%s]' % ip.compressed
        elif ip.version == 4:
            host = ip.compressed
        else:
            assert False
        rendered.append('%s:%s' % (host, port))
    return '|'.join(rendered)
def _parse_package_name(name: str) -> str:
"""
Force lower case and replace underscore with dash to compare environment
packages (see https://www.python.org/dev/peps/pep-0426/#name)
Args:
name: Unformatted package name
Returns:
Formatted package name
"""
return name.lower().replace("_", "-") | bigcode/self-oss-instruct-sc2-concepts |
import json
import codecs
def encode_blob(data):
    """Encode a dictionary to a base64-encoded compressed binary blob.

    :param data: data to encode into a blob
    :type data: dict
    :returns: The data as a zlib-compressed, base64-encoded blob with
        newlines stripped
    """
    payload = json.dumps(data).encode('utf8')
    compressed = codecs.encode(payload, 'zlib')
    encoded = codecs.encode(compressed, 'base64')
    return encoded.replace(b'\n', b'')
def intro(secret):
    """
    Returns opening instructions to player.
    """
    # NOTE(review): stub — returns the constant 1 rather than any instruction
    # text, and `secret` is unused; confirm whether real instructions were
    # intended here.
    return 1
import struct
def readLEFloat(f):
    """Read 4 bytes from file f and decode them as a little-endian float."""
    raw = f.read(4)
    (value,) = struct.unpack('<f', raw)
    return value
from typing import Dict
def _compute_metrics(hits_or_lcs: int, pred_len: int, target_len: int) -> Dict[str, float]:
"""This computes precision, recall and F1 score based on hits/lcs, and the length of lists of tokenizer
predicted and target sentences.
Args:
hits_or_lcs:
A number of matches or a length of the longest common subsequence.
pred_len:
A length of a tokenized predicted sentence.
target_len:
A length of a tokenized target sentence.
"""
precision = hits_or_lcs / pred_len
recall = hits_or_lcs / target_len
if precision == recall == 0.0:
return dict(precision=0.0, recall=0.0, fmeasure=0.0)
fmeasure = 2 * precision * recall / (precision + recall)
return dict(precision=precision, recall=recall, fmeasure=fmeasure) | bigcode/self-oss-instruct-sc2-concepts |
def _set_type(values, new_type):
"""Transforms a list of values into the specified new type. If the value has zero length, returns none
Args:
values: A list of values
new_type: A type class to modify the list to
Returns:
The values list modified to the new_type. If an element is empty, the element is set to None.
"""
new_vals = []
for i in values:
if len(i) > 0: # Some values may have len(0); we convert them to None to put into sql db
new_vals.append(new_type(i))
else:
new_vals.append(None)
return new_vals | bigcode/self-oss-instruct-sc2-concepts |
def first_true_pred(predicates, value):
    """
    Given a list of predicates and a value, return the index of first predicate,
    s.t. predicate(value) == True.
    If no such predicate found, raises IndexError.

    >>> first_true_pred([lambda x: x%2==0, lambda x: x%2==1], 13)
    1
    """
    matches = (index for index, pred in enumerate(predicates) if pred(value))
    try:
        return next(matches)
    except StopIteration:
        raise IndexError
def check_x_lim(x_lim, max_x):
    """
    Validate the x-limits pair and return it; a None argument yields the
    default (None, None).
    """
    if x_lim is None:
        x_lim = (None, None)
    if len(x_lim) != 2:
        raise ValueError("The x_lim parameter must be a list of length 2, or None")
    lower, upper = x_lim[0], x_lim[1]
    try:
        if lower is not None and lower < 0:
            raise ValueError("x_lim[0] cannot be negative")
        if upper is not None and upper > max_x:
            raise ValueError("x_lim[1] cannot be greater than the sequence length")
        if lower is not None and upper is not None and lower >= upper:
            raise ValueError("x_lim[0] must be less than x_lim[1]")
    except TypeError:
        raise TypeError("x_lim parameters must be numeric")
    return x_lim
def get_model_device(model):
    """Return the torch device that *model*'s parameters live on."""
    first_parameter = next(model.parameters())
    return first_parameter.device
def distx2(x1, x2):
    """
    Return the squared Euclidean distance between two 3D coordinates as a float.
    """
    dx = x1[0] - x2[0]
    dy = x1[1] - x2[1]
    dz = x1[2] - x2[2]
    return dx**2 + dy**2 + dz**2
def sp(dividend, divisor):
    """Returns the percentage for dividend/divisor, safely (0. on zero/empty divisor)."""
    if divisor:
        return 100. * float(dividend) / float(divisor)
    return 0.
def bdev_delay_create(client, base_bdev_name, name, avg_read_latency, p99_read_latency, avg_write_latency, p99_write_latency):
    """Construct a delay block device.

    Args:
        base_bdev_name: name of the existing bdev
        name: name of block device
        avg_read_latency: complete 99% of read ops with this delay
        p99_read_latency: complete 1% of read ops with this delay
        avg_write_latency: complete 99% of write ops with this delay
        p99_write_latency: complete 1% of write ops with this delay
    Returns:
        Name of created block device.
    """
    params = dict(
        base_bdev_name=base_bdev_name,
        name=name,
        avg_read_latency=avg_read_latency,
        p99_read_latency=p99_read_latency,
        avg_write_latency=avg_write_latency,
        p99_write_latency=p99_write_latency,
    )
    return client.call('bdev_delay_create', params)
def get_scan_indices(data):
    """Get the line numbers where scans start.

    Finds all lines whose characters 1-4 are the word 'Scan'. Although some of
    these lines have a source name, it appears to be correct only for the first
    scans in the file.

    Notes
    =====
    This is specific to Tid ASCII files.

    @param data : list of strings
    @return: tuple of (list of line indices starting with 'Scan', block size)
    """
    scan_indices = [index for index, line in enumerate(data) if line[1:5] == 'Scan']
    # Assume that all the data blocks in a file are the same size.
    block_size = scan_indices[1] - scan_indices[0]
    return scan_indices, block_size
from typing import Sequence
def _lcs(pred_tokens: Sequence[str], target_tokens: Sequence[str]) -> int:
"""Common DP algorithm to compute the length of the longest common subsequence.
Args:
pred_tokens:
A tokenized predicted sentence.
target_tokens:
A tokenized target sentence.
"""
lcs = [[0] * (len(pred_tokens) + 1) for _ in range(len(target_tokens) + 1)]
for i in range(1, len(target_tokens) + 1):
for j in range(1, len(pred_tokens) + 1):
if target_tokens[i - 1] == pred_tokens[j - 1]:
lcs[i][j] = lcs[i - 1][j - 1] + 1
else:
lcs[i][j] = max(lcs[i - 1][j], lcs[i][j - 1])
return lcs[-1][-1] | bigcode/self-oss-instruct-sc2-concepts |
import torch
def load_io_dataset(dataset_path: str, device=None):
    """
    Load the saved IO dataset, mapping tensor storage onto `device`.

    Args:
        dataset_path: Path to the checkpoint (.pt format)
        device: Device for the data.
    Returns: The loaded IO dataset
    """
    return torch.load(dataset_path, map_location=device)
def dice_similarity_coefficient(inter, union):
    """Computes the dice similarity coefficient.

    Args:
        inter (iterable): iterable of the intersections
        union (iterable): iterable of the unions
    """
    inter_total = sum(inter)
    union_total = sum(union)
    return 2 * inter_total / (union_total + inter_total)
def rint(f: float) -> int:
    """
    Round to an int using Python's round-half-to-even behaviour.

    rint(-0.5) = 0
    rint(0.5) = 0
    rint(0.6) = 1

    :param f: number
    :return: int
    """
    rounded = round(f, 0)
    return int(rounded)
def merge_with(f, *dicts):
    """Merge *dicts* left-to-right into a new dict. When a key occurs in more
    than one mapping, the stored value becomes f(earlier_val, later_val).
    Calling with no dicts returns {}."""
    merged = {}
    for mapping in dicts:
        for key, value in mapping.items():
            merged[key] = f(merged[key], value) if key in merged else value
    return merged
def ok(results):
    """Return whether or not all results are status 200 OK."""
    return all(result['data'].ok for result in results)
from io import StringIO
def write_tgf(graph, key_tag=None):
    """
    Export a graph in Trivial Graph Format.

    .. note::
        TGF graph export uses the Graph iternodes and iteredges methods to
        retrieve nodes and edges and 'get' the data labels. The behaviour of
        this process is determined by the single node/edge mixin classes and
        the ORM mapper.

    :param graph: Graph object to export
    :type graph: :graphit:Graph
    :param key_tag: node/edge data key
    :type key_tag: :py:str
    :return: TGF graph representation
    :rtype: :py:str
    """
    # Resolve the data key to export, falling back to the graph's default.
    tag = key_tag or graph.key_tag
    lines = []
    # Nodes first, then a '#' separator, then edges.
    for node in graph.iternodes():
        lines.append('{0} {1}\n'.format(node.nid, node.get(tag, default='')))
    lines.append('#\n')
    for edge in graph.iteredges():
        start, end = edge.nid
        lines.append('{0} {1} {2}\n'.format(start, end, edge.get(tag, default='')))
    return ''.join(lines)
def valid_elements(symbols, reference):
    """Tests a list for elements that are not in the reference.

    Args:
        symbols (list): The list whose elements to check.
        reference (list): The list containing all allowed elements.
    Returns:
        valid (bool): True if symbols only contains elements from the reference
            (always True when reference is None).
    """
    if reference is None:
        return True
    return all(sym in reference for sym in symbols)
import torch
def collate_fn(batch):
    """Collate a batch of (image, target) pairs into stacked tensors."""
    images = torch.stack([pair[0] for pair in batch])
    labels = torch.LongTensor([pair[1] for pair in batch])
    return images, labels
import torch
def any(input_, axis=None, keepdims=False):
    """Wrapper of `torch.any`.

    Parameters
    ----------
    input_ : DTensor
        Input tensor.
    axis : None or int or tuple of ints, optional
        Axis or axes to operate on, by default None
    keepdims : bool, optional
        If true, the axes along which the operation performed are left to size
        one, by default False

    Returns
    -------
    DTensor or bool:
    """
    data = input_._data
    if axis is None:
        return torch.any(data)
    return torch.any(data, dim=axis, keepdim=keepdims)
import hashlib
def file_hash_sha256(file):
    """Return SHA-256 hash of file as hexadecimal string."""
    digest = hashlib.sha256()
    with open(file, 'rb') as handle:
        digest.update(handle.read())
    return digest.hexdigest()
def percent_change(d1, d2):
    """Calculate the fractional change between two numbers (0.5 means +50%;
    note the result is NOT multiplied by 100).

    :param d1: Starting number
    :type d1: float
    :param d2: Ending number
    :type d2: float
    :return: Fractional change
    :rtype: float
    """
    delta = d2 - d1
    return delta / d1
import re
def isvowel(char):
    """Check whether char contains a Tibetan vowel sign.

    Args:
        char (str): string to be checked
    Returns:
        boolean: True if any Tibetan vowel sign occurs in char, False otherwise
    """
    vowels = ("\u0F74", "\u0F72", "\u0F7A", "\u0F7C")
    # The vowels are literal single characters, so plain substring tests
    # replace the regex and allow an early exit via any().
    return any(vowel in char for vowel in vowels)
def _indent(string, width=0): #pragma: no cover
""" Helper function to indent lines in printouts
"""
return '{0:>{1}}{2}'.format('', width, string) | bigcode/self-oss-instruct-sc2-concepts |
def is_list(node: dict) -> bool:
    """Check whether a node is a list node (carries a 'listItem' key)."""
    return 'listItem' in node.keys()
def _is_using_intel_oneapi(compiler_version):
"""Check if the Intel compiler to be used belongs to Intel oneAPI
Note: Intel oneAPI Toolkit first version is 2021.1
"""
return int(compiler_version.split(".")[0]) >= 2021 | bigcode/self-oss-instruct-sc2-concepts |
def celsius_to_fahrenheit(temperature_in_c):
    """Convert temperature from celsius to fahrenheit.

    PARAMETERS
    ----------
    temperature_in_c : float
        A temperature in degrees Celsius

    RETURNS
    -------
    temperature_in_f : float
        A temperature in degrees Fahrenheit
    """
    # Removed a stray debug print() that polluted stdout on every call.
    return 9 / 5 * temperature_in_c + 32
def path_probability(trans_mat, quad_to_matrix_index, path):
    """
    Computes the probability of a given path
    :param trans_mat: trained transition matrix (numpy matrix)
    :param quad_to_matrix_index: dictionary to keep track of indicies
    :param path: input path of neo4j types
    :return: float representing probability of seeing that path generated by the MArkov chain
    """
    product = 1
    tuple_list = []
    # Collect 2-element windows starting at each odd offset of the path;
    # presumably the path alternates node/relationship entries — TODO confirm.
    for path_index in range(1, len(path) - 2 + 1):
        if path_index % 2 == 1:
            tup = tuple(path[path_index:path_index + 2])
            tuple_list.append(tup)
    # Multiply the transition probability of each consecutive pair of windows
    # (a 4-tuple "quad"), looked up as a (row, col) index into trans_mat.
    for tup_index in range(len(tuple_list) - 2 + 1):
        quad = tuple_list[tup_index] + tuple_list[tup_index + 1]
        (i, j) = quad_to_matrix_index[quad]
        product *= trans_mat[i, j]
    return product
def frameToCell(frame, info):
    """
    Convert a frame and game info to a cell representation: the stringified
    tuple of (x tile, y tile, act, zone), with 32-pixel tiles.
    """
    cell = (info['x'] // 32, info['y'] // 32, info['act'], info['zone'])
    return str(cell)
def usage(wf):
    """CLI usage instructions."""
    # Returns the enclosing module's docstring as the help text; `wf` is
    # accepted for interface compatibility but unused — TODO confirm callers.
    return __doc__
from typing import List
import torch
def conv(
    in_channels: int,
    out_channels: int,
    stride: int = 1,
    groups: int = 1,
    kernel_size: int = 3,
    padding: int = 1,
) -> List[torch.nn.Module]:
    """Conv2d + BatchNorm2d + ReLU stack (3x3 convolution with padding by default)."""
    convolution = torch.nn.Conv2d(
        in_channels,
        out_channels,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        groups=groups,
        bias=False,
    )
    normalization = torch.nn.BatchNorm2d(out_channels)
    activation = torch.nn.ReLU(inplace=True)
    return [convolution, normalization, activation]
def join_hostname_index(hostname, index):
    """Joins a hostname with an index, reversing split_hostname_index()."""
    return f"{hostname}~{index}"
def t03_SharingIsPassByReference(C, pks, crypto, server):
    """Verifies that updates to a file are sent to all other users who have that
    file."""
    alice, bob = C("alice"), C("bob")
    alice.upload("k", "v")
    token = alice.share("bob", "k")
    bob.receive_share("alice", "k", token)
    # Half credit for the initial read, half for seeing bob's later update.
    score = bob.download("k") == "v"
    bob.upload("k", "q")
    score += alice.download("k") == "q"
    return score / 2.0
import gzip
import bz2
def open_file(file_name, flags='r'):
    """Opens a regular or compressed file (decides on the name)

    :param file_name a name of the file, it has a '.gz' or
                     '.bz2' extension, we open a compressed stream.
    :param flags open flags such as 'r' or 'w'
    """
    compressed_openers = {'.gz': gzip.open, '.bz2': bz2.open}
    for suffix, opener in compressed_openers.items():
        if file_name.endswith(suffix):
            return opener(file_name, flags)
    return open(file_name, flags)
def _average_gradients_across_replicas(replica_context, gradients):
"""Computes the average gradient across replicas.
This computes the gradient locally on this device, then copies over the
gradients computed on the other replicas, and takes the average across
replicas.
This is faster than copying the gradients from TPU to CPU, and averaging
them on the CPU (which is what we do for the losses/fetches).
Args:
replica_context: the return value of `tf.distribute.get_replica_context()`.
gradients: The output of tape.gradients(loss, variables)
Returns:
A list of (d_loss/d_varabiable)s.
"""
# We must remove any Nones from gradients before passing them to all_reduce.
# Nones occur when you call tape.gradient(loss, variables) with some
# variables that don't affect the loss.
# See: https://github.com/tensorflow/tensorflow/issues/783
gradients_without_nones = [g for g in gradients if g is not None]
original_indices = [i for i, g in enumerate(gradients) if g is not None]
results_without_nones = replica_context.all_reduce('mean',
gradients_without_nones)
results = [None] * len(gradients)
for ii, result in zip(original_indices, results_without_nones):
results[ii] = result
return results | bigcode/self-oss-instruct-sc2-concepts |
def all_data(request, data, data_missing):
    """Parametrized fixture giving 'data' and 'data_missing'"""
    options = {'data': data, 'data_missing': data_missing}
    return options.get(request.param)
import torch
def make_prediction(neural_net, save_path, images, classes, p = True):
    """
    Restore weights into a network and predict class labels for images.
    --------------------------
    parameters:
        neural_net: a torch neural network
        save_path: path to load neural network from
        images: images to predict class of
        classes: the possible labels
        p: whether to print result or not
    """
    state = torch.load(save_path)
    neural_net.load_state_dict(state)
    outputs = neural_net(images)
    predicted = torch.max(outputs, 1)[1]
    if p:
        labels = ' '.join('%5s' % classes[predicted[j]]
                          for j in range(len(images)))
        print('Predicted: ', labels)
    return predicted
def _extract_month_from_filename(fname):
"""Extract month number from precipitation file name"""
return str(fname[7:].split('.tif')[0]) | bigcode/self-oss-instruct-sc2-concepts |
def format_pkg_data(master_package_data: list):
    """Parse the pypi.org anchor-tag response for each package into a dict of
    filename, URL, sha and sha type. The raw ``pkg_data`` looks like:
    b'<a href="https://files.pythonhosted.org/packages/...whl#sha256=e455...d06"
    data-requires-python="...">docker-4.4.1-py2.py3-none-any.whl</a><br/>'
    """
    formatted_package_data = []
    for pkg_dict in master_package_data:
        # The href attribute is the second '"'-delimited field.
        url_and_sha = pkg_dict['pkg_data'].split('"')[1]
        url, _, sha_fragment = url_and_sha.partition('#')
        formatted_package_data.append({
            'pkg_filename': pkg_dict['filename'],
            'pkg_url': url,
            'pkg_sha': sha_fragment.split('=')[1],
            'sha_type': "sha256",
        })
    return formatted_package_data
def bytes_to_text(byte_array, encoding='UTF-8'):
    """
    Decode a byte array to a string following the given encoding.

    :param byte_array: Byte array to decode.
    :param encoding: String encoding (default UTF-8)
    :return: a decoded string
    """
    raw = bytes(byte_array)
    return raw.decode(encoding)
import logging
def has_file_handlers(logger):
    """Check whether a logger has at least one FileHandler attached.

    Parameters:
        * logger (object): Logger object
    Returns:
        * bool: True if any handler is a logging.FileHandler
    """
    return any(isinstance(handler, logging.FileHandler) for handler in logger.handlers)
def get_gse_gsm_info(line):
    """
    Extract GSE and GSM info from one CSV line.

    Args:
        line: the entry to process
    Returns:
        None for the 'gse_id' header line, otherwise (gse_id, [remaining fields])
    """
    fields = line.strip().split(",")
    if fields[0] == "gse_id":
        return None
    return fields[0], fields[1:]
def filter_title_transcriptions(df):
    """Filter the title transcriptions: rows with motivation 'describing'
    and tag 'title'."""
    mask = (df['motivation'] == 'describing') & (df['tag'] == 'title')
    return df[mask]
def draw_batches(data, batch_size=128):
    """ Create a list of batches for the given data.

    Args:
        data: the dataframe returned by load_data
        batch_size: number of samples to include in each batch
    Returns:
        a list of batches. Each batch is a part of the data dataframe with
        at most batch_size rows (the final batch may be smaller).
    """
    total = data.shape[0]
    starts = range(0, total, batch_size)
    # iloc clips at the end, so the last slice is the (possibly short) remainder.
    return [data.iloc[start:start + batch_size] for start in starts]
import glob
def count_traj_files(path, extension):
    """Count the trajectory files with the given extension in a directory.

    path : string
        Path of directory containing trajectory files.
    extension : string
        File extension type for trajectory files.
        EX) 'dcd', 'xtc', ...
    """
    # glob.glob1 is an undocumented private helper; use the public glob API.
    import os
    return len(glob.glob(os.path.join(path, "*." + extension)))
import random
import string
def create_temporary_cache_directory_name() -> str:
    """Create a temporary cache directory name: '.tmp-' plus 10 random letters."""
    suffix = ''.join(random.choice(string.ascii_letters) for _ in range(10))
    return ".tmp-" + suffix
def get_dict_value(dict_var, key, default_value=None, add_if_not_in_map=True):
    """
    Like dict.get, except it also tolerates dict_var not being a dict at all.

    @param dict_var: the variable that is either a dict or something else
    @param key: key to look up in dict
    @param default_value: return value if dict_var is not of type dict or key is not in dict_var
    @param add_if_not_in_map: when True and dict_var is a dict, store
        default_value under key on a miss
    @return:
      either default_value or dict_var[key]
    """
    if isinstance(dict_var, dict) and key in dict_var:
        return dict_var[key]
    # Fix: the write-back previously ran even when dict_var was not a dict,
    # raising TypeError and defeating the purpose of the isinstance check.
    if add_if_not_in_map and isinstance(dict_var, dict):
        dict_var[key] = default_value
    return default_value
def cleanly(text: str):
    """
    Splits the text into words at single spaces, removing excess spaces.
    """
    return [word for word in text.split(' ') if word != '']
def freestyle_table_params(rows, aging):
    """Returns parameters for OpenSQL freestyle request"""
    params = {'rowNumber': str(rows)}
    params['dataAging'] = str(aging).lower()
    return params
def manhattan(point1, point2):
    """Computes distance between 2D points using manhattan metric

    :param point1: 1st point
    :type point1: list
    :param point2: 2nd point
    :type point2: list
    :returns: Distance between point1 and point2
    :rtype: float
    """
    dx = abs(point1[0] - point2[0])
    dy = abs(point1[1] - point2[1])
    return dx + dy
def strip_if_scripts(if_outscript, if_inscript):
    """
    Given an OutScriptIf and an InScriptIf satisfying it, return the "active"
    parts of them: the "true" branch when if_inscript.condition_value is True,
    otherwise the "false" branch.

    :return: a 2-tuple of (OutScript, InScript)
    """
    branch = if_inscript.condition_value
    chosen_outscript = if_outscript.inner_scripts[branch]
    # Inner inscript with script_type matching the chosen outscript branch.
    chosen_inscript = if_inscript.get_inner_inscript(chosen_outscript)
    return chosen_outscript, chosen_inscript
def getadminname(s, admindf):
    """Convert an admin id to its name string, or None when the id is unknown.

    s: admin id to look up
    admindf: DataFrame with 'id' and 'name' columns
    """
    matches = admindf[admindf.id == s].name.values
    # Fix: truthiness of a numpy array with more than one element raises
    # ValueError ("ambiguous"); test the length explicitly instead.
    if len(matches) > 0:
        return matches[0]
    return None
def lap_time_to_seconds(time_str):
    """Returns the lap time string as a float representing total seconds.

    E.g. '1:30.202' -> 90.202
    """
    minutes, seconds = time_str.split(':')
    return int(minutes) * 60 + float(seconds)
def process_instance(el):
    """
    Flatten a 'process instance' element from a .mxml file into a list of
    dicts, one per audit-trail entry (the element's first child is skipped).
    """
    trace_id = el.get("id")
    records = []
    for entry in el[1:]:
        record = {"TraceId": trace_id}
        for item in entry:
            # 'Data' elements carry their payload in their last child.
            record[item.tag] = item[-1].text if item.tag == 'Data' else item.text
        records.append(record)
    return records
def Score(low, high, n):
    """Score whether the actual value falls in the range.

    Hitting the posts counts as 0.5, -1 is invalid.
    low: low end of range
    high: high end of range
    n: actual value
    Returns: -1, 0, 0.5 or 1
    """
    if n is None:
        return -1
    if n == low or n == high:
        return 0.5
    return 1 if low < n < high else 0
def _matches_section_title(title, section_title):
"""Returns whether title is a match for a specific section_title.
Example:
_matches_section_title('Yields', 'yield') == True
Args:
title: The title to check for matching.
section_title: A specific known section title to check against.
"""
title = title.lower()
section_title = section_title.lower()
return section_title in (title, title[:-1]) # Supports plurals / some typos. | bigcode/self-oss-instruct-sc2-concepts |
def _find_private_network(oneandone_conn, private_network):
"""
Validates the private network exists by ID or name.
Return the private network ID.
"""
for _private_network in oneandone_conn.list_private_networks():
if private_network in (_private_network['name'],
_private_network['id']):
return _private_network['id'] | bigcode/self-oss-instruct-sc2-concepts |
def postprocess(output_val):
    """
    Identity postprocess: simply returns the input ``output_val``.

    :param output_val: dictionary mapping output_data to output_layers
    :return: ``output_val``
    """
    result = output_val
    return result
def mm2m(millimeters):
    """Convert a length in millimeters to meters."""
    meters = millimeters / 1000
    return meters
import torch
def _gen_mask(valid_step: torch.Tensor, batch_size: int, seq_len: int):
"""
Mask for dealing with different lengths of MDPs
Example:
valid_step = [[1], [2], [3]], batch_size=3, seq_len = 4
mask = [
[0, 0, 0, 1],
[0, 0, 1, 1],
[0, 1, 1, 1],
]
"""
assert valid_step.shape == (batch_size, 1)
assert (1 <= valid_step).all()
assert (valid_step <= seq_len).all()
device = valid_step.device
mask = torch.arange(seq_len, device=device).repeat(batch_size, 1)
mask = (mask >= (seq_len - valid_step)).float()
return mask | bigcode/self-oss-instruct-sc2-concepts |
import logging
def norm_range(voltage):
    """Check whether all voltage values fall within the normal +/- 300 mV range.

    If any reading is outside the range a warning entry is written to the log,
    naming the problem.

    Parameters
    ----------
    voltage : list
        List of floats containing the voltage values

    Returns
    -------
    bool
        True if all readings are within range, False otherwise
    """
    within_range = all(-300.0 <= elem <= 300.0 for elem in voltage)
    if not within_range:
        logging.warning('The voltage data contains an element outside'
                        ' of the normal range of +/- 300 mV')
    return within_range
def is_test_directory_name(directory_name):
    """
    Returns whether the given directory name is a name of a test directory.

    Parameters
    ----------
    directory_name : `str`
        A directory's name.

    Returns
    -------
    is_test_directory_name : `bool`
    """
    return (
        directory_name == 'tests'
        or directory_name.startswith('test_')
        or directory_name.endswith('_tests')
    )
def waterGmKgDryToPpmvDry(q):
    """
    Convert water vapor Grams H2o / Kg dry air to ppmv dry air.
    """
    molar_mass_dry_air = 28.9648
    molar_mass_water = 18.01528
    return (q * 1e3 * molar_mass_dry_air) / molar_mass_water
def read_file(path):
    """
    Read file at `path`.

    Return a list of lines, split on '\n' (a trailing newline yields a final
    empty string, matching str.split semantics).
    """
    # Removed a dead `lines = []` local that was never used.
    with open(path, 'r') as srcfile:
        return srcfile.read().split('\n')
def _get_fields(attrs, field_class, pop=False):
"""
Get fields from a class.
:param attrs: Mapping of class attributes
:param type field_class: Base field class
:param bool pop: Remove matching fields
"""
fields = [
(field_name, field_value)
for field_name, field_value in attrs.items()
if issubclass(field_value, field_class) or
isinstance(field_value, field_class)
]
if pop:
for field_name, _ in fields:
del attrs[field_name]
return fields | bigcode/self-oss-instruct-sc2-concepts |
from typing import List
def recall_score(relevant: List, recovered: List) -> float:
    """Recall score: which fraction of the relevant documents were recovered.

    Returns 0.0 when there are no relevant documents (previously this raised
    ZeroDivisionError).
    """
    if not relevant:
        return 0.0
    # Recovered relevant
    recovered_relevant = [doc for doc in recovered if doc in relevant]
    return len(recovered_relevant) / len(relevant)
from typing import Dict
from typing import List
def get_retrohunt_rules(r: Dict) -> List[str]:
    """Extracts rule names used within a retrohunt."""
    rules_text = r.get("attributes", {}).get("rules", "")
    names = []
    for raw_line in rules_text.splitlines():
        stripped = raw_line.strip()
        if "rule" not in stripped[:4]:
            continue
        # Drop the body '{', any tag list after ':', then the 'rule' keyword.
        name = stripped.split("{")[0].split(":")[0][4:].strip()
        names.append(name)
    return names
import click
def require_region(ctx, param, value):
    """
    Require region to be set in parameter or in context.

    :param ctx: Context
    :param param: Click Parameter
    :param value: Parameter value
    :return: Parameter value
    """
    if value or ctx.obj.config.region:
        return value
    raise click.BadParameter(f'{param.name} is required for {ctx.command.name}')
def torr_to_pascal(torr):
    """Convert Torr to Pascal (1 atm = 760 Torr = 101325 Pa)."""
    return torr * 101325.0 / 760.0
def get_item(d, k):
    """Attempt to get an item from d at key k.

    If the key is missing and uses the list selector suffix '[]', return the
    first item of d[k-without-suffix], or None for an empty list. Any other
    missing key yields {}."""
    try:
        return d[k]
    except KeyError:
        pass
    if k.endswith('[]'):
        lst = d[k[:-2]]
        try:
            return lst[0]
        except IndexError:
            # empty list -> None
            return None
    return {}
import random
def return_random_from_word(word):
    """
    Given an object with a length (e.g. a TextMobject), return a randomly
    shuffled list of the indices [0, len(word)).

    INPUT:  word = TextMobject("Hello")  ->  range [0, 1, 2, 3, ...]
    OUTPUT: e.g. [3, 0, 2, 1]  # random order
    """
    indices = list(range(len(word)))
    random.shuffle(indices)
    return indices
def transition_model(corpus, page, damping_factor):
    """
    Return a probability distribution over which page to visit next,
    given a current page.

    With probability `damping_factor`, choose a link at random linked to by
    `page`. With probability `1 - damping_factor`, choose a link at random
    from all pages in the corpus. A page with no outgoing links yields a
    uniform distribution over the whole corpus.
    """
    pages = list(corpus.keys())
    if not corpus[page]:
        # Dangling page: every page is equally likely.
        uniform = 1 / len(corpus)
        return {p: uniform for p in pages}
    # Base probability of a random jump anywhere in the corpus.
    base = (1 - damping_factor) / len(corpus)
    distribution = {p: base for p in pages}
    # Extra probability mass shared among the pages linked from `page`.
    share = damping_factor / len(corpus[page])
    for link in corpus[page]:
        distribution[link] += share
    return distribution
def is_link_field(field):
    """Return True when *field* contains a dot and should be treated as a link."""
    return field.find(".") != -1
def minForestSizeTLCovers(tlcovs):
    """
    Prune top-level covers for minimum forest size.

    Inputs:
      tlcovs: A list of top-level covers as returned by explain; each entry
        is a tuple (u, k, d_min, d_max, ts) where sum(ts) is that cover's
        forest size.
    Outputs:
      tlcovs_fs_min: The covers whose forest size equals the minimum.
      fs_min: The minimum forest size found.
    """
    sizes = [sum(cover[4]) for cover in tlcovs]
    fs_min = min(sizes)
    tlcovs_fs_min = [cover for cover, size in zip(tlcovs, sizes) if size == fs_min]
    return tlcovs_fs_min, fs_min
def get_all(model, scenario):
    """
    :param model: a database model with fields scenario and name which are unique together
    :param scenario: the scenario whose records should be fetched
    :return: a dictionary mapping each matching record's name field to the record itself
    """
    return {
        record.name: record
        for record in model.objects.filter(scenario=scenario)
    }
def convert_from_bytes_if_necessary(prefix, suffix):
    """
    Depending on how we extract data from pysam we may end up with either
    a string or a byte array of nucleotides. For consistency and simplicity,
    we want to only use strings in the rest of our code.
    """
    return tuple(
        part.decode('ascii') if isinstance(part, bytes) else part
        for part in (prefix, suffix)
    )
def get_display_settings_for_lid(local_identifier, label):
    """ Search a PDS4 label for Display_Settings of a data structure with local_identifier.

    Parameters
    ----------
    local_identifier : str or unicode
        The local identifier of the data structure to which the display settings belong.
    label : Label or ElementTree Element
        Label for a PDS4 product with-in which to look for the display settings.

    Returns
    -------
    Label, ElementTree Element or None
        Found Display_Settings section with same return type as *label*, or None if not found.
    """
    for display in label.findall('.//disp:Display_Settings'):
        # The local_identifier_reference may live in either the DISP or the
        # PDS namespace, depending on the display-dictionary version.
        candidates = (
            display.findtext('.//disp:local_identifier_reference'),
            display.findtext('.//local_identifier_reference'),
        )
        if local_identifier in candidates:
            return display
    return None
from typing import List
import re
def find_assets(html: str) -> List[str]:
    """
    Return a list of assets found in the given HTML string
    """
    # Quoted paths ending in a known asset extension, matched
    # case-insensitively.
    asset_pattern = re.compile(
        r'"([^"]+\.(?:css|js|jpg|jpeg|gif|tiff|png|bmp|svg|ico|pdf))"',
        re.IGNORECASE,
    )
    return asset_pattern.findall(html)
def get_float_format(number, places=2):
    """
    Return number with specific float formatting

    NOTE(review): the ``number % 100`` guard leaves exact multiples of 100
    unformatted (returned via ``str``); possibly ``% 1`` was intended --
    behavior kept as-is, confirm with the original author.
    """
    if not number % 100:
        return str(number)
    return '{:.{p}f}'.format(number, p=places)
def scheme_ij(u, u_n, u_nm1, k_1, k_2, k_3, k_4,
              f, dt2, Cx2, Cy2, x, y, t_1,
              i, j, im1, ip1, jm1, jp1):
    """
    Right-hand side of finite difference at point [i,j].
    im1, ip1 denote i-1, i+1, resp. Similar for jm1, jp1.
    t_1 corresponds to u_n (previous time level relative to u).

    k_1..k_4 are scheme weights, dt2 the squared time step, and Cx2/Cy2
    the squared Courant numbers for the x and y directions.
    """
    u_ij = - k_2*u_nm1[i,j] + k_1*2*u_n[i,j]
    u_xx = k_3*Cx2*(u_n[im1,j] - 2*u_n[i,j] + u_n[ip1,j])
    # Bug fix: the y-direction second difference must be scaled by Cy2,
    # not Cx2 (the Cy2 parameter was previously unused).
    u_yy = k_3*Cy2*(u_n[i,jm1] - 2*u_n[i,j] + u_n[i,jp1])
    f_term = k_4*dt2*f(x, y, t_1)
    return u_ij + u_xx + u_yy + f_term
import csv
import itertools
def gen_csv(f):
    """peek at rows from a csv and start yielding when we get past the comments
    to a row that starts with an int"""
    def _row_starts_with_int(row):
        try:
            int(row[0][0])
        except (ValueError, IndexError):
            return False
        return True

    return itertools.dropwhile(
        lambda row: not _row_starts_with_int(row), csv.reader(f))
def countissue(s):
    """Count number of issues.

    Returns 0 for falsy values (None, empty string/list) and for the
    literal string 'None'; otherwise returns len(s).
    """
    if not s or s == 'None':
        return 0
    return len(s)
def to_float(value):
    """Convert *value* to float, mapping None to 0.0.

    Args:
        value: the value to convert (may be None).
    Returns:
        The converted float, or 0.0 when value is None.
    """
    return 0.0 if value is None else float(value)
from enum import Enum
def AgeEnum(ctx):
    """Age Enumeration.

    NOTE(review): this call uses the construct-library ``Enum`` style
    (a subconstruct plus keyword name->value mappings and a ``default``),
    not the stdlib ``enum.Enum`` imported at the top of this file --
    calling the stdlib Enum this way would fail.  Confirm the intended
    import against the original project.
    """
    return Enum(
        ctx,
        what=-2,
        unset=-1,
        dark=0,
        feudal=1,
        castle=2,
        imperial=3,
        postimperial=4,
        dmpostimperial=6,  # value 5 is skipped -- presumably intentional; verify
        default='unknown'
    )
def fibo_even_sum(limit: int) -> int:
    """Compute the sum of the even Fibonacci numbers that are <= limit.

    Walks the sequence with tuple assignment, accumulating only the
    even terms (trial division).

    :param limit: Max value of the fibonacci range to sum.
    :return: Sum of the even fibonacci numbers that are <= limit.
    """
    total = 0
    a, b = 0, 1
    while a <= limit:
        if a % 2 == 0:
            total += a
        a, b = b, a + b
    return total
import re
def _convert_camelcase(name, seperator=' '):
"""ExtraCondensed -> Extra Condensed"""
return re.sub('(?!^)([A-Z]|[0-9]+)', r'%s\1' % seperator, name) | bigcode/self-oss-instruct-sc2-concepts |
def spaceship(a, b):
    """3-way comparison like the <=> operator in perl"""
    if a < b:
        return -1
    if a > b:
        return 1
    return 0
def _repo_fixture(request) -> str:
"""Create a repository name from the test function name."""
return request.node.nodeid.replace("/", "-").replace(":", "-").replace(".py", "") | bigcode/self-oss-instruct-sc2-concepts |
def outputids2words(id_list, vocab, article_oovs):
    """Map output ids to words, including mapping in-article OOVs from their
    temporary ids back to the original OOV strings (pointer-generator mode).

    Args:
        id_list: list of ids (integers)
        vocab: Vocabulary object
        article_oovs: list of OOV words (strings) in the order corresponding
            to their temporary article OOV ids (assigned in pointer-generator
            mode), or None (in baseline mode)

    Returns:
        words: list of words (strings)
    """
    words = []
    for word_id in id_list:
        try:
            words.append(vocab.id2word(word_id))  # might be [UNK]
        except ValueError:  # word_id is outside the vocabulary
            error_info = "Error: model produced a word ID that isn't in the vocabulary. This should not happen in baseline (no pointer-generator) mode"
            assert article_oovs is not None, error_info
            # Temporary OOV ids start right after the vocabulary.
            oov_index = word_id - vocab.size()
            try:
                words.append(article_oovs[oov_index])
            except IndexError:  # word_id does not map to any article OOV
                print(id_list)
                raise ValueError('Error: model produced word ID %i which corresponds to article OOV %i but this example only has %i article OOVs' % (word_id, oov_index, len(article_oovs)))
    return words
def convert_scan_dict_to_string(scan_dict):
    """
    converts parsed ImageScanStatus dictionary to string.

    :param scan_dict: {'HIGH': 64, 'MEDIUM': 269, 'INFORMATIONAL': 157, 'LOW': 127, 'CRITICAL': 17, 'UNDEFINED': 6}
    :return: HIGH 64, MEDIUM 269, INFORMATIONAL 157, LOW 127, CRITICAL 17, UNDEFINED 6
             (empty string for a falsy input)
    """
    if not scan_dict:
        return ''
    try:
        return ', '.join(key + " " + str(value)
                         for key, value in scan_dict.items())
    except AttributeError:
        # Input was truthy but not dict-like (no .items()).
        return "Failed to retrieve repository scan results"
import math
def entropy(data):
    """
    Calculate the Shannon entropy (bits per symbol) of *data*,
    used by entropy_graph().

    Accepts str (per character) or bytes (per byte).  Previously bytes
    input crashed in Python 3 (iterating bytes yields ints, which broke
    ``ord``), characters with code points >= 256 overflowed the fixed
    256-entry bin table, and empty input raised ZeroDivisionError;
    empty input now returns 0.0.
    """
    length = len(data)
    if length == 0:
        return 0.0
    # Frequency count per symbol; works for any hashable element.
    counts = {}
    for symbol in data:
        counts[symbol] = counts.get(symbol, 0) + 1
    h = 0.0
    for count in counts.values():
        p = count / length
        h -= p * math.log(p, 2)
    return h
def increment_count(val):
    """Increment the given count.

    Args:
        val: the current count.
    Returns:
        val + 1.
    """
    incremented = val + 1
    return incremented
import hashlib
def md5sum(infile):
    """Calculate the md5sum of a file.

    Reads the file in chunks so arbitrarily large files can be hashed
    without loading them fully into memory.
    """
    digest = hashlib.md5()
    chunk_size = 128 * digest.block_size
    with open(infile, 'rb') as fh:
        while True:
            chunk = fh.read(chunk_size)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
import json
import collections
def extract_from_json(path, key, values, proc=(lambda x: x)):
    """Extracts and parses data from json files and returns a dictionary.

    Args:
        path: string, path to input data (one JSON object per line).
        key: string, name of key column.
        values: string, name of column containing values to extract.
        proc: function applied to each raw key before use. Follows the
            signature:
            * Args:
              * x: string or tuple of string
            * Returns:
              string

    Returns:
        Dictionary of parsed data, restricted to keys that occur exactly
        once in the input.
    """
    data = {}
    seen = []
    with open(path) as handle:
        for raw_line in handle:
            record = json.loads(raw_line)
            processed_key = proc(record[key])
            data[processed_key] = record[values]
            seen.append(processed_key)
    counts = collections.Counter(seen)
    # Duplicated keys are dropped entirely.
    return {k: data[k] for k in seen if counts[k] == 1}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.