# NOTE(review): dataset-export header residue (not valid Python) converted to a
# comment so the file parses. Original text:
#   seed stringlengths 1 14k | source stringclasses 2 | values | |---|---|
from typing import List
def _merge(nums: List[int], left: int, mid: int, right: int,
aux: List[int]) -> int:
"""
Helper function to merge the given sub-list.
:param nums: list[int]
:param left: int
:param mid: int
:param right: int
:param aux: list[int]
:return: int
"""
left_ptr, right_ptr = left, mid + 1
merged_ptr = left
inversion_count = 0
while left_ptr <= mid and right_ptr <= right:
if nums[left_ptr] <= nums[right_ptr]:
aux[merged_ptr] = nums[left_ptr]
left_ptr += 1
inversion_count += right_ptr - (mid + 1)
else:
aux[merged_ptr] = nums[right_ptr]
right_ptr += 1
merged_ptr += 1
while left_ptr <= mid:
aux[merged_ptr] = nums[left_ptr]
left_ptr += 1
merged_ptr += 1
inversion_count += right_ptr + 1 - (mid + 1)
while right_ptr <= right:
aux[merged_ptr] = nums[right_ptr]
right_ptr += 1
merged_ptr += 1
nums[left:right + 1] = aux[left:right + 1]
return inversion_count
# Running time complexity: O(n) | bigcode/self-oss-instruct-sc2-concepts |
def int_to_byte(n):
    """Convert an int in [0, 255] to a single big-endian byte.

    Raises OverflowError if ``n`` does not fit in one byte.
    """
    return n.to_bytes(1, byteorder='big')
def get_coords_from_line(line):
    """Split a whitespace-separated line and parse out point coordinates.

    Layouts supported (by token count):
      * >= 3 tokens: x y z position only
      * exactly 6:   position followed by a 3-component normal
      * >= 7:        position followed by a 4-component RGBA color
      * >= 10:       position, color, then a 3-component normal

    Return:
        A tuple (position, color, normal); parts not present are None.
    """
    values = line.split()
    pt = None
    pt_n = None
    pt_col = None
    # The first three are always the point coords.
    if len(values) >= 3:
        pt = [float(values[0]), float(values[1]), float(values[2])]
        if len(values) == 6:
            # With exactly 6 total, the next three are normal coords.
            pt_n = [float(values[3]), float(values[4]), float(values[5])]
        elif len(values) >= 7:  # Otherwise the next 4 are colors
            pt_col = [float(values[3]), float(values[4]),
                      float(values[5]), float(values[6])]
            if len(values) >= 10:  # And if there are more, those are the normals
                pt_n = [float(values[7]), float(values[8]), float(values[9])]
    return pt, pt_col, pt_n
def get_dataset_names(datasets):
    """
    Return the names of the datasets.

    Parameters
    ----------
    datasets : tuple
        Iterable of (dataset_name, df_data) pairs.

    Returns
    -------
    dataset_names : list
        List of dataset names, in input order.
    """
    return [dataset_name for dataset_name, _ in datasets]
def angle_offset(base, angle):
    """
    Given a base bearing and a second bearing, return the offset in degrees.

    Positive offsets are clockwise/to the right, negative offsets are
    counter-clockwise/to the left. The result lies in (-180, 180], assuming
    both inputs are within one full turn of each other.
    """
    # Rotate the angle towards 0 by base.
    offset = angle - base
    if offset <= -180:
        # Bring it back into the (-180, 180] range.
        return 360 + offset
    if offset > 180:
        return offset - 360
    return offset
def is_leaf(node):
    """Return True if :code:`node` is a leaf node (i.e. not a list)."""
    return not isinstance(node, list)
def raiz(x, y):
    r"""
    The y-th root of a number.

    .. math::
        \sqrt[y]{x}

    Args:
        x (float): Radicand.
        y (float): Index (degree) of the root.

    Returns:
        float: The root.

    Raises:
        ZeroDivisionError: If ``y`` is 0.
    """
    # Raw docstring (r""") keeps the LaTeX ``\sqrt`` escape from being
    # interpreted as a string escape sequence.
    return x**(1/y)
def is_tar(name):
    """Check if ``name`` looks like a tarball (by file extension)."""
    # str.endswith accepts a tuple of suffixes — one call instead of three.
    return name.endswith((".tar", ".gz", ".bz2"))
def format_date(datestring):
    """Convert a long ISO date to the day date.

    input: 2014-05-01T02:26:28Z
    output: 2014-05-01
    """
    # The first 10 characters of an ISO 8601 timestamp are YYYY-MM-DD.
    return datestring[0:10]
import torch
def _train_step_ddc(x_batch, y_batch, model):
    """
    Single training step for DDC.

    Args:
        x_batch: Batch of inputs
        y_batch: Batch of labels
        model: ddc.DynamicDropConnect object

    Returns:
        Model output, batch loss
    """
    # Forward pass on the current batch.
    output = model(x_batch)
    # Batch loss as the cross-entropy loss.
    batch_loss = torch.nn.functional.cross_entropy(output, y_batch)
    # Gradients of the loss w.r.t. model parameters, detached from the graph
    # so the update methods receive plain tensors.
    gradients = torch.autograd.grad(batch_loss, model.parameters())
    gradients = [g.detach() for g in gradients]
    # Optimize all parameters.
    model.update_params(gradients)
    # Apply weight probability update.
    model.update_probs(gradients)
    return output, batch_loss
def compare_file(file1, file2):
    """
    Compare two files line by line.

    :param file1: path to the first file
    :param file2: path to the second file
    :return: True if both files have identical content, else False
    """
    line1 = True
    line2 = True
    with open(file1, 'r') as f_1, open(file2, 'r') as f_2:
        # readline() returns '' (falsy) at EOF, which ends the loop after the
        # final comparison has been made.
        while line1 and line2:
            line1 = f_1.readline()
            line2 = f_2.readline()
            if line1 != line2:
                return False
    return True
def get_bar_order(plot_params):
    """
    Get which cumulative bars to show at the top of the graph given what level
    of detail is being specified.

    Parameters
    ----------
    plot_params: dict
        Dictionary of plotting parameters. Here, `all_pos_contributions`,
        `detailed`, `show_score_diffs`, and `show_total` are used.

    Returns
    -------
    List of strs indicating which cumulative bars to show
    """
    if plot_params["detailed"]:
        if plot_params["show_score_diffs"]:
            bar_order = [
                "neg_s",
                "pos_s",
                "neg_s_neg_p",
                "neg_s_pos_p",
                "pos_s_neg_p",
                "pos_s_pos_p",
            ]
        else:
            bar_order = ["neg_s_neg_p", "neg_s_pos_p", "pos_s_neg_p", "pos_s_pos_p"]
    else:
        if not plot_params["all_pos_contributions"]:
            bar_order = ["neg_total", "pos_total"]
        else:
            bar_order = ["all_pos_pos", "all_pos_neg"]
    if plot_params["show_total"]:
        # The total bar always comes first when requested.
        bar_order = ["total"] + bar_order
    return bar_order
def _pad_name(name, pad_num=10, quotes=False):
"""
Pad a string so that they all line up when stacked.
Parameters
----------
name : str
The string to pad.
pad_num : int
The number of total spaces the string should take up.
quotes : bool
If name should be quoted.
Returns
-------
str
Padded string
"""
l_name = len(name)
if l_name < pad_num:
pad = pad_num - l_name
if quotes:
pad_str = "'{name}'{sep:<{pad}}"
else:
pad_str = "{name}{sep:<{pad}}"
pad_name = pad_str.format(name=name, sep='', pad=pad)
return pad_name
else:
return '{0}'.format(name) | bigcode/self-oss-instruct-sc2-concepts |
def respond(text):
    """Create an in-channel response payload."""
    return {
        "response_type": "in_channel",
        "text": text
    }
def split(s, sep=None, maxsplit=None):
    """Split a string on a separator substring.

    :param s: the string to split
    :type s: str
    :example s: "a b c d e f"
    :param sep: the separator substring; a single space when None
    :type sep: str
    :example sep: " "
    :param maxsplit: maximum number of splits; None means split everywhere
    :type maxsplit: int
    :example maxsplit: 3
    :rtype: list
    :return: the parts of ``s`` separated by ``sep``
    :example: ['a', 'b', 'c', 'd e f']
    """
    if sep is None:
        sep = " "

    def _split():
        """Generator yielding the successive parts of ``s``."""
        if maxsplit is not None and maxsplit == 0:
            # No splits requested: the whole string is the only part.
            yield s
            return
        sep_length = len(sep)
        length = len(s)
        # i scans the string; j marks the start of the current part.
        i, j = 0, 0
        split_count = 0
        while i < length:
            if s[i:i + sep_length] == sep:
                yield s[j:i]
                i += sep_length
                j = i
                split_count += 1
                if maxsplit and split_count >= maxsplit:
                    break
            else:
                i += 1
        if j <= len(s):
            # Yield the trailing part (possibly empty, matching str.split).
            yield s[j:]

    return list(_split())
def normalize_qa(qa, max_qa=None):
    """Normalize quantitative anisotropy.

    Used mostly with GQI rather than GQI2.

    Parameters
    ----------
    qa : array, shape (X, Y, Z, N)
        where N is the maximum number of peaks stored
    max_qa : float,
        maximum qa value. Usually found in the CSF (corticospinal fluid).
        When None, the array's own maximum is used.

    Returns
    -------
    nqa : array, shape (X, Y, Z, N)
        normalized quantitative anisotropy

    Notes
    -----
    Normalized quantitative anisotropy has the very useful property
    to be very small near gray matter and background areas. Therefore,
    it can be used to mask out white matter areas.
    """
    if max_qa is None:
        return qa / qa.max()
    return qa / max_qa
def stop_filter(record):
    """A filter function to stop iteration.

    Returns the record unchanged, unless record["int"] == 789, in which case
    StopIteration is raised.

    NOTE(review): raising StopIteration inside a generator terminates it as a
    RuntimeError under PEP 479 (Python 3.7+) — callers should not use this
    inside a generator body.
    """
    if record["int"] == 789:
        raise StopIteration
    return record
def distributeN(comm, N):
    """
    Distribute N consecutive things (rows of a matrix, blocks of a 1D array)
    as evenly as possible over a given communicator.
    Uneven workload (differs by 1 at most) is on the initial ranks.

    Parameters
    ----------
    comm: MPI communicator
    N: int
        Total number of things to be distributed.

    Returns
    ----------
    rstart: index of first local row
    rend: 1 + index of last row

    Notes
    ----------
    Index is zero based.
    """
    P = comm.size
    rank = comm.rank
    rstart = 0
    rend = 0
    if P >= N:
        # One item per rank for the first N ranks; the rest get nothing.
        if rank < N:
            rstart = rank
            rend = rank + 1
        else:
            rstart = rank
            rend = rank
    else:
        # BUGFIX: was ``N/P`` (true division), which yields a float in
        # Python 3 and produced float indices. Use floor division.
        n = N // P
        remainder = N % P
        rstart = n * rank
        rend = n * (rank + 1)
        if remainder:
            if rank >= remainder:
                # Later ranks are shifted past the extra items.
                rstart += remainder
                rend += remainder
            else:
                # The first ``remainder`` ranks each take one extra item.
                rstart += rank
                rend += rank + 1
    return rstart, rend
import requests
def _get_etag(uri):
    """
    Get the LDP ETag for a resource if it exists.

    Returns the ETag header value, or None when the response has no ETag.
    """
    # Could cache here — but for now just issue a HEAD request.
    result = requests.head(uri)
    return result.headers.get('ETag')
def jmp(cur_position, value_change):
    """Jump relatively from cur_position by value_change."""
    return cur_position + value_change
def correction_factor(rt, lt, icr=None, ocr=None):
    """
    Calculate the deadtime correction factor.

    Parameters:
    -----------
    * rt = real time, time the detector was requested to count for
    * lt = live time, actual time the detector was active and
      processing counts
    * icr = true input count rate (TOC_t/lt, where TOC_t = true total counts
      impinging the detector)
    * ocr = output count rate (TOC_s/lt, where TOC_s = total processed
      {slow filter for dxp} counts output by the detector)

    If icr and/or ocr are None then only the lt correction is applied.

    Outputs:
    -------
    * cor = (icr/ocr)*(rt/lt)
      the correction factor. the correction is applied as:
      corrected_counts = counts * cor
    """
    if icr is not None and ocr is not None:
        return (icr / ocr) * (rt / lt)
    return rt / lt
def normalize_key(key):
    """
    Return tuple of (group, key) from key.

    A string is partitioned on the first "."; a 2-tuple is passed through.
    The second element is None when there is no key part.

    Raises TypeError for any other input type.
    """
    if isinstance(key, str):
        group, _, key = key.partition(".")
    elif isinstance(key, tuple):
        group, key = key
    else:
        # BUGFIX: was ``type(key).__class__`` which prints "<class 'type'>"
        # for every input; use the actual type name.
        raise TypeError(f"invalid key type: {type(key).__name__}")
    return group, key or None
import random
def random_sign(number):
    """Multiply number by 1 or -1, chosen uniformly at random."""
    return number * random.choice([-1, 1])
def compute_all_relationships(scene_struct, eps=0.2):
    """
    Compute relationships between all pairs of objects in the scene.

    Returns a dictionary mapping string relationship names to lists of lists of
    integers, where output[rel][i] gives a list of object indices that have the
    relationship rel with object i. For example if j is in output['left'][i]
    then object j is left of object i.

    :param scene_struct: dict with 'directions' (name -> 3-vector) and
        'objects' (each with a 3-element 'location')
    :param eps: minimum projection along the direction vector to count as
        "related"
    """
    all_relationships = {}
    for name, direction_vec in scene_struct['directions'].items():
        # Vertical relations are handled elsewhere.
        if name == 'above' or name == 'below':
            continue
        all_relationships[name] = []
        for i, obj1 in enumerate(scene_struct['objects']):
            coords1 = obj1['location']
            related = set()
            for j, obj2 in enumerate(scene_struct['objects']):
                if obj1 == obj2:
                    continue
                coords2 = obj2['location']
                # Project the displacement onto the direction vector.
                diff = [coords2[k] - coords1[k] for k in [0, 1, 2]]
                dot = sum(diff[k] * direction_vec[k] for k in [0, 1, 2])
                if dot > eps:
                    related.add(j)
            all_relationships[name].append(sorted(list(related)))
    return all_relationships
import colorsys
def hsl_to_rgb(h, s, l):
    """
    Converts HSL to RGB.

    Parameters
    ----------
    h: :class:`int`
        The hue value in the range ``[0, 360]``.
    s: :class:`float`
        The saturation value in the range ``[0, 1]``.
    l: :class:`float`
        The lightness value in the range ``[0, 1]``.

    Returns
    -------
    Tuple[:class:`int`, :class:`int`, :class:`int`]
        The RGB tuple, each channel in ``[0, 255]``.
    """
    # colorsys expects hue in [0, 1] and takes HLS (not HSL) argument order.
    h /= 360
    r, g, b = colorsys.hls_to_rgb(h, l, s)
    r = int(round(r * 255, 0))
    g = int(round(g * 255, 0))
    b = int(round(b * 255, 0))
    return (r, g, b)
from typing import Dict
def recount_map_sample_to_study(metadata_file: str) -> Dict[str, str]:
    """
    Parse the recount3 metadata file and extract the sample to study mappings.

    Arguments
    ---------
    metadata_file: The path to where the metadata is stored

    Returns
    -------
    sample_to_study: A mapping between samples and studies
    """
    with open(metadata_file) as in_file:
        header = in_file.readline()
        header = header.replace('"', '')
        header = header.strip().split('\t')
        # Add one to the indices to account for the index column in metadata
        # not present in the header.
        sample_index = header.index('external_id') + 1
        study_index = header.index('study') + 1
        sample_to_study = {}
        for line in in_file:
            line = line.strip().split('\t')
            # Values are quoted in the file; strip the quotes.
            sample = line[sample_index].replace('"', '')
            study = line[study_index].replace('"', '')
            sample_to_study[sample] = study
    return sample_to_study
def data_sort(gdf, str):
    """
    Sort the geodataframe by the given column name.

    NOTE(review): the parameter name ``str`` shadows the builtin; kept for
    call-site (keyword-argument) compatibility.

    Parameters
    ----------
    gdf : geodataframe
        geodataframe of gnss data
    str : str
        column name to sort by

    Returns
    -------
    geodataframe:
        geodataframe of gnss data after sorting
    """
    return gdf.sort_values(by=[str])
def pages(record):
    """
    Convert double hyphen page range to single hyphen,
    eg. '4703--4705' --> '4703-4705'.

    A record without a 'pages' field gets an empty string.

    :param record: a record
    :type record: dict
    :return: dict -- the modified record
    """
    try:
        record['pages'] = record['pages'].replace('--', '-')
    except KeyError:
        record['pages'] = ""
    return record
def is_response(body):
    """Judge if ``body`` is an HTTP response by its status line prefix."""
    return body.startswith(b'HTTP/')
import csv
def get_valid_rows(report_data):
    """Fetch all the rows in the report until it hits a blank in first column.

    This is needed because sometimes DV360 inserts a weird additional metric at
    the end of all the rows. This prevents us from just counting backwards to
    get all the rows.

    Args:
        report_data: The raw report data from DV360.

    Returns:
        List of dict objects with all the valid rows.
    """
    report_data_lines = report_data.splitlines()
    csv_reader = csv.reader(report_data_lines)
    rows = []
    for row in csv_reader:
        # Stop when the first column is blank.
        if not row or not row[0]:
            break
        rows.append(row)
    # BUGFIX: an empty report previously raised IndexError on rows[0].
    if not rows:
        return []
    header = rows[0]
    # Convert each data row to a dict keyed by the header.
    return [dict(zip(header, row)) for row in rows[1:]]
def format_isk_compact(value):
    """Nicely format an ISK value compactly.

    Based on humanize.intword(). Values below 1000 are shown in full;
    larger values are scaled to k/m/b/t/q; values of a quadrillion (10**15)
    or more fall through to plain str().
    """
    powers = [10 ** x for x in [3, 6, 9, 12, 15]]
    letters = ["k", "m", "b", "t", "q"]
    if value < powers[0]:
        return "{:,.2f}".format(value)
    for ordinal, power in enumerate(powers[1:], 1):
        if value < power:
            chopped = value / float(powers[ordinal - 1])
            return "{:,.2f}{}".format(chopped, letters[ordinal - 1])
    return str(value)
def to_settings(settings):
    """Return a dict of application settings.

    Args:
        settings (dict): Database-specific settings, formatted to use
            with :func:`connection_url`.

    Returns:
        dict: Application-level settings (keys upper-cased and prefixed
        with ``DATABASE_``).
    """
    return {'DATABASE_{}'.format(k.upper()): v for k, v in settings.items()}
def get_items_as_list(items, keys, items_names='styles'):
    """
    Return a dict with an item per key.

    Parameters:
    -----------
    items : string, list or dict
        Items (ie line styles). A dict is passed through unchanged; a list is
        zipped against ``keys`` (lengths must match); a scalar is repeated for
        every key.
    keys : list
        List of keys.
    items_names : string
        Name of items, used only in the error message.

    Raises:
    -------
    Exception
        If ``items`` is a list whose length differs from ``keys``.
    """
    if type(items) != dict:
        if type(items) == list:
            if len(items) != len(keys):
                raise Exception('List of {0} is not the same length as keys'.format(items_names))
            else:
                items = dict(zip(keys, items))
        else:
            # Scalar: replicate for every key.
            items = dict(zip(keys, [items] * len(keys)))
    return items
def pystr(s):
    """Output a string as an eval'able representation of a Python string."""
    # Prefer the builtin over calling the dunder directly.
    return repr(s)
from typing import Any
def sum_values(obj: Any) -> Any:
    """Return the sum of the object's values (any mapping with .values())."""
    return sum(obj.values())
def get_contact_indices(layer_0, layer_1):
    """Get a function that finds index pairs of all contacts between layers.

    Args:
        layer_0: String. Must be key in state.
        layer_1: String. Must be key in state.

    Returns:
        _call: Function state --> list, where the elements of the returned list
            are tuples (i_0, i_1) of indices of sprites in layer_0 and layer_1
            that are contacting.
    """
    def _call(state):
        """Gets all (i_0, i_1) such that layer_0[i_0] contacts layer_1[i_1]."""
        contact_indices = []
        for i_0, sprite_0 in enumerate(state[layer_0]):
            for i_1, sprite_1 in enumerate(state[layer_1]):
                if sprite_0.overlaps_sprite(sprite_1):
                    contact_indices.append((i_0, i_1))
        return contact_indices
    return _call
def serialize_tree(root):
    """Given a tree root node (some object with a 'data' attribute and a
    'children' attribute which is a list of child nodes), serialize it to a
    list, each element of which is either a pair (data, has_children_flag), or
    None (which signals an end of a sibling chain).
    """
    lst = []

    def serialize_aux(node):
        # Recursive visitor function.
        if len(node.children) > 0:
            # The node has children, so:
            # 1. add it to the list & mark that it has children
            # 2. recursively serialize its children
            # 3. finally add a "null" entry to close the sibling chain
            lst.append((node.data, True))
            for child in node.children:
                serialize_aux(child)
            lst.append(None)
        else:
            # The node is child-less, so simply add it to
            # the list & mark that it has no children.
            lst.append((node.data, False))

    serialize_aux(root)
    return lst
def do_pick(pick_status: int):
    """
    Return whether it is the user's turn.

    Pick Status = Even ==> return True (user's turn)
    Pick Status = Odd  ==> return False (comp's turn)
    """
    # The comparison already yields the boolean — no if/else needed.
    return (pick_status % 2) == 0
def search(key, table, prefix=True):
    """Search for `key` in `table`.

    :param key: str, The string to look for.
    :param table: dict, The table to look in.
    :param prefix: bool, Do a prefix search.
    :returns:
        Value in table or None.
        If it is a prefix search it returns the full key and the value,
        or (None, None) when nothing matches.
    """
    # Exact match first.
    if key in table:
        if prefix:
            return key, table[key]
        return table[key]
    if prefix:
        # Fall back to the first key that starts with ``key``
        # (iteration order of the dict decides ties).
        for k, v in table.items():
            if k.startswith(key):
                return k, v
        return None, None
    return None
def get_list_url(list_name):
    """Get url from requested list name.

    Parameters
    ----------
    list_name : str
        Name of the requested list. Valid names are: *nea_list,
        risk_list, risk_list_special, close_approaches_upcoming,
        close_approaches_recent, priority_list, priority_list_faint,
        close_encounter, impacted_objects, neo_catalogue_current and
        neo_catalogue_middle*.

    Returns
    -------
    url : str
        Final URL string.

    Raises
    ------
    KeyError
        If the requested list_name is not in the dictionary.
    """
    # Map each known list name to its URL fragment.
    lists_dict = {
        "nea_list": 'allneo.lst',
        "updated_nea": 'updated_nea.lst',
        "monthly_update": 'monthly_update.done',
        "risk_list": 'esa_risk_list',
        "risk_list_special": 'esa_special_risk_list',
        "close_approaches_upcoming": 'esa_upcoming_close_app',
        "close_approaches_recent": 'esa_recent_close_app',
        "priority_list": 'esa_priority_neo_list',
        "priority_list_faint": 'esa_faint_neo_list',
        "close_encounter": 'close_encounter2.txt',
        "impacted_objects": 'impactedObjectsList.txt',
        "neo_catalogue_current": 'neo_kc.cat',
        "neo_catalogue_middle": 'neo_km.cat'
    }
    # Raise an explicit error if the input is not in the dictionary.
    if list_name not in lists_dict:
        raise KeyError('Valid list names are nea_list, updated_nea, '
                       'monthly_update, risk_list, risk_list_special, '
                       'close_approaches_upcoming, close_approaches_recent, '
                       'priority_list, priority_list_faint, '
                       'close_encounter, impacted_objects, '
                       'neo_catalogue_current and neo_catalogue_middle')
    return lists_dict[list_name]
def fake_train(lrate, batch_size, arch):
    """Synthetic objective for optimizer testing.

    Optimum: lrate=0.2, batch_size=4, arch='conv' (value 0).
    """
    f1 = (
        (lrate - 0.2) ** 2
        + (batch_size - 4) ** 2
        + (0 if arch == "conv" else 10)
    )
    return f1
from contextlib import suppress
import ipaddress
def is_ip_address(value: str) -> bool:
    """Check if a value is a valid IPv4 or IPv6 address.

    :param value: value to check
    """
    # ip_address raises ValueError for anything that isn't an IP literal;
    # suppress turns that into the fall-through False.
    with suppress(ValueError):
        ipaddress.ip_address(value)
        return True
    return False
from typing import Dict
def get_example_brand_map() -> Dict[str, str]:
    """
    Return notebooks brand mapping based on simple regex.

    The mapping is from patterns found in menu_items to the items brand.
    The returned dictionary {key, value} has the following structure:
        key is the regex search pattern
        value is the value this pattern is mapped to

    :returns: dictionary with notebooks brand mapping
    """
    return {
        "heineken": "heineken",
        "coca.cola": "coca-cola",
        "red.bull": "red bull",
        "pizza": "unbranded",
        "tee": "unbranded",
        "hamburger": "unbranded",
        "stella": "stella artois",
        "tee mit milch": "unbranded",
        "kaffee mit sucker": "unbranded",
        "rode wijn": "unbranded",
        "fles witte": "unbranded",
    }
def append_write(filename="", text=""):
    """Write a new file or append to it if it exists.

    Args:
        filename: string containing the name or "" if not given.
        text: content to append to the file.

    Return: number of chars written
    """
    # Mode 'a' creates the file when missing and appends otherwise.
    with open(filename, 'a', encoding="utf-8") as fl_opened:
        return fl_opened.write(text)
def ij_to_dxdy(start_idx, end_idx, _row_size=14):
    """
    Helper to calculate _x, _y, _dx, _dy given two indexes from the board.

    Determines the column/row of the start point and the change in column
    and row between the start/end point pair (rows counted from the bottom).

    :param start_idx: starting index
    :param end_idx: ending index
    :param _row_size: number of columns per board row
    :return: _x, _y, _dx, _dy
    """
    # For move validation.
    _x = start_idx % _row_size
    _y = _row_size - start_idx // _row_size
    _dx = (end_idx % _row_size) - (start_idx % _row_size)
    _dy = (_row_size - end_idx // _row_size) - _y
    return _x, _y, _dx, _dy
def price_in_usd(usdprice, amount, base, dest='USD'):
    """Return price of any "base" currency normalized to ``dest`` (USD).

    ``usdprice`` is any converter object exposing
    ``convert(amount, base=..., dest=...)``.
    """
    return usdprice.convert(amount, base=base, dest=dest)
import logging
def get_logfile_from_logger(logger):
    """Return the path to the log file if there is a logging.FileHandler.

    Returns None when the logger has no FileHandler (or is not a logger).
    """
    try:
        # Exact type check (not isinstance) deliberately excludes
        # FileHandler subclasses, preserving the original behavior.
        file_handlers = [h for h in logger.handlers if type(h) == logging.FileHandler]
    except AttributeError:
        # Narrowed from a bare except: only a missing .handlers attribute
        # is an expected failure here.
        return None
    else:
        if file_handlers:
            return file_handlers[0].baseFilename
import torch
def as_numpy(tensor_or_array):
    """If given a torch tensor, return it cast to a numpy array;
    any other object is returned unchanged.
    """
    if isinstance(tensor_or_array, torch.Tensor):
        # Move to CPU and drop the autograd graph before converting.
        tensor_or_array = tensor_or_array.cpu().detach().numpy()
    return tensor_or_array
def get_indicator_publication(indicator):
    """
    Build publications grid field from the indicator external_references field.

    Args:
        indicator: The indicator with publication field

    Returns:
        list. publications grid field
    """
    publications = []
    for external_reference in indicator.get('external_references', []):
        # References with an external_id are not publications — skip them.
        if external_reference.get('external_id'):
            continue
        url = external_reference.get('url')
        description = external_reference.get('description')
        source_name = external_reference.get('source_name')
        publications.append({'link': url, 'title': description, 'source': source_name})
    return publications
import pickle
def load_obj(name):
    """
    Load a pickled object.

    input:
        name: path with the name of the pickle without file extension.

    WARNING: pickle.load executes arbitrary code — only use on trusted files.
    """
    with open(name + '.pkl', 'rb') as file:
        return pickle.load(file)
def _divide_with_ceil(a, b):
"""
Returns 'a' divided by 'b', with any remainder rounded up.
"""
if a % b:
return (a // b) + 1
return a // b | bigcode/self-oss-instruct-sc2-concepts |
def isCST(foraValue):
    """Return whether a foraValue is a constant (via its implVal_.isCST)."""
    return foraValue.implVal_.isCST
def filter_tagged_articles(articles, tagged_articles):
    """
    Filter out those named entity tagged articles from the NLP pipeline which
    also appear in the evaluation set.

    :param articles: List of articles (objects with an ``id`` attribute).
    :type articles: list
    :param tagged_articles: List of tagged articles (dicts with meta.id).
    :type tagged_articles: list
    :return: List of filtered articles.
    :rtype: list
    """
    # Build the id set once for O(1) membership tests.
    article_ids = {article.id for article in articles}
    return [
        tagged_article
        for tagged_article in tagged_articles
        if tagged_article["meta"]["id"] in article_ids
    ]
def update_max_accel(driver, ma):
    """
    Update the max accel of the driver.

    :param driver: driver
    :param ma: new max accel
    :type driver: DriverProfile
    :return: updated driver profile
    """
    return driver.update_max_accel(ma)
def castlingmove(movetuples):
    """Determine if we have a tuple of tuples (castling) or a simple move."""
    # The conjunction already yields the boolean — no if/else needed.
    return isinstance(movetuples[0], tuple) and isinstance(movetuples[1], tuple)
def readable_size(i, snapshot=False):
    """
    Pretty-print the integer `i` as a human-readable size representation.

    With ``snapshot=True`` the value is rendered with an explicit sign
    (for deltas). Values above 1024**6 raise IndexError (no scale defined).
    """
    degree = 0
    while i > 1024:
        i = i / float(1024)
        degree += 1
    # BUGFIX: 1024**5 is a petabyte — the table previously read "EB" here.
    scales = ["B", "KB", "MB", "GB", "TB", "PB"]
    if snapshot:
        return f"{i:+.2f}{scales[degree]:>5}"
    return f"{i:.2f}{scales[degree]:>5}"
def num_explicit_hydrogens(a):
    """Number of explicit hydrogens on atom ``a`` (RDKit-style interface)."""
    return a.GetNumExplicitHs()
def get_projects(conn, scenario_id, subscenarios, col, col_value):
    """
    Get projects for which the column value of "col" is equal to "col_value".
    E.g. "get the projects of operational type gen_commit_lin".

    :param conn: database connection (sqlite3-style, qmark parameters)
    :param scenario_id: unused here; kept for call-site compatibility
    :param subscenarios: Subscenarios class object
    :param col: str — column name; must be a trusted identifier, since
        identifiers cannot be bound as SQL parameters
    :param col_value: str
    :return: List of projects that meet the criteria
    """
    c = conn.cursor()
    # SECURITY: values are bound as parameters instead of str.format to
    # avoid SQL injection; only the column identifier is interpolated.
    query = """SELECT project
        FROM project_portfolio_opchars
        WHERE project_portfolio_scenario_id = ?
        AND project_operational_chars_scenario_id = ?
        AND {} = ?;""".format(col)
    projects = c.execute(
        query,
        (subscenarios.PROJECT_PORTFOLIO_SCENARIO_ID,
         subscenarios.PROJECT_OPERATIONAL_CHARS_SCENARIO_ID,
         col_value),
    )
    # Flatten the single-column result rows into a plain list.
    return [p[0] for p in projects]
def introspection_query(description: bool = True) -> str:
    """
    Return a generic introspection query to be used by GraphQL clients.

    Args:
        description: If ``True`` the query will include description fields.

    Returns:
        Canonical introspection query
    """
    return """
    query IntrospectionQuery {
        __schema {
            queryType { name }
            mutationType { name }
            subscriptionType { name }
            types {
                ...FullType
            }
            directives {
                name
                %(description_field)s
                locations
                args {
                    ...InputValue
                }
            }
        }
    }

    fragment FullType on __Type {
        kind
        name
        %(description_field)s
        fields(includeDeprecated: true) {
            name
            %(description_field)s
            args {
                ...InputValue
            }
            type {
                ...TypeRef
            }
            isDeprecated
            deprecationReason
        }
        inputFields {
            ...InputValue
        }
        interfaces {
            ...TypeRef
        }
        enumValues(includeDeprecated: true) {
            name
            %(description_field)s
            isDeprecated
            deprecationReason
        }
        possibleTypes {
            ...TypeRef
        }
    }

    fragment InputValue on __InputValue {
        name
        %(description_field)s
        type { ...TypeRef }
        defaultValue
    }

    fragment TypeRef on __Type {
        kind
        name
        ofType {
            kind
            name
            ofType {
                kind
                name
                ofType {
                    kind
                    name
                    ofType {
                        kind
                        name
                        ofType {
                            kind
                            name
                            ofType {
                                kind
                                name
                                ofType {
                                    kind
                                    name
                                }
                            }
                        }
                    }
                }
            }
        }
    }
    """ % {
        # Empty string drops every description field from the query text.
        "description_field": "" if not description else "description"
    }
def flatten_strokes(strokes):
    """
    Flatten a list of strokes. Add stroke state in the process.

    For each point j, the stroke state is a tuple (pj, qj, rj) where:
     * pj=1 indicates the point is not the end of a stroke.
     * qj=1 indicates the point is the end of a stroke (but not the end of the drawing).
     * rj=1 indicates the point is the end of the drawing.
    By construction, pj + qj + rj = 1

    Input:
    [
        ((x1, x2, ..., xi-1, xi), (y1, y2, ..., yi-1, yi)),
        ((xi+1, ...), (yi+1, ...)),
        ...,
        ((..., xn-1, xn), (..., yn-1, yn))
    ]
    Output:
    [
        [x1, y1, 1, 0, 0],
        [x2, y2, 1, 0, 0],
        ...,
        [xi-1, yi-1, 1, 0, 0],
        [xi, yi, 0, 1, 0],
        [xi+1, yi+1, 1, 0, 0],
        ...,
        [xn-1, yn-1, 1, 0, 0],
        [xn, yn, 0, 0, 1]
    ]
    """
    flat_strokes = []
    for xs, ys in strokes:
        if not xs:
            # BUGFIX: an empty stroke used to re-mark the previous stroke's
            # last point (or raise IndexError when it was the first stroke).
            continue
        for x, y in zip(xs, ys):
            # Mark stroke in progress by default.
            flat_strokes.append([x, y, 1, 0, 0])
        # Mark end of stroke.
        x, y, *_ = flat_strokes[-1]
        flat_strokes[-1] = [x, y, 0, 1, 0]
    if flat_strokes:
        # Mark end of drawing (BUGFIX: guarded against empty input).
        x, y, *_ = flat_strokes[-1]
        flat_strokes[-1] = [x, y, 0, 0, 1]
    return flat_strokes
def get_raw_data(x_norm, min_val, max_val):
    """
    Undo min-max normalization: map x_norm in [0, 1] back to
    [min_val, max_val].
    """
    return x_norm * (max_val - min_val) + min_val
import copy
import torch
def get_printoptions() -> dict:
"""
Returns the currently configured printing options as key-value pairs.
"""
return copy.copy(torch._tensor_str.PRINT_OPTS.__dict__) | bigcode/self-oss-instruct-sc2-concepts |
def get_cavity_phases(fn):
    """Given an OPAL input file, extract phi1, phi2, phi3, phi4 (rad).

    Lines starting with ``//`` are comments and skipped; recognized
    assignments look like ``name = value;``.

    BUGFIX: the original stored parsed values via ``locals().update`` /
    ``locals()[name] = ...``, which does not create or modify function
    locals in CPython, so the final lookup raised KeyError. A plain dict
    is used instead.
    """
    found = {'t_offset': None,
             'dphi': None,
             'phi_correction': None,
             'phi0': None,
             'phi1': None,
             'phi2': None,
             'phi3': None,
             'phi4': None}
    with open(fn, 'r') as f:
        for line in f:
            if not line.startswith("//") and any(line.startswith(v) for v in found):
                varname, varvalue = [part.strip().strip(';') for part in line.split('=')]
                # SECURITY: eval of file content — only use on trusted
                # OPAL input files (kept for expression-valued settings).
                varvalue = eval(varvalue)
                if varname == 'phi_correction':
                    varvalue *= 1e-6  # MHz vs Hz
                found[varname] = varvalue
    return [found['phi%d' % i] for i in range(1, 5)]
def load_diachronic_dataset(datapath="data/nytimes_dataset.txt", start_date="2019-01-01", end_date="2020-12-31"):
    """
    Read in a diachronic dataset with "%Y-%m-%d\tsentence" per line.

    Inputs:
        - datapath [str]: path to a dataset with tab-separated dates (in the same format as start/end_date)
              and sentences. Since these sentences will later be passed as is to the transformer,
              they shouldn't be too long, i.e., not whole documents. (default: "data/nytimes_dataset.txt")
        - start_date [str]: earliest date at and after which the sentences should be taken (default: "2019-01-01")
        - end_date [str]: latest date until which the sentences should be included (default: "2020-12-31")

    Returns:
        - sentences [list: [list: str]]: list of sentences (as lists of words) in chronological order
        - dates [list: str]: list of the same length as sentences with corresponding dates

    NOTE(review): the ``break`` on the first date past ``end_date`` assumes the
    file is in chronological order — confirm for new datasets.
    """
    sentences = []
    dates = []
    with open(datapath) as f:
        for line in f:
            d, s = line.strip().split("\t")
            if d < start_date:
                continue
            elif d > end_date:
                break
            dates.append(d)
            # Lowercase; the tokenizer sometimes leaves a spurious trailing "."
            # on longer words — strip it (words of length <= 3 are kept as is).
            sentences.append([w if len(w) <= 3 or not w.endswith(".") else w[:-1] for w in s.lower().split()])
    print(f"Dataset contains {len(sentences)} sentences between {start_date} and {end_date}")
    return sentences, dates
import requests
def make_request(url, headers=None):
    """
    Make an HTTP GET request and return the response.

    Any exception raised by requests propagates to the caller (the original
    try/except only re-raised, so behavior is unchanged).
    """
    return requests.get(url, headers=headers)
def _is_ascii(text):
"""
Args:
text (str): Text to check.
Returns:
bool: whether or not the text can be encoded in ascii.
"""
return all(ord(char) < 128 for char in text) | bigcode/self-oss-instruct-sc2-concepts |
import math
def attacker_success_probability(q: float, z: int):
    """
    Compute the probability that an attacker ever builds a longer trusted chain.

    :param q: probability the attacker finds the next block
    :param z: number of blocks the attacker is behind
    :return: probability the attacker will ever catch up from z blocks behind
    """
    honest_q = 1.0 - q
    # expected attacker progress while the honest chain advances z blocks
    expected_progress = z * (q / honest_q)
    result = 1.0
    base_density = math.exp(-expected_progress)
    for progress in range(z + 1):
        # Poisson density for exactly `progress` attacker blocks
        density = base_density
        for step in range(1, progress + 1):
            density *= expected_progress / step
        # subtract the probability mass the attacker can never recover from
        result -= density * (1 - math.pow(q / honest_q, z - progress))
    return result
from torch import optim
from functools import partial
def get_optimizer(optimizer):
    """
    Look up a torch optimizer factory by its short name.

    :param optimizer: Short name of the optimizer, e.g. 'adam' for Adam.
    :return: A callable that constructs the corresponding nn.optim optimizer.
    """
    known_optimizers = {
        'adam': partial(optim.Adam, amsgrad=True),
    }
    return known_optimizers[optimizer]
import logging
def is_valid_dataset(platform):
    """Filters out datasets that can't be used because it is missing required data
    such as the release date or an original price. Other required data includes
    the name and abbreviation of the platform.

    Returns True only when 'name', 'abbreviation', 'release_date' and
    'original_price' are all present and truthy.
    """
    # Validate the name first: the other warnings interpolate platform['name'],
    # which previously raised KeyError for nameless platforms.
    if 'name' not in platform or not platform['name']:
        logging.warning(u"No platform name found for given dataset")
        return False
    # logging.warn is a deprecated alias of logging.warning.
    if 'release_date' not in platform or not platform['release_date']:
        logging.warning(u"{0} has no release date".format(platform['name']))
        return False
    if 'original_price' not in platform or not platform['original_price']:
        logging.warning(u"{0} has no original price".format(platform['name']))
        return False
    if 'abbreviation' not in platform or not platform['abbreviation']:
        logging.warning(u"{0} has no abbreviation".format(platform['name']))
        return False
    return True
from datetime import datetime
def _zulu_to_epoch_time(zulu_time: str) -> float:
"""Auxiliary function to parse Zulu time into epoch time"""
epoch = datetime(1970, 1, 1)
time_as_date = datetime.strptime(zulu_time, "%Y-%m-%dT%H:%M:%SZ")
return (time_as_date - epoch).total_seconds() | bigcode/self-oss-instruct-sc2-concepts |
import functools
def compose(*functions):
    """
    Compose functions, applying them left to right:
    compose(f, g)(x) == g(f(x)); compose() is the identity.
    """
    def composed(value):
        for fn in functions:
            value = fn(value)
        return value
    return composed
def _find_closest_year_row(df, year=2020):
"""Returns the row which is closest to the year specified (in either direction)"""
df = df.copy()
df['year'] = df['year'].sort_values(ascending=True)
return df.loc[df['year'].map(lambda x: abs(x - 2020)).idxmin()] | bigcode/self-oss-instruct-sc2-concepts |
import re
def _fullmatch(pattern, text, *args, **kwargs):
"""re.fullmatch is not available on Python<3.4."""
match = re.match(pattern, text, *args, **kwargs)
return match if match.group(0) == text else None | bigcode/self-oss-instruct-sc2-concepts |
def fixChars(text: str) -> str:
    """Fixes \\xa0 Latin1 (ISO 8859-1), \\x85, and \\r, replacing them with space"""
    # single C-level pass instead of three chained .replace() calls
    space_map = str.maketrans({u'\xa0': u' ', u'\x85': u' ', u'\r': u' '})
    return text.translate(space_map)
def get_metadata_value(structure_list, desc_metadata):
    """Retrieve a given metadata entry from each structure's descriptor dict.

    Each structure is expected to expose ``info['descriptor']`` (a mapping);
    the value stored under ``str(desc_metadata)`` is collected per structure.
    """
    key = str(desc_metadata)
    return [structure.info['descriptor'][key] for structure in structure_list]
import ntpath
import posixpath
def to_posix(path):
    """
    Return a path using the posix path separator given a path that may contain
    posix or windows separators, converting "\\" to "/". NB: this path will
    still be valid in the windows explorer (except for a UNC or share name). It
    will be a valid path everywhere in Python. It will not be valid for windows
    command line operations.
    """
    # split on the windows separator and rejoin with the posix one
    return posixpath.sep.join(path.split(ntpath.sep))
def SafeToMerge(address, merge_target, check_addresses):
    """Determine if it's safe to merge address into merge target.

    Checks the given address against the merge target and each entry of
    check_addresses: merging is unsafe when a check address shares the
    network address with `address`, is less specific than `address`, and
    is at least as specific as `merge_target`. See the description of why
    it is important within public function CollapseAddrList.

    Args:
      address: Address that is being merged.
      merge_target: Merge candidate address.
      check_addresses: A list of addresses to compare specificity with.

    Returns:
      True if safe to merge, False otherwise.
    """
    return not any(
        address.network_address == check.network_address
        and address.netmask > check.netmask
        and merge_target.netmask <= check.netmask
        for check in check_addresses
    )
import string
def fmt_to_name(format_string, num_to_name):
    """Try to map a format string to a single name.

    Parameters
    ----------
    format_string : string
    num_to_name : dict
        A dictionary that maps from an integer to a column name. This
        enables mapping the format string to an integer to a name.

    Returns
    -------
    A placeholder name if `format_string` consists of a single
    placeholder and no other text. Otherwise, None is returned.
    """
    fields = list(string.Formatter().parse(format_string))
    if len(fields) != 1:
        # empty string, or more than one placeholder
        return None
    literal_text, field_name, _, _ = fields[0]
    if literal_text:
        # text precedes the placeholder
        return None
    if not field_name:
        # auto-numbered "{}" or otherwise empty field name
        return None
    try:
        return num_to_name[int(field_name)]
    except (KeyError, ValueError):
        # non-integer name, or integer not in the mapping: return it verbatim
        return field_name
import re
def _replacestrings(source):
"""Strip string lookup table (list) and replace values in source."""
match = re.search(r'var *(_\w+)\=\["(.*?)"\];', source, re.DOTALL)
if match:
varname, strings = match.groups()
startpoint = len(match.group(0))
lookup = strings.split('","')
variable = '%s[%%d]' % varname
for index, value in enumerate(lookup):
source = source.replace(variable % index, '"%s"' % value)
return source[startpoint:]
return source | bigcode/self-oss-instruct-sc2-concepts |
def PNT2QM_Tv4(XA, chiA):
    """TaylorT2 2PN Quadrupole Moment Coefficient, v^4 Timing Term.

    XA = mass fraction of object
    chiA = dimensionless spin of object
    """
    # -10 * XA^2 * chiA^2, multiplied out left-to-right
    coefficient = -10.
    return coefficient * XA * XA * chiA * chiA
import requests
def get_students_registered_for_class(url, username, password, crn, term):
    """ Grabs the students registered for a course in a particular term

    Uses an API like
    https://boomi.som.yale.edu:9090/ws/rest/subscribers/klj39/CourseRoster/

    Arguments:
        url {string} -- API URL, e.g.
        username {string} -- API username (http basic auth)
        password {string} -- API password (http basic auth)
        crn {string} -- CRN for the course
        term {string} -- term for the course, e.g. "201903" for fall 2019
    """
    query = {'CRN': crn, 'TermCode': term}
    response = requests.get(url, params=query, auth=(username, password))
    roster = response.json()
    # drill into the roster payload for the student list
    return roster["Course"]["roster"]["student"]
def computeEER(rates, withthresh=0):
    """Computes the equal error rate from a list of (true pos, false pos) values.

    The EER is read off at the index where the detection rate and
    1 - false-positive rate are closest. If withthresh is truthy (not the
    default), returns (eer, index at which eer occurs).
    """
    det_rates, fp_rates = zip(*rates)
    complements = [1.0 - float(fp) for fp in fp_rates]
    gaps = [(abs(d - c), idx)
            for idx, (d, c) in enumerate(zip(det_rates, complements))]
    # ties break on the lower index, matching tuple comparison order
    _, best_idx = min(gaps)
    if withthresh:
        return det_rates[best_idx], best_idx
    return det_rates[best_idx]
def is_nonAA(residue):
    """
    Parameters
    ----------
    residue : a residue from a protein structure object made with PDBParser().

    Returns
    -------
    Boolean
        True if residue is hetero ('H') or water ('W'), False otherwise.
    """
    # first element of the residue id tuple is the hetero flag
    hetero_flag = residue.get_id()[0]
    return hetero_flag[0] in ('H', 'W')
import string
import random
def key_generator(size=6, chars=string.ascii_uppercase + string.digits):
    """
    Generate a random key string for things that need keys.

    Allows variable size and character pools, if desired.
    NOT CRYPTOGRAPHICALLY SECURE

    :param size: Size of the key to generate
    :param chars: Characters to choose from
    :return: Key composed of the given characters with the given size, in random order
    """
    picked = [random.choice(chars) for _ in range(size)]
    return ''.join(picked)
def all_columns(df, names):
    """
    Test whether df has all the given columns.

    :param df: object exposing a ``columns`` collection (e.g. a DataFrame)
    :param names: iterable of column names to look for
    :return: True if every name is present in df.columns, False otherwise
    """
    # all() short-circuits on the first missing column, unlike the previous
    # sum-of-booleans formulation which always scanned every name.
    return all(name in df.columns for name in names)
from typing import Union
def convert_memory_limit(memory: Union[str, int]) -> int:
    """Converts a provided memory limit with optional units into
    its equivalent in bytes.

    Binary suffixes (KiB/MiB/GiB, K/M/G) are powers of 1024; decimal ones
    (KB/MB/GB, k/m/g) are powers of 1000. A bare number is taken as bytes.

    Args:
        memory: String or integer representation of memory limit.
    Returns:
        The provided memory in bytes.
    """
    if isinstance(memory, int):
        # already in bytes
        return memory
    # Ordered so longer suffixes are tried before their substrings
    # (e.g. "KiB" before "B", "KB" before "B") -- do not reorder.
    unit_table = (
        ("bytes", 1),
        ("KiB", 1 << 10),
        ("MiB", 1 << 20),
        ("GiB", 1 << 30),
        ("KB", 1000),
        ("MB", 1000 ** 2),
        ("GB", 1000 ** 3),
        ("B", 1),
        ("K", 1 << 10),
        ("M", 1 << 20),
        ("G", 1 << 30),
        ("b", 1),
        ("k", 1000),
        ("m", 1000 ** 2),
        ("g", 1000 ** 3),
    )
    for unit, multiplier in unit_table:
        if memory.endswith(unit):
            count = int(memory[: -len(unit)].strip())
            return multiplier * count
    # no recognized suffix: interpret the whole string as a byte count
    return int(memory)
def centroid_atmList(atmList):
    """Get the centroid (mean x, y, z) for a list of atom dicts."""
    count = len(atmList)
    # sum() accumulates from 0 in list order, matching the original loop
    total_x = sum(atm["x"] for atm in atmList)
    total_y = sum(atm["y"] for atm in atmList)
    total_z = sum(atm["z"] for atm in atmList)
    return total_x / count, total_y / count, total_z / count
def _process_parameters(parameters):
"""
Run over the parameters are extract parameter and column name. This function deals with figuring out if a
requested parameter also has a user defined output name.
.. note::
Function for internal use!
:param parameters: list of tuples or strings containing the parameters names
:type parameters: list
:return: parameters, column_names: the parameters to extract with extra ML parameters added, and the columnn names
matching those parameters.
"""
parameters_, column_names = [], []
for parameter in parameters:
if type(parameter) == tuple:
parameters_.append(parameter[0])
column_names.append(parameter[1].strip())
else:
parameters_.append(parameter)
column_names.append(parameter.strip())
return parameters_, column_names | bigcode/self-oss-instruct-sc2-concepts |
import configparser
def import_test_configuration(config_file):
    """
    Read the config file regarding the testing and import its content.

    :param config_file: path to an INI file with [simulation], [agent]
        and [dir] sections.
    :return: dict with the typed configuration values.
    """
    parser = configparser.ConfigParser()
    parser.read(config_file)
    simulation = parser['simulation']
    agent = parser['agent']
    paths = parser['dir']
    return {
        'gui': simulation.getboolean('gui'),
        'max_steps': simulation.getint('max_steps'),
        'n_cars_generated': simulation.getint('n_cars_generated'),
        'episode_seed': simulation.getint('episode_seed'),
        'green_duration': simulation.getint('green_duration'),
        'yellow_duration': simulation.getint('yellow_duration'),
        'num_states': agent.getint('num_states'),
        'num_actions': agent.getint('num_actions'),
        'sumocfg_file_name': paths['sumocfg_file_name'],
        'models_path_name': paths['models_path_name'],
        'model_to_test': paths.getint('model_to_test'),
    }
def solve_modular_equation(a, b, c):
    """
    Find solution of ax % b = c (a and b relative primes, ie assume gcd(a, b) == 1).

    Uses the modular multiplicative inverse of a (mod b), which the
    three-argument pow() computes for exponent -1 on Python >= 3.8.
    """
    a_inverse = pow(a, -1, b)
    return (a_inverse * c) % b
import requests
from bs4 import BeautifulSoup
def fetch_pkg_version(pkg_url: str) -> str:
    """
    Find the version of package documentation.

    :param pkg_url: Full URL of package documentation
    :return: version string of the package requested
    """
    # fetch the page, parse it, and read the first element tagged 'version'
    html = requests.get(pkg_url).content
    parsed = BeautifulSoup(html, 'html.parser')
    version_node = parsed.find_all(class_='version')[0]
    return version_node.get_text().strip()
def get_param(param, arguments, default=None):
    """
    Get parameter from list of arguments. Arguments can be in following format:
    ['--parameter', 'param_value'] or ['--parameter=param_value']

    Args:
        param (str): Name of parameter (including any leading dashes)
        arguments (list): List of arguments from CLI
        default (any): any default value for parameter (default: None)
    Returns:
        The parameter value, or ``default`` when the parameter is absent
        (or given as a trailing flag with no value).
    """
    for index, arg in enumerate(arguments):
        # Exact match ('--param value') or inline form ('--param=value').
        # The old substring test made '--par' match '--parameter'.
        if arg == param:
            if index + 1 < len(arguments):
                return arguments[index + 1]
            return default  # flag given as the last argument, no value follows
        if arg.startswith(param + '='):
            # split only once so values containing '=' survive intact
            return arg.split('=', 1)[1]
    return default
import string
import random
def random_id(size=8, chars=string.ascii_letters + string.digits):
    """Generates a random string of given size from the given chars.

    @param size: The size of the random string.
    @param chars: Constituent pool of characters to draw random characters from.
    @type size: number
    @type chars: string
    @rtype: string
    @return: The string of random characters.
    """
    drawn = []
    for _ in range(size):
        drawn.append(random.choice(chars))
    return ''.join(drawn)
def type_of_exception(exception_object):
    """Get the type of an exception object as a string, or None.

    Args:
        exception_object: Any value; only Exception instances yield a name.
    Returns:
        The exception class name (e.g. 'ValueError'), or None when the
        argument is not an Exception instance (previously an implicit None).
    """
    if isinstance(exception_object, Exception):
        # __name__ is already a str; the old str() wrapper was redundant
        return type(exception_object).__name__
    return None
def generate_bins() -> list:
    """
    Generate color bins: the cross product of 10 hue bins with 3 saturation
    and 3 lightness bins (upper edges padded to 1.1 to catch 1.0 values).

    :return: List of (hue_bin, sat_bin, light_bin) tuples.
    """
    hue_bins = [(i / 10.0, (i + 1) / 10.0) for i in range(10)]
    # widen the last hue bin so a value of exactly 1.0 still falls inside
    hue_bins[-1] = (hue_bins[-1][0], 1.1)
    thirds = [(0.0, 0.333), (0.333, 0.666), (0.666, 1.1)]
    return [(hue, sat, light)
            for hue in hue_bins
            for sat in thirds
            for light in thirds]
def intermediate_text_filename_generation(cote, lang):
    """
    Generate the name of a given intermediate text.

    cote : the cote
    lang : the language to look for to create the texts. Must follow the
        Code convention ("f", "l", "t" or "a")
    Return : str
    """
    # map the single-letter code convention to ISO-style language tags
    iso_codes = {"f": "fr", "a": "de", "t": "it", "l": "la"}
    return f"CH-AEV_IV_{cote}_interm_text_{iso_codes[lang]}.txt"
def next_cma(new_value, list_len, old_cma):
    """Calculate the next cumulative moving average.

    'list_len' is the length of the currently-being-averaged list before
    adding the new value.
    """
    running_total = new_value + list_len * old_cma
    return running_total / (list_len + 1)
def rubygems_api_url(name, version=None):
    """
    Return a package API data URL given a name, an optional version and a base
    repo API URL.

    For instance:
    >>> url = rubygems_api_url(name='turbolinks', version='1.0.2')
    >>> assert url == 'https://rubygems.org/api/v2/rubygems/turbolinks/versions/1.0.2.json'

    If no version, we return:
    >>> url = rubygems_api_url(name='turbolinks')
    >>> assert url == 'https://rubygems.org/api/v1/versions/turbolinks.json'

    Things we could return: a summary for the latest version, with deps
    https://rubygems.org/api/v1/gems/mqlight.json
    """
    if not name:
        # no name, no URL
        return None
    if not version:
        return f'https://rubygems.org/api/v1/versions/{name}.json'
    return f'https://rubygems.org/api/v2/rubygems/{name}/versions/{version}.json'
from io import StringIO
def _join(args):
"""
Join the arguments with spaces, except newlines which don't
need to be padded.
"""
result = StringIO()
skipSpace = True
for arg in args:
if skipSpace:
skipSpace = False
else:
result.write(' ')
if arg == '\n':
skipSpace = True
result.write(arg)
return result.getvalue() | bigcode/self-oss-instruct-sc2-concepts |
def keep_only_digits(input_string: str) -> str:
    """This function takes as input a string and returns the same string but only containing digits.

    Args:
        input_string (str): the input string from which we want to remove non-digit characters
    Returns:
        str: the output string that only contains digit characters, in order
    """
    return "".join(ch for ch in input_string if ch.isdigit())
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.