content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def evaluate(x):
    """Recursively evaluate a nested prefix-expression list.

    A list whose head is callable is applied to its evaluated tail; a list
    headed by the literal 'if' evaluates lazily (only the chosen branch is
    evaluated).  Any other list head is rejected; non-list values are
    returned unchanged (literals).
    """
    if not isinstance(x, list):
        return x
    head = x[0]
    if callable(head):
        args = [evaluate(arg) for arg in x[1:]]
        return head(*args)
    if head == 'if':
        branch = x[2] if evaluate(x[1]) else x[3]
        return evaluate(branch)
    raise ValueError("invalid prefix: '{}'".format(head))
def _ReplaceNameByUuidProcessLine(
node_name, _key, line_identifier, line_key, found, node_uuid=None):
"""Replaces a node's name with its UUID on a matching line in the key file.
This is an auxiliary function for C{_ManipulatePublicKeyFile} which processes
a line of the ganeti public key file. If the line in question matches the
node's name, the name will be replaced by the node's UUID.
@type node_name: string
@param node_name: name of the node to be replaced by the UUID
@type _key: string
@param _key: SSH key of the node (not used)
@type line_identifier: string
@param line_identifier: an identifier of a node in a line of the public key
file. This can be either a node name or a node UUID, depending on if it
got replaced already or not.
@type line_key: string
@param line_key: SSH key of the node whose line is processed
@type found: boolean
@param found: whether or not the line matches the node's name
@type node_uuid: string
@param node_uuid: the node's UUID which will replace the node name
@rtype: (boolean, string)
@return: a tuple indicating whether the target line was found and the
processed line
"""
if node_name == line_identifier:
return (True, "%s %s\n" % (node_uuid, line_key))
else:
return (found, "%s %s\n" % (line_identifier, line_key)) | eb4bc4d320cbbde7d0d56032f0c071629cdb81c3 | 691,291 |
def getVocabList2(filename='vocab.txt'):
    """Read a vocabulary file and return it as an {index: word} dict.

    Each line is expected to hold an integer index followed by a word,
    separated by whitespace.

    Args:
        filename: path of the vocabulary file.  Defaults to 'vocab.txt' so
            the historical zero-argument call keeps working.

    Returns:
        dict mapping int index -> word string.
    """
    vocab = {}
    with open(filename, 'r') as f:
        for line in f:
            parts = line.split()
            if not parts:
                continue  # tolerate blank lines (old code raised IndexError)
            vocab[int(parts[0])] = parts[1]
    return vocab
def quality_index(dat, colname):
    """Return the position of `colname` within `dat`'s structured dtype.

    Anything after the first ':' in `colname` is ignored, so e.g.
    "flux:quality" resolves the index of the "flux" field.
    """
    field, _, _ = colname.partition(':')
    return list(dat.dtype.names).index(field)
def examine_stability(box_pos_ori, box_pos_fin, tol=0.01):
    """
    Examine the stability of the configuration.

    A box counts as stable when the squared difference of its last (z)
    coordinate between the original and final configuration does not exceed
    ``tol``.
    ---------------------------
    box_pos_ori[three-dim list]: original box positions
    box_pos_fin[three-dim list]: final box positions
    Return:
    -------
    isstable[bool list]: one entry per box; True for stable, False for
        unstable.
    """
    assert len(box_pos_ori) == len(box_pos_fin), "Need to use the same configuration."
    isstable = []
    for ori, fin in zip(box_pos_ori, box_pos_fin):
        z_shift_sq = (ori[-1] - fin[-1]) ** 2
        isstable.append(z_shift_sq <= tol)
    return isstable
def get_filename_safe_string(string, max_length=146):
    """
    Converts a string to a string that is safe for a filename.

    Args:
        string (str): A string to make safe for a filename (None becomes
            the literal text "None")
        max_length (int): Truncate strings longer than this length

    Warning:
        Windows has a 260 character length limit on file paths

    Returns:
        str: A string safe for a filename
    """
    if string is None:
        string = "None"
    # Strip characters that are invalid (or troublesome) in filenames.
    forbidden = '\\/:"*?<>|\n\r'
    string = string.translate(str.maketrans('', '', forbidden))
    string = string.rstrip(".")
    return string[:max_length]
def app_metadata_url():
    """Return the URL of the test app's metadata file."""
    return ("https://raw.githubusercontent.com/aiidalab/"
            "aiidalab-hello-world/master/metadata.json")
def is_user_active(user_obj):
    """
    Report whether the given user is active.

    :param user_obj: user instance of the User model (anything exposing an
        ``is_active`` attribute)
    :return: boolean value True or False
    """
    # bool() collapses any truthy/falsy flag to a proper boolean, replacing
    # the old if/return-True/return-False ladder.
    return bool(user_obj.is_active)
from datetime import datetime
def format_datetime(timestamp):
    """Format a POSIX timestamp (seconds since the epoch, UTC) for display.

    Returns a string like ``2024-01-31 @ 12:05``.
    """
    # datetime.utcfromtimestamp() is deprecated since Python 3.12; build an
    # aware UTC datetime instead -- the rendered text is identical.
    from datetime import timezone
    return datetime.fromtimestamp(timestamp, tz=timezone.utc).strftime('%Y-%m-%d @ %H:%M')
def read_monoisotopic_mass_table(input_file):
    """
    Parse a tab/whitespace-separated mass table into a dictionary.

    The input file is expected to have amino acids (capital letters) in the
    first column and their molecular weights (floats) in the second; the
    returned dict maps amino-acid letter -> weight.
    """
    with open(input_file, "r") as handle:
        columns = (line.split() for line in handle)
        return {str(cols[0]): float(cols[1]) for cols in columns}
def process_footnote(footnotes):
    """
    Collect the <li> texts of each footnote table.

    Returns one list per footnote, each containing the whitespace-normalised
    text of its <li> items ('\\n' and runs of spaces collapsed to one space).
    """
    result = []
    for footnote in footnotes:
        items = footnote.find_all("li")
        result.append([" ".join(item.text.split()) for item in items])
    return result
from typing import Mapping
def _mapping2list(map: Mapping):
"""Converts a mapping to a list of k1.lower():v1.lower(), k2:v2, ..."""
return [f"{str(key).lower()}:{str(value).lower()}" for key, value in map.items()] | 7cee135c883ce1ff8dcb48103d1718e4bc76ab25 | 691,301 |
def test_equals(x):
"""
>>> x = testobj()
>>> result = test_equals(x)
>>> isinstance(result, plop)
True
>>> test_equals('hihi')
False
>>> test_equals('coucou')
True
"""
eq = x == 'coucou' # not every str equals returns a bool ...
return eq | f5b69c34f0427e3628ae5ac75234ea15ce8b4272 | 691,302 |
from typing import List
def return_index(tp, element) -> List[int]:
    """Return every index at which ``element`` occurs in ``tp``.

    :param tp: any sequence (tuple, list, string, ...)
    :param element: value to look for
    :return: list of matching indexes (empty when the element is absent)
    """
    # The old code abused a list comprehension purely for its .append()
    # side effect; the comprehension now *is* the result.
    return [index for index, value in enumerate(tp) if value == element]
def _get_emslices(shape1, shape2):
"""
Common code used by :func:`embed_to` and :func:`undo_embed` Written
by Christoph Gohlke, Univiersity of California and Matej Tyc, Brno
University of Technology
"""
slices_from = []
slices_to = []
for dim1, dim2 in zip(shape1, shape2):
diff = dim2 - dim1
# In fact: if diff == 0:
slice_from = slice(None)
slice_to = slice(None)
# dim2 is bigger => we will skip some of their pixels
if diff > 0:
# diff // 2 + rem == diff
rem = diff - (diff // 2)
slice_from = slice(diff // 2, dim2 - rem)
if diff < 0:
diff *= -1
rem = diff - (diff // 2)
slice_to = slice(diff // 2, dim1 - rem)
slices_from.append(slice_from)
slices_to.append(slice_to)
return slices_from, slices_to | 0f75ef563d0e1dda42f91feeb4d3c0ae0f7a0a35 | 691,304 |
def parse_KO_f1(query_line):
    """
    Extract the gene name and KO list from one eggNOG annotation row.

    :param query_line: a mapping for one query result, with at least the
        keys "Query" (gene name) and "KEGG_ko" (comma-separated "ko:XXXXX"
        entries)
    :return: a dict of the form {"gene": gene name, "KO": [KO ids]}
    """
    # str.lstrip("ko:") strips *characters* (k, o, :), not a prefix, and
    # would eat leading 'k'/'o' characters of the id itself; strip the
    # literal "ko:" prefix instead.
    KO_list = [ko[3:] if ko.startswith("ko:") else ko
               for ko in query_line["KEGG_ko"].split(",")]
    return {"gene": query_line["Query"], "KO": KO_list}
def GetBlockIDs(fp):
    """
    Parse the basic-block information stored at the end of BBV frequency
    files.

    Scans forward to the first line starting with 'Block id:' and then
    extracts, from each such line, the 'block id' and 'static instructions'
    fields.  Example entry:
    Block id: 2233 0x69297ff1:0x69297ff5 static instructions: 2 block count: 1 block size: 5

    @return list of (block_id, icount) tuples; block ids are converted from
        the file's 1-based numbering to 0-based.  Empty list when no block
        info is present.
    """
    line = fp.readline()
    # Skip ahead to the block-id section (or hit EOF, where readline
    # returns '').
    while line != '' and not line.startswith('Block id:'):
        line = fp.readline()
    block_id = []
    while line.startswith('Block id:'):
        bb = int(line.split('Block id:')[1].split()[0]) - 1  # 0-based
        icount = int(line.split('static instructions:')[1].split()[0])
        block_id.append((bb, icount))
        line = fp.readline()
    return block_id
import functools
def lcm(l):
    """Return the least common multiple of the numbers in ``l``.

    Uses the identity lcm(a, b) = a * b // gcd(a, b) instead of the old
    brute-force upward search, which needed O(lcm) iterations per pair.
    """
    from math import gcd

    def _lcm(a, b):
        # One multiplication and one gcd replace counting upwards.
        return a * b // gcd(a, b)

    return functools.reduce(_lcm, l)
def burgers():
    """Return the placeholder text for the burgers page."""
    # NOTE: the returned text (including its typos) is kept verbatim --
    # it is runtime output, not documentation.
    page_text = "here ther will be burgers"
    return page_text
import numpy
def set_num(number: int):
    """
    Return the prime at (0-based) index ``number`` among primes below 200000.

    >>> set_num(20)
    73
    >>> set_num(100)
    547
    >>> set_num(10000)
    104743
    """
    limit = 200000
    # Sieve of Eratosthenes over [0, limit).
    is_prime = numpy.ones(limit, dtype=bool)
    is_prime[:2] = False
    for i in range(2, int(numpy.sqrt(limit)) + 1):
        if is_prime[i]:
            # Mark every multiple of i starting at i*i in one vectorised
            # step; the old inner loop recomputed j from a lagging counter
            # and marked i*i twice.
            is_prime[i * i::i] = False
    primes = numpy.flatnonzero(is_prime)
    return primes[number]
def getStringForNonZero(variables, wordsize):
    """
    Build an STP ASSERT command forbidding the all-zero characteristic.

    :param variables: variable names to OR together
    :param wordsize: bit width used for the zero literal
    :return: STP command string asserting OR(variables) != 0
    """
    # str.join replaces the old append-then-chop-last-char pattern.
    ored = "|".join(variables)
    return "ASSERT(NOT(({}) = 0bin{}));".format(ored, "0" * wordsize)
def map_value(value, inmin, inmax, outmin, outmax):
    """Linearly map ``value`` from [inmin, inmax] to [outmin, outmax].

    The input is clamped to [inmin, inmax] before mapping.

    Args:
        value (number): input value.
        inmin (number): min input value.
        inmax (number): max input value.
        outmin (number): min output value.
        outmax (number): max output value.
    Returns:
        number: mapped output value.
    """
    clamped = min(max(value, inmin), inmax)
    return ((outmax - outmin) * (clamped - inmin)) / (inmax - inmin) + outmin
import requests
def get_external_ip():
    """
    Return this host's externally-visible IP, as reported by Akamai's
    whatismyip service.
    """
    response = requests.get('http://whatismyip.akamai.com')
    return response.text
import argparse
import socket
def get_args():
    """Parse and return the command-line arguments for lockable."""
    description = ('run given command while suitable resource is allocated.\n'
                   'Usage example: lockable --requirements {"online":true} '
                   'echo using resource: $ID')
    parser = argparse.ArgumentParser(
        description=description,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--validate-only', action="store_true", default=False,
                        help='Only validate resources.json')
    parser.add_argument('--lock-folder', default='.',
                        help='lock folder')
    parser.add_argument('--resources', default='./resources.json',
                        help='Resources file (utf-8) or http uri')
    parser.add_argument('--timeout', default=1,
                        help='Timeout for trying allocate suitable resource')
    parser.add_argument('--hostname', default=socket.gethostname(),
                        help='Hostname')
    parser.add_argument('--requirements', default="{}",
                        help='requirements as json string')
    parser.add_argument('command', nargs='*',
                        help='Command to be execute during device allocation')
    return parser.parse_args()
from datetime import datetime
def to_timestamp(date):
    """Convert an ISO-8601 UTC string ('%Y-%m-%dT%H:%M:%SZ') to a UNIX timestamp.

    The trailing 'Z' marks the time as UTC.  The previous implementation
    called .timestamp() on a *naive* datetime, which Python interprets in
    the local timezone and therefore silently shifted the result by the
    host's UTC offset.
    """
    from datetime import timezone
    dt = datetime.strptime(date, '%Y-%m-%dT%H:%M:%SZ').replace(tzinfo=timezone.utc)
    return dt.timestamp()
from pathlib import Path
def get_n_bytes(file: Path, n):
    """
    Return the first ``n`` bytes of ``file`` as a bytes object.

    If ``n`` is -1 or greater than the size of the file, all of the file's
    bytes are returned.
    """
    with file.open("rb") as handle:
        data = handle.read(n)
    return data
from functools import reduce
def join(xss):
    """Chain playlists together into one flat list.

    Flattens one level: join([[1], [2, 3]]) -> [1, 2, 3].  The previous
    reduce-with-+ implementation copied the accumulator on every step
    (quadratic overall); a flattening comprehension is linear.
    """
    return [track for playlist in xss for track in playlist]
def check_array(arr):
    """ Validate that ``arr`` is a list and return it unchanged.

    JSON arrays must be represented as lists at this level.  Raises
    AssertionError for non-lists -- kept as AssertionError for backward
    compatibility with callers, but raised explicitly so the check still
    runs under ``python -O`` (a bare ``assert`` is stripped there).
    """
    if not isinstance(arr, list):
        raise AssertionError('Invalid array: {}'.format(arr))
    return arr
def _partial_fn(fn, *args, **kwargs):
"""Partial apply *args and **kwargs after the shape, dtype, key args."""
# Dummy value here is so that we can use lax.switch directly.
def f(shape, dtype, key, _):
return fn(shape, dtype, key, *args, **kwargs)
return f | a0f1d0fa86f7dd4a1ad6c7241a03895966b6725a | 691,320 |
def accuracy(y_hat, y):
    """Fraction of rows where argmax(y_hat) equals the label y, as a scalar."""
    correct = y_hat.argmax(axis=1) == y
    return correct.mean().asscalar()
def transform_lane_gflat2g(h_cam, X_gflat, Y_gflat, Z_g):
    """
    Lift lane points from flat-ground coordinates to real 3D ground space.

    Given X and Y coordinates in the flat (virtual) ground plane and the
    real heights Z_g, recover the real 3D ground X, Y via
    X_g = X_gflat * (1 - Z_g / h_cam), and likewise for Y.

    :param h_cam: camera height above the ground plane
    :param X_gflat: X coordinates in flat ground space
    :param Y_gflat: Y coordinates in flat ground space
    :param Z_g: Z coordinates in real 3D ground space
    :return: (X_g, Y_g) coordinates in 3D ground space
    """
    # NOTE(review): works element-wise on scalars or numpy arrays alike.
    X_g = X_gflat - X_gflat * Z_g / h_cam
    Y_g = Y_gflat - Y_gflat * Z_g / h_cam
    return X_g, Y_g
import json
def MakeDataFrameJson(df):
    """Creates an in-memory json object from ``df``.

    Accepts a pandas DataFrame -- serialised via its own ``to_json``
    codec, which the old ``json.dumps`` round-trip could not handle
    (DataFrames are not JSON-serialisable and raised TypeError) -- or any
    already JSON-serialisable object such as a dict, which is round-tripped
    exactly as before.
    """
    if hasattr(df, "to_json"):
        # pandas objects bring their own JSON encoder.
        return json.loads(df.to_json())
    return json.loads(json.dumps(df))
def encode_shortform_block_annotation(d):
    """Creates the shortform version of the block annotation
    using information from the sequence dictionary.

    Sample output: 'Dm528_2L_3_+_8366009_1_1:1773'

    Parameters
    ----------
    d : dict
        Dictionary representation of an individual entry in a FASTA file.

    Returns
    -------
    str
        The shortform annotation, or '' when any required key is missing
        (e.g. "marker" entries, whose per-sequence dicts carry no extra
        keys).
    """
    try:
        # Block spans are rendered 1-based-start:stop, joined with ';'.
        block_spans = ';'.join(
            '{}:{}'.format(span[0].start + 1, span[0].stop)
            for span in d['blocks']
        )
        parts = [
            d['ref_version'],
            d['chromosome'],
            d['chromosome_scaffold'],
            d['orientation'],
            d['genome_pos'] + 1,  # convert to 1-based position
            len(d['blocks']),
            block_spans,
        ]
        return '_'.join(str(part) for part in parts)
    except KeyError:
        return ''
def is_check(fn):
    """Return True when ``fn`` names a file that contains a check.

    A check file ends in '.py', is not an '__init__.py', and does not have
    'inprogress' anywhere in its path.
    """
    return (fn.endswith(".py")
            and not fn.endswith("__init__.py")
            and "inprogress" not in fn)
import torch
import logging
def alt_compute_ious(gt_masks, pred_masks):
    """Compute Intersection over Union of ground truth and predicted masks.
    Args:
        gt_masks (torch.IntTensor((img_height, img_width, nb_gt_masks))):
            Ground truth masks.
        pred_masks (torch.FloatTensor((img_height, img_width, nb_pred_masks))):
            Predicted masks.
    Returns:
        ious (torch.FloatTensor((nb_gt_masks, nb_pred_masks))):
            Intersection over Union.
    Note: This version vectorize compute_ious, but
    uses a lot of memory and does not show improvements in timing
    """
    # compute IOUs
    # Cast to uint8 so the bitwise &/| below act as logical AND/OR on the
    # (assumed 0/1 -- TODO confirm) mask values.
    gt_masks = gt_masks.to(torch.uint8)
    pred_masks = pred_masks.to(torch.uint8)
    height, width = gt_masks.shape[0:2]
    nb_gts, nb_preds = gt_masks.shape[2], pred_masks.shape[2]
    logging.info(f"{nb_gts} x {nb_preds} (GT x predictions)")
    # Tile both tensors so every (gt, pred) pair lines up along the last
    # axis: each gt mask is repeated nb_preds times, while the whole pred
    # set is repeated nb_gts times.  This is the memory-hungry step the
    # docstring warns about (H*W*nb_gts*nb_preds elements per tensor).
    gt_masks = gt_masks.unsqueeze(3).repeat(1, 1, 1, nb_preds).view(
        height, width, -1)
    pred_masks = pred_masks.repeat(1, 1, nb_gts)
    # Reduce over the spatial dims to get per-pair pixel counts.
    inter = (gt_masks & pred_masks).sum((0, 1))
    union = (gt_masks | pred_masks).sum((0, 1))
    ious = torch.div(inter.float(), union.float())
    # Pairs with an empty union would be 0/0; define their IoU as 0.
    ious[union == 0] = 0
    return ious.view(nb_gts, nb_preds)
import os
def _parse_format(input_file: str) -> str:
"""
Parse a file name to determine an input format
:param input_file: Input file name
:return: Canonical name of input format (fasta/fastq)
"""
extensions = {"fastq": "fastq",
"fq": "fastq",
"fasta": "fasta",
"fna": "fasta",
"ffn": "fasta",
"faa": "fasta",
"frn": "fasta"}
splitext = os.path.splitext(input_file)
if len(splitext) < 2 or (splitext[1][1:].lower() not in extensions):
raise ValueError("Unknown extension: {}".format(splitext[1][1:]))
return extensions[splitext[1][1:]] | 9124a642fc11155bc625373108574b0b9c889b1b | 691,328 |
def extract_lines(data) -> list:
    """
    Extract pkt-line style lines from the given server data.

    Each line is prefixed by a 4-hex-digit length that includes the 4-char
    header itself; a length of 0000 is a delimiter packet and yields an
    empty line.

    :param data: raw response string
    :return list: extracted lines
    """
    lines = []
    i: int = 0
    for _ in range(1000):  # hard cap guards against malformed input
        if i >= len(data):
            # The old check (len(data) < i) missed the exact-consumption
            # case and crashed on int('', 16) in the next iteration.
            break
        line_length = int(data[i:i + 4], base=16)
        line = data[i + 4: i + line_length]
        lines.append(line)
        if line_length == 0:
            i += 4  # delimiter packet: advance past its header only
        else:
            i += line_length
    return lines
def murphy_simm_quartz(Vp, B=0.8029, C=-0.7509):
    """
    Vs from Vp using the Murphy-Simm quartz relationship.  Very similar to
    the Greenberg-Castagna sandstone line but often fits very clean high
    porosity sandstone a bit better (from RokDoc Help documents).

    The relation actually applied here is linear: Vs = B*Vp + C.
    """
    return B * Vp + C
def get_velocity_line(pt, vx, vy):
    """Line through ``pt`` with direction given by velocities (vx, vy).

    Returns (slope, bias) for y = slope*x + bias, or (None, None) when the
    line is vertical (vx == 0).
    """
    if vx == 0:
        return None, None
    slope = vy / vx
    bias = pt[1] - slope * pt[0]
    return slope, bias
def guessShaper(key, layer):
    """
    Use the name to find a known shaper, otherwise fall back to the
    default, upsample.  ``layer`` is accepted for interface compatibility
    but unused; possibly not used at all if the output size is already the
    desired depth.
    """
    return "crop" if "crop" in key.lower() else "upsample"
def omrs_datetime_to_date(value):
    """
    Converts an OpenMRS datetime to a CommCare date
    >>> omrs_datetime_to_date('2017-06-27T00:00:00.000+0000') == '2017-06-27'
    True
    """
    if not value or 'T' not in value:
        # None / empty / already a bare date: pass through unchanged.
        return value
    date_part, _, _ = value.partition('T')
    return date_part
def default(default_value, force=False):
    """
    Create a setter function which lets a field fall back to a given
    default value.

    :param Any default_value: The default value to set
    :param bool force: Whether or not to force the default value regardless
        of a value already being present
    :rtype: function
    """
    def default_setter(value):
        """Return ``value`` unless it is falsy or the default is forced.

        :param Any value: Injected by CKAN core
        :rtype: Any
        """
        if force or not value:
            return default_value
        return value
    return default_setter
def filter_d(mzs, rts, ccss, data):
    """
    filter_d
    description:
        helper for filtering data: given M/Z, RT, CCS ranges and a
        DataFrame containing data, return all rows strictly within those
        ranges.
        * CCS tolerance is absolute in this case, NOT a percentage *
    parameters:
        mzs (list(float)) -- mz [0] and tolerance [1]
        rts (list(float)) -- rt [0] and tolerance [1]
        ccss (list(float)) -- ccs [0] and tolerance [1]
        data (pandas.DataFrame) -- DataFrame representation of the data
    """
    # Casts to float happen at input time, not here.
    in_mz = (data[0] > mzs[0] - mzs[1]) & (data[0] < mzs[0] + mzs[1])
    in_rt = (data[1] > rts[0] - rts[1]) & (data[1] < rts[0] + rts[1])
    in_ccs = (data[2] > ccss[0] - ccss[1]) & (data[2] < ccss[0] + ccss[1])
    return data[in_mz & in_rt & in_ccs]
import os
def mock_resource_with(filename, resource_type):
    """
    Mock an HTTP request by reading text from a pre-downloaded fixture
    file stored under the adjacent "fixtures" directory.

    ``resource_type`` must be 'html' or 'txt'; anything else raises.
    """
    valid_resources = ['html', 'txt']
    if resource_type not in valid_resources:
        raise Exception('Mocked resource must be one of: %s' %
                        ', '.join(valid_resources))
    subfolder = 'text' if resource_type == 'txt' else 'html'
    fixture_dir = os.path.join(os.path.dirname(__file__), "fixtures")
    resource_path = os.path.join(fixture_dir, "%s/%s.%s" %
                                 (subfolder, filename, resource_type))
    with open(resource_path, 'r', encoding='utf-8') as handle:
        return handle.read()
from typing import List
def flatten(lst: List) -> List:
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    flat = []
    for sublist in lst:
        flat.extend(sublist)
    return flat
def make_hit(card, cards, deck):
    """
    Move ``card`` from ``deck`` into the player's hand ``cards`` (both
    mutated in place) and return the hand.

    If the card is not in the deck, the hand is returned unchanged.
    """
    if card in deck:
        deck.remove(card)
        cards.append(card)
    return cards
import math
def Expectation(Ra, Rb):
    """ Calculates the 'Expectation' value following the original formula """
    exponent = -((Ra - Rb) / 8)
    return 1.0 / (1 + math.pow(10, exponent))
from typing import Optional
import pathlib
def _log_figure_path(path: Optional[pathlib.Path]) -> Optional[pathlib.Path]:
"""Adds a suffix to a figure path to indicate the use of a logarithmic axis.
If the path is None (since figure should not be saved), it will stay None.
Args:
path (Optional[pathlib.Path]): original path to figure, or None if figure should
not be saved
Returns:
Optional[pathlib.Path]: new path to figure including _log suffix, or None if
original path is None
"""
if path is not None:
return path.with_name(path.stem + "_log" + path.suffix)
return None | 381bde67a89f0f61d7bdb9a9f2ea033664e6fab0 | 691,342 |
import copy
def copy_model(model, share_params=True):
    """
    Deep-copy a model's structure and, optionally, re-point the copy's
    parameter data/grad at the base model's tensors so both share storage.
    """
    clone = copy.deepcopy(model)
    if share_params:
        clone_params = dict(clone.named_parameters())
        for name, param in model.named_parameters():
            if name in clone_params:
                clone_params[name].data = param.data
                clone_params[name].grad = param.grad
    return clone
def label(self):
    """
    Returns:
        label (string): name for the hazard category, or None when the
        backing ``_label`` attribute has not been set
    """
    return getattr(self, '_label', None)
import os
def increment_path(path):
    """
    Adds or increments a number near the end of `path` to support avoiding
    collisions.

    .. note::
        If `path` has multiple extensions, the number is placed before the
        final one (see **Examples**).  This is consistent with how the
        Python ``wget`` library avoids collisions and how macOS names file
        copies.

    Parameters
    ----------
    path : str
        File or directory path.

    Returns
    -------
    new_path : str
        `path` with an added or incremented number.

    Examples
    --------
    .. code-block:: python

        increment_path("data.csv")
        # data 1.csv
        increment_path("data 1.csv")
        # data 2.csv
        increment_path("archive.tar.gz")
        # archive.tar 1.gz
    """
    stem, ext = os.path.splitext(path)
    if ' ' in stem:
        prefix, suffix = stem.rsplit(' ', 1)
        if suffix.isdigit():
            # Already numbered: bump the counter.
            return "{} {}{}".format(prefix, int(suffix) + 1, ext)
    # Not numbered yet: start at 1.
    return "{} 1{}".format(stem, ext)
def is_equal(left, right):
    """
    Compare two possibly-uninitialised values.

    Both being None counts as equal (neither has been initialised yet);
    exactly one None is unequal; otherwise fall back to a value
    comparison.
    """
    if left is None or right is None:
        # Equal only when *both* are still None.
        return left is right
    # Mirror the original's use of != so custom __ne__ behaviour is kept.
    return not (left != right)
def group(n, lst, discard_underfull = False):
    """Split sequence into subsequences of given size.

    The final chunk may be shorter; pass ``discard_underfull=True`` to
    drop it in that case.

    Fix: the old implementation crashed with IndexError on an empty
    ``lst`` when ``discard_underfull`` was set, because it indexed
    ``out[-1]`` unconditionally.
    """
    out = [lst[i:i + n] for i in range(0, len(lst), n)]
    if discard_underfull and out and len(out[-1]) < n:
        out.pop()
    return out
from typing import Optional
def fancy_archetype_name(position: str, archetype: str) -> Optional[str]:
    """
    If given a non-goalie position and a string 'passer', 'shooter', or
    'deker' (case insensitive), returns a string corresponding to the
    proper name per rulebook.  Returns None otherwise.
    """
    position, archetype = position.upper(), archetype.upper()
    # .get() honours the documented "None otherwise" contract; the old
    # [archetype] indexing raised KeyError on unknown archetypes.
    if position == "FORWARD":
        return {"PASSER": "Playmaker", "SHOOTER": "Sniper",
                "DEKER": "Dangler"}.get(archetype)
    if position == "DEFENSEMAN":
        return {"PASSER": "Enforcer", "SHOOTER": "Offensive Defenseman",
                "DEKER": "Finesser"}.get(archetype)
    return None
from bs4 import BeautifulSoup
def is_single_media(text):
    """
    Judge whether the paragraph consists solely of a single media marker.

    :param text: one paragraph string.
    :return: bool.
    """
    soup = BeautifulSoup(text, 'lxml')
    anchors = soup.select('a')
    if anchors:
        # A lone <a>[Media]</a> with nothing else around it.
        anchor = anchors[0]
        return (anchor.getText() == '[Media]'
                and text.replace(str(anchor), '') == '')
    # Otherwise, a bare [Media] placeholder surrounded only by whitespace.
    return text.strip() == '[Media]'
def center_low_freq_2d(x):
    """Reorder the last two axes of a multidimensional tensor.

    Along each of the last two dimensions, a length-n axis
    [x_0, ..., x_{n-1}] is reordered to
    [x_{n//2}, ..., x_{n-1}, x_0, ..., x_{n//2 - 1}]
    (for odd n the first element is x_{(n+1)//2 - 1} per integer division).
    If `x` is the FFT of a signal, this can be understood as centering the
    frequencies.
    """
    rows, cols = x.shape[-2:]
    col_order = list(range(cols // 2, cols)) + list(range(cols // 2))
    row_order = list(range(rows // 2, rows)) + list(range(rows // 2))
    return x[..., col_order][..., row_order, :]
import os
def venv_ignored():
    """
    Check if the virtual env is to be ignored (env var ``_IGNORE_VENV_``
    set to '1').  Graceously borrowed from the hatch package.
    """
    flag = os.environ.get('_IGNORE_VENV_')
    return flag == '1'
from typing import Dict
from typing import Any
def validObject(object_: Dict[str, Any]) -> bool:
    """
    Check if the Dict passed in POST is of valid format or not
    (i.e. whether it carries an "@type" key).

    :param object_ - Object to be checked
    :return: True when "@type" is present, else False
    """
    # The membership test already yields the required boolean.
    return "@type" in object_
def calc_unproductive_rxn_score(system_stats, relative_error = 0.5, max_sims = 500):
    """ Calculates the unproductive reaction score: the maximum fraction of
    any resting set occupied with unproductive reactions at a given time
    (floored at 0.0). """
    score = 0.0
    for resting_set in system_stats.get_restingsets():
        stats = system_stats.get_stats(resting_set)
        depletion = stats.get_temporary_depletion(relative_error,
                                                  max_sims=max_sims)
        score = max(score, depletion)
    return score
def get_transaction_amount():
    """Prompt the user for a transaction amount and return it as float.

    Returns:
        float: The user amount.
    """
    raw = input('Type the amount please: ')
    return float(raw)
def none_if_invalid(item):
    """
    Turn 'falsy' data (like [], "", and 0) into None; anything truthy is
    passed through unchanged.

    :param item: The item for which to check falsiness.
    :return: None if the item is falsy, otherwise the item.
    """
    if item:
        return item
    return None
from typing import List
def insertion_sort(arr: List[int]) -> List[int]:
    """Sort ``arr`` in place via insertion sort and return it.

    Each element is inserted into the already-sorted prefix to its left.
    """
    for pos in range(1, len(arr)):
        key = arr[pos]
        hole = pos
        # Shift larger elements right until the insertion point is found.
        while hole > 0 and arr[hole - 1] > key:
            arr[hole] = arr[hole - 1]
            hole -= 1
        arr[hole] = key
    return arr
def sample_by_id(data, id_column, n=None, fraction=None, replace=False, random_state=None, **kwargs):
    """
    this method samples the data, by considering rows with the same id as one and refusing to divide them
    it is useful when you want a sample of the data based on ids when the ids are repeated
    and you want to take all or none of each id, but not a partial sample of multiple rows with the same id
    :param DataFrame data: dataframe to be sampled
    :param str id_column: column to be used as unique identifier
    :param int n: number of items (distinct ids) to return
    :param float fraction: fraction of items to return, cannot be used with `n`
    :param bool replace: with or without replacement
    :param int random_state: seed for the random number generator
    :rtype: DataFrame
    """
    # Copy first so the caller's dataframe is not mutated by the temporary
    # '__index__' column added below.
    data = data.copy()
    # Preserve the original row index through the merge (merge resets it).
    data['__index__'] = data.index
    # Sample at the id level...
    ids = data[[id_column]].drop_duplicates()
    sampled_ids = ids.sample(n=n, frac=fraction, replace=replace, random_state=random_state, **kwargs)
    # ...then pull back every row belonging to each sampled id and restore
    # the original index.
    result = sampled_ids.merge(right=data, on=id_column, how='left').set_index('__index__')
    result.index.name = data.index.name
    return result
def makeIterable(item):
    """
    Return an iterator over ``item``; a non-iterable argument is wrapped
    in a single-element iterator.
    """
    try:
        return iter(item)
    except TypeError:
        return iter([item])
def geometrical_spreading(freq, dist, model="REA99"):
    """
    Effect of geometrical spreading.

    Args:
        freq (array):
            Numpy array of frequencies for computing spectra (Hz).
            Accepted for interface compatibility; unused by 'REA99'.
        dist (float):
            Distance (km).
        model (str):
            Name of model for geometric attenuation. Currently only
            supported value:
                - 'REA99' for Raoof et al. (1999)

    Returns:
        Geometrical spreading factor.
    """
    if model != 'REA99':
        raise ValueError('Unsupported anelastic attenuation model.')
    cross_over_km = 40.0
    if dist <= cross_over_km:
        return dist ** (-1.0)
    return (dist / cross_over_km) ** (-0.5)
import unicodedata
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False | ac4eeeeceba447f85cb61d792237c930d9c131f9 | 691,361 |
import json
def load_file(file):
    """ Load one JSON file and return its parsed content wrapped in a list.

    :param file: path of the JSON file to read
    :return: a single-element list containing the loaded json object (kept
        as a list for backward compatibility with callers that iterate it)
    """
    with open(file, 'r') as handle:
        # The old explicit f.close() inside the with-block was redundant;
        # the context manager already closes the file.
        parsed = json.load(handle)
    return [parsed]
def source_mac(source_list):
    """Returns the source MAC address, byte 6 - byte 11, space-separated."""
    # " ".join replaces manual concatenation plus trailing-space trimming.
    return " ".join(source_list[6:12])
def glue_pair(code1, code2):
    """Glue two pieces of code into one by concatenation."""
    glued = code1 + code2
    return glued
def cleanList(aList):
    """
    Returns aList with any duplicates removed (element order is not
    preserved, since a set is used for deduplication).
    """
    unique = set(aList)
    return list(unique)
def delete_vm(client, resource_group_name, vm_name):
    """
    Delete a VMware virtual machine via the given client.
    """
    result = client.delete(resource_group_name, vm_name)
    return result
def collapse_list(the_list):
    """Flatten one level of nesting: a list of lists becomes one list."""
    flattened = []
    for sublist in the_list:
        for element in sublist:
            flattened.append(element)
    return flattened
from functools import reduce
def bytes_to_int(bytes):
    """Convert a big-endian byte string to an unsigned integer.

    Args:
        bytes(bytes): bytes to be converted
    Returns:
        int: the big-endian integer value (0 for empty input)
    Examples:
        >>> bytes_to_int(b'\xde')
        222
    """
    # int.from_bytes replaces the manual reduce shift/add loop and,
    # unlike reduce without an initializer, handles empty input (-> 0).
    return int.from_bytes(bytes, 'big')
import hashlib
def hashmd5(s, n=4):
    """Stable cross-platform string hash based on MD5.

    Always returns the same output for a given input (suitable for DB
    storage), in contrast to the built-in hash() whose result varies
    between platforms and Python versions. Takes the first ``n`` bytes
    (2*n hex digits) of the MD5 digest and returns them as an unsigned
    big-endian integer, 1 <= n <= 16. Works for Unicode strings too.

    >>> hashmd5("Ala ma kota", 3), hashmd5("Ala ma kota", 7)
    (9508390, 40838224789264552)
    """
    digest = hashlib.md5(s.encode('utf-8')).digest()
    # The first n digest bytes, read big-endian, equal the integer value
    # of the first 2*n hex digits of the hexdigest.
    return int.from_bytes(digest[:n], 'big')
def linear_map(i, i_min, i_max, o_min, o_max):
    """Linearly map ``i`` from the range [i_min, i_max] to [o_min, o_max].

    Works for both increasing (o_min < o_max) and decreasing
    (o_min > o_max) output ranges: the original's two branches were
    algebraically identical, so a single formula suffices.

    :param i: input value to map
    :param i_min: lower bound of the input range
    :param i_max: upper bound of the input range (must differ from i_min)
    :param o_min: output value corresponding to i_min
    :param o_max: output value corresponding to i_max
    :return: the mapped value as a float
    """
    t = float(i - i_min) / float(i_max - i_min)
    return o_min + t * float(o_max - o_min)
def func(self, obj, key):
    """Return ``key``, discarding the passed-in ``obj``.

    Rebinding ``obj`` in the original only changed the local name, so
    the net effect is simply to return ``key``.
    """
    replacement = key
    return replacement
def format_git_describe(git_str, pep440=False):
    """Format the output of ``git describe`` as a Python version string.

    :param git_str: raw ``git describe`` output, or None
    :param pep440: if True, drop the git hash so the result is PEP 440
        compliant
    :return: the formatted version string, or None if ``git_str`` is None
    """
    if git_str is None:
        return None
    # Exactly at a tag: 'git describe' prints the bare tag name.
    if "-" not in git_str:
        return git_str
    # Otherwise the output is 'version-N-ghash'; rewrite the first dash
    # so it reads 'version.postN-ghash'.
    post_form = git_str.replace("-", ".post", 1)
    if pep440:
        # PEP 440 does not allow a trailing git hash.
        return post_form.split("-")[0]
    return post_form.replace("-g", "+git")
import requests
def image_metrics(ID, filename):
    """GET the metrics of one of a user's images from the server.

    :param ID: user name
    :param filename: name of the file whose metrics are requested
    :return: dict of image metrics, e.g.::

        {"timestamp": ..., "size": [100, 100],
         "latency": 1, "process": 'Reverse Video'}
    """
    payload = {
        "username": ID,
        "filename": filename,
    }
    response = requests.get(
        "http://vcm-9030.vm.duke.edu:5000/api/image_metrics",
        json=payload)
    return response.json()
import re
def email_finder(text):
    """Return all email addresses found inside ``text``.

    :param text: the text to search
    :return: list of matched email-address strings
    """
    # Use a name that does not shadow the function itself (the original
    # rebound ``email_finder`` to the compiled pattern locally), and call
    # the pattern's own findall instead of re.findall(pattern, ...).
    pattern = re.compile(r'[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,4}')
    return pattern.findall(text)
def getservbyname(servicename, protocolname=None):  # real signature unknown; restored from __doc__
    """getservbyname(servicename[, protocolname]) -> integer

    Return a port number from a service name and protocol name.
    The optional protocol name, if given, should be 'tcp' or 'udp',
    otherwise any protocol will match.

    This restored stub always returns 0.
    """
    return 0
def _isValidOpcodeByte(sOpcode):
"""
Checks if sOpcode is a valid lower case opcode byte.
Returns true/false.
"""
if len(sOpcode) == 4:
if sOpcode[:2] == '0x':
if sOpcode[2] in '0123456789abcdef':
if sOpcode[3] in '0123456789abcdef':
return True;
return False; | dee516c1765aedfc9f7c0044cde29495d54f81f5 | 691,379 |
def correspaction(data):
    """Extract all TEI ``<correspAction>`` elements from ``data``.

    :param data: parsed TEI tree element exposing ``findall`` and ``nsmap``
        (e.g. an lxml element)
    :return: list of matching elements
    """
    xpath = ".//correspAction"
    return data.findall(xpath, namespaces=data.nsmap)
def domainCLDef(cloid: str, codelists: list) -> dict:
    """Resolve a codelist OID against the library codelist definitions.

    :param cloid: codelist OID; its first three characters are a prefix,
        the remainder is matched against each codelist's ``href``.
    :param codelists: library codelist definitions (dicts with ``href``,
        ``name`` and ``terms`` keys)
    :return: dict with ``name``, ``href`` and ``terms`` for the first
        matching codelist, or an empty dict if none matches
    """
    href = cloid[3:]
    for codelist in codelists:
        if codelist["href"] == href:
            return {
                "name": codelist["name"],
                "href": cloid,
                "terms": codelist["terms"],
            }
    return {}
def count_k(n, k):
    """Count the ways to climb ``n`` stairs taking 1..k steps at a time.

    Generalized version of count_stair_ways where the maximum number of
    steps to take at once is ``k``.

    >>> count_k(3, 3)  # 3, 2 + 1, 1 + 2, 1 + 1 + 1
    4
    >>> count_k(4, 4)
    8
    >>> count_k(10, 3)
    274
    >>> count_k(300, 1)  # Only one step at a time
    1
    """
    if n < 0:
        return 0
    # Bottom-up dynamic programming replaces the original exponential
    # recursion: ways[m] = sum of ways[m - i] over step sizes i in 1..k.
    ways = [0] * (n + 1)
    ways[0] = 1
    for m in range(1, n + 1):
        ways[m] = sum(ways[m - i] for i in range(1, k + 1) if m - i >= 0)
    return ways[n]
def deplace_zero(matrix, support_minimal, min_nb):
    """Hungarian-method step: subtract the minimum from every uncovered
    element and add it to every doubly-covered element.

    :param matrix: cost matrix (list of lists), modified in place
    :param support_minimal: dict with covered rows under 'ligne' and
        covered columns under 'colonne'
    :param min_nb: smallest uncovered value
    :return: the updated matrix (same object as ``matrix``)
    """
    covered_rows = support_minimal['ligne']
    covered_cols = support_minimal['colonne']
    for row_idx, row in enumerate(matrix):
        for col_idx, value in enumerate(row):
            row_covered = row_idx in covered_rows
            col_covered = col_idx in covered_cols
            if not row_covered and not col_covered:
                # Uncovered cell: subtract the minimum.
                matrix[row_idx][col_idx] = value - min_nb
            elif row_covered and col_covered:
                # Cell covered by both a row and a column line: add it.
                matrix[row_idx][col_idx] = value + min_nb
    return matrix
import ast
def json_tuple_to_cols(df, column_name, col_config={'cols': {'key_prop': 'Name', 'value_prop': 'Value'},
                                                    'look_up': {'key_prop': 'name', 'value_prop': 'value'}}):
    """
    Convert a column with a JSON tuple in it to two columns (typically a
    name/value pair).

    Parameters
    ----------
    :param df: the data frame (two new columns are added in place)
    :param column_name: column with the json tuple
    :param col_config: conversion config; 'cols' names the two output
        columns, 'look_up' names the keys read from each JSON cell.
        NOTE: the default is a shared mutable dict -- it is never
        mutated here, but callers should not modify it.
    :return: a modified dataframe with ``column_name`` dropped

    Examples
    --------
    IN[51]: qb_lookup_keys = {'key_prop': 'name', 'value_prop': 'value'}
    IN[52]: invoices = json_tuple_to_cols(invoices, 'Line.DiscountLineDetail.DiscountAccountRef',
                 col_config={'cols': {'key_prop': 'Discount Details', 'value_prop': 'Discount %'},
                             'look_up': qb_lookup_keys})
    """
    def get_value(cell, prop):
        # Cells may arrive as a repr string, a dict, or a list of dicts.
        value = cell
        if isinstance(value, str):
            value = ast.literal_eval(cell)
        if isinstance(value, dict):
            return value.get(prop)
        if isinstance(value, list):
            return value[0].get(prop)
        return None

    lookup = col_config['look_up']
    cols = col_config['cols']
    df[cols['key_prop']] = df[column_name].apply(
        lambda cell: get_value(cell, lookup['key_prop']))
    df[cols['value_prop']] = df[column_name].apply(
        lambda cell: get_value(cell, lookup['value_prop']))
    # pandas 2.0 removed the positional axis argument (``drop(name, 1)``);
    # the explicit ``columns=`` keyword works on both old and new pandas.
    return df.drop(columns=[column_name])
def find_lexeme(verb):
    """Return the inflected forms of a regular verb (base form) using a
    rule-based approach.

    Taken from pattern.en (inlined because it would not package into
    py2app properly). The 12-element result is
    [base, base, base, 3rd-person singular, base, present participle]
    followed by six copies of the past form. The order of the rules
    below matters and is preserved exactly.
    """
    VOWELS = ('a', 'e', 'i', 'o', 'u')
    w = verb.lower()
    if len(w) > 1 and w.endswith("e") and w[-2] not in VOWELS:
        # Consonant followed by "e": dance, save, devote, evolve.
        return [w, w, w, w + "s", w, w[:-1] + "ing"] + [w + "d"] * 6
    if len(w) > 1 and w.endswith("y") and w[-2] not in VOWELS:
        # Consonant followed by "y": comply, copy, magnify.
        return [w, w, w, w[:-1] + "ies", w, w + "ing"] + [w[:-1] + "ied"] * 6
    if w.endswith(("ss", "sh", "ch", "x")):
        # Sibilant endings: kiss, bless, box, polish, preach.
        return [w, w, w, w + "es", w, w + "ing"] + [w + "ed"] * 6
    if w.endswith("ic"):
        # -ic endings: panic, mimic.
        return [w, w, w, w + "es", w, w + "king"] + [w + "ked"] * 6
    if len(w) > 1 and w[-1] not in VOWELS and w[-2] not in VOWELS:
        # Consonant cluster: delight, clamp.
        return [w, w, w, w + "s", w, w + "ing"] + [w + "ed"] * 6
    if (len(w) > 1 and w.endswith(("y", "w")) and w[-2] in VOWELS) \
            or (len(w) > 2 and w[-1] not in VOWELS and w[-2] in VOWELS and w[-3] in VOWELS) \
            or (len(w) > 3 and w[-1] not in VOWELS and w[-3] in VOWELS and w[-4] in VOWELS):
        # Long vowel or diphthong followed by a consonant: paint, devour, play.
        return [w, w, w, w + "s", w, w + "ing"] + [w + "ed"] * 6
    if len(w) > 2 and w[-1] not in VOWELS and w[-2] in VOWELS and w[-3] not in VOWELS:
        # Short vowel followed by a consonant (double the final
        # consonant): chat, chop, compel.
        return [w, w, w, w + "s", w, w + w[-1] + "ing"] + [w + w[-1] + "ed"] * 6
    return [w, w, w, w + "s", w, w + "ing"] + [w + "ed"] * 6
def upper(input_ch):
    """Return ``input_ch`` upper-cased when it is lower case, otherwise
    return it unchanged.

    :param input_ch: a character the user typed to run the program.
    :return: the (possibly upper-cased) character.
    """
    return input_ch.upper() if input_ch.islower() else input_ch
import six
import re
def is_valid_mac(address):
    """Return whether the given value is a valid colon-separated MAC.

    :param address: candidate value (any type; non-strings are invalid)
    :return: True if ``address`` looks like 'aa:bb:cc:dd:ee:ff'
    """
    pattern = "[0-9a-f]{2}(:[0-9a-f]{2}){5}$"
    # str replaces the six.string_types Python-2 shim, and bool() makes
    # the function return an actual boolean instead of a match object,
    # matching the docstring's "return whether" contract.
    return isinstance(address, str) and bool(re.match(pattern, address.lower()))
def auto_str(cls):
    """Class decorator that generates a ``__str__`` listing every
    instance attribute as ``ClassName(name=value, ...)``.

    https://stackoverflow.com/questions/32910096/is-there-a-way-to-auto-generate-a-str-implementation-in-python
    """
    def __str__(self):
        attributes = ', '.join('%s=%s' % pair for pair in vars(self).items())
        return '%s(%s)' % (type(self).__name__, attributes)
    cls.__str__ = __str__
    return cls
def group_by_clauses(selectable):
    """Return the GROUP BY clause list of a select/query object.

    Relies on the private ``_group_by_clauses`` attribute of the
    selectable (SQLAlchemy internal).
    """
    clauses = selectable._group_by_clauses
    return clauses
from textwrap import dedent
def get_tutorial_examples():
    """Return the dlapp tutorial examples text.

    The returned multi-line string is a walk-through of the
    ``find``/lookup/select query syntax. It is display text only: the
    ``>>>`` lines are illustrative, not executed doctests.
    """
    # The literal below is runtime content; ``dedent`` strips the common
    # leading indentation before it is returned.
    text = '''
    Example 1:
    ---------
    we need to find any item in a list_of_dict where
    key of item (i.e dict) has a value starting with Ap
    In this case, we need to look into every item of a list_of_dict,
    and then grab (key, value) pair that key is equal to "a" and
    value need to have a prefix of Ap.
    first criteria is that traverses lst_of_dict and report any item
    has key is equal "a"
    >>> result = query_obj.find(lookup='a', select='')
    >>> result
    ['Apple', 'Apricot', 'Avocado']
    >>>
    second criteria is that value of key "a" must have a "Ap" prefix.
    To be able to achieve this case, we can either use regular
    expression or wildcard filtering algorithm in lookup argument.
    >>> result = query_obj.find(lookup='a=_wildcard(Ap*)', select='')
    >>> result
    ['Apple', 'Apricot']
    >>> # or use regex
    >>> result = query_obj.find(lookup='a=_regex(Ap.+)', select='')
    >>> result
    ['Apple', 'Apricot']
    >>>
    there is another way to achieve the same result by using select-statement
    WHERE clause
    >>> result = query_obj.find(lookup='a', select='WHERE a match Ap.+')
    >>> result
    ['Apple', 'Apricot']
    >>>
    Example 2:
    ---------
    Find values where items of lst_of_dict have key "a" or "c"
    >>> result = query_obj.find(lookup='_wildcard([ac])', select='')
    >>> result
    ['Apple', 'Cherry', 'Apricot', 'Cantaloupe', 'Avocado', 'Clementine']
    >>>
    >>> result = query_obj.find(lookup='_regex([ac])', select='')
    >>> result
    ['Apple', 'Cherry', 'Apricot', 'Cantaloupe', 'Avocado', 'Clementine']
    Example 3:
    ---------
    Find values where items of lst_of_dict have key "a" or "c" where items
    value have letter i or y
    >>> result = query_obj.find(lookup='_wildcard([ac])=_wildcard(*[iy]*)', select='')
    >>> result
    ['Cherry', 'Apricot', 'Clementine']
    >>>
    >>> result = query_obj.find(lookup='_wildcard([ac])=_regex(.*[iy].*)', select='')
    >>> result
    ['Cherry', 'Apricot', 'Clementine']
    >>> result = query_obj.find(lookup='_regex([ac])=_wildcard(*[iy]*)', select='')
    >>> result
    ['Cherry', 'Apricot', 'Clementine']
    >>>
    >>> result = query_obj.find(lookup='_regex([ac])=_regex(.*[iy].*)', select='')
    >>> result
    ['Cherry', 'Apricot', 'Clementine']
    Note: in this case, the lookup argument contains two expressions:
    a left expression and a right expression, a separator between
    left and right expression is "=" symbol.
    lookup : _wildcard([ac])=_regex(.*[iy].*)
    left expression : _wildcard([ac])
    right expression: _regex(.*[iy].*)
    Example 3.1:
    -----------
    Find values where items of lst_of_dict have key "a" or "c" where items
    value have letter i or y and select a, c
    >>> # this is a result without select a, c
    >>> result = query_obj.find(lookup='_wildcard([ac])=_wildcard(*[iy]*)', select='')
    >>> result
    ['Cherry', 'Apricot', 'Clementine']
    >>>
    >>> # this is a result after select a, c
    >>> result = query_obj.find(lookup='_wildcard([ac])=_wildcard(*[iy]*)', select='SELECT a, c')
    >>> result
    [{'a': 'Apple', 'c': 'Cherry'}, {'a': 'Apricot', 'c': 'Cantaloupe'}, {'a': 'Avocado', 'c': 'Clementine'}]
    >>>
    ########################################
    '''
    return dedent(text)
def format_nmmpmat(denmat):
    """Format a 7x7 complex numpy array for the n_mmp_mat file.

    Produces 14 strings; every two lines correspond to one matrix row.
    Real and imaginary parts alternate, each rendered with 20.13f, so
    each physical line carries exactly seven 20-character fields.

    :param denmat: numpy array (7x7) and complex for formatting
    :raises ValueError: if ``denmat`` has the wrong shape or dtype
    :returns: list of str formatted in lines for the n_mmp_mat file
    """
    if denmat.shape != (7, 7):
        raise ValueError(f'Matrix has wrong shape for formatting: {denmat.shape}')
    if denmat.dtype != complex:
        raise ValueError(f'Matrix has wrong dtype for formatting: {denmat.dtype}')

    def fields(values):
        # Render real/imag pairs back to back, 20.13f each.
        return ''.join(f'{z.real:20.13f}{z.imag:20.13f}' for z in values)

    lines = []
    for row in denmat:
        # First line: columns 0-2 plus the real part of column 3 ...
        lines.append(fields(row[:3]) + f'{row[3].real:20.13f}')
        # ... second line: imaginary part of column 3 plus columns 4-6.
        lines.append(f'{row[3].imag:20.13f}' + fields(row[4:]))
    return lines
import filecmp
import os
def compare_actual_with_expected(test_name):
    """Compare a test's '.actual' output file with its golden
    '.expected' file, removing the '.actual' file afterwards.

    :param test_name: base name shared by both files
    :return: True if the files are byte-identical
    """
    expected_path = "../data/dump/gpu_dumps/golden/" + test_name + ".expected"
    actual_path = test_name + ".actual"
    files_match = filecmp.cmp(expected_path, actual_path, shallow=False)
    if os.path.exists(actual_path):
        os.remove(actual_path)
    return files_match
import os
def get_file_extension(f):
    """Return the lower-cased file extension of ``f`` without the dot."""
    _, extension = os.path.splitext(str(f))
    return extension.lower().replace('.', '')
def _set_default_application(application_id: int, type_id: int) -> int:
"""Set the default application for mechanical relays.
:param application_id: the current application ID.
:param type_id: the type ID of the relay with missing defaults.
:return: _application_id
:rtype: int
"""
if application_id > 0:
return application_id
return {1: 1, 2: 1, 3: 8, 4: 1, 5: 6, 6: 3}[type_id] | 5a162670c144cd37299caf728ca8cb4da87d9426 | 691,395 |
def arb_func(arg):
    """Identity function: return the given argument unchanged."""
    result = arg
    return result
import re
def remove_comments(text_string):
    """Strip HTML comments (``<!-- ... -->``) from a text string.

    Arguments:
        text_string {string} -- The text to be matched

    Returns:
        {string} -- The input text string with html comments removed
    """
    # (?s) makes '.' span newlines, so multi-line comments are removed too.
    pattern = "(?s)<!--(.*?)-->"
    stripped = re.sub(pattern, "", text_string)
    return stripped
def str2bool(str):
    """Converts a string to a boolean value. The conversion is case
    insensitive.

    :param str: string to convert.
    :type str: string
    :returns: True if str is one of: "yes", "y", "true", "t" or "1".
    :rtype: bool
    """
    truthy = {"yes", "y", "true", "t", "1"}
    return str.lower() in truthy
from typing import Any
from textwrap import dedent
def parse_expression(
val: str, acceptable_types: type | tuple[type, ...], name: str | None = None
) -> Any:
"""Attempts to parse the given `val` as a python expression of the specified `acceptable_types`.
:param val: A string containing a python expression.
:param acceptable_types: The acceptable types of the parsed object.
:param name: An optional logical name for the value being parsed; ie if the literal val
represents a person's age, 'age'.
:raises: If `val` is not a valid python literal expression or it is but evaluates to an object
that is not a an instance of one of the `acceptable_types`.
"""
def format_type(typ):
return typ.__name__
if not isinstance(val, str):
raise ValueError(
f"The raw `val` is not a str. Given {val} of type {format_type(type(val))}."
)
def get_name():
return repr(name) if name else "value"
def format_raw_value():
lines = val.splitlines()
for line_number in range(0, len(lines)):
lines[line_number] = "{line_number:{width}}: {line}".format(
line_number=line_number + 1, line=lines[line_number], width=len(str(len(lines)))
)
return "\n".join(lines)
try:
parsed_value = eval(val)
except Exception as e:
raise ValueError(
dedent(
f"""\
The {get_name()} cannot be evaluated as a literal expression: {e!r}
Given raw value:
{format_raw_value()}
"""
)
)
if not isinstance(parsed_value, acceptable_types):
def iter_types(types):
if isinstance(types, type):
yield types
elif isinstance(types, tuple):
for item in types:
yield from iter_types(item)
else:
raise ValueError(
f"The given acceptable_types is not a valid type (tuple): {acceptable_types}"
)
expected_types = ", ".join(format_type(t) for t in iter_types(acceptable_types))
raise ValueError(
dedent(
f"""\
The {get_name()} is not of the expected type(s): {expected_types}:
Given the following raw value that evaluated to type {format_type(type(parsed_value))}:
{format_raw_value()}
"""
)
)
return parsed_value | 03b1d7987ea4d484ef47827be6889e9841c26320 | 691,400 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.