repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
pmelchior/proxmin | proxmin/operators.py | prox_unity_plus | def prox_unity_plus(X, step, axis=0):
"""Non-negative projection onto sum=1 along an axis
"""
return prox_unity(prox_plus(X, step), step, axis=axis) | python | def prox_unity_plus(X, step, axis=0):
"""Non-negative projection onto sum=1 along an axis
"""
return prox_unity(prox_plus(X, step), step, axis=axis) | [
"def",
"prox_unity_plus",
"(",
"X",
",",
"step",
",",
"axis",
"=",
"0",
")",
":",
"return",
"prox_unity",
"(",
"prox_plus",
"(",
"X",
",",
"step",
")",
",",
"step",
",",
"axis",
"=",
"axis",
")"
] | Non-negative projection onto sum=1 along an axis | [
"Non",
"-",
"negative",
"projection",
"onto",
"sum",
"=",
"1",
"along",
"an",
"axis"
] | 60e49d90c67c46329cc1d3b5c484951dc8bd2c3f | https://github.com/pmelchior/proxmin/blob/60e49d90c67c46329cc1d3b5c484951dc8bd2c3f/proxmin/operators.py#L39-L42 | train | 34,100 |
pmelchior/proxmin | proxmin/operators.py | prox_min | def prox_min(X, step, thresh=0):
"""Projection onto numbers above `thresh`
"""
thresh_ = _step_gamma(step, thresh)
below = X - thresh_ < 0
X[below] = thresh_
return X | python | def prox_min(X, step, thresh=0):
"""Projection onto numbers above `thresh`
"""
thresh_ = _step_gamma(step, thresh)
below = X - thresh_ < 0
X[below] = thresh_
return X | [
"def",
"prox_min",
"(",
"X",
",",
"step",
",",
"thresh",
"=",
"0",
")",
":",
"thresh_",
"=",
"_step_gamma",
"(",
"step",
",",
"thresh",
")",
"below",
"=",
"X",
"-",
"thresh_",
"<",
"0",
"X",
"[",
"below",
"]",
"=",
"thresh_",
"return",
"X"
] | Projection onto numbers above `thresh` | [
"Projection",
"onto",
"numbers",
"above",
"thresh"
] | 60e49d90c67c46329cc1d3b5c484951dc8bd2c3f | https://github.com/pmelchior/proxmin/blob/60e49d90c67c46329cc1d3b5c484951dc8bd2c3f/proxmin/operators.py#L44-L50 | train | 34,101 |
pmelchior/proxmin | proxmin/operators.py | prox_max | def prox_max(X, step, thresh=0):
"""Projection onto numbers below `thresh`
"""
thresh_ = _step_gamma(step, thresh)
above = X - thresh_ > 0
X[above] = thresh_
return X | python | def prox_max(X, step, thresh=0):
"""Projection onto numbers below `thresh`
"""
thresh_ = _step_gamma(step, thresh)
above = X - thresh_ > 0
X[above] = thresh_
return X | [
"def",
"prox_max",
"(",
"X",
",",
"step",
",",
"thresh",
"=",
"0",
")",
":",
"thresh_",
"=",
"_step_gamma",
"(",
"step",
",",
"thresh",
")",
"above",
"=",
"X",
"-",
"thresh_",
">",
"0",
"X",
"[",
"above",
"]",
"=",
"thresh_",
"return",
"X"
] | Projection onto numbers below `thresh` | [
"Projection",
"onto",
"numbers",
"below",
"thresh"
] | 60e49d90c67c46329cc1d3b5c484951dc8bd2c3f | https://github.com/pmelchior/proxmin/blob/60e49d90c67c46329cc1d3b5c484951dc8bd2c3f/proxmin/operators.py#L52-L58 | train | 34,102 |
pmelchior/proxmin | proxmin/operators.py | prox_components | def prox_components(X, step, prox=None, axis=0):
"""Split X along axis and apply prox to each chunk.
prox can be a list.
"""
K = X.shape[axis]
if not hasattr(prox_list, '__iter__'):
prox = [prox] * K
assert len(prox_list) == K
if axis == 0:
Pk = [prox_list[k](X[k], step) for k in range(K)]
if axis == 1:
Pk = [prox_list[k](X[:,k], step) for k in range(K)]
return np.stack(Pk, axis=axis) | python | def prox_components(X, step, prox=None, axis=0):
"""Split X along axis and apply prox to each chunk.
prox can be a list.
"""
K = X.shape[axis]
if not hasattr(prox_list, '__iter__'):
prox = [prox] * K
assert len(prox_list) == K
if axis == 0:
Pk = [prox_list[k](X[k], step) for k in range(K)]
if axis == 1:
Pk = [prox_list[k](X[:,k], step) for k in range(K)]
return np.stack(Pk, axis=axis) | [
"def",
"prox_components",
"(",
"X",
",",
"step",
",",
"prox",
"=",
"None",
",",
"axis",
"=",
"0",
")",
":",
"K",
"=",
"X",
".",
"shape",
"[",
"axis",
"]",
"if",
"not",
"hasattr",
"(",
"prox_list",
",",
"'__iter__'",
")",
":",
"prox",
"=",
"[",
... | Split X along axis and apply prox to each chunk.
prox can be a list. | [
"Split",
"X",
"along",
"axis",
"and",
"apply",
"prox",
"to",
"each",
"chunk",
"."
] | 60e49d90c67c46329cc1d3b5c484951dc8bd2c3f | https://github.com/pmelchior/proxmin/blob/60e49d90c67c46329cc1d3b5c484951dc8bd2c3f/proxmin/operators.py#L60-L75 | train | 34,103 |
pmelchior/proxmin | proxmin/operators.py | prox_hard_plus | def prox_hard_plus(X, step, thresh=0):
"""Hard thresholding with projection onto non-negative numbers
"""
return prox_plus(prox_hard(X, step, thresh=thresh), step) | python | def prox_hard_plus(X, step, thresh=0):
"""Hard thresholding with projection onto non-negative numbers
"""
return prox_plus(prox_hard(X, step, thresh=thresh), step) | [
"def",
"prox_hard_plus",
"(",
"X",
",",
"step",
",",
"thresh",
"=",
"0",
")",
":",
"return",
"prox_plus",
"(",
"prox_hard",
"(",
"X",
",",
"step",
",",
"thresh",
"=",
"thresh",
")",
",",
"step",
")"
] | Hard thresholding with projection onto non-negative numbers | [
"Hard",
"thresholding",
"with",
"projection",
"onto",
"non",
"-",
"negative",
"numbers"
] | 60e49d90c67c46329cc1d3b5c484951dc8bd2c3f | https://github.com/pmelchior/proxmin/blob/60e49d90c67c46329cc1d3b5c484951dc8bd2c3f/proxmin/operators.py#L91-L94 | train | 34,104 |
pmelchior/proxmin | proxmin/operators.py | prox_soft | def prox_soft(X, step, thresh=0):
"""Soft thresholding proximal operator
"""
thresh_ = _step_gamma(step, thresh)
return np.sign(X)*prox_plus(np.abs(X) - thresh_, step) | python | def prox_soft(X, step, thresh=0):
"""Soft thresholding proximal operator
"""
thresh_ = _step_gamma(step, thresh)
return np.sign(X)*prox_plus(np.abs(X) - thresh_, step) | [
"def",
"prox_soft",
"(",
"X",
",",
"step",
",",
"thresh",
"=",
"0",
")",
":",
"thresh_",
"=",
"_step_gamma",
"(",
"step",
",",
"thresh",
")",
"return",
"np",
".",
"sign",
"(",
"X",
")",
"*",
"prox_plus",
"(",
"np",
".",
"abs",
"(",
"X",
")",
"-... | Soft thresholding proximal operator | [
"Soft",
"thresholding",
"proximal",
"operator"
] | 60e49d90c67c46329cc1d3b5c484951dc8bd2c3f | https://github.com/pmelchior/proxmin/blob/60e49d90c67c46329cc1d3b5c484951dc8bd2c3f/proxmin/operators.py#L96-L100 | train | 34,105 |
pmelchior/proxmin | proxmin/operators.py | prox_soft_plus | def prox_soft_plus(X, step, thresh=0):
"""Soft thresholding with projection onto non-negative numbers
"""
return prox_plus(prox_soft(X, step, thresh=thresh), step) | python | def prox_soft_plus(X, step, thresh=0):
"""Soft thresholding with projection onto non-negative numbers
"""
return prox_plus(prox_soft(X, step, thresh=thresh), step) | [
"def",
"prox_soft_plus",
"(",
"X",
",",
"step",
",",
"thresh",
"=",
"0",
")",
":",
"return",
"prox_plus",
"(",
"prox_soft",
"(",
"X",
",",
"step",
",",
"thresh",
"=",
"thresh",
")",
",",
"step",
")"
] | Soft thresholding with projection onto non-negative numbers | [
"Soft",
"thresholding",
"with",
"projection",
"onto",
"non",
"-",
"negative",
"numbers"
] | 60e49d90c67c46329cc1d3b5c484951dc8bd2c3f | https://github.com/pmelchior/proxmin/blob/60e49d90c67c46329cc1d3b5c484951dc8bd2c3f/proxmin/operators.py#L102-L105 | train | 34,106 |
pmelchior/proxmin | proxmin/operators.py | prox_max_entropy | def prox_max_entropy(X, step, gamma=1):
"""Proximal operator for maximum entropy regularization.
g(x) = gamma \sum_i x_i ln(x_i)
has the analytical solution of gamma W(1/gamma exp((X-gamma)/gamma)), where
W is the Lambert W function.
"""
from scipy.special import lambertw
gamma_ = _step_gamma(step, gamma)
# minimize entropy: return gamma_ * np.real(lambertw(np.exp((X - gamma_) / gamma_) / gamma_))
above = X > 0
X[above] = gamma_ * np.real(lambertw(np.exp(X[above]/gamma_ - 1) / gamma_))
return X | python | def prox_max_entropy(X, step, gamma=1):
"""Proximal operator for maximum entropy regularization.
g(x) = gamma \sum_i x_i ln(x_i)
has the analytical solution of gamma W(1/gamma exp((X-gamma)/gamma)), where
W is the Lambert W function.
"""
from scipy.special import lambertw
gamma_ = _step_gamma(step, gamma)
# minimize entropy: return gamma_ * np.real(lambertw(np.exp((X - gamma_) / gamma_) / gamma_))
above = X > 0
X[above] = gamma_ * np.real(lambertw(np.exp(X[above]/gamma_ - 1) / gamma_))
return X | [
"def",
"prox_max_entropy",
"(",
"X",
",",
"step",
",",
"gamma",
"=",
"1",
")",
":",
"from",
"scipy",
".",
"special",
"import",
"lambertw",
"gamma_",
"=",
"_step_gamma",
"(",
"step",
",",
"gamma",
")",
"# minimize entropy: return gamma_ * np.real(lambertw(np.exp((X... | Proximal operator for maximum entropy regularization.
g(x) = gamma \sum_i x_i ln(x_i)
has the analytical solution of gamma W(1/gamma exp((X-gamma)/gamma)), where
W is the Lambert W function. | [
"Proximal",
"operator",
"for",
"maximum",
"entropy",
"regularization",
"."
] | 60e49d90c67c46329cc1d3b5c484951dc8bd2c3f | https://github.com/pmelchior/proxmin/blob/60e49d90c67c46329cc1d3b5c484951dc8bd2c3f/proxmin/operators.py#L107-L120 | train | 34,107 |
pmelchior/proxmin | proxmin/operators.py | get_gradient_y | def get_gradient_y(shape, py):
"""Calculate the gradient in the y direction to the line at py
The y gradient operator is a block matrix, where each block is the size of the image width.
The matrix itself is made up of (img_height x img_height) blocks, most of which are all zeros.
"""
import scipy.sparse
height, width = shape
rows = []
empty = scipy.sparse.dia_matrix((width, width))
identity = scipy.sparse.identity(width)
# Create the blocks by row, beginning with blocks leading up to the peak row from the top
for n in range(py):
row = [empty]*n
row += [-identity, identity]
row += [empty]*(height-n-2)
rows.append(row)
# Set all elements in the peak row to zero
rows.append([empty]*height)
# Create the blocks for the rows leading up to the peak row from the bottom
for n in range(height-py-1):
row = [empty]*(py+n)
row += [identity, -identity]
row += [empty]*(height-py-n-2)
rows.append(row)
return scipy.sparse.bmat(rows) | python | def get_gradient_y(shape, py):
"""Calculate the gradient in the y direction to the line at py
The y gradient operator is a block matrix, where each block is the size of the image width.
The matrix itself is made up of (img_height x img_height) blocks, most of which are all zeros.
"""
import scipy.sparse
height, width = shape
rows = []
empty = scipy.sparse.dia_matrix((width, width))
identity = scipy.sparse.identity(width)
# Create the blocks by row, beginning with blocks leading up to the peak row from the top
for n in range(py):
row = [empty]*n
row += [-identity, identity]
row += [empty]*(height-n-2)
rows.append(row)
# Set all elements in the peak row to zero
rows.append([empty]*height)
# Create the blocks for the rows leading up to the peak row from the bottom
for n in range(height-py-1):
row = [empty]*(py+n)
row += [identity, -identity]
row += [empty]*(height-py-n-2)
rows.append(row)
return scipy.sparse.bmat(rows) | [
"def",
"get_gradient_y",
"(",
"shape",
",",
"py",
")",
":",
"import",
"scipy",
".",
"sparse",
"height",
",",
"width",
"=",
"shape",
"rows",
"=",
"[",
"]",
"empty",
"=",
"scipy",
".",
"sparse",
".",
"dia_matrix",
"(",
"(",
"width",
",",
"width",
")",
... | Calculate the gradient in the y direction to the line at py
The y gradient operator is a block matrix, where each block is the size of the image width.
The matrix itself is made up of (img_height x img_height) blocks, most of which are all zeros. | [
"Calculate",
"the",
"gradient",
"in",
"the",
"y",
"direction",
"to",
"the",
"line",
"at",
"py"
] | 60e49d90c67c46329cc1d3b5c484951dc8bd2c3f | https://github.com/pmelchior/proxmin/blob/60e49d90c67c46329cc1d3b5c484951dc8bd2c3f/proxmin/operators.py#L159-L186 | train | 34,108 |
pmelchior/proxmin | proxmin/operators.py | get_gradient_x | def get_gradient_x(shape, px):
"""Calculate the gradient in the x direction to the line at px
The y gradient operator is a block diagonal matrix, where each block is the size of the image width.
The matrix itself is made up of (img_height x img_height) blocks, most of which are all zeros.
"""
import scipy.sparse
height, width = shape
size = height * width
# Set the diagonal to -1, except for the value at the peak, which is zero
c = -np.ones((width,))
c[px] = 0
# Set the pixels leading up to the peak from the left
r = np.zeros(c.shape, dtype=c.dtype)
r[:px] = 1
# Set the pixels leading up to the peak from the right
l = np.zeros(c.shape, dtype=c.dtype)
l[px:] = 1
# Make a block for a single row in the image
block = scipy.sparse.diags([l, c, r], [-1, 0,1], shape=(width,width))
# Use the same block for each row
op = scipy.sparse.block_diag([block for n in range(height)])
return op | python | def get_gradient_x(shape, px):
"""Calculate the gradient in the x direction to the line at px
The y gradient operator is a block diagonal matrix, where each block is the size of the image width.
The matrix itself is made up of (img_height x img_height) blocks, most of which are all zeros.
"""
import scipy.sparse
height, width = shape
size = height * width
# Set the diagonal to -1, except for the value at the peak, which is zero
c = -np.ones((width,))
c[px] = 0
# Set the pixels leading up to the peak from the left
r = np.zeros(c.shape, dtype=c.dtype)
r[:px] = 1
# Set the pixels leading up to the peak from the right
l = np.zeros(c.shape, dtype=c.dtype)
l[px:] = 1
# Make a block for a single row in the image
block = scipy.sparse.diags([l, c, r], [-1, 0,1], shape=(width,width))
# Use the same block for each row
op = scipy.sparse.block_diag([block for n in range(height)])
return op | [
"def",
"get_gradient_x",
"(",
"shape",
",",
"px",
")",
":",
"import",
"scipy",
".",
"sparse",
"height",
",",
"width",
"=",
"shape",
"size",
"=",
"height",
"*",
"width",
"# Set the diagonal to -1, except for the value at the peak, which is zero",
"c",
"=",
"-",
"np... | Calculate the gradient in the x direction to the line at px
The y gradient operator is a block diagonal matrix, where each block is the size of the image width.
The matrix itself is made up of (img_height x img_height) blocks, most of which are all zeros. | [
"Calculate",
"the",
"gradient",
"in",
"the",
"x",
"direction",
"to",
"the",
"line",
"at",
"px"
] | 60e49d90c67c46329cc1d3b5c484951dc8bd2c3f | https://github.com/pmelchior/proxmin/blob/60e49d90c67c46329cc1d3b5c484951dc8bd2c3f/proxmin/operators.py#L188-L212 | train | 34,109 |
ValvePython/vpk | vpk/__init__.py | NewVPK.read_dir | def read_dir(self, path):
"""
Reads the given path into the tree
"""
self.tree = {}
self.file_count = 0
self.path = path
for root, _, filelist in os.walk(path):
rel = root[len(path):].lstrip('/\\')
# empty rel, means file is in root dir
if not rel:
rel = ' '
for filename in filelist:
filename = filename.split('.')
if len(filename) <= 1:
raise RuntimeError("Files without an extension are not supported: {0}".format(
repr(os.path.join(root, '.'.join(filename))),
))
ext = filename[-1]
filename = '.'.join(filename[:-1])
if ext not in self.tree:
self.tree[ext] = {}
if rel not in self.tree[ext]:
self.tree[ext][rel] = []
self.tree[ext][rel].append(filename)
self.file_count += 1
self.tree_length = self.calculate_tree_length() | python | def read_dir(self, path):
"""
Reads the given path into the tree
"""
self.tree = {}
self.file_count = 0
self.path = path
for root, _, filelist in os.walk(path):
rel = root[len(path):].lstrip('/\\')
# empty rel, means file is in root dir
if not rel:
rel = ' '
for filename in filelist:
filename = filename.split('.')
if len(filename) <= 1:
raise RuntimeError("Files without an extension are not supported: {0}".format(
repr(os.path.join(root, '.'.join(filename))),
))
ext = filename[-1]
filename = '.'.join(filename[:-1])
if ext not in self.tree:
self.tree[ext] = {}
if rel not in self.tree[ext]:
self.tree[ext][rel] = []
self.tree[ext][rel].append(filename)
self.file_count += 1
self.tree_length = self.calculate_tree_length() | [
"def",
"read_dir",
"(",
"self",
",",
"path",
")",
":",
"self",
".",
"tree",
"=",
"{",
"}",
"self",
".",
"file_count",
"=",
"0",
"self",
".",
"path",
"=",
"path",
"for",
"root",
",",
"_",
",",
"filelist",
"in",
"os",
".",
"walk",
"(",
"path",
")... | Reads the given path into the tree | [
"Reads",
"the",
"given",
"path",
"into",
"the",
"tree"
] | cc522fc7febbf53efa5d58fcd1ad2103dae37ac8 | https://github.com/ValvePython/vpk/blob/cc522fc7febbf53efa5d58fcd1ad2103dae37ac8/vpk/__init__.py#L43-L76 | train | 34,110 |
ValvePython/vpk | vpk/__init__.py | NewVPK.calculate_tree_length | def calculate_tree_length(self):
"""
Walks the tree and calculate the tree length
"""
tree_length = 0
for ext in self.tree:
tree_length += len(ext) + 2
for relpath in self.tree[ext]:
tree_length += len(relpath) + 2
for filename in self.tree[ext][relpath]:
tree_length += len(filename) + 1 + 18
return tree_length + 1 | python | def calculate_tree_length(self):
"""
Walks the tree and calculate the tree length
"""
tree_length = 0
for ext in self.tree:
tree_length += len(ext) + 2
for relpath in self.tree[ext]:
tree_length += len(relpath) + 2
for filename in self.tree[ext][relpath]:
tree_length += len(filename) + 1 + 18
return tree_length + 1 | [
"def",
"calculate_tree_length",
"(",
"self",
")",
":",
"tree_length",
"=",
"0",
"for",
"ext",
"in",
"self",
".",
"tree",
":",
"tree_length",
"+=",
"len",
"(",
"ext",
")",
"+",
"2",
"for",
"relpath",
"in",
"self",
".",
"tree",
"[",
"ext",
"]",
":",
... | Walks the tree and calculate the tree length | [
"Walks",
"the",
"tree",
"and",
"calculate",
"the",
"tree",
"length"
] | cc522fc7febbf53efa5d58fcd1ad2103dae37ac8 | https://github.com/ValvePython/vpk/blob/cc522fc7febbf53efa5d58fcd1ad2103dae37ac8/vpk/__init__.py#L79-L94 | train | 34,111 |
ValvePython/vpk | vpk/__init__.py | NewVPK.save | def save(self, vpk_output_path):
"""
Saves the VPK at the given path
"""
with fopen(vpk_output_path, 'wb') as f:
# write VPK1 header
f.write(struct.pack("3I", self.signature,
self.version,
self.tree_length))
self.header_length = f.tell()
data_offset = self.header_length + self.tree_length
# write file tree
for ext in self.tree:
f.write("{0}\x00".format(ext).encode('latin-1'))
for relpath in self.tree[ext]:
f.write("{0}\x00".format(relpath).encode('latin-1'))
for filename in self.tree[ext][relpath]:
f.write("{0}\x00".format(filename).encode('latin-1'))
# append file data
metadata_offset = f.tell()
file_offset = data_offset
real_filename = filename if not ext else "{0}.{1}".format(filename, ext)
checksum = 0
f.seek(data_offset)
with fopen(os.path.join(self.path,
'' if relpath == ' ' else relpath,
real_filename
),
'rb') as pakfile:
for chunk in iter(lambda: pakfile.read(1024), b''):
checksum = crc32(chunk, checksum)
f.write(chunk)
data_offset = f.tell()
file_length = f.tell() - file_offset
f.seek(metadata_offset)
# metadata
# crc32
# preload_length
# archive_index
# archive_offset
# file_length
# suffix
f.write(struct.pack("IHHIIH", checksum & 0xFFffFFff,
0,
0x7fff,
file_offset - self.tree_length - self.header_length,
file_length,
0xffff
))
# next relpath
f.write(b"\x00")
# next ext
f.write(b"\x00")
# end of file tree
f.write(b"\x00") | python | def save(self, vpk_output_path):
"""
Saves the VPK at the given path
"""
with fopen(vpk_output_path, 'wb') as f:
# write VPK1 header
f.write(struct.pack("3I", self.signature,
self.version,
self.tree_length))
self.header_length = f.tell()
data_offset = self.header_length + self.tree_length
# write file tree
for ext in self.tree:
f.write("{0}\x00".format(ext).encode('latin-1'))
for relpath in self.tree[ext]:
f.write("{0}\x00".format(relpath).encode('latin-1'))
for filename in self.tree[ext][relpath]:
f.write("{0}\x00".format(filename).encode('latin-1'))
# append file data
metadata_offset = f.tell()
file_offset = data_offset
real_filename = filename if not ext else "{0}.{1}".format(filename, ext)
checksum = 0
f.seek(data_offset)
with fopen(os.path.join(self.path,
'' if relpath == ' ' else relpath,
real_filename
),
'rb') as pakfile:
for chunk in iter(lambda: pakfile.read(1024), b''):
checksum = crc32(chunk, checksum)
f.write(chunk)
data_offset = f.tell()
file_length = f.tell() - file_offset
f.seek(metadata_offset)
# metadata
# crc32
# preload_length
# archive_index
# archive_offset
# file_length
# suffix
f.write(struct.pack("IHHIIH", checksum & 0xFFffFFff,
0,
0x7fff,
file_offset - self.tree_length - self.header_length,
file_length,
0xffff
))
# next relpath
f.write(b"\x00")
# next ext
f.write(b"\x00")
# end of file tree
f.write(b"\x00") | [
"def",
"save",
"(",
"self",
",",
"vpk_output_path",
")",
":",
"with",
"fopen",
"(",
"vpk_output_path",
",",
"'wb'",
")",
"as",
"f",
":",
"# write VPK1 header",
"f",
".",
"write",
"(",
"struct",
".",
"pack",
"(",
"\"3I\"",
",",
"self",
".",
"signature",
... | Saves the VPK at the given path | [
"Saves",
"the",
"VPK",
"at",
"the",
"given",
"path"
] | cc522fc7febbf53efa5d58fcd1ad2103dae37ac8 | https://github.com/ValvePython/vpk/blob/cc522fc7febbf53efa5d58fcd1ad2103dae37ac8/vpk/__init__.py#L97-L163 | train | 34,112 |
ValvePython/vpk | vpk/__init__.py | VPK.get_file | def get_file(self, path):
"""
Returns VPKFile instance for the given path
"""
metadata = self.get_file_meta(path)
return self.get_vpkfile_instance(path, metadata) | python | def get_file(self, path):
"""
Returns VPKFile instance for the given path
"""
metadata = self.get_file_meta(path)
return self.get_vpkfile_instance(path, metadata) | [
"def",
"get_file",
"(",
"self",
",",
"path",
")",
":",
"metadata",
"=",
"self",
".",
"get_file_meta",
"(",
"path",
")",
"return",
"self",
".",
"get_vpkfile_instance",
"(",
"path",
",",
"metadata",
")"
] | Returns VPKFile instance for the given path | [
"Returns",
"VPKFile",
"instance",
"for",
"the",
"given",
"path"
] | cc522fc7febbf53efa5d58fcd1ad2103dae37ac8 | https://github.com/ValvePython/vpk/blob/cc522fc7febbf53efa5d58fcd1ad2103dae37ac8/vpk/__init__.py#L251-L256 | train | 34,113 |
ValvePython/vpk | vpk/__init__.py | VPK.get_file_meta | def get_file_meta(self, path):
"""
Returns metadata for given file path
"""
if self.tree is None:
self.read_index()
if path not in self.tree:
raise KeyError("Path doesn't exist")
return self._make_meta_dict(self.tree[path]) | python | def get_file_meta(self, path):
"""
Returns metadata for given file path
"""
if self.tree is None:
self.read_index()
if path not in self.tree:
raise KeyError("Path doesn't exist")
return self._make_meta_dict(self.tree[path]) | [
"def",
"get_file_meta",
"(",
"self",
",",
"path",
")",
":",
"if",
"self",
".",
"tree",
"is",
"None",
":",
"self",
".",
"read_index",
"(",
")",
"if",
"path",
"not",
"in",
"self",
".",
"tree",
":",
"raise",
"KeyError",
"(",
"\"Path doesn't exist\"",
")",... | Returns metadata for given file path | [
"Returns",
"metadata",
"for",
"given",
"file",
"path"
] | cc522fc7febbf53efa5d58fcd1ad2103dae37ac8 | https://github.com/ValvePython/vpk/blob/cc522fc7febbf53efa5d58fcd1ad2103dae37ac8/vpk/__init__.py#L258-L268 | train | 34,114 |
ValvePython/vpk | vpk/__init__.py | VPK.read_header | def read_header(self):
"""
Reads VPK file header from the file
"""
with fopen(self.vpk_path, 'rb') as f:
(self.signature,
self.version,
self.tree_length
) = struct.unpack("3I", f.read(3*4))
# original format - headerless
if self.signature != 0x55aa1234:
raise ValueError("File is not VPK (invalid magic)")
# v1
elif self.version == 1:
self.header_length += 4*3
# v2 with extended header
#
# according to http://forum.xentax.com/viewtopic.php?f=10&t=11208
# struct VPKDirHeader_t
# {
# int32 m_nHeaderMarker;
# int32 m_nVersion;
# int32 m_nDirectorySize;
# int32 m_nEmbeddedChunkSize;
# int32 m_nChunkHashesSize;
# int32 m_nSelfHashesSize;
# int32 m_nSignatureSize;
# }
elif self.version == 2:
(self.embed_chunk_length,
self.chunk_hashes_length,
self.self_hashes_length,
self.signature_length
) = struct.unpack("4I", f.read(4*4))
self.header_length += 4*7
f.seek(self.tree_length + self.embed_chunk_length + self.chunk_hashes_length, 1)
assert self.self_hashes_length == 48, "Self hashes section size mismatch"
(self.tree_checksum,
self.chunk_hashes_checksum,
self.file_checksum,
) = struct.unpack("16s16s16s", f.read(16*3))
else:
raise ValueError("Invalid header, or unsupported version") | python | def read_header(self):
"""
Reads VPK file header from the file
"""
with fopen(self.vpk_path, 'rb') as f:
(self.signature,
self.version,
self.tree_length
) = struct.unpack("3I", f.read(3*4))
# original format - headerless
if self.signature != 0x55aa1234:
raise ValueError("File is not VPK (invalid magic)")
# v1
elif self.version == 1:
self.header_length += 4*3
# v2 with extended header
#
# according to http://forum.xentax.com/viewtopic.php?f=10&t=11208
# struct VPKDirHeader_t
# {
# int32 m_nHeaderMarker;
# int32 m_nVersion;
# int32 m_nDirectorySize;
# int32 m_nEmbeddedChunkSize;
# int32 m_nChunkHashesSize;
# int32 m_nSelfHashesSize;
# int32 m_nSignatureSize;
# }
elif self.version == 2:
(self.embed_chunk_length,
self.chunk_hashes_length,
self.self_hashes_length,
self.signature_length
) = struct.unpack("4I", f.read(4*4))
self.header_length += 4*7
f.seek(self.tree_length + self.embed_chunk_length + self.chunk_hashes_length, 1)
assert self.self_hashes_length == 48, "Self hashes section size mismatch"
(self.tree_checksum,
self.chunk_hashes_checksum,
self.file_checksum,
) = struct.unpack("16s16s16s", f.read(16*3))
else:
raise ValueError("Invalid header, or unsupported version") | [
"def",
"read_header",
"(",
"self",
")",
":",
"with",
"fopen",
"(",
"self",
".",
"vpk_path",
",",
"'rb'",
")",
"as",
"f",
":",
"(",
"self",
".",
"signature",
",",
"self",
".",
"version",
",",
"self",
".",
"tree_length",
")",
"=",
"struct",
".",
"unp... | Reads VPK file header from the file | [
"Reads",
"VPK",
"file",
"header",
"from",
"the",
"file"
] | cc522fc7febbf53efa5d58fcd1ad2103dae37ac8 | https://github.com/ValvePython/vpk/blob/cc522fc7febbf53efa5d58fcd1ad2103dae37ac8/vpk/__init__.py#L284-L330 | train | 34,115 |
ValvePython/vpk | vpk/__init__.py | VPK.read_index | def read_index(self):
"""
Reads the index and populates the directory tree
"""
if not isinstance(self.tree, dict):
self.tree = dict()
self.tree.clear()
for path, metadata in self.read_index_iter():
self.tree[path] = metadata | python | def read_index(self):
"""
Reads the index and populates the directory tree
"""
if not isinstance(self.tree, dict):
self.tree = dict()
self.tree.clear()
for path, metadata in self.read_index_iter():
self.tree[path] = metadata | [
"def",
"read_index",
"(",
"self",
")",
":",
"if",
"not",
"isinstance",
"(",
"self",
".",
"tree",
",",
"dict",
")",
":",
"self",
".",
"tree",
"=",
"dict",
"(",
")",
"self",
".",
"tree",
".",
"clear",
"(",
")",
"for",
"path",
",",
"metadata",
"in",... | Reads the index and populates the directory tree | [
"Reads",
"the",
"index",
"and",
"populates",
"the",
"directory",
"tree"
] | cc522fc7febbf53efa5d58fcd1ad2103dae37ac8 | https://github.com/ValvePython/vpk/blob/cc522fc7febbf53efa5d58fcd1ad2103dae37ac8/vpk/__init__.py#L363-L373 | train | 34,116 |
ValvePython/vpk | vpk/__init__.py | VPK.read_index_iter | def read_index_iter(self):
"""Generator function that reads the file index from the vpk file
yeilds (file_path, metadata)
"""
with fopen(self.vpk_path, 'rb') as f:
f.seek(self.header_length)
while True:
if self.version > 0 and f.tell() > self.tree_length + self.header_length:
raise ValueError("Error parsing index (out of bounds)")
ext = _read_cstring(f)
if ext == '':
break
while True:
path = _read_cstring(f)
if path == '':
break
if path != ' ':
path = os.path.join(path, '')
else:
path = ''
while True:
name = _read_cstring(f)
if name == '':
break
(crc32,
preload_length,
archive_index,
archive_offset,
file_length,
suffix,
) = metadata = list(struct.unpack("IHHIIH", f.read(18)))
if suffix != 0xffff:
raise ValueError("Error while parsing index")
if archive_index == 0x7fff:
metadata[3] = self.header_length + self.tree_length + archive_offset
metadata = (f.read(preload_length),) + tuple(metadata[:-1])
yield path + name + '.' + ext, metadata | python | def read_index_iter(self):
"""Generator function that reads the file index from the vpk file
yeilds (file_path, metadata)
"""
with fopen(self.vpk_path, 'rb') as f:
f.seek(self.header_length)
while True:
if self.version > 0 and f.tell() > self.tree_length + self.header_length:
raise ValueError("Error parsing index (out of bounds)")
ext = _read_cstring(f)
if ext == '':
break
while True:
path = _read_cstring(f)
if path == '':
break
if path != ' ':
path = os.path.join(path, '')
else:
path = ''
while True:
name = _read_cstring(f)
if name == '':
break
(crc32,
preload_length,
archive_index,
archive_offset,
file_length,
suffix,
) = metadata = list(struct.unpack("IHHIIH", f.read(18)))
if suffix != 0xffff:
raise ValueError("Error while parsing index")
if archive_index == 0x7fff:
metadata[3] = self.header_length + self.tree_length + archive_offset
metadata = (f.read(preload_length),) + tuple(metadata[:-1])
yield path + name + '.' + ext, metadata | [
"def",
"read_index_iter",
"(",
"self",
")",
":",
"with",
"fopen",
"(",
"self",
".",
"vpk_path",
",",
"'rb'",
")",
"as",
"f",
":",
"f",
".",
"seek",
"(",
"self",
".",
"header_length",
")",
"while",
"True",
":",
"if",
"self",
".",
"version",
">",
"0"... | Generator function that reads the file index from the vpk file
yeilds (file_path, metadata) | [
"Generator",
"function",
"that",
"reads",
"the",
"file",
"index",
"from",
"the",
"vpk",
"file"
] | cc522fc7febbf53efa5d58fcd1ad2103dae37ac8 | https://github.com/ValvePython/vpk/blob/cc522fc7febbf53efa5d58fcd1ad2103dae37ac8/vpk/__init__.py#L375-L422 | train | 34,117 |
ValvePython/vpk | vpk/__init__.py | VPKFile.save | def save(self, path):
"""
Save the file to the specified path
"""
# remember and restore file position
pos = self.tell()
self.seek(0)
with fopen(path, 'wb') as output:
output.truncate(self.length)
for chunk in iter(lambda: self.read(1024), b''):
output.write(chunk)
self.seek(pos) | python | def save(self, path):
"""
Save the file to the specified path
"""
# remember and restore file position
pos = self.tell()
self.seek(0)
with fopen(path, 'wb') as output:
output.truncate(self.length)
for chunk in iter(lambda: self.read(1024), b''):
output.write(chunk)
self.seek(pos) | [
"def",
"save",
"(",
"self",
",",
"path",
")",
":",
"# remember and restore file position",
"pos",
"=",
"self",
".",
"tell",
"(",
")",
"self",
".",
"seek",
"(",
"0",
")",
"with",
"fopen",
"(",
"path",
",",
"'wb'",
")",
"as",
"output",
":",
"output",
"... | Save the file to the specified path | [
"Save",
"the",
"file",
"to",
"the",
"specified",
"path"
] | cc522fc7febbf53efa5d58fcd1ad2103dae37ac8 | https://github.com/ValvePython/vpk/blob/cc522fc7febbf53efa5d58fcd1ad2103dae37ac8/vpk/__init__.py#L454-L467 | train | 34,118 |
ValvePython/vpk | vpk/__init__.py | VPKFile.verify | def verify(self):
"""
Returns True if the file contents match with the CRC32 attribute
note: reset
"""
# remember file pointer
pos = self.tell()
self.seek(0)
checksum = 0
for chunk in iter(lambda: self.read(1024), b''):
checksum = crc32(chunk, checksum)
# restore file pointer
self.seek(pos)
return self.crc32 == checksum & 0xffffffff | python | def verify(self):
"""
Returns True if the file contents match with the CRC32 attribute
note: reset
"""
# remember file pointer
pos = self.tell()
self.seek(0)
checksum = 0
for chunk in iter(lambda: self.read(1024), b''):
checksum = crc32(chunk, checksum)
# restore file pointer
self.seek(pos)
return self.crc32 == checksum & 0xffffffff | [
"def",
"verify",
"(",
"self",
")",
":",
"# remember file pointer",
"pos",
"=",
"self",
".",
"tell",
"(",
")",
"self",
".",
"seek",
"(",
"0",
")",
"checksum",
"=",
"0",
"for",
"chunk",
"in",
"iter",
"(",
"lambda",
":",
"self",
".",
"read",
"(",
"102... | Returns True if the file contents match with the CRC32 attribute
note: reset | [
"Returns",
"True",
"if",
"the",
"file",
"contents",
"match",
"with",
"the",
"CRC32",
"attribute"
] | cc522fc7febbf53efa5d58fcd1ad2103dae37ac8 | https://github.com/ValvePython/vpk/blob/cc522fc7febbf53efa5d58fcd1ad2103dae37ac8/vpk/__init__.py#L469-L487 | train | 34,119 |
stephen-bunn/file-config | tasks/__init__.py | publish | def publish(ctx, test=False, force=False, draft=False):
""" Publish the project.
:param bool test: Publishes to PyPi test server (defaults to False)
:param bool force: Skip version check (defaults to False)
:param bool draft: Sample publish (has no effect) (defaults to False)
"""
previous_version = get_previous_version(ctx)
current_version = parver.Version.parse(metadata["version"])
if current_version <= previous_version and not force:
error_message = (
f"current version ({current_version!s}) is <= to previous version "
f"({previous_version!s}), use 'package.version' to update current version"
)
report.error(ctx, "publish", error_message)
raise ValueError(error_message)
report.info(ctx, "publish", f"publishing project {ctx.metadata['name']!r}")
report.warning(
ctx,
"publish",
f"drafting publish for project {ctx.metadata['name']!r} (has no effect)",
)
commit_message = f"Release {current_version!s}"
report.info(ctx, "publish", f"git commiting release {commit_message!r}")
git_commit_command = f"git commit -asm {commit_message!r}"
if not draft:
ctx.run(git_commit_command)
tag_content = get_tag_content(ctx).replace('"', '\\"')
git_tag_command = (
f'git tag -a "v{current_version!s}" -m '
f'"Version {current_version!s}\n\n{tag_content}"'
)
report.info(
ctx, "publish", f"git tagging commit as release for version {current_version!s}"
)
if not draft:
ctx.run(git_tag_command)
artifact_paths = [f"{_.as_posix()!r}" for _ in get_artifact_paths(ctx)]
for artifact_path in artifact_paths:
report.debug(ctx, "publish", f"publishing artifact {artifact_path}")
publish_command = f"twine upload {' '.join(artifact_paths)}"
if test:
publish_command += " --repository 'https://test.pypi.org/legacy/'"
# get user to confirm publish
try:
input(
report._get_text(
ctx,
"success",
"publish",
"about to publish, [Enter] to continue, [Ctrl-C] to abort: ",
)
)
while True:
(username, password) = get_username_password(
ctx, "PyPi Username: ", "PyPi Password: "
)
# TODO: check if username and password are valid before tyring to post
report.info(ctx, "publish", f"publishing project {ctx.metadata['name']!s}")
if not draft:
publish_command += f" -u {username!r} -p {password!r}"
publish_result = ctx.run(publish_command, warn=True)
if publish_result.exited:
report.error(
ctx,
"publish",
f"failed to publish {ctx.metadata['name']!s} (retrying)",
)
continue
break
git_push_command = "git push --tags"
report.info(ctx, "publish", f"pushing git tags")
if not draft:
ctx.run(git_push_command)
except KeyboardInterrupt:
print()
report.error(ctx, "publish", "aborting publish!")
git_remove_tag_command = f"git tag -d {current_version!s}"
report.warn(ctx, "publish", "removing git tags")
if not draft:
ctx.run(git_remove_tag_command)
git_reset_command = f"git reset --soft HEAD^"
report.warn(ctx, "publish", "softly reseting commit")
if not draft:
ctx.run(git_reset_command) | python | def publish(ctx, test=False, force=False, draft=False):
""" Publish the project.
:param bool test: Publishes to PyPi test server (defaults to False)
:param bool force: Skip version check (defaults to False)
:param bool draft: Sample publish (has no effect) (defaults to False)
"""
previous_version = get_previous_version(ctx)
current_version = parver.Version.parse(metadata["version"])
if current_version <= previous_version and not force:
error_message = (
f"current version ({current_version!s}) is <= to previous version "
f"({previous_version!s}), use 'package.version' to update current version"
)
report.error(ctx, "publish", error_message)
raise ValueError(error_message)
report.info(ctx, "publish", f"publishing project {ctx.metadata['name']!r}")
report.warning(
ctx,
"publish",
f"drafting publish for project {ctx.metadata['name']!r} (has no effect)",
)
commit_message = f"Release {current_version!s}"
report.info(ctx, "publish", f"git commiting release {commit_message!r}")
git_commit_command = f"git commit -asm {commit_message!r}"
if not draft:
ctx.run(git_commit_command)
tag_content = get_tag_content(ctx).replace('"', '\\"')
git_tag_command = (
f'git tag -a "v{current_version!s}" -m '
f'"Version {current_version!s}\n\n{tag_content}"'
)
report.info(
ctx, "publish", f"git tagging commit as release for version {current_version!s}"
)
if not draft:
ctx.run(git_tag_command)
artifact_paths = [f"{_.as_posix()!r}" for _ in get_artifact_paths(ctx)]
for artifact_path in artifact_paths:
report.debug(ctx, "publish", f"publishing artifact {artifact_path}")
publish_command = f"twine upload {' '.join(artifact_paths)}"
if test:
publish_command += " --repository 'https://test.pypi.org/legacy/'"
# get user to confirm publish
try:
input(
report._get_text(
ctx,
"success",
"publish",
"about to publish, [Enter] to continue, [Ctrl-C] to abort: ",
)
)
while True:
(username, password) = get_username_password(
ctx, "PyPi Username: ", "PyPi Password: "
)
# TODO: check if username and password are valid before tyring to post
report.info(ctx, "publish", f"publishing project {ctx.metadata['name']!s}")
if not draft:
publish_command += f" -u {username!r} -p {password!r}"
publish_result = ctx.run(publish_command, warn=True)
if publish_result.exited:
report.error(
ctx,
"publish",
f"failed to publish {ctx.metadata['name']!s} (retrying)",
)
continue
break
git_push_command = "git push --tags"
report.info(ctx, "publish", f"pushing git tags")
if not draft:
ctx.run(git_push_command)
except KeyboardInterrupt:
print()
report.error(ctx, "publish", "aborting publish!")
git_remove_tag_command = f"git tag -d {current_version!s}"
report.warn(ctx, "publish", "removing git tags")
if not draft:
ctx.run(git_remove_tag_command)
git_reset_command = f"git reset --soft HEAD^"
report.warn(ctx, "publish", "softly reseting commit")
if not draft:
ctx.run(git_reset_command) | [
"def",
"publish",
"(",
"ctx",
",",
"test",
"=",
"False",
",",
"force",
"=",
"False",
",",
"draft",
"=",
"False",
")",
":",
"previous_version",
"=",
"get_previous_version",
"(",
"ctx",
")",
"current_version",
"=",
"parver",
".",
"Version",
".",
"parse",
"... | Publish the project.
:param bool test: Publishes to PyPi test server (defaults to False)
:param bool force: Skip version check (defaults to False)
:param bool draft: Sample publish (has no effect) (defaults to False) | [
"Publish",
"the",
"project",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/tasks/__init__.py#L76-L169 | train | 34,120 |
stephen-bunn/file-config | src/file_config/_file_config.py | _handle_dumps | def _handle_dumps(self, handler, **kwargs):
""" Dumps caller, used by partial method for dynamic handler assignments.
:param object handler: The dump handler
:return: The dumped string
:rtype: str
"""
return handler.dumps(self.__class__, to_dict(self), **kwargs) | python | def _handle_dumps(self, handler, **kwargs):
""" Dumps caller, used by partial method for dynamic handler assignments.
:param object handler: The dump handler
:return: The dumped string
:rtype: str
"""
return handler.dumps(self.__class__, to_dict(self), **kwargs) | [
"def",
"_handle_dumps",
"(",
"self",
",",
"handler",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"handler",
".",
"dumps",
"(",
"self",
".",
"__class__",
",",
"to_dict",
"(",
"self",
")",
",",
"*",
"*",
"kwargs",
")"
] | Dumps caller, used by partial method for dynamic handler assignments.
:param object handler: The dump handler
:return: The dumped string
:rtype: str | [
"Dumps",
"caller",
"used",
"by",
"partial",
"method",
"for",
"dynamic",
"handler",
"assignments",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/_file_config.py#L48-L56 | train | 34,121 |
stephen-bunn/file-config | src/file_config/_file_config.py | _handle_dump | def _handle_dump(self, handler, file_object, **kwargs):
""" Dump caller, used by partial method for dynamic handler assignments.
:param object handler: The dump handler
:param file file_object: The file object to dump to
:return: The dumped string
:rtype: str
"""
return handler.dump(self.__class__, to_dict(self), file_object, **kwargs) | python | def _handle_dump(self, handler, file_object, **kwargs):
""" Dump caller, used by partial method for dynamic handler assignments.
:param object handler: The dump handler
:param file file_object: The file object to dump to
:return: The dumped string
:rtype: str
"""
return handler.dump(self.__class__, to_dict(self), file_object, **kwargs) | [
"def",
"_handle_dump",
"(",
"self",
",",
"handler",
",",
"file_object",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"handler",
".",
"dump",
"(",
"self",
".",
"__class__",
",",
"to_dict",
"(",
"self",
")",
",",
"file_object",
",",
"*",
"*",
"kwargs",
... | Dump caller, used by partial method for dynamic handler assignments.
:param object handler: The dump handler
:param file file_object: The file object to dump to
:return: The dumped string
:rtype: str | [
"Dump",
"caller",
"used",
"by",
"partial",
"method",
"for",
"dynamic",
"handler",
"assignments",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/_file_config.py#L59-L68 | train | 34,122 |
stephen-bunn/file-config | src/file_config/_file_config.py | config | def config(maybe_cls=None, these=None, title=None, description=None):
""" File config class decorator.
Usage is to simply decorate a **class** to make it a
:func:`config <file_config._file_config.config>` class.
>>> import file_config
>>> @file_config.config(
title="My Config Title",
description="A description about my config"
)
class MyConfig(object):
pass
:param class maybe_cls: The class to inherit from, defaults to None, optional
:param dict these: A dictionary of str to ``file_config.var`` to use as attribs
:param str title: The title of the config, defaults to None, optional
:param str description: A description of the config, defaults to None, optional
:return: Config wrapped class
:rtype: class
"""
def wrap(config_cls):
""" The wrapper function.
:param class config_cls: The class to wrap
:return: The config_cls wrapper
:rtype: class
"""
setattr(config_cls, CONFIG_KEY, dict(title=title, description=description))
# dynamically assign available handlers to the wrapped class
for handler_name in handlers.__all__:
handler = getattr(handlers, handler_name)
if handler.available:
handler = handler()
setattr(
config_cls,
f"dumps_{handler.name}",
partialmethod(_handle_dumps, handler),
)
setattr(
config_cls,
f"dump_{handler.name}",
partialmethod(_handle_dump, handler),
)
setattr(
config_cls,
f"loads_{handler.name}",
partialmethod(_handle_loads, handler),
)
setattr(
config_cls,
f"load_{handler.name}",
partialmethod(_handle_load, handler),
)
config_vars = these if isinstance(these, dict) else None
return attr.s(config_cls, these=config_vars, slots=True)
if maybe_cls is None:
return wrap
else:
return wrap(maybe_cls) | python | def config(maybe_cls=None, these=None, title=None, description=None):
""" File config class decorator.
Usage is to simply decorate a **class** to make it a
:func:`config <file_config._file_config.config>` class.
>>> import file_config
>>> @file_config.config(
title="My Config Title",
description="A description about my config"
)
class MyConfig(object):
pass
:param class maybe_cls: The class to inherit from, defaults to None, optional
:param dict these: A dictionary of str to ``file_config.var`` to use as attribs
:param str title: The title of the config, defaults to None, optional
:param str description: A description of the config, defaults to None, optional
:return: Config wrapped class
:rtype: class
"""
def wrap(config_cls):
""" The wrapper function.
:param class config_cls: The class to wrap
:return: The config_cls wrapper
:rtype: class
"""
setattr(config_cls, CONFIG_KEY, dict(title=title, description=description))
# dynamically assign available handlers to the wrapped class
for handler_name in handlers.__all__:
handler = getattr(handlers, handler_name)
if handler.available:
handler = handler()
setattr(
config_cls,
f"dumps_{handler.name}",
partialmethod(_handle_dumps, handler),
)
setattr(
config_cls,
f"dump_{handler.name}",
partialmethod(_handle_dump, handler),
)
setattr(
config_cls,
f"loads_{handler.name}",
partialmethod(_handle_loads, handler),
)
setattr(
config_cls,
f"load_{handler.name}",
partialmethod(_handle_load, handler),
)
config_vars = these if isinstance(these, dict) else None
return attr.s(config_cls, these=config_vars, slots=True)
if maybe_cls is None:
return wrap
else:
return wrap(maybe_cls) | [
"def",
"config",
"(",
"maybe_cls",
"=",
"None",
",",
"these",
"=",
"None",
",",
"title",
"=",
"None",
",",
"description",
"=",
"None",
")",
":",
"def",
"wrap",
"(",
"config_cls",
")",
":",
"\"\"\" The wrapper function.\n\n :param class config_cls: The class... | File config class decorator.
Usage is to simply decorate a **class** to make it a
:func:`config <file_config._file_config.config>` class.
>>> import file_config
>>> @file_config.config(
title="My Config Title",
description="A description about my config"
)
class MyConfig(object):
pass
:param class maybe_cls: The class to inherit from, defaults to None, optional
:param dict these: A dictionary of str to ``file_config.var`` to use as attribs
:param str title: The title of the config, defaults to None, optional
:param str description: A description of the config, defaults to None, optional
:return: Config wrapped class
:rtype: class | [
"File",
"config",
"class",
"decorator",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/_file_config.py#L101-L163 | train | 34,123 |
stephen-bunn/file-config | src/file_config/_file_config.py | var | def var(
type=None, # noqa
default=None,
name=None,
title=None,
description=None,
required=True,
examples=None,
encoder=None,
decoder=None,
min=None, # noqa
max=None, # noqa
unique=None,
contains=None,
**kwargs,
):
""" Creates a config variable.
Use this method to create the class variables of your
:func:`config <file_config._file_config.config>` decorated class.
>>> import file_config
>>> @file_config.config
class MyConfig(object):
name = file_config.var(str)
:param type type: The expected type of the variable, defaults to None, optional
:param default: The default value of the var, defaults to None, optional
:param str name: The serialized name of the variable, defaults to None, optional
:param str title: The validation title of the variable, defaults to None, optional
:param str description: The validation description of the variable,
defaults to None, optional
:param bool required: Flag to indicate if variable is required during validation,
defaults to True, optional
:param list examples: A list of validation examples, if necessary,
defaults to None, optional
:param encoder: The encoder to use for the var, defaults to None, optional
:param decoder: The decoder to use for the var, defaults to None, optional
:param int min: The minimum constraint of the variable, defaults to None, optional
:param int max: The maximum constraint of the variable, defaults to None, optional
:param bool unique: Flag to indicate if variable should be unique,
may not apply to all variable types, defaults to None, optional
:param contains: Value that list varaible should contain in validation,
may not apply to all variable types, defaults to None, optional
:return: A new config variable
:rtype: attr.Attribute
"""
# NOTE: this method overrides some of the builtin Python method names on purpose in
# order to supply a readable and easy to understand api
# In this case it is not dangerous as they are only overriden in the scope and are
# never used within the scope
kwargs.update(dict(default=default, type=type))
return attr.ib(
metadata={
CONFIG_KEY: _ConfigEntry(
type=type,
default=default,
name=name,
title=title,
description=description,
required=required,
examples=examples,
encoder=encoder,
decoder=decoder,
min=min,
max=max,
unique=unique,
contains=contains,
)
},
**kwargs,
) | python | def var(
type=None, # noqa
default=None,
name=None,
title=None,
description=None,
required=True,
examples=None,
encoder=None,
decoder=None,
min=None, # noqa
max=None, # noqa
unique=None,
contains=None,
**kwargs,
):
""" Creates a config variable.
Use this method to create the class variables of your
:func:`config <file_config._file_config.config>` decorated class.
>>> import file_config
>>> @file_config.config
class MyConfig(object):
name = file_config.var(str)
:param type type: The expected type of the variable, defaults to None, optional
:param default: The default value of the var, defaults to None, optional
:param str name: The serialized name of the variable, defaults to None, optional
:param str title: The validation title of the variable, defaults to None, optional
:param str description: The validation description of the variable,
defaults to None, optional
:param bool required: Flag to indicate if variable is required during validation,
defaults to True, optional
:param list examples: A list of validation examples, if necessary,
defaults to None, optional
:param encoder: The encoder to use for the var, defaults to None, optional
:param decoder: The decoder to use for the var, defaults to None, optional
:param int min: The minimum constraint of the variable, defaults to None, optional
:param int max: The maximum constraint of the variable, defaults to None, optional
:param bool unique: Flag to indicate if variable should be unique,
may not apply to all variable types, defaults to None, optional
:param contains: Value that list varaible should contain in validation,
may not apply to all variable types, defaults to None, optional
:return: A new config variable
:rtype: attr.Attribute
"""
# NOTE: this method overrides some of the builtin Python method names on purpose in
# order to supply a readable and easy to understand api
# In this case it is not dangerous as they are only overriden in the scope and are
# never used within the scope
kwargs.update(dict(default=default, type=type))
return attr.ib(
metadata={
CONFIG_KEY: _ConfigEntry(
type=type,
default=default,
name=name,
title=title,
description=description,
required=required,
examples=examples,
encoder=encoder,
decoder=decoder,
min=min,
max=max,
unique=unique,
contains=contains,
)
},
**kwargs,
) | [
"def",
"var",
"(",
"type",
"=",
"None",
",",
"# noqa",
"default",
"=",
"None",
",",
"name",
"=",
"None",
",",
"title",
"=",
"None",
",",
"description",
"=",
"None",
",",
"required",
"=",
"True",
",",
"examples",
"=",
"None",
",",
"encoder",
"=",
"N... | Creates a config variable.
Use this method to create the class variables of your
:func:`config <file_config._file_config.config>` decorated class.
>>> import file_config
>>> @file_config.config
class MyConfig(object):
name = file_config.var(str)
:param type type: The expected type of the variable, defaults to None, optional
:param default: The default value of the var, defaults to None, optional
:param str name: The serialized name of the variable, defaults to None, optional
:param str title: The validation title of the variable, defaults to None, optional
:param str description: The validation description of the variable,
defaults to None, optional
:param bool required: Flag to indicate if variable is required during validation,
defaults to True, optional
:param list examples: A list of validation examples, if necessary,
defaults to None, optional
:param encoder: The encoder to use for the var, defaults to None, optional
:param decoder: The decoder to use for the var, defaults to None, optional
:param int min: The minimum constraint of the variable, defaults to None, optional
:param int max: The maximum constraint of the variable, defaults to None, optional
:param bool unique: Flag to indicate if variable should be unique,
may not apply to all variable types, defaults to None, optional
:param contains: Value that list varaible should contain in validation,
may not apply to all variable types, defaults to None, optional
:return: A new config variable
:rtype: attr.Attribute | [
"Creates",
"a",
"config",
"variable",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/_file_config.py#L166-L238 | train | 34,124 |
stephen-bunn/file-config | src/file_config/_file_config.py | make_config | def make_config(name, var_dict, title=None, description=None, **kwargs):
""" Creates a config instance from scratch.
Usage is virtually the same as :func:`attr.make_class`.
>>> import file_config
>>> MyConfig = file_config.make_config(
"MyConfig",
{"name": file_config.var(str)}
)
:param str name: The name of the config
:param dict var_dict: The dictionary of config variable definitions
:param str title: The title of the config, defaults to None, optional
:param str description: The description of the config, defaults to None, optional
:return: A new config class
:rtype: class
"""
return config(
attr.make_class(name, attrs={}, **kwargs),
these=var_dict,
title=title,
description=description,
) | python | def make_config(name, var_dict, title=None, description=None, **kwargs):
""" Creates a config instance from scratch.
Usage is virtually the same as :func:`attr.make_class`.
>>> import file_config
>>> MyConfig = file_config.make_config(
"MyConfig",
{"name": file_config.var(str)}
)
:param str name: The name of the config
:param dict var_dict: The dictionary of config variable definitions
:param str title: The title of the config, defaults to None, optional
:param str description: The description of the config, defaults to None, optional
:return: A new config class
:rtype: class
"""
return config(
attr.make_class(name, attrs={}, **kwargs),
these=var_dict,
title=title,
description=description,
) | [
"def",
"make_config",
"(",
"name",
",",
"var_dict",
",",
"title",
"=",
"None",
",",
"description",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"config",
"(",
"attr",
".",
"make_class",
"(",
"name",
",",
"attrs",
"=",
"{",
"}",
",",
"*... | Creates a config instance from scratch.
Usage is virtually the same as :func:`attr.make_class`.
>>> import file_config
>>> MyConfig = file_config.make_config(
"MyConfig",
{"name": file_config.var(str)}
)
:param str name: The name of the config
:param dict var_dict: The dictionary of config variable definitions
:param str title: The title of the config, defaults to None, optional
:param str description: The description of the config, defaults to None, optional
:return: A new config class
:rtype: class | [
"Creates",
"a",
"config",
"instance",
"from",
"scratch",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/_file_config.py#L241-L265 | train | 34,125 |
stephen-bunn/file-config | src/file_config/_file_config.py | _build | def _build(config_cls, dictionary, validate=False): # noqa
""" Builds an instance of ``config_cls`` using ``dictionary``.
:param type config_cls: The class to use for building
:param dict dictionary: The dictionary to use for building ``config_cls``
:param bool validate: Performs validation before building ``config_cls``,
defaults to False, optional
:return: An instance of ``config_cls``
:rtype: object
"""
if not is_config_type(config_cls):
raise ValueError(
f"cannot build {config_cls!r} from {dictionary!r}, "
f"{config_cls!r} is not a config"
)
# perform jsonschema validation on the given dictionary
# (simplifys dynamic typecasting)
if validate:
jsonschema.validate(dictionary, build_schema(config_cls))
kwargs = {}
for var in attr.fields(config_cls):
if not is_config_var(var):
continue
entry = var.metadata[CONFIG_KEY]
arg_key = entry.name if entry.name else var.name
arg_default = var.default if var.default is not None else None
if callable(entry.decoder):
kwargs[var.name] = entry.decoder(dictionary.get(arg_key, arg_default))
continue
if is_array_type(entry.type):
if is_typing_type(entry.type) and len(entry.type.__args__) > 0:
nested_type = entry.type.__args__[0]
if is_config_type(nested_type):
kwargs[var.name] = [
_build(nested_type, item)
for item in dictionary.get(arg_key, [])
]
else:
kwargs[var.name] = typecast(entry.type, dictionary.get(arg_key, []))
elif is_object_type(entry.type):
item = dictionary.get(arg_key, {})
if is_typing_type(entry.type) and len(entry.type.__args__) == 2:
(_, value_type) = entry.type.__args__
kwargs[var.name] = {
key: _build(value_type, value)
if is_config_type(value_type)
else typecast(value_type, value)
for (key, value) in item.items()
}
else:
kwargs[var.name] = typecast(entry.type, item)
elif is_config_type(entry.type):
if arg_key not in dictionary:
# if the default value for a nested config is the nested config class
# then build the empty state of the nested config
if is_config_type(arg_default) and entry.type == arg_default:
kwargs[var.name] = _build(entry.type, {})
else:
kwargs[var.name] = arg_default
else:
kwargs[var.name] = _build(
entry.type, dictionary.get(arg_key, arg_default)
)
else:
if arg_key not in dictionary:
kwargs[var.name] = arg_default
else:
kwargs[var.name] = typecast(
entry.type, dictionary.get(arg_key, arg_default)
)
return config_cls(**kwargs) | python | def _build(config_cls, dictionary, validate=False): # noqa
""" Builds an instance of ``config_cls`` using ``dictionary``.
:param type config_cls: The class to use for building
:param dict dictionary: The dictionary to use for building ``config_cls``
:param bool validate: Performs validation before building ``config_cls``,
defaults to False, optional
:return: An instance of ``config_cls``
:rtype: object
"""
if not is_config_type(config_cls):
raise ValueError(
f"cannot build {config_cls!r} from {dictionary!r}, "
f"{config_cls!r} is not a config"
)
# perform jsonschema validation on the given dictionary
# (simplifys dynamic typecasting)
if validate:
jsonschema.validate(dictionary, build_schema(config_cls))
kwargs = {}
for var in attr.fields(config_cls):
if not is_config_var(var):
continue
entry = var.metadata[CONFIG_KEY]
arg_key = entry.name if entry.name else var.name
arg_default = var.default if var.default is not None else None
if callable(entry.decoder):
kwargs[var.name] = entry.decoder(dictionary.get(arg_key, arg_default))
continue
if is_array_type(entry.type):
if is_typing_type(entry.type) and len(entry.type.__args__) > 0:
nested_type = entry.type.__args__[0]
if is_config_type(nested_type):
kwargs[var.name] = [
_build(nested_type, item)
for item in dictionary.get(arg_key, [])
]
else:
kwargs[var.name] = typecast(entry.type, dictionary.get(arg_key, []))
elif is_object_type(entry.type):
item = dictionary.get(arg_key, {})
if is_typing_type(entry.type) and len(entry.type.__args__) == 2:
(_, value_type) = entry.type.__args__
kwargs[var.name] = {
key: _build(value_type, value)
if is_config_type(value_type)
else typecast(value_type, value)
for (key, value) in item.items()
}
else:
kwargs[var.name] = typecast(entry.type, item)
elif is_config_type(entry.type):
if arg_key not in dictionary:
# if the default value for a nested config is the nested config class
# then build the empty state of the nested config
if is_config_type(arg_default) and entry.type == arg_default:
kwargs[var.name] = _build(entry.type, {})
else:
kwargs[var.name] = arg_default
else:
kwargs[var.name] = _build(
entry.type, dictionary.get(arg_key, arg_default)
)
else:
if arg_key not in dictionary:
kwargs[var.name] = arg_default
else:
kwargs[var.name] = typecast(
entry.type, dictionary.get(arg_key, arg_default)
)
return config_cls(**kwargs) | [
"def",
"_build",
"(",
"config_cls",
",",
"dictionary",
",",
"validate",
"=",
"False",
")",
":",
"# noqa",
"if",
"not",
"is_config_type",
"(",
"config_cls",
")",
":",
"raise",
"ValueError",
"(",
"f\"cannot build {config_cls!r} from {dictionary!r}, \"",
"f\"{config_cls!... | Builds an instance of ``config_cls`` using ``dictionary``.
:param type config_cls: The class to use for building
:param dict dictionary: The dictionary to use for building ``config_cls``
:param bool validate: Performs validation before building ``config_cls``,
defaults to False, optional
:return: An instance of ``config_cls``
:rtype: object | [
"Builds",
"an",
"instance",
"of",
"config_cls",
"using",
"dictionary",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/_file_config.py#L268-L345 | train | 34,126 |
stephen-bunn/file-config | src/file_config/_file_config.py | _dump | def _dump(config_instance, dict_type=OrderedDict):
""" Dumps an instance from ``instance`` to a dictionary type mapping.
:param object instance: The instance to serialized to a dictionary
:param object dict_type: Some dictionary type, defaults to ``OrderedDict``
:return: Dumped dictionary
:rtype: collections.OrderedDict (or instance of ``dict_type``)
"""
if not is_config(config_instance):
raise ValueError(
f"cannot dump instance {config_instance!r} to dict, "
"instance is not a config class"
)
result = dict_type()
for var in attr.fields(config_instance.__class__):
if not is_config_var(var):
continue
entry = var.metadata[CONFIG_KEY]
dump_key = entry.name if entry.name else var.name
dump_default = var.default if var.default else None
if callable(entry.encoder):
result[dump_key] = entry.encoder(
getattr(config_instance, var.name, dump_default)
)
continue
if is_array_type(entry.type):
items = getattr(config_instance, var.name, [])
if items is not None:
result[dump_key] = [
(_dump(item, dict_type=dict_type) if is_config(item) else item)
for item in items
]
elif is_enum_type(entry.type):
dump_value = getattr(config_instance, var.name, dump_default)
result[dump_key] = (
dump_value.value if dump_value in entry.type else dump_value
)
elif is_bytes_type(entry.type):
result[dump_key] = encode_bytes(
getattr(config_instance, var.name, dump_default)
)
else:
if is_config_type(entry.type):
result[dump_key] = _dump(
getattr(config_instance, var.name, {}), dict_type=dict_type
)
else:
dump_value = getattr(config_instance, var.name, dump_default)
if is_object_type(type(dump_value)):
dump_value = {
key: (
_dump(value, dict_type=dict_type)
if is_config(value)
else value
)
for (key, value) in dump_value.items()
}
if dump_value is not None:
result[dump_key] = dump_value
return result | python | def _dump(config_instance, dict_type=OrderedDict):
""" Dumps an instance from ``instance`` to a dictionary type mapping.
:param object instance: The instance to serialized to a dictionary
:param object dict_type: Some dictionary type, defaults to ``OrderedDict``
:return: Dumped dictionary
:rtype: collections.OrderedDict (or instance of ``dict_type``)
"""
if not is_config(config_instance):
raise ValueError(
f"cannot dump instance {config_instance!r} to dict, "
"instance is not a config class"
)
result = dict_type()
for var in attr.fields(config_instance.__class__):
if not is_config_var(var):
continue
entry = var.metadata[CONFIG_KEY]
dump_key = entry.name if entry.name else var.name
dump_default = var.default if var.default else None
if callable(entry.encoder):
result[dump_key] = entry.encoder(
getattr(config_instance, var.name, dump_default)
)
continue
if is_array_type(entry.type):
items = getattr(config_instance, var.name, [])
if items is not None:
result[dump_key] = [
(_dump(item, dict_type=dict_type) if is_config(item) else item)
for item in items
]
elif is_enum_type(entry.type):
dump_value = getattr(config_instance, var.name, dump_default)
result[dump_key] = (
dump_value.value if dump_value in entry.type else dump_value
)
elif is_bytes_type(entry.type):
result[dump_key] = encode_bytes(
getattr(config_instance, var.name, dump_default)
)
else:
if is_config_type(entry.type):
result[dump_key] = _dump(
getattr(config_instance, var.name, {}), dict_type=dict_type
)
else:
dump_value = getattr(config_instance, var.name, dump_default)
if is_object_type(type(dump_value)):
dump_value = {
key: (
_dump(value, dict_type=dict_type)
if is_config(value)
else value
)
for (key, value) in dump_value.items()
}
if dump_value is not None:
result[dump_key] = dump_value
return result | [
"def",
"_dump",
"(",
"config_instance",
",",
"dict_type",
"=",
"OrderedDict",
")",
":",
"if",
"not",
"is_config",
"(",
"config_instance",
")",
":",
"raise",
"ValueError",
"(",
"f\"cannot dump instance {config_instance!r} to dict, \"",
"\"instance is not a config class\"",
... | Dumps an instance from ``instance`` to a dictionary type mapping.
:param object instance: The instance to serialized to a dictionary
:param object dict_type: Some dictionary type, defaults to ``OrderedDict``
:return: Dumped dictionary
:rtype: collections.OrderedDict (or instance of ``dict_type``) | [
"Dumps",
"an",
"instance",
"from",
"instance",
"to",
"a",
"dictionary",
"type",
"mapping",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/_file_config.py#L348-L414 | train | 34,127 |
stephen-bunn/file-config | src/file_config/_file_config.py | validate | def validate(instance):
""" Validates a given ``instance``.
:param object instance: The instance to validate
:raises jsonschema.exceptions.ValidationError: On failed validation
"""
jsonschema.validate(
to_dict(instance, dict_type=dict), build_schema(instance.__class__)
) | python | def validate(instance):
""" Validates a given ``instance``.
:param object instance: The instance to validate
:raises jsonschema.exceptions.ValidationError: On failed validation
"""
jsonschema.validate(
to_dict(instance, dict_type=dict), build_schema(instance.__class__)
) | [
"def",
"validate",
"(",
"instance",
")",
":",
"jsonschema",
".",
"validate",
"(",
"to_dict",
"(",
"instance",
",",
"dict_type",
"=",
"dict",
")",
",",
"build_schema",
"(",
"instance",
".",
"__class__",
")",
")"
] | Validates a given ``instance``.
:param object instance: The instance to validate
:raises jsonschema.exceptions.ValidationError: On failed validation | [
"Validates",
"a",
"given",
"instance",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/_file_config.py#L417-L426 | train | 34,128 |
stephen-bunn/file-config | src/file_config/_file_config.py | from_dict | def from_dict(config_cls, dictionary, validate=False):
""" Loads an instance of ``config_cls`` from a dictionary.
:param type config_cls: The class to build an instance of
:param dict dictionary: The dictionary to load from
:param bool validate: Preforms validation before building ``config_cls``,
defaults to False, optional
:return: An instance of ``config_cls``
:rtype: object
"""
return _build(config_cls, dictionary, validate=validate) | python | def from_dict(config_cls, dictionary, validate=False):
""" Loads an instance of ``config_cls`` from a dictionary.
:param type config_cls: The class to build an instance of
:param dict dictionary: The dictionary to load from
:param bool validate: Preforms validation before building ``config_cls``,
defaults to False, optional
:return: An instance of ``config_cls``
:rtype: object
"""
return _build(config_cls, dictionary, validate=validate) | [
"def",
"from_dict",
"(",
"config_cls",
",",
"dictionary",
",",
"validate",
"=",
"False",
")",
":",
"return",
"_build",
"(",
"config_cls",
",",
"dictionary",
",",
"validate",
"=",
"validate",
")"
] | Loads an instance of ``config_cls`` from a dictionary.
:param type config_cls: The class to build an instance of
:param dict dictionary: The dictionary to load from
:param bool validate: Preforms validation before building ``config_cls``,
defaults to False, optional
:return: An instance of ``config_cls``
:rtype: object | [
"Loads",
"an",
"instance",
"of",
"config_cls",
"from",
"a",
"dictionary",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/_file_config.py#L429-L440 | train | 34,129 |
stephen-bunn/file-config | src/file_config/handlers/_common.py | BaseHandler.imported | def imported(self):
""" The imported handler module.
:return: The imported handler module.
:rtype: module
"""
if not hasattr(self, "_imported"):
self._imported = self._discover_import()
return self._imported | python | def imported(self):
""" The imported handler module.
:return: The imported handler module.
:rtype: module
"""
if not hasattr(self, "_imported"):
self._imported = self._discover_import()
return self._imported | [
"def",
"imported",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"_imported\"",
")",
":",
"self",
".",
"_imported",
"=",
"self",
".",
"_discover_import",
"(",
")",
"return",
"self",
".",
"_imported"
] | The imported handler module.
:return: The imported handler module.
:rtype: module | [
"The",
"imported",
"handler",
"module",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/handlers/_common.py#L15-L24 | train | 34,130 |
stephen-bunn/file-config | src/file_config/handlers/_common.py | BaseHandler.handler | def handler(self):
""" The current imported serialization handler module.
:return: The imported handler
:rtype: module
"""
if not hasattr(self, "_handler"):
self._handler = sys.modules[self.imported]
return self._handler | python | def handler(self):
""" The current imported serialization handler module.
:return: The imported handler
:rtype: module
"""
if not hasattr(self, "_handler"):
self._handler = sys.modules[self.imported]
return self._handler | [
"def",
"handler",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"_handler\"",
")",
":",
"self",
".",
"_handler",
"=",
"sys",
".",
"modules",
"[",
"self",
".",
"imported",
"]",
"return",
"self",
".",
"_handler"
] | The current imported serialization handler module.
:return: The imported handler
:rtype: module | [
"The",
"current",
"imported",
"serialization",
"handler",
"module",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/handlers/_common.py#L27-L36 | train | 34,131 |
stephen-bunn/file-config | src/file_config/handlers/_common.py | BaseHandler.available | def available(self):
""" True if any of the supported modules from ``packages`` is available for use.
:return: True if any modules from ``packages`` exist
:rtype: bool
"""
for module_name in self.packages:
if importlib.util.find_spec(module_name):
return True
return False | python | def available(self):
""" True if any of the supported modules from ``packages`` is available for use.
:return: True if any modules from ``packages`` exist
:rtype: bool
"""
for module_name in self.packages:
if importlib.util.find_spec(module_name):
return True
return False | [
"def",
"available",
"(",
"self",
")",
":",
"for",
"module_name",
"in",
"self",
".",
"packages",
":",
"if",
"importlib",
".",
"util",
".",
"find_spec",
"(",
"module_name",
")",
":",
"return",
"True",
"return",
"False"
] | True if any of the supported modules from ``packages`` is available for use.
:return: True if any modules from ``packages`` exist
:rtype: bool | [
"True",
"if",
"any",
"of",
"the",
"supported",
"modules",
"from",
"packages",
"is",
"available",
"for",
"use",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/handlers/_common.py#L69-L79 | train | 34,132 |
stephen-bunn/file-config | src/file_config/handlers/_common.py | BaseHandler._discover_import | def _discover_import(self, prefer=None):
""" Discovers and imports the best available module from ``packages``.
:raises ModuleNotFoundError: If no module is available
:return: The name of the module to use
:rtype: str
"""
available_packages = self.packages
if isinstance(prefer, str):
available_packages = (prefer,)
for module_name in available_packages:
spec = importlib.util.find_spec(module_name)
if spec is not None:
importlib.import_module(module_name)
imported_hook = getattr(self, f"on_{module_name}_imported", None)
if callable(imported_hook):
imported_hook(sys.modules[module_name])
return module_name
raise ModuleNotFoundError(f"no modules in {available_packages!r} found") | python | def _discover_import(self, prefer=None):
""" Discovers and imports the best available module from ``packages``.
:raises ModuleNotFoundError: If no module is available
:return: The name of the module to use
:rtype: str
"""
available_packages = self.packages
if isinstance(prefer, str):
available_packages = (prefer,)
for module_name in available_packages:
spec = importlib.util.find_spec(module_name)
if spec is not None:
importlib.import_module(module_name)
imported_hook = getattr(self, f"on_{module_name}_imported", None)
if callable(imported_hook):
imported_hook(sys.modules[module_name])
return module_name
raise ModuleNotFoundError(f"no modules in {available_packages!r} found") | [
"def",
"_discover_import",
"(",
"self",
",",
"prefer",
"=",
"None",
")",
":",
"available_packages",
"=",
"self",
".",
"packages",
"if",
"isinstance",
"(",
"prefer",
",",
"str",
")",
":",
"available_packages",
"=",
"(",
"prefer",
",",
")",
"for",
"module_na... | Discovers and imports the best available module from ``packages``.
:raises ModuleNotFoundError: If no module is available
:return: The name of the module to use
:rtype: str | [
"Discovers",
"and",
"imports",
"the",
"best",
"available",
"module",
"from",
"packages",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/handlers/_common.py#L81-L101 | train | 34,133 |
stephen-bunn/file-config | src/file_config/handlers/_common.py | BaseHandler._prefer_package | def _prefer_package(self, package):
""" Prefer a serializtion handler over other handlers.
:param str package: The name of the package to use
:raises ValueError: When the given package name is not one of the available
supported serializtion packages for this handler
:return: The name of the serialization handler
:rtype: str
"""
if isinstance(package, str) and package != self.imported:
if package not in self.packages:
raise ValueError(
f"preferred package {package!r} does not exist, allowed are "
f"{self.packages!r}"
)
# clear out current serialization handler (if exists)
if hasattr(self, "_handler"):
del self._handler
# manually update imported handlers with a given preference
self._imported = self._discover_import(prefer=package)
return package
return self.imported | python | def _prefer_package(self, package):
""" Prefer a serializtion handler over other handlers.
:param str package: The name of the package to use
:raises ValueError: When the given package name is not one of the available
supported serializtion packages for this handler
:return: The name of the serialization handler
:rtype: str
"""
if isinstance(package, str) and package != self.imported:
if package not in self.packages:
raise ValueError(
f"preferred package {package!r} does not exist, allowed are "
f"{self.packages!r}"
)
# clear out current serialization handler (if exists)
if hasattr(self, "_handler"):
del self._handler
# manually update imported handlers with a given preference
self._imported = self._discover_import(prefer=package)
return package
return self.imported | [
"def",
"_prefer_package",
"(",
"self",
",",
"package",
")",
":",
"if",
"isinstance",
"(",
"package",
",",
"str",
")",
"and",
"package",
"!=",
"self",
".",
"imported",
":",
"if",
"package",
"not",
"in",
"self",
".",
"packages",
":",
"raise",
"ValueError",... | Prefer a serializtion handler over other handlers.
:param str package: The name of the package to use
:raises ValueError: When the given package name is not one of the available
supported serializtion packages for this handler
:return: The name of the serialization handler
:rtype: str | [
"Prefer",
"a",
"serializtion",
"handler",
"over",
"other",
"handlers",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/handlers/_common.py#L103-L125 | train | 34,134 |
stephen-bunn/file-config | src/file_config/handlers/_common.py | BaseHandler.dumps | def dumps(self, config, instance, prefer=None, **kwargs):
""" An abstract dumps method which dumps an instance into the subclasses format.
:param class config: The config class of the instance
:param object instance: The instance to dump
:param str prefer: The preferred serialization module name
:raises ValueError: If dump handler does not provide handler method
:return: The dumped content
:rtype: str
"""
dumper = self._prefer_package(prefer)
dumps_hook_name = f"on_{dumper}_dumps"
dumps_hook = getattr(self, dumps_hook_name, None)
if not callable(dumps_hook):
raise ValueError(
f"no dumps handler for {self.imported!r}, requires method "
f"{dumps_hook_name!r} in {self!r}"
)
extras = self.options.copy()
for (key, value) in kwargs.items():
if key not in extras.keys():
warnings.warn(
f"handler 'dumps_{self.name!s}' does not support {key!r} argument"
)
else:
extras[key] = value
return dumps_hook(self.handler, config, instance, **extras) | python | def dumps(self, config, instance, prefer=None, **kwargs):
""" An abstract dumps method which dumps an instance into the subclasses format.
:param class config: The config class of the instance
:param object instance: The instance to dump
:param str prefer: The preferred serialization module name
:raises ValueError: If dump handler does not provide handler method
:return: The dumped content
:rtype: str
"""
dumper = self._prefer_package(prefer)
dumps_hook_name = f"on_{dumper}_dumps"
dumps_hook = getattr(self, dumps_hook_name, None)
if not callable(dumps_hook):
raise ValueError(
f"no dumps handler for {self.imported!r}, requires method "
f"{dumps_hook_name!r} in {self!r}"
)
extras = self.options.copy()
for (key, value) in kwargs.items():
if key not in extras.keys():
warnings.warn(
f"handler 'dumps_{self.name!s}' does not support {key!r} argument"
)
else:
extras[key] = value
return dumps_hook(self.handler, config, instance, **extras) | [
"def",
"dumps",
"(",
"self",
",",
"config",
",",
"instance",
",",
"prefer",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"dumper",
"=",
"self",
".",
"_prefer_package",
"(",
"prefer",
")",
"dumps_hook_name",
"=",
"f\"on_{dumper}_dumps\"",
"dumps_hook",
"... | An abstract dumps method which dumps an instance into the subclasses format.
:param class config: The config class of the instance
:param object instance: The instance to dump
:param str prefer: The preferred serialization module name
:raises ValueError: If dump handler does not provide handler method
:return: The dumped content
:rtype: str | [
"An",
"abstract",
"dumps",
"method",
"which",
"dumps",
"an",
"instance",
"into",
"the",
"subclasses",
"format",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/handlers/_common.py#L127-L155 | train | 34,135 |
stephen-bunn/file-config | src/file_config/handlers/_common.py | BaseHandler.loads | def loads(self, config, content, prefer=None):
""" An abstract loads method which loads an instance from some content.
:param class config: The config class to load into
:param str content: The content to load from
:param str prefer: The preferred serialization module name
:raises ValueError: If load handler does not provided handler method
:return: A dictionary converted from the given content
:rtype: dict
"""
loader = self._prefer_package(prefer)
loads_hook_name = f"on_{loader}_loads"
loads_hook = getattr(self, loads_hook_name, None)
if not callable(loads_hook):
raise ValueError(
f"no loads handler for {self.imported!r}, requires method "
f"{loads_hook_name!r} in {self!r}"
)
return loads_hook(self.handler, config, content) | python | def loads(self, config, content, prefer=None):
""" An abstract loads method which loads an instance from some content.
:param class config: The config class to load into
:param str content: The content to load from
:param str prefer: The preferred serialization module name
:raises ValueError: If load handler does not provided handler method
:return: A dictionary converted from the given content
:rtype: dict
"""
loader = self._prefer_package(prefer)
loads_hook_name = f"on_{loader}_loads"
loads_hook = getattr(self, loads_hook_name, None)
if not callable(loads_hook):
raise ValueError(
f"no loads handler for {self.imported!r}, requires method "
f"{loads_hook_name!r} in {self!r}"
)
return loads_hook(self.handler, config, content) | [
"def",
"loads",
"(",
"self",
",",
"config",
",",
"content",
",",
"prefer",
"=",
"None",
")",
":",
"loader",
"=",
"self",
".",
"_prefer_package",
"(",
"prefer",
")",
"loads_hook_name",
"=",
"f\"on_{loader}_loads\"",
"loads_hook",
"=",
"getattr",
"(",
"self",
... | An abstract loads method which loads an instance from some content.
:param class config: The config class to load into
:param str content: The content to load from
:param str prefer: The preferred serialization module name
:raises ValueError: If load handler does not provided handler method
:return: A dictionary converted from the given content
:rtype: dict | [
"An",
"abstract",
"loads",
"method",
"which",
"loads",
"an",
"instance",
"from",
"some",
"content",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/handlers/_common.py#L157-L176 | train | 34,136 |
stephen-bunn/file-config | src/file_config/handlers/_common.py | BaseHandler.dump | def dump(self, config, instance, file_object, prefer=None, **kwargs):
""" An abstract method that dumps to a given file object.
:param class config: The config class of the instance
:param object instance: The instance to dump
:param file file_object: The file object to dump to
:param str prefer: The preferred serialization module name
"""
file_object.write(self.dumps(config, instance, prefer=prefer, **kwargs)) | python | def dump(self, config, instance, file_object, prefer=None, **kwargs):
""" An abstract method that dumps to a given file object.
:param class config: The config class of the instance
:param object instance: The instance to dump
:param file file_object: The file object to dump to
:param str prefer: The preferred serialization module name
"""
file_object.write(self.dumps(config, instance, prefer=prefer, **kwargs)) | [
"def",
"dump",
"(",
"self",
",",
"config",
",",
"instance",
",",
"file_object",
",",
"prefer",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"file_object",
".",
"write",
"(",
"self",
".",
"dumps",
"(",
"config",
",",
"instance",
",",
"prefer",
"=",... | An abstract method that dumps to a given file object.
:param class config: The config class of the instance
:param object instance: The instance to dump
:param file file_object: The file object to dump to
:param str prefer: The preferred serialization module name | [
"An",
"abstract",
"method",
"that",
"dumps",
"to",
"a",
"given",
"file",
"object",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/handlers/_common.py#L178-L187 | train | 34,137 |
stephen-bunn/file-config | src/file_config/handlers/_common.py | BaseHandler.load | def load(self, config, file_object, prefer=None):
""" An abstract method that loads from a given file object.
:param class config: The config class to load into
:param file file_object: The file object to load from
:param str prefer: The preferred serialization module name
:returns: A dictionary converted from the content of the given file object
:rtype: dict
"""
return self.loads(config, file_object.read(), prefer=prefer) | python | def load(self, config, file_object, prefer=None):
""" An abstract method that loads from a given file object.
:param class config: The config class to load into
:param file file_object: The file object to load from
:param str prefer: The preferred serialization module name
:returns: A dictionary converted from the content of the given file object
:rtype: dict
"""
return self.loads(config, file_object.read(), prefer=prefer) | [
"def",
"load",
"(",
"self",
",",
"config",
",",
"file_object",
",",
"prefer",
"=",
"None",
")",
":",
"return",
"self",
".",
"loads",
"(",
"config",
",",
"file_object",
".",
"read",
"(",
")",
",",
"prefer",
"=",
"prefer",
")"
] | An abstract method that loads from a given file object.
:param class config: The config class to load into
:param file file_object: The file object to load from
:param str prefer: The preferred serialization module name
:returns: A dictionary converted from the content of the given file object
:rtype: dict | [
"An",
"abstract",
"method",
"that",
"loads",
"from",
"a",
"given",
"file",
"object",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/handlers/_common.py#L189-L199 | train | 34,138 |
stephen-bunn/file-config | src/file_config/schema_builder.py | _build_attribute_modifiers | def _build_attribute_modifiers(var, attribute_mapping, ignore=None):
""" Handles adding schema modifiers for a given config var and some mapping.
:param attr._make.Attribute var: The config var to build modifiers for
:param Dict[str, str] attribute_mapping: A mapping of attribute to jsonschema
modifiers
:param List[str] ignore: A list of mapping keys to ignore, defaults to None
:raises ValueError: When the given ``var`` is not an config var
:raises ValueError: When jsonschema modifiers are given the wrong type
:return: A dictionary of the built modifiers
:rtype: Dict[str, Any]
"""
if not isinstance(ignore, list):
ignore = ["type", "name", "required", "default"]
if not is_config_var(var):
raise ValueError(
f"cannot build field modifiers for {var!r}, is not a config var"
)
entry = var.metadata[CONFIG_KEY]
modifiers = {}
for (entry_attribute, entry_value) in zip(
attr.fields(type(entry)), attr.astuple(entry)
):
if entry_value is not None:
if entry_attribute.name in ignore:
continue
elif entry_attribute.name in attribute_mapping:
# NOTE: stupid type comparisons required for off case where
# bool is a subclass of int `isinstance(True, (int, float)) == True`
if entry_attribute.type is not None and (
type(entry_value) in entry_attribute.type
if isinstance(entry_attribute.type, (list, tuple, set))
else type(entry_value) == entry_attribute.type
): # noqa
modifiers[attribute_mapping[entry_attribute.name]] = entry_value
else:
raise ValueError(
f"invalid modifier type for modifier {entry_attribute.name!r} "
f"on var {var.name!r}, expected type {entry_attribute.type!r}, "
f"received {entry_value!r} of type {type(entry_value)!r}"
)
else:
warnings.warn(
f"field modifier {entry_attribute.name!r} has no effect on var "
f"{var.name!r} of type {entry.type!r}"
)
return modifiers | python | def _build_attribute_modifiers(var, attribute_mapping, ignore=None):
""" Handles adding schema modifiers for a given config var and some mapping.
:param attr._make.Attribute var: The config var to build modifiers for
:param Dict[str, str] attribute_mapping: A mapping of attribute to jsonschema
modifiers
:param List[str] ignore: A list of mapping keys to ignore, defaults to None
:raises ValueError: When the given ``var`` is not an config var
:raises ValueError: When jsonschema modifiers are given the wrong type
:return: A dictionary of the built modifiers
:rtype: Dict[str, Any]
"""
if not isinstance(ignore, list):
ignore = ["type", "name", "required", "default"]
if not is_config_var(var):
raise ValueError(
f"cannot build field modifiers for {var!r}, is not a config var"
)
entry = var.metadata[CONFIG_KEY]
modifiers = {}
for (entry_attribute, entry_value) in zip(
attr.fields(type(entry)), attr.astuple(entry)
):
if entry_value is not None:
if entry_attribute.name in ignore:
continue
elif entry_attribute.name in attribute_mapping:
# NOTE: stupid type comparisons required for off case where
# bool is a subclass of int `isinstance(True, (int, float)) == True`
if entry_attribute.type is not None and (
type(entry_value) in entry_attribute.type
if isinstance(entry_attribute.type, (list, tuple, set))
else type(entry_value) == entry_attribute.type
): # noqa
modifiers[attribute_mapping[entry_attribute.name]] = entry_value
else:
raise ValueError(
f"invalid modifier type for modifier {entry_attribute.name!r} "
f"on var {var.name!r}, expected type {entry_attribute.type!r}, "
f"received {entry_value!r} of type {type(entry_value)!r}"
)
else:
warnings.warn(
f"field modifier {entry_attribute.name!r} has no effect on var "
f"{var.name!r} of type {entry.type!r}"
)
return modifiers | [
"def",
"_build_attribute_modifiers",
"(",
"var",
",",
"attribute_mapping",
",",
"ignore",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"ignore",
",",
"list",
")",
":",
"ignore",
"=",
"[",
"\"type\"",
",",
"\"name\"",
",",
"\"required\"",
",",
"\"... | Handles adding schema modifiers for a given config var and some mapping.
:param attr._make.Attribute var: The config var to build modifiers for
:param Dict[str, str] attribute_mapping: A mapping of attribute to jsonschema
modifiers
:param List[str] ignore: A list of mapping keys to ignore, defaults to None
:raises ValueError: When the given ``var`` is not an config var
:raises ValueError: When jsonschema modifiers are given the wrong type
:return: A dictionary of the built modifiers
:rtype: Dict[str, Any] | [
"Handles",
"adding",
"schema",
"modifiers",
"for",
"a",
"given",
"config",
"var",
"and",
"some",
"mapping",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/schema_builder.py#L41-L91 | train | 34,139 |
stephen-bunn/file-config | src/file_config/schema_builder.py | _build_enum_type | def _build_enum_type(var, property_path=None):
""" Builds schema definitions for enum type values.
:param var: The enum type value
:param List[str] property_path: The property path of the current type,
defaults to None, optional
:return: The built schema definition
:rtype: Dict[str, Any]
"""
if not property_path:
property_path = []
entry = var.metadata[CONFIG_KEY]
enum_values = [member.value for member in entry.type.__members__.values()]
schema = {"enum": enum_values}
for (type_name, check) in dict(
bool=is_bool_type,
string=is_string_type,
number=is_number_type,
integer=is_integer_type,
).items():
if all(check(type(_)) for _ in enum_values):
schema["type"] = type_name
break
return schema | python | def _build_enum_type(var, property_path=None):
""" Builds schema definitions for enum type values.
:param var: The enum type value
:param List[str] property_path: The property path of the current type,
defaults to None, optional
:return: The built schema definition
:rtype: Dict[str, Any]
"""
if not property_path:
property_path = []
entry = var.metadata[CONFIG_KEY]
enum_values = [member.value for member in entry.type.__members__.values()]
schema = {"enum": enum_values}
for (type_name, check) in dict(
bool=is_bool_type,
string=is_string_type,
number=is_number_type,
integer=is_integer_type,
).items():
if all(check(type(_)) for _ in enum_values):
schema["type"] = type_name
break
return schema | [
"def",
"_build_enum_type",
"(",
"var",
",",
"property_path",
"=",
"None",
")",
":",
"if",
"not",
"property_path",
":",
"property_path",
"=",
"[",
"]",
"entry",
"=",
"var",
".",
"metadata",
"[",
"CONFIG_KEY",
"]",
"enum_values",
"=",
"[",
"member",
".",
"... | Builds schema definitions for enum type values.
:param var: The enum type value
:param List[str] property_path: The property path of the current type,
defaults to None, optional
:return: The built schema definition
:rtype: Dict[str, Any] | [
"Builds",
"schema",
"definitions",
"for",
"enum",
"type",
"values",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/schema_builder.py#L110-L137 | train | 34,140 |
stephen-bunn/file-config | src/file_config/schema_builder.py | _build_string_type | def _build_string_type(var, property_path=None):
""" Builds schema definitions for string type values.
:param var: The string type value
:param List[str] property_path: The property path of the current type,
defaults to None, optional
:param property_path: [type], optional
:return: The built schema definition
:rtype: Dict[str, Any]
"""
if not property_path:
property_path = []
schema = {"type": "string"}
if is_builtin_type(var):
return schema
if is_regex_type(var):
schema["pattern"] = var.__supertype__.pattern
return schema
if is_config_var(var):
schema.update(
_build_attribute_modifiers(var, {"min": "minLength", "max": "maxLength"})
)
if is_regex_type(var.type):
schema["pattern"] = var.type.__supertype__.pattern
return schema | python | def _build_string_type(var, property_path=None):
""" Builds schema definitions for string type values.
:param var: The string type value
:param List[str] property_path: The property path of the current type,
defaults to None, optional
:param property_path: [type], optional
:return: The built schema definition
:rtype: Dict[str, Any]
"""
if not property_path:
property_path = []
schema = {"type": "string"}
if is_builtin_type(var):
return schema
if is_regex_type(var):
schema["pattern"] = var.__supertype__.pattern
return schema
if is_config_var(var):
schema.update(
_build_attribute_modifiers(var, {"min": "minLength", "max": "maxLength"})
)
if is_regex_type(var.type):
schema["pattern"] = var.type.__supertype__.pattern
return schema | [
"def",
"_build_string_type",
"(",
"var",
",",
"property_path",
"=",
"None",
")",
":",
"if",
"not",
"property_path",
":",
"property_path",
"=",
"[",
"]",
"schema",
"=",
"{",
"\"type\"",
":",
"\"string\"",
"}",
"if",
"is_builtin_type",
"(",
"var",
")",
":",
... | Builds schema definitions for string type values.
:param var: The string type value
:param List[str] property_path: The property path of the current type,
defaults to None, optional
:param property_path: [type], optional
:return: The built schema definition
:rtype: Dict[str, Any] | [
"Builds",
"schema",
"definitions",
"for",
"string",
"type",
"values",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/schema_builder.py#L157-L186 | train | 34,141 |
def _build_integer_type(var, property_path=None):
    """ Builds schema definitions for integer type values.

    :param var: The integer type value (builtin type or config var)
    :param List[str] property_path: The property path of the current type,
        defaults to None, optional
    :return: The built schema definition
    :rtype: Dict[str, Any]
    """
    # FIX: the original docstring listed ``property_path`` twice, the second
    # time with an unfilled ``[type]`` placeholder; merged into one entry.
    if not property_path:
        property_path = []
    schema = {"type": "integer"}
    # bare builtin ``int`` carries no attrs metadata, so no modifiers apply
    if is_builtin_type(var):
        return schema
    if is_config_var(var):
        # ``min``/``max`` on the var map to JSON Schema integer bounds
        schema.update(
            _build_attribute_modifiers(var, {"min": "minimum", "max": "maximum"})
        )
    return schema
def _build_number_type(var, property_path=None):
    """ Builds schema definitions for number type values.

    :param var: The number type value (builtin type or config var)
    :param List[str] property_path: The property path of the current type,
        defaults to None, optional
    :return: The built schema definition
    :rtype: Dict[str, Any]
    """
    # FIX: the original docstring listed ``property_path`` twice, the second
    # time with an unfilled ``[type]`` placeholder; merged into one entry.
    if not property_path:
        property_path = []
    schema = {"type": "number"}
    # bare builtin ``float`` carries no attrs metadata, so no modifiers apply
    if is_builtin_type(var):
        return schema
    if is_config_var(var):
        # ``min``/``max`` on the var map to JSON Schema numeric bounds
        schema.update(
            _build_attribute_modifiers(var, {"min": "minimum", "max": "maximum"})
        )
    return schema
def _build_array_type(var, property_path=None):
    """ Builds schema definitions for array type values.

    :param var: The array type value (builtin type, config var, or typing type)
    :param List[str] property_path: The property path of the current type,
        defaults to None, optional
    :return: The built schema definition
    :rtype: Dict[str, Any]
    """
    if not property_path:
        property_path = []
    schema = {"type": "array", "items": {"$id": f"#/{'/'.join(property_path)}/items"}}
    if is_builtin_type(var):
        return schema
    if is_config_var(var):
        schema.update(
            _build_attribute_modifiers(
                var,
                {
                    "min": "minItems",
                    "max": "maxItems",
                    "unique": "uniqueItems",
                    "contains": "contains",
                },
            )
        )
        if is_typing_type(var.type) and len(var.type.__args__) > 0:
            # NOTE: typing.List only allows one typing argument
            nested_type = var.type.__args__[0]
            schema["items"].update(
                _build(nested_type, property_path=property_path + ["items"])
            )
    elif is_typing_type(var):
        # BUGFIX: the original indexed ``var.__args__[0]`` unconditionally
        # here, which raises for a bare ``typing.List`` with no arguments
        nested_args = getattr(var, "__args__", None) or ()
        if len(nested_args) > 0:
            schema["items"].update(
                _build(nested_args[0], property_path=property_path + ["items"])
            )
    return schema
def _build_object_type(var, property_path=None):
    """ Builds schema definitions for object type values.

    :param var: The object type value (builtin type, config var, or typing type)
    :param List[str] property_path: The property path of the current type,
        defaults to None, optional
    :raises ValueError: When the mapping key type is neither a string nor a
        regex type (such keys cannot be expressed as JSON object keys)
    :return: The built schema definition
    :rtype: Dict[str, Any]
    """
    if not property_path:
        property_path = []
    schema = {"type": "object"}
    if is_builtin_type(var):
        return schema
    if is_config_var(var):
        # only config vars carry attrs metadata with min/max entries
        entry = var.metadata[CONFIG_KEY]
        if isinstance(entry.min, int):
            schema["minProperties"] = entry.min
        if isinstance(entry.max, int):
            schema["maxProperties"] = entry.max
        dict_type = var.type
    else:
        # BUGFIX: the original read ``var.metadata[CONFIG_KEY]`` without this
        # guard, so a bare ``typing.Dict`` reaching this builder (e.g. via
        # ``_build_type`` union handling) raised AttributeError
        dict_type = var
    # NOTE: typing.Dict only accepts two typing arguments
    if is_typing_type(dict_type) and len(getattr(dict_type, "__args__", ()) or ()) == 2:
        (key_type, value_type) = dict_type.__args__
        key_pattern = "^(.*)$"
        if is_regex_type(key_type):
            key_pattern = key_type.__supertype__.pattern
        elif not is_string_type(key_type):
            raise ValueError(
                f"cannot serialize object with key of type {key_type!r}, "
                f"located in var {getattr(var, 'name', var)!r}"
            )
        schema["patternProperties"] = {
            key_pattern: _build(value_type, property_path=property_path)
        }
    return schema
def _build_type(type_, value, property_path=None):
    """ Builds the schema definition based on the given type for the given value.

    :param type_: The type of the value
    :param value: The value to build the schema definition for
    :param List[str] property_path: The property path of the current type,
        defaults to None, optional
    :return: The built schema definition
    :rtype: Dict[str, Any]
    """
    property_path = property_path or []
    # ordered dispatch table: first matching predicate wins
    dispatch = (
        (is_enum_type, _build_enum_type),
        (is_null_type, _build_null_type),
        (is_bool_type, _build_bool_type),
        (is_string_type, _build_string_type),
        (is_integer_type, _build_integer_type),
        (is_number_type, _build_number_type),
        (is_array_type, _build_array_type),
        (is_object_type, _build_object_type),
    )
    builder = next(
        (build for matches, build in dispatch if matches(type_)), None
    )
    if builder is not None:
        return builder(value, property_path=property_path)
    # NOTE: warning ignores type None (as that is the config var default)
    if type_:
        warnings.warn(f"unhandled translation for type {type_!r} with value {value!r}")
    return {}
def _build_var(var, property_path=None):
    """ Builds a schema definition for a given config var.

    :param attr._make.Attribute var: The var to generate a schema definition for
    :param List[str] property_path: The property path of the current type,
        defaults to None, optional
    :raises ValueError: When the given ``var`` is not a file_config var
    :return: The built schema definition
    :rtype: Dict[str, Any]
    """
    # BUGFIX: ``collections.Iterable`` was deprecated in 3.3 and removed in
    # Python 3.10 -- use ``collections.abc.Iterable``; the local import keeps
    # this fix self-contained
    import collections.abc

    if not property_path:
        property_path = []
    if not is_config_var(var):
        raise ValueError(f"var {var!r} is not a config var")
    entry = var.metadata[CONFIG_KEY]
    # the serialized name may be overridden on the entry
    var_name = entry.name if entry.name else var.name
    schema = {"$id": f"#/{'/'.join(property_path)}/{var_name}"}
    if var.default is not None:
        schema["default"] = var.default
    if entry is not None:
        if isinstance(entry.title, str):
            schema["title"] = entry.title
        if isinstance(entry.description, str):
            schema["description"] = entry.description
        if (
            isinstance(entry.examples, collections.abc.Iterable)
            and len(entry.examples) > 0
        ):
            schema["examples"] = entry.examples
    # handle typing.Union types by simply using the "anyOf" key
    if is_union_type(var.type):
        type_union = {"anyOf": []}
        for allowed_type in var.type.__args__:
            # NOTE: requires jsonschema draft-07
            type_union["anyOf"].append(
                _build_type(
                    allowed_type, allowed_type, property_path=property_path + [var_name]
                )
            )
        schema.update(type_union)
    else:
        schema.update(
            _build_type(var.type, var, property_path=property_path + [var_name])
        )
    return schema
def _build_config(config_cls, property_path=None):
    """ Builds the schema definition for a given config class.

    :param class config_cls: The config class to build a schema definition for
    :param List[str] property_path: The property path of the current type,
        defaults to None, optional
    :raises ValueError: When the given ``config_cls`` is not a config decorated class
    :return: The built schema definition
    :rtype: Dict[str, Any]
    """
    if not property_path:
        property_path = []
    if not is_config_type(config_cls):
        raise ValueError(f"class {config_cls!r} is not a config class")
    schema = {"type": "object", "required": [], "properties": {}}
    cls_entry = getattr(config_cls, CONFIG_KEY)
    # add schema title, defaults to config classes `__qualname__`
    schema_title = cls_entry.get("title", config_cls.__qualname__)
    if isinstance(schema_title, str):
        schema["title"] = schema_title
    schema_description = cls_entry.get("description")
    if isinstance(schema_description, str):
        schema["description"] = schema_description
    # if the length of the property path is 0, assume that current object is root
    if len(property_path) <= 0:
        schema["$id"] = f"{config_cls.__qualname__}.json"
        # NOTE: requires draft-07 for typing.Union type schema generation
        schema["$schema"] = "http://json-schema.org/draft-07/schema#"
    else:
        schema["$id"] = f"#/{'/'.join(property_path)}"
    # BUGFIX: build a new list instead of ``property_path.append("properties")``
    # so the caller's list is not mutated as a side effect of this call
    property_path = property_path + ["properties"]
    for var in attr.fields(config_cls):
        if not is_config_var(var):
            # encountered attribute is not a serialized field (i.e. missing CONFIG_KEY)
            continue
        entry = var.metadata[CONFIG_KEY]
        var_name = entry.name if entry.name else var.name
        if entry.required:
            schema["required"].append(var_name)
        if is_config_type(var.type):
            # nested config classes recurse with an extended path
            schema["properties"][var_name] = _build_config(
                var.type, property_path=property_path + [var_name]
            )
        else:
            schema["properties"][var_name] = _build_var(
                var, property_path=property_path
            )
    return schema
def _build(value, property_path=None):
    """ The generic schema definition build method.

    Routes ``value`` to the appropriate builder: config classes, config vars,
    builtin types, regex types, and typing types each get their own handler.

    :param value: The value to build a schema definition for
    :param List[str] property_path: The property path of the current type,
        defaults to None, optional
    :return: The built schema definition
    :rtype: Dict[str, Any]
    """
    path = property_path if property_path else []
    if is_config_type(value):
        return _build_config(value, property_path=path)
    if is_config_var(value):
        return _build_var(value, property_path=path)
    if is_builtin_type(value):
        return _build_type(value, value, property_path=path)
    if is_regex_type(value):
        # NOTE: building regular expression types assumes type is string
        return _build_type(str, value, property_path=path)
    if is_typing_type(value):
        return _build_type(value, value, property_path=path)
    # fall back to the runtime type of the given value
    return _build_type(type(value), value, property_path=path)
def _get_types(type_):
    """ Gathers all types within the ``TYPE_MAPPINGS`` for a specific ``Types`` value.

    :param Types type_: The type to retrieve
    :return: All types within the ``TYPE_MAPPINGS``
    :rtype: list
    """
    # flatten the per-mapping type lists into one list, preserving the
    # iteration order of TYPE_MAPPINGS
    collected = []
    for mapping_key in TYPE_MAPPINGS:
        collected.extend(TYPE_MAPPINGS[mapping_key].get(type_, []))
    return collected
def decode_bytes(string):
    """ Decodes a given base64 string into bytes.

    :param str string: The string to decode
    :return: The decoded bytes
    :rtype: bytes
    """
    payload = string
    # base64 decoding wants bytes, so encode text input first
    if is_string_type(type(payload)):
        payload = bytes(payload, "utf-8")
    return base64.decodebytes(payload)
def is_config_var(var):
    """ Checks if the given value is a valid ``file_config.var``.

    :param var: The value to check
    :return: True if the given value is a var, otherwise False
    :rtype: bool
    """
    # must be an attrs attribute instance (built or still counting)
    attr_classes = (attr._make.Attribute, attr._make._CountingAttr)
    if not isinstance(var, attr_classes):
        return False
    # and must carry the file_config marker in its metadata
    return hasattr(var, "metadata") and CONFIG_KEY in var.metadata
def is_config_type(type_):
    """ Checks if the given type is ``file_config.config`` decorated.

    :param type_: The type to check
    :return: True if the type is config decorated, otherwise False
    :rtype: bool
    """
    # a config class is attrs-decorated AND carries the file_config marker
    required_attrs = ("__attrs_attrs__", CONFIG_KEY)
    return isinstance(type_, type) and all(
        hasattr(type_, name) for name in required_attrs
    )
def is_enum_type(type_):
    """ Checks if the given type is an enum type.

    :param type_: The type to check
    :return: True if the type is a enum type, otherwise False
    :rtype: bool
    """
    if not isinstance(type_, type):
        return False
    enum_bases = tuple(_get_types(Types.ENUM))
    return issubclass(type_, enum_bases)
def is_regex_type(type_):
    """ Checks if the given type is a regex type.

    :param type_: The type to check
    :return: True if the type is a regex type, otherwise False
    :rtype: bool
    """
    # regex types are NewType callables named REGEX_TYPE_NAME whose
    # supertype is a compiled pattern
    if not callable(type_):
        return False
    if getattr(type_, "__name__", None) != REGEX_TYPE_NAME:
        return False
    if not hasattr(type_, "__supertype__"):
        return False
    return is_compiled_pattern(type_.__supertype__)
def is_union_type(type_):
    """ Checks if the given type is a union type.

    :param type_: The type to check
    :return: True if the type is a union type, otherwise False
    :rtype: bool
    """
    # NOTE: union types can only be from typing module
    if not is_typing_type(type_):
        return False
    if not hasattr(type_, "__origin__"):
        return False
    return type_.__origin__ in _get_types(Types.UNION)
stephen-bunn/file-config | src/file_config/utils.py | is_string_type | def is_string_type(type_):
""" Checks if the given type is a string type.
:param type_: The type to check
:return: True if the type is a string type, otherwise False
:rtype: bool
"""
string_types = _get_types(Types.STRING)
if is_typing_type(type_):
return type_ in string_types or is_regex_type(type_)
return type_ in string_types | python | def is_string_type(type_):
""" Checks if the given type is a string type.
:param type_: The type to check
:return: True if the type is a string type, otherwise False
:rtype: bool
"""
string_types = _get_types(Types.STRING)
if is_typing_type(type_):
return type_ in string_types or is_regex_type(type_)
return type_ in string_types | [
"def",
"is_string_type",
"(",
"type_",
")",
":",
"string_types",
"=",
"_get_types",
"(",
"Types",
".",
"STRING",
")",
"if",
"is_typing_type",
"(",
"type_",
")",
":",
"return",
"type_",
"in",
"string_types",
"or",
"is_regex_type",
"(",
"type_",
")",
"return",... | Checks if the given type is a string type.
:param type_: The type to check
:return: True if the type is a string type, otherwise False
:rtype: bool | [
"Checks",
"if",
"the",
"given",
"type",
"is",
"a",
"string",
"type",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/utils.py#L281-L292 | train | 34,157 |
stephen-bunn/file-config | src/file_config/utils.py | is_array_type | def is_array_type(type_):
""" Checks if the given type is a array type.
:param type_: The type to check
:return: True if the type is a array type, otherwise False
:rtype: bool
"""
array_types = _get_types(Types.ARRAY)
if is_typing_type(type_):
return type_ in array_types or (
hasattr(type_, "__origin__") and type_.__origin__ in array_types
)
return type_ in array_types | python | def is_array_type(type_):
""" Checks if the given type is a array type.
:param type_: The type to check
:return: True if the type is a array type, otherwise False
:rtype: bool
"""
array_types = _get_types(Types.ARRAY)
if is_typing_type(type_):
return type_ in array_types or (
hasattr(type_, "__origin__") and type_.__origin__ in array_types
)
return type_ in array_types | [
"def",
"is_array_type",
"(",
"type_",
")",
":",
"array_types",
"=",
"_get_types",
"(",
"Types",
".",
"ARRAY",
")",
"if",
"is_typing_type",
"(",
"type_",
")",
":",
"return",
"type_",
"in",
"array_types",
"or",
"(",
"hasattr",
"(",
"type_",
",",
"\"__origin_... | Checks if the given type is a array type.
:param type_: The type to check
:return: True if the type is a array type, otherwise False
:rtype: bool | [
"Checks",
"if",
"the",
"given",
"type",
"is",
"a",
"array",
"type",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/utils.py#L320-L333 | train | 34,158 |
stephen-bunn/file-config | src/file_config/utils.py | is_object_type | def is_object_type(type_):
""" Checks if the given type is a object type.
:param type_: The type to check
:return: True if the type is a object type, otherwise False
:rtype: bool
"""
object_types = _get_types(Types.OBJECT)
if is_typing_type(type_):
return type_ in object_types or (
hasattr(type_, "__origin__") and type_.__origin__ in object_types
)
return type_ in object_types | python | def is_object_type(type_):
""" Checks if the given type is a object type.
:param type_: The type to check
:return: True if the type is a object type, otherwise False
:rtype: bool
"""
object_types = _get_types(Types.OBJECT)
if is_typing_type(type_):
return type_ in object_types or (
hasattr(type_, "__origin__") and type_.__origin__ in object_types
)
return type_ in object_types | [
"def",
"is_object_type",
"(",
"type_",
")",
":",
"object_types",
"=",
"_get_types",
"(",
"Types",
".",
"OBJECT",
")",
"if",
"is_typing_type",
"(",
"type_",
")",
":",
"return",
"type_",
"in",
"object_types",
"or",
"(",
"hasattr",
"(",
"type_",
",",
"\"__ori... | Checks if the given type is a object type.
:param type_: The type to check
:return: True if the type is a object type, otherwise False
:rtype: bool | [
"Checks",
"if",
"the",
"given",
"type",
"is",
"a",
"object",
"type",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/utils.py#L336-L349 | train | 34,159 |
stephen-bunn/file-config | src/file_config/utils.py | typecast | def typecast(type_, value):
""" Tries to smartly typecast the given value with the given type.
:param type_: The type to try to use for the given value
:param value: The value to try and typecast to the given type
:return: The typecasted value if possible, otherwise just the original value
"""
# NOTE: does not do any special validation of types before casting
# will just raise errors on type casting failures
if is_builtin_type(type_) or is_collections_type(type_) or is_enum_type(type_):
# FIXME: move to Types enum and TYPE_MAPPING entry
if is_bytes_type(type_):
return decode_bytes(value)
return type_(value)
elif is_regex_type(type_):
return typecast(str, value)
elif is_typing_type(type_):
try:
base_type = type_.__extra__
except AttributeError:
# NOTE: when handling typing._GenericAlias __extra__ is actually __origin__
base_type = type_.__origin__
arg_types = type_.__args__
if is_array_type(type_):
if len(arg_types) == 1:
item_type = arg_types[0]
return base_type([typecast(item_type, item) for item in value])
else:
return base_type(value)
elif is_object_type(type_):
if len(arg_types) == 2:
(key_type, item_type) = arg_types
return base_type(
{
typecast(key_type, key): typecast(item_type, item)
for (key, item) in value.items()
}
)
else:
return base_type(value)
else:
return base_type(value)
else:
return value | python | def typecast(type_, value):
""" Tries to smartly typecast the given value with the given type.
:param type_: The type to try to use for the given value
:param value: The value to try and typecast to the given type
:return: The typecasted value if possible, otherwise just the original value
"""
# NOTE: does not do any special validation of types before casting
# will just raise errors on type casting failures
if is_builtin_type(type_) or is_collections_type(type_) or is_enum_type(type_):
# FIXME: move to Types enum and TYPE_MAPPING entry
if is_bytes_type(type_):
return decode_bytes(value)
return type_(value)
elif is_regex_type(type_):
return typecast(str, value)
elif is_typing_type(type_):
try:
base_type = type_.__extra__
except AttributeError:
# NOTE: when handling typing._GenericAlias __extra__ is actually __origin__
base_type = type_.__origin__
arg_types = type_.__args__
if is_array_type(type_):
if len(arg_types) == 1:
item_type = arg_types[0]
return base_type([typecast(item_type, item) for item in value])
else:
return base_type(value)
elif is_object_type(type_):
if len(arg_types) == 2:
(key_type, item_type) = arg_types
return base_type(
{
typecast(key_type, key): typecast(item_type, item)
for (key, item) in value.items()
}
)
else:
return base_type(value)
else:
return base_type(value)
else:
return value | [
"def",
"typecast",
"(",
"type_",
",",
"value",
")",
":",
"# NOTE: does not do any special validation of types before casting",
"# will just raise errors on type casting failures",
"if",
"is_builtin_type",
"(",
"type_",
")",
"or",
"is_collections_type",
"(",
"type_",
")",
"or"... | Tries to smartly typecast the given value with the given type.
:param type_: The type to try to use for the given value
:param value: The value to try and typecast to the given type
:return: The typecasted value if possible, otherwise just the original value | [
"Tries",
"to",
"smartly",
"typecast",
"the",
"given",
"value",
"with",
"the",
"given",
"type",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/utils.py#L352-L397 | train | 34,160 |
stephen-bunn/file-config | tasks/docs.py | clean | def clean(ctx):
""" Clean built docs.
"""
clean_command = f"make clean"
with ctx.cd(ctx.docs.directory.as_posix()):
report.info(ctx, "docs.clean", "cleaning documentation artifacts")
ctx.run(clean_command) | python | def clean(ctx):
""" Clean built docs.
"""
clean_command = f"make clean"
with ctx.cd(ctx.docs.directory.as_posix()):
report.info(ctx, "docs.clean", "cleaning documentation artifacts")
ctx.run(clean_command) | [
"def",
"clean",
"(",
"ctx",
")",
":",
"clean_command",
"=",
"f\"make clean\"",
"with",
"ctx",
".",
"cd",
"(",
"ctx",
".",
"docs",
".",
"directory",
".",
"as_posix",
"(",
")",
")",
":",
"report",
".",
"info",
"(",
"ctx",
",",
"\"docs.clean\"",
",",
"\... | Clean built docs. | [
"Clean",
"built",
"docs",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/tasks/docs.py#L14-L21 | train | 34,161 |
stephen-bunn/file-config | tasks/docs.py | build_news | def build_news(ctx, draft=False, yes=False):
""" Build towncrier newsfragments.
"""
report.info(ctx, "docs.build-news", "building changelog from news fragments")
build_command = f"towncrier --version {ctx.metadata['version']}"
if draft:
report.warn(
ctx,
"docs.build-news",
"building changelog as draft (results are written to stdout)",
)
build_command += " --draft"
elif yes:
report.warn(
ctx, "docs.build-news", "removing news files without user confirmation (-y)"
)
build_command += " --yes"
ctx.run(build_command, hide=None) | python | def build_news(ctx, draft=False, yes=False):
""" Build towncrier newsfragments.
"""
report.info(ctx, "docs.build-news", "building changelog from news fragments")
build_command = f"towncrier --version {ctx.metadata['version']}"
if draft:
report.warn(
ctx,
"docs.build-news",
"building changelog as draft (results are written to stdout)",
)
build_command += " --draft"
elif yes:
report.warn(
ctx, "docs.build-news", "removing news files without user confirmation (-y)"
)
build_command += " --yes"
ctx.run(build_command, hide=None) | [
"def",
"build_news",
"(",
"ctx",
",",
"draft",
"=",
"False",
",",
"yes",
"=",
"False",
")",
":",
"report",
".",
"info",
"(",
"ctx",
",",
"\"docs.build-news\"",
",",
"\"building changelog from news fragments\"",
")",
"build_command",
"=",
"f\"towncrier --version {c... | Build towncrier newsfragments. | [
"Build",
"towncrier",
"newsfragments",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/tasks/docs.py#L25-L43 | train | 34,162 |
stephen-bunn/file-config | tasks/docs.py | build | def build(ctx, output="html"):
""" Build docs.
"""
with ctx.cd(ctx.docs.directory.as_posix()):
build_command = f"make {output}"
report.info(ctx, "docs.build", f"building {output!r} documentation")
ctx.run(build_command) | python | def build(ctx, output="html"):
""" Build docs.
"""
with ctx.cd(ctx.docs.directory.as_posix()):
build_command = f"make {output}"
report.info(ctx, "docs.build", f"building {output!r} documentation")
ctx.run(build_command) | [
"def",
"build",
"(",
"ctx",
",",
"output",
"=",
"\"html\"",
")",
":",
"with",
"ctx",
".",
"cd",
"(",
"ctx",
".",
"docs",
".",
"directory",
".",
"as_posix",
"(",
")",
")",
":",
"build_command",
"=",
"f\"make {output}\"",
"report",
".",
"info",
"(",
"c... | Build docs. | [
"Build",
"docs",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/tasks/docs.py#L47-L54 | train | 34,163 |
stephen-bunn/file-config | tasks/docs.py | view | def view(ctx):
""" Build and view docs.
"""
report.info(ctx, "docs.view", f"viewing documentation")
build_path = ctx.docs.directory / "build" / "html" / "index.html"
build_path = pathname2url(build_path.as_posix())
webbrowser.open(f"file:{build_path!s}") | python | def view(ctx):
""" Build and view docs.
"""
report.info(ctx, "docs.view", f"viewing documentation")
build_path = ctx.docs.directory / "build" / "html" / "index.html"
build_path = pathname2url(build_path.as_posix())
webbrowser.open(f"file:{build_path!s}") | [
"def",
"view",
"(",
"ctx",
")",
":",
"report",
".",
"info",
"(",
"ctx",
",",
"\"docs.view\"",
",",
"f\"viewing documentation\"",
")",
"build_path",
"=",
"ctx",
".",
"docs",
".",
"directory",
"/",
"\"build\"",
"/",
"\"html\"",
"/",
"\"index.html\"",
"build_pa... | Build and view docs. | [
"Build",
"and",
"view",
"docs",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/tasks/docs.py#L58-L65 | train | 34,164 |
stephen-bunn/file-config | src/file_config/contrib/ini_parser.py | INIParser._encode_var | def _encode_var(cls, var):
""" Encodes a variable to the appropriate string format for ini files.
:param var: The variable to encode
:return: The ini representation of the variable
:rtype: str
"""
if isinstance(var, str):
if any(_ in var for _ in cls.requires_quotes):
# NOTE: quoted strings should just use '"' according to the spec
return '"' + var.replace('"', '\\"') + '"'
return var
else:
return str(var) | python | def _encode_var(cls, var):
""" Encodes a variable to the appropriate string format for ini files.
:param var: The variable to encode
:return: The ini representation of the variable
:rtype: str
"""
if isinstance(var, str):
if any(_ in var for _ in cls.requires_quotes):
# NOTE: quoted strings should just use '"' according to the spec
return '"' + var.replace('"', '\\"') + '"'
return var
else:
return str(var) | [
"def",
"_encode_var",
"(",
"cls",
",",
"var",
")",
":",
"if",
"isinstance",
"(",
"var",
",",
"str",
")",
":",
"if",
"any",
"(",
"_",
"in",
"var",
"for",
"_",
"in",
"cls",
".",
"requires_quotes",
")",
":",
"# NOTE: quoted strings should just use '\"' accord... | Encodes a variable to the appropriate string format for ini files.
:param var: The variable to encode
:return: The ini representation of the variable
:rtype: str | [
"Encodes",
"a",
"variable",
"to",
"the",
"appropriate",
"string",
"format",
"for",
"ini",
"files",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/contrib/ini_parser.py#L33-L47 | train | 34,165 |
stephen-bunn/file-config | src/file_config/contrib/ini_parser.py | INIParser._decode_var | def _decode_var(cls, string):
""" Decodes a given string into the appropriate type in Python.
:param str string: The string to decode
:return: The decoded value
"""
str_match = cls.quoted_string_regex.match(string)
if str_match:
return string.strip("'" if str_match.groups()[0] else '"')
# NOTE: "¹".isdigit() results in True because they are idiots
elif string.isdigit() and cls.is_digit_regex.match(string) is not None:
return int(string)
elif string.lower() in ("true", "false"):
return string.lower() == "true"
elif string.lstrip("-").isdigit():
try:
return int(string)
except ValueError:
# case where we mistake something like "--0" as a int
return string
elif "." in string.lstrip("-"):
try:
return float(string)
except ValueError:
# one off case where we mistake a single "." as a float
return string
else:
return string | python | def _decode_var(cls, string):
""" Decodes a given string into the appropriate type in Python.
:param str string: The string to decode
:return: The decoded value
"""
str_match = cls.quoted_string_regex.match(string)
if str_match:
return string.strip("'" if str_match.groups()[0] else '"')
# NOTE: "¹".isdigit() results in True because they are idiots
elif string.isdigit() and cls.is_digit_regex.match(string) is not None:
return int(string)
elif string.lower() in ("true", "false"):
return string.lower() == "true"
elif string.lstrip("-").isdigit():
try:
return int(string)
except ValueError:
# case where we mistake something like "--0" as a int
return string
elif "." in string.lstrip("-"):
try:
return float(string)
except ValueError:
# one off case where we mistake a single "." as a float
return string
else:
return string | [
"def",
"_decode_var",
"(",
"cls",
",",
"string",
")",
":",
"str_match",
"=",
"cls",
".",
"quoted_string_regex",
".",
"match",
"(",
"string",
")",
"if",
"str_match",
":",
"return",
"string",
".",
"strip",
"(",
"\"'\"",
"if",
"str_match",
".",
"groups",
"(... | Decodes a given string into the appropriate type in Python.
:param str string: The string to decode
:return: The decoded value | [
"Decodes",
"a",
"given",
"string",
"into",
"the",
"appropriate",
"type",
"in",
"Python",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/contrib/ini_parser.py#L50-L78 | train | 34,166 |
stephen-bunn/file-config | src/file_config/contrib/ini_parser.py | INIParser._build_dict | def _build_dict(
cls, parser_dict, delimiter=DEFAULT_DELIMITER, dict_type=collections.OrderedDict
):
""" Builds a dictionary of ``dict_type`` given the ``parser._sections`` dict.
:param dict parser_dict: The ``parser._sections`` mapping
:param str delimiter: The delimiter for nested dictionaries,
defaults to ":", optional
:param class dict_type: The dictionary type to use for building the dict,
defaults to :class:`collections.OrderedDict`, optional
:return: The resulting dictionary
:rtype: dict
"""
result = dict_type()
for (key, value) in parser_dict.items():
if isinstance(value, dict):
nestings = key.split(delimiter)
# build nested dictionaries if they don't exist (up to 2nd to last key)
base_dict = result
for nested_key in nestings[:-1]:
if nested_key not in base_dict:
base_dict[nested_key] = dict_type()
base_dict = base_dict[nested_key]
base_dict[nestings[-1]] = cls._build_dict(
parser_dict.get(key), delimiter=delimiter, dict_type=dict_type
)
else:
if "\n" in value:
result[key] = [
cls._decode_var(_) for _ in value.lstrip("\n").split("\n")
]
else:
result[key] = cls._decode_var(value)
return result | python | def _build_dict(
cls, parser_dict, delimiter=DEFAULT_DELIMITER, dict_type=collections.OrderedDict
):
""" Builds a dictionary of ``dict_type`` given the ``parser._sections`` dict.
:param dict parser_dict: The ``parser._sections`` mapping
:param str delimiter: The delimiter for nested dictionaries,
defaults to ":", optional
:param class dict_type: The dictionary type to use for building the dict,
defaults to :class:`collections.OrderedDict`, optional
:return: The resulting dictionary
:rtype: dict
"""
result = dict_type()
for (key, value) in parser_dict.items():
if isinstance(value, dict):
nestings = key.split(delimiter)
# build nested dictionaries if they don't exist (up to 2nd to last key)
base_dict = result
for nested_key in nestings[:-1]:
if nested_key not in base_dict:
base_dict[nested_key] = dict_type()
base_dict = base_dict[nested_key]
base_dict[nestings[-1]] = cls._build_dict(
parser_dict.get(key), delimiter=delimiter, dict_type=dict_type
)
else:
if "\n" in value:
result[key] = [
cls._decode_var(_) for _ in value.lstrip("\n").split("\n")
]
else:
result[key] = cls._decode_var(value)
return result | [
"def",
"_build_dict",
"(",
"cls",
",",
"parser_dict",
",",
"delimiter",
"=",
"DEFAULT_DELIMITER",
",",
"dict_type",
"=",
"collections",
".",
"OrderedDict",
")",
":",
"result",
"=",
"dict_type",
"(",
")",
"for",
"(",
"key",
",",
"value",
")",
"in",
"parser_... | Builds a dictionary of ``dict_type`` given the ``parser._sections`` dict.
:param dict parser_dict: The ``parser._sections`` mapping
:param str delimiter: The delimiter for nested dictionaries,
defaults to ":", optional
:param class dict_type: The dictionary type to use for building the dict,
defaults to :class:`collections.OrderedDict`, optional
:return: The resulting dictionary
:rtype: dict | [
"Builds",
"a",
"dictionary",
"of",
"dict_type",
"given",
"the",
"parser",
".",
"_sections",
"dict",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/contrib/ini_parser.py#L81-L117 | train | 34,167 |
stephen-bunn/file-config | src/file_config/contrib/ini_parser.py | INIParser._build_parser | def _build_parser(
cls,
dictionary,
parser,
section_name,
delimiter=DEFAULT_DELIMITER,
empty_sections=False,
):
""" Populates a parser instance with the content of a dictionary.
:param dict dictionary: The dictionary to use for populating the parser instance
:param configparser.ConfigParser parser: The parser instance
:param str section_name: The current section name to add the dictionary keys to
:param str delimiter: The nested dictionary delimiter character,
defaults to ":", optional
:param bool empty_sections: Flag to allow the representation of empty sections
to exist, defaults to False, optional
:return: The populated parser
:rtype: configparser.ConfigParser
"""
for (key, value) in dictionary.items():
if isinstance(value, dict):
nested_section = delimiter.join([section_name, key])
is_empty = all(isinstance(_, dict) for _ in value.values())
if not is_empty or empty_sections:
parser.add_section(nested_section)
cls._build_parser(value, parser, nested_section, delimiter=delimiter)
elif isinstance(value, (list, tuple, set, frozenset)):
if any(isinstance(_, dict) for _ in value):
raise ValueError(
f"INI files cannot support arrays with mappings, "
f"found in key {key!r}"
)
parser.set(
section_name, key, "\n".join(cls._encode_var(_) for _ in value)
)
else:
parser.set(section_name, key, cls._encode_var(value))
return parser | python | def _build_parser(
cls,
dictionary,
parser,
section_name,
delimiter=DEFAULT_DELIMITER,
empty_sections=False,
):
""" Populates a parser instance with the content of a dictionary.
:param dict dictionary: The dictionary to use for populating the parser instance
:param configparser.ConfigParser parser: The parser instance
:param str section_name: The current section name to add the dictionary keys to
:param str delimiter: The nested dictionary delimiter character,
defaults to ":", optional
:param bool empty_sections: Flag to allow the representation of empty sections
to exist, defaults to False, optional
:return: The populated parser
:rtype: configparser.ConfigParser
"""
for (key, value) in dictionary.items():
if isinstance(value, dict):
nested_section = delimiter.join([section_name, key])
is_empty = all(isinstance(_, dict) for _ in value.values())
if not is_empty or empty_sections:
parser.add_section(nested_section)
cls._build_parser(value, parser, nested_section, delimiter=delimiter)
elif isinstance(value, (list, tuple, set, frozenset)):
if any(isinstance(_, dict) for _ in value):
raise ValueError(
f"INI files cannot support arrays with mappings, "
f"found in key {key!r}"
)
parser.set(
section_name, key, "\n".join(cls._encode_var(_) for _ in value)
)
else:
parser.set(section_name, key, cls._encode_var(value))
return parser | [
"def",
"_build_parser",
"(",
"cls",
",",
"dictionary",
",",
"parser",
",",
"section_name",
",",
"delimiter",
"=",
"DEFAULT_DELIMITER",
",",
"empty_sections",
"=",
"False",
",",
")",
":",
"for",
"(",
"key",
",",
"value",
")",
"in",
"dictionary",
".",
"items... | Populates a parser instance with the content of a dictionary.
:param dict dictionary: The dictionary to use for populating the parser instance
:param configparser.ConfigParser parser: The parser instance
:param str section_name: The current section name to add the dictionary keys to
:param str delimiter: The nested dictionary delimiter character,
defaults to ":", optional
:param bool empty_sections: Flag to allow the representation of empty sections
to exist, defaults to False, optional
:return: The populated parser
:rtype: configparser.ConfigParser | [
"Populates",
"a",
"parser",
"instance",
"with",
"the",
"content",
"of",
"a",
"dictionary",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/contrib/ini_parser.py#L120-L159 | train | 34,168 |
stephen-bunn/file-config | src/file_config/contrib/ini_parser.py | INIParser.from_dict | def from_dict(
cls,
dictionary,
root_section="root",
delimiter=DEFAULT_DELIMITER,
empty_sections=False,
):
""" Create an instance of ``INIParser`` from a given dictionary.
:param dict dictionary: The dictionary to create an instance from
:param str root_section: The root key of the ini content, defaults to "root",
optional
:param str delimiter: The delimiter character to use for nested dictionaries,
defaults to ":", optional
:param bool empty_sections: Flag to allow representation of empty sections to
exist, defaults to False, optional
:return: The new ``INIParser`` instance
:rtype: INIParser
"""
parser = cls()
parser.add_section(root_section)
return cls._build_parser(
dictionary,
parser,
root_section,
delimiter=delimiter,
empty_sections=empty_sections,
) | python | def from_dict(
cls,
dictionary,
root_section="root",
delimiter=DEFAULT_DELIMITER,
empty_sections=False,
):
""" Create an instance of ``INIParser`` from a given dictionary.
:param dict dictionary: The dictionary to create an instance from
:param str root_section: The root key of the ini content, defaults to "root",
optional
:param str delimiter: The delimiter character to use for nested dictionaries,
defaults to ":", optional
:param bool empty_sections: Flag to allow representation of empty sections to
exist, defaults to False, optional
:return: The new ``INIParser`` instance
:rtype: INIParser
"""
parser = cls()
parser.add_section(root_section)
return cls._build_parser(
dictionary,
parser,
root_section,
delimiter=delimiter,
empty_sections=empty_sections,
) | [
"def",
"from_dict",
"(",
"cls",
",",
"dictionary",
",",
"root_section",
"=",
"\"root\"",
",",
"delimiter",
"=",
"DEFAULT_DELIMITER",
",",
"empty_sections",
"=",
"False",
",",
")",
":",
"parser",
"=",
"cls",
"(",
")",
"parser",
".",
"add_section",
"(",
"roo... | Create an instance of ``INIParser`` from a given dictionary.
:param dict dictionary: The dictionary to create an instance from
:param str root_section: The root key of the ini content, defaults to "root",
optional
:param str delimiter: The delimiter character to use for nested dictionaries,
defaults to ":", optional
:param bool empty_sections: Flag to allow representation of empty sections to
exist, defaults to False, optional
:return: The new ``INIParser`` instance
:rtype: INIParser | [
"Create",
"an",
"instance",
"of",
"INIParser",
"from",
"a",
"given",
"dictionary",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/contrib/ini_parser.py#L162-L190 | train | 34,169 |
stephen-bunn/file-config | src/file_config/contrib/ini_parser.py | INIParser.to_dict | def to_dict(self, delimiter=DEFAULT_DELIMITER, dict_type=collections.OrderedDict):
""" Get the dictionary representation of the current parser.
:param str delimiter: The delimiter used for nested dictionaries,
defaults to ":", optional
:param class dict_type: The dictionary type to use for building the dictionary
reperesentation, defaults to collections.OrderedDict, optional
:return: The dictionary representation of the parser instance
:rtype: dict
"""
root_key = self.sections()[0]
return self._build_dict(
self._sections, delimiter=delimiter, dict_type=dict_type
).get(root_key, {}) | python | def to_dict(self, delimiter=DEFAULT_DELIMITER, dict_type=collections.OrderedDict):
""" Get the dictionary representation of the current parser.
:param str delimiter: The delimiter used for nested dictionaries,
defaults to ":", optional
:param class dict_type: The dictionary type to use for building the dictionary
reperesentation, defaults to collections.OrderedDict, optional
:return: The dictionary representation of the parser instance
:rtype: dict
"""
root_key = self.sections()[0]
return self._build_dict(
self._sections, delimiter=delimiter, dict_type=dict_type
).get(root_key, {}) | [
"def",
"to_dict",
"(",
"self",
",",
"delimiter",
"=",
"DEFAULT_DELIMITER",
",",
"dict_type",
"=",
"collections",
".",
"OrderedDict",
")",
":",
"root_key",
"=",
"self",
".",
"sections",
"(",
")",
"[",
"0",
"]",
"return",
"self",
".",
"_build_dict",
"(",
"... | Get the dictionary representation of the current parser.
:param str delimiter: The delimiter used for nested dictionaries,
defaults to ":", optional
:param class dict_type: The dictionary type to use for building the dictionary
reperesentation, defaults to collections.OrderedDict, optional
:return: The dictionary representation of the parser instance
:rtype: dict | [
"Get",
"the",
"dictionary",
"representation",
"of",
"the",
"current",
"parser",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/contrib/ini_parser.py#L205-L219 | train | 34,170 |
stephen-bunn/file-config | src/file_config/contrib/ini_parser.py | INIParser.to_ini | def to_ini(self):
""" Get the ini string of the current parser.
:return: The ini string of the current parser
:rtype: str
"""
fake_io = io.StringIO()
self.write(fake_io)
return fake_io.getvalue() | python | def to_ini(self):
""" Get the ini string of the current parser.
:return: The ini string of the current parser
:rtype: str
"""
fake_io = io.StringIO()
self.write(fake_io)
return fake_io.getvalue() | [
"def",
"to_ini",
"(",
"self",
")",
":",
"fake_io",
"=",
"io",
".",
"StringIO",
"(",
")",
"self",
".",
"write",
"(",
"fake_io",
")",
"return",
"fake_io",
".",
"getvalue",
"(",
")"
] | Get the ini string of the current parser.
:return: The ini string of the current parser
:rtype: str | [
"Get",
"the",
"ini",
"string",
"of",
"the",
"current",
"parser",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/contrib/ini_parser.py#L221-L230 | train | 34,171 |
stephen-bunn/file-config | tasks/package.py | format | def format(ctx):
""" Auto format package source files.
"""
isort_command = f"isort -rc {ctx.package.directory!s}"
black_command = f"black {ctx.package.directory.parent!s}"
report.info(ctx, "package.format", "sorting imports")
ctx.run(isort_command)
report.info(ctx, "package.format", "formatting code")
ctx.run(black_command) | python | def format(ctx):
""" Auto format package source files.
"""
isort_command = f"isort -rc {ctx.package.directory!s}"
black_command = f"black {ctx.package.directory.parent!s}"
report.info(ctx, "package.format", "sorting imports")
ctx.run(isort_command)
report.info(ctx, "package.format", "formatting code")
ctx.run(black_command) | [
"def",
"format",
"(",
"ctx",
")",
":",
"isort_command",
"=",
"f\"isort -rc {ctx.package.directory!s}\"",
"black_command",
"=",
"f\"black {ctx.package.directory.parent!s}\"",
"report",
".",
"info",
"(",
"ctx",
",",
"\"package.format\"",
",",
"\"sorting imports\"",
")",
"ct... | Auto format package source files. | [
"Auto",
"format",
"package",
"source",
"files",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/tasks/package.py#L32-L42 | train | 34,172 |
stephen-bunn/file-config | tasks/package.py | check | def check(ctx):
""" Check built package is valid.
"""
check_command = f"twine check {ctx.directory!s}/dist/*"
report.info(ctx, "package.check", "checking package")
ctx.run(check_command) | python | def check(ctx):
""" Check built package is valid.
"""
check_command = f"twine check {ctx.directory!s}/dist/*"
report.info(ctx, "package.check", "checking package")
ctx.run(check_command) | [
"def",
"check",
"(",
"ctx",
")",
":",
"check_command",
"=",
"f\"twine check {ctx.directory!s}/dist/*\"",
"report",
".",
"info",
"(",
"ctx",
",",
"\"package.check\"",
",",
"\"checking package\"",
")",
"ctx",
".",
"run",
"(",
"check_command",
")"
] | Check built package is valid. | [
"Check",
"built",
"package",
"is",
"valid",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/tasks/package.py#L56-L62 | train | 34,173 |
stephen-bunn/file-config | tasks/package.py | licenses | def licenses(
ctx,
summary=False,
from_classifier=False,
with_system=False,
with_authors=False,
with_urls=False,
):
""" List dependency licenses.
"""
licenses_command = "pip-licenses --order=license"
report.info(ctx, "package.licenses", "listing licenses of package dependencies")
if summary:
report.debug(ctx, "package.licenses", "summarizing licenses")
licenses_command += " --summary"
if from_classifier:
report.debug(ctx, "package.licenses", "reporting from classifiers")
licenses_command += " --from-classifier"
if with_system:
report.debug(ctx, "package.licenses", "including system packages")
licenses_command += " --with-system"
if with_authors:
report.debug(ctx, "package.licenses", "including package authors")
licenses_command += " --with-authors"
if with_urls:
report.debug(ctx, "package.licenses", "including package urls")
licenses_command += " --with-urls"
ctx.run(licenses_command) | python | def licenses(
ctx,
summary=False,
from_classifier=False,
with_system=False,
with_authors=False,
with_urls=False,
):
""" List dependency licenses.
"""
licenses_command = "pip-licenses --order=license"
report.info(ctx, "package.licenses", "listing licenses of package dependencies")
if summary:
report.debug(ctx, "package.licenses", "summarizing licenses")
licenses_command += " --summary"
if from_classifier:
report.debug(ctx, "package.licenses", "reporting from classifiers")
licenses_command += " --from-classifier"
if with_system:
report.debug(ctx, "package.licenses", "including system packages")
licenses_command += " --with-system"
if with_authors:
report.debug(ctx, "package.licenses", "including package authors")
licenses_command += " --with-authors"
if with_urls:
report.debug(ctx, "package.licenses", "including package urls")
licenses_command += " --with-urls"
ctx.run(licenses_command) | [
"def",
"licenses",
"(",
"ctx",
",",
"summary",
"=",
"False",
",",
"from_classifier",
"=",
"False",
",",
"with_system",
"=",
"False",
",",
"with_authors",
"=",
"False",
",",
"with_urls",
"=",
"False",
",",
")",
":",
"licenses_command",
"=",
"\"pip-licenses --... | List dependency licenses. | [
"List",
"dependency",
"licenses",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/tasks/package.py#L66-L94 | train | 34,174 |
stephen-bunn/file-config | tasks/package.py | version | def version(ctx, version=None, force=False):
""" Specify a new version for the package.
.. important:: If no version is specified, will take the most recent parsable git
tag and bump the patch number.
:param str version: The new version of the package.
:param bool force: If True, skips version check
"""
# define replacement strategies for files where the version needs to be in sync
updates = {
ctx.directory.joinpath("setup.cfg"): [
(r"^(version\s?=\s?)(.*)", "\\g<1>{version}")
],
ctx.package.directory.joinpath("__version__.py"): [
(r"(__version__\s?=\s?)(.*)", '\\g<1>"{version}"')
],
}
previous_version = get_previous_version(ctx)
if isinstance(version, str):
version = parver.Version.parse(version)
if not force and version <= previous_version:
error_message = (
f"version {version!s} is <= to previous version {previous_version!s}"
)
report.error(ctx, "package.version", error_message)
raise ValueError(error_message)
else:
version = previous_version.bump_release(index=len(previous_version.release) - 1)
report.info(ctx, "package.version", f"updating version to {version!s}")
for (path, replacements) in updates.items():
if path.is_file():
content = path.read_text()
for (pattern, sub) in replacements:
report.debug(
ctx,
"package.version",
f"applying replacement ({pattern!r}, {sub!r}) to {path!s}",
)
content = re.sub(pattern, sub.format(version=version), content, re.M)
path.write_text(content) | python | def version(ctx, version=None, force=False):
""" Specify a new version for the package.
.. important:: If no version is specified, will take the most recent parsable git
tag and bump the patch number.
:param str version: The new version of the package.
:param bool force: If True, skips version check
"""
# define replacement strategies for files where the version needs to be in sync
updates = {
ctx.directory.joinpath("setup.cfg"): [
(r"^(version\s?=\s?)(.*)", "\\g<1>{version}")
],
ctx.package.directory.joinpath("__version__.py"): [
(r"(__version__\s?=\s?)(.*)", '\\g<1>"{version}"')
],
}
previous_version = get_previous_version(ctx)
if isinstance(version, str):
version = parver.Version.parse(version)
if not force and version <= previous_version:
error_message = (
f"version {version!s} is <= to previous version {previous_version!s}"
)
report.error(ctx, "package.version", error_message)
raise ValueError(error_message)
else:
version = previous_version.bump_release(index=len(previous_version.release) - 1)
report.info(ctx, "package.version", f"updating version to {version!s}")
for (path, replacements) in updates.items():
if path.is_file():
content = path.read_text()
for (pattern, sub) in replacements:
report.debug(
ctx,
"package.version",
f"applying replacement ({pattern!r}, {sub!r}) to {path!s}",
)
content = re.sub(pattern, sub.format(version=version), content, re.M)
path.write_text(content) | [
"def",
"version",
"(",
"ctx",
",",
"version",
"=",
"None",
",",
"force",
"=",
"False",
")",
":",
"# define replacement strategies for files where the version needs to be in sync",
"updates",
"=",
"{",
"ctx",
".",
"directory",
".",
"joinpath",
"(",
"\"setup.cfg\"",
"... | Specify a new version for the package.
.. important:: If no version is specified, will take the most recent parsable git
tag and bump the patch number.
:param str version: The new version of the package.
:param bool force: If True, skips version check | [
"Specify",
"a",
"new",
"version",
"for",
"the",
"package",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/tasks/package.py#L98-L141 | train | 34,175 |
stephen-bunn/file-config | tasks/package.py | stub | def stub(ctx):
""" Generate typing stubs for the package.
"""
report.info(ctx, "package.stub", f"generating typing stubs for package")
ctx.run(
f"stubgen --include-private --no-import "
f"--output {ctx.directory.joinpath('stubs')!s} "
f"--search-path {ctx.directory.joinpath('src')!s} "
f"--package {ctx.metadata['package_name']}"
) | python | def stub(ctx):
""" Generate typing stubs for the package.
"""
report.info(ctx, "package.stub", f"generating typing stubs for package")
ctx.run(
f"stubgen --include-private --no-import "
f"--output {ctx.directory.joinpath('stubs')!s} "
f"--search-path {ctx.directory.joinpath('src')!s} "
f"--package {ctx.metadata['package_name']}"
) | [
"def",
"stub",
"(",
"ctx",
")",
":",
"report",
".",
"info",
"(",
"ctx",
",",
"\"package.stub\"",
",",
"f\"generating typing stubs for package\"",
")",
"ctx",
".",
"run",
"(",
"f\"stubgen --include-private --no-import \"",
"f\"--output {ctx.directory.joinpath('stubs')!s} \""... | Generate typing stubs for the package. | [
"Generate",
"typing",
"stubs",
"for",
"the",
"package",
"."
] | 93429360c949985202e1f2b9cd0340731819ba75 | https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/tasks/package.py#L145-L155 | train | 34,176 |
tarruda/python-ush | helper.py | s | def s(obj):
"""Helper to normalize linefeeds in strings."""
if isinstance(obj, bytes):
return obj.replace(b'\n', os.linesep.encode())
else:
return obj.replace('\n', os.linesep) | python | def s(obj):
"""Helper to normalize linefeeds in strings."""
if isinstance(obj, bytes):
return obj.replace(b'\n', os.linesep.encode())
else:
return obj.replace('\n', os.linesep) | [
"def",
"s",
"(",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"bytes",
")",
":",
"return",
"obj",
".",
"replace",
"(",
"b'\\n'",
",",
"os",
".",
"linesep",
".",
"encode",
"(",
")",
")",
"else",
":",
"return",
"obj",
".",
"replace",
"(",
... | Helper to normalize linefeeds in strings. | [
"Helper",
"to",
"normalize",
"linefeeds",
"in",
"strings",
"."
] | a339e9e7ebc1b601405b6e0fd84a07514821d579 | https://github.com/tarruda/python-ush/blob/a339e9e7ebc1b601405b6e0fd84a07514821d579/helper.py#L19-L24 | train | 34,177 |
oceanprotocol/oceandb-driver-interface | oceandb_driver_interface/utils.py | parse_config | def parse_config(file_path):
"""Loads the configuration file given as parameter"""
config_parser = configparser.ConfigParser()
config_parser.read(file_path)
plugin_config = {}
options = config_parser.options(CONFIG_OPTION)
for option in options:
try:
plugin_config[option] = config_parser.get(CONFIG_OPTION, option)
if plugin_config[option] == -1:
print("skip: %s" % option)
except Exception as e:
print("exception on %s!" % option)
print(e.message)
plugin_config[option] = None
return plugin_config | python | def parse_config(file_path):
"""Loads the configuration file given as parameter"""
config_parser = configparser.ConfigParser()
config_parser.read(file_path)
plugin_config = {}
options = config_parser.options(CONFIG_OPTION)
for option in options:
try:
plugin_config[option] = config_parser.get(CONFIG_OPTION, option)
if plugin_config[option] == -1:
print("skip: %s" % option)
except Exception as e:
print("exception on %s!" % option)
print(e.message)
plugin_config[option] = None
return plugin_config | [
"def",
"parse_config",
"(",
"file_path",
")",
":",
"config_parser",
"=",
"configparser",
".",
"ConfigParser",
"(",
")",
"config_parser",
".",
"read",
"(",
"file_path",
")",
"plugin_config",
"=",
"{",
"}",
"options",
"=",
"config_parser",
".",
"options",
"(",
... | Loads the configuration file given as parameter | [
"Loads",
"the",
"configuration",
"file",
"given",
"as",
"parameter"
] | ce70fd202ef3d057d857a23b59f977bb3e2e4402 | https://github.com/oceanprotocol/oceandb-driver-interface/blob/ce70fd202ef3d057d857a23b59f977bb3e2e4402/oceandb_driver_interface/utils.py#L15-L30 | train | 34,178 |
oceanprotocol/oceandb-driver-interface | oceandb_driver_interface/utils.py | start_plugin | def start_plugin(file_path=None):
"""This function initialize the Ocean plugin"""
if os.getenv('CONFIG_PATH'):
file_path = os.getenv('CONFIG_PATH')
else:
file_path = file_path
if file_path is not None:
config = parse_config(file_path)
plugin_instance = load_plugin(config)
else:
plugin_instance = load_plugin
return plugin_instance | python | def start_plugin(file_path=None):
"""This function initialize the Ocean plugin"""
if os.getenv('CONFIG_PATH'):
file_path = os.getenv('CONFIG_PATH')
else:
file_path = file_path
if file_path is not None:
config = parse_config(file_path)
plugin_instance = load_plugin(config)
else:
plugin_instance = load_plugin
return plugin_instance | [
"def",
"start_plugin",
"(",
"file_path",
"=",
"None",
")",
":",
"if",
"os",
".",
"getenv",
"(",
"'CONFIG_PATH'",
")",
":",
"file_path",
"=",
"os",
".",
"getenv",
"(",
"'CONFIG_PATH'",
")",
"else",
":",
"file_path",
"=",
"file_path",
"if",
"file_path",
"i... | This function initialize the Ocean plugin | [
"This",
"function",
"initialize",
"the",
"Ocean",
"plugin"
] | ce70fd202ef3d057d857a23b59f977bb3e2e4402 | https://github.com/oceanprotocol/oceandb-driver-interface/blob/ce70fd202ef3d057d857a23b59f977bb3e2e4402/oceandb_driver_interface/utils.py#L33-L44 | train | 34,179 |
usc-isi-i2/etk | etk/segment.py | Segment.store | def store(self, extractions: List[Extraction], attribute: str, group_by_tags: bool = True) -> None:
"""
Records extractions in the container, and for each individual extraction inserts a
ProvenanceRecord to record where the extraction is stored.
Records the "output_segment" in the provenance.
Extractions are always recorded in a list.
Errors out if the segment is primitive, such as a string.
Args:
extractions (List[Extraction]):
attribute (str): where to store the extractions.
group_by_tags (bool): Set to True to use tags as sub-keys, and values of Extractions
with the same tag will be stored in a list as the value of the corresponding key.
(if none of the Extractions has 'tag', do not group by tags)
Returns:
"""
if not isinstance(self._value, dict):
raise StoreExtractionError("segment is type: " + str(type(self._value)))
if not len(extractions):
return
if group_by_tags:
try:
next(x for x in extractions if x.tag) # if there is at least one extraction with a tag
if attribute not in self._extractions:
self._extractions[attribute] = set([])
self._value[attribute] = {}
extraction_provenances = {}
for e in extractions:
tag = e.tag if e.tag else 'NO_TAGS'
if tag not in self.value[attribute]:
self.value[attribute][tag] = [e.value]
else:
if e.value not in self.value[attribute][tag]:
self.value[attribute][tag].append(e.value)
# TODO: handle provenance of non literals
if isinstance(e.value, Number) or isinstance(e.value, str):
extraction_provenances[e.value] = e.prov_id
self._extractions[attribute] = self._extractions[attribute].union(extractions)
new_id = self._document.provenance_id_index # for the purpose of provenance hierarrchy tracking
storage_provenance_record: StorageProvenanceRecord = StorageProvenanceRecord(new_id, self.json_path,
attribute,
extraction_provenances,
self.document)
self._document.provenance_id_index_incrementer()
self._document.provenances[new_id] = storage_provenance_record
self.create_storage_provenance(storage_provenance_record)
return
except StopIteration:
pass
if attribute not in self._extractions:
self._extractions[attribute] = set([])
self._value[attribute] = list()
self._extractions[attribute] = self._extractions[attribute].union(extractions)
extraction_provenances = dict()
for a_extraction in extractions:
# TODO: handle provenance of non literals
if isinstance(a_extraction.value, Number) or isinstance(a_extraction.value, str):
extraction_provenances[a_extraction.value] = a_extraction.prov_id
if a_extraction.value not in self._value[attribute]:
self._value[attribute].append(a_extraction.value)
new_id = self._document.provenance_id_index # for the purpose of provenance hierarchy tracking
storage_provenance_record: StorageProvenanceRecord = StorageProvenanceRecord(new_id, self.json_path, attribute,
extraction_provenances,
self.document)
self._document.provenance_id_index_incrementer()
self._document.provenances[new_id] = storage_provenance_record
self.create_storage_provenance(storage_provenance_record) | python | def store(self, extractions: List[Extraction], attribute: str, group_by_tags: bool = True) -> None:
"""
Records extractions in the container, and for each individual extraction inserts a
ProvenanceRecord to record where the extraction is stored.
Records the "output_segment" in the provenance.
Extractions are always recorded in a list.
Errors out if the segment is primitive, such as a string.
Args:
extractions (List[Extraction]):
attribute (str): where to store the extractions.
group_by_tags (bool): Set to True to use tags as sub-keys, and values of Extractions
with the same tag will be stored in a list as the value of the corresponding key.
(if none of the Extractions has 'tag', do not group by tags)
Returns:
"""
if not isinstance(self._value, dict):
raise StoreExtractionError("segment is type: " + str(type(self._value)))
if not len(extractions):
return
if group_by_tags:
try:
next(x for x in extractions if x.tag) # if there is at least one extraction with a tag
if attribute not in self._extractions:
self._extractions[attribute] = set([])
self._value[attribute] = {}
extraction_provenances = {}
for e in extractions:
tag = e.tag if e.tag else 'NO_TAGS'
if tag not in self.value[attribute]:
self.value[attribute][tag] = [e.value]
else:
if e.value not in self.value[attribute][tag]:
self.value[attribute][tag].append(e.value)
# TODO: handle provenance of non literals
if isinstance(e.value, Number) or isinstance(e.value, str):
extraction_provenances[e.value] = e.prov_id
self._extractions[attribute] = self._extractions[attribute].union(extractions)
new_id = self._document.provenance_id_index # for the purpose of provenance hierarrchy tracking
storage_provenance_record: StorageProvenanceRecord = StorageProvenanceRecord(new_id, self.json_path,
attribute,
extraction_provenances,
self.document)
self._document.provenance_id_index_incrementer()
self._document.provenances[new_id] = storage_provenance_record
self.create_storage_provenance(storage_provenance_record)
return
except StopIteration:
pass
if attribute not in self._extractions:
self._extractions[attribute] = set([])
self._value[attribute] = list()
self._extractions[attribute] = self._extractions[attribute].union(extractions)
extraction_provenances = dict()
for a_extraction in extractions:
# TODO: handle provenance of non literals
if isinstance(a_extraction.value, Number) or isinstance(a_extraction.value, str):
extraction_provenances[a_extraction.value] = a_extraction.prov_id
if a_extraction.value not in self._value[attribute]:
self._value[attribute].append(a_extraction.value)
new_id = self._document.provenance_id_index # for the purpose of provenance hierarchy tracking
storage_provenance_record: StorageProvenanceRecord = StorageProvenanceRecord(new_id, self.json_path, attribute,
extraction_provenances,
self.document)
self._document.provenance_id_index_incrementer()
self._document.provenances[new_id] = storage_provenance_record
self.create_storage_provenance(storage_provenance_record) | [
"def",
"store",
"(",
"self",
",",
"extractions",
":",
"List",
"[",
"Extraction",
"]",
",",
"attribute",
":",
"str",
",",
"group_by_tags",
":",
"bool",
"=",
"True",
")",
"->",
"None",
":",
"if",
"not",
"isinstance",
"(",
"self",
".",
"_value",
",",
"d... | Records extractions in the container, and for each individual extraction inserts a
ProvenanceRecord to record where the extraction is stored.
Records the "output_segment" in the provenance.
Extractions are always recorded in a list.
Errors out if the segment is primitive, such as a string.
Args:
extractions (List[Extraction]):
attribute (str): where to store the extractions.
group_by_tags (bool): Set to True to use tags as sub-keys, and values of Extractions
with the same tag will be stored in a list as the value of the corresponding key.
(if none of the Extractions has 'tag', do not group by tags)
Returns: | [
"Records",
"extractions",
"in",
"the",
"container",
"and",
"for",
"each",
"individual",
"extraction",
"inserts",
"a",
"ProvenanceRecord",
"to",
"record",
"where",
"the",
"extraction",
"is",
"stored",
".",
"Records",
"the",
"output_segment",
"in",
"the",
"provenanc... | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/segment.py#L40-L115 | train | 34,180 |
usc-isi-i2/etk | etk/dependencies/landmark/landmark_extractor/extraction/Landmark.py | loadRule | def loadRule(rule_json_object):
""" Method to load the rules - when adding a new rule it must be added to the if statement within this method. """
name = rule_json_object['name']
rule_type = rule_json_object['rule_type']
validation_regex = None
required = False
removehtml = False
include_end_regex = False #Default to false for bakward compatibility
strip_end_regex = None
sub_rules = []
begin_stripe_id = None
end_stripe_id = None
begin_shift = 0
end_shift = 0
if 'sub_rules' in rule_json_object:
sub_rules = rule_json_object['sub_rules']
if 'validation_regex' in rule_json_object:
validation_regex = rule_json_object['validation_regex']
if 'required' in rule_json_object:
required = rule_json_object['required']
if 'removehtml' in rule_json_object:
removehtml = rule_json_object['removehtml']
if 'include_end_regex' in rule_json_object:
include_end_regex = rule_json_object['include_end_regex']
if 'strip_end_regex' in rule_json_object:
strip_end_regex = rule_json_object['strip_end_regex']
if 'begin_stripe_id' in rule_json_object:
begin_stripe_id = rule_json_object['begin_stripe_id']
if 'end_stripe_id' in rule_json_object:
end_stripe_id = rule_json_object['end_stripe_id']
if 'begin_shift' in rule_json_object:
begin_shift = rule_json_object['begin_shift']
if 'end_shift' in rule_json_object:
end_shift = rule_json_object['end_shift']
rule = {}
""" This is where we add our new type """
if rule_type == ITEM_RULE or rule_type == 'RegexRule':
begin_regex = rule_json_object['begin_regex']
end_regex = rule_json_object['end_regex']
rule = ItemRule(name, begin_regex, end_regex, include_end_regex, strip_end_regex, validation_regex, required,
removehtml, sub_rules, begin_stripe_id, end_stripe_id, begin_shift, end_shift)
if rule_type == ITERATION_RULE or rule_type == 'RegexIterationRule':
begin_regex = rule_json_object['begin_regex']
end_regex = rule_json_object['end_regex']
iter_begin_regex = rule_json_object['iter_begin_regex']
iter_end_regex = rule_json_object['iter_end_regex']
no_first_begin_iter_rule = False
if 'no_first_begin_iter_rule' in rule_json_object:
no_first_begin_iter_rule = rule_json_object['no_first_begin_iter_rule']
no_last_end_iter_rule = False
if 'no_last_end_iter_rule' in rule_json_object:
no_last_end_iter_rule = rule_json_object['no_last_end_iter_rule']
rule = IterationRule(name, begin_regex, end_regex, iter_begin_regex, iter_end_regex,
include_end_regex, strip_end_regex, no_first_begin_iter_rule,
no_last_end_iter_rule, validation_regex, required, removehtml,
sub_rules, begin_shift=begin_shift, end_shift=end_shift)
if 'id' in rule_json_object:
rule.id = rule_json_object['id']
return rule | python | def loadRule(rule_json_object):
""" Method to load the rules - when adding a new rule it must be added to the if statement within this method. """
name = rule_json_object['name']
rule_type = rule_json_object['rule_type']
validation_regex = None
required = False
removehtml = False
include_end_regex = False #Default to false for bakward compatibility
strip_end_regex = None
sub_rules = []
begin_stripe_id = None
end_stripe_id = None
begin_shift = 0
end_shift = 0
if 'sub_rules' in rule_json_object:
sub_rules = rule_json_object['sub_rules']
if 'validation_regex' in rule_json_object:
validation_regex = rule_json_object['validation_regex']
if 'required' in rule_json_object:
required = rule_json_object['required']
if 'removehtml' in rule_json_object:
removehtml = rule_json_object['removehtml']
if 'include_end_regex' in rule_json_object:
include_end_regex = rule_json_object['include_end_regex']
if 'strip_end_regex' in rule_json_object:
strip_end_regex = rule_json_object['strip_end_regex']
if 'begin_stripe_id' in rule_json_object:
begin_stripe_id = rule_json_object['begin_stripe_id']
if 'end_stripe_id' in rule_json_object:
end_stripe_id = rule_json_object['end_stripe_id']
if 'begin_shift' in rule_json_object:
begin_shift = rule_json_object['begin_shift']
if 'end_shift' in rule_json_object:
end_shift = rule_json_object['end_shift']
rule = {}
""" This is where we add our new type """
if rule_type == ITEM_RULE or rule_type == 'RegexRule':
begin_regex = rule_json_object['begin_regex']
end_regex = rule_json_object['end_regex']
rule = ItemRule(name, begin_regex, end_regex, include_end_regex, strip_end_regex, validation_regex, required,
removehtml, sub_rules, begin_stripe_id, end_stripe_id, begin_shift, end_shift)
if rule_type == ITERATION_RULE or rule_type == 'RegexIterationRule':
begin_regex = rule_json_object['begin_regex']
end_regex = rule_json_object['end_regex']
iter_begin_regex = rule_json_object['iter_begin_regex']
iter_end_regex = rule_json_object['iter_end_regex']
no_first_begin_iter_rule = False
if 'no_first_begin_iter_rule' in rule_json_object:
no_first_begin_iter_rule = rule_json_object['no_first_begin_iter_rule']
no_last_end_iter_rule = False
if 'no_last_end_iter_rule' in rule_json_object:
no_last_end_iter_rule = rule_json_object['no_last_end_iter_rule']
rule = IterationRule(name, begin_regex, end_regex, iter_begin_regex, iter_end_regex,
include_end_regex, strip_end_regex, no_first_begin_iter_rule,
no_last_end_iter_rule, validation_regex, required, removehtml,
sub_rules, begin_shift=begin_shift, end_shift=end_shift)
if 'id' in rule_json_object:
rule.id = rule_json_object['id']
return rule | [
"def",
"loadRule",
"(",
"rule_json_object",
")",
":",
"name",
"=",
"rule_json_object",
"[",
"'name'",
"]",
"rule_type",
"=",
"rule_json_object",
"[",
"'rule_type'",
"]",
"validation_regex",
"=",
"None",
"required",
"=",
"False",
"removehtml",
"=",
"False",
"incl... | Method to load the rules - when adding a new rule it must be added to the if statement within this method. | [
"Method",
"to",
"load",
"the",
"rules",
"-",
"when",
"adding",
"a",
"new",
"rule",
"it",
"must",
"be",
"added",
"to",
"the",
"if",
"statement",
"within",
"this",
"method",
"."
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/dependencies/landmark/landmark_extractor/extraction/Landmark.py#L56-L129 | train | 34,181 |
usc-isi-i2/etk | etk/ontology_namespacemanager.py | OntologyNamespaceManager.parse_uri | def parse_uri(self, text: str) -> URIRef:
"""
Parse input text into URI
:param text: can be one of
1. URI, directly return
2. prefix:name, query namespace for prefix, return expanded URI
3. name, use default namespace to expand it and return it
:return: URIRef
"""
if self.check_uriref(text):
return self.check_uriref(text)
elif isinstance(text, str):
text = text.strip()
m = URI_ABBR_PATTERN.match(text)
if m:
prefix, name = m.groups()
base = self.store.namespace(prefix if prefix else '')
if not base:
raise PrefixNotFoundException("Prefix: %s", prefix)
return URIRef(base + name)
raise WrongFormatURIException() | python | def parse_uri(self, text: str) -> URIRef:
"""
Parse input text into URI
:param text: can be one of
1. URI, directly return
2. prefix:name, query namespace for prefix, return expanded URI
3. name, use default namespace to expand it and return it
:return: URIRef
"""
if self.check_uriref(text):
return self.check_uriref(text)
elif isinstance(text, str):
text = text.strip()
m = URI_ABBR_PATTERN.match(text)
if m:
prefix, name = m.groups()
base = self.store.namespace(prefix if prefix else '')
if not base:
raise PrefixNotFoundException("Prefix: %s", prefix)
return URIRef(base + name)
raise WrongFormatURIException() | [
"def",
"parse_uri",
"(",
"self",
",",
"text",
":",
"str",
")",
"->",
"URIRef",
":",
"if",
"self",
".",
"check_uriref",
"(",
"text",
")",
":",
"return",
"self",
".",
"check_uriref",
"(",
"text",
")",
"elif",
"isinstance",
"(",
"text",
",",
"str",
")",... | Parse input text into URI
:param text: can be one of
1. URI, directly return
2. prefix:name, query namespace for prefix, return expanded URI
3. name, use default namespace to expand it and return it
:return: URIRef | [
"Parse",
"input",
"text",
"into",
"URI"
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/ontology_namespacemanager.py#L34-L55 | train | 34,182 |
usc-isi-i2/etk | etk/ontology_namespacemanager.py | OntologyNamespaceManager.bind | def bind(self, prefix: str, namespace: str, override=True, replace=False):
"""
bind a given namespace to the prefix, forbids same prefix with different namespace
:param prefix:
:param namespace:
:param override: if override, rebind, even if the given namespace is already bound to another prefix.
:param replace: if replace, replace any existing prefix with the new namespace
"""
namespace = URIRef(str(namespace))
# When documenting explain that override only applies in what cases
if prefix is None:
prefix = ''
bound_namespace = self.store.namespace(prefix)
# Check if the bound_namespace contains a URI and if so convert it into a URIRef for
# comparison. This is to prevent duplicate namespaces with the same URI.
if bound_namespace:
bound_namespace = URIRef(bound_namespace)
if bound_namespace and bound_namespace != namespace:
if replace:
self.store.bind(prefix, namespace)
# prefix already in use for different namespace
raise PrefixAlreadyUsedException("Prefix (%s, %s) already used, instead of (%s, %s).",
prefix, self.store.namespace(prefix).toPython(),
prefix, namespace.toPython())
else:
bound_prefix = self.store.prefix(namespace)
if bound_prefix is None:
self.store.bind(prefix, namespace)
elif bound_prefix == prefix:
pass # already bound
else:
if override or bound_prefix.startswith("_"):
self.store.bind(prefix, namespace) | python | def bind(self, prefix: str, namespace: str, override=True, replace=False):
"""
bind a given namespace to the prefix, forbids same prefix with different namespace
:param prefix:
:param namespace:
:param override: if override, rebind, even if the given namespace is already bound to another prefix.
:param replace: if replace, replace any existing prefix with the new namespace
"""
namespace = URIRef(str(namespace))
# When documenting explain that override only applies in what cases
if prefix is None:
prefix = ''
bound_namespace = self.store.namespace(prefix)
# Check if the bound_namespace contains a URI and if so convert it into a URIRef for
# comparison. This is to prevent duplicate namespaces with the same URI.
if bound_namespace:
bound_namespace = URIRef(bound_namespace)
if bound_namespace and bound_namespace != namespace:
if replace:
self.store.bind(prefix, namespace)
# prefix already in use for different namespace
raise PrefixAlreadyUsedException("Prefix (%s, %s) already used, instead of (%s, %s).",
prefix, self.store.namespace(prefix).toPython(),
prefix, namespace.toPython())
else:
bound_prefix = self.store.prefix(namespace)
if bound_prefix is None:
self.store.bind(prefix, namespace)
elif bound_prefix == prefix:
pass # already bound
else:
if override or bound_prefix.startswith("_"):
self.store.bind(prefix, namespace) | [
"def",
"bind",
"(",
"self",
",",
"prefix",
":",
"str",
",",
"namespace",
":",
"str",
",",
"override",
"=",
"True",
",",
"replace",
"=",
"False",
")",
":",
"namespace",
"=",
"URIRef",
"(",
"str",
"(",
"namespace",
")",
")",
"# When documenting explain tha... | bind a given namespace to the prefix, forbids same prefix with different namespace
:param prefix:
:param namespace:
:param override: if override, rebind, even if the given namespace is already bound to another prefix.
:param replace: if replace, replace any existing prefix with the new namespace | [
"bind",
"a",
"given",
"namespace",
"to",
"the",
"prefix",
"forbids",
"same",
"prefix",
"with",
"different",
"namespace"
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/ontology_namespacemanager.py#L57-L91 | train | 34,183 |
usc-isi-i2/etk | etk/etk.py | ETK.parse_json_path | def parse_json_path(self, jsonpath):
"""
Parse a jsonpath
Args:
jsonpath: str
Returns: a parsed json path
"""
if jsonpath not in self.parsed:
try:
self.parsed[jsonpath] = self.parser(jsonpath)
except Exception:
self.log("Invalid Json Path: " + jsonpath, "error")
raise InvalidJsonPathError("Invalid Json Path")
return self.parsed[jsonpath] | python | def parse_json_path(self, jsonpath):
"""
Parse a jsonpath
Args:
jsonpath: str
Returns: a parsed json path
"""
if jsonpath not in self.parsed:
try:
self.parsed[jsonpath] = self.parser(jsonpath)
except Exception:
self.log("Invalid Json Path: " + jsonpath, "error")
raise InvalidJsonPathError("Invalid Json Path")
return self.parsed[jsonpath] | [
"def",
"parse_json_path",
"(",
"self",
",",
"jsonpath",
")",
":",
"if",
"jsonpath",
"not",
"in",
"self",
".",
"parsed",
":",
"try",
":",
"self",
".",
"parsed",
"[",
"jsonpath",
"]",
"=",
"self",
".",
"parser",
"(",
"jsonpath",
")",
"except",
"Exception... | Parse a jsonpath
Args:
jsonpath: str
Returns: a parsed json path | [
"Parse",
"a",
"jsonpath"
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/etk.py#L85-L103 | train | 34,184 |
usc-isi-i2/etk | etk/etk.py | ETK.load_glossary | def load_glossary(file_path: str, read_json=False) -> List[str]:
"""
A glossary is a text file, one entry per line.
Args:
file_path (str): path to a text file containing a glossary.
read_json (bool): set True if the glossary is in json format
Returns: List of the strings in the glossary.
"""
if read_json:
if file_path.endswith(".gz"):
return json.load(gzip.open(file_path))
return json.load(open(file_path))
return open(file_path).read().splitlines() | python | def load_glossary(file_path: str, read_json=False) -> List[str]:
"""
A glossary is a text file, one entry per line.
Args:
file_path (str): path to a text file containing a glossary.
read_json (bool): set True if the glossary is in json format
Returns: List of the strings in the glossary.
"""
if read_json:
if file_path.endswith(".gz"):
return json.load(gzip.open(file_path))
return json.load(open(file_path))
return open(file_path).read().splitlines() | [
"def",
"load_glossary",
"(",
"file_path",
":",
"str",
",",
"read_json",
"=",
"False",
")",
"->",
"List",
"[",
"str",
"]",
":",
"if",
"read_json",
":",
"if",
"file_path",
".",
"endswith",
"(",
"\".gz\"",
")",
":",
"return",
"json",
".",
"load",
"(",
"... | A glossary is a text file, one entry per line.
Args:
file_path (str): path to a text file containing a glossary.
read_json (bool): set True if the glossary is in json format
Returns: List of the strings in the glossary. | [
"A",
"glossary",
"is",
"a",
"text",
"file",
"one",
"entry",
"per",
"line",
"."
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/etk.py#L189-L203 | train | 34,185 |
usc-isi-i2/etk | etk/etk.py | ETK.load_spacy_rule | def load_spacy_rule(file_path: str) -> Dict:
"""
A spacy rule file is a json file.
Args:
file_path (str): path to a text file containing a spacy rule sets.
Returns: Dict as the representation of spacy rules
"""
with open(file_path) as fp:
return json.load(fp) | python | def load_spacy_rule(file_path: str) -> Dict:
"""
A spacy rule file is a json file.
Args:
file_path (str): path to a text file containing a spacy rule sets.
Returns: Dict as the representation of spacy rules
"""
with open(file_path) as fp:
return json.load(fp) | [
"def",
"load_spacy_rule",
"(",
"file_path",
":",
"str",
")",
"->",
"Dict",
":",
"with",
"open",
"(",
"file_path",
")",
"as",
"fp",
":",
"return",
"json",
".",
"load",
"(",
"fp",
")"
] | A spacy rule file is a json file.
Args:
file_path (str): path to a text file containing a spacy rule sets.
Returns: Dict as the representation of spacy rules | [
"A",
"spacy",
"rule",
"file",
"is",
"a",
"json",
"file",
"."
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/etk.py#L206-L216 | train | 34,186 |
usc-isi-i2/etk | etk/etk.py | ETK.load_ems | def load_ems(self, modules_paths: List[str]):
"""
Load all extraction modules from the path
Args:
modules_path: str
Returns:
"""
all_em_lst = []
if modules_paths:
for modules_path in modules_paths:
em_lst = []
try:
for file_name in os.listdir(modules_path):
if file_name.startswith("em_") and file_name.endswith(".py"):
sys.path.append(modules_path) # append module dir path
this_module = importlib.import_module(file_name[:-3])
for em in self.classes_in_module(this_module):
em_lst.append(em(self))
except:
self.log("Error when loading etk modules from " + modules_path, "error")
raise NotGetETKModuleError("Wrong file path for ETK modules")
all_em_lst += em_lst
try:
all_em_lst = self.topological_sort(all_em_lst)
except Exception:
self.log("Topological sort for ETK modules fails", "error")
raise NotGetETKModuleError("Topological sort for ETK modules fails")
# if not all_em_lst:
# self.log("No ETK module in " + str(modules_paths), "error")
# raise NotGetETKModuleError("No ETK module in dir, module file should start with em_, end with .py")
return all_em_lst | python | def load_ems(self, modules_paths: List[str]):
"""
Load all extraction modules from the path
Args:
modules_path: str
Returns:
"""
all_em_lst = []
if modules_paths:
for modules_path in modules_paths:
em_lst = []
try:
for file_name in os.listdir(modules_path):
if file_name.startswith("em_") and file_name.endswith(".py"):
sys.path.append(modules_path) # append module dir path
this_module = importlib.import_module(file_name[:-3])
for em in self.classes_in_module(this_module):
em_lst.append(em(self))
except:
self.log("Error when loading etk modules from " + modules_path, "error")
raise NotGetETKModuleError("Wrong file path for ETK modules")
all_em_lst += em_lst
try:
all_em_lst = self.topological_sort(all_em_lst)
except Exception:
self.log("Topological sort for ETK modules fails", "error")
raise NotGetETKModuleError("Topological sort for ETK modules fails")
# if not all_em_lst:
# self.log("No ETK module in " + str(modules_paths), "error")
# raise NotGetETKModuleError("No ETK module in dir, module file should start with em_, end with .py")
return all_em_lst | [
"def",
"load_ems",
"(",
"self",
",",
"modules_paths",
":",
"List",
"[",
"str",
"]",
")",
":",
"all_em_lst",
"=",
"[",
"]",
"if",
"modules_paths",
":",
"for",
"modules_path",
"in",
"modules_paths",
":",
"em_lst",
"=",
"[",
"]",
"try",
":",
"for",
"file_... | Load all extraction modules from the path
Args:
modules_path: str
Returns: | [
"Load",
"all",
"extraction",
"modules",
"from",
"the",
"path"
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/etk.py#L218-L253 | train | 34,187 |
usc-isi-i2/etk | etk/etk.py | ETK.classes_in_module | def classes_in_module(module) -> List:
"""
Return all classes with super class ExtractionModule
Args:
module:
Returns: List of classes
"""
md = module.__dict__
return [
md[c] for c in md if (
isinstance(md[c], type) and
issubclass(md[c], ETKModule
) and
md[c].__module__ == module.__name__)
] | python | def classes_in_module(module) -> List:
"""
Return all classes with super class ExtractionModule
Args:
module:
Returns: List of classes
"""
md = module.__dict__
return [
md[c] for c in md if (
isinstance(md[c], type) and
issubclass(md[c], ETKModule
) and
md[c].__module__ == module.__name__)
] | [
"def",
"classes_in_module",
"(",
"module",
")",
"->",
"List",
":",
"md",
"=",
"module",
".",
"__dict__",
"return",
"[",
"md",
"[",
"c",
"]",
"for",
"c",
"in",
"md",
"if",
"(",
"isinstance",
"(",
"md",
"[",
"c",
"]",
",",
"type",
")",
"and",
"issu... | Return all classes with super class ExtractionModule
Args:
module:
Returns: List of classes | [
"Return",
"all",
"classes",
"with",
"super",
"class",
"ExtractionModule"
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/etk.py#L270-L287 | train | 34,188 |
usc-isi-i2/etk | etk/extractors/readability/readability.py | Document.summary | def summary(self, html_partial=False):
"""Generate the summary of the html docuemnt
:param html_partial: return only the div of the document, don't wrap
in html and body tags.
"""
try:
ruthless = True
#Added recall priority flag
recallPriority = self.recallPriority
if recallPriority:
ruthless = False
self.TEXT_LENGTH_THRESHOLD = 2
self.RETRY_LENGTH = 25
while True:
self._html(True)
for i in self.tags(self.html, 'script', 'style'):
i.drop_tree()
for i in self.tags(self.html, 'body'):
i.set('id', 'readabilityBody')
if ruthless:
self.remove_unlikely_candidates()
self.transform_misused_divs_into_paragraphs()
candidates = self.score_paragraphs()
best_candidates = self.select_best_candidates(candidates)
if best_candidates and not recallPriority:
article = self.get_article_from_candidates(candidates,best_candidates,html_partial)
else:
if ruthless and not recallPriority:
log.debug("ruthless removal did not work. ")
ruthless = False
self.debug(
("ended up stripping too much - "
"going for a safer _parse"))
# try again
continue
else:
log.debug(
("Ruthless and lenient parsing did not work. "
"Returning raw html"))
article = self.html.find('body')
if article is None:
article = self.html
cleaned_article = self.sanitize(article, candidates)
# print(cleaned_article)
article_length = len(cleaned_article or '')
retry_length = self.options.get(
'retry_length',
self.RETRY_LENGTH)
of_acceptable_length = article_length >= retry_length
if ruthless and not of_acceptable_length:
ruthless = False
continue
else:
return cleaned_article
except Exception as e:
print("error: %s", e)
log.exception('error getting summary: ')
raise Exception(Unparseable(str(e)), None, sys.exc_info()[2]) | python | def summary(self, html_partial=False):
"""Generate the summary of the html docuemnt
:param html_partial: return only the div of the document, don't wrap
in html and body tags.
"""
try:
ruthless = True
#Added recall priority flag
recallPriority = self.recallPriority
if recallPriority:
ruthless = False
self.TEXT_LENGTH_THRESHOLD = 2
self.RETRY_LENGTH = 25
while True:
self._html(True)
for i in self.tags(self.html, 'script', 'style'):
i.drop_tree()
for i in self.tags(self.html, 'body'):
i.set('id', 'readabilityBody')
if ruthless:
self.remove_unlikely_candidates()
self.transform_misused_divs_into_paragraphs()
candidates = self.score_paragraphs()
best_candidates = self.select_best_candidates(candidates)
if best_candidates and not recallPriority:
article = self.get_article_from_candidates(candidates,best_candidates,html_partial)
else:
if ruthless and not recallPriority:
log.debug("ruthless removal did not work. ")
ruthless = False
self.debug(
("ended up stripping too much - "
"going for a safer _parse"))
# try again
continue
else:
log.debug(
("Ruthless and lenient parsing did not work. "
"Returning raw html"))
article = self.html.find('body')
if article is None:
article = self.html
cleaned_article = self.sanitize(article, candidates)
# print(cleaned_article)
article_length = len(cleaned_article or '')
retry_length = self.options.get(
'retry_length',
self.RETRY_LENGTH)
of_acceptable_length = article_length >= retry_length
if ruthless and not of_acceptable_length:
ruthless = False
continue
else:
return cleaned_article
except Exception as e:
print("error: %s", e)
log.exception('error getting summary: ')
raise Exception(Unparseable(str(e)), None, sys.exc_info()[2]) | [
"def",
"summary",
"(",
"self",
",",
"html_partial",
"=",
"False",
")",
":",
"try",
":",
"ruthless",
"=",
"True",
"#Added recall priority flag",
"recallPriority",
"=",
"self",
".",
"recallPriority",
"if",
"recallPriority",
":",
"ruthless",
"=",
"False",
"self",
... | Generate the summary of the html docuemnt
:param html_partial: return only the div of the document, don't wrap
in html and body tags. | [
"Generate",
"the",
"summary",
"of",
"the",
"html",
"docuemnt"
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/extractors/readability/readability.py#L146-L217 | train | 34,189 |
usc-isi-i2/etk | etk/extraction.py | ExtractableBase.list2str | def list2str(self, l: List, joiner: str) -> str:
"""
Convert list to str as input for tokenizer
Args:
l (list): list for converting
joiner (str): join the elements using this string to separate them.
Returns: the value of the list as a string
"""
result = str()
for item in l:
if isinstance(item, list):
result = result + self.list2str(item, joiner) + joiner
elif isinstance(item, dict):
result = result + self.dict2str(item, joiner) + joiner
elif item:
result = result + str(item) + joiner
return result | python | def list2str(self, l: List, joiner: str) -> str:
"""
Convert list to str as input for tokenizer
Args:
l (list): list for converting
joiner (str): join the elements using this string to separate them.
Returns: the value of the list as a string
"""
result = str()
for item in l:
if isinstance(item, list):
result = result + self.list2str(item, joiner) + joiner
elif isinstance(item, dict):
result = result + self.dict2str(item, joiner) + joiner
elif item:
result = result + str(item) + joiner
return result | [
"def",
"list2str",
"(",
"self",
",",
"l",
":",
"List",
",",
"joiner",
":",
"str",
")",
"->",
"str",
":",
"result",
"=",
"str",
"(",
")",
"for",
"item",
"in",
"l",
":",
"if",
"isinstance",
"(",
"item",
",",
"list",
")",
":",
"result",
"=",
"resu... | Convert list to str as input for tokenizer
Args:
l (list): list for converting
joiner (str): join the elements using this string to separate them.
Returns: the value of the list as a string | [
"Convert",
"list",
"to",
"str",
"as",
"input",
"for",
"tokenizer"
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/extraction.py#L48-L67 | train | 34,190 |
usc-isi-i2/etk | etk/extraction.py | ExtractableBase.dict2str | def dict2str(self, d: Dict, joiner: str) -> str:
"""
Convert dict to str as input for tokenizer
Args:
d (dict): dict for converting
joiner (str): join the elements using this string to separate them.
Returns: the value of the dict as a string
"""
result = str()
for key in d:
result = result + str(key) + " : "
if isinstance(d[key], list):
result = result + self.list2str(d[key], joiner) + joiner
elif isinstance(d[key], dict):
result = result + self.dict2str(d[key], joiner) + joiner
elif d[key]:
result = result + str(d[key]) + joiner
return result | python | def dict2str(self, d: Dict, joiner: str) -> str:
"""
Convert dict to str as input for tokenizer
Args:
d (dict): dict for converting
joiner (str): join the elements using this string to separate them.
Returns: the value of the dict as a string
"""
result = str()
for key in d:
result = result + str(key) + " : "
if isinstance(d[key], list):
result = result + self.list2str(d[key], joiner) + joiner
elif isinstance(d[key], dict):
result = result + self.dict2str(d[key], joiner) + joiner
elif d[key]:
result = result + str(d[key]) + joiner
return result | [
"def",
"dict2str",
"(",
"self",
",",
"d",
":",
"Dict",
",",
"joiner",
":",
"str",
")",
"->",
"str",
":",
"result",
"=",
"str",
"(",
")",
"for",
"key",
"in",
"d",
":",
"result",
"=",
"result",
"+",
"str",
"(",
"key",
")",
"+",
"\" : \"",
"if",
... | Convert dict to str as input for tokenizer
Args:
d (dict): dict for converting
joiner (str): join the elements using this string to separate them.
Returns: the value of the dict as a string | [
"Convert",
"dict",
"to",
"str",
"as",
"input",
"for",
"tokenizer"
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/extraction.py#L69-L89 | train | 34,191 |
usc-isi-i2/etk | etk/extraction.py | Extractable.get_tokens | def get_tokens(self, tokenizer: Tokenizer) -> List[Token]:
"""
Tokenize this Extractable.
If the value is a string, it returns the tokenized version of the string. Else, convert to string with
get_string method
As it is common to need the same tokens for multiple extractors, the Extractable should cache the
tokenize results, keyed by segment and tokenizer so that given the same segment and tokenizer,
the same results are returned. If the same segment is given, but different tokenizer, the different
results are cached separately.
Args:
tokenizer (Tokenizer)
Returns: a sequence of tokens.
"""
if (self, tokenizer) in self.tokenize_results:
return self.tokenize_results[(self, tokenizer)]
else:
segment_value_for_tokenize = self.get_string()
tokens = tokenizer.tokenize(segment_value_for_tokenize)
self.tokenize_results[(self, tokenizer)] = tokens
return tokens | python | def get_tokens(self, tokenizer: Tokenizer) -> List[Token]:
"""
Tokenize this Extractable.
If the value is a string, it returns the tokenized version of the string. Else, convert to string with
get_string method
As it is common to need the same tokens for multiple extractors, the Extractable should cache the
tokenize results, keyed by segment and tokenizer so that given the same segment and tokenizer,
the same results are returned. If the same segment is given, but different tokenizer, the different
results are cached separately.
Args:
tokenizer (Tokenizer)
Returns: a sequence of tokens.
"""
if (self, tokenizer) in self.tokenize_results:
return self.tokenize_results[(self, tokenizer)]
else:
segment_value_for_tokenize = self.get_string()
tokens = tokenizer.tokenize(segment_value_for_tokenize)
self.tokenize_results[(self, tokenizer)] = tokens
return tokens | [
"def",
"get_tokens",
"(",
"self",
",",
"tokenizer",
":",
"Tokenizer",
")",
"->",
"List",
"[",
"Token",
"]",
":",
"if",
"(",
"self",
",",
"tokenizer",
")",
"in",
"self",
".",
"tokenize_results",
":",
"return",
"self",
".",
"tokenize_results",
"[",
"(",
... | Tokenize this Extractable.
If the value is a string, it returns the tokenized version of the string. Else, convert to string with
get_string method
As it is common to need the same tokens for multiple extractors, the Extractable should cache the
tokenize results, keyed by segment and tokenizer so that given the same segment and tokenizer,
the same results are returned. If the same segment is given, but different tokenizer, the different
results are cached separately.
Args:
tokenizer (Tokenizer)
Returns: a sequence of tokens. | [
"Tokenize",
"this",
"Extractable",
"."
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/extraction.py#L103-L127 | train | 34,192 |
usc-isi-i2/etk | examples/ontology_api/create_kg_with_alignment_ontology.py | AlignmentHelper.uri_from_fields | def uri_from_fields(prefix, *fields):
"""Construct a URI out of the fields, concatenating them after removing offensive characters.
When all the fields are empty, return empty"""
string = '_'.join(AlignmentHelper.alpha_numeric(f.strip().lower(), '') for f in fields)
if len(string) == len(fields) - 1:
return ''
return prefix + string | python | def uri_from_fields(prefix, *fields):
"""Construct a URI out of the fields, concatenating them after removing offensive characters.
When all the fields are empty, return empty"""
string = '_'.join(AlignmentHelper.alpha_numeric(f.strip().lower(), '') for f in fields)
if len(string) == len(fields) - 1:
return ''
return prefix + string | [
"def",
"uri_from_fields",
"(",
"prefix",
",",
"*",
"fields",
")",
":",
"string",
"=",
"'_'",
".",
"join",
"(",
"AlignmentHelper",
".",
"alpha_numeric",
"(",
"f",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
",",
"''",
")",
"for",
"f",
"in",
"fie... | Construct a URI out of the fields, concatenating them after removing offensive characters.
When all the fields are empty, return empty | [
"Construct",
"a",
"URI",
"out",
"of",
"the",
"fields",
"concatenating",
"them",
"after",
"removing",
"offensive",
"characters",
".",
"When",
"all",
"the",
"fields",
"are",
"empty",
"return",
"empty"
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/examples/ontology_api/create_kg_with_alignment_ontology.py#L178-L186 | train | 34,193 |
usc-isi-i2/etk | etk/data_extractors/htiExtractors/parser.py | parse_phone | def parse_phone(parts, allow_multiple=False):
"""
Parse the phone number from the ad's parts
parts -> The backpage ad's posting_body, separated into substrings
allow_multiple -> If false, arbitrarily chooses the most commonly occurring phone
"""
# Get text substitutions (ex: 'three' -> '3')
text_subs = misc.phone_text_subs()
Small = text_subs['Small']
Magnitude = text_subs['Magnitude']
Others = text_subs['Others']
phone_pattern = r'1?(?:[2-9][0-8][0-9])\s?(?:[2-9][0-9]{2})\s?(?:[0-9]{2})\s?(?:[0-9]{2})'
phone_pattern_spaces = r'1?\W?[2-9]\W?[0-8]\W?[0-9]\W?[2-9]\W?[0-9]\W?[0-9]\W?[0-9]\W?[0-9]\W?[0-9]\W?[0-9]'
found_phones = []
return_parts = []
# Check each part for phone # and remove from parts if found
for part in parts:
body = part
# remove '420' references to avoid false positives
body = re.sub(r'420 ?friendly', '', body)
body = body.replace(' 420 ', '')
body = body.replace('420 sp', '')
# Replace all disguising characters in the body
for key in Small:
body = re.sub(r'-?'+re.escape(key)+r'-?', str(Small[key]), body)
for key in Magnitude:
body = re.sub(r'-?'+re.escape(key)+r'-?', str(Magnitude[key]), body)
for key in Others:
body = re.sub(r'-?'+re.escape(key)+r'-?', str(Others[key]), body)
body = re.sub(r'\W', ' ', body)
body = re.sub(r' +', ' ', body)
if len(re.sub(r'\D', '', body)) < 10:
# Less than 10 numeric digits in part - no phone number here
return_parts.append(part)
continue;
phones = re.findall(phone_pattern, body)
if len(phones) == 0:
# No phone number in standard format
phones = re.findall(phone_pattern_spaces, body)
if len(phones) > 0:
# Phone number had spaces between digits
for found in phones:
found_phones.append(re.sub(r'\D', '', found))
else:
# Found phone in standard format
for found in phones:
found_phones.append(re.sub(r'\D', '', found))
if found_phones:
# Phone has been found, remove from part)
for found in found_phones:
filtered_part = parser_helpers.remove_phone(part, found)
if re.sub(r'\W', '', filtered_part):
# get rid of now-empty parts
return_parts.append(filtered_part)
else:
# Phone not found yet, add part to output
return_parts.append(part)
if not allow_multiple:
# Get most commonly occurring phone
found_phone = ''
if len(found_phones) > 0:
found_phone = max(set(found_phones), key=found_phones.count)
# Return the phone along with the original parts (minus any occurrences of the phone number)
return (found_phone, return_parts)
else:
# return all phones
return (list(set(found_phones)), return_parts) | python | def parse_phone(parts, allow_multiple=False):
"""
Parse the phone number from the ad's parts
parts -> The backpage ad's posting_body, separated into substrings
allow_multiple -> If false, arbitrarily chooses the most commonly occurring phone
"""
# Get text substitutions (ex: 'three' -> '3')
text_subs = misc.phone_text_subs()
Small = text_subs['Small']
Magnitude = text_subs['Magnitude']
Others = text_subs['Others']
phone_pattern = r'1?(?:[2-9][0-8][0-9])\s?(?:[2-9][0-9]{2})\s?(?:[0-9]{2})\s?(?:[0-9]{2})'
phone_pattern_spaces = r'1?\W?[2-9]\W?[0-8]\W?[0-9]\W?[2-9]\W?[0-9]\W?[0-9]\W?[0-9]\W?[0-9]\W?[0-9]\W?[0-9]'
found_phones = []
return_parts = []
# Check each part for phone # and remove from parts if found
for part in parts:
body = part
# remove '420' references to avoid false positives
body = re.sub(r'420 ?friendly', '', body)
body = body.replace(' 420 ', '')
body = body.replace('420 sp', '')
# Replace all disguising characters in the body
for key in Small:
body = re.sub(r'-?'+re.escape(key)+r'-?', str(Small[key]), body)
for key in Magnitude:
body = re.sub(r'-?'+re.escape(key)+r'-?', str(Magnitude[key]), body)
for key in Others:
body = re.sub(r'-?'+re.escape(key)+r'-?', str(Others[key]), body)
body = re.sub(r'\W', ' ', body)
body = re.sub(r' +', ' ', body)
if len(re.sub(r'\D', '', body)) < 10:
# Less than 10 numeric digits in part - no phone number here
return_parts.append(part)
continue;
phones = re.findall(phone_pattern, body)
if len(phones) == 0:
# No phone number in standard format
phones = re.findall(phone_pattern_spaces, body)
if len(phones) > 0:
# Phone number had spaces between digits
for found in phones:
found_phones.append(re.sub(r'\D', '', found))
else:
# Found phone in standard format
for found in phones:
found_phones.append(re.sub(r'\D', '', found))
if found_phones:
# Phone has been found, remove from part)
for found in found_phones:
filtered_part = parser_helpers.remove_phone(part, found)
if re.sub(r'\W', '', filtered_part):
# get rid of now-empty parts
return_parts.append(filtered_part)
else:
# Phone not found yet, add part to output
return_parts.append(part)
if not allow_multiple:
# Get most commonly occurring phone
found_phone = ''
if len(found_phones) > 0:
found_phone = max(set(found_phones), key=found_phones.count)
# Return the phone along with the original parts (minus any occurrences of the phone number)
return (found_phone, return_parts)
else:
# return all phones
return (list(set(found_phones)), return_parts) | [
"def",
"parse_phone",
"(",
"parts",
",",
"allow_multiple",
"=",
"False",
")",
":",
"# Get text substitutions (ex: 'three' -> '3')",
"text_subs",
"=",
"misc",
".",
"phone_text_subs",
"(",
")",
"Small",
"=",
"text_subs",
"[",
"'Small'",
"]",
"Magnitude",
"=",
"text_... | Parse the phone number from the ad's parts
parts -> The backpage ad's posting_body, separated into substrings
allow_multiple -> If false, arbitrarily chooses the most commonly occurring phone | [
"Parse",
"the",
"phone",
"number",
"from",
"the",
"ad",
"s",
"parts",
"parts",
"-",
">",
"The",
"backpage",
"ad",
"s",
"posting_body",
"separated",
"into",
"substrings",
"allow_multiple",
"-",
">",
"If",
"false",
"arbitrarily",
"chooses",
"the",
"most",
"com... | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/data_extractors/htiExtractors/parser.py#L525-L602 | train | 34,194 |
usc-isi-i2/etk | etk/extractors/date_extractor.py | DateExtractor._wrap_extraction | def _wrap_extraction(self, date_object: datetime.datetime,
original_text: str,
start_char: int,
end_char: int
) -> Extraction or None:
"""
wrap the final result as an Extraction and return
"""
try:
resolution = self._settings[MIN_RESOLUTION] \
if self._settings[DATE_VALUE_RESOLUTION] == DateResolution.ORIGINAL \
else self._settings[DATE_VALUE_RESOLUTION]
e = Extraction(self._convert_to_iso_format(date_object, resolution=resolution),
start_char=start_char,
end_char=end_char,
extractor_name=self._name,
date_object=date_object,
original_date=original_text)
return e
except Exception as e:
warn('DateExtractor: Failed to wrap result ' + str(original_text) + ' with Extraction class.\n'
'Catch ' + str(e))
return None | python | def _wrap_extraction(self, date_object: datetime.datetime,
original_text: str,
start_char: int,
end_char: int
) -> Extraction or None:
"""
wrap the final result as an Extraction and return
"""
try:
resolution = self._settings[MIN_RESOLUTION] \
if self._settings[DATE_VALUE_RESOLUTION] == DateResolution.ORIGINAL \
else self._settings[DATE_VALUE_RESOLUTION]
e = Extraction(self._convert_to_iso_format(date_object, resolution=resolution),
start_char=start_char,
end_char=end_char,
extractor_name=self._name,
date_object=date_object,
original_date=original_text)
return e
except Exception as e:
warn('DateExtractor: Failed to wrap result ' + str(original_text) + ' with Extraction class.\n'
'Catch ' + str(e))
return None | [
"def",
"_wrap_extraction",
"(",
"self",
",",
"date_object",
":",
"datetime",
".",
"datetime",
",",
"original_text",
":",
"str",
",",
"start_char",
":",
"int",
",",
"end_char",
":",
"int",
")",
"->",
"Extraction",
"or",
"None",
":",
"try",
":",
"resolution"... | wrap the final result as an Extraction and return | [
"wrap",
"the",
"final",
"result",
"as",
"an",
"Extraction",
"and",
"return"
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/extractors/date_extractor.py#L287-L310 | train | 34,195 |
usc-isi-i2/etk | etk/extractors/date_extractor.py | DateExtractor._remove_overlapped_date_str | def _remove_overlapped_date_str(self, results: List[List[dict]]) -> List[Extraction]:
"""
some string may be matched by multiple date templates,
deduplicate the results and return a single list
"""
res = []
all_results = []
for x in results:
all_results = all_results + x
if not all_results or len(all_results) == 0:
return list()
all_results.sort(key=lambda k: k['start'])
cur_max = None
i = 0
while i < len(all_results) and not cur_max:
if self._post_check(all_results[i]):
cur_max = all_results[i]
i += 1
if not cur_max:
return res
while i < len(all_results):
x = all_results[i]
i += 1
if not self._post_check(x):
continue
if cur_max['end'] <= x['start']:
parsed_date = self._parse_date(cur_max)
if parsed_date:
if self._settings[EXTRACT_FIRST_DATE_ONLY]:
return res
res.append(parsed_date)
cur_max = x
else:
if len(x['value']) > len(cur_max['value']):
cur_max = x
elif len(x['value']) == len(cur_max['value']):
if x['order'] in ['SINGLE_YEAR']:
cur_max = x
elif len(x['order']) == len(cur_max['order']):
if len(x['groups']) < len(cur_max['groups']):
cur_max = x
elif len(x['groups']) == len(cur_max['groups']):
if sum(ele is not None for ele in x['groups']) < sum(ele is not None for ele in cur_max['groups']):
cur_max = x
elif self._settings[PREFER_LANGUAGE_DATE_ORDER] and self._lan in language_date_order:
if x['order'] == language_date_order[self._lan]:
cur_max = x
elif x['order'] == self._settings[PREFERRED_DATE_ORDER]:
cur_max = x
elif x['order'] == self._settings[PREFERRED_DATE_ORDER]:
cur_max = x
parsed_date = self._parse_date(cur_max)
if parsed_date:
if self._settings[EXTRACT_FIRST_DATE_ONLY]:
return res
res.append(parsed_date)
return res | python | def _remove_overlapped_date_str(self, results: List[List[dict]]) -> List[Extraction]:
"""
some string may be matched by multiple date templates,
deduplicate the results and return a single list
"""
res = []
all_results = []
for x in results:
all_results = all_results + x
if not all_results or len(all_results) == 0:
return list()
all_results.sort(key=lambda k: k['start'])
cur_max = None
i = 0
while i < len(all_results) and not cur_max:
if self._post_check(all_results[i]):
cur_max = all_results[i]
i += 1
if not cur_max:
return res
while i < len(all_results):
x = all_results[i]
i += 1
if not self._post_check(x):
continue
if cur_max['end'] <= x['start']:
parsed_date = self._parse_date(cur_max)
if parsed_date:
if self._settings[EXTRACT_FIRST_DATE_ONLY]:
return res
res.append(parsed_date)
cur_max = x
else:
if len(x['value']) > len(cur_max['value']):
cur_max = x
elif len(x['value']) == len(cur_max['value']):
if x['order'] in ['SINGLE_YEAR']:
cur_max = x
elif len(x['order']) == len(cur_max['order']):
if len(x['groups']) < len(cur_max['groups']):
cur_max = x
elif len(x['groups']) == len(cur_max['groups']):
if sum(ele is not None for ele in x['groups']) < sum(ele is not None for ele in cur_max['groups']):
cur_max = x
elif self._settings[PREFER_LANGUAGE_DATE_ORDER] and self._lan in language_date_order:
if x['order'] == language_date_order[self._lan]:
cur_max = x
elif x['order'] == self._settings[PREFERRED_DATE_ORDER]:
cur_max = x
elif x['order'] == self._settings[PREFERRED_DATE_ORDER]:
cur_max = x
parsed_date = self._parse_date(cur_max)
if parsed_date:
if self._settings[EXTRACT_FIRST_DATE_ONLY]:
return res
res.append(parsed_date)
return res | [
"def",
"_remove_overlapped_date_str",
"(",
"self",
",",
"results",
":",
"List",
"[",
"List",
"[",
"dict",
"]",
"]",
")",
"->",
"List",
"[",
"Extraction",
"]",
":",
"res",
"=",
"[",
"]",
"all_results",
"=",
"[",
"]",
"for",
"x",
"in",
"results",
":",
... | some string may be matched by multiple date templates,
deduplicate the results and return a single list | [
"some",
"string",
"may",
"be",
"matched",
"by",
"multiple",
"date",
"templates",
"deduplicate",
"the",
"results",
"and",
"return",
"a",
"single",
"list"
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/extractors/date_extractor.py#L312-L371 | train | 34,196 |
usc-isi-i2/etk | etk/extractors/readability/encoding.py | custom_decode | def custom_decode(encoding):
"""Overrides encoding when charset declaration
or charset determination is a subset of a larger
charset. Created because of issues with Chinese websites"""
encoding = encoding.lower()
alternates = {
'big5': 'big5hkscs',
'gb2312': 'gb18030',
'ascii': 'utf-8',
'MacCyrillic': 'cp1251',
}
if encoding in alternates:
return alternates[encoding]
else:
return encoding | python | def custom_decode(encoding):
"""Overrides encoding when charset declaration
or charset determination is a subset of a larger
charset. Created because of issues with Chinese websites"""
encoding = encoding.lower()
alternates = {
'big5': 'big5hkscs',
'gb2312': 'gb18030',
'ascii': 'utf-8',
'MacCyrillic': 'cp1251',
}
if encoding in alternates:
return alternates[encoding]
else:
return encoding | [
"def",
"custom_decode",
"(",
"encoding",
")",
":",
"encoding",
"=",
"encoding",
".",
"lower",
"(",
")",
"alternates",
"=",
"{",
"'big5'",
":",
"'big5hkscs'",
",",
"'gb2312'",
":",
"'gb18030'",
",",
"'ascii'",
":",
"'utf-8'",
",",
"'MacCyrillic'",
":",
"'cp... | Overrides encoding when charset declaration
or charset determination is a subset of a larger
charset. Created because of issues with Chinese websites | [
"Overrides",
"encoding",
"when",
"charset",
"declaration",
"or",
"charset",
"determination",
"is",
"a",
"subset",
"of",
"a",
"larger",
"charset",
".",
"Created",
"because",
"of",
"issues",
"with",
"Chinese",
"websites"
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/extractors/readability/encoding.py#L34-L48 | train | 34,197 |
usc-isi-i2/etk | etk/knowledge_graph_schema.py | KGSchema.iso_date | def iso_date(d) -> str:
"""
Return iso format of a date
Args:
d:
Returns: str
"""
if isinstance(d, datetime):
return d.isoformat()
elif isinstance(d, date):
return datetime.combine(d, datetime.min.time()).isoformat()
else:
try:
datetime.strptime(d, '%Y-%m-%dT%H:%M:%S')
return d
except ValueError:
try:
datetime.strptime(d, '%Y-%m-%d')
return d + "T00:00:00"
except ValueError:
pass
raise ISODateError("Can not convert value to ISO format for kg") | python | def iso_date(d) -> str:
"""
Return iso format of a date
Args:
d:
Returns: str
"""
if isinstance(d, datetime):
return d.isoformat()
elif isinstance(d, date):
return datetime.combine(d, datetime.min.time()).isoformat()
else:
try:
datetime.strptime(d, '%Y-%m-%dT%H:%M:%S')
return d
except ValueError:
try:
datetime.strptime(d, '%Y-%m-%d')
return d + "T00:00:00"
except ValueError:
pass
raise ISODateError("Can not convert value to ISO format for kg") | [
"def",
"iso_date",
"(",
"d",
")",
"->",
"str",
":",
"if",
"isinstance",
"(",
"d",
",",
"datetime",
")",
":",
"return",
"d",
".",
"isoformat",
"(",
")",
"elif",
"isinstance",
"(",
"d",
",",
"date",
")",
":",
"return",
"datetime",
".",
"combine",
"("... | Return iso format of a date
Args:
d:
Returns: str | [
"Return",
"iso",
"format",
"of",
"a",
"date"
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/knowledge_graph_schema.py#L80-L103 | train | 34,198 |
usc-isi-i2/etk | etk/knowledge_graph_schema.py | KGSchema.is_valid | def is_valid(self, field_name, value) -> (bool, object):
"""
Return true if the value type matches or can be coerced to the defined type in schema, otherwise false.
If field not defined, return none
Args:
field_name: str
value:
Returns: bool, value, where the value may have been coerced to the required type.
"""
if self.has_field(field_name):
if self.fields_dict[field_name] == FieldType.KG_ID:
return True, value
if self.fields_dict[field_name] == FieldType.NUMBER:
if isinstance(value, numbers.Number):
return True, value
else:
converted_number = self.parse_number(value)
return (False, value) if not converted_number else (True, value)
if self.fields_dict[field_name] == FieldType.STRING:
if isinstance(value, str):
return True, value.strip()
else:
return True, str(value).strip()
if self.fields_dict[field_name] == FieldType.DATE:
valid, d = self.is_date(value)
if valid:
return True, d.isoformat()
else:
return False, value
if self.fields_dict[field_name] == FieldType.LOCATION:
valid, l = self.is_location(value)
if valid:
return True, l
else:
return False, value
else:
print('{} not found in KG Schema'.format(field_name))
return False, value | python | def is_valid(self, field_name, value) -> (bool, object):
"""
Return true if the value type matches or can be coerced to the defined type in schema, otherwise false.
If field not defined, return none
Args:
field_name: str
value:
Returns: bool, value, where the value may have been coerced to the required type.
"""
if self.has_field(field_name):
if self.fields_dict[field_name] == FieldType.KG_ID:
return True, value
if self.fields_dict[field_name] == FieldType.NUMBER:
if isinstance(value, numbers.Number):
return True, value
else:
converted_number = self.parse_number(value)
return (False, value) if not converted_number else (True, value)
if self.fields_dict[field_name] == FieldType.STRING:
if isinstance(value, str):
return True, value.strip()
else:
return True, str(value).strip()
if self.fields_dict[field_name] == FieldType.DATE:
valid, d = self.is_date(value)
if valid:
return True, d.isoformat()
else:
return False, value
if self.fields_dict[field_name] == FieldType.LOCATION:
valid, l = self.is_location(value)
if valid:
return True, l
else:
return False, value
else:
print('{} not found in KG Schema'.format(field_name))
return False, value | [
"def",
"is_valid",
"(",
"self",
",",
"field_name",
",",
"value",
")",
"->",
"(",
"bool",
",",
"object",
")",
":",
"if",
"self",
".",
"has_field",
"(",
"field_name",
")",
":",
"if",
"self",
".",
"fields_dict",
"[",
"field_name",
"]",
"==",
"FieldType",
... | Return true if the value type matches or can be coerced to the defined type in schema, otherwise false.
If field not defined, return none
Args:
field_name: str
value:
Returns: bool, value, where the value may have been coerced to the required type. | [
"Return",
"true",
"if",
"the",
"value",
"type",
"matches",
"or",
"can",
"be",
"coerced",
"to",
"the",
"defined",
"type",
"in",
"schema",
"otherwise",
"false",
".",
"If",
"field",
"not",
"defined",
"return",
"none"
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/knowledge_graph_schema.py#L123-L166 | train | 34,199 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.