docstring
stringlengths 52
499
| function
stringlengths 67
35.2k
| __index_level_0__
int64 52.6k
1.16M
|
|---|---|---|
Ask a user for a boolean input
args:
message (str): Prompt for user
returns:
bool_in (boolean): Input boolean
|
def bool_input(message):
    """Prompt the user until they type 'true' or 'false'.

    args:
        message (str): Prompt for user
    returns:
        bool: the parsed boolean
    """
    prompt = message + ' (true or false): '
    while True:
        answer = input(prompt).lower()
        if answer in ('true', 'false'):
            return answer == 'true'
        # Anything else: complain and re-prompt.
        print(colored('Must be either true or false, try again!', 'red'))
| 746,539
|
Select a project from configuration to run transfer on
args:
user_provided_project (str): Project name that should match a project in the config
returns:
project (dict): Configuration settings for a user selected project
|
def select_project(user_provided_project):
    """Select a project from configuration to run transfer on.

    args:
        user_provided_project (str): Project name that should match a project in the config
    returns:
        project (dict): Configuration settings for a user selected project,
            or None when transfer is not configured yet.
    """
    home = os.path.expanduser('~')
    config_file = os.path.join(home, '.transfer', 'config.yaml')
    if os.path.isfile(config_file):
        with open(config_file, 'r') as fp:
            # safe_load: the plain yaml.load can construct arbitrary Python
            # objects from the file and is deprecated without a Loader.
            projects = yaml.safe_load(fp.read())
        if len(projects) == 1:
            project = projects[0]
        else:
            if user_provided_project in [project['name'] for project in projects]:
                for inner_project in projects:
                    if user_provided_project == inner_project['name']:
                        project = inner_project
                        break  # first match found; no need to keep scanning
            else:
                print('Select your project')
                for i, project in enumerate(projects):
                    print('[' + str(i) + ']: ' + project['name'])
                project_index = int_input('project', -1, len(projects), show_range = False)
                project = projects[project_index]
    else:
        print('Transfer is not configured.')
        print('Please run:')
        print('')
        print(colored('    transfer --configure', 'green'))
        return
    print(colored('Project selected: ' + project['name'], 'cyan'))
    return project
| 746,552
|
Store configuration
args:
config (list[dict]): configurations for each project
|
def store_config(config, suffix = None):
    """Persist project configurations to ~/.transfer[/suffix]/config.yaml.

    args:
        config (list[dict]): configurations for each project
        suffix (str): optional subdirectory under ~/.transfer
    """
    base = os.path.join(os.path.expanduser('~'), '.transfer')
    config_path = base if suffix is None else os.path.join(base, suffix)
    # Create the directory tree on first use.
    os.makedirs(config_path, exist_ok = True)
    with open(os.path.join(config_path, 'config.yaml'), 'w') as fp:
        yaml.dump(config, fp)
| 746,556
|
Update project in configuration
args:
updated_project (dict): Updated project configuration values
|
def update_config(updated_project):
    """Replace one project's entry in the stored configuration.

    args:
        updated_project (dict): Updated project configuration values;
            matched against existing projects by its 'name' key.
    """
    home = os.path.expanduser('~')
    config_file = os.path.join(home, '.transfer', 'config.yaml')
    if os.path.isfile(config_file):
        with open(config_file, 'r') as fp:
            # safe_load: the plain yaml.load can construct arbitrary Python
            # objects from the file and is deprecated without a Loader.
            projects = yaml.safe_load(fp.read())
        replace_index = -1
        for i, project in enumerate(projects):
            if project['name'] == updated_project['name']:
                replace_index = i
        if replace_index > -1:
            projects[replace_index] = updated_project
            store_config(projects)
        else:
            print('Not saving configuration')
            print(colored('Project: ' + updated_project['name'] + ' was not found in configured projects!', 'red'))
    else:
        print('Transfer is not configured.')
        print('Please run:')
        print('')
        print(colored('    transfer --configure', 'cyan'))
        return
| 746,557
|
Construct a graph that repeats this graph a number of times
Arguments:
| ``repeat`` -- The number of repetitions.
|
def __mul__(self, repeat):
    """Construct a graph that repeats this graph a number of times.

    Arguments:
     | ``repeat``  --  The number of repetitions.
    """
    if not isinstance(repeat, int):
        raise TypeError("Can only multiply a graph with an integer")
    # Replicate the edge list, shifting the vertex indices for each copy.
    new_edges = [
        frozenset([v1 + i*self.num_vertices, v2 + i*self.num_vertices])
        for i in range(repeat)
        for v1, v2 in self.edges
    ]
    # Tile the per-vertex atom numbers, forcing an integer dtype.
    new_numbers = np.empty((repeat, len(self.numbers)), int)
    new_numbers[:] = self.numbers
    new_numbers = new_numbers.reshape(-1)
    # Tile the per-edge bond orders the same way.
    new_orders = np.empty((repeat, len(self.orders)), int)
    new_orders[:] = self.orders
    new_orders = new_orders.reshape(-1)
    # Symbols are a plain sequence, so sequence repetition suffices.
    new_symbols = None if self.symbols is None else self.symbols*repeat
    return MolecularGraph(new_edges, new_numbers, new_orders, new_symbols)
| 746,575
|
Return True only if the number of neighbors is correct
Arguments:
| ``index`` -- the index of the vertex/edge on which the criterion is
applied
| ``graph`` -- the graph on which the criterion is tested
|
def __call__(self, index, graph):
    """Return True only if vertex ``index`` has exactly ``self.count`` neighbors."""
    num_neighbors = len(graph.neighbors[index])
    return num_neighbors == self.count
| 746,581
|
Return True only if each neighbor can be linked with an atom number
Arguments:
| ``index`` -- the index of the vertex/edge on which the criterion is
applied
| ``graph`` -- the graph on which the criterion is tested
|
def __call__(self, index, graph):
    """Return True only if each neighbor can be linked with an atom number.

    Arguments:
     | ``index``  --  the index of the vertex/edge on which the criterion is applied
     | ``graph``  --  the graph on which the criterion is tested
    """
    neighbors = graph.neighbors[index]
    if len(neighbors) != len(self.numbers):
        return False
    # Compare the sorted neighbor atom numbers against the expected ones.
    observed = sorted(graph.numbers[neighbor] for neighbor in neighbors)
    return observed == self.numbers
| 746,583
|
Return True only if each neighbor can be linked with a positive criterion
Arguments:
| ``index`` -- the index of the vertex/edge on which the criterion is
applied
| ``graph`` -- the graph on which the criterion is tested
|
def __call__(self, index, graph):
    """Return True only if each neighbor can be linked with a positive criterion.

    Arguments:
     | ``index``  --  the index of the vertex/edge on which the criterion is applied
     | ``graph``  --  the graph on which the criterion is tested
    """
    def all_permutations(l):
        # Recursive generator over all orderings of l. Note that it yields
        # nothing for an empty list (preserved from the original).
        if len(l) == 1:
            yield l
            return
        for i in range(len(l)):
            for sub in all_permutations(l[:i]+l[i+1:]):
                yield [l[i]] + sub
    neighbors = graph.neighbors[index]
    if len(neighbors) != len(self.neighbor_criteria):
        # BUG FIX: the original returned a bare None here; return an explicit
        # boolean for consistency with the sibling criteria classes.
        return False
    # Consider all pairings of neighbors with criteria; one fully
    # satisfying permutation is enough for a match.
    for perm_neighbors in all_permutations(list(neighbors)):
        if all(crit(neighbor, graph)
               for neighbor, crit in zip(perm_neighbors, self.neighbor_criteria)):
            return True
    return False
| 746,585
|
Initialize a ArrayAttr object
Arguments:
``owner`` -- the instance to read the attribute from
``name`` -- the name of the attribute
|
def __init__(self, owner, name):
    """Initialize an ArrayAttr object.

    Arguments:
      ``owner``  --  the instance to read the attribute from
      ``name``  --  the name of the attribute
    """
    StateAttr.__init__(self, owner, name)
    # Structured (record) dtypes would need per-field handling, which is
    # not implemented, so refuse them up front.
    if self.get().dtype.fields is not None:
        raise ValueError("Record arrays are not supported yet.")
| 746,592
|
Register a new attribute to take care of with dump and load
Arguments:
| ``name`` -- the name to be used in the dump file
| ``AttrCls`` -- an attr class describing the attribute
|
def _register(self, name, AttrCls):
    """Register a new attribute to take care of with dump and load.

    Arguments:
     | ``name``  --  the name to be used in the dump file
     | ``AttrCls``  --  an attr class describing the attribute
    """
    if not issubclass(AttrCls, StateAttr):
        # BUG FIX: the original message ("must a StateAttr instance") was
        # ungrammatical and wrong -- the check is for a subclass, not an
        # instance.
        raise TypeError("The second argument must be a StateAttr subclass.")
    # Names are stored in a fixed-width field in the dump format.
    if len(name) > 40:
        raise ValueError("Name can count at most 40 characters.")
    self._fields[name] = AttrCls(self._owner, name)
| 746,597
|
Return the length of a bond between n1 and n2 of type bond_type
Arguments:
| ``n1`` -- the atom number of the first atom in the bond
| ``n2`` -- the atom number of the second atom the bond
Optional argument:
| ``bond_type`` -- the type of bond [default=BOND_SINGLE]
This is a safe method for querying a bond_length. If no answer can be
found, this get_length returns None.
|
def get_length(self, n1, n2, bond_type=BOND_SINGLE):
    """Return the length of a bond between n1 and n2 of type bond_type.

    Arguments:
     | ``n1``  --  the atom number of the first atom in the bond
     | ``n2``  --  the atom number of the second atom in the bond
    Optional argument:
     | ``bond_type``  --  the type of bond [default=BOND_SINGLE]

    This is a safe method for querying a bond length: when no answer can
    be found, None is returned instead of raising.
    """
    dataset = self.lengths.get(bond_type)
    if dataset is None:  # BUG FIX: identity test instead of == (PEP 8)
        return None
    # The pair is stored as a frozenset, so order of n1/n2 does not matter.
    return dataset.get(frozenset([n1, n2]))
| 746,606
|
Initialize a pair potential object
Arguments:
scaling -- symmetric NxN array with pairwise scaling factors.
When an element is set to zero, it will be excluded.
Optional argument:
coordinates -- the initial Cartesian coordinates of the system,
which can be updated with the update_coordinates
method
|
def __init__(self, scaling, coordinates=None):
    """Initialize a pair potential object.

    Arguments:
      scaling  --  symmetric NxN array with pairwise scaling factors;
                   a zero element excludes that pair
    Optional argument:
      coordinates  --  initial Cartesian coordinates of the system, which
                       can be refreshed later via update_coordinates
    """
    if coordinates is not None:
        self.update_coordinates(coordinates)
    self.scaling = scaling
    # Zero the diagonal in place so atoms never interact with themselves.
    num = len(self.scaling)
    self.scaling.ravel()[::num+1] = 0
| 746,625
|
Dump a single molecule to a CML file
Arguments:
| ``f`` -- a file-like object
| ``molecule`` -- a Molecule instance
|
def _dump_cml_molecule(f, molecule):
    """Dump a single molecule to a CML file.

    Arguments:
     | ``f``  --  a file-like object
     | ``molecule``  --  a Molecule instance
    """
    # Optional per-molecule metadata becomes XML attributes on <molecule>.
    extra = getattr(molecule, "extra", {})
    attr_str = " ".join("%s='%s'" % (key, value) for key, value in extra.items())
    f.write(" <molecule id='%s' %s>\n" % (molecule.title, attr_str))
    f.write(" <atomArray>\n")
    atoms_extra = getattr(molecule, "atoms_extra", {})
    # One <atom> element per atom; coordinates are converted to angstrom.
    for counter, number, coordinate in zip(range(molecule.size), molecule.numbers, molecule.coordinates/angstrom):
        atom_extra = atoms_extra.get(counter, {})
        attr_str = " ".join("%s='%s'" % (key, value) for key, value in atom_extra.items())
        f.write(" <atom id='a%i' elementType='%s' x3='%s' y3='%s' z3='%s' %s />\n" % (
            counter, periodic[number].symbol, coordinate[0], coordinate[1],
            coordinate[2], attr_str,
        ))
    f.write(" </atomArray>\n")
    # Bonds are only written when a bond graph is attached to the molecule.
    if molecule.graph is not None:
        bonds_extra = getattr(molecule, "bonds_extra", {})
        f.write(" <bondArray>\n")
        for edge in molecule.graph.edges:
            bond_extra = bonds_extra.get(edge, {})
            attr_str = " ".join("%s='%s'" % (key, value) for key, value in bond_extra.items())
            i1, i2 = edge
            f.write(" <bond atomRefs2='a%i a%i' %s />\n" % (i1, i2, attr_str))
        f.write(" </bondArray>\n")
    f.write(" </molecule>\n")
| 746,648
|
Write a list of molecules to a CML file
Arguments:
| ``f`` -- a filename of a CML file or a file-like object
| ``molecules`` -- a list of molecule objects.
|
def dump_cml(f, molecules):
    """Write a list of molecules to a CML file.

    Arguments:
     | ``f``  --  a filename of a CML file or a file-like object
     | ``molecules``  --  a list of molecule objects.
    """
    if isinstance(f, str):
        # BUG FIX: the original left the file open when writing a molecule
        # raised; a with-block guarantees the handle is closed.
        with open(f, "w") as opened:
            dump_cml(opened, molecules)
        return
    f.write("<?xml version='1.0'?>\n")
    f.write("<list xmlns='http://www.xml-cml.org/schema'>\n")
    for molecule in molecules:
        _dump_cml_molecule(f, molecule)
    f.write("</list>\n")
| 746,649
|
Read all the requested fields
Arguments:
| ``filename`` -- the filename of the FCHK file
| ``field_labels`` -- when given, only these fields are read
|
def _read(self, filename, field_labels=None):
    """Read all the requested fields from an FCHK file into self.fields.

    Arguments:
     | ``filename``  --  the filename of the FCHK file
     | ``field_labels``  --  when given (a set), only these fields are read;
                             entries are removed as they are found
    """
    # if fields is None, all fields are read
    def read_field(f):
        # Reads one field. Returns False to stop scanning (EOF or all
        # requested labels found), True to continue with the next field.
        datatype = None
        while datatype is None:
            # find a sane header line
            line = f.readline()
            if line == "":
                return False
            # The first 43 columns hold the field label in FCHK format.
            label = line[:43].strip()
            if field_labels is not None:
                if len(field_labels) == 0:
                    return False
                elif label not in field_labels:
                    return True
                else:
                    field_labels.discard(label)
            line = line[43:]
            words = line.split()
            if len(words) == 0:
                return True
            # 'I' marks integer data, 'R' marks real (float) data.
            if words[0] == 'I':
                datatype = int
                unreadable = 0
            elif words[0] == 'R':
                datatype = float
                unreadable = np.nan
        if len(words) == 2:
            # Scalar field: the value follows the type marker directly.
            try:
                value = datatype(words[1])
            except ValueError:
                return True
        elif len(words) == 3:
            # Array field: "<type> N= <length>" followed by data lines.
            if words[1] != "N=":
                raise FileFormatError("Unexpected line in formatted checkpoint file %s\n%s" % (filename, line[:-1]))
            length = int(words[2])
            value = np.zeros(length, datatype)
            counter = 0
            try:
                while counter < length:
                    line = f.readline()
                    if line == "":
                        raise FileFormatError("Unexpected end of formatted checkpoint file %s" % filename)
                    for word in line.split():
                        try:
                            value[counter] = datatype(word)
                        except (ValueError, OverflowError) as e:
                            # NOTE(review): this reads self.filename while the
                            # function receives `filename` -- confirm the
                            # attribute exists before this point.
                            print('WARNING: could not interpret word while reading %s: %s' % (word, self.filename))
                            if self.ignore_errors:
                                value[counter] = unreadable
                            else:
                                raise
                        counter += 1
            except ValueError:
                return True
        else:
            raise FileFormatError("Unexpected line in formatted checkpoint file %s\n%s" % (filename, line[:-1]))
        self.fields[label] = value
        return True
    self.fields = {}
    with open(filename, 'r') as f:
        # First line is the title, second line holds command/lot[/basis].
        self.title = f.readline()[:-1].strip()
        words = f.readline().split()
        if len(words) == 3:
            self.command, self.lot, self.basis = words
        elif len(words) == 2:
            self.command, self.lot = words
        else:
            raise FileFormatError('The second line of the FCHK file should contain two or three words.')
        # Keep reading fields until EOF or all requested labels are found.
        while read_field(f):
            pass
| 746,654
|
Add the contributions of this energy term to the Hessian
Arguments:
| ``coordinates`` -- A numpy array with 3N Cartesian coordinates.
| ``hessian`` -- A matrix for the full Hessian to which this energy
term has to add its contribution.
|
def add_to_hessian(self, coordinates, hessian):
    """Add the contributions of this energy term to the Hessian.

    Arguments:
     | ``coordinates``  --  A numpy array with 3N Cartesian coordinates.
     | ``hessian``  --  A matrix for the full Hessian to which this energy
                        term has to add its contribution.
    """
    # First derivatives of the internal coordinate with respect to the
    # Cartesian coordinates of the involved atoms (the value is unused).
    _, grad = self.icfn(coordinates[list(self.indexes)], 1)
    # The contribution is an outer product of gradient blocks, one 3x3
    # block per atom pair.
    for ja, ia in enumerate(self.indexes):
        for jb, ib in enumerate(self.indexes):
            block = 2*self.force_constant*numpy.outer(grad[ja], grad[jb])
            hessian[3*ia:3*ia+3, 3*ib:3*ib+3] += block
| 746,679
|
Compute the rotational symmetry number
Arguments:
| ``molecule`` -- The molecule
| ``graph`` -- The corresponding bond graph
Optional argument:
| ``threshold`` -- only when a rotation results in an rmsd below the
given threshold, the rotation is considered to
transform the molecule onto itself.
|
def compute_rotsym(molecule, graph, threshold=1e-3*angstrom):
    """Compute the rotational symmetry number.

    Arguments:
     | ``molecule``  --  The molecule
     | ``graph``  --  The corresponding bond graph
    Optional argument:
     | ``threshold``  --  a symmetry operation only counts when it maps the
                          molecule onto itself with an rmsd below this value
    """
    count = 0
    for match in graph.symmetries:
        # Turn the symmetry mapping into an index permutation.
        permutation = [j for i, j in sorted(match.forward.items())]
        permuted = molecule.coordinates[permutation]
        if fit_rmsd(molecule.coordinates, permuted)[2] < threshold:
            count += 1
    return count
| 746,684
|
Return a vector orthogonal to the given triangle
Arguments:
a, b, c -- three 3D numpy vectors
|
def triangle_normal(a, b, c):
    """Return the unit vector orthogonal to the triangle a-b-c.

    Arguments:
      a, b, c  --  three 3D numpy vectors
    """
    # Two edge vectors sharing vertex c span the triangle's plane.
    perpendicular = np.cross(a - c, b - c)
    return perpendicular / np.linalg.norm(perpendicular)
| 746,717
|
Compute the dot product
Arguments:
| ``r1``, ``r2`` -- two :class:`Vector3` objects
(Returns a Scalar)
|
def dot(r1, r2):
    """Compute the dot product of two :class:`Vector3` objects (returns a Scalar).

    Arguments:
     | ``r1``, ``r2``  --  two :class:`Vector3` objects
    """
    if r1.size != r2.size:
        raise ValueError("Both arguments must have the same input size.")
    if r1.deriv != r2.deriv:
        raise ValueError("Both arguments must have the same deriv.")
    # Componentwise products; operator overloading on the components
    # propagates any derivative bookkeeping automatically.
    return (r1.x*r2.x) + (r1.y*r2.y) + (r1.z*r2.z)
| 746,718
|
Compute the cross product
Arguments:
| ``r1``, ``r2`` -- two :class:`Vector3` objects
(Returns a Vector3)
|
def cross(r1, r2):
    """Compute the cross product of two :class:`Vector3` objects (returns a Vector3).

    Arguments:
     | ``r1``, ``r2``  --  two :class:`Vector3` objects
    """
    if r1.size != r2.size:
        raise ValueError("Both arguments must have the same input size.")
    if r1.deriv != r2.deriv:
        raise ValueError("Both arguments must have the same deriv.")
    product = Vector3(r1.size, r1.deriv)
    # Standard determinant expansion of the cross product.
    product.x = r1.y*r2.z - r1.z*r2.y
    product.y = r1.z*r2.x - r1.x*r2.z
    product.z = r1.x*r2.y - r1.y*r2.x
    return product
| 746,719
|
Construct a Jacobian for the given internal and Cartesian coordinates
Arguments:
| ``ics`` -- A list of internal coordinate objects.
| ``coordinates`` -- A numpy array with Cartesian coordinates,
shape=(N,3)
The return value will be a numpy array with the Jacobian matrix. There
will be a column for each internal coordinate, and a row for each
Cartesian coordinate (3*N rows).
|
def compute_jacobian(ics, coordinates):
    """Construct a Jacobian for the given internal and Cartesian coordinates.

    Arguments:
     | ``ics``  --  A list of internal coordinate objects.
     | ``coordinates``  --  A numpy array with Cartesian coordinates,
                            shape=(N,3)

    Returns a numpy array with one column per internal coordinate and one
    row per Cartesian coordinate (3*N rows).
    """
    jacobian = numpy.zeros((coordinates.size, len(ics)), float)
    for column, ic in enumerate(ics):
        # Each internal coordinate fills in its own column (a view).
        ic.fill_jacobian_column(jacobian[:, column], coordinates)
    return jacobian
| 746,755
|
Fill in a column of the Jacobian.
Arguments:
| ``jaccol`` -- The column of Jacobian to which the result must be
added.
| ``coordinates`` -- A numpy array with Cartesian coordinates,
shape=(N,3)
|
def fill_jacobian_column(self, jaccol, coordinates):
    """Add this coordinate's Cartesian gradient into a Jacobian column.

    Arguments:
     | ``jaccol``  --  The column of the Jacobian to which the result must
                       be added.
     | ``coordinates``  --  A numpy array with Cartesian coordinates,
                            shape=(N,3)
    """
    _, grad = self.icfn(coordinates[list(self.indexes)], 1)
    # Scatter each atom's 3-component gradient into its slot of the column.
    for local_index, atom_index in enumerate(self.indexes):
        jaccol[3*atom_index:3*atom_index+3] += grad[local_index]
    return jaccol
| 746,757
|
Initialize a similarity descriptor
Arguments:
distance_matrix -- a matrix with interatomic distances, this can
also be distances in a graph
labels -- a list with integer labels used to identify atoms of
the same type
|
def __init__(self, distance_matrix, labels):
    """Initialize a similarity descriptor.

    Arguments:
      distance_matrix  --  a matrix with interatomic distances (may also be
                           distances in a graph)
      labels  --  a list with integer labels used to identify atoms of the
                  same type
    """
    self.table_distances = similarity_table_distances(distance_matrix.astype(float))
    self.table_labels = similarity_table_labels(labels.astype(int))
    # BUG FIX: removed a leftover debug print of the input sizes.
    # Sort the rows by (label1, label2) so descriptors can be compared
    # pairwise in a canonical order.
    order = np.lexsort([self.table_labels[:, 1], self.table_labels[:, 0]])
    self.table_labels = self.table_labels[order]
    self.table_distances = self.table_distances[order]
| 746,762
|
Initialize a similarity descriptor
Arguments:
molecule -- a Molecules object
labels -- a list with integer labels used to identify atoms of
the same type. When not given, the atom numbers from
the molecule are used.
|
def from_molecule(cls, molecule, labels=None):
    """Initialize a similarity descriptor from a Molecule.

    Arguments:
      molecule  --  a Molecule object
      labels  --  a list with integer labels used to identify atoms of the
                  same type; defaults to the molecule's atom numbers
    """
    effective_labels = molecule.numbers if labels is None else labels
    return cls(molecule.distance_matrix, effective_labels)
| 746,763
|
Initialize a similarity descriptor
Arguments:
molecular_graphs -- A MolecularGraphs object
labels -- a list with integer labels used to identify atoms of
the same type. When not given, the atom numbers from
the molecular graph are used.
|
def from_molecular_graph(cls, molecular_graph, labels=None):
    """Initialize a similarity descriptor from a MolecularGraph.

    Arguments:
      molecular_graph  --  a MolecularGraph object
      labels  --  a list with integer labels used to identify atoms of the
                  same type; defaults to the graph's atom numbers
    """
    effective_labels = molecular_graph.numbers.astype(int) if labels is None else labels
    return cls(molecular_graph.distances, effective_labels)
| 746,764
|
Initialize a similarity descriptor
Arguments:
coordinates -- a Nx3 numpy array
labels -- a list with integer labels used to identify atoms of
the same type
|
def from_coordinates(cls, coordinates, labels):
    """Initialize a similarity descriptor from raw coordinates.

    Arguments:
      coordinates  --  a Nx3 numpy array
      labels  --  a list with integer labels used to identify atoms of the
                  same type
    """
    from molmod.ext import molecules_distance_matrix
    # Derive the pairwise distances, then delegate to the main constructor.
    return cls(molecules_distance_matrix(coordinates), labels)
| 746,765
|
Initialize a new MolecularDistortion object
Arguments:
affected_atoms -- a list of atoms that undergo the transformation
transformation -- a transformation object
|
def __init__(self, affected_atoms, transformation):
    """Initialize a new MolecularDistortion object.

    Arguments:
      affected_atoms  --  a list of atoms that undergo the transformation
      transformation  --  a transformation object
    """
    self.affected_atoms = affected_atoms
    # Normalize whatever transformation type was given to a Complete one.
    self.transformation = Complete.cast(transformation)
| 746,790
|
Initialize a RandomManipulation object
Arguments:
affected_atoms -- a list of atoms that undergo the transformation
max_amplitude -- the maximum displacement (unit depends on
actual implementation)
hinge_atoms -- atoms that are invariant under the transformation
|
def __init__(self, affected_atoms, max_amplitude, hinge_atoms):
    """Initialize a RandomManipulation object.

    Arguments:
      affected_atoms  --  a list of atoms that undergo the transformation
      max_amplitude  --  the maximum displacement (unit depends on the
                         actual implementation)
      hinge_atoms  --  atoms that are invariant under the transformation
    """
    # Each concrete manipulation declares how many hinge atoms it needs.
    if len(hinge_atoms) != self.num_hinge_atoms:
        raise ValueError("The number of hinge atoms must be %i, got %i." % (
            self.num_hinge_atoms, len(hinge_atoms)
        ))
    self.affected_atoms = affected_atoms
    self.max_amplitude = max_amplitude
    self.hinge_atoms = hinge_atoms
| 746,793
|
Create transformation that represents a rotation about an axis
Arguments:
| ``center`` -- Point on the axis
| ``angle`` -- Rotation angle
| ``axis`` -- Rotation axis
| ``invert`` -- When True, an inversion rotation is constructed
[default=False]
|
def about_axis(cls, center, angle, axis, invert=False):
    """Create a transformation representing a rotation about an axis.

    Arguments:
     | ``center``  --  Point on the axis
     | ``angle``  --  Rotation angle
     | ``axis``  --  Rotation axis
     | ``invert``  --  When True, an inversion rotation is constructed
                       [default=False]
    """
    # Move the axis point to the origin, rotate there, then move back.
    to_origin = Translation(-center)
    rotation = Rotation.from_properties(angle, axis, invert)
    from_origin = Translation(center)
    return from_origin * rotation * to_origin
| 746,886
|
Dump a frame to the trajectory file
Arguments:
| ``title`` -- the title of the frame
| ``coordinates`` -- a numpy array with coordinates in atomic units
|
def dump(self, title, coordinates):
    """Dump a frame to the trajectory file.

    Arguments:
     | ``title``  --  the title of the frame
     | ``coordinates``  --  a numpy array with coordinates in atomic units
    """
    # XYZ frame layout: atom count, title line, one line per atom.
    print("% 8i" % len(self.symbols), file=self._f)
    print(str(title), file=self._f)
    for symbol, coordinate in zip(self.symbols, coordinates):
        fields = (symbol,) + tuple(coordinate/self.file_unit)
        print("% 2s % 12.9f % 12.9f % 12.9f" % fields, file=self._f)
| 746,910
|
Efficiently test if counter is in ``xrange(*sub)``
Arguments:
| ``sub`` -- a slice object
| ``counter`` -- an integer
The function returns True if the counter is in
``xrange(sub.start, sub.stop, sub.step)``.
|
def slice_match(sub, counter):
    """Efficiently test if counter is in ``xrange(*sub)``.

    Arguments:
     | ``sub``  --  a slice object
     | ``counter``  --  an integer

    Returns True when counter lies in xrange(sub.start, sub.stop, sub.step).
    Raises StopIteration once counter reaches sub.stop, so iterating
    callers can terminate early.
    """
    start, stop, step = sub.start, sub.stop, sub.step
    if start is not None and counter < start:
        return False
    if stop is not None and counter >= stop:
        # Past the end of the slice: no later counter can match either.
        raise StopIteration
    if step is not None:
        # Phase is measured from start when given, from zero otherwise.
        offset = counter if start is None else counter - start
        if offset % step != 0:
            return False
    return True
| 746,914
|
The actual wrapper around the function call.
Arguments:
| ``x_prec`` -- the unknowns in preconditioned coordinates
| ``do_gradient`` -- if True, the gradient is also computed and
transformed to preconditioned coordinates
Note that this implementation assumes that the preconditioner is a
linear transformation.
|
def __call__(self, x_prec, do_gradient=False):
    """Evaluate the wrapped function in preconditioned coordinates.

    Arguments:
     | ``x_prec``  --  the unknowns in preconditioned coordinates
     | ``do_gradient``  --  if True, the gradient is also computed and
                            transformed to preconditioned coordinates

    Note that this implementation assumes the preconditioner is a linear
    transformation.
    """
    x = self.undo(x_prec)
    if not do_gradient:
        return self.fun(x)
    f, g = self.fun(x, do_gradient=True)
    # For a linear preconditioner the gradient transforms with the same map.
    return f, self.undo(g)
| 746,935
|
Configure the 1D function for a line search
Arguments:
x0 -- the reference point (q=0)
axis -- a unit vector in the direction of the line search
|
def configure(self, x0, axis):
    """Configure the 1D function for a line search.

    Arguments:
      x0  --  the reference point (q=0)
      axis  --  a unit vector in the direction of the line search
    """
    # Simply record the search line; evaluation happens elsewhere.
    self.x0 = x0
    self.axis = axis
| 746,948
|
Compute the values and the normals (gradients) of active constraints.
Arguments:
| ``x`` -- The unknowns.
|
def _compute_equations(self, x, verbose=False):
    """Compute the values and the normals (gradients) of active constraints.

    Arguments:
     | ``x``  --  The unknowns.

    Returns (normals, values, error, signs) where error is the square root
    of the sum of squared active constraint values. A constraint is active
    when it is locked, an equality (sign 0), or a violated inequality.
    """
    # compute the error and the normals.
    normals = []
    values = []
    signs = []
    error = 0.0
    if verbose:
        print()
        print(' '.join('% 10.3e' % val for val in x), end=' ')
        active_str = ''
    for i, (sign, equation) in enumerate(self.equations):
        value, normal = equation(x)
        # Active when: previously locked, or sign -1 (<= 0) is violated,
        # or an equality, or sign +1 (>= 0) is violated (within threshold).
        if (i < len(self.lock) and self.lock[i]) or \
           (sign==-1 and value > -self.threshold) or \
           (sign==0) or (sign==1 and value < self.threshold):
            values.append(value)
            normals.append(normal)
            signs.append(sign)
            error += value**2
            if verbose:
                active_str += 'X'
            # Once active, a constraint stays locked for this shake pass.
            if i < len(self.lock):
                self.lock[i] = True
        elif verbose:
            active_str += '-'
    error = np.sqrt(error)
    normals = np.array(normals, float)
    values = np.array(values, float)
    signs = np.array(signs, int)
    if verbose:
        print('[%s]' % active_str, end=' ')
        if error < self.threshold:
            print('OK')
        else:
            print('%.5e' % error)
    return normals, values, error, signs
| 746,953
|
Take a robust, but not very efficient step towards the constraints.
Arguments:
| ``x`` -- The unknowns.
| ``normals`` -- A numpy array with the gradients of the active
constraints. Each row is one gradient.
| ``values`` -- A numpy array with the values of the constraint
functions.
| ``error`` -- The square root of the constraint cost function.
|
def _rough_shake(self, x, normals, values, error):
    """Take a robust, but not very efficient, step towards the constraints.

    Arguments:
     | ``x``  --  The unknowns.
     | ``normals``  --  A numpy array with the gradients of the active
                        constraints. Each row is one gradient.
     | ``values``  --  A numpy array with the values of the constraint
                       functions.
     | ``error``  --  The square root of the constraint cost function.

    Returns the updated (x, normals, values, error).
    """
    counter = 0
    while error > self.threshold and counter < self.max_iter:
        # Newton-like correction step for each constraint individually.
        dxs = []
        for i in range(len(normals)):
            dx = -normals[i]*values[i]/np.dot(normals[i], normals[i])
            dxs.append(dx)
        dxs = np.array(dxs)
        # Only apply the step of the most violated constraint per iteration.
        dx = dxs[abs(values).argmax()]
        x = x+dx
        # Re-evaluate with a fresh activity set (drop the third return value).
        self.lock[:] = False
        normals, values, error = self._compute_equations(x)[:-1]
        counter += 1
    return x, normals, values, error
| 746,954
|
Take an efficient (not always robust) step towards the constraints.
Arguments:
| ``x`` -- The unknowns.
| ``normals`` -- A numpy array with the gradients of the active
constraints. Each row is one gradient.
| ``values`` -- A numpy array with the values of the constraint
functions.
| ``error`` -- The square root of the constraint cost function.
|
def _fast_shake(self, x, normals, values, error):
    """Take an efficient (not always robust) step towards the constraints.

    Arguments:
     | ``x``  --  The unknowns.
     | ``normals``  --  A numpy array with the gradients of the active
                        constraints. Each row is one gradient.
     | ``values``  --  A numpy array with the values of the constraint
                       functions.
     | ``error``  --  The square root of the constraint cost function.

    Returns (new_x, new_normals, new_values, new_error) on success, or
    None (implicitly, by falling off the loop) when the step became too
    small -- free_shake relies on that None to fall back to _rough_shake.
    """
    # filter out the degrees of freedom that do not feel the constraints.
    mask = (normals!=0).any(axis=0) > 0
    normals = normals[:,mask]
    # Take a step to lower the constraint cost function. If the step is too
    # large, it is reduced iteratively towards a small steepest descent
    # step. This is very similar to the Levenberg-Marquardt algorithm.
    # Singular Value Decomposition is used to make this procedure
    # numerically more stable and efficient.
    U, S, Vt = np.linalg.svd(normals, full_matrices=False)
    rcond = None
    counter = 0
    while True:
        # Regularization schedule: exact solve first, then increasingly
        # damped (steepest-descent-like) steps.
        if rcond is None:
            rcond = 0.0
        elif rcond == 0.0:
            rcond = self.rcond1
        else:
            rcond *= 10
        # perform the least-norm correction
        Sinv = (S**2+rcond)
        if Sinv.max() == 0.0:
            continue
        Sinv = S/Sinv
        # compute the step
        dx = -np.dot(Vt.transpose(), np.dot(U.transpose(), values)*Sinv)
        new_x = x.copy()
        new_x[mask] += dx
        # try the step
        new_normals, new_values, new_error = self._compute_equations(new_x)[:-1]
        if new_error < 0.9*error:
            # Only if it decreases the constraint cost sufficiently is the
            # step accepted. This routine is pointless if it converges
            # slowly.
            return new_x, new_normals, new_values, new_error
        elif abs(dx).sum() < self.threshold:
            # If the step becomes too small, then give up.
            break
        elif counter > self.max_iter:
            raise ConstraintError('Exceeded maximum number of shake iterations.')
        counter += 1
| 746,955
|
Brings unknowns to the constraints.
Arguments:
| ``x`` -- The unknowns.
|
def free_shake(self, x):
    """Bring the unknowns onto the constraints.

    Arguments:
     | ``x``  --  The unknowns.

    Returns (x, counter, number_of_active_constraints). Raises
    ConstraintError when convergence takes too many iterations.
    """
    # Start with a fresh activity set.
    self.lock[:] = False
    normals, values, error = self._compute_equations(x)[:-1]
    counter = 0
    while True:
        if error <= self.threshold:
            break
        # try a well-behaved move to the constraints
        result = self._fast_shake(x, normals, values, error)
        counter += 1
        if result is not None:
            x, normals, values, error = result
        else:
            # well-behaved move is too slow.
            # do a cumbersome move to satisfy constraints approximately.
            x, normals, values, error = self._rough_shake(x, normals, values, error)
            counter += 1
        # When too many iterations are required, just give up.
        if counter > self.max_iter:
            raise ConstraintError('Exceeded maximum number of shake iterations.')
    return x, counter, len(values)
| 746,956
|
Project a vector (gradient or direction) on the active constraints.
Arguments:
| ``x`` -- The unknowns.
| ``vector`` -- A numpy array with a direction or a gradient.
The return value is a gradient or direction, where the components
that point away from the constraints are projected out. In case of
half-open constraints, the projection is only active of the vector
points into the infeasible region.
|
def project(self, x, vector):
    """Project a vector (gradient or direction) on the active constraints.

    Arguments:
     | ``x``  --  The unknowns.
     | ``vector``  --  A numpy array with a direction or a gradient.

    The return value is a gradient or direction where the components that
    point away from the constraints are projected out. For half-open
    constraints, the projection is only active when the vector points into
    the infeasible region.
    """
    # Work on a unit vector; the norm is restored at the end.
    scale = np.linalg.norm(vector)
    if scale == 0.0:
        return vector
    self.lock[:] = False
    # [::3] picks normals and signs out of (normals, values, error, signs).
    normals, signs = self._compute_equations(x)[::3]
    if len(normals) == 0:
        return vector
    vector = vector/scale
    # Equality constraints (sign 0) are always part of the projection.
    mask = signs == 0
    result = vector.copy()
    changed = True
    counter = 0
    while changed:
        changed = False
        y = np.dot(normals, result)
        for i, sign in enumerate(signs):
            if sign != 0:
                # Activate a half-open constraint when the vector pushes
                # into the infeasible side; deactivate when the projection
                # no longer requires it.
                if sign*y[i] < -self.threshold:
                    mask[i] = True
                    changed = True
                elif mask[i] and np.dot(normals[i], result-vector) < 0:
                    mask[i] = False
                    changed = True
        if mask.any():
            # Project out the selected normals via SVD, regularized when
            # the normals are (nearly) linearly dependent.
            normals_select = normals[mask]
            y = np.dot(normals_select, vector)
            U, S, Vt = np.linalg.svd(normals_select, full_matrices=False)
            if S.min() == 0.0:
                Sinv = S/(S**2+self.rcond1)
            else:
                Sinv = 1.0/S
            result = vector - np.dot(Vt.transpose(), np.dot(U.transpose(), y)*Sinv)
        else:
            result = vector.copy()
        if counter > self.max_iter:
            raise ConstraintError('Exceeded maximum number of shake iterations.')
        counter += 1
    return result*scale
| 746,958
|
Construct a graph that repeats this graph a number of times
Arguments:
| ``repeat`` -- The number of repetitions.
|
def __mul__(self, repeat):
    """Construct a graph that repeats this graph a number of times.

    Arguments:
     | ``repeat``  --  The number of repetitions.
    """
    if not isinstance(repeat, int):
        raise TypeError("Can only multiply a graph with an integer")
    # Each copy gets its own vertex index range via a per-copy offset.
    new_edges = [
        frozenset([v1 + copy*self.num_vertices, v2 + copy*self.num_vertices])
        for copy in range(repeat)
        for v1, v2 in self.edges
    ]
    return Graph(new_edges, self.num_vertices*repeat)
| 746,968
|
Evaluates all the criteria and applies an OR operation
Arguments:
| ``index`` -- the index of the vertex/edge on which the criterion
is applied
| ``graph`` -- the graph on which the criterion is tested
|
def __call__(self, index, graph):
    """Evaluate all the criteria and apply an OR operation.

    Arguments:
     | ``index``  --  the index of the vertex/edge on which the criterion
                      is applied
     | ``graph``  --  the graph on which the criterion is tested
    """
    # any() short-circuits on the first satisfied criterion.
    return any(criterion(index, graph) for criterion in self.criteria)
| 747,001
|
Evaluates all the criteria and applies a generalized XOR operation
Arguments:
| ``index`` -- the index of the vertex/edge on which the criterion
is applied
| ``graph`` -- the graph on which the criterion is tested
when the XOR operation is applied to more than two criteria, True
is only returned when an odd number of criteria return True.
|
def __call__(self, index, graph):
    """Evaluate all the criteria and apply a generalized XOR operation.

    With more than two criteria, True is only returned when an odd number
    of criteria return True.

    Arguments:
     | ``index``  --  the index of the vertex/edge on which the criterion
                      is applied
     | ``graph``  --  the graph on which the criterion is tested
    """
    matches = sum(1 for criterion in self.criteria if criterion(index, graph))
    return matches % 2 == 1
| 747,002
|
Iterator over all matches of self.pattern in the given graph.
Arguments:
| subject_graph -- The subject_graph in which the matches
according to self.pattern have to be found.
| one_match -- If True, only one match will be returned. This
allows certain optimizations.
|
def __call__(self, subject_graph, one_match=False):
    """Iterate over all matches of self.pattern in the given graph.

    Arguments:
     | ``subject_graph``  --  The graph in which the matches according to
                              self.pattern have to be found.
     | ``one_match``  --  If True, only one match will be returned. This
                          allows certain optimizations.
    """
    # Matches are grown iteratively.
    for vertex0, vertex1 in self.pattern.iter_initial_relations(subject_graph):
        init_match = self.pattern.MatchClass.from_first_relation(vertex0, vertex1)
        # init_match contains only one source -> dest relation. Starting from
        # this initial match, the function _iter_matches extends the match
        # in all possible ways and yields the completed matches.
        for canonical_match in self._iter_matches(init_match, subject_graph, one_match):
            # Some patterns may exclude symmetrically equivalent matches to
            # avoid duplicates. With such a 'canonical' solution, the
            # pattern is allowed to generate just those symmetrical
            # duplicates of interest.
            ifm = self.pattern.iter_final_matches(canonical_match, subject_graph, one_match)
            for final_match in ifm:
                self.print_debug("final_match: %s" % final_match)
                yield final_match
                if one_match: return
| 747,018
|
Parse incoming request and return an email instance.
Args:
request: an HttpRequest object, containing the forwarded email, as
per the SendGrid specification for inbound emails.
Returns:
an EmailMultiAlternatives instance, containing the parsed contents
of the inbound email.
TODO: non-UTF8 charset handling.
TODO: handle headers.
|
def parse(self, request):
    """Parse an inbound SendGrid request and return an email instance.

    Args:
        request: an HttpRequest object, containing the forwarded email, as
            per the SendGrid specification for inbound emails.

    Returns:
        an EmailMultiAlternatives instance, containing the parsed contents
            of the inbound email.

    TODO: non-UTF8 charset handling.
    TODO: handle headers.
    """
    assert isinstance(request, HttpRequest), "Invalid request type: %s" % type(request)
    try:
        # from_email should never be a list (unless we change our API)
        from_email = self._get_addresses([_decode_POST_value(request, 'from')])[0]
        # ...but all these can and will be a list
        to_email = self._get_addresses([_decode_POST_value(request, 'to')])
        cc = self._get_addresses([_decode_POST_value(request, 'cc', default='')])
        bcc = self._get_addresses([_decode_POST_value(request, 'bcc', default='')])
        subject = _decode_POST_value(request, 'subject')
        text = _decode_POST_value(request, 'text', default='')
        html = _decode_POST_value(request, 'html', default='')
    except IndexError:
        # BUG FIX: HttpRequest has no .get(); the raw 'from' value lives in
        # the POST dict.
        raise RequestParseError(
            "Inbound request lacks a valid from address: %s." % request.POST.get('from')
        )
    except MultiValueDictKeyError as ex:
        raise RequestParseError("Inbound request is missing required value: %s." % ex)
    if "@" not in from_email:
        # Light sanity check for potential issues related to taking just the
        # first element of the 'from' address list
        raise RequestParseError("Could not get a valid from address out of: %s." % request)
    email = EmailMultiAlternatives(
        subject=subject,
        body=text,
        from_email=from_email,
        to=to_email,
        cc=cc,
        bcc=bcc,
    )
    if html is not None and len(html) > 0:
        email.attach_alternative(html, "text/html")
    # TODO: this won't cope with big files - should really read in chunks
    for n, f in list(request.FILES.items()):
        if f.size > self.max_file_size:
            logger.debug(
                "File attachment %s is too large to process (%sB)",
                f.name,
                f.size
            )
            raise AttachmentTooLargeError(
                email=email,
                filename=f.name,
                size=f.size
            )
        else:
            email.attach(f.name, f.read(), f.content_type)
    return email
| 747,995
|
Parse incoming request and return an email instance.
Args:
request: an HttpRequest object, containing a list of forwarded emails, as
per Mandrill specification for inbound emails.
Returns:
a list of EmailMultiAlternatives instances
|
def parse(self, request):
    """Parse an inbound Mandrill request and return a list of emails.

    Args:
        request: an HttpRequest object, containing a list of forwarded
            emails, as per the Mandrill specification for inbound emails.

    Returns:
        a list of EmailMultiAlternatives instances.
    """
    assert isinstance(request, HttpRequest), "Invalid request type: %s" % type(request)
    # Optional request-signature verification, enabled via settings.
    if settings.INBOUND_MANDRILL_AUTHENTICATION_KEY:
        _check_mandrill_signature(
            request=request,
            key=settings.INBOUND_MANDRILL_AUTHENTICATION_KEY,
        )
    try:
        messages = json.loads(request.POST['mandrill_events'])
    except (ValueError, KeyError) as ex:
        raise RequestParseError("Request is not a valid json: %s" % ex)
    if not messages:
        logger.debug("No messages found in mandrill request: %s", request.body)
        return []
    emails = []
    for message in messages:
        # Mandrill can post other event types; only inbound mail is parsed.
        if message.get('event') != 'inbound':
            logger.debug("Discarding non-inbound message")
            continue
        msg = message.get('msg')
        try:
            from_email = msg['from_email']
            to = list(self._get_recipients(msg['to']))
            cc = list(self._get_recipients(msg['cc'])) if 'cc' in msg else []
            bcc = list(self._get_recipients(msg['bcc'])) if 'bcc' in msg else []
            subject = msg.get('subject', "")
            # Inline images are treated as additional attachments.
            attachments = msg.get('attachments', {})
            attachments.update(msg.get('images', {}))
            text = msg.get('text', "")
            html = msg.get('html', "")
        except (KeyError, ValueError) as ex:
            raise RequestParseError(
                "Inbound request is missing or got an invalid value.: %s." % ex
            )
        email = EmailMultiAlternatives(
            subject=subject,
            body=text,
            from_email=self._get_sender(
                from_email=from_email,
                from_name=msg.get('from_name'),
            ),
            to=to,
            cc=cc,
            bcc=bcc,
        )
        if html is not None and len(html) > 0:
            email.attach_alternative(html, "text/html")
        email = self._process_attachments(email, attachments)
        emails.append(email)
    return emails
| 748,001
|
Initialize Paystack Request object for browsing resource.
Args:
api_url: str
headers: dict
|
def __init__(self, api_url='https://api.paystack.co/',
             headers=None):
    """Initialize a Paystack Request object for browsing resources.

    Args:
        api_url: str, base URL of the Paystack API.
        headers: dict, default headers sent with every request.
    """
    # The original used '{api_url}'.format(**locals()), which is just an
    # obfuscated identity operation on the string argument.
    self.API_BASE_URL = api_url
    self.headers = headers
| 748,347
|
Perform a method on a resource.
Args:
method: requests.`method`
resource_uri: resource endpoint
Raises:
HTTPError
Returns:
JSON Response
|
def _request(self, method, resource_uri, **kwargs):
    """Perform a method on a resource.

    Args:
        method: requests.`method` callable (get/post/put).
        resource_uri: resource endpoint.
    Raises:
        HTTPError: when the response status indicates failure.
    Returns:
        JSON response payload.
    """
    payload = kwargs.get('data')
    response = method(
        self.API_BASE_URL + resource_uri,
        json=payload,
        headers=self.headers,
    )
    # Surface HTTP errors to the caller before decoding the body.
    response.raise_for_status()
    return response.json()
| 748,348
|
Get a resource.
Args:
endpoint: resource endpoint.
|
def get(self, endpoint, **kwargs):
    """Fetch a resource.

    Args:
        endpoint: resource endpoint.
    """
    # Delegate to the shared request helper with the HTTP verb callable.
    return self._request(requests.get, endpoint, **kwargs)
| 748,349
|
Create a resource.
Args:
endpoint: resource endpoint.
|
def post(self, endpoint, **kwargs):
    """Create a resource.

    Args:
        endpoint (str): Resource endpoint path.
    """
    http_post = requests.post
    return self._request(http_post, endpoint, **kwargs)
| 748,350
|
Update a resource.
Args:
endpoint: resource endpoint.
|
def put(self, endpoint, **kwargs):
    """Update a resource.

    Args:
        endpoint (str): Resource endpoint path.
    """
    http_put = requests.put
    return self._request(http_put, endpoint, **kwargs)
| 748,351
|
Static method defined to update paystack customer data by id.
Args:
customer_id: paystack customer id.
first_name: customer's first name(optional).
last_name: customer's last name(optional).
email: customer's email address(optional).
phone:customer's phone number(optional).
Returns:
Json data from paystack API.
|
def update(cls, customer_id, **kwargs):
    """Update a Paystack customer record by id.

    Args:
        customer_id: Paystack customer id.
        **kwargs: Optional fields to update (first_name, last_name,
            email, phone, ...).

    Returns:
        dict: JSON data from the Paystack API.
    """
    # Explicit keyword formatting instead of the fragile
    # str.format(**locals()) idiom.
    return cls().requests.put(
        'customer/{customer_id}'.format(customer_id=customer_id),
        data=kwargs)
| 748,352
|
Reentrenar parcialmente un clasificador SVM.
Args:
name (str): Nombre para el clasificador.
ids (list): Se espera una lista de N ids de textos ya almacenados
en el TextClassifier.
labels (list): Se espera una lista de N etiquetas. Una por cada id
de texto presente en ids.
Nota:
Usa el clasificador de `Scikit-learn <http://scikit-learn.org/>`_
|
def retrain(self, name, ids, labels):
    """Partially re-train one of the stored SVM classifiers.

    Args:
        name (str): Attribute name of the classifier on this instance.
        ids (list): Ids of N texts already stored in the TextClassifier.
        labels (list or str): One label per id; a single string is
            treated as a one-element list.

    Raises:
        ValueError: If any id is not already stored.
        AttributeError: If no classifier with that name exists.
    """
    stored = np.in1d(ids, self.ids)
    if not stored.all():
        raise ValueError("Hay ids de textos que no se encuentran \
almacenados.")
    try:
        clf = getattr(self, name)
    except AttributeError:
        raise AttributeError("No hay ningun clasificador con ese nombre.")
    mask = np.in1d(self.ids, ids)
    if isinstance(labels, str):
        labels = [labels]
    clf.partial_fit(self.tfidf_mat[mask, :], labels)
| 748,505
|
Calcula los vectores de terminos de textos y los almacena.
A diferencia de :func:`~TextClassifier.TextClassifier.store_text` esta
funcion borra cualquier informacion almacenada y comienza el conteo
desde cero. Se usa para redefinir el vocabulario sobre el que se
construyen los vectores.
Args:
texts (list): Una lista de N textos a incorporar.
ids (list): Una lista de N ids alfanumericos para los textos.
|
def reload_texts(self, texts, ids, vocabulary=None):
    """Recompute and store the term vectors from scratch.

    Unlike ``store_text`` this drops any stored information and restarts
    the counts, redefining the vocabulary the vectors are built on.

    Args:
        texts (list): N texts to incorporate.
        ids (list): N alphanumeric ids for the texts.
        vocabulary: Optional vocabulary to force on the vectorizer.
    """
    self._check_id_length(ids)
    self.ids = np.array(sorted(ids))
    if vocabulary:
        self.vectorizer.vocabulary = vocabulary
    # Keep texts aligned with the sorted id order.
    ordered_texts = [text for _, text in sorted(zip(ids, texts))]
    self.term_mat = self.vectorizer.fit_transform(ordered_texts)
    self._update_tfidf()
| 748,509
|
Create a new release branch.
Args:
component (str):
Version component to bump when creating the release. Can be *major*,
*minor* or *patch*.
exact (str):
The exact version to set for the release. Overrides the component
argument. This allows to re-release a version if something went
wrong with the release upload.
|
def start(component, exact):
    # type: (str, str) -> None
    """Create a new release branch off develop.

    Bumps the package version, creates a ``release/<version>`` branch and
    commits the updated version file. Exits with an error if there are
    uncommitted (tracked) changes.

    Args:
        component (str): Version component to bump: *major*, *minor* or
            *patch*.
        exact (str): Exact version to set; overrides *component*.
    """
    version_file = conf.get_path('version_file', 'VERSION')
    develop = conf.get('git.devel_branch', 'develop')
    common.assert_on_branch(develop)
    with conf.within_proj_dir():
        out = shell.run('git status --porcelain', capture=True).stdout
        lines = out.split(os.linesep)
        # Untracked files ('??') are allowed; anything else blocks the release.
        has_changes = any(
            not l.startswith('??') for l in lines if l.strip()
        )
    if has_changes:
        log.info("Cannot release: there are uncommitted changes")
        exit(1)
    old_ver, new_ver = versioning.bump(component, exact)
    log.info("Bumping package version")
    log.info(" old version: <35>{}".format(old_ver))
    log.info(" new version: <35>{}".format(new_ver))
    with conf.within_proj_dir():
        branch = 'release/' + new_ver
        common.git_checkout(branch, create=True)
        log.info("Creating commit for the release")
        # Only the version file is staged; the commit records the bump.
        shell.run('git add {ver_file} && git commit -m "{msg}"'.format(
            ver_file=version_file,
            msg="Releasing v{}".format(new_ver)
        ))
| 749,066
|
Lint python files.
Args:
exclude (list[str]):
A list of glob string patterns to test against. If the file/path
matches any of those patters, it will be filtered out.
skip_untracked (bool):
If set to **True** it will skip all files not tracked by git.
commit_only (bool):
Only lint files that are staged for commit.
|
def lint(exclude, skip_untracked, commit_only):
    # type: (List[str], bool, bool) -> None
    """Lint python files, exiting non-zero on failure.

    Args:
        exclude (list[str]): Glob patterns; matching files are skipped.
            Merged with the ``lint.exclude`` config value.
        skip_untracked (bool): Skip files not tracked by git.
        commit_only (bool): Only lint files staged for commit.
    """
    patterns = list(exclude) + conf.get('lint.exclude', [])
    runner = LintRunner(patterns, skip_untracked, commit_only)
    if not runner.run():
        exit(1)
| 749,077
|
Decorator for defining lint tools.
Args:
name (str):
The name of the tool. This name will be used to identify the tool
in `pelconf.yaml`.
|
def tool(name):
    # type: (str) -> FunctionType
    """Decorator registering a lint tool under *name*.

    The registered name identifies the tool in `pelconf.yaml`.

    Args:
        name (str): The name of the tool.
    """
    global g_tools

    def decorator(fn):  # pylint: disable=missing-docstring
        # type: (FunctionType) -> FunctionType
        # Register and hand the function back unchanged.
        g_tools[name] = fn
        return fn

    return decorator
| 749,078
|
Run code checks using pylint.
Args:
files (list[str]):
A list of files to check
Returns:
bool: **True** if all files passed the checks, **False** otherwise.
|
def pylint_check(files):
    # type: (List[str]) -> int
    """Run pylint over *files*.

    Args:
        files (list[str]): Files to check.

    Returns:
        int: The pylint return code (0 means all checks passed).
    """
    rcfile = conf.get_path('lint.pylint_cfg', 'ops/tools/pylint.ini')
    cmd = 'pylint --rcfile {} {}'.format(rcfile, fs.wrap_paths(files))
    return shell.run(cmd, exit_on_error=False).return_code
| 749,080
|
Add a new property to the app (with setattr)
Args:
name (str): the name of the new property
value (any): the value of the new property
|
def enrich_app(self, name, value):
    """Attach a new attribute to the app via setattr.

    Bound methods are rejected because attributes added this way would
    not be bound: https://stackoverflow.com/a/28060251/3042398

    Args:
        name (str): Name of the new property.
        value: Value of the new property (anything but a method).

    Raises:
        ValueError: If *value* is a bound method.
    """
    # Intentional exact-type comparison against a bound-method object.
    if type(value) == type(self.enrich_app):
        raise ValueError("enrich_app can't add method")
    setattr(self.app, name, value)
| 749,094
|
Bump the given version component.
Args:
component (str):
What part of the version should be bumped. Can be one of:
- major
- minor
- patch
exact (str):
The exact version that should be set instead of bumping the current
one.
Returns:
tuple(str, str): A tuple of old and bumped version.
|
def bump(component='patch', exact=None):
    # type: (str, str) -> Tuple[str, str]
    """Bump the project version and persist it.

    Args:
        component (str): Which part to bump: *major*, *minor* or *patch*.
        exact (str): Exact version to set instead of bumping; useful to
            re-release after a failed upload.

    Returns:
        tuple(str, str): The (old, new) version pair.
    """
    previous = current()
    bumped = exact if exact is not None else _bump_version(previous, component)
    write(bumped)
    return previous, bumped
| 749,245
|
Bump the given version component.
Args:
version (str):
The current version. The format is: MAJOR.MINOR[.PATCH].
component (str):
What part of the version should be bumped. Can be one of:
- major
- minor
- patch
Returns:
str: Bumped version as a string.
|
def _bump_version(version, component='patch'):
    # type: (str, str) -> str
    """Return *version* with the given component incremented.

    Args:
        version (str): Current version in MAJOR.MINOR[.PATCH] format.
        component (str): One of *major*, *minor*, *patch*.

    Returns:
        str: The bumped version string.

    Raises:
        ValueError: On an unknown component or malformed version.
    """
    if component not in ('major', 'minor', 'patch'):
        raise ValueError("Invalid version component: {}".format(component))
    m = RE_VERSION.match(version)
    if m is None:
        raise ValueError("Version must be in MAJOR.MINOR[.PATCH] format")
    major = m.group('major')
    minor = m.group('minor') or '0'
    patch = m.group('patch') or None
    # A '.0' patch is treated the same as no patch at all.
    if patch == '0':
        patch = None
    if component == 'major':
        major, minor, patch = str(int(major) + 1), '0', None
    elif component == 'minor':
        minor, patch = str(int(minor) + 1), None
    else:
        patch = str(int(patch or 0) + 1)
    if patch is None:
        return '{}.{}'.format(major, minor)
    return '{}.{}.{}'.format(major, minor, patch)
| 749,246
|
Return the author of the given commit.
Args:
sha1 (str):
The sha1 of the commit to query. If not given, it will return the
sha1 for the current commit.
Returns:
Author: A named tuple ``(name, email)`` with the commit author details.
|
def commit_author(sha1=''):
    # type: (str) -> Author
    """Return the author of the given commit.

    Args:
        sha1 (str): Commit to query; the current commit when empty.

    Returns:
        Author: Named tuple ``(name, email)`` with the author details.
    """
    with conf.within_proj_dir():
        # '%an||%ae' -> "<author name>||<author email>"
        out = shell.run(
            'git show -s --format="%an||%ae" {}'.format(sha1),
            capture=True,
            never_pretend=True,
        ).stdout
    name, email = out.split('||')
    return Author(name, email)
| 749,460
|
Tag the current commit.
Args:
name (str):
The tag name.
message (str):
The tag message. Same as ``-m`` parameter in ``git tag``.
author (Author):
The commit author. Will default to the author of the commit.
pretend (bool):
If set to **True** it will print the full ``git tag`` command
instead of actually executing it.
|
def tag(name, message, author=None):
    # type: (str, str, Optional[Author]) -> None
    """Tag the current commit.

    Args:
        name (str): The tag name.
        message (str): The tag message. Same as ``-m`` parameter in
            ``git tag``.
        author (Author): The tag author. Defaults to the author of the
            latest commit.
    """
    # Double quotes and backticks in the message are escaped so the
    # shell command below stays well-formed.
    cmd = (
        'git -c "user.name={author.name}" -c "user.email={author.email}" '
        'tag -a "{name}" -m "{message}"'
    ).format(
        author=author or latest_commit().author,
        name=name,
        message=message.replace('"', '\\"').replace('`', '\\`'),
    )
    shell.run(cmd)
| 749,464
|
Verify if the given branch exists.
Args:
branch_name (str):
The name of the branch to check.
Returns:
bool: **True** if a branch with name *branch_name* exits, **False**
otherwise.
|
def verify_branch(branch_name):
    # type: (str) -> bool
    """Check whether the given branch exists.

    Args:
        branch_name (str): The branch name to check.

    Returns:
        bool: **True** if a branch called *branch_name* exists.
    """
    try:
        shell.run(
            'git rev-parse --verify {}'.format(branch_name),
            never_pretend=True
        )
    except IOError:
        # rev-parse failed -> no such branch.
        return False
    return True
| 749,467
|
Return details about a given commit.
Args:
sha1 (str):
The sha1 of the commit to query. If not given, it will return
the details for the latest commit.
Returns:
CommitDetails: Commit details. You can use the instance of the
class to query git tree further.
|
def get(cls, sha1=''):
    # type: (str) -> CommitDetails
    """Return details about a given commit.

    Args:
        sha1 (str): Commit to query; the latest commit when empty.

    Returns:
        CommitDetails: Commit details usable for further tree queries.
    """
    fmt = '%H||%an||%ae||%s||%b||%P'
    with conf.within_proj_dir():
        out = shell.run(
            'git show -s --format="{}" {}'.format(fmt, sha1),
            capture=True,
            never_pretend=True,
        ).stdout
    sha1, name, email, title, desc, parents = out.split('||')
    return CommitDetails(
        sha1=sha1,
        author=Author(name, email),
        title=title,
        desc=desc,
        parents_sha1=parents.split(),
    )
| 749,473
|
Load a YAML configuration.
This will not update the configuration but replace it entirely.
Args:
conf_file (str):
Path to the YAML config. This function will not check the file name
or extension and will just crash if the given file does not exist or
is not a valid YAML file.
|
def load_yaml_config(conf_file):
    # type: (str) -> None
    """Load a YAML configuration, replacing the current one entirely.

    Args:
        conf_file (str): Path to the YAML config. The file is assumed to
            exist and be valid YAML; no extension check is performed.
    """
    global g_config
    with open(conf_file) as fp:
        # Initialize config
        g_config = util.yaml_load(fp)
    # Add src_dir to sys.paths if it's set. This is only done with YAML
    # configs, py configs have to do this manually.
    src_dir = get_path('src_dir', None)
    if src_dir is not None:
        sys.path.insert(0, src_dir)
    # Import every commands module listed in the config.
    for cmd in get('commands', []):
        _import(cmd)
| 749,507
|
Load template from file.
The templates are part of the package and must be included as
``package_data`` in project ``setup.py``.
Args:
filename (str):
The template path. Relative to `peltak` package directory.
Returns:
str: The content of the chosen template.
|
def load_template(filename):
    # type: (str) -> str
    """Load a bundled template from file.

    Templates are part of the package (shipped via ``package_data``).

    Args:
        filename (str): Template path relative to the `peltak` package dir.

    Returns:
        str: The content of the chosen template.
    """
    full_path = os.path.join(PKG_DIR, 'templates', filename)
    with open(full_path) as fp:
        return fp.read()
| 749,509
|
Return absolute path to the repo dir (root project directory).
Args:
path (str):
The path relative to the project root (pelconf.yaml).
Returns:
str: The given path converted to an absolute path.
|
def proj_path(*path_parts):
    # type: (str) -> str
    """Return an absolute path rooted at the project dir (pelconf.yaml).

    Args:
        *path_parts (str): Path components relative to the project root.
            An absolute first component is returned (normalised) as-is.

    Returns:
        str: The given path converted to an absolute path.
    """
    # Renamed the local from 'proj_path', which shadowed this function's
    # own name.
    parts = list(path_parts) or ['.']
    if not os.path.isabs(parts[0]):
        root = _find_proj_root()
        if root is not None:
            parts = [root] + parts
    return os.path.normpath(os.path.join(*parts))
| 749,510
|
Print error and exit if the current branch is not of a given type.
Args:
branch_type (str):
The branch type. This assumes the branch is in the '<type>/<title>`
format.
|
def assert_branch_type(branch_type):
    # type: (str) -> None
    """Print an error and exit unless the current branch is <branch_type>/*.

    In --pretend mode only logs what would be asserted.

    Args:
        branch_type (str): Expected branch type; branches are assumed to
            follow the '<type>/<title>' format.
    """
    branch = git.current_branch(refresh=True)
    if branch.type == branch_type:
        return
    if context.get('pretend', False):
        log.info("Would assert that you're on a <33>{}/*<32> branch",
                 branch_type)
        return
    log.err("Not on a <33>{}<31> branch!", branch_type)
    fmt = ("The branch must follow <33>{required_type}/<name><31>"
           "format and your branch is called <33>{name}<31>.")
    log.err(fmt, required_type=branch_type, name=branch.name)
    sys.exit(1)
| 749,544
|
Print error and exit if *branch_name* is not the current branch.
Args:
branch_name (str):
The supposed name of the current branch.
|
def assert_on_branch(branch_name):
    # type: (str) -> None
    """Print an error and exit if *branch_name* is not the current branch.

    In --pretend mode only logs what would be asserted.

    Args:
        branch_name (str): The supposed name of the current branch.
    """
    branch = git.current_branch(refresh=True)
    if branch.name == branch_name:
        return
    if context.get('pretend', False):
        log.info("Would assert that you're on a <33>{}<32> branch",
                 branch_name)
        return
    log.err("You're not on a <33>{}<31> branch!", branch_name)
    sys.exit(1)
| 749,545
|
Delete the given branch.
Args:
branch_name (str):
Name of the branch to delete.
|
def git_branch_delete(branch_name):
    # type: (str) -> None
    """Delete the given branch unless it is protected.

    Args:
        branch_name (str): Name of the branch to delete.
    """
    if branch_name in git.protected_branches():
        return
    log.info("Deleting branch <33>{}", branch_name)
    shell.run('git branch -d {}'.format(branch_name))
| 749,546
|
Rename the current branch
Args:
new_name (str):
New name for the current branch.
|
def git_branch_rename(new_name):
    # type: (str) -> None
    """Rename the current branch unless it is protected.

    Args:
        new_name (str): New name for the current branch.
    """
    curr_name = git.current_branch(refresh=True).name
    if curr_name in git.protected_branches():
        return
    log.info("Renaming branch from <33>{}<32> to <33>{}".format(
        curr_name, new_name
    ))
    shell.run('git branch -m {}'.format(new_name))
| 749,547
|
Checkout or create a given branch
Args:
branch_name (str):
The name of the branch to checkout or create.
create (bool):
If set to **True** it will create the branch instead of checking it
out.
|
def git_checkout(branch_name, create=False):
    # type: (str, bool) -> None
    """Checkout (or, with create=True, create and checkout) a branch.

    Args:
        branch_name (str): The branch to checkout or create.
        create (bool): Create the branch instead of checking it out.
    """
    log.info("Checking out <33>{}".format(branch_name))
    # Build the argument list explicitly; the previous '{} {}'.format('', x)
    # produced a stray double space when create was False.
    args = ['checkout']
    if create:
        args.append('-b')
    args.append(branch_name)
    shell.run('git ' + ' '.join(args))
| 749,548
|
Merge *head* into *base*.
Args:
base (str):
The base branch. *head* will be merged into this branch.
head (str):
The branch that will be merged into *base*.
no_ff (bool):
If set to **True** it will force git to create merge commit. If set
to **False** (default) it will do a fast-forward merge if possible.
|
def git_merge(base, head, no_ff=False):
    # type: (str, str, bool) -> None
    """Merge *head* into *base*.

    Args:
        base (str): The base branch; *head* is merged into it.
        head (str): The branch that will be merged into *base*.
        no_ff (bool): Force git to create a merge commit instead of a
            fast-forward merge.
    """
    pretend = context.get('pretend', False)
    branch = git.current_branch(refresh=True)
    # Switch to the base branch first if we're not already on it.
    if branch.name != base and not pretend:
        git_checkout(base)
    args = []
    if no_ff:
        args.append('--no-ff')
    log.info("Merging <33>{}<32> into <33>{}<32>", head, base)
    shell.run('git merge {args} {branch}'.format(
        args=' '.join(args),
        branch=head,
    ))
    # Restore whatever branch we started on.
    if branch.name != base and not pretend:
        git_checkout(branch.name)
| 749,549
|
Show the user a menu to pick a branch from the existing ones.
Args:
exclude (list[str]):
List of branch names to exclude from the menu. By default it will
exclude master and develop branches. To show all branches pass an
empty array here.
Returns:
str: The name of the branch chosen by the user. If the user inputs an
invalid choice, he will be asked again (and again) until he picks a
a valid branch.
|
def choose_branch(exclude=None):
    # type: (List[str]) -> str
    """Show the user a menu to pick a branch from the existing ones.

    Args:
        exclude (list[str]): Branch names to leave out of the menu.
            Defaults to the master and develop branches; pass an empty
            list to show all branches.

    Returns:
        str: The branch chosen by the user. Invalid input re-prompts
        until a valid branch number is given.
    """
    if exclude is None:
        master = conf.get('git.master_branch', 'master')
        develop = conf.get('git.devel_branch', 'develop')
        exclude = {master, develop}
    # BUG FIX: coerce to a set — callers may pass a list (as the docs
    # suggest) and `set - list` raises TypeError.
    branches = list(set(git.branches()) - set(exclude))
    # Print the menu
    for i, branch_name in enumerate(branches):
        shell.cprint('<90>[{}] <33>{}'.format(i + 1, branch_name))
    # Get a valid choice from the user
    choice = 0
    while choice < 1 or choice > len(branches):
        prompt = "Pick a base branch from the above [1-{}]".format(
            len(branches)
        )
        choice = click.prompt(prompt, value_proc=int)
        if not (1 <= choice <= len(branches)):
            fmt = "Invalid choice {}, you must pick a number between {} and {}"
            log.err(fmt.format(choice, 1, len(branches)))
    return branches[choice - 1]
| 749,551
|
Generates identicon image based on passed data.
Arguments:
data - Data which should be used for generating an identicon. This data
will be used in order to create a digest which is used for generating the
identicon. If the data passed is a hex digest already, the digest will be
used as-is.
Returns:
Identicon image in raw format.
|
def image(request, data):
    """Generate an identicon image based on *data*.

    *data* is hashed into a digest used to generate the identicon; if it
    already is a hex digest it is used as-is. Width, height, padding,
    output format and inversion are read from the GET parameters ``w``,
    ``h``, ``p``, ``f`` and ``i``, falling back to the PYDENTICON_*
    settings.

    Args:
        request: Django request object.
        data: Data (or hex digest) the identicon is derived from.

    Returns:
        HttpResponse: The identicon image in raw format.

    Raises:
        SuspiciousOperation: If any GET parameter is malformed or the
            requested format is unsupported.
    """
    # Get image width, height, padding, and format from GET parameters, or
    # fall-back to default values from settings.
    try:
        width = int(request.GET.get("w", PYDENTICON_WIDTH))
    except ValueError:
        raise SuspiciousOperation("Identicon width must be a positive integer.")
    try:
        height = int(request.GET.get("h", PYDENTICON_HEIGHT))
    except ValueError:
        raise SuspiciousOperation("Identicon height must be a positive integer.")
    output_format = request.GET.get("f", PYDENTICON_FORMAT)
    try:
        padding = [int(p) for p in request.GET["p"].split(",")]
    except KeyError:
        padding = PYDENTICON_PADDING
    except ValueError:
        raise SuspiciousOperation("Identicon padding must consist out of 4 positive integers separated with commas.")
    if "i" in request.GET:
        inverted = request.GET.get("i")
        if inverted.lower() == "true":
            inverted = True
        elif inverted.lower() == "false":
            inverted = False
        else:
            raise SuspiciousOperation("Inversion parameter must be a boolean (true/false).")
    else:
        inverted = PYDENTICON_INVERT
    # Validate the input parameters.
    if not isinstance(width, int) or width <= 0:
        raise SuspiciousOperation("Identicon width must be a positive integer.")
    if not isinstance(height, int) or height <= 0:
        raise SuspiciousOperation("Identicon height must be a positive integer.")
    if not all([isinstance(p, int) and p >= 0 for p in padding]) or len(padding) != 4:
        raise SuspiciousOperation("Padding must be a 4-element tuple consisting out of positive integers.")
    # Set-up correct content type based on requested identicon format.
    if output_format == "png":
        content_type = "image/png"
    elif output_format == "ascii":
        content_type = "text/plain"
    else:
        # BUG FIX: the '%' interpolation used to be *inside* the string
        # literal, so the message never showed the requested format.
        raise SuspiciousOperation("Unsupported identicon format requested - '%s'" % output_format)
    # Initialise a generator.
    generator = Generator(PYDENTICON_ROWS, PYDENTICON_COLUMNS,
                          foreground=PYDENTICON_FOREGROUND,
                          background=PYDENTICON_BACKGROUND,
                          digest=PYDENTICON_DIGEST)
    # Generate the identicon.
    content = generator.generate(data, width, height, padding=padding,
                                 output_format=output_format, inverted=inverted)
    # Create and return the response.
    return HttpResponse(content, content_type=content_type)
| 749,793
|
Set context value.
Args:
name (str):
The name of the context value to change.
value (Any):
The new value for the selected context value
|
def set(self, name, value):
    """Set a context value, creating nested dicts as needed.

    Args:
        name (str): Dot-separated path of the value to change,
            e.g. ``'build.out_dir'``.
        value (Any): The new value.

    Raises:
        InvalidPath: If an intermediate path element is not a dict.
    """
    parts = name.split('.')
    node = self.values
    for idx, key in enumerate(parts[:-1]):
        try:
            node = node.setdefault(key, {})
        except AttributeError:
            # Intermediate node is not a dict-like container.
            raise InvalidPath('.'.join(parts[:idx + 1]))
    try:
        node[parts[-1]] = value
    except TypeError:
        raise InvalidPath('.'.join(parts[:-1]))
| 749,877
|
List all tags for the given image stored in the registry.
Args:
image_name (str):
The name of the image to query. The image must be present on the
registry for this call to return any values.
Returns:
list[str]: List of tags for that image.
|
def list_tags(self, image_name):
    # type: (str) -> Iterator[str]
    """List all registry tags for the given image.

    Args:
        image_name (str): Image to query; must exist on the registry.

    Returns:
        list[str]: Tags for that image, highest first (reverse sorted);
        an empty list when the registry reports no tags.
    """
    url = self.registry_url + '/v2/{}/tags/list'
    response = self.get(url.format(image_name), auth=self.auth)
    data = response.json()
    if 'tags' not in data:
        return []
    return reversed(sorted(data['tags']))
| 749,989
|
Patches current record and updates the current instance's 'attrs'
attribute to reflect the new changes.
Args:
payload - hash. This will be JSON-formatted prior to sending the request.
Returns:
`dict`. The JSON formatted response.
Raises:
`requests.exceptions.HTTPError`: The status code is not ok.
|
def patch(self, payload, append_to_arrays=True):
    """PATCH the current record and refresh ``self.attrs`` from the response.

    Args:
        payload (dict): Fields to update; JSON-formatted before sending.
        append_to_arrays (bool): When True, list values in the payload
            are merged (set union, order not preserved) with the
            record's current values instead of replacing them.

    Returns:
        dict: The JSON formatted response.

    Raises:
        ValueError: If *payload* is not a dict.
        requests.exceptions.HTTPError: The status code is not ok.
    """
    if not isinstance(payload, dict):
        raise ValueError("The 'payload' parameter must be provided a dictionary object.")
    payload = self.__class__.set_id_in_fkeys(payload)
    if append_to_arrays:
        for key in payload:
            val = payload[key]
            if type(val) == list:
                # Union of the new and the currently-stored values.
                val.extend(getattr(self, key))
                payload[key] = list(set(val))
    payload = self.check_boolean_fields(payload)
    payload = self.__class__.add_model_name_to_payload(payload)
    self.debug_logger.debug("PATCHING payload {}".format(json.dumps(payload, indent=4)))
    # NOTE(review): verify=False disables TLS verification — presumably an
    # internal server; confirm this is intentional.
    res = requests.patch(url=self.record_url, json=payload, headers=HEADERS, verify=False)
    self.write_response_html_to_file(res,"bob.html")
    res.raise_for_status()
    json_res = res.json()
    self.debug_logger.debug("Success")
    self.attrs = json_res
    return json_res
| 750,065
|
Posts the data to the specified record.
Args:
payload: `dict`. This will be JSON-formatted prior to sending the request.
Returns:
`dict`. The JSON formatted response.
Raises:
`Requests.exceptions.HTTPError`: The status code is not ok.
`RecordNotUnique`: The Rails server returned the exception ActiveRecord::RecordNotUnique.
|
def post(cls, payload):
    """POST *payload* to create a new record.

    Args:
        payload (dict): JSON-formatted before sending the request.

    Returns:
        dict: The JSON formatted response.

    Raises:
        ValueError: If *payload* is not a dict.
        requests.exceptions.HTTPError: The status code is not ok.
        RecordNotUnique: The Rails server returned the exception
            ActiveRecord::RecordNotUnique.
    """
    if not isinstance(payload, dict):
        raise ValueError("The 'payload' parameter must be provided a dictionary object.")
    payload = cls.set_id_in_fkeys(payload)
    payload = cls.check_boolean_fields(payload)
    payload = cls.add_model_name_to_payload(payload)
    # Run any pre-post hooks:
    payload = cls.prepost_hooks(payload)
    cls.debug_logger.debug("POSTING payload {}".format(json.dumps(payload, indent=4)))
    # NOTE(review): verify=False disables TLS verification — confirm this
    # only targets a trusted internal server.
    res = requests.post(url=cls.URL, json=(payload), headers=HEADERS, verify=False)
    cls.write_response_html_to_file(res,"bob.html")
    if not res.ok:
        # Surface server-side uniqueness violations as a dedicated
        # exception before the generic HTTPError below.
        cls.log_error(res.text)
        res_json = res.json()
        if "exception" in res_json:
            exc_type = res_json["exception"]
            if exc_type == "ActiveRecord::RecordNotUnique":
                raise RecordNotUnique()
    res.raise_for_status()
    res = res.json()
    cls.log_post(res)
    cls.debug_logger.debug("Success")
    return res
| 750,067
|
Logs the provided error message to both the error logger and the debug logger logging
instances.
Args:
msg: `str`. The error message to log.
|
def log_error(cls, msg):
    """Write *msg* to both the error and the debug logging instances.

    Args:
        msg (str): The error message to log.
    """
    for emit in (cls.error_logger.error, cls.debug_logger.debug):
        emit(msg)
| 750,068
|
An aid in troubleshooting internal application errors, i.e. <Response [500]>, to be mainly
beneficial when developing the server-side API. This method will write the response HTML
for viewing the error details in the browser.
Args:
response: `requests.models.Response` instance.
filename: `str`. The output file name.
|
def write_response_html_to_file(response, filename):
    """Dump the response body to *filename* for browser-side debugging.

    Mainly useful for inspecting internal server errors (<Response [500]>)
    while developing the server-side API. Non-2xx response bodies are
    also echoed to the debug log.

    Args:
        response: `requests.models.Response` instance.
        filename (str): The output file name.
    """
    # Use a context manager so the file handle is closed even if the
    # write raises (the original left it open on error).
    with open(filename, 'w') as fout:
        if not str(response.status_code).startswith("2"):
            Model.debug_logger.debug(response.text)
        fout.write(response.text)
| 750,069
|
Calls the SequencingRequest's get_library_barcode_sequence_hash server-side endpoint to
create a hash of the form {LibraryID -> barcode_sequence} for all Libraries on the
SequencingRequest.
Args:
inverse: `bool`. True means to inverse the key and value pairs such that the barcode
sequence serves as the key.
Returns: `dict`.
|
def get_library_barcode_sequence_hash(self, inverse=False):
    """Fetch {library_id -> barcode_sequence} for this SequencingRequest.

    Calls the server-side ``get_library_barcode_sequence_hash`` endpoint
    for all Libraries on the SequencingRequest.

    Args:
        inverse (bool): When True, swap keys and values so the barcode
            sequence serves as the key.

    Returns:
        dict: The (possibly inverted) mapping.
    """
    action = os.path.join(self.record_url, "get_library_barcode_sequence_hash")
    res = requests.get(url=action, headers=HEADERS, verify=False)
    res.raise_for_status()
    # The server sends library IDs as strings; convert them to ints.
    mapping = {int(lib_id): seq for lib_id, seq in res.json().items()}
    if inverse:
        mapping = {seq: lib_id for lib_id, seq in mapping.items()}
    return mapping
| 750,079
|
Unarchives the user with the specified user ID.
Args:
user_id: `int`. The ID of the user to unarchive.
Returns:
`NoneType`: None.
|
def unarchive_user(self, user_id):
    """Unarchive the user with the specified user ID.

    Args:
        user_id (int): The ID of the user to unarchive.

    Returns:
        None.
    """
    endpoint = self.record_url + "/unarchive"
    res = requests.patch(url=endpoint, json={"user_id": user_id},
                         headers=HEADERS, verify=False)
    self.write_response_html_to_file(res, "bob.html")
    res.raise_for_status()
| 750,082
|
Deploy the app to AppEngine.
Args:
app_id (str):
AppEngine App ID. Overrides config value app_id if given.
version (str):
AppEngine project version. Overrides config values if given.
promote (bool):
If set to **True** promote the current remote app version to the one
that's being deployed.
quiet (bool):
If set to **True** this will pass the ``--quiet`` flag to gcloud
command.
|
def deploy(app_id, version, promote, quiet):
    # type: (str, str, bool, bool) -> None
    """Deploy the app to AppEngine.

    Args:
        app_id (str): AppEngine App ID; overrides the configured app_id.
        version (str): AppEngine project version; overrides config.
        promote (bool): Promote the deployed version to be the serving one.
        quiet (bool): Pass ``--quiet`` to the gcloud command.
    """
    gae_app = GaeApp.for_branch(git.current_branch().name)
    if gae_app is None and None in (app_id, version):
        # BUG FIX: the two fragments used to concatenate into
        # "and--project" — a trailing space was missing.
        msg = (
            "Can't find an AppEngine app setup for branch <35>{}<32> and "
            "--project and --version were not given."
        )
        log.err(msg, git.current_branch().name)
        sys.exit(1)
    # NOTE(review): if gae_app is None but both --project and --version
    # were given, the attribute assignments below will fail; presumably a
    # GaeApp should be constructed here — confirm intended behavior.
    if version is not None:
        gae_app.version = version
    if app_id is not None:
        gae_app.app_id = app_id
    gae_app.deploy(promote, quiet)
| 750,179
|
Run devserver.
Args:
port (int):
Port on which the app will be served.
admin_port (int):
Port on which the admin interface is served.
clear (bool):
If set to **True**, clear the datastore on startup.
|
def devserver(port, admin_port, clear):
    # type: (int, int, bool) -> None
    """Run the local AppEngine development server.

    Args:
        port (int): Port the app is served on.
        admin_port (int): Port for the admin interface; defaults to
            ``port + 1`` when falsy.
        clear (bool): Clear the datastore on startup.
    """
    flags = [
        '--port={}'.format(port),
        '--admin_port={}'.format(admin_port or (port + 1)),
    ]
    if clear:
        flags.append('--clear_datastore=yes')
    with conf.within_proj_dir():
        shell.run('dev_appserver.py . {args}'.format(args=' '.join(flags)))
| 750,180
|
Mark function as experimental.
Args:
fn (FunctionType):
The command function to decorate.
|
def mark_experimental(fn):
    # type: (FunctionType) -> FunctionType
    """Mark a command function as experimental.

    The wrapped command emits a warning when run from a TTY.

    Args:
        fn (FunctionType): The command function to decorate.
    """
    @wraps(fn)
    def wrapper(*args, **kw):  # pylint: disable=missing-docstring
        from peltak.core import shell
        if shell.is_tty:
            # Fixed garbled message ("is has", "within with").
            warnings.warn("This command has experimental status. The "
                          "interface is not yet stable and might change "
                          "without notice with a patch version update. "
                          "Use at your own risk")
        return fn(*args, **kw)
    return wrapper
| 750,185
|
Mark command as deprecated.
Args:
replaced_by (str):
The command that deprecated this command and should be used instead.
|
def mark_deprecated(replaced_by):
    # type: (Text) -> FunctionType
    """Mark a command as deprecated.

    The wrapped command emits a warning when run from a TTY.

    Args:
        replaced_by (str): The command that deprecated this one and
            should be used instead.
    """
    def decorator(fn):  # pylint: disable=missing-docstring
        @wraps(fn)
        def wrapper(*args, **kw):  # pylint: disable=missing-docstring
            from peltak.core import shell
            if shell.is_tty:
                # Fixed garbled message ("is has been" -> "has been").
                warnings.warn("This command has been deprecated. Please "
                              "use {new} instead.".format(new=replaced_by))
            return fn(*args, **kw)
        return wrapper
    return decorator
| 750,186
|
Dump data to a YAML string/file.
Args:
data (YamlData):
The data to serialize as YAML.
stream (TextIO):
The file-like object to save to. If given, this function will write
the resulting YAML to that stream.
Returns:
str: The YAML string.
|
def yaml_dump(data, stream=None):
    # type: (YamlData, Optional[TextIO]) -> Text
    """Dump data to a YAML string/file.

    Args:
        data (YamlData): The data to serialize as YAML.
        stream (TextIO): Optional file-like object; when given the YAML
            is also written to it.

    Returns:
        str: The YAML string.
    """
    return yaml.dump(data,
                     stream=stream,
                     Dumper=Dumper,
                     default_flow_style=False)
| 750,188
|
Apply the decorator to the given function.
Args:
fn (FunctionType):
The function to decorate.
:return Function:
The function wrapped in caching logic.
|
def __call__(self, fn):
# type: (FunctionType) -> FunctionType
@wraps(fn)
def wrapper(refresh=False): # pylint: disable=missing-docstring
if refresh or not hasattr(wrapper, self.CACHE_VAR):
result = fn()
setattr(wrapper, self.CACHE_VAR, result)
return getattr(wrapper, self.CACHE_VAR)
return wrapper
| 750,190
|
Clear result cache on the given function.
If the function has no cached result, this call will do nothing.
Args:
fn (FunctionType):
The function whose cache should be cleared.
|
def clear(cls, fn):
    # type: (FunctionType) -> None
    """Drop *fn*'s cached result, if any.

    Does nothing when the function has no cached result.

    Args:
        fn (FunctionType): The function whose cache should be cleared.
    """
    try:
        delattr(fn, cls.CACHE_VAR)
    except AttributeError:
        pass
| 750,191
|
Start working on a new feature by branching off develop.
This will create a new branch off develop called feature/<name>.
Args:
name (str):
The name of the new feature.
|
def start(name):
    # type: (str) -> None
    """Create a task/<name> branch off the current feature/hotfix branch.

    Args:
        name (str): The name of the new task.
    """
    parent = git.current_branch(refresh=True)
    task_branch = 'task/' + common.to_branch_name(name)
    if parent.type not in ('feature', 'hotfix'):
        log.err("Task branches can only branch off <33>feature<32> or "
                "<33>hotfix<32> branches")
        sys.exit(1)
    common.git_checkout(task_branch, create=True)
| 750,192
|
Start working on a new hotfix.
This will create a new branch off master called hotfix/<name>.
Args:
name (str):
The name of the new feature.
|
def start(name):
    # type: (str) -> None
    """Create a hotfix/<name> branch off master.

    Args:
        name (str): The name of the new hotfix.
    """
    master = conf.get('git.master_branch', 'master')
    hotfix_branch = 'hotfix/' + common.to_branch_name(name)
    common.assert_on_branch(master)
    common.git_checkout(hotfix_branch, create=True)
| 750,388
|
Send text to stdin. Can only be used on non blocking commands
Args:
value (str): the text to write on stdin
Raises:
TypeError: If command is blocking
Returns:
ShellCommand: return this ShellCommand instance for chaining
|
def send(self, value):
    """Send text to stdin of a non-blocking command.

    Args:
        value (str): The text to write on stdin; a newline is appended.

    Raises:
        TypeError: If the command is blocking.

    Returns:
        ShellCommand: This instance, for chaining.
    """
    # Guard clause: only non-blocking commands with an open stdin accept input.
    if self.block or self._stdin is None:
        raise TypeError(NON_BLOCKING_ERROR_MESSAGE)
    self.writer.write("{}\n".format(value))
    return self
| 750,396
|
Block until a pattern have been found in stdout and stderr
Args:
pattern(:class:`~re.Pattern`): The pattern to search
timeout(int): Maximum number of second to wait. If None, wait infinitely
Raises:
TimeoutError: When timeout is reach
|
def wait_for(self, pattern, timeout=None):
    """Block until *pattern* is seen on stdout or stderr.

    Busy-polls both streams; ``re.match`` anchors the pattern at the
    start of each line.

    Args:
        pattern (:class:`~re.Pattern`): The pattern to search for.
        timeout (int): Max seconds to wait; None waits forever.

    Raises:
        TimeoutError: When the timeout is reached.
        TypeError: If the command is blocking.
    """
    should_continue = True
    if self.block:
        raise TypeError(NON_BLOCKING_ERROR_MESSAGE)

    def stop(signum, frame):  # pylint: disable=W0613
        # SIGALRM handler: raise only if we have not matched yet.
        nonlocal should_continue
        if should_continue:
            raise TimeoutError()

    if timeout:
        signal.signal(signal.SIGALRM, stop)
        signal.alarm(timeout)
    while should_continue:
        output = self.poll_output() + self.poll_error()
        filtered = [line for line in output if re.match(pattern, line)]
        if filtered:
            should_continue = False
| 750,400
|
Create an instance of :class:`~ShellCommand` and run it
Args:
command (str): :class:`~ShellCommand`
block (bool): See :class:`~ShellCommand`
cwd (str): Override the runner cwd. Used by the :class:`~ShellCommand` instance
|
def run(self, command, block=True, cwd=None, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE):
    """Create a :class:`~ShellCommand` bound to this runner and run it.

    Args:
        command (str): Command line for the :class:`~ShellCommand`.
        block (bool): See :class:`~ShellCommand`.
        cwd (str): Override the runner cwd for this command.
    """
    effective_cwd = self.cwd if cwd is None else cwd
    cmd = ShellCommand(command=command, logger=self.logger, block=block,
                       cwd=effective_cwd, stdin=stdin, stdout=stdout,
                       stderr=stderr)
    return cmd.run()
| 750,404
|
Test whether the given *path* matches any patterns in *patterns*
Args:
path (str):
A file path to test for matches.
patterns (list[str]):
A list of glob string patterns to test against. If *path* matches
any of those patters, it will return True.
Returns:
bool: **True** if the *path* matches any pattern in *patterns*.
|
def match_globs(path, patterns):
    # type: (str, List[str]) -> bool
    """Test whether *path* matches any glob pattern in *patterns*.

    Patterns starting with ``/`` are anchored at the path root; all
    others are matched with :func:`fnmatch.fnmatch`.

    Args:
        path (str): File path to test for matches.
        patterns (list[str]): Glob patterns; empty entries are ignored.

    Returns:
        bool: **True** if *path* matches any pattern.
    """
    for pattern in patterns:
        if not pattern:
            continue
        if pattern.startswith('/'):
            # Root-anchored pattern: must match from the beginning.
            regex = fnmatch.translate(pattern[1:])
            candidate = path[1:] if path.startswith('/') else path
            found = re.search(regex, candidate)
            if found is not None and found.start() == 0:
                return True
        elif fnmatch.fnmatch(path, pattern):
            return True
    return False
| 750,416
|
Test whether the given *path* contains any patterns in *patterns*
Args:
path (str):
A file path to test for matches.
patterns (list[str]):
A list of glob string patterns to test against. If *path* matches
any of those patters, it will return True.
Returns:
bool: **True** if the ``path`` matches any pattern in *patterns*.
|
def search_globs(path, patterns):
    # type: (str, List[str]) -> bool
    """Test whether *path* contains a match for any pattern in *patterns*.

    Unlike :func:`match_globs` the glob may match anywhere inside the
    path (the end anchor is stripped from the translated regex).

    Args:
        path (str): File path to test for matches.
        patterns (list[str]): Glob patterns; empty entries are ignored.

    Returns:
        bool: **True** if any pattern is found inside *path*.
    """
    for pattern in patterns:
        if not pattern:
            continue
        if pattern.startswith('/'):
            # If pattern starts with root it means it match from root only
            regex = fnmatch.translate(pattern[1:]).replace('\\Z', '')
            candidate = path[1:] if path.startswith('/') else path
            found = re.search(regex, candidate)
            if found is not None and found.start() == 0:
                return True
        else:
            regex = fnmatch.translate(pattern).replace('\\Z', '')
            if re.search(regex, path):
                return True
    return False
| 750,417
|
--pretend aware file writing.
You can always write files manually but you should always handle the
--pretend case.
Args:
path (str):
content (str):
mode (str):
|
def write_file(path, content, mode='w'):
    # type: (Text, Union[Text, bytes], Text) -> None
    """--pretend aware file writing.

    When --pretend is active the content is only logged; otherwise it is
    written to *path*.

    Args:
        path (str): File to write.
        content (str|bytes): Content to write.
        mode (str): File open mode.
    """
    from peltak.core import context
    from peltak.core import log

    if not context.get('pretend', False):
        with open(path, mode) as fp:
            fp.write(content)
    else:
        log.info("Would overwrite <34>{path}<32> with:\n<90>{content}",
                 path=path,
                 content=content)
| 750,418
|
Upload the release to a pypi server.
TODO: Make sure the git directory is clean before allowing a release.
Args:
target (str):
pypi target as defined in ~/.pypirc
|
def upload(target):
    # type: (str) -> None
    """Build an sdist and upload the release to a pypi server.

    TODO: Make sure the git directory is clean before allowing a release.

    Args:
        target (str): pypi target as defined in ~/.pypirc.
    """
    log.info("Uploading to pypi server <33>{}".format(target))
    with conf.within_proj_dir():
        for step in ('register', 'upload'):
            shell.run('python setup.py sdist {} -r "{}"'.format(step, target))
| 750,906
|
Generate ~/.pypirc with the given credentials.
Useful for CI builds. Can also get credentials through env variables
``PYPI_USER`` and ``PYPI_PASS``.
Args:
username (str):
pypi username. If not given it will try to take it from the
`` PYPI_USER`` env variable.
password (str):
pypi password. If not given it will try to take it from the
`` PYPI_PASS`` env variable.
|
def gen_pypirc(username=None, password=None):
    # type: (str, str) -> None
    """Generate ~/.pypirc with the given credentials.

    Useful for CI builds. Credentials can also be supplied through the
    ``PYPI_USER`` and ``PYPI_PASS`` environment variables.

    Args:
        username (str): pypi username; falls back to ``$PYPI_USER``.
        password (str): pypi password; falls back to ``$PYPI_PASS``.
    """
    path = join(conf.getenv('HOME'), '.pypirc')
    username = username or conf.getenv('PYPI_USER', None)
    password = password or conf.getenv('PYPI_PASS', None)
    if username is None or password is None:
        log.err("You must provide $PYPI_USER and $PYPI_PASS")
        sys.exit(1)
    log.info("Generating <94>{}".format(path))
    # NOTE(review): the template literal was missing/corrupted in the
    # original source; reconstructed a standard .pypirc — verify the
    # repository/index names match the project's upload targets.
    fs.write_file(path, util.remove_indent('''
        [distutils]
        index-servers = pypi

        [pypi]
        repository: https://upload.pypi.org/legacy/
        username: {username}
        password: {password}
    '''.format(
        username=username,
        password=password
    )))
| 750,907
|
For each plugins, check if a "step" method exist on it, and call it
Args:
step (str): The method to search and call on each plugin
|
def call_plugins(self, step):
    """Invoke the *step* hook on every plugin that provides it.

    Plugins missing the hook, or exposing a non-callable attribute of
    that name, are skipped with a debug log entry.

    Args:
        step (str): The method to look up and call on each plugin.
    """
    missing_fmt = "{} doesn't exist on plugin {}"
    uncallable_fmt = "{} on plugin {} is not callable"
    for plugin in self.plugins:
        try:
            getattr(plugin, step)()
        except AttributeError:
            self.logger.debug(missing_fmt.format(step, plugin))
        except TypeError:
            self.logger.debug(uncallable_fmt.format(step, plugin))
| 751,020
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.