Search is not available for this dataset
text
stringlengths
75
104k
def _skip_section(self):
    """Advance the file past the current section.

    Continuation lines (those starting with whitespace) are consumed;
    the first non-continuation line (or the empty EOF string) is left
    in ``self._last``.
    """
    line = self._f.readline()
    while line and not line[0].strip():
        line = self._f.readline()
    self._last = line
def _read_section(self): """Read and return an entire section""" lines = [self._last[self._last.find(":")+1:]] self._last = self._f.readline() while len(self._last) > 0 and len(self._last[0].strip()) == 0: lines.append(self._last) self._last = self._f.readline() ...
def get_next(self, label):
    """Return the contents of the next section carrying *label*.

    Sections with any other label are skipped along the way.
    """
    while True:
        if self._get_current_label() == label:
            return self._read_section()
        self._skip_section()
def _read_frame(self): """Read a single frame from the trajectory""" self._secfile.get_next("Frame Number") frame = ATRJFrame() # Read the time and energy energy_lines = self._secfile.get_next("Time/Energy") energy_words = energy_lines[0].split() frame.time = floa...
def get_cube_points(origin, axes, nrep): '''Generate the Cartesian coordinates of the points in a cube file *Arguemnts:* origin The cartesian coordinate for the origin of the grid. axes The 3 by 3 array with the grid spacings as rows. nrep The numb...
def from_file(cls, filename): '''Create a cube object by loading data from a file. *Arguemnts:* filename The file to load. It must contain the header with the description of the grid and the molecule. ''' with open(filename) as f: ...
def write_to_file(self, fn): '''Write the cube to a file in the Gaussian cube format.''' with open(fn, 'w') as f: f.write(' {}\n'.format(self.molecule.title)) f.write(' {}\n'.format(self.subtitle)) def write_grid_line(n, v): f.write('%5i % 11.6f % 11....
def copy(self, newdata=None): '''Return a copy of the cube with optionally new data.''' if newdata is None: newdata = self.data.copy() return self.__class__( self.molecule, self.origin.copy(), self.axes.copy(), self.nrep.copy(), newdata, self.subtitle, self.nu...
def _consistent(self): """Checks the constency between self.__index and self.__order""" if len(self.__order) != sum(len(values) for values in self.__index.values()): return False import copy tmp = copy.copy(self.__order) for key, values in self.__index.items(): ...
def append(self, child): """Add a child section or keyword""" if not (isinstance(child, CP2KSection) or isinstance(child, CP2KKeyword)): raise TypeError("The child must be a CP2KSection or a CP2KKeyword, got: %s." % child) l = self.__index.setdefault(child.name, []) l.append(...
def dump_children(self, f, indent=''):
    """Write every child of this section to the file-like object *f*.

    Each child is dumped with the indentation grown by one space.
    """
    deeper = indent + ' '
    for item in self.__order:
        item.dump(f, deeper)
def dump(self, f, indent=''):
    """Write this section, its children and the closing tag to *f*.

    The header line is right-stripped so a section without parameters
    carries no trailing space.
    """
    header = ("%s&%s %s" % (indent, self.__name, self.section_parameters)).rstrip()
    print(header, file=f)
    self.dump_children(f, indent)
    footer = "%s&END %s" % (indent, self.__name)
    print(footer, file=f)
def readline(self, f): """A helper method that only reads uncommented lines""" while True: line = f.readline() if len(line) == 0: raise EOFError line = line[:line.find('#')] line = line.strip() if len(line) > 0: ...
def load_children(self, f): """Load the children of this section from a file-like object""" while True: line = self.readline(f) if line[0] == '&': if line[1:].startswith("END"): check_name = line[4:].strip().upper() if check...
def load(self, f, line=None): """Load this section from a file-like object""" if line is None: # in case the file contains only a fragment of an input file, # this is useful. line = f.readlin() words = line[1:].split() self.__name = words[0].upper() ...
def dump(self, f, indent=''):
    """Write this keyword (with its optional unit) to *f*.

    The line is right-stripped so an empty value leaves no trailing
    whitespace.
    """
    if self.__unit is None:
        text = "%s%s %s" % (indent, self.__name, self.__value)
    else:
        text = "%s%s [%s] %s" % (indent, self.__name, self.__unit, self.__value)
    print(text.rstrip(), file=f)
def load(self, line): """Load this keyword from a file-like object""" words = line.split() try: float(words[0]) self.__name = "" self.__value = " ".join(words) except ValueError: self.__name = words[0].upper() if len(words) > 2 ...
def set_value(self, value):
    """Bind *value* to this keyword.

    Raises TypeError when *value* is not a string.
    """
    if isinstance(value, str):
        self.__value = value
    else:
        raise TypeError("A value must be a string, got %s." % value)
def read_from_file(filename): """ Arguments: | ``filename`` -- the filename of the input file Use as follows:: >>> if = CP2KInputFile.read_from_file("somefile.inp") >>> for section in if: ... print section.name """ ...
def _read_frame(self): """Read and return the next time frame""" pos = np.zeros((self.num_atoms, 3), float) vel = np.zeros((self.num_atoms, 3), float) for i in range(self.num_atoms): line = next(self._f) words = line.split() pos[i, 0] = float(words[1])...
def check_matrix(m): """Check the sanity of the given 4x4 transformation matrix""" if m.shape != (4, 4): raise ValueError("The argument must be a 4x4 array.") if max(abs(m[3, 0:3])) > eps: raise ValueError("The given matrix does not have correct translational part") if abs(m[3, 3] - 1.0)...
def superpose(ras, rbs, weights=None): """Compute the transformation that minimizes the RMSD between the points ras and rbs Arguments: | ``ras`` -- a ``np.array`` with 3D coordinates of geometry A, shape=(N,3) | ``rbs`` -- a ``np.array`` with 3D coordinates of geom...
def fit_rmsd(ras, rbs, weights=None): """Fit geometry rbs onto ras, returns more info than superpose Arguments: | ``ras`` -- a numpy array with 3D coordinates of geometry A, shape=(N,3) | ``rbs`` -- a numpy array with 3D coordinates of geometry B, ...
def inv(self):
    """Return the translation that undoes this one.

    The inverse caches this object as its own inverse.
    """
    inverse = Translation(-self.t)
    inverse._cache_inv = self
    return inverse
def apply_to(self, x, columns=False): """Apply this translation to the given object The argument can be several sorts of objects: * ``np.array`` with shape (3, ) * ``np.array`` with shape (N, 3) * ``np.array`` with shape (3, N), use ``columns=True`` * ``T...
def compare(self, other, t_threshold=1e-3): """Compare two translations The RMSD of the translation vectors is computed. The return value is True when the RMSD is below the threshold, i.e. when the two translations are almost identical. """ return compute_rmsd(s...
def _check_r(self, r): """the columns must orthogonal""" if abs(np.dot(r[:, 0], r[:, 0]) - 1) > eps or \ abs(np.dot(r[:, 0], r[:, 0]) - 1) > eps or \ abs(np.dot(r[:, 0], r[:, 0]) - 1) > eps or \ np.dot(r[:, 0], r[:, 1]) > eps or \ np.dot(r[:, 1], r[:, 2]) ...
def random(cls):
    """Return a rotation with random axis, angle and inversion flag.

    The random draws happen in the same order as before (axis, angle,
    inversion) so the RNG stream is consumed identically.
    """
    rand_axis = random_unit()
    rand_angle = np.random.uniform(0, 2 * np.pi)
    rand_invert = bool(np.random.randint(0, 2))
    return Rotation.from_properties(rand_angle, rand_axis, rand_invert)
def from_properties(cls, angle, axis, invert): """Initialize a rotation based on the properties""" norm = np.linalg.norm(axis) if norm > 0: x = axis[0] / norm y = axis[1] / norm z = axis[2] / norm c = np.cos(angle) s = np.sin(angle) ...
def properties(self): """Rotation properties: angle, axis, invert""" # determine wether an inversion rotation has been applied invert = (np.linalg.det(self.r) < 0) factor = {True: -1, False: 1}[invert] # get the rotation data # trace(r) = 1+2*cos(angle) cos_angle ...
def matrix(self):
    """Return the 4x4 homogeneous matrix of this rotation.

    The upper-left 3x3 block holds ``self.r``; the remainder is the
    identity (no translation, homogeneous 1 in the corner).
    """
    mat = np.identity(4, float)
    mat[:3, :3] = self.r
    return mat
def inv(self):
    """Return the inverse rotation (the transposed matrix).

    The inverse caches this object as its own inverse.
    """
    inverse = Rotation(self.r.transpose())
    inverse._cache_inv = self
    return inverse
def apply_to(self, x, columns=False): """Apply this rotation to the given object The argument can be several sorts of objects: * ``np.array`` with shape (3, ) * ``np.array`` with shape (N, 3) * ``np.array`` with shape (3, N), use ``columns=True`` * ``Tran...
def compare(self, other, r_threshold=1e-3): """Compare two rotations The RMSD of the rotation matrices is computed. The return value is True when the RMSD is below the threshold, i.e. when the two rotations are almost identical. """ return compute_rmsd(self.r, o...
def from_properties(cls, angle, axis, invert, translation):
    """Build a Complete transformation from rotation properties and a translation."""
    rotation = Rotation.from_properties(angle, axis, invert)
    return Complete(rotation.r, translation)
def cast(cls, c): """Convert the first argument into a Complete object""" if isinstance(c, Complete): return c elif isinstance(c, Translation): return Complete(np.identity(3, float), c.t) elif isinstance(c, Rotation): return Complete(c.r, np.zeros(3, f...
def about_axis(cls, center, angle, axis, invert=False): """Create transformation that represents a rotation about an axis Arguments: | ``center`` -- Point on the axis | ``angle`` -- Rotation angle | ``axis`` -- Rotation axis | ``invert`` -- Whe...
def properties(self):
    """Transformation properties: angle, axis, invert, translation.

    The rotational part is delegated to ``Rotation.properties``; the
    translation vector is appended unchanged.
    """
    angle, axis, invert = Rotation(self.r).properties
    return angle, axis, invert, self.t
def inv(self):
    """Return the inverse transformation.

    The inverse rotation is the transpose; the inverse translation is
    that transpose applied to the negated translation vector. The
    result caches this object as its own inverse.
    """
    rt = self.r.transpose()
    inverse = Complete(rt, np.dot(rt, -self.t))
    inverse._cache_inv = self
    return inverse
def compare(self, other, t_threshold=1e-3, r_threshold=1e-3): """Compare two transformations The RMSD values of the rotation matrices and the translation vectors are computed. The return value is True when the RMSD values are below the thresholds, i.e. when the two transformati...
def _read_frame(self): """Read and return the next time frame""" # Read one frame, we assume that the current file position is at the # line 'ITEM: TIMESTEP' and that this line marks the beginning of a # time frame. line = next(self._f) if line != 'ITEM: TIMESTEP\n': ...
def _skip_frame(self):
    """Advance the file past the next time frame without parsing it.

    Scans until the ``ITEM: ATOMS`` marker line, then consumes one
    line per atom.
    """
    for record in self._f:
        if record == 'ITEM: ATOMS\n':
            break
    for _ in range(self.num_atoms):
        next(self._f)
def _add_atom_info(self, atom_info):
    """Index an atom-info record by atomic number and by lower-case symbol."""
    number, symbol = atom_info.number, atom_info.symbol
    self.atoms_by_number[number] = atom_info
    self.atoms_by_symbol[symbol.lower()] = atom_info
def update(self, other):
    """Extend the current cluster with data from another cluster.

    Member merging is delegated to the base class; the rule lists are
    concatenated afterwards.
    """
    Cluster.update(self, other)
    for rule in other.rules:
        self.rules.append(rule)
def add_related(self, *objects): """Add related items The arguments can be individual items or cluster objects containing several items. When two groups of related items share one or more common members, they will be merged into one cluster. """ mast...
def _read_frame(self): """Read a single frame from the trajectory""" # auxiliary read function def read_three(msg): """Read three words as floating point numbers""" line = next(self._f) try: return [float(line[:12]), float(line[12:24]), float(l...
def goto_next_frame(self): """Continue reading until the next frame is reached""" marked = False while True: line = next(self._f)[:-1] if marked and len(line) > 0 and not line.startswith(" --------"): try: step = int(line[:10]) ...
def _read_frame(self): """Read a single frame from the trajectory""" # optionally skip the equilibration if self.skip_equi_period: while True: step, line = self.goto_next_frame() self._counter += 1 if step >= self.equi_period: ...
def _read_frame(self): """Read a frame from the XYZ file""" size = self.read_size() title = self._f.readline()[:-1] if self.symbols is None: symbols = [] coordinates = np.zeros((size, 3), float) for counter in range(size): line = self._f.readline(...
def _skip_frame(self):
    """Consume one frame (title line plus one line per atom) unparsed.

    Raises StopIteration when the file ends before the frame is
    complete, mirroring the iterator protocol used by the reader.
    """
    num_lines = self.read_size() + 1  # atom lines plus the title line
    for _ in range(num_lines):
        if not self._f.readline():
            raise StopIteration
def get_first_molecule(self): """Get the first molecule from the trajectory This can be useful to configure your program before handeling the actual trajectory. """ title, coordinates = self._first molecule = Molecule(self.numbers, coordinates, title, symbols=self....
def dump(self, title, coordinates): """Dump a frame to the trajectory file Arguments: | ``title`` -- the title of the frame | ``coordinates`` -- a numpy array with coordinates in atomic units """ print("% 8i" % len(self.symbols), file=self._f) prin...
def get_molecule(self, index=0):
    """Return the molecule stored at frame *index*.

    Optional argument:
     | ``index`` -- The frame index [default=0]
    """
    geometry = self.geometries[index]
    title = self.titles[index]
    return Molecule(self.numbers, geometry, title, symbols=self.symbols)
def write_to_file(self, f, file_unit=angstrom): """Write the trajectory to a file Argument: | ``f`` -- a filename or a file-like object to write to Optional argument: | ``file_unit`` -- the unit of the values written to file [de...
def slice_match(sub, counter): """Efficiently test if counter is in ``xrange(*sub)`` Arguments: | ``sub`` -- a slice object | ``counter`` -- an integer The function returns True if the counter is in ``xrange(sub.start, sub.stop, sub.step)``. """ if sub.start is not...
def check_anagrad(fun, x0, epsilon, threshold): """Check the analytical gradient using finite differences Arguments: | ``fun`` -- the function to be tested, more info below | ``x0`` -- the reference point around which the function should be tested | ``epsilo...
def check_delta(fun, x, dxs, period=None): """Check the difference between two function values using the analytical gradient Arguments: | ``fun`` -- The function to be tested, more info below. | ``x`` -- The argument vector. | ``dxs`` -- A matrix where each row is a vector of s...
def compute_fd_hessian(fun, x0, epsilon, anagrad=True): """Compute the Hessian using the finite difference method Arguments: | ``fun`` -- the function for which the Hessian should be computed, more info below | ``x0`` -- the point at which the Hessian must be compu...
def update(self, gradient, step):
    """Shift the gradient history and refresh the search direction.

    The very first call (no previous gradient yet) falls back to
    steepest descent; subsequent calls use the conjugate-gradient
    update.
    """
    first_call = self.gradient_old is None
    self.gradient_old, self.gradient = self.gradient, gradient
    if first_call:
        self._update_sd()
    else:
        self._update_cg()
def _update_cg(self): """Update the conjugate gradient""" beta = self._beta() # Automatic direction reset if beta < 0: self.direction = -self.gradient self.status = "SD" else: self.direction = self.direction * beta - self.gradient s...
def update(self, gradient, step): """Update the search direction given the latest gradient and step""" self.old_gradient = self.gradient self.gradient = gradient N = len(self.gradient) if self.inv_hessian is None: # update the direction self.direction = -s...
def limit_step(self, step):
    """Clip *step* to the interval [-qmax, qmax].

    When no maximum is configured (``qmax is None``) the step is
    returned untouched.
    """
    if self.qmax is None:
        return step
    return np.clip(step, -self.qmax, self.qmax)
def _bracket(self, qinit, f0, fun): """Find a bracket that does contain the minimum""" self.num_bracket = 0 qa = qinit fa = fun(qa) counter = 0 if fa >= f0: while True: self.num_bracket += 1 #print " bracket shrink" ...
def _golden(self, triplet, fun): """Reduce the size of the bracket until the minimum is found""" self.num_golden = 0 (qa, fa), (qb, fb), (qc, fc) = triplet while True: self.num_golden += 1 qd = qa + (qb-qa)*phi/(1+phi) fd = fun(qd) if fd < ...
def update(self, counter, f, x_orig, gradient_orig): """Perform an update of the linear transformation Arguments: | ``counter`` -- the iteration counter of the minimizer | ``f`` -- the function value at ``x_orig`` | ``x_orig`` -- the unknowns in original coo...
def update(self, counter, f, x_orig, gradient_orig): """Perform an update of the linear transformation Arguments: | ``counter`` -- the iteration counter of the minimizer | ``f`` -- the function value at ``x_orig`` | ``x_orig`` -- the unknowns in original coo...
def update(self, counter, f, x_orig, gradient_orig): """Perform an update of the linear transformation Arguments: | ``counter`` -- the iteration counter of the minimizer | ``f`` -- the function value at ``x_orig`` | ``x_orig`` -- the unknowns in original coo...
def do(self, x_orig):
    """Map *x_orig* into preconditioned coordinates.

    Identity when no scales are configured; otherwise rotate with the
    transposed rotation and multiply element-wise by the scales. The
    same map transforms a gradient back to original coordinates.
    """
    if self.scales is None:
        return x_orig
    return self.scales * np.dot(self.rotation.transpose(), x_orig)
def undo(self, x_prec):
    """Map *x_prec* back to original coordinates.

    Identity when no scales are configured; otherwise divide by the
    scales element-wise and rotate back. The same map transforms a
    gradient into preconditioned coordinates.
    """
    if self.scales is None:
        return x_prec
    return np.dot(self.rotation, x_prec / self.scales)
def get_header(self): """Returns the header for screen logging of the minimization""" result = " " if self.step_rms is not None: result += " Step RMS" if self.step_max is not None: result += " Step MAX" if self.grad_rms is not None: resul...
def configure(self, x0, axis):
    """Set up the 1D function for a line search.

    Arguments:
        x0   -- the reference point (q=0)
        axis -- a unit vector in the direction of the line search
    """
    self.x0 = x0
    self.axis = axis
def _compute_equations(self, x, verbose=False): '''Compute the values and the normals (gradients) of active constraints. Arguments: | ``x`` -- The unknowns. ''' # compute the error and the normals. normals = [] values = [] signs = [] error ...
def _rough_shake(self, x, normals, values, error): '''Take a robust, but not very efficient step towards the constraints. Arguments: | ``x`` -- The unknowns. | ``normals`` -- A numpy array with the gradients of the active constraints. Each row is ...
def _fast_shake(self, x, normals, values, error): '''Take an efficient (not always robust) step towards the constraints. Arguments: | ``x`` -- The unknowns. | ``normals`` -- A numpy array with the gradients of the active constraints. Each row is o...
def free_shake(self, x): '''Brings unknowns to the constraints. Arguments: | ``x`` -- The unknowns. ''' self.lock[:] = False normals, values, error = self._compute_equations(x)[:-1] counter = 0 while True: if error <= self.threshold: ...
def safe_shake(self, x, fun, fmax): '''Brings unknowns to the constraints, without increasing fun above fmax. Arguments: | ``x`` -- The unknowns. | ``fun`` -- The function being minimized. | ``fmax`` -- The highest allowed value of the function being ...
def project(self, x, vector): '''Project a vector (gradient or direction) on the active constraints. Arguments: | ``x`` -- The unknowns. | ``vector`` -- A numpy array with a direction or a gradient. The return value is a gradient or direction, where the components...
def get_final(self):
    """Return the final solution in the original coordinates.

    When a preconditioner is active its transform is undone first.
    """
    if self.prec is not None:
        return self.prec.undo(self.x)
    return self.x
def _run(self):
    """Drive the iterative optimizer until it reports a final status.

    ``initialize``/``propagate`` return None while iterations must
    continue; the first non-None value is returned as the outcome.
    """
    status = self.initialize()
    while status is None:
        status = self.propagate()
    return status
def _print_header(self): """Print the header for screen logging""" header = " Iter Dir " if self.constraints is not None: header += ' SC CC' header += " Function" if self.convergence_condition is not None: header += self.convergence_condition.ge...
def _screen(self, s, newline=False):
    """Print *s* on screen when ``self.verbose`` is set.

    Ends with a newline only when *newline* is True; otherwise a
    single space, so successive calls build up one log line.
    """
    if not self.verbose:
        return
    print(s, end='\n' if newline else ' ')
def _line_opt(self): """Perform a line search along the current direction""" direction = self.search_direction.direction if self.constraints is not None: try: direction = self.constraints.project(self.x, direction) except ConstraintError: s...
def edge_index(self):
    """Return a dict mapping each edge to its position in ``self.edges``."""
    return {edge: position for position, edge in enumerate(self.edges)}
def neighbors(self): """A dictionary with neighbors The dictionary will have the following form: ``{vertexX: (vertexY1, vertexY2, ...), ...}`` This means that vertexX and vertexY1 are connected etc. This also implies that the following elements are part of the dictio...
def distances(self): """The matrix with the all-pairs shortest path lenghts""" from molmod.ext import graphs_floyd_warshall distances = np.zeros((self.num_vertices,)*2, dtype=int) #distances[:] = -1 # set all -1, which is just a very big integer #distances.ravel()[::len(distances...
def central_vertices(self):
    """Vertices whose largest distance to any other vertex is minimal.

    The eccentricity of each vertex is the column-wise maximum of the
    all-pairs distance matrix; zero eccentricities (isolated cases)
    are excluded when determining the minimum.
    """
    eccentricities = self.distances.max(0)
    smallest = eccentricities[eccentricities > 0].min()
    return (eccentricities == smallest).nonzero()[0]
def independent_vertices(self): """Lists of vertices that are only interconnected within each list This means that there is no path from a vertex in one list to a vertex in another list. In case of a molecular graph, this would yield the atoms that belong to individual molecule...
def fingerprint(self): """A total graph fingerprint The result is invariant under permutation of the vertex indexes. The chance that two different (molecular) graphs yield the same fingerprint is small but not zero. (See unit tests.)""" if self.num_vertices == 0: ...
def vertex_fingerprints(self): """A fingerprint for each vertex The result is invariant under permutation of the vertex indexes. Vertices that are symmetrically equivalent will get the same fingerprint, e.g. the hydrogens in methane would get the same fingerprint. ...
def equivalent_vertices(self): """A dictionary with symmetrically equivalent vertices.""" level1 = {} for i, row in enumerate(self.vertex_fingerprints): key = row.tobytes() l = level1.get(key) if l is None: l = set([i]) level1[k...
def symmetries(self): """Graph symmetries (permutations) that map the graph onto itself.""" symmetry_cycles = set([]) symmetries = set([]) for match in GraphSearch(EqualPattern(self))(self): match.cycles = match.get_closed_cycles() if match.cycles in symmetry_cyc...
def symmetry_cycles(self):
    """Return the set of cycle representations of all graph symmetries."""
    return set(symmetry.cycles for symmetry in self.symmetries)
def canonical_order(self): """The vertices in a canonical or normalized order. This routine will return a list of vertices in an order that does not depend on the initial order, but only depends on the connectivity and the return values of the function self.get_vertex_string. ...
def iter_breadth_first(self, start=None, do_paths=False, do_duplicates=False): """Iterate over the vertices with the breadth first algorithm. See http://en.wikipedia.org/wiki/Breadth-first_search for more info. If not start vertex is given, the central vertex is taken. By defa...
def iter_breadth_first_edges(self, start=None): """Iterate over the edges with the breadth first convention. We need this for the pattern matching algorithms, but a quick look at Wikipedia did not result in a known and named algorithm. The edges are yielded one by one, togethe...
def get_subgraph(self, subvertices, normalize=False): """Constructs a subgraph of the current graph Arguments: | ``subvertices`` -- The vertices that should be retained. | ``normalize`` -- Whether or not the vertices should renumbered and reduced to the given...
def get_vertex_fingerprints(self, vertex_strings, edge_strings, num_iter=None): """Return an array with fingerprints for each vertex""" import hashlib def str2array(x): """convert a hash string to a numpy array of bytes""" if len(x) == 0: return np.zeros(0...
def get_halfs(self, vertex1, vertex2): """Split the graph in two halfs by cutting the edge: vertex1-vertex2 If this is not possible (due to loops connecting both ends), a GraphError is raised. Returns the vertices in both halfs. """ def grow(origin, other): ...
def get_part(self, vertex_in, vertices_border): """List all vertices that are connected to vertex_in, but are not included in or 'behind' vertices_border. """ vertices_new = set(self.neighbors[vertex_in]) vertices_part = set([vertex_in]) while len(vertices_new) > 0: ...
def get_halfs_double(self, vertex_a1, vertex_b1, vertex_a2, vertex_b2): """Compute the two parts separated by ``(vertex_a1, vertex_b1)`` and ``(vertex_a2, vertex_b2)`` Raise a GraphError when ``(vertex_a1, vertex_b1)`` and ``(vertex_a2, vertex_b2)`` do not separate the graph in two ...