text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, print_function, unicode_literals, absolute_import
"""
This module implements classes for processing Lammps output files:
1. log file: contains the thermodynamic data with the format set by the
'thermo_style' command
2. trajectory file(dump file): the file generated by the 'dump' command
Restrictions:
The first 2 fields of the ATOMS section in the trajectory(dump) file
must be the atom id and the atom type. There can be arbitrary number
of fields after that and they all will be treated as floats and
updated based on the field names in the ITEM: ATOMS line.
"""
import re
import os
from io import open
import numpy as np
from monty.json import MSONable
from pymatgen.core.periodic_table import _pt_data
from pymatgen.core.structure import Structure
from pymatgen.core.lattice import Lattice
from pymatgen.analysis.diffusion_analyzer import DiffusionAnalyzer
from pymatgen.io.lammps.data import LammpsData
__author__ = "Kiran Mathew"
__email__ = "kmathew@lbl.gov"
__credits__ = "Navnidhi Rajput, Michael Humbert"
# TODO write parser for one and multi thermo_styles
class LammpsLog(MSONable):
    """
    Parser for LAMMPS log file.
    """

    def __init__(self, log_file="log.lammps"):
        """
        Args:
            log_file (string): path to the log file
        """
        self.log_file = os.path.abspath(log_file)
        # Simulation timestep size; stays -1 until a 'timestep' command is seen.
        self.timestep = -1
        self._parse_log()

    def _parse_log(self):
        """
        Parse the log file for run and thermodynamic data.

        Sets the thermodynamic data as a dict of lists keyed by the field
        names taken from the custom thermo_style command. thermo_style one
        and multi are not supported yet.
        """
        thermo_data = []
        fixes = []
        d_build = None
        thermo_pattern = None
        # default so the dict comprehension below cannot hit a NameError if
        # no thermo_style line was ever found
        fields = []
        with open(self.log_file, 'r') as logfile:
            for line in logfile:
                # timestep, the unit depends on the 'units' command
                time = re.search(r'timestep\s+([0-9]+)', line)
                if time and not d_build:
                    self.timestep = float(time.group(1))
                # total number of md steps
                steps = re.search(r'run\s+([0-9]+)', line)
                if steps and not d_build:
                    self.nmdsteps = int(steps.group(1))
                # simulation info
                fix = re.search(r'fix.+', line)
                if fix and not d_build:
                    fixes.append(fix.group())
                # dangerous builds
                danger = re.search(r'Dangerous builds\s+([0-9]+)', line)
                if danger and not d_build:
                    # BUGFIX: previously read steps.group(1); on a
                    # "Dangerous builds" line 'steps' is None, so this raised
                    # AttributeError (and would have stored the wrong value).
                    d_build = int(danger.group(1))
                # logging interval
                thermo = re.search(r'thermo\s+([0-9]+)', line)
                if thermo and not d_build:
                    self.interval = float(thermo.group(1))
                # thermodynamic data, set by the thermo_style command
                fmt = re.search(r'thermo_style.+', line)
                if fmt and not d_build:
                    thermo_type = fmt.group().split()[1]
                    fields = fmt.group().split()[2:]
                    no_parse = ["one", "multi"]
                    if thermo_type in no_parse:
                        thermo_data.append("cannot parse thermo_style")
                    else:
                        # one float group per thermo field
                        thermo_pattern_string = r"\s*([0-9eE\.+-]+)" + "".join(
                            [r"\s+([0-9eE\.+-]+)" for _ in range(len(fields) - 1)])
                        thermo_pattern = re.compile(thermo_pattern_string)
                if thermo_pattern:
                    m = thermo_pattern.search(line)
                    if m:
                        thermo_data.append(tuple([float(x) for x in m.groups()]))
        if thermo_data:
            if isinstance(thermo_data[0], str):
                # thermo_style one/multi: unparsable, keep the marker
                self.thermo_data = [thermo_data]
            else:
                # column-major dict: field name -> list of values over time
                self.thermo_data = {
                    fields[i]: [thermo_data[j][i] for j in range(len(thermo_data))]
                    for i in range(len(fields))}
        self.fixes = fixes
        self.dangerous_builds = d_build

    def as_dict(self):
        """Return an MSON dict of all non-callable public attributes."""
        d = {}
        for attrib in [a for a in dir(self)
                       if not a.startswith('__') and not callable(getattr(self, a))]:
            d[attrib] = getattr(self, attrib)
        d["@module"] = self.__class__.__module__
        d["@class"] = self.__class__.__name__
        return d

    # not really needed ?
    @classmethod
    def from_dict(cls, d):
        """Rebuild by re-parsing the log file recorded in the dict."""
        return cls(log_file=d["log_file"])
# TODO: @wood-b parse binary dump files(*.dcd)
class LammpsDump(MSONable):
    """
    Parse lammps dump file.
    """

    def __init__(self, timesteps, natoms, box_bounds, atoms_data):
        # raw parsed values; no post-processing is done here
        self.timesteps = timesteps
        self.natoms = natoms
        self.box_bounds = box_bounds
        self.atoms_data = atoms_data

    @classmethod
    def from_file(cls, dump_file):
        """Read a text dump file and return a LammpsDump."""
        steps = []
        per_atom_rows = []
        n_atoms = 0
        bounds = []
        bounds_lines_seen = 0
        # which section the next non-header line belongs to
        section = None
        with open(dump_file) as handle:
            for raw in handle:
                # section headers switch the parser state and consume the line
                if "ITEM: TIMESTEP" in raw:
                    section = "timestep"
                    continue
                if "ITEM: NUMBER OF ATOMS" in raw:
                    section = "natoms"
                    continue
                if "ITEM: BOX BOUNDS" in raw:
                    section = "bounds"
                    continue
                if "ITEM: ATOMS" in raw:
                    section = "atoms"
                    continue
                if section == "timestep":
                    steps.append(float(raw))
                    section = None
                elif section == "natoms":
                    n_atoms = int(raw)
                    section = None
                elif section == "bounds":
                    bounds.append([float(tok) for tok in raw.split()])
                    # counter is deliberately never reset, matching the
                    # historical behaviour across repeated BOX BOUNDS headers
                    bounds_lines_seen += 1
                    if bounds_lines_seen >= 3:
                        section = None
                elif section == "atoms":
                    per_atom_rows.append([float(tok) for tok in raw.split()])
                    # stop once every frame seen so far is fully populated
                    if len(per_atom_rows) == len(steps) * n_atoms:
                        section = None
        return cls(steps, n_atoms, bounds, per_atom_rows)
# TODO: @wood-b simplify this, use LammpsDump to parse + use mdanalysis to process.
# make sure its backward compatible
class LammpsRun(MSONable):
    """
    Parse the lammps data file, trajectory(dump) file and the log file to extract
    useful info about the system.

    Note: In order to parse trajectory or dump file, the first 2 fields must be
        the id and the atom type. There can be arbitrary number of fields after
        that and they all will be treated as floats.

    Args:
        data_file (str): path to the data file
        trajectory_file (str): path to the trajectory file or dump file
        log_file (str): path to the log file
    """

    def __init__(self, data_file, trajectory_file, log_file="log.lammps"):
        # Absolute paths so later reads do not depend on the cwd.
        self.data_file = os.path.abspath(data_file)
        self.trajectory_file = os.path.abspath(trajectory_file)
        self.log_file = os.path.abspath(log_file)
        self.log = LammpsLog(log_file)
        self.lammps_data = LammpsData.from_file(self.data_file)
        # Per-molecule composition/masses are needed before the trajectory
        # can be grouped by molecule.
        self._set_mol_masses_and_charges()
        self._parse_trajectory()

    def _parse_trajectory(self):
        """
        parse the trajectory file.

        Sets self.trajectory (structured numpy array, one row per atom per
        timestep, sorted by 0-based atom id within each timestep block) and
        self.timesteps (1D float array of dump timesteps).
        """
        traj_timesteps = []
        trajectory = []
        timestep_label = "ITEM: TIMESTEP"
        # "ITEM: ATOMS id type ...
        traj_label_pattern = re.compile(
            r"^\s*ITEM:\s+ATOMS\s+id\s+type\s+([A-Za-z0-9[\]_\s]*)")
        # default: id type x y z vx vy vz mol"
        # updated below based on the field names in the ITEM: ATOMS line
        # Note: the first 2 fields must be the id and the atom type. There can
        # be arbitrary number of fields after that and they all will be treated
        # as floats.
        traj_pattern = re.compile(
            r"\s*(\d+)\s+(\d+)\s+([0-9eE.+-]+)\s+([0-9eE.+-]+)\s+"
            r"([0-9eE.+-]+)\s+"
            r"([0-9eE.+-]+)\s+"
            r"([0-9eE.+-]+)\s+([0-9eE.+-]+)\s+(\d+)\s*")
        parse_timestep = False
        with open(self.trajectory_file) as tf:
            for line in tf:
                if timestep_label in line:
                    parse_timestep = True
                    continue
                if parse_timestep:
                    traj_timesteps.append(float(line))
                    parse_timestep = False
                # Header with per-atom field names: rebuild the data-line
                # regex to match exactly 2 + len(fields) columns.
                if traj_label_pattern.search(line):
                    fields = traj_label_pattern.search(line).group(1)
                    fields = fields.split()
                    # example:- id type x y z vx vy vz mol ...
                    traj_pattern_string = r"\s*(\d+)\s+(\d+)" + "".join(
                        [r"\s+([0-9eE\.+-]+)" for _ in range(len(fields))])
                    traj_pattern = re.compile(traj_pattern_string)
                if traj_pattern.search(line):
                    # first 2 fields must be id and type, the rest of them
                    # will be casted as floats
                    m = traj_pattern.search(line)
                    line_data = []
                    # dump ids are 1-based; store 0-based ids
                    line_data.append(int(m.group(1)) - 1)
                    line_data.append(int(m.group(2)))
                    line_data.extend(
                        [float(x) for i, x in enumerate(m.groups()) if
                         i + 1 > 2])
                    trajectory.append(tuple(line_data))
        # NOTE(review): 'fields' is only bound once an "ITEM: ATOMS id type"
        # header has been seen; a dump without one would raise NameError here.
        traj_dtype = np.dtype([(str('Atoms_id'), np.int64),
                               (str('atom_type'), np.int64)] +
                              [(str(fld), np.float64) for fld in fields])
        self.trajectory = np.array(trajectory, dtype=traj_dtype)
        self.timesteps = np.array(traj_timesteps, dtype=np.float64)
        # Sort each timestep block by atom id so rows line up across steps.
        for step in range(self.timesteps.size):
            begin = step * self.natoms
            end = (step + 1) * self.natoms
            self.trajectory[begin:end] = np.sort(self.trajectory[begin:end],
                                                 order=str("Atoms_id"))

    def _set_mol_masses_and_charges(self):
        """
        set the charge, mass and the atomic makeup for each molecule
        """
        mol_config = []  # [ [atom id1, atom id2, ...], ... ]
        mol_masses = []  # [ [atom mass1, atom mass2, ...], ... ]
        # mol_charges = []
        unique_atomic_masses = self.lammps_data.masses["mass"].values
        mol_ids = self.lammps_data.atoms["molecule-ID"]
        atom_ids = self.lammps_data.atoms.index
        atomic_types = self.lammps_data.atoms["type"]
        unique_mol_ids = np.unique(mol_ids)
        # per-atom mass via the (1-based) atom type
        atomic_masses = unique_atomic_masses[np.array(atomic_types) - 1]
        self.nmols = unique_mol_ids.size
        # NOTE(review): assumes molecule-IDs are contiguous 1..nmols — confirm
        for umid in range(self.nmols):
            # 0-based atom ids belonging to this molecule
            mol_config.append(np.array(atom_ids)[np.where(mol_ids == umid + 1)] - 1)
            mol_masses.append(atomic_masses[np.where(mol_ids == umid + 1)])
        self.mol_config = np.array(mol_config)
        self.mol_masses = np.array(mol_masses)

    def _weighted_average(self, mol_id, mol_vector):
        """
        Calculate the weighted average of the array comprising of
        atomic vectors corresponding to the molecule with id mol_id.

        Args:
            mol_id (int): molecule id
            mol_vector (numpy array): array of shape,
                natoms_in_molecule with id mol_id x 3

        Returns:
            1D numpy array(3 x 1) of weighted averages in x, y, z directions
        """
        mol_masses = self.mol_masses[mol_id]
        # mass-weighted mean per Cartesian component (center of mass when
        # mol_vector holds positions)
        return np.array(
            [np.dot(mol_vector[:, dim], mol_masses) / np.sum(mol_masses)
             for dim in range(3)])

    def _get_mol_vector(self, step, mol_id, param=("x", "y", "z")):
        """
        Returns numpy array corresponding to atomic vectors of parameter
        "param" for the given time step and molecule id

        Args:
            step (int): time step
            mol_id (int): molecule id
            param (list): the atomic parameter for which the weighted
                average is to be computed

        Returns:
            2D numpy array(natoms_in_molecule x 3) of atomic vectors
        """
        # this timestep's block -> this molecule's rows -> requested fields
        begin = step * self.natoms
        end = (step + 1) * self.natoms
        mol_vector_structured = \
            self.trajectory[begin:end][self.mol_config[mol_id]][param]
        mol_vector = np.array(mol_vector_structured.tolist())
        return mol_vector.copy()

    # TODO: remove this and use only get_displacements(an order of magnitude faster)
    def get_structures_from_trajectory(self):
        """
        Convert the coordinates in each time step to a structure(boxed molecule).
        Used to construct DiffusionAnalyzer object.

        Returns:
            list of Structure objects
        """
        # orthogonal box inferred from the data-file bounds
        lattice = Lattice([[self.box_lengths[0], 0, 0],
                           [0, self.box_lengths[1], 0],
                           [0, 0, self.box_lengths[2]]])
        structures = []
        # map (rounded) atomic mass back to an element symbol
        mass_to_symbol = dict(
            (round(y["Atomic mass"], 1), x) for x, y in _pt_data.items())
        unique_atomic_masses = self.lammps_data.masses["mass"].values
        for step in range(self.timesteps.size):
            begin = step * self.natoms
            end = (step + 1) * self.natoms
            mol_vector_structured = \
                self.trajectory[begin:end][:][["x", "y", "z"]]
            mol_vector = np.array(mol_vector_structured.tolist())
            coords = mol_vector.copy()
            species = [mass_to_symbol[round(unique_atomic_masses[atype - 1], 1)]
                       for atype in self.trajectory[begin:end][:]["atom_type"]]
            try:
                structure = Structure(lattice, species, coords,
                                      coords_are_cartesian=True)
            except ValueError as error:
                # NOTE(review): if the FIRST step raises, 'structure' is
                # unbound and the append below raises NameError; for later
                # steps the previous structure is silently re-appended.
                print("Error: '{}' at timestep {} in the trajectory".format(
                    error,
                    int(self.timesteps[step])))
            structures.append(structure)
        return structures

    def get_displacements(self):
        """
        Return the initial structure and displacements for each time step.
        Used to interface with the DiffusionAnalyzer.

        Returns:
            Structure object, numpy array of displacements
        """
        lattice = Lattice([[self.box_lengths[0], 0, 0],
                           [0, self.box_lengths[1], 0],
                           [0, 0, self.box_lengths[2]]])
        mass_to_symbol = dict(
            (round(y["Atomic mass"], 1), x) for x, y in _pt_data.items())
        unique_atomic_masses = self.lammps_data.masses["mass"].values
        frac_coords = []
        for step in range(self.timesteps.size):
            begin = step * self.natoms
            end = (step + 1) * self.natoms
            mol_vector_structured = \
                self.trajectory[begin:end][:][["x", "y", "z"]]
            mol_vector = np.array(mol_vector_structured.tolist())
            coords = mol_vector.copy()
            if step == 0:
                # the first frame anchors the returned Structure
                species = [
                    mass_to_symbol[round(unique_atomic_masses[atype - 1], 1)]
                    for atype in self.trajectory[begin:end][:]["atom_type"]]
                structure = Structure(lattice, species, coords,
                                      coords_are_cartesian=True)
            step_frac_coords = [lattice.get_fractional_coords(crd)
                                for crd in coords]
            frac_coords.append(np.array(step_frac_coords)[:, None])
        frac_coords = np.concatenate(frac_coords, axis=1)
        # frame-to-frame fractional displacements, wrapped to the nearest
        # image, then accumulated into net displacement from frame 0
        dp = frac_coords[:, 1:] - frac_coords[:, :-1]
        dp = dp - np.round(dp)
        f_disp = np.cumsum(dp, axis=1)
        disp = lattice.get_cartesian_coords(f_disp)
        return structure, disp

    def get_diffusion_analyzer(self, specie, temperature, time_step, step_skip,
                               **kwargs):
        """
        Args:
            specie (Element/Specie): Specie to calculate diffusivity for as a
                String. E.g., "Li".
            temperature (float): Temperature of the diffusion run in Kelvin.
            time_step (int): Time step between measurements.
            step_skip (int): Sampling frequency of the displacements (
                time_step is multiplied by this number to get the real time
                between measurements)

        For the other parameters please see the
        pymatgen.analysis.diffusion_analyzer.DiffusionAnalyzer documentation.

        Returns:
            DiffusionAnalyzer
        """
        # structures = self.get_structures_from_trajectory()
        structure, disp = self.get_displacements()
        return DiffusionAnalyzer(structure, disp, specie, temperature,
                                 time_step, step_skip=step_skip,
                                 **kwargs)

    @property
    def natoms(self):
        # number of atoms, taken from the data file
        return len(self.lammps_data.atoms)

    @property
    def box_lengths(self):
        # edge lengths [lx, ly, lz] of the simulation box
        return [l[1] - l[0] for l in self.lammps_data.box_bounds]

    @property
    def traj_timesteps(self):
        """
        trajectory time steps in time units.
        e.g. for units = real, time units = fmsec
        """
        return self.timesteps * self.log.timestep

    @property
    def mol_trajectory(self):
        """
        Compute the weighted average trajectory of each molecule at each
        timestep

        Returns:
            2D numpy array ((n_timesteps*mols_number) x 3)
        """
        traj = []
        for step in range(self.timesteps.size):
            tmp_mol = []
            for mol_id in range(self.nmols):
                mol_coords = self._get_mol_vector(step, mol_id,
                                                  param=["x", "y", "z"])
                # take care of periodic boundary conditions
                pbc_wrap(mol_coords, self.box_lengths)
                tmp_mol.append(self._weighted_average(mol_id, mol_coords))
            traj.append(tmp_mol)
        return np.array(traj)

    @property
    def mol_velocity(self):
        """
        Compute the weighted average velocity of each molecule at each
        timestep.

        Returns:
            2D numpy array ((n_timesteps*mols_number) x 3)
        """
        velocity = []
        for step in range(self.timesteps.size):
            tmp_mol = []
            for mol_id in range(self.nmols):
                mol_velocities = self._get_mol_vector(step, mol_id,
                                                      param=["vx", "vy", "vz"])
                tmp_mol.append(self._weighted_average(mol_id, mol_velocities))
            velocity.append(tmp_mol)
        return np.array(velocity)

    def as_dict(self):
        """Return an MSON dict of non-callable attributes; numpy arrays are
        converted to lists and MSONable attributes serialized recursively."""
        d = {}
        skip = ["mol_velocity", "mol_trajectory"]  # not applicable in general
        attributes = [a for a in dir(self) if a not in skip and not a.startswith('__')]
        attributes = [a for a in attributes if not callable(getattr(self, a))]
        for attrib in attributes:
            obj = getattr(self, attrib)
            if isinstance(obj, MSONable):
                d[attrib] = obj.as_dict()
            elif isinstance(obj, np.ndarray):
                d[attrib] = obj.tolist()
            else:
                d[attrib] = obj
        d["@module"] = self.__class__.__module__
        d["@class"] = self.__class__.__name__
        return d

    # not really needed ?
    @classmethod
    def from_dict(cls, d):
        # re-parses all three files rather than restoring state
        return cls(data_file=d["data_file"], trajectory_file=d["trajectory_file"],
                   log_file=d["log_file"])
def pbc_wrap(array, box_lengths):
    """
    Wrap the molecule-coordinate array around the periodic boundary, in place.

    Args:
        array (numpy.ndarray): molecule coordinates, [[x1,y1,z1],[x2,y2,z2],..]
        box_lengths (list): [x_length, y_length, z_length]
    """
    # NOTE(review): the anchor is the x-coordinate of the first atom and is
    # reused for all three dimensions — confirm this is intentional.
    anchor = array[0, 0]
    for axis in range(3):
        length = box_lengths[axis]
        half = length / 2
        shifted = np.where(array[:, axis] - anchor >= half,
                           array[:, axis] - length, array[:, axis])
        array[:, axis] = np.where(shifted - anchor < -half,
                                  shifted + length, shifted)
|
czhengsci/pymatgen
|
pymatgen/io/lammps/output.py
|
Python
|
mit
| 20,870
|
[
"LAMMPS",
"MDAnalysis",
"pymatgen"
] |
9a361c5c7868a2cec43ef9d246156ad92edfa7eb29a86dd97e10c397c88997f0
|
# Example script: build an email graph from a SQLite database and render it
# interactively with vtkGraphLayoutView.
from vtk import *
import os.path

# Locate the VTKData checkout; the relative depth depends on where the
# example is run from, so probe a few candidate paths.
data_dir = "../../../../VTKData/Data/Infovis/SQLite/"
if not os.path.exists(data_dir):
    data_dir = "../../../../../VTKData/Data/Infovis/SQLite/"
if not os.path.exists(data_dir):
    data_dir = "../../../../../../VTKData/Data/Infovis/SQLite/"
sqlite_file = data_dir + "SmallEmailTest.db"

database = vtkSQLDatabase.CreateFromURL("sqlite://" + sqlite_file)
database.Open("")

# Edges come from the emails table (sender -> recipient); vertex attributes
# come from the employee table.
edge_query = database.GetQueryInstance()
edge_query.SetQuery("select source, target from emails")
vertex_query = database.GetQueryInstance()
vertex_query.SetQuery("select Name, Job, Age from employee")

# Convert both SQL result sets into vtkTables; the queries can be released
# once handed to the table filters.
edge_table = vtkRowQueryToTable()
edge_table.SetQuery(edge_query)
edge_query.FastDelete()
vertex_table = vtkRowQueryToTable()
vertex_table.SetQuery(vertex_query)
vertex_query.FastDelete()

# Join the edge endpoints to the vertex table on the Name column.
graph = vtkTableToGraph()
graph.AddInputConnection(edge_table.GetOutputPort())
graph.AddLinkVertex("source", "Name", False)
graph.AddLinkVertex("target", "Name", False)
graph.AddLinkEdge("source", "target")
graph.SetVertexTableConnection(vertex_table.GetOutputPort())

# Display: labelled vertices, colored by the Age attribute.
view = vtkGraphLayoutView()
view.AddRepresentationFromInputConnection(graph.GetOutputPort())
view.SetVertexLabelArrayName("label")
view.SetVertexLabelVisibility(True)
view.SetVertexColorArrayName("Age")
view.SetColorVertices(True)
view.SetLayoutStrategyToSimple2D()
# Add my new lay strategy
# myFoo = vtkCircularLayoutStrategy()
# view.SetLayoutStrategy(myFoo)

# Cosmetic theming of cells/lines/points.
theme = vtkViewTheme.CreateMellowTheme()
theme.SetCellColor(.2,.2,.6)
theme.SetLineWidth(5)
theme.SetPointSize(10)
view.ApplyViewTheme(theme)
view.SetVertexLabelFontSize(20)
theme.FastDelete()

# Render and hand control to the interactor (blocks until closed).
view.GetRenderWindow().SetSize(600, 600)
view.ResetCamera()
view.Render()
view.GetInteractor().Start()
database.FastDelete()
|
b3c/VTK-5.8
|
Examples/Infovis/Python/databases.py
|
Python
|
bsd-3-clause
| 1,756
|
[
"VTK"
] |
0f2a99df71e4ed44e5e5b721e0b768715a340afb6417800df7a99e92092f2ba3
|
#
# DESCRIPTION: This script parses the given input bowtie and/or LAST files and creates a csv row of their data in the given output csv.
#
# AUTHOR: Chelsea Tymms
import sys, os.path
import argparse
def getOptions():
    """Function to pull in arguments from the command line"""
    description="""This script takes an input fasta file of fusions and identifies all of the identical fusions."""
    cli = argparse.ArgumentParser(description=description)
    cli.add_argument("-bowtie", "--bowtie_log_names", dest="bowtie",
                     action='store', required=False, nargs='*',
                     help="bowtie log file names [Optional]")
    cli.add_argument("-last", "--last_log_names", dest="last",
                     action='store', required=False,
                     help="LAST log file names [Optional]")
    cli.add_argument("-treatment", "--treatment_name", dest="treatment",
                     action='store', required=True, nargs='*',
                     help="Treatment variables [Required]")
    cli.add_argument("-o", "--output_file", dest="output",
                     action='store', required=True,
                     help="Output file name [Required]")
    parsed = cli.parse_args()
    # The user should give at least one bowtie or last log argument;
    # otherwise the program does nothing.
    if not parsed.bowtie and not parsed.last:
        cli.error('No input logs given; add -bowtie or -last')
    return parsed
def main():
    """Collect counts from the bowtie/LAST logs and append one CSV row
    (plus a header row if the output file is new) to args.output."""
    args = getOptions()
    treatmentArray = args.treatment
    firstBowtieTot = 0
    finalBowtieUnaln = 0
    uniqAln = 0
    # If the output file already exists, we will append to it. If it does
    # not, we will open it and write its header.
    if os.path.isfile(args.output):  # we will append
        # text-mode append; 'ab' was Python-2-only safe for str writes
        outputFile = open(args.output, 'a')
    else:  # write the header
        outputFile = open(args.output, 'w')
        for i in range(1, len(treatmentArray) + 1):
            outputFile.write('t_var_' + str(i) + ',')
        if args.bowtie:
            for i in range(1, len(args.bowtie) + 1):
                bowtieNum = 'bowtie' + str(i)
                outputFile.write(','.join(bowtieNum + '_' + n for n in ['tot', 'aln', 'unaln', 'ambig', 'per_uniq', 'per_aln']) + ',')
        if args.last:
            outputFile.write(','.join(['last_uniq', 'last_ambig', 'last_per_uniq', 'last_per_aln']) + ',')
        outputFile.write('per_uniq_aln' + '\n')
    outputFile.write(','.join(str(i) for i in treatmentArray) + ',')
    if args.bowtie:
        # Get some important counts from the first and the final bowtie logs:
        # the first gives total input reads, the last gives what was still
        # unaligned going into LAST.
        proc, aln, unaln, ambig = parseBowtieLog(args.bowtie[0])
        firstBowtieTot = proc
        proc, aln, unaln, ambig = parseBowtieLog(args.bowtie[-1])
        finalBowtieUnaln = ambig + unaln
        # Get and write the counts for each Bowtie log
        for bowtieLog in args.bowtie:
            proc, aln, unaln, ambig = parseBowtieLog(bowtieLog)
            perUniq, perAln = 0, 0
            if proc != 0:
                perUniq = float(aln) / proc * 100
                perAln = (float(aln) + ambig) / proc * 100
            uniqAln = uniqAln + aln
            outputFile.write(','.join(str(i) for i in [proc, aln, unaln, ambig, perUniq, perAln]) + ',')
    # Get and write the counts for the LAST log
    if args.last:
        ambig, uniq = parseLastLog(args.last)
        lastPerUniq, lastPerAln = 0, 0
        if finalBowtieUnaln != 0:
            lastPerUniq = float(uniq) / finalBowtieUnaln * 100
            # BUGFIX: the numerator must be parenthesized (parallel to
            # perAln above); previously operator precedence computed
            # float(ambig) + (uniq / finalBowtieUnaln * 100).
            lastPerAln = (float(ambig) + uniq) / finalBowtieUnaln * 100
        uniqAln = uniqAln + uniq
        outputFile.write(','.join(str(i) for i in [uniq, ambig, lastPerUniq, lastPerAln]) + ',')
    # overall percentage of uniquely aligned reads (was a duplicated
    # 'perUniqAln= perUniqAln=' assignment)
    perUniqAln = float(uniqAln) / firstBowtieTot * 100 if firstBowtieTot != 0 else 0
    outputFile.write(str(perUniqAln) + '\n')
    outputFile.close()
def parseBowtieLog(fileName):
    """Function to parse a bowtie log file.

    Args:
        fileName (str): path to the bowtie log.

    Returns:
        tuple of int: (processed, aligned, unaligned, ambiguous) read counts;
        all zeros (with a warning) if the file does not exist.
    """
    if not os.path.isfile(fileName):
        # print() works on both Python 2 and 3 (was a py2-only statement)
        print("WARNING: " + fileName + " does not exist.")
        return 0, 0, 0, 0
    processed, aligned, unaligned, ambig = 0, 0, 0, 0
    # Text mode: 'rb' yielded bytes on Python 3, breaking the substring
    # tests below. Iterate the file directly instead of readlines().
    with open(fileName, 'r') as bowtieLogFile:
        for line in bowtieLogFile:
            if 'reads processed' in line:
                processed = line.split(':')[1].strip()
            elif 'reads with at least one reported alignment' in line:
                # value is the token after ": ", e.g. ": 80 (80.00%)"
                aligned = line.split(':')[1].split(' ')[1]
            elif 'reads that failed to align' in line:
                unaligned = line.split(':')[1].split(' ')[1]
            elif 'reads with alignments suppressed' in line:
                ambig = line.split(':')[1].split(' ')[1]
    return int(processed), int(aligned), int(unaligned), int(ambig)
def parseLastLog(fileName):
    """Function to parse a LAST log file.

    Args:
        fileName (str): path to the LAST log.

    Returns:
        tuple of int: (ambiguous, unique) aligned-read counts; (0, 0) with a
        warning if the file does not exist.
    """
    if not os.path.isfile(fileName):
        # print() works on both Python 2 and 3 (was a py2-only statement)
        print("WARNING: " + fileName + " does not exist.")
        return 0, 0
    lastAmbig = 0
    lastUniq = 0
    # Text mode: 'rb' yielded bytes on Python 3, breaking the substring
    # tests below. Iterate the file directly instead of readlines().
    with open(fileName, 'r') as lastLogFile:
        for line in lastLogFile:
            if "Ambiguously Aligned Reads" in line:
                lastAmbig = line.split(':')[1].strip()
            elif "Uniquely Aligned Reads" in line:
                lastUniq = line.split(':')[1].strip()
    return int(lastAmbig), int(lastUniq)
# Script entry point.
if __name__ == '__main__':
    main()
|
McIntyre-Lab/papers
|
lehmann_2015/mapping_and_overall_expression/scripts/logParser.py
|
Python
|
lgpl-3.0
| 5,162
|
[
"Bowtie"
] |
84cc0cd5a423492ad17db7656bddd2fe44ab4feb13ab044964c4cbd963794890
|
import zipfile, os.path, string
def visit(dummy, dirname, names):
    """os.path.walk callback: add eligible files under dirname to the archive.

    Relies on the module globals excludeending, excludedir, slashpos and
    archive, which are set up by the driver code below.
    """
    for entry in names:
        keep = 1
        # drop files whose name ends with an excluded suffix
        for suffix in excludeending:
            pos = entry.rfind(suffix)
            if pos + 1 and pos == len(entry) - len(suffix):
                keep = 0
                break
        # drop anything under an excluded directory fragment
        for fragment in excludedir:
            if dirname.find(fragment) + 1:
                keep = 0
                break
        full_path = os.path.join(dirname, entry)
        if keep and os.path.isfile(full_path):
            # archive paths are relative to the parent of the zipped dir
            print(full_path[slashpos:])
            archive.write(full_path, full_path[slashpos:])
# --- Script driver ---
# NOTE(review): Python 2 only — string.rfind and os.path.walk were removed
# in Python 3.
reldirtozip = '..' # must be given relative to the cwd
absdirtozip = os.path.normpath(os.getcwd() + os.sep + reldirtozip)
# index just past the last path separator; slicing with it yields
# archive-relative paths
slashpos = string.rfind(absdirtozip, os.sep)+1
dirname = absdirtozip[slashpos:]
archivename = dirname + '.zip'
# the zip is written inside the directory being archived
archive = zipfile.ZipFile(absdirtozip + os.sep + archivename, 'w', zipfile.ZIP_DEFLATED)
#archive.write('../../KineticsKit.pth', 'KineticsKit.pth')
# skip the archive itself plus compiled/rendered artifacts
excludeending = [archivename, '.pyc', '.pov', '.bmp', '.avi']
excludedir = []
os.path.walk(absdirtozip, visit, None)
archive.close()
|
yukao/Porphyrograph
|
LYM-sources/pg-sensors/KineticsKit/util/4_zip_it.py
|
Python
|
gpl-3.0
| 1,096
|
[
"VisIt"
] |
b652bcb782df726f3befe57b891a454577872eac2c70631df3db405272f517e8
|
from __future__ import print_function
import caffe
from caffe import layers as L
from caffe import params as P
# Solver multipliers per parameter blob: weights get the base learning rate
# and weight decay, biases learn twice as fast with no decay.
weight_param = {'lr_mult': 1, 'decay_mult': 1}
bias_param = {'lr_mult': 2, 'decay_mult': 0}
learned_param = [weight_param, bias_param]
# lr_mult=0 freezes a layer; the same dict is shared for weight and bias.
frozen_param = [{'lr_mult': 0}] * 2
def conv_relu(bottom, ks, nout, stride=1, pad=0, group=1,
              param=learned_param,
              weight_filler=dict(type='gaussian', std=0.01),
              bias_filler=dict(type='constant', value=0.1)):
    """Attach a Convolution layer followed by an in-place ReLU and return
    both layer handles."""
    conv_layer = L.Convolution(
        bottom,
        kernel_size=ks,
        stride=stride,
        num_output=nout,
        pad=pad,
        group=group,
        param=param,
        weight_filler=weight_filler,
        bias_filler=bias_filler,
    )
    return conv_layer, L.ReLU(conv_layer, in_place=True)
def fc_relu(bottom, nout, param=learned_param,
            weight_filler=dict(type='gaussian', std=0.005),
            bias_filler=dict(type='constant', value=0.1)):
    """Attach an InnerProduct (fully connected) layer followed by an
    in-place ReLU and return both layer handles."""
    fc_layer = L.InnerProduct(
        bottom,
        num_output=nout,
        param=param,
        weight_filler=weight_filler,
        bias_filler=bias_filler,
    )
    return fc_layer, L.ReLU(fc_layer, in_place=True)
def max_pool(bottom, ks, stride=1):
    """Attach a MAX-pooling layer with the given kernel size and stride."""
    pooling = L.Pooling(
        bottom,
        pool=P.Pooling.MAX,
        kernel_size=ks,
        stride=stride,
    )
    return pooling
def varnet(data_layer_params, datalayer, label=None, train=True, num_classes=65,
           classifier_name='fc8', learn_all=False):
    """
    Returns a NetSpec specifying VarNet, following the Places AlexNet proto text specification.
    Refer to: http://nbviewer.jupyter.org/github/BVLC/caffe/blob/tutorial/examples/03-fine-tuning.ipynb
    and for multilabel, refer: http://nbviewer.jupyter.org/github/BVLC/caffe/blob/master/examples/pascal-multilabel-with-datalayer.ipynb

    Args:
        data_layer_params (dict): params for the python data layer; must
            contain a 'split' key, used to name the written prototxt.
        datalayer (str): python data layer class name.
        label: unused; kept for interface compatibility.
        train (bool): unused here; dropout is applied unconditionally.
        num_classes (int): output size of the final InnerProduct layer.
        classifier_name (str): unused; the final layer is named 'score'.
        learn_all (bool): if False, freeze all layers except the classifier.

    Returns:
        (str, str): the prototxt text and the filename it was written to.
    """
    n = caffe.NetSpec()
    # Specify the data layer because we are doing mutlilabel classification
    n.data, n.label = L.Python(module='yelp_multilabel_datalayers',
                               layer=datalayer, ntop=2,
                               param_str=str(data_layer_params))
    print(n.label)
    # frozen layers get lr_mult=0 unless learn_all is requested
    param = learned_param if learn_all else frozen_param
    n.conv1, n.relu1 = conv_relu(n.data, 11, 96, stride=4, param=param)
    n.pool1 = max_pool(n.relu1, 3, stride=2)
    n.norm1 = L.LRN(n.pool1, local_size=5, alpha=1e-4, beta=0.75)
    n.conv2, n.relu2 = conv_relu(n.norm1, 5, 256, pad=2, group=2, param=param)
    n.pool2 = max_pool(n.relu2, 3, stride=2)
    n.norm2 = L.LRN(n.pool2, local_size=5, alpha=1e-4, beta=0.75)
    n.conv3, n.relu3 = conv_relu(n.norm2, 3, 384, pad=1, param=param)
    n.conv4, n.relu4 = conv_relu(n.relu3, 3, 384, pad=1, group=2, param=param)
    n.conv5, n.relu5 = conv_relu(n.relu4, 3, 256, pad=1, group=2, param=param)
    n.pool5 = max_pool(n.relu5, 3, stride=2)
    n.fc6, n.relu6 = fc_relu(n.pool5, 4096, param=param)
    # Unlike the fine-tuning example (which adds dropout only when train is
    # True), dropout is applied unconditionally here.
    n.drop6 = L.Dropout(n.relu6, in_place=True)
    n.fc7, n.relu7 = fc_relu(n.drop6, 4096, param=param)
    n.drop7 = fc8input = L.Dropout(n.relu7, in_place=True)
    # always learn fc8 (param=learned_param); renamed fc8 to n.score
    n.score = L.InnerProduct(fc8input, num_output=num_classes, param=learned_param)
    # Custom python layer giving a weighted squared-error multilabel loss
    # (a SigmoidCrossEntropyLoss layer would be the stock alternative).
    n.loss = L.Python(n.score, n.label,
                      module='multilabel_loss',
                      layer="MultiLabelLossLayer", ntop=1,
                      param_str=str({'weight': 100}))
    # BUGFIX: str(n.to_proto) produced the repr of the bound method, not the
    # prototxt text — to_proto must be called.
    proto = str(n.to_proto())
    # write the net to a file and return its prototxt and filename
    filename = "yelp_{0}.prototxt".format(data_layer_params['split'])
    with open(filename, 'w') as f:
        f.write(proto)
    return proto, filename
|
varunagrawal/YelpNet
|
src/network.py
|
Python
|
mit
| 4,313
|
[
"Gaussian"
] |
1dc7e3f954db77c1e3a21c4cf048e922cc0f1dfcfd41d35f51d57dfee04dcdd2
|
#!/usr/bin/env python
"""
Artificial Intelligence for Humans
Volume 1: Fundamental Algorithms
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2013 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
============================================================================================================
This example shows how to normalize the iris data set. Equilateral encoding is used for the species, and range
is used for the measurements.
Reading CSV file: /Users/jheaton/projects/aifh/vol1/python-examples/datasets/iris.csv
[-0.5555555555555558, 0.24999999999999978, -0.864406779661017, -0.9166666666666666, -0.8660254037844386, -0.5]
[-0.6666666666666664, -0.16666666666666674, -0.864406779661017, -0.9166666666666666, -0.8660254037844386, -0.5]
[-0.7777777777777777, 0.0, -0.8983050847457628, -0.9166666666666666, -0.8660254037844386, -0.5]
[-0.8333333333333335, -0.08333333333333337, -0.8305084745762712, -0.9166666666666666, -0.8660254037844386, -0.5]
[-0.611111111111111, 0.33333333333333326, -0.864406779661017, -0.9166666666666666, -0.8660254037844386, -0.5]
[-0.38888888888888873, 0.583333333333333, -0.7627118644067796, -0.75, -0.8660254037844386, -0.5]
[-0.8333333333333335, 0.16666666666666652, -0.864406779661017, -0.8333333333333334, -0.8660254037844386, -0.5]
[-0.611111111111111, 0.16666666666666652, -0.8305084745762712, -0.9166666666666666, -0.8660254037844386, -0.5]
[-0.9444444444444442, -0.2500000000000002, -0.864406779661017, -0.9166666666666666, -0.8660254037844386, -0.5]
[-0.6666666666666664, -0.08333333333333337, -0.8305084745762712, -1.0, -0.8660254037844386, -0.5]
[-0.38888888888888873, 0.4166666666666665, -0.8305084745762712, -0.9166666666666666, -0.8660254037844386, -0.5]
[-0.7222222222222223, 0.16666666666666652, -0.7966101694915254, -0.9166666666666666, -0.8660254037844386, -0.5]
[-0.7222222222222223, -0.16666666666666674, -0.864406779661017, -1.0, -0.8660254037844386, -0.5]
[-1.0, -0.16666666666666674, -0.9661016949152542, -1.0, -0.8660254037844386, -0.5]
[-0.16666666666666674, 0.6666666666666665, -0.9322033898305084, -0.9166666666666666, -0.8660254037844386, -0.5]
[-0.2222222222222221, 1.0, -0.8305084745762712, -0.75, -0.8660254037844386, -0.5]
[-0.38888888888888873, 0.583333333333333, -0.8983050847457628, -0.75, -0.8660254037844386, -0.5]
[-0.5555555555555558, 0.24999999999999978, -0.864406779661017, -0.8333333333333334, -0.8660254037844386, -0.5]
[-0.2222222222222221, 0.49999999999999956, -0.7627118644067796, -0.8333333333333334, -0.8660254037844386, -0.5]
[-0.5555555555555558, 0.49999999999999956, -0.8305084745762712, -0.8333333333333334, -0.8660254037844386, -0.5]
[-0.38888888888888873, 0.16666666666666652, -0.7627118644067796, -0.9166666666666666, -0.8660254037844386, -0.5]
[-0.5555555555555558, 0.4166666666666665, -0.8305084745762712, -0.75, -0.8660254037844386, -0.5]
[-0.8333333333333335, 0.33333333333333326, -1.0, -0.9166666666666666, -0.8660254037844386, -0.5]
[-0.5555555555555558, 0.08333333333333304, -0.7627118644067796, -0.6666666666666666, -0.8660254037844386, -0.5]
[-0.7222222222222223, 0.16666666666666652, -0.6949152542372882, -0.9166666666666666, -0.8660254037844386, -0.5]
[-0.611111111111111, -0.16666666666666674, -0.7966101694915254, -0.9166666666666666, -0.8660254037844386, -0.5]
[-0.611111111111111, 0.16666666666666652, -0.7966101694915254, -0.75, -0.8660254037844386, -0.5]
[-0.4999999999999999, 0.24999999999999978, -0.8305084745762712, -0.9166666666666666, -0.8660254037844386, -0.5]
[-0.4999999999999999, 0.16666666666666652, -0.864406779661017, -0.9166666666666666, -0.8660254037844386, -0.5]
[-0.7777777777777777, 0.0, -0.7966101694915254, -0.9166666666666666, -0.8660254037844386, -0.5]
[-0.7222222222222223, -0.08333333333333337, -0.7966101694915254, -0.9166666666666666, -0.8660254037844386, -0.5]
[-0.38888888888888873, 0.16666666666666652, -0.8305084745762712, -0.75, -0.8660254037844386, -0.5]
[-0.4999999999999999, 0.7499999999999996, -0.8305084745762712, -1.0, -0.8660254037844386, -0.5]
[-0.33333333333333337, 0.8333333333333333, -0.864406779661017, -0.9166666666666666, -0.8660254037844386, -0.5]
[-0.6666666666666664, -0.08333333333333337, -0.8305084745762712, -1.0, -0.8660254037844386, -0.5]
[-0.611111111111111, 0.0, -0.9322033898305084, -0.9166666666666666, -0.8660254037844386, -0.5]
[-0.33333333333333337, 0.24999999999999978, -0.8983050847457628, -0.9166666666666666, -0.8660254037844386, -0.5]
[-0.6666666666666664, -0.08333333333333337, -0.8305084745762712, -1.0, -0.8660254037844386, -0.5]
[-0.9444444444444442, -0.16666666666666674, -0.8983050847457628, -0.9166666666666666, -0.8660254037844386, -0.5]
[-0.5555555555555558, 0.16666666666666652, -0.8305084745762712, -0.9166666666666666, -0.8660254037844386, -0.5]
[-0.611111111111111, 0.24999999999999978, -0.8983050847457628, -0.8333333333333334, -0.8660254037844386, -0.5]
[-0.8888888888888888, -0.7500000000000002, -0.8983050847457628, -0.8333333333333334, -0.8660254037844386, -0.5]
[-0.9444444444444442, 0.0, -0.8983050847457628, -0.9166666666666666, -0.8660254037844386, -0.5]
[-0.611111111111111, 0.24999999999999978, -0.7966101694915254, -0.5833333333333333, -0.8660254037844386, -0.5]
[-0.5555555555555558, 0.49999999999999956, -0.6949152542372882, -0.75, -0.8660254037844386, -0.5]
[-0.7222222222222223, -0.16666666666666674, -0.864406779661017, -0.8333333333333334, -0.8660254037844386, -0.5]
[-0.5555555555555558, 0.49999999999999956, -0.7966101694915254, -0.9166666666666666, -0.8660254037844386, -0.5]
[-0.8333333333333335, 0.0, -0.864406779661017, -0.9166666666666666, -0.8660254037844386, -0.5]
[-0.44444444444444453, 0.4166666666666665, -0.8305084745762712, -0.9166666666666666, -0.8660254037844386, -0.5]
[-0.611111111111111, 0.08333333333333304, -0.864406779661017, -0.9166666666666666, -0.8660254037844386, -0.5]
[0.4999999999999998, 0.0, 0.2542372881355932, 0.08333333333333326, 0.8660254037844386, -0.5]
[0.16666666666666674, 0.0, 0.18644067796610164, 0.16666666666666674, 0.8660254037844386, -0.5]
[0.4444444444444444, -0.08333333333333337, 0.3220338983050848, 0.16666666666666674, 0.8660254037844386, -0.5]
[-0.33333333333333337, -0.7500000000000002, 0.016949152542372836, 0.0, 0.8660254037844386, -0.5]
[0.2222222222222221, -0.3333333333333336, 0.22033898305084731, 0.16666666666666674, 0.8660254037844386, -0.5]
[-0.2222222222222221, -0.3333333333333336, 0.18644067796610164, 0.0, 0.8660254037844386, -0.5]
[0.11111111111111094, 0.08333333333333304, 0.2542372881355932, 0.25, 0.8660254037844386, -0.5]
[-0.6666666666666664, -0.6666666666666667, -0.22033898305084754, -0.25, 0.8660254037844386, -0.5]
[0.27777777777777746, -0.2500000000000002, 0.22033898305084731, 0.0, 0.8660254037844386, -0.5]
[-0.4999999999999999, -0.41666666666666663, -0.016949152542372947, 0.08333333333333326, 0.8660254037844386, -0.5]
[-0.611111111111111, -1.0, -0.15254237288135597, -0.25, 0.8660254037844386, -0.5]
[-0.11111111111111094, -0.16666666666666674, 0.0847457627118644, 0.16666666666666674, 0.8660254037844386, -0.5]
[-0.05555555555555558, -0.8333333333333333, 0.016949152542372836, -0.25, 0.8660254037844386, -0.5]
[-2.220446049250313e-16, -0.2500000000000002, 0.2542372881355932, 0.08333333333333326, 0.8660254037844386, -0.5]
[-0.277777777777778, -0.2500000000000002, -0.11864406779661019, 0.0, 0.8660254037844386, -0.5]
[0.33333333333333326, -0.08333333333333337, 0.15254237288135597, 0.08333333333333326, 0.8660254037844386, -0.5]
[-0.277777777777778, -0.16666666666666674, 0.18644067796610164, 0.16666666666666674, 0.8660254037844386, -0.5]
[-0.16666666666666674, -0.41666666666666663, 0.05084745762711851, -0.25, 0.8660254037844386, -0.5]
[0.05555555555555558, -0.8333333333333333, 0.18644067796610164, 0.16666666666666674, 0.8660254037844386, -0.5]
[-0.277777777777778, -0.5833333333333334, -0.016949152542372947, -0.16666666666666663, 0.8660254037844386, -0.5]
[-0.11111111111111094, 0.0, 0.2881355932203389, 0.41666666666666674, 0.8660254037844386, -0.5]
[-2.220446049250313e-16, -0.3333333333333336, 0.016949152542372836, 0.0, 0.8660254037844386, -0.5]
[0.11111111111111094, -0.5833333333333334, 0.3220338983050848, 0.16666666666666674, 0.8660254037844386, -0.5]
[-2.220446049250313e-16, -0.3333333333333336, 0.2542372881355932, -0.08333333333333337, 0.8660254037844386, -0.5]
[0.16666666666666674, -0.2500000000000002, 0.11864406779661008, 0.0, 0.8660254037844386, -0.5]
[0.27777777777777746, -0.16666666666666674, 0.15254237288135597, 0.08333333333333326, 0.8660254037844386, -0.5]
[0.3888888888888886, -0.3333333333333336, 0.2881355932203389, 0.08333333333333326, 0.8660254037844386, -0.5]
[0.33333333333333326, -0.16666666666666674, 0.35593220338983045, 0.33333333333333326, 0.8660254037844386, -0.5]
[-0.05555555555555558, -0.2500000000000002, 0.18644067796610164, 0.16666666666666674, 0.8660254037844386, -0.5]
[-0.2222222222222221, -0.5, -0.15254237288135597, -0.25, 0.8660254037844386, -0.5]
[-0.33333333333333337, -0.6666666666666667, -0.05084745762711873, -0.16666666666666663, 0.8660254037844386, -0.5]
[-0.33333333333333337, -0.6666666666666667, -0.0847457627118644, -0.25, 0.8660254037844386, -0.5]
[-0.16666666666666674, -0.41666666666666663, -0.016949152542372947, -0.08333333333333337, 0.8660254037844386, -0.5]
[-0.05555555555555558, -0.41666666666666663, 0.3898305084745761, 0.25, 0.8660254037844386, -0.5]
[-0.38888888888888873, -0.16666666666666674, 0.18644067796610164, 0.16666666666666674, 0.8660254037844386, -0.5]
[-0.05555555555555558, 0.16666666666666652, 0.18644067796610164, 0.25, 0.8660254037844386, -0.5]
[0.33333333333333326, -0.08333333333333337, 0.2542372881355932, 0.16666666666666674, 0.8660254037844386, -0.5]
[0.11111111111111094, -0.7500000000000002, 0.15254237288135597, 0.0, 0.8660254037844386, -0.5]
[-0.277777777777778, -0.16666666666666674, 0.05084745762711851, 0.0, 0.8660254037844386, -0.5]
[-0.33333333333333337, -0.5833333333333334, 0.016949152542372836, 0.0, 0.8660254037844386, -0.5]
[-0.33333333333333337, -0.5, 0.15254237288135597, -0.08333333333333337, 0.8660254037844386, -0.5]
[-2.220446049250313e-16, -0.16666666666666674, 0.22033898305084731, 0.08333333333333326, 0.8660254037844386, -0.5]
[-0.16666666666666674, -0.5, 0.016949152542372836, -0.08333333333333337, 0.8660254037844386, -0.5]
[-0.611111111111111, -0.7500000000000002, -0.22033898305084754, -0.25, 0.8660254037844386, -0.5]
[-0.277777777777778, -0.41666666666666663, 0.0847457627118644, 0.0, 0.8660254037844386, -0.5]
[-0.2222222222222221, -0.16666666666666674, 0.0847457627118644, -0.08333333333333337, 0.8660254037844386, -0.5]
[-0.2222222222222221, -0.2500000000000002, 0.0847457627118644, 0.0, 0.8660254037844386, -0.5]
[0.05555555555555558, -0.2500000000000002, 0.11864406779661008, 0.0, 0.8660254037844386, -0.5]
[-0.5555555555555558, -0.5833333333333334, -0.3220338983050848, -0.16666666666666663, 0.8660254037844386, -0.5]
[-0.2222222222222221, -0.3333333333333336, 0.05084745762711851, 0.0, 0.8660254037844386, -0.5]
[0.11111111111111094, 0.08333333333333304, 0.6949152542372881, 1.0, 0.0, 1.0]
[-0.16666666666666674, -0.41666666666666663, 0.3898305084745761, 0.5, 0.0, 1.0]
[0.5555555555555551, -0.16666666666666674, 0.6610169491525424, 0.6666666666666667, 0.0, 1.0]
[0.11111111111111094, -0.2500000000000002, 0.5593220338983049, 0.41666666666666674, 0.0, 1.0]
[0.2222222222222221, -0.16666666666666674, 0.6271186440677965, 0.7500000000000002, 0.0, 1.0]
[0.833333333333333, -0.16666666666666674, 0.8983050847457625, 0.6666666666666667, 0.0, 1.0]
[-0.6666666666666664, -0.5833333333333334, 0.18644067796610164, 0.33333333333333326, 0.0, 1.0]
[0.6666666666666665, -0.2500000000000002, 0.7966101694915253, 0.41666666666666674, 0.0, 1.0]
[0.33333333333333326, -0.5833333333333334, 0.6271186440677965, 0.41666666666666674, 0.0, 1.0]
[0.6111111111111112, 0.33333333333333326, 0.7288135593220337, 1.0, 0.0, 1.0]
[0.2222222222222221, 0.0, 0.3898305084745761, 0.5833333333333333, 0.0, 1.0]
[0.16666666666666674, -0.41666666666666663, 0.4576271186440677, 0.5, 0.0, 1.0]
[0.3888888888888886, -0.16666666666666674, 0.5254237288135593, 0.6666666666666667, 0.0, 1.0]
[-0.2222222222222221, -0.5833333333333334, 0.35593220338983045, 0.5833333333333333, 0.0, 1.0]
[-0.16666666666666674, -0.3333333333333336, 0.3898305084745761, 0.9166666666666665, 0.0, 1.0]
[0.16666666666666674, 0.0, 0.4576271186440677, 0.8333333333333333, 0.0, 1.0]
[0.2222222222222221, -0.16666666666666674, 0.5254237288135593, 0.41666666666666674, 0.0, 1.0]
[0.8888888888888888, 0.49999999999999956, 0.9322033898305084, 0.7500000000000002, 0.0, 1.0]
[0.8888888888888888, -0.5, 1.0, 0.8333333333333333, 0.0, 1.0]
[-0.05555555555555558, -0.8333333333333333, 0.35593220338983045, 0.16666666666666674, 0.0, 1.0]
[0.4444444444444444, 0.0, 0.5932203389830508, 0.8333333333333333, 0.0, 1.0]
[-0.277777777777778, -0.3333333333333336, 0.3220338983050848, 0.5833333333333333, 0.0, 1.0]
[0.8888888888888888, -0.3333333333333336, 0.9322033898305084, 0.5833333333333333, 0.0, 1.0]
[0.11111111111111094, -0.41666666666666663, 0.3220338983050848, 0.41666666666666674, 0.0, 1.0]
[0.33333333333333326, 0.08333333333333304, 0.5932203389830508, 0.6666666666666667, 0.0, 1.0]
[0.6111111111111112, 0.0, 0.6949152542372881, 0.41666666666666674, 0.0, 1.0]
[0.05555555555555558, -0.3333333333333336, 0.2881355932203389, 0.41666666666666674, 0.0, 1.0]
[-2.220446049250313e-16, -0.16666666666666674, 0.3220338983050848, 0.41666666666666674, 0.0, 1.0]
[0.16666666666666674, -0.3333333333333336, 0.5593220338983049, 0.6666666666666667, 0.0, 1.0]
[0.6111111111111112, -0.16666666666666674, 0.6271186440677965, 0.25, 0.0, 1.0]
[0.7222222222222223, -0.3333333333333336, 0.7288135593220337, 0.5, 0.0, 1.0]
[1.0, 0.49999999999999956, 0.8305084745762712, 0.5833333333333333, 0.0, 1.0]
[0.16666666666666674, -0.3333333333333336, 0.5593220338983049, 0.7500000000000002, 0.0, 1.0]
[0.11111111111111094, -0.3333333333333336, 0.3898305084745761, 0.16666666666666674, 0.0, 1.0]
[-2.220446049250313e-16, -0.5, 0.5593220338983049, 0.08333333333333326, 0.0, 1.0]
[0.8888888888888888, -0.16666666666666674, 0.7288135593220337, 0.8333333333333333, 0.0, 1.0]
[0.11111111111111094, 0.16666666666666652, 0.5593220338983049, 0.9166666666666665, 0.0, 1.0]
[0.16666666666666674, -0.08333333333333337, 0.5254237288135593, 0.41666666666666674, 0.0, 1.0]
[-0.05555555555555558, -0.16666666666666674, 0.2881355932203389, 0.41666666666666674, 0.0, 1.0]
[0.4444444444444444, -0.08333333333333337, 0.4915254237288136, 0.6666666666666667, 0.0, 1.0]
[0.33333333333333326, -0.08333333333333337, 0.5593220338983049, 0.9166666666666665, 0.0, 1.0]
[0.4444444444444444, -0.08333333333333337, 0.3898305084745761, 0.8333333333333333, 0.0, 1.0]
[-0.16666666666666674, -0.41666666666666663, 0.3898305084745761, 0.5, 0.0, 1.0]
[0.3888888888888886, 0.0, 0.6610169491525424, 0.8333333333333333, 0.0, 1.0]
[0.33333333333333326, 0.08333333333333304, 0.5932203389830508, 1.0, 0.0, 1.0]
[0.33333333333333326, -0.16666666666666674, 0.423728813559322, 0.8333333333333333, 0.0, 1.0]
[0.11111111111111094, -0.5833333333333334, 0.35593220338983045, 0.5, 0.0, 1.0]
[0.2222222222222221, -0.16666666666666674, 0.423728813559322, 0.5833333333333333, 0.0, 1.0]
[0.05555555555555558, 0.16666666666666652, 0.4915254237288136, 0.8333333333333333, 0.0, 1.0]
[-0.11111111111111094, -0.16666666666666674, 0.3898305084745761, 0.41666666666666674, 0.0, 1.0]
"""
__author__ = 'jheaton'
import os
import sys
# Make the AIFH core library (../lib/aifh) importable.
aifh_dir = os.path.dirname(os.path.abspath(__file__))
aifh_dir = os.path.abspath(aifh_dir + os.sep + ".." + os.sep + "lib" + os.sep + "aifh")
sys.path.append(aifh_dir)
from normalize import Normalize
# Locate the Iris data set relative to this script.
# FIX: use os.path.join instead of raw string concatenation; the original
# appended "../../datasets/iris.csv" directly to the directory name, which
# only resolved to the right place by accident of abspath() normalization.
irisFile = os.path.dirname(os.path.realpath(__file__))
irisFile = os.path.abspath(os.path.join(irisFile, "..", "datasets", "iris.csv"))
# Read the Iris data set.
print('Reading CSV file: ' + irisFile)
norm = Normalize()
result = norm.load_csv(irisFile)
# Range-normalize the four numeric measurement columns to [-1, 1].
for i in range(0, 4):
    norm.make_col_numeric(result, i)
    norm.norm_col_range(result, i, -1, 1)
# Discover all of the classes for column #4, the iris species.
classes = norm.build_class_map(result, 4)
# Encode the species column with equilateral encoding in [-1, 1].
norm.norm_col_equilateral(result, 4, classes, -1, 1)
# Display the resulting data
norm.display_data(result)
|
trenton3983/Artificial_Intelligence_for_Humans
|
vol1/python-examples/examples/example_normalize.py
|
Python
|
apache-2.0
| 17,100
|
[
"VisIt"
] |
c289b51139c1322ee1ce1a5dfa7768c82fedf77e98f72cb8e5cf2c76bbaabbb7
|
#!/usr/bin/env python
"""
basic model line shapes and distribution functions
"""
from __future__ import division
from numpy import (pi, log, exp, sqrt, arctan, cos, where)
from numpy.testing import assert_allclose
from scipy.special import gamma as gamfcn
from scipy.special import gammaln, erf, erfc, wofz
# Pre-computed constants shared by the line-shape functions below.
log2 = log(2)
s2pi = sqrt(2*pi)
spi = sqrt(pi)
s2 = sqrt(2.0)
# Registry of the line-shape/distribution function names defined in this
# module.
functions = ('gaussian', 'lorentzian', 'voigt', 'pvoigt', 'pearson7',
             'breit_wigner', 'damped_oscillator', 'logistic', 'lognormal',
             'students_t', 'expgaussian', 'donaich', 'skewed_gaussian',
             'skewed_voigt', 'step', 'rectangle', 'erf', 'erfc', 'wofz',
             'gamma', 'gammaln', 'exponential', 'powerlaw', 'linear',
             'parabolic')
def gaussian(x, amplitude=1.0, center=0.0, sigma=1.0):
    """One-dimensional Gaussian line shape.

    gaussian(x, amplitude, center, sigma)
    """
    dev = 1.0*x - center
    norm = amplitude / (s2pi * sigma)
    return norm * exp(-dev**2 / (2*sigma**2))
def lorentzian(x, amplitude=1.0, center=0.0, sigma=1.0):
    """One-dimensional Lorentzian line shape.

    lorentzian(x, amplitude, center, sigma)
    """
    arg = (1.0*x - center) / sigma
    return amplitude / (1 + arg**2) / (pi * sigma)
def voigt(x, amplitude=1.0, center=0.0, sigma=1.0, gamma=None):
    """One-dimensional Voigt profile.

    see http://en.wikipedia.org/wiki/Voigt_profile
    """
    # by convention the Lorentzian width defaults to the Gaussian width
    gam = sigma if gamma is None else gamma
    z = (x - center + 1j*gam) / (sigma * s2)
    return amplitude * wofz(z).real / (sigma * s2pi)
def pvoigt(x, amplitude=1.0, center=0.0, sigma=1.0, fraction=0.5):
    """One-dimensional pseudo-Voigt profile.

    pvoigt(x, amplitude, center, sigma, fraction)
       = amplitude*(1-fraction)*gaussian(x, center, sigma_g) +
         amplitude*fraction*lorentzian(x, center, sigma)

    The Gaussian component uses sigma_g = sigma/sqrt(2*ln 2)
    (~ sigma/1.17741) so that both components share the same
    FWHM of 2*sigma.
    """
    sigma_g = sigma / sqrt(2*log2)
    gauss_part = (1 - fraction) * gaussian(x, amplitude, center, sigma_g)
    lorentz_part = fraction * lorentzian(x, amplitude, center, sigma)
    return gauss_part + lorentz_part
def pearson7(x, amplitude=1.0, center=0.0, sigma=1.0, expon=1.0):
    """Pearson VII line shape (wikipedia definition).

    pearson7(x, center, sigma, expon) =
        amplitude*(1+arg**2)**(-expon)/(sigma*beta(expon-0.5, 0.5))
    where arg = (x-center)/sigma and beta() is the beta function.
    """
    arg = (x - center) / sigma
    # 1/beta(expon-0.5, 0.5) expressed through gamma functions
    scale = amplitude * gamfcn(expon) / (gamfcn(0.5) * gamfcn(expon - 0.5))
    return scale * (1 + arg**2)**(-expon) / sigma
def breit_wigner(x, amplitude=1.0, center=0.0, sigma=1.0, q=1.0):
    """Breit-Wigner-Fano line shape.

    = amplitude*(q*sigma/2 + x - center)**2 / ((sigma/2)**2 + (x - center)**2)
    """
    half_width = sigma / 2.0
    numerator = (q*half_width + x - center)**2
    return amplitude * numerator / (half_width*half_width + (x - center)**2)
def damped_oscillator(x, amplitude=1.0, center=1., sigma=0.1):
    """Amplitude of a damped harmonic oscillator.

    amplitude/sqrt((1.0 - (x/center)**2)**2 + (2*sigma*x/center)**2)
    """
    # guard against a (near-)zero resonance frequency
    center = max(1.e-9, abs(center))
    detune = (1.0 - (x/center)**2)**2
    damping = (2*sigma*x/center)**2
    return amplitude / sqrt(detune + damping)
def logistic(x, amplitude=1., center=0., sigma=1.):
    """Logistic line shape (yet another sigmoidal curve).

    = amplitude*(1. - 1. / (1 + exp((x-center)/sigma)))
    """
    arg = (x - center) / sigma
    return amplitude * (1. - 1. / (1. + exp(arg)))
def lognormal(x, amplitude=1.0, center=0., sigma=1):
"""log-normal function
lognormal(x, center, sigma)
= (amplitude/x) * exp(-(ln(x) - center)/ (2* sigma**2))
"""
x[where(x<=1.e-19)] = 1.e-19
return (amplitude/(x*sigma*s2pi)) * exp(-(log(x)-center)**2/ (2* sigma**2))
def students_t(x, amplitude=1.0, center=0.0, sigma=1.0):
    """Student's t distribution.

        gamma((sigma+1)/2)
        ----------------------------- * (1 + (x-center)**2/sigma)^(-(sigma+1)/2)
        sqrt(sigma*pi)*gamma(sigma/2)
    """
    s1 = (sigma + 1) / 2.0
    denom = sqrt(sigma*pi) * gamfcn(sigma/2)
    shape = (1 + (x - center)**2 / sigma)**(-s1)
    return amplitude * shape * gamfcn(s1) / denom
def expgaussian(x, amplitude=1, center=0, sigma=1.0, gamma=1.0):
    """Exponentially modified Gaussian.

    = (gamma/2) exp[center*gamma + (gamma*sigma)**2/2 - gamma*x] *
      erfc[(center + gamma*sigma**2 - x)/(sqrt(2)*sigma)]

    http://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution
    """
    gss = gamma * sigma * sigma
    exp_arg = gamma * (center + gss/2.0 - x)
    erfc_arg = (center + gss - x) / (s2 * sigma)
    return amplitude * (gamma/2) * exp(exp_arg) * erfc(erfc_arg)
def donaich(x, amplitude=1.0, center=0, sigma=1.0, gamma=0.0):
    """Doniach-Sunjic asymmetric line shape, used for photo-emission.

    = amplitude* cos(pi*gamma/2 + (1-gamma) arctan((x-center)/sigma) /
      (sigma**2 + (x-center)**2)**[(1-gamma)/2]

    see http://www.casaxps.com/help_manual/line_shapes.htm
    """
    arg = (x - center) / sigma
    gm1 = 1.0 - gamma
    prefactor = amplitude / (sigma**gm1)
    numerator = cos(pi*gamma/2 + gm1*arctan(arg))
    return prefactor * numerator / (1 + arg**2)**(gm1/2)
def skewed_gaussian(x, amplitude=1.0, center=0.0, sigma=1.0, gamma=0.0):
    """Gaussian skewed by an error function, equal to

    gaussian(x, center, sigma)*(1+erf(beta*(x-center)))
    with beta = gamma/(sigma*sqrt(2)).

    gamma < 0: tail toward low values of the centroid
    gamma > 0: tail toward high values of the centroid
    see http://en.wikipedia.org/wiki/Skew_normal_distribution
    """
    skew_factor = 1 + erf(gamma * (x - center) / (s2 * sigma))
    return gaussian(x, amplitude, center, sigma) * skew_factor
def skewed_voigt(x, amplitude=1.0, center=0.0, sigma=1.0, gamma=None, skew=0.0):
    """Skewed Voigt line shape, skewed with an error function;
    useful for ad-hoc Compton scatter profiles.

    = voigt(x, center, sigma, gamma)*(1+erf(beta*(x-center)))
    with beta = skew/(sigma*sqrt(2)).

    skew < 0: tail toward low values of the centroid
    skew > 0: tail toward high values of the centroid
    see http://en.wikipedia.org/wiki/Skew_normal_distribution
    """
    beta = skew / (s2 * sigma)
    skew_factor = 1 + erf(beta * (x - center))
    return voigt(x, amplitude, center, sigma, gamma=gamma) * skew_factor
def step(x, amplitude=1.0, center=0.0, sigma=1.0, form='linear'):
    """Step function rising from 0.0 to `amplitude`, with half-max at
    `center`, rising with form:

      'linear' (default) = amplitude * min(1, max(0, arg))
      'atan', 'arctan'   = amplitude * (0.5 + atan(arg)/pi)
      'erf'              = amplitude * (1 + erf(arg))/2.0
      'logistic'         = amplitude * [1 - 1/(1 + exp(arg))]

    where arg = (x - center)/sigma.
    """
    # avoid division by zero for a degenerate width
    if abs(sigma) < 1.e-13:
        sigma = 1.e-13
    arg = (x - center) / sigma
    if form == 'erf':
        arg = 0.5*(1 + erf(arg))
    elif form.startswith('logi'):
        arg = (1. - 1./(1. + exp(arg)))
    elif form in ('atan', 'arctan'):
        arg = 0.5 + arctan(arg)/pi
    else:
        # linear ramp, clipped to [0, 1] (requires array input)
        arg[where(arg < 0)] = 0.0
        arg[where(arg > 1)] = 1.0
    return amplitude*arg
def rectangle(x, amplitude=1.0, center1=0.0, sigma1=1.0,
              center2=1.0, sigma2=1.0, form='linear'):
    """Rectangle function: step up, then step down (see `step`).

    Starts at 0.0, rises to `amplitude` (at center1 with width sigma1),
    then drops back to 0.0 (at center2 with width sigma2) with form:

      'linear' (default) = ramp_up + ramp_down
      'atan', 'arctan'   = amplitude*(atan(arg1) + atan(arg2))/pi
      'erf'              = amplitude*(erf(arg1) + erf(arg2))/2.
      'logisitic'        = amplitude*[1 - 1/(1 + exp(arg1)) - 1/(1+exp(arg2))]

    where arg1 = (x - center1)/sigma1 and arg2 = -(x - center2)/sigma2.
    """
    # avoid division by zero for degenerate widths
    if abs(sigma1) < 1.e-13:
        sigma1 = 1.e-13
    if abs(sigma2) < 1.e-13:
        sigma2 = 1.e-13
    arg1 = (x - center1) / sigma1
    arg2 = (center2 - x) / sigma2
    if form == 'erf':
        result = 0.5*(erf(arg1) + erf(arg2))
    elif form.startswith('logi'):
        result = (1. - 1./(1. + exp(arg1)) - 1./(1. + exp(arg2)))
    elif form in ('atan', 'arctan'):
        result = (arctan(arg1) + arctan(arg2))/pi
    else:
        # linear: clip the up-ramp to [0, 1], the down-ramp to [-1, 0]
        arg1[where(arg1 < 0)] = 0.0
        arg1[where(arg1 > 1)] = 1.0
        arg2[where(arg2 > 0)] = 0.0
        arg2[where(arg2 < -1)] = -1.0
        result = arg1 + arg2
    return amplitude*result
def _erf(x):
    """error function. = 2/sqrt(pi)*integral(exp(-t**2), t=[0, z])"""
    return erf(x)
def _erfc(x):
    """complemented error function. = 1 - erf(x)"""
    return erfc(x)
def _wofz(x):
    """Faddeeva function for complex argument. = exp(-x**2)*erfc(-i*x)"""
    return wofz(x)
def _gamma(x):
    """gamma function"""
    return gamfcn(x)
def _gammaln(x):
    """log of absolute value of gamma function"""
    return gammaln(x)
def exponential(x, amplitude=1, decay=1):
    """x -> amplitude * exp(-x/decay)"""
    return amplitude * exp(-x / decay)
def powerlaw(x, amplitude=1, exponent=1.0):
    """x -> amplitude * x**exponent"""
    return amplitude * x ** exponent
def linear(x, slope, intercept):
    """x -> slope * x + intercept"""
    return intercept + slope * x
def parabolic(x, a, b, c):
    """x -> a * x**2 + b * x + c"""
    quadratic = a * x**2
    return quadratic + b * x + c
def assert_results_close(actual, desired, rtol=1e-03, atol=1e-03,
                         err_msg='', verbose=True):
    """Assert that every parameter value in `desired` is close to the
    corresponding value in `actual` (AssertionError otherwise)."""
    for name in desired:
        assert_allclose(actual[name], desired[name], rtol,
                        atol, err_msg, verbose)
|
DiamondLightSource/auto_tomo_calibration-experimental
|
old_code_scripts/simulate_data/lmfit-py/lmfit/lineshapes.py
|
Python
|
apache-2.0
| 9,554
|
[
"Gaussian"
] |
400eca038c860f85184982fbd916118f997d5a75059b6079c57a063c7078bc2a
|
"""plot_utils.py: Some utility function for plotting data in moose.
Last modified: Sun Jan 10, 2016 04:04PM
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2013, NCBS Bangalore"
__credits__ = ["NCBS Bangalore", "Bhalla Lab"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
__status__ = "Development"
import matplotlib.pyplot as plt
from . import _moose as moose
from . import print_utils as pu
import numpy as np
def plotAscii(yvec, xvec = None, file=None):
    """Plot two list-like objects in the terminal using gnuplot.

    If `file` is given, the data is saved to that file as well.
    """
    # default the x axis to the sample index
    xs = list(range(len(yvec))) if xvec is None else xvec
    plotInTerminal(yvec, xs, file=file)
def plotInTerminal(yvec, xvec = None, file=None):
    '''
    Plot given vectors in terminal using gnuplot.
    If file is not None then write the data to a file.

    Fixes over the original:
      - xvec=None no longer crashes in zip(); it defaults to the
        sample index (matching plotAscii).
      - the gnuplot pipe is opened in text mode (universal_newlines)
        so that writing str works on Python 3; the original wrote
        str to a binary pipe.
    '''
    import subprocess
    if xvec is None:
        xvec = list(range(len(yvec)))
    g = subprocess.Popen(["gnuplot"], stdin=subprocess.PIPE,
                         universal_newlines=True)
    g.stdin.write("set term dumb 100 25\n")
    g.stdin.write("plot '-' using 1:2 title '{}' with linespoints\n".format(file))
    if file:
        saveAsGnuplot(yvec, xvec, file=file)
    for i, j in zip(xvec, yvec):
        g.stdin.write("%f %f\n" % (i, j))
    g.stdin.write("\n")
    g.stdin.flush()
def xyToString( yvec, xvec, sepby = ' '):
    """ Given two list-like objects, return them as a text table:
    one "y<sepby>x" pair per line.
    """
    return "\n".join(
        "{}{}{}".format(y, sepby, x) for y, x in zip(yvec, xvec))
def saveNumpyVec( yvec, xvec, file):
    """Save the two vectors to a data file (no-op when file is None).
    """
    if file is None:
        return
    print(("[INFO] Saving plot data to file {}".format(file)))
    with open(file, "w") as dataF:
        dataF.write(xyToString(yvec, xvec))
def saveAsGnuplot( yvec, xvec, file):
    ''' Save the plot as a stand-alone gnuplot script (no-op when
    file is None). The script is written to "<file>.gnuplot". '''
    if file is None:
        return
    print(("[INFO] Saving plot data to a gnuplot-script: {}".format(file)))
    script = [
        "#!/bin/bash",
        "gnuplot << EOF",
        "set term post eps",
        "set output \"{0}.eps\"".format(file),
        "plot '-' using 0:1 title '{0}'".format(file),
        xyToString(yvec, xvec),
        "EOF",
    ]
    with open(file+".gnuplot","w") as gnuplotF:
        gnuplotF.write("\n".join(script))
def scaleVector(vec, scaleF):
    """ Scale a vector by a factor; returned unchanged when the factor
    is 1.0 or None. """
    if scaleF == 1.0 or scaleF is None:
        return vec
    return [v * scaleF for v in vec]
def scaleAxis(xvec, yvec, scaleX, scaleY):
    """ Multiply each element of both axes by its scale factor """
    return scaleVector(xvec, scaleX), scaleVector(yvec, scaleY)
def reformatTable(table, kwargs):
    """ Given a table, return (xvec, yvec) vectors.

    Accepts either a moose.Table (x values are generated from the
    simulation clock) or an (xvec, yvec) tuple.

    Fixes over the original:
      - np.linspace(0, t, len(vecY)) replaces np.arange(0, t, len(vecY)),
        which wrongly used the vector length as the *step* and produced
        an x vector that did not match vecY (plotTables already used
        linspace for the same purpose).
      - an unsupported input type now raises TypeError instead of
        failing later with UnboundLocalError.
    """
    clock = moose.Clock('/clock')
    if type(table) == moose.Table:
        vecY = table.vector
        vecX = np.linspace(0, clock.currentTime, len(vecY))
    elif type(table) == tuple:
        vecX, vecY = table
    else:
        raise TypeError("Expected moose.Table or tuple, got {}".format(type(table)))
    return (vecX, vecY)
def plotTable(table, **kwargs):
    """Plot a given moose.Table (its .vector) on the current figure.

    Both axes can be scaled; by default the scale factor is 1 for each.
    Pass 'xscale' and/or 'yscale' keyword arguments to modify the scales.
    """
    if type(table) != moose.Table:
        raise TypeError("Expected moose.Table, got {}".format( type(table) ))
    vecX, vecY = reformatTable(table, kwargs)
    plt.plot(vecX, vecY, label = kwargs.get('label', ""))
    # framealpha may not be available on older matplotlib versions.
    try:
        plt.legend(loc='best', framealpha=0.4)
    except:
        plt.legend(loc='best')
def plotTables(tables, outfile=None, **kwargs):
    """Plot a dict of moose.Tables onto one figure.

    Each table gets its own subplot unless subplot=False is passed.
    When `outfile` is given the figure is saved there (falling back to
    an interactive window on failure); otherwise it is shown directly.
    """
    assert type(tables) == dict, "Expected a dict of moose.Table"
    plt.figure(figsize=(10, 1.5*len(tables)))
    useSubplots = kwargs.get('subplot', True)
    for idx, tname in enumerate(tables):
        if useSubplots:
            plt.subplot(len(tables), 1, idx+1)
        yvec = tables[tname].vector
        xvec = np.linspace(0, moose.Clock('/clock').currentTime, len(yvec))
        plt.plot(xvec, yvec, label=tname)
        # framealpha may not be available on older matplotlib versions.
        try:
            plt.legend(loc='best', framealpha=0.4)
        except:
            plt.legend(loc='best')
    plt.tight_layout()
    if outfile:
        pu.dump("PLOT", "Saving plots to file {}".format(outfile))
        try:
            plt.savefig(outfile, transparent=True)
        except Exception as e:
            pu.dump("WARN"
                    , "Failed to save figure, plotting onto a window"
                    )
            plt.show()
    else:
        plt.show()
def plotVector(vec, xvec = None, **options):
    """plotVector: Plot a given vector. On x-axis, plot the time.

    :param vec: numpy array to plot.
    :param xvec: x values; when None the moose clock generates them.
    :param options: must contain 'ax' (the axes to draw on); optional
        'label', 'legend', 'xlabel', 'ylabel', 'title'.

    FIX over the original: ax.set_ylabel was *assigned* the label
    string (clobbering the bound method and never labelling the axis);
    it is now called properly.
    """
    ax = options[ 'ax' ]
    assert type(vec) == np.ndarray, "Expected type %s" % type(vec)
    legend = options.get('legend', True)
    if xvec is None:
        clock = moose.Clock('/clock')
        xx = np.linspace(0, clock.currentTime, len(vec))
    else:
        xx = xvec[:]
    assert len(xx) == len(vec), "Expecting %s got %s" % (len(vec), len(xvec))
    ax.plot(xx, vec, label=options.get('label', ''))
    if legend:
        # This may not be available on older version of matplotlib.
        try:
            ax.legend(loc='best', framealpha=0.4)
        except:
            ax.legend(loc='best')
    if xvec is None:
        ax.set_xlabel('Time (sec)')
    else:
        ax.set_xlabel(options.get('xlabel', ''))
    # BUGFIX: call set_ylabel instead of overwriting it with a string.
    ax.set_ylabel(options.get('ylabel', ''))
    ax.set_title(options.get('title', ''))
    if(options.get('legend', True)):
        try:
            ax.legend(loc='best', framealpha=0.4, prop={'size' : 9})
        except:
            ax.legend(loc='best', prop={'size' : 9})
    return ax
def saveRecords(records, xvec = None, **kwargs):
    """saveRecords
    Save a dictionary of (key, moose.Table-or-array) pairs as CSV to
    'outfile'; the first column is time generated from the moose clock.

    :param records: dict of name -> table (or plain array-like).
    :param kwargs:
        outfile: output file path (default 'data.moose').
        comment: extra text appended below the header.

    Fixes over the original:
      - 'except AtrributeError' was misspelled, so the fallback branch
        raised NameError instead of handling plain arrays;
      - the fallback assigned 'yevc' (typo), silently dropping the value.
    """
    if len(records) == 0:
        pu.warn("No data in dictionary to save.")
        return False
    outfile = kwargs.get('outfile', 'data.moose')
    clock = moose.Clock('/clock')
    assert clock.currentTime > 0
    yvecs = [ ]
    text = "time," + ",".join([ str(x) for x in records ])
    for k in records:
        try:
            yvec = records[k].vector
        except AttributeError:
            # entry is already a plain array-like, use it as-is
            yvec = records[k]
        yvecs.append(yvec)
    xvec = np.linspace(0, clock.currentTime, len(yvecs[0]))
    yvecs = [ xvec ] + yvecs
    if kwargs.get('comment', ''):
        text += ("\n" + kwargs['comment'] )
    np.savetxt(outfile, np.array(yvecs).T, delimiter=',' , header = text)
    pu.info("Done writing data to %s" % outfile)
def plotRecords(records, xvec = None, **kwargs):
    """plotRecords Plot given records in dictionary.

    :param records: dict mapping name -> moose.Table.
    :param xvec: If None, use moose.Clock to generate xvec.
    :param **kwargs: 'legend', 'outfile', 'subplot', 'filter' (list of
        substrings; only matching keys are plotted), 'title', plus anything
        accepted by plotVector.
    """
    dataDict = {}
    try:
        # Plot in case-insensitive alphabetical order when keys allow it.
        for k in sorted(records.keys(), key=str.lower):
            dataDict[k] = records[k]
    except Exception as e:
        dataDict = records
    legend = kwargs.get('legend', True)
    outfile = kwargs.get('outfile', None)
    subplot = kwargs.get('subplot', False)
    filters = [ x.lower() for x in kwargs.get('filter', [])]
    plt.figure(figsize=(10, 1.5*len(dataDict)))
    #plt.rcParams.update( { 'font-size' : 10 } )
    for i, k in enumerate(dataDict):
        pu.info("+ Plotting for %s" % k)
        plotThis = False
        if not filters: plotThis = True
        for accept in filters:
            if accept in k.lower():
                plotThis = True
                break
        if plotThis:
            if not subplot:
                yvec = dataDict[k].vector
                plotVector(yvec, xvec, label=k, **kwargs)
            else:
                # BUGFIX: subplot indices are 1-based; using `i` made the
                # first record target the invalid subplot 0.
                kwargs[ 'ax' ] = plt.subplot(len(dataDict), 1, i + 1)
                yvec = dataDict[k].vector
                plotVector(yvec, xvec, label=k, **kwargs)
    # title in Image.
    if 'title' in kwargs:
        plt.title(kwargs['title'])
    if subplot:
        try:
            plt.tight_layout()
        except: pass
    if outfile:
        pu.info("Writing plot to %s" % outfile)
        plt.savefig("%s" % outfile, transparent=True)
    else:
        plt.show()
def plot_records( data_dict, xvec = None, **kwargs ):
    """Deprecated alias of plot_tables(); kept for backward compatibility.
    """
    return plot_tables( data_dict, xvec, **kwargs )
def plot_tables(data_dict, xvec = None, **kwargs):
    """plot_tables plots moose.Table stored in a dictionary.

    :param data_dict: dict mapping name -> moose.Table.
    :param xvec: If None, use moose.Clock to generate xvec.
    :param **kwargs: 'legend', 'outfile', 'subplot', 'filter' (list of
        substrings; only matching keys are plotted), 'ax', plus anything
        accepted by plotVector.
    """
    legend = kwargs.get('legend', True)
    outfile = kwargs.get('outfile', None)
    subplot = kwargs.get('subplot', False)
    filters = [ x.lower() for x in kwargs.get('filter', [])]
    ax = kwargs.get( 'ax', None )
    if ax is None:
        plt.figure(figsize=(10, 1.5*len(data_dict)))
        if not subplot:
            ax = plt.subplot( 1, 1, 1 )
            # BUGFIX: plotVector reads the target axes from kwargs['ax'];
            # the freshly created axes must be stored there or plotVector
            # raised KeyError when no 'ax' was supplied.
            kwargs[ 'ax' ] = ax
    for i, k in enumerate(data_dict):
        pu.info("+ Plotting for %s" % k)
        plotThis = False
        if not filters: plotThis = True
        for accept in filters:
            if accept in k.lower():
                plotThis = True
                break
        if plotThis:
            if not subplot:
                yvec = data_dict[k].vector
                plotVector(yvec, xvec, label=k, **kwargs)
            else:
                # BUGFIX: subplot indices are 1-based (was `i`, so the first
                # table targeted invalid subplot 0), and the new axes must be
                # passed to plotVector via kwargs (as plotRecords does).
                kwargs[ 'ax' ] = plt.subplot(len(data_dict), 1, i + 1)
                # BUGFIX: take .vector like the non-subplot branch; entries
                # are moose.Table objects, not raw arrays.
                yvec = data_dict[k].vector
                plotVector(yvec, xvec, label=k, **kwargs)
    if subplot:
        try:
            plt.tight_layout()
        except: pass
    if outfile:
        pu.info("Writing plot to %s" % outfile)
        plt.savefig("%s" % outfile, transparent=True)
    else:
        plt.show()
|
subhacom/moose-core
|
python/moose/plot_utils.py
|
Python
|
gpl-3.0
| 10,511
|
[
"MOOSE"
] |
3d1b8d075170cc2dfae2c9055b6c0714d750c6770069abac2e21d993c4b294ef
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2014 Brian Douglass bhdouglass@gmail.com
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
from agui.awidgets import ATextArea
from agui.backends.gtk.widgets import Widget
class TextArea(Widget, ATextArea):
    """GTK backend implementation of ATextArea, wrapping a TextView widget."""
    type = 'TextView'
    def __init__(self, item = None):
        ATextArea.__init__(self, item)
        Widget.__init__(self, item)
        # Propagate the abstract-widget change notification on every
        # key release in the text view.
        self.item.connect('key-release-event', self.emit_changed)
    @ATextArea.text.getter
    def text(self):
        """Return the full contents of the underlying text buffer."""
        # The trailing True is get_text's include_hidden_chars flag
        # (presumably we want hidden chars too — per GTK TextBuffer API).
        buffer = self.item.get_buffer()
        self._text = buffer.get_text(buffer.get_start_iter(), buffer.get_end_iter(), True)
        return self._text
    @text.setter
    def text(self, value):
        # Replace the buffer contents and cache the new value.
        self.item.get_buffer().set_text(value)
        self._text = value
    def insert(self, text):
        """Insert text at the current cursor position and refresh the cache."""
        self.item.get_buffer().insert_at_cursor(text)
        self._text = self.text
|
bhdouglass/agui
|
agui/backends/gtk/widgets/text_area.py
|
Python
|
gpl-3.0
| 1,520
|
[
"Brian"
] |
3ff498cedb4df84fbdc7b7174cd7dcb211377050fcd003b347112ca883d761ff
|
__author__ = 'jmeireles'
import dryscrape
import re
import requests
import bs4
import urllib
# Python 2 scratch script: extract a direct video URL from an embed page.
# Matches direct video-file URLs appearing after a `file: "` token in the
# embedded player's javascript.
regex = '(?<=file:\s")http://[^\s"]+.(?:mp4|mpg|avi|flv)'
'''
url = 'http://gorillavid.in/embed-sx5p5f4cbgm1-650x400.html'
response = requests.get(url)
soup = bs4.BeautifulSoup(response.text)
text = soup.get_text()
regex = urllib.unquote(regex)
url = re.search(regex, str(text))
print urllib.unquote(url.group()).decode('utf8')
'''
# Render the embed page with a javascript-capable headless browser
# (dryscrape) so the flashvars assignments appear in the body.
url = "http://embed.movshare.net/embed.php?v=ba47de88ca2a2&width=655&height=362"
session = dryscrape.Session()
session.visit(url)
response = session.body()
soup = bs4.BeautifulSoup(response)
print soup.text
# Pull the individual flashvars fields (domain, file, filekey, cid) needed
# to build the player API request sketched in the trailing comment block.
movshareDomain = re.search('(?<=flashvars.domain=)"?(?P<match>[^";]+)', soup.text)
print urllib.unquote(movshareDomain.group("match")).decode('utf8')
movshareFile = re.search('(?<=flashvars.file=)"?(?P<match>[^";]+)', soup.text)
print urllib.unquote(movshareFile.group("match")).decode('utf8')
movshareKey = re.search('(?<=flashvars.filekey=)"?(?P<match>[^";]+)', soup.text)
print urllib.unquote(movshareKey.group("match")).decode('utf8')
movshareCID = re.search('(?<=flashvars.cid=)"?(?P<match>[^";]+)', soup.text)
print urllib.unquote(movshareCID.group("match")).decode('utf8')
'''
movshareVideoURL = getMyContent(movshareDomain + "/api/player.api.php?cid=" + movshareCID + "&file=" +movshareFile + "&key=" + movshareKey, 'TEXT', false);
movshareVideoURL = decodeURIComponent(movshareVideoURL.match('[domain|url]=(.*?)&')[1]);
'''
|
Faianca/Anime-Tv-shows-Scrapper
|
tests/progress.py
|
Python
|
apache-2.0
| 1,472
|
[
"VisIt"
] |
10054bda9c7d397cafab958172146cc151c4d3e0e0812b98ca654e0dd836c089
|
#
# QAPI event generator
#
# Copyright (c) 2014 Wenchao Xia
# Copyright (c) 2015-2016 Red Hat Inc.
#
# Authors:
# Wenchao Xia <wenchaoqemu@gmail.com>
# Markus Armbruster <armbru@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2.
# See the COPYING file in the top-level directory.
from qapi import *
def gen_event_send_proto(name, arg_type):
    """Return the C prototype of the qapi_event_send_FOO() function."""
    subst = {'c_name': c_name(name.lower()),
             'param': gen_params(arg_type, 'Error **errp')}
    return 'void qapi_event_send_%(c_name)s(%(param)s)' % subst
def gen_event_send_decl(name, arg_type):
    """Generate the header declaration for one event's send function."""
    return mcgen('''
%(proto)s;
''',
                 proto=gen_event_send_proto(name, arg_type))
# Declare and initialize an object 'qapi' using parameters from gen_params()
def gen_param_var(typ):
    """Emit a C initializer collecting the exploded event parameters into
    a local `param` struct. Variant (union) types are not supported."""
    assert not typ.variants
    ret = mcgen('''
    %(c_name)s param = {
''',
                c_name=typ.c_name())
    sep = ' '
    for memb in typ.members:
        ret += sep
        sep = ', '
        if memb.optional:
            # Optional members also carry a has_NAME flag parameter.
            ret += 'has_' + c_name(memb.name) + sep
        if memb.type.name == 'str':
            # Cast away const added in gen_params()
            ret += '(char *)'
        ret += c_name(memb.name)
    ret += mcgen('''
    };
''')
    return ret
def gen_event_send(name, arg_type):
    """Generate the C definition of qapi_event_send_FOO(): it builds a
    QDict for the event (visiting the exploded parameters into its "data"
    member when the event has arguments) and hands it to the registered
    QMP emit hook."""
    # FIXME: Our declaration of local variables (and of 'errp' in the
    # parameter list) can collide with exploded members of the event's
    # data type passed in as parameters.  If this collision ever hits in
    # practice, we can rename our local variables with a leading _ prefix,
    # or split the code into a wrapper function that creates a boxed
    # 'param' object then calls another to do the real work.
    ret = mcgen('''
%(proto)s
{
    QDict *qmp;
    Error *err = NULL;
    QMPEventFuncEmit emit;
''',
                proto=gen_event_send_proto(name, arg_type))
    # Extra locals are only needed when there is argument data to visit.
    if arg_type and arg_type.members:
        ret += mcgen('''
    QmpOutputVisitor *qov;
    Visitor *v;
''')
        ret += gen_param_var(arg_type)
    ret += mcgen('''
    emit = qmp_event_get_func_emit();
    if (!emit) {
        return;
    }
    qmp = qmp_event_build_dict("%(name)s");
''',
                 name=name)
    if arg_type and arg_type.members:
        ret += mcgen('''
    qov = qmp_output_visitor_new();
    v = qmp_output_get_visitor(qov);
    visit_start_struct(v, "%(name)s", NULL, 0, &err);
    if (err) {
        goto out;
    }
    visit_type_%(c_name)s_members(v, &param, &err);
    visit_end_struct(v, err ? NULL : &err);
    if (err) {
        goto out;
    }
    qdict_put_obj(qmp, "data", qmp_output_get_qobject(qov));
''',
                     name=name, c_name=arg_type.c_name())
    ret += mcgen('''
    emit(%(c_enum)s, qmp, &err);
''',
                 c_enum=c_enum_const(event_enum_name, name))
    # The out: label and visitor cleanup only exist when a visitor was
    # created above.
    if arg_type and arg_type.members:
        ret += mcgen('''
out:
    qmp_output_visitor_cleanup(qov);
''')
    ret += mcgen('''
    error_propagate(errp, err);
    QDECREF(qmp);
}
''')
    return ret
class QAPISchemaGenEventVisitor(QAPISchemaVisitor):
    """Schema visitor accumulating the event send declarations (decl),
    definitions (defn) and the QAPIEvent enum over all events."""
    def __init__(self):
        # Accumulators are populated between visit_begin() and visit_end().
        self.decl = None
        self.defn = None
        self._event_names = None
    def visit_begin(self, schema):
        # Reset accumulators for a fresh walk of the schema.
        self.decl = ''
        self.defn = ''
        self._event_names = []
    def visit_end(self):
        # Emit the enum of all event names once every event has been seen.
        self.decl += gen_enum(event_enum_name, self._event_names)
        self.defn += gen_enum_lookup(event_enum_name, self._event_names)
        self._event_names = None
    def visit_event(self, name, info, arg_type):
        self.decl += gen_event_send_decl(name, arg_type)
        self.defn += gen_event_send(name, arg_type)
        self._event_names.append(name)
(input_file, output_dir, do_c, do_h, prefix, dummy) = parse_command_line()
c_comment = '''
/*
* schema-defined QAPI event functions
*
* Copyright (c) 2014 Wenchao Xia
*
* Authors:
* Wenchao Xia <wenchaoqemu@gmail.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
'''
h_comment = '''
/*
* schema-defined QAPI event functions
*
* Copyright (c) 2014 Wenchao Xia
*
* Authors:
* Wenchao Xia <wenchaoqemu@gmail.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
'''
(fdef, fdecl) = open_output(output_dir, do_c, do_h, prefix,
'qapi-event.c', 'qapi-event.h',
c_comment, h_comment)
fdef.write(mcgen('''
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "%(prefix)sqapi-event.h"
#include "%(prefix)sqapi-visit.h"
#include "qapi/qmp-output-visitor.h"
#include "qapi/qmp-event.h"
''',
prefix=prefix))
fdecl.write(mcgen('''
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "%(prefix)sqapi-types.h"
''',
prefix=prefix))
event_enum_name = c_name(prefix + "QAPIEvent", protect=False)
schema = QAPISchema(input_file)
gen = QAPISchemaGenEventVisitor()
schema.visit(gen)
fdef.write(gen.defn)
fdecl.write(gen.decl)
close_output(fdef, fdecl)
|
shannonz88/qemu
|
scripts/qapi-event.py
|
Python
|
gpl-2.0
| 5,157
|
[
"VisIt"
] |
48b57f42b32e2b6e5681b8264b854a48b96c790e5c4794b6639deafb808d237e
|
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import numpy as np
import itertools
import espressomd.lb
"""
Check linear momentum calculation for lattice-Boltzmann.
"""
AGRID = .5
EXT_FORCE = .1
VISC = 2.7
DENS = 1.7
TIME_STEP = 0.1
BOX_L = 3.0
LB_PARAMS = {'agrid': AGRID,
'dens': DENS,
'visc': VISC,
'tau': TIME_STEP,
'ext_force_density': [0.1, 0.2, 0.3]}
class LinearMomentumTest:
    """Base class of the test that holds the test logic."""
    # Subclasses' setUp() assigns the LB fluid class (CPU or GPU) here;
    # prepare() then replaces it with the instantiated actor.
    lbf = None
    system = espressomd.System(box_l=[BOX_L] * 3)
    system.time_step = TIME_STEP
    system.cell_system.skin = 0.4 * AGRID
    def prepare(self):
        """
        Setup random node velocities.
        """
        self.system.actors.clear()
        self.lbf = self.lbf(**LB_PARAMS)
        self.system.actors.add(self.lbf)
        # Assign a random velocity in [-0.5, 0.5)^3 to every LB node.
        for index in itertools.product(np.arange(0, int(np.floor(BOX_L / AGRID))), repeat=3):
            self.lbf[index].velocity = np.random.random(3) - 0.5
    def test(self):
        """
        Compare direct calculation of fluid momentum with analysis function.
        """
        self.prepare()
        linear_momentum = np.zeros(3)
        # Sum density * cell volume * node velocity over all LB nodes.
        for index in itertools.product(np.arange(0, int(np.floor(BOX_L / AGRID))), repeat=3):
            linear_momentum += DENS * AGRID**3.0 * self.lbf[index].velocity
        analyze_linear_momentum = self.system.analysis.linear_momentum(True,  # particles
                                                                       True)  # LB fluid
        np.testing.assert_allclose(
            linear_momentum,
            analyze_linear_momentum,
            atol=1e-3)
@utx.skipIfMissingFeatures(['EXTERNAL_FORCES'])
class LBCPULinearMomentum(ut.TestCase, LinearMomentumTest):
    """Test for the CPU implementation of the LB."""
    def setUp(self):
        # Select the CPU LB fluid class; LinearMomentumTest instantiates it.
        self.lbf = espressomd.lb.LBFluid
@utx.skipIfMissingGPU()
@utx.skipIfMissingFeatures(['LB_BOUNDARIES_GPU', 'EXTERNAL_FORCES'])
class LBGPULinearMomentum(ut.TestCase, LinearMomentumTest):
    """Test for the GPU implementation of the LB."""
    def setUp(self):
        # Select the GPU LB fluid class; LinearMomentumTest instantiates it.
        self.lbf = espressomd.lb.LBFluidGPU
# Run the test suite when executed as a script.
if __name__ == '__main__':
    ut.main()
|
mkuron/espresso
|
testsuite/python/linear_momentum_lb.py
|
Python
|
gpl-3.0
| 2,929
|
[
"ESPResSo"
] |
f985515457ec1682b3472dccd6548ffa5535612988358e0938a3cb7f1a4f667c
|
"""
Inline-optimized Sheet classes
$Id$
"""
__version__='$Revision$'
import param
from topo.base.cf import MaskedCFIter
from topo.base.projection import NeighborhoodMask
from topo.misc.inlinec import inline,provide_unoptimized_equivalent,c_header
from topo.sheet.lissom import LISSOM
from topo.sheet import compute_joint_norm_totals # pyflakes:ignore (replaced by optimized version)
def compute_joint_norm_totals_opt(projlist,active_units_mask):
    """
    Compute norm_total for each CF in each projections from a
    group to be normalized jointly. The same assumptions are
    made as in the original function.

    For every active, unmasked unit the C code sums norm_total across
    all projections in the group and writes the joint total back into
    each CF's _norm_total slot, marking _has_norm_total.
    """
    # Assumes that all Projections in the list have the same r,c size
    length = len(projlist)
    assert length>=1
    proj = projlist[0]
    iterator = MaskedCFIter(proj,active_units_mask=active_units_mask)
    num_cfs = len(proj.flatcfs) # pyflakes:ignore (passed to weave C code)
    active_units_mask = iterator.get_active_units_mask()
    sheet_mask = iterator.get_sheet_mask() # pyflakes:ignore (passed to weave C code)
    cf_type = iterator.cf_type # pyflakes:ignore (passed to weave C code)
    # CEBALERT: Not consistent with other C code. E.g. could be
    # simplified to use active_units_mask[] and sheet_mask[]?
    code = c_header + """
        DECLARE_SLOT_OFFSET(_norm_total,cf_type);
        DECLARE_SLOT_OFFSET(_has_norm_total,cf_type);
        npfloat *x = active_units_mask;
        npfloat *m = sheet_mask;
        for (int r=0; r<num_cfs; ++r) {
            double load = *x++;
            double msk = *m++;
            if (msk!=0 && load != 0) {
                double nt = 0;
                for(int p=0; p<length; p++) {
                    PyObject *proj = PyList_GetItem(projlist,p);
                    PyObject *cfs = PyObject_GetAttrString(proj,"flatcfs");
                    PyObject *cf = PyList_GetItem(cfs,r);
                    PyObject *o = PyObject_GetAttrString(cf,"norm_total");
                    nt += PyFloat_AsDouble(o);
                    Py_DECREF(cfs);
                    Py_DECREF(o);
                }
                for(int p=0; p<length; p++) {
                    PyObject *proj = PyList_GetItem(projlist,p);
                    PyObject *cfs = PyObject_GetAttrString(proj,"flatcfs");
                    PyObject *cf = PyList_GetItem(cfs,r);
                    LOOKUP_FROM_SLOT_OFFSET(double,_norm_total,cf);
                    _norm_total[0] = nt;
                    LOOKUP_FROM_SLOT_OFFSET(int,_has_norm_total,cf);
                    _has_norm_total[0] = 1;
                    Py_DECREF(cfs);
                }
            }
        }
    """
    inline(code, ['projlist','active_units_mask','sheet_mask','num_cfs','length','cf_type'],
           local_dict=locals(),
           headers=['<structmember.h>'])
# Register the pure-Python fallback used when weave is unavailable.
provide_unoptimized_equivalent("compute_joint_norm_totals_opt",
                               "compute_joint_norm_totals",locals())
# CEBALERT: not tested
class LISSOM_Opt(LISSOM):
"""
Faster but potentially unsafe optimized version of LISSOM.
Adds a NeighborhoodMask that skips computation for neurons
sufficiently distant from all those activated in the first few
steps of settling. This is safe only if activity bubbles reliably
shrink after the first few steps; otherwise the results will
differ from LISSOM.
Typically useful only for standard LISSOM simulations with
localized (e.g. Gaussian) inputs and that shrink the lateral
excitatory radius, which results in small patches of activity in
an otherwise inactive sheet.
Also overrides the function
JointNormalizingCFSheet.__compute_joint_norm_totals with
C-optimized code for LISSOM sheets.
"""
joint_norm_fn = param.Callable(default=compute_joint_norm_totals_opt)
def __init__(self,**params):
super(LISSOM_Opt,self).__init__(**params)
# CEBALERT: this wipes out any user-specified sheet mask.
self.mask = NeighborhoodMask_Opt(threshold = 0.00001,radius = 0.05,sheet = self)
provide_unoptimized_equivalent("LISSOM_Opt","LISSOM",locals())
class NeighborhoodMask_Opt(NeighborhoodMask):
    """
    C-optimized version of NeighborhoodMask.

    Sets each mask cell to 1.0 if any activity value within `radius`
    (converted to a matrix-cell radius) exceeds `threshold`, else 0.0.
    """
    def calculate(self):
        rows,cols = self.data.shape
        # Convert the sheet-coordinate radius into a matrix-cell radius.
        ignore1,matradius = self.sheet.sheet2matrixidx(self.radius,0)
        ignore2,x = self.sheet.sheet2matrixidx(0,0)
        matradius = int(abs(matradius -x))
        thr = self.threshold # pyflakes:ignore (passed to weave C code)
        activity = self.sheet.activity # pyflakes:ignore (passed to weave C code)
        mask = self.data # pyflakes:ignore (passed to weave C code)
        code = c_header + """
            #define min(x,y) (x<y?x:y)
            #define max(x,y) (x>y?x:y)
            npfloat *X = mask;
            npfloat *A = activity;
            for (int r=0; r<rows; ++r) {
                for (int l=0; l<cols; ++l) {
                    /* Neighborhood window clipped to the array bounds. */
                    int lbx = max(0,r-matradius);
                    int lby = max(0,l-matradius);
                    int hbx = min(r+matradius+1,rows);
                    int hby = min(l+matradius+1,cols);
                    *X = 0.0;
                    int breakFlag = 0;
                    for(int k=lbx;k<hbx;k++)
                    {
                        for(int m=lby;m<hby;m++)
                        {
                            /* BUGFIX: row-major index into a rows x cols
                               array is k*cols + column (was k*rows, wrong
                               for non-square sheets); the inner loop
                               variable was also renamed from l to m so it
                               no longer shadows the column index. */
                            npfloat *a = A+k*cols + m;
                            if(*a > thr)
                            {
                                *X = 1.0;
                                //JAALERT HACK. Want to jump out both nested loops!!!
                                breakFlag = 1;
                                break;
                            }
                        }
                        if(breakFlag)break;
                    }
                    X++;
                }
            }
        """
        inline(code, ['thr','activity','matradius','mask','rows','cols'], local_dict=locals())
provide_unoptimized_equivalent("NeighborhoodMask_Opt","NeighborhoodMask",locals())
# Public API; the *_Opt variants are presumably exported/aliased by the
# provide_unoptimized_equivalent calls above — TODO confirm.
__all__ = [
    "compute_joint_norm_totals",
    "LISSOM",
    "NeighborhoodMask",
]
|
ioam/svn-history
|
topo/sheet/optimized.py
|
Python
|
bsd-3-clause
| 6,210
|
[
"Gaussian"
] |
74e830811e7e8cc10783d76a9bc3de443453c475b2424682915aaf02a5070040
|
#!/usr/bin/env python3
"""
Check vcf genotype consistence at each genotype level for multiple input files.
@Author: wavefancy@gmail.com
Usage:
VCFOverlapMulti.py -n num [-c cacheLoadnum] [-s] <inputs>...
VCFOverlapMulti.py -h | --help | -v | --version | -f | --format
Notes:
1. Skip phase information, only compare genotype(0/1, or 0|1 for hetero sites.).
**** Don't put hetero as 1/0 or 1/0.
Only output consistence sites, which were supported by at least "supportNum" of input files,
skip failed sites. At genotype level, mask failed genotype as missing '.'. Output consistence
getnotype according to the priority as the input order of input files, copy genotype from the
first consistence file, check according to the file order listed in <inputs> paramter.
Example, if the genotype of three input files as(same individual, same location):
0/1 1/1:0,5 1/1:0,2:2:6:49,6,0
The consistence genotype is 11, the output is 1/1:0,5 (no format checking).
2. Copy meta data from the first input vcf files, including header, INFO, FORMAT, etc. The input
files may have different format, like above example, this scripts **do not** check format,
please make sure the output has consistence format.
3. Output results to stdout.
Options:
-n num Output threshold, at least 'num' of inputs have consistence call.
-c cacheLoadnum The number of lines were pre-loaded for cache, default 1000000.
-s Skip repeated records, only use the first one. otherwise system will exit if met repeated records.
Repeated records defined as same location and same ref and alt allele.
<inputs>... Input vcf files.
-h --help Show this screen.
-v --version Show version.
"""
import sys
from docopt import docopt
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE, SIG_DFL)
def ShowFormat():
    '''Print an input-file format example (placeholder, currently empty).'''
    print('''
    ''');
from pysam import VariantFile
class allKeys:
    """Module-level registry of (key, pos) tuples collected while caching
    VCF records; lets the main loop iterate all observed sites in
    position order."""
    keyset = set()

    # BUGFIX/cleanup: this is called on the class (allKeys.getOrderedKeyList())
    # and takes no self; mark it @staticmethod explicitly.
    @staticmethod
    def getOrderedKeyList():
        '''Return keys sorted by genomic position.

        Clears keyset as a side effect: a second call returns [] until
        Record.loadRecords() repopulates it.
        '''
        if allKeys.keyset:
            # Each entry is (key, pos); order by position, return keys only.
            ordered = sorted(allKeys.keyset, key=lambda item: item[1])
            keys = [entry[0] for entry in ordered]
            allKeys.keyset.clear()
            return keys
        else:
            return []
class Record:
    """Cache of VCF records for one genomic window of a single input file,
    keyed by contig + pos + ref + alt."""

    def __init__(self, inFile):
        # inFile: an open, tabix-indexed pysam.VariantFile.
        self.currentMap = {}
        self.file = inFile

    def getRecord(self, key):
        '''
        Get the cached record for the given key, or None if absent.
        key = r.contig + str(r.pos) + r.alleles[0] + r.alleles[1]
        '''
        # Cleanup: a dict .get replaces the explicit membership test;
        # dead commented-out pre-loading code removed.
        return self.currentMap.get(key)

    def loadRecords(self, contigName, start, end):
        '''
        Clear the current cache, then load records from file for the
        window [start, end] (*** both ends inclusive), registering each
        key with allKeys so callers can iterate sites in order.
        '''
        self.currentMap.clear()
        for r in self.file.fetch(contigName, start, end):
            if len(r.alleles) != 2:
                sys.stderr.write('ERROR: please decompose the input vcf, only one alt allele permited each line, error record:\n%s\n'
                    %(r))
                sys.exit(-1)
            else:
                key = r.contig + str(r.pos) + r.alleles[0] + r.alleles[1]
                if key in self.currentMap:
                    # Cleanup: removed leftover merge-conflict residue that
                    # duplicated this branch in comments.
                    # `skipRepeat` is the module-level flag set from -s.
                    if skipRepeat:
                        sys.stderr.write('Warning: repeated records detected, only keep the first one, same meta info, error record:\n%s\n'%(r))
                    else:
                        sys.stderr.write('ERROR: repeated records detected, same meta info, error record:\n%s\n'%(r))
                        sys.exit(-1)
                else:
                    self.currentMap[key] = r
                    allKeys.keyset.add((key, r.pos))
if __name__ == '__main__':
    args = docopt(__doc__, version='1.0')
    #print(args)
    if(args['--format']):
        ShowFormat()
        sys.exit(-1)
    vcfMetaCols=9 #number of colummns for vcf meta information.
    cacheStep = 1000000
    if args['-c']:
        cacheStep = int(args['-c'])
    supportNum = int(args['-n'])
    infiles = [VariantFile(f, 'r') for f in args['<inputs>']]
    skipRepeat = False
    if args['-s']:
        skipRepeat = True
    #read contig and its length.
    # Contigs (and their lengths) drive the windowed caching below, so
    # every contig header line must carry a length.
    contigs = [] # [(contigName, len),....]
    for line in str(infiles[0].header).split():
        if line.startswith('##contig'):
            ss = line[:-1].split(',')
            try:
                l = int(ss[1].split('=')[-1])
                contigs.append((ss[0].split('=')[-1], l))
            except ValueError:
                sys.stderr.write('ERROR: Please make sure contig in header has length info, like: ##contig=<ID=chr1,length=248956422>\n')
                sys.exit(-1)
    if not contigs:
        sys.stderr.write('ERROR: Please make sure contig has been deleared in header, like: ##contig=<ID=chr1,length=248956422>\n')
        sys.exit(-1)
    #print(contigs)
    #sys.exit(-1)
    #check smaples in input files, same samples, and same order.
    for x in infiles[1:]:
        if len(infiles[0].header.samples) != len(x.header.samples):
            sys.stderr.write('ERROR: different number of samples in input files.\n')
            sys.exit(-1)
        else:
            for m,n in zip(infiles[0].header.samples, x.header.samples):
                if m != n:
                    sys.stderr.write('ERROR: input files should have the same samples, and ordered in same order.\n')
                    sys.exit(-1)
    #output vcf header
    # Meta data (header, INFO, FORMAT, ...) is copied from the first input.
    sys.stdout.write('%s'%(str(infiles[0].header)))
    #compare and output results.
    from collections import Counter
    #load cache into memory, check and output.
    Records = [Record(x) for x in infiles]
    for con, conLen in contigs:
        for start in range(0, conLen, cacheStep):
            end = start + cacheStep
            #locad caches for each input files.
            [x.loadRecords(con, start, end) for x in Records]
            #iterate all possible keys,
            keys = allKeys.getOrderedKeyList()
            for key in keys:
                lines = [x.getRecord(key) for x in Records if x.getRecord(key)]
                if len(lines) >= supportNum: # meet threshold at site level. output records, otherwise skip.
                    #check threshold at genotype level.
                    sites = [str(x).strip().split() for x in lines]
                    out = sites[0][:vcfMetaCols]
                    for col in range(vcfMetaCols, len(sites[0])): #check at genotype level.
                        # x[col][0] and x[col][2] are the two alleles of a
                        # genotype like 0/1 or 0|1 (phase char skipped).
                        genos = [x[col][0] + x[col][2] for x in sites if x[col][0] != '.']
                        if len(genos) >= supportNum:
                            count = Counter(genos)
                            genoCounts = sorted(count.items(), key=lambda x: x[1], reverse=True)
                            #sys.stderr.write(str(genoCounts))
                            if genoCounts[0][1] >= supportNum: #meet condition, add one records.
                                # Copy the genotype from the first file that
                                # agrees with the consensus (input order =
                                # priority order).
                                for x in sites:
                                    if x[col][0] != '.' and x[col][0] + x[col][2] == genoCounts[0][0]:
                                        out.append(x[col])
                                        break
                            else:
                                out.append('.')
                        else:
                            out.append('.')
                    #output one records.
                    sys.stdout.write('%s\n'%('\t'.join(out)))
    [f.close() for f in infiles]
    sys.stdout.flush()
    sys.stdout.close()
    sys.stderr.flush()
    sys.stderr.close()
|
wavefancy/BIDMC-PYTHON
|
Exome/VCFOverlapMulti/VCFOverlapMulti.py
|
Python
|
mit
| 8,816
|
[
"pysam"
] |
10323e1c89d3aa393f1bc63d1ffddc376b0b72e7e61cb6c83d9a214b2d5a79aa
|
"""Compound_io.py: Functions to load MINE databases from and dump compounds
into common cheminformatics formats"""
import collections
import csv
import datetime
import os
import sys
from typing import List, Tuple, Union
from rdkit.Chem import AllChem
from minedatabase import utils
from minedatabase.databases import MINE
def export_sdf(mine_db: MINE, dir_path: str, max_compounds: int = None) -> None:
    """Exports compounds from the database as an MDL SDF file.

    Parameters
    ----------
    mine_db : MINE
        MINE object that contains the database.
    dir_path : str
        Directory for files.
    max_compounds : int, optional
        Maximum number of compounds per file, by default None (one file).
    """
    # Make sure that all compounds point to all their reactants
    if not mine_db.compounds.find_one({"Product_of": {"$exists": 1}}):
        mine_db.add_rxn_pointers()
    print(
        f"Exporting {mine_db.compounds.count()} compounds from {mine_db.name}"
        " as an SDF file"
    )
    n_files = 1
    target = utils.prevent_overwrite(
        os.path.join(dir_path, mine_db.name) + f"_{n_files}.sdf"
    )
    # SDWriter (rdkit) writes Mol objects to SD files
    writer = AllChem.SDWriter(target)
    writer.SetKekulize(True)
    for compound in mine_db.compounds.find():
        # Convert SMILES string to Mol object, replacing 'CoA' and 'R' by '*'
        mol = AllChem.MolFromSmiles(compound["SMILES"], True, {"CoA": "*", "R": "*"})
        # if Mol object successfully generated, annotate properties
        if mol:
            mol.SetProp("_id", compound["_id"])
            mol.SetProp("Generation", str(compound["Generation"]))
            if "Reactant_in" in compound:
                mol.SetProp("Reactant_in", str(compound["Reactant_in"]))
            if "Product_of" in compound:
                mol.SetProp("Product_of", str(compound["Product_of"]))
            writer.write(mol)
        # Start writing a new sdf file if the maximum (set by user) has
        # been reached for the current file
        if max_compounds and (writer.NumMols() >= max_compounds):
            # BUGFIX: the filename was the literal "_(n_files).sdf" (missing
            # f-string braces) and the rollover writer was a SmilesWriter,
            # which emits SMILES lines rather than SDF records. Also close
            # the finished file before opening the next one.
            writer.close()
            n_files += 1
            target = utils.prevent_overwrite(
                os.path.join(dir_path, mine_db.name) + f"_{n_files}.sdf"
            )
            writer = AllChem.SDWriter(target)
            writer.SetKekulize(True)
    writer.close()
def export_smiles(mine_db: MINE, dir_path: str, max_compounds: int = None) -> None:
    """Exports compounds from the database as a SMILES file.

    Parameters
    ----------
    mine_db : MINE
        MINE object that contains the database.
    dir_path : str
        Directory for files.
    max_compounds : int, optional
        Maximum number of compounds per file, by default None (one file).
    """
    header = ["SMILES", "_id", "Generation", "Reactant_in", "Product_of"]
    # Make sure that all compounds point to all their reactants
    if not mine_db.compounds.find_one({"Product_of": {"$exists": 1}}):
        mine_db.add_rxn_pointers()
    # BUGFIX: `name` is an attribute, not a method; mine_db.name() raised
    # TypeError (consistent with export_sdf/export_tsv below).
    print(
        f"Exporting {mine_db.compounds.count()} compounds from {mine_db.name}"
        " as SMILES file"
    )
    n_files = 1
    target = open(
        utils.prevent_overwrite(
            os.path.join(dir_path, mine_db.name) + f"_{n_files}.smiles"
        ),
        "w",
    )
    # DictWriter allows for each key:value pair of a dictionary to be written
    # on its own row (by writerow)
    writer = csv.DictWriter(target, fieldnames=header, dialect="excel-tab")
    i = 0
    for compound in mine_db.compounds.find({}, dict([(x, 1) for x in header])):
        writer.writerow(compound)
        i += 1
        # If max compounds per file has been set by user and our number of
        # compounds that we have written so far is divisible by the max number,
        # then we start a new file
        if max_compounds and not i % max_compounds:
            # Close the finished file before starting the next one (files
            # were previously left open).
            target.close()
            n_files += 1
            target = open(
                utils.prevent_overwrite(
                    os.path.join(dir_path, mine_db.name) + f"_{n_files}.smiles"
                ),
                "w",
            )
            writer = csv.DictWriter(target, fieldnames=header, dialect="excel-tab")
    target.close()
def export_mol(mine_db: MINE, target: str, name_field: str = "_id") -> None:
    """Exports compounds from the database as a MDL molfiles

    Parameters
    ----------
    mine_db : MINE
        MINE object that contains the database.
    target : str
        Directory in which to place the files.
    name_field : str, optional
        Field to provide names for the mol files. Must be unique and universal.
        By default, "_id".
    """
    # Create the target directory if it doesn't yet exist
    if not os.path.exists(target):
        os.mkdir(target)
    # Let user know if an id does not exist for every compound in database
    if (
        mine_db.compounds.find().count()
        != mine_db.compounds.find({name_field: {"$exists": 1}}).count()
    ):
        raise ValueError(
            f"{name_field} does not exist for every compound in the database"
        )
    # Only export true compounds (ids starting with "C").
    for compound in mine_db.compounds.find({"_id": {"$regex": "^C"}}):
        # Create Mol object from SMILES code for each compound using
        # MolFromSmiles (rdkit). Take stereochemistry into account (True),
        # and replace CoA and R with *.
        mol = AllChem.MolFromSmiles(compound["SMILES"], True, {"CoA": "*", "R": "*"})
        # Support nested name fields addressed with dotted paths.
        if "." in name_field:
            compound[name_field] = utils.get_dotted_field(compound, name_field)
        # Make things more compact and look nicer
        if isinstance(compound[name_field], list):
            compound[name_field] = ",".join(compound[name_field])
        # Use MolToMolFile (rdkit) to create a mol file from the Mol object
        # with the file path specified.
        AllChem.MolToMolFile(mol, os.path.join(target, compound[name_field] + ".mol"))
def export_tsv(
    mine_db: MINE,
    target: str,
    compound_fields: Tuple[str] = (
        "_id",
        "Names",
        "Model_SEED",
        "Formula",
        "Charge",
        "Inchi",
    ),
    reaction_fields: Tuple[str] = ("_id", "SMILES_rxn", "C_id_rxn"),
) -> None:
    """Exports MINE compound and reaction data as tab-separated values files
    amenable to use in ModelSEED.

    Parameters
    ----------
    mine_db : MINE
        The database to export.
    target : str
        Directory, in which to place the files.
    compound_fields : Tuple[str], optional
        Fields to export in the compound table, by default
        ('_id', 'Names', 'Model_SEED', 'Formula', 'Charge', 'Inchi').
    reaction_fields : Tuple[str], optional
        Fields to export in the reaction table, by default
        ('_id', 'SMILES_rxn', 'C_id_rxn').
    """
    # Fields in this tuple live under the DB_links subdocument in Mongo.
    db_links = ("KEGG", "Model_SEED", "PubChem")
    print(f"Exporting {mine_db.compounds.count()} compounds from {mine_db.name} to tsv")
    with open(
        utils.prevent_overwrite(os.path.join(target, mine_db.name) + "_compounds.tsv"),
        "w",
    ) as out:
        writer = csv.DictWriter(out, fieldnames=compound_fields, dialect="excel-tab")
        writer.writeheader()
        # The projection always includes SMILES (needed for the Inchi
        # fallback) and maps db_links fields to their DB_links.* paths.
        for compound in mine_db.compounds.find(
            {},
            dict(
                [("SMILES", 1)]
                + [
                    ("DB_links." + x, 1) if x in db_links else (x, 1)
                    for x in compound_fields
                ]
            ),
        ):
            # This is a work around for supporting older MINEs which lack Inchi
            if "Inchi" in compound_fields and "Inchi" not in compound:
                compound["Inchi"] = AllChem.MolToInchi(
                    AllChem.MolFromSmiles(compound["SMILES"])
                )
            if "SMILES" not in compound_fields:
                del compound["SMILES"]
            # Flatten DB_links.* lists into comma-joined top-level columns.
            if "DB_links" in compound:
                for k, v in compound["DB_links"].items():
                    compound[k] = ", ".join(v)
                del compound["DB_links"]
            writer.writerow(compound)
    print(f"Exporting {mine_db.reactions.count()} reactions from {mine_db.name} to tsv")
    with open(
        utils.prevent_overwrite(os.path.join(target, mine_db.name) + "_reactions.tsv"),
        "w",
    ) as out:
        writer = csv.DictWriter(out, fieldnames=reaction_fields, dialect="excel-tab")
        writer.writeheader()
        for rxn in mine_db.reactions.find(
            {},
            dict(
                [("Reactants", 1), ("Products", 1)] + [(x, 1) for x in reaction_fields]
            ),
        ):
            # Build a "(stoich) c_id + ... => ..." equation string when the
            # C_id_rxn column was requested.
            if "C_id_rxn" in reaction_fields:
                def to_str(half_rxn):
                    return [f"({x['stoich']}) {x['c_id']}" for x in half_rxn]
                rxn["C_id_rxn"] = (
                    " + ".join(to_str(rxn["Reactants"]))
                    + " => "
                    + " + ".join(to_str(rxn["Products"]))
                )
            if "Reactants" not in reaction_fields:
                del rxn["Reactants"]
            if "Products" not in reaction_fields:
                del rxn["Products"]
            writer.writerow(rxn)
def export_kbase(mine_db: MINE, target: str) -> None:
    """Exports MINE compound and reaction data as tab-separated values files
    amenable to use in ModelSEED.

    Parameters
    ----------
    mine_db : MINE
        The database to export.
    target : str
        Directory in which to place the files.
    """
    # Output column name -> source field in the MINE documents. Empty strings
    # and constants (">", "c0") are placeholder columns with no source field.
    compound_fields = collections.OrderedDict(
        [
            ("id", "_id"),
            ("name", ""),
            ("formula", "Formula"),
            ("charge", "Charge"),
            ("aliases", "Names"),
        ]
    )
    reaction_fields = collections.OrderedDict(
        [
            ("id", "_id"),
            ("direction", ">"),
            ("compartment", "c0"),
            ("gpr", ""),
            ("name", ""),
            ("enzyme", ""),
            ("pathway", ""),
            ("reference", ""),
            ("equation", ""),
        ]
    )
    # BUGFIX: mine_db.name is an attribute (used as such everywhere else in
    # this module); calling it as mine_db.name() raised a TypeError here.
    print(
        f"Exporting {mine_db.compounds.count()} compounds from {mine_db.name} to tsv"
    )
    with open(
        utils.prevent_overwrite(os.path.join(target, mine_db.name) + "_compounds.tsv"),
        "w",
    ) as out:
        writer = csv.DictWriter(out, fieldnames=compound_fields, dialect="excel-tab")
        writer.writeheader()
        for compound in mine_db.compounds.find(
            {},
            dict(
                [("Names", 1), ("DB_links.Model_SEED", 1)]
                + [(x, 1) for x in compound_fields.values()]
            ),
        ):
            # Skip X-prefixed (cofactor/compartment duplicate) compounds.
            if compound["_id"][0] == "X":
                continue
            # Rename document fields to their output column names.
            for k, v in compound_fields.items():
                if v in compound:
                    compound[k] = compound[v]
                    del compound[v]
            if "name" in compound_fields and "Names" in compound:
                compound["name"] = compound["Names"][0]
                del compound["Names"]
            if "aliases" in compound:
                compound["aliases"] = "|".join(compound["aliases"])
                # BUGFIX: guard the DB_links lookup -- compounds without any
                # DB_links previously raised a KeyError on this line.
                if "DB_links" in compound and "Model_SEED" in compound["DB_links"]:
                    compound["aliases"] += "|" + "|".join(
                        sorted(compound["DB_links"]["Model_SEED"])
                    )
            if "DB_links" in compound:
                del compound["DB_links"]
            writer.writerow(compound)
    print(f"Exporting {mine_db.reactions.count()} reactions from {mine_db.name} to tsv")
    with open(
        utils.prevent_overwrite(os.path.join(target, mine_db.name) + "_reactions.tsv"),
        "w",
    ) as out:
        writer = csv.DictWriter(out, fieldnames=reaction_fields, dialect="excel-tab")
        writer.writeheader()

        def to_str(half_rxn):
            # "(stoich) <c_id>" terms; X-prefixed ids are rewritten to C so
            # ModelSEED sees the compartment-independent compound id.
            # (Hoisted out of the loop: it was being redefined per reaction.)
            return [
                f"({x['stoich']}) {x['c_id'].replace('X', 'C')}"
                for x in half_rxn
            ]

        for rxn in mine_db.reactions.find(
            {},
            dict(
                [("Reactants", 1), ("Products", 1)]
                + [(x, 1) for x in reaction_fields.values()]
            ),
        ):
            # Rename document fields to their output column names.
            for k, v in reaction_fields.items():
                if v in rxn:
                    rxn[k] = rxn[v]
                    del rxn[v]
            if "equation" in reaction_fields:
                rxn["equation"] = (
                    " + ".join(to_str(rxn["Reactants"]))
                    + " => "
                    + " + ".join(to_str(rxn["Products"]))
                )
            if "Reactants" not in reaction_fields:
                del rxn["Reactants"]
            if "Products" not in reaction_fields:
                del rxn["Products"]
            writer.writerow(rxn)
def export_inchi_rxns(
    mine_db: MINE, target: str, rxn_ids: Union[List[str], None] = None
) -> None:
    """Export reactions from a MINE db to a .tsv file.

    Parameters
    ----------
    mine_db : MINE
        Name of MongoDB to export reactions from.
    target : str
        Path to folder to save .tsv export file in.
    rxn_ids : Union[List[str], None], optional
        Only export reactions with these ids, by default None.
    """
    # Output column -> source document field ("" means derived below).
    reaction_fields = collections.OrderedDict(
        [("Reaction Rule", "Operators"), ("ID", "_id"), ("Equation", "")]
    )

    # Cache compound lookups -- many reactions share the same compounds.
    comp_memo = {}

    def get_name_and_inchi(comp_id):
        if comp_id not in comp_memo:
            doc = mine_db.compounds.find_one(
                {"_id": comp_id}, {"Names": 1, "Inchi": 1, "MINE_id": 1}
            )
            comp_memo[comp_id] = (
                doc.get("Names", [doc["MINE_id"]])[0],
                doc.get("Inchi"),
            )
        return comp_memo[comp_id]

    def to_str(half_rxn):
        # Render one side of the reaction as "(stoich) name[inchi]" terms.
        terms = []
        for part in half_rxn:
            name, inchi = get_name_and_inchi(part["c_id"])
            terms.append(f"({part['stoich']}) {name}[{inchi}]")
        return terms

    out_path = utils.prevent_overwrite(
        os.path.join(target, mine_db.name) + "_reactions.tsv"
    )
    with open(out_path, "w") as out:
        writer = csv.DictWriter(out, fieldnames=reaction_fields, dialect="excel-tab")
        writer.writeheader()
        query = {"_id": {"$in": rxn_ids}} if rxn_ids else {}
        projection = dict(
            [("Reactants", 1), ("Products", 1)]
            + [(field, 1) for field in reaction_fields.values()]
        )
        for rxn in mine_db.reactions.find(query, projection):
            # Rename document fields to output columns; list-valued fields
            # (e.g. Operators) are joined into one comma-separated cell.
            for column, field in reaction_fields.items():
                if field in rxn:
                    value = rxn.pop(field)
                    rxn[column] = ", ".join(value) if isinstance(value, list) else value
            if "Equation" in reaction_fields:
                rxn["Equation"] = (
                    " + ".join(to_str(rxn["Reactants"]))
                    + " => "
                    + " + ".join(to_str(rxn["Products"]))
                )
            if "Reactants" not in reaction_fields:
                del rxn["Reactants"]
            if "Products" not in reaction_fields:
                del rxn["Products"]
            writer.writerow(rxn)
def import_sdf(mine_db: MINE, target: str) -> None:
    """Imports a SDF file as a MINE database.

    Parameters
    ----------
    mine_db : MINE
        The database to import into.
    target : str
        Path of the SDF file to read.
    """
    # SDMolSupplier (rdkit) yields one Mol object per SDF entry, with the
    # entry's properties attached.
    supplier = AllChem.SDMolSupplier(target)
    for mol in supplier:
        mine_db.insert_compound(
            mol,
            compound_dict=mol.GetPropsAsDict(),
            pubchem_db=None,
            kegg_db=None,
            modelseed_db=None,
        )
    # Record the import in the database's metadata log.
    mine_db.meta_data.insert(
        {
            "Timestamp": datetime.datetime.now(),
            "Action": "SDF Imported",
            "Filepath": target,
        }
    )
def import_smiles(mine_db: MINE, target: str) -> None:
    """Imports a smiles file as a MINE database.

    Parameters
    ----------
    mine_db : MINE
        The database to import into.
    target : str
        Path of the SMILES (.smi) file to read.
    """
    # SmilesMolSupplier (rdkit) generates Mol objects from smiles file (.smi);
    # fields are tab-delimited, column 0 carries the molecule name.
    mols = AllChem.SmilesMolSupplier(target, delimiter="\t", nameColumn=0)
    # Insert every successfully parsed molecule, carrying over any properties
    # rdkit attached to it (GetPropsAsDict from the rdkit Mol class).
    for mol in mols:
        if mol:
            mine_db.insert_compound(
                mol,
                compound_dict=mol.GetPropsAsDict(),
                pubchem_db=None,
                kegg_db=None,
                modelseed_db=None,
            )
    # Record the import in the metadata log.
    # BUGFIX: the action was mislabeled "SDF Imported" (copy-paste from
    # import_sdf); this function imports SMILES.
    mine_db.meta_data.insert(
        {
            "Timestamp": datetime.datetime.now(),
            "Action": "SMILES Imported",
            "Filepath": target,
        }
    )
def import_mol_dir(
    mine_db: MINE, target: str, name_field: str = "Name", overwrite: bool = False
) -> None:
    """Imports a directory of molfiles as a MINE database.

    Parameters
    ----------
    mine_db : MINE
        The database to import into.
    target : str
        Directory containing the .mol files to read.
    name_field : str, optional
        Field for the compound name, by default "Name".
    overwrite : bool, optional
        Replace old compounds with new ones if a collision happens, by default False.
    """
    for file in os.listdir(target):
        # BUGFIX: match the extension only -- the old '".mol" in file' test
        # also accepted names like "x.mol.bak".
        if file.endswith(".mol"):
            # MolFromMolFile (rdkit) generates Mol objects from .mol files
            mol = AllChem.MolFromMolFile(os.path.join(target, file))
            # BUGFIX: the compound name is the filename minus its extension.
            # The old file.rstrip(".mol") stripped ANY trailing '.', 'm', 'o'
            # or 'l' characters (e.g. "custom.mol" -> "cust").
            name = os.path.splitext(file)[0]
            # Only proceed if rdkit could actually parse the molfile.
            if mol:
                # Hash key identifies the compound across imports.
                cpdhash = utils.get_compound_hash(mol)
                # If we don't want to overwrite and the compound already
                # exists, just record the extra name on the existing entry.
                if not overwrite and mine_db.compounds.count({"_id": cpdhash}):
                    mine_db.compounds.update(
                        {"_id": cpdhash}, {"$addToSet": {name_field: name}}
                    )
                # Otherwise insert the new compound into the database.
                else:
                    mine_db.insert_compound(
                        mol,
                        compound_dict={name_field: [name], "Generation": 0},
                        pubchem_db=None,
                        kegg_db=None,
                        modelseed_db=None,
                    )
    # Record the import in the metadata log.
    mine_db.meta_data.insert(
        {
            "Timestamp": datetime.datetime.now(),
            "Action": "MolFiles Imported",
            "Filepath": target,
        }
    )
if __name__ == "__main__":
    # CLI entry point.
    # Usage: python compound_io.py <task> <db_name> <path> [<per_file_limit>]
    # <task> is one of: export-sdf, export-smi, export-mol, export-tsv,
    # export-kbase, import-sdf, import-smi, import-mol.
    # User inputs task as first argument (export-sdf, export-smi, export-mol,
    # import-sdf, import-smi, or import-mol)
    TASK = sys.argv[1]
    # User inputs database name as second argument
    DB_NAME = sys.argv[2]
    # User inputs file path as third argument
    PATH = sys.argv[3]
    database = MINE(DB_NAME)  # pylint: disable=invalid-name
    if TASK == "export-sdf":
        # If a maximum molecules per file is specified (fourth argument
        # entered by user), then pass that to the export function.
        if len(sys.argv) == 5:
            export_sdf(database, PATH, int(sys.argv[4]))
        # Otherwise, assume an unlimited number of molecules per file
        else:
            export_sdf(database, PATH)
    elif TASK == "export-smi":
        # If a maximum molecules per file is specified (fourth argument
        # entered by user), then pass that to the export function.
        if len(sys.argv) == 5:
            export_smiles(database, PATH, int(sys.argv[4]))
        # Otherwise, assume an unlimited number of molecules per file
        else:
            export_smiles(database, PATH)
    elif TASK == "export-mol":
        # If a maximum molecules per file is specified (fourth argument
        # entered by user), then pass that to the export function.
        if len(sys.argv) == 5:
            export_mol(database, PATH, sys.argv[4])
        # Otherwise, assume an unlimited number of molecules per file
        else:
            export_mol(database, PATH)
    elif TASK == "export-tsv":
        export_tsv(database, PATH)
    elif TASK == "export-kbase":
        export_kbase(database, PATH)
    elif TASK == "import-sdf":
        import_sdf(database, PATH)
    elif TASK == "import-smi":
        import_smiles(database, PATH)
    elif TASK == "import-mol":
        import_mol_dir(database, PATH)
    else:
        # Unknown task: report and fall through (no exit code is set).
        print("ERROR: Unrecognised TASK")
|
JamesJeffryes/MINE-Database
|
minedatabase/compound_io.py
|
Python
|
mit
| 21,146
|
[
"RDKit"
] |
1fcb9bdf6a5107a3bb4296c4d1cbfb7cc877610ad77f1dba18e3ac86596251e1
|
#!/usr/bin/python
'''
VTK engine room for mrMeshPy viewer
The main vtk processing is done by functions here - although some hardcore
processing is handled in subroutines of other imported modules.
A core concept here is the tracking (keeping in scope) of the "targetVTKWindow"
- this is a vtkRenderWindowInteractor instance in the main program UI (user
interface) - by creating multiple instances of vtk windows we can load
multiple meshes. Some functions reference this specifically with a reference
index passed from mrVista --- mainWindowUI.vtkInstances[int(theMeshInstance)]
while others just reference the most recently added instance (e.g. when adding
a new mesh) --- mainWindowUI.vtkInstances[-1]
Note that it is the mainWindowUI that is passed to all functions so that all
functions have the content of the main window in scope.
Andre' Gouws 2017
'''
import vtk
from numpy import *
import time
from vtk.util import numpy_support
debug = True
# local modules
from mp_unpackIncomingData import unpackData
from mp_VTKProcessing import *
from mp_VTKDrawing import *
def loadNewMesh(currVTKInstance, commandArgs, mainWindowUI, the_TCPserver):
    """Receive mesh data from the mrVista server, build a VTK surface and
    display it in the most recently created vtk window
    (mainWindowUI.vtkInstances[-1]).

    The server must send its payloads in this order:
      1) vertices  2) triangles  3) r  4) g  5) b  6) a
    where r/g/b/a are per-vertex uint8 color channels.
    """
    if debug:
        print('received request for new mesh with Args:')
        print(commandArgs)

    # sanity check - refuse anything that is not vertices-then-triangles
    if ('vertices' in commandArgs[0]) and ('triangles' in commandArgs[1]):
        pass
    else:
        return "error - expecting vertices, then triangles!"

    # load the surface geometry; each arg is a 'label,handle,count' string
    verticesArgs = commandArgs[0].strip().split(',')
    vertices = unpackData(verticesArgs[1], int(verticesArgs[2]), the_TCPserver)
    vertices = array(vertices, 'f')
    # '//' keeps the row count an int under both Python 2 and 3
    vertices = vertices.reshape((len(vertices) // 3, 3))

    trianglesArgs = commandArgs[1].strip().split(',')
    triangles = unpackData(trianglesArgs[1], int(trianglesArgs[2]), the_TCPserver)
    triangles = array(triangles, 'f')
    if debug: print(triangles)
    triangles = triangles.reshape((len(triangles) // 3, 3))
    if debug: print(triangles)

    # load the per-vertex surface colour data (four uint8 channels)
    rVecArgs = commandArgs[2].strip().split(',')
    r_vec = unpackData(rVecArgs[1], int(rVecArgs[2]), the_TCPserver)
    r_vec = array(r_vec, 'uint8')
    if debug: print(r_vec)

    gVecArgs = commandArgs[3].strip().split(',')
    g_vec = unpackData(gVecArgs[1], int(gVecArgs[2]), the_TCPserver)
    g_vec = array(g_vec, 'uint8')

    bVecArgs = commandArgs[4].strip().split(',')
    b_vec = unpackData(bVecArgs[1], int(bVecArgs[2]), the_TCPserver)
    b_vec = array(b_vec, 'uint8')

    aVecArgs = commandArgs[5].strip().split(',')
    a_vec = unpackData(aVecArgs[1], int(aVecArgs[2]), the_TCPserver)
    a_vec = array(a_vec, 'uint8')

    if debug:
        print(len(r_vec))
        print(len(g_vec))
        print(len(b_vec))
        print(len(a_vec))

    # combine the channels into one (nVertices, 4) RGBA numpy array
    colorDat = squeeze(array(squeeze([r_vec, g_vec, b_vec, a_vec]), 'B', order='F').transpose())

    # convert this to a VTK unsigned char array; keep a deep copy so later
    # vtk filters (e.g. smoothing) cannot corrupt the original color map
    scalars = numpy_support.numpy_to_vtk(colorDat, 0)
    curr_scalars = vtk.vtkUnsignedCharArray()
    curr_scalars.DeepCopy(scalars)

    ## ---- ok, we have the data, lets turn it into vtk stuff

    # Process vertices
    points = vtk.vtkPoints()
    for i in range(vertices.shape[0]):
        points.InsertPoint(i, vertices[i][0], vertices[i][1], vertices[i][2])

    # Process faces (triangles)
    polys = vtk.vtkCellArray()
    nTriangles = triangles.shape[0]
    for i in range(nTriangles):
        polys.InsertNextCell(3)
        for j in range(3):
            polys.InsertCellPoint(int(triangles[i][j]))

    # check
    if debug: print(points)
    if debug: print(polys)
    if debug: print(scalars)
    if debug: print(currVTKInstance)

    # Assemble as PolyData
    polyData = vtk.vtkPolyData()
    polyData.SetPoints(points)
    polyData.SetPolys(polys)
    polyData.GetPointData().SetScalars(scalars)

    ## TODO ? smoothing on first load?
    # Pass-through smoother (0 iterations); it stays in the pipeline so later
    # smoothing requests only need to update its parameters.
    # BUGFIX: the filter was previously instantiated twice back to back; the
    # redundant duplicate construction has been removed.
    smooth = vtk.vtkSmoothPolyDataFilter()
    smooth.SetNumberOfIterations(0)
    smooth.SetRelaxationFactor(0.0)
    smooth.FeatureEdgeSmoothingOff()
    smooth.SetInputData(polyData)

    pdm = vtk.vtkPolyDataMapper()
    pdm.SetScalarModeToUsePointData()
    pdm.SetInputConnection(smooth.GetOutputPort())

    actor = vtk.vtkActor()
    actor.SetMapper(pdm)

    ## ---- engine room for drawing on the surface

    # add a picker that allows us to pick points on the surface
    picker = vtk.vtkCellPicker()
    picker.SetTolerance(0.0001)
    mainWindowUI.vtkInstances[-1].SetPicker(picker)

    # place holders for picked point ids / original values / coordinates so
    # the drawing routines can track state; mirrored on the inherited _Iren
    mainWindowUI.vtkInstances[-1]._Iren.pickedPointIds = []
    mainWindowUI.vtkInstances[-1].pickedPointIds = mainWindowUI.vtkInstances[-1]._Iren.pickedPointIds
    mainWindowUI.vtkInstances[-1]._Iren.pickedPointOrigValues = []
    mainWindowUI.vtkInstances[-1].pickedPointOrigValues = mainWindowUI.vtkInstances[-1]._Iren.pickedPointOrigValues
    mainWindowUI.vtkInstances[-1]._Iren.pickedPoints = vtk.vtkPoints()
    mainWindowUI.vtkInstances[-1].pickedPoints = mainWindowUI.vtkInstances[-1]._Iren.pickedPoints
    mainWindowUI.vtkInstances[-1]._Iren.inDrawMode = 0  # TODO
    mainWindowUI.vtkInstances[-1].inDrawMode = mainWindowUI.vtkInstances[-1]._Iren.inDrawMode

    # drawing functions imported from mp_VTKDrawing
    mainWindowUI.vtkInstances[-1].AddObserver('LeftButtonPressEvent', drawingPickPoint, 1.0)
    mainWindowUI.vtkInstances[-1].AddObserver('RightButtonPressEvent', drawingMakeROI, 1.0)

    ren = mainWindowUI.vtkInstances[-1].ren
    mainWindowUI.vtkInstances[-1]._Iren.ren = ren

    # ADD A LIGHT SOURCE  TODO: MAKE THIS OPTIONAL/DEFAULT?
    lightKit = vtk.vtkLightKit()
    lightKit.SetKeyLightIntensity(0.5)

    # TODO: SOME OPTIONS TO EXPLORE
    #lightKit.MaintainLuminanceOn()
    #lightKit.SetKeyLightIntensity(1.0)
    ## warmth of the lights
    #lightKit.SetKeyLightWarmth(0.65)
    #lightKit.SetFillLightWarmth(0.6)
    #lightKit.SetHeadLightWarmth(0.45)
    ## intensity ratios
    ## back lights will be very dimm
    lightKit.SetKeyToFillRatio(1.)
    lightKit.SetKeyToHeadRatio(2.)
    lightKit.SetKeyToBackRatio(1.)
    lightKit.AddLightsToRenderer(ren)

    ren.AddActor(actor)
    ren.SetBackground(1, 1, 1)
    ren.ResetCamera()
    ren.Render()
    mainWindowUI.vtkInstances[-1].Render()

    # keep handles to the pipeline objects on the window so later commands
    # (smoothing, color updates) can manipulate them
    mainWindowUI.vtkInstances[-1].curr_actor = actor
    mainWindowUI.vtkInstances[-1].curr_smoother = smooth
    mainWindowUI.vtkInstances[-1].curr_polydata = polyData
    mainWindowUI.vtkInstances[-1].curr_mapper = pdm
    mainWindowUI.vtkInstances[-1].curr_camera = ren.GetActiveCamera()
    # and the raw mesh coordinate data.. why not
    mainWindowUI.vtkInstances[-1].curr_points = points
    mainWindowUI.vtkInstances[-1].curr_polys = polys
    mainWindowUI.vtkInstances[-1].curr_scalars = curr_scalars  # Deep copied

    # later processes access the inherited renderwindowinteractor, so mirror
    # the same handles there too
    mainWindowUI.vtkInstances[-1]._Iren.curr_actor = actor
    mainWindowUI.vtkInstances[-1]._Iren.curr_smoother = smooth
    mainWindowUI.vtkInstances[-1]._Iren.curr_polydata = polyData
    mainWindowUI.vtkInstances[-1]._Iren.curr_mapper = pdm
    mainWindowUI.vtkInstances[-1]._Iren.curr_camera = ren.GetActiveCamera()
    mainWindowUI.vtkInstances[-1]._Iren.curr_points = points
    mainWindowUI.vtkInstances[-1]._Iren.curr_polys = polys
    mainWindowUI.vtkInstances[-1]._Iren.curr_scalars = curr_scalars  # Deep copied

    # and so we can access ui controls (e.g. statusbar) from the inherited window
    mainWindowUI.vtkInstances[-1]._Iren.parent_ui = mainWindowUI

    def KeyPress(obj, evt):
        # simple diagnostic key handler ('l' prints the current instance)
        key = obj.GetKeySym()
        if key == 'l':
            currVTKinstance = len(mainWindowUI.vtkInstances)
            print(key)
            print(mainWindowUI.vtkInstances[currVTKinstance - 1])

    # let's also track key presses per instance esp for the draw routine :)
    mainWindowUI.vtkInstances[-1].AddObserver("KeyPressEvent", KeyPress)

    # bring the new mesh's tab to the front (zero index)
    mainWindowUI.tabWidget.setCurrentIndex(len(mainWindowUI.vtkInstances) - 1)
def smoothMesh(theMeshInstance, commandArgs, mainWindowUI, the_TCPserver):
    """Apply Laplacian smoothing to an existing mesh instance.

    commandArgs[0] must look like 'iterations,<int>,relaxationfactor,<float>'.
    Returns 0 on success, 1 if the requested mesh instance is unknown, or an
    error string when the arguments are malformed.
    """
    # lets try to get the apt window
    try:
        targetVTKWindow = mainWindowUI.vtkInstances[mainWindowUI.vtkDict[theMeshInstance]]
    # narrowed from a bare except so SystemExit/KeyboardInterrupt pass through
    except Exception:
        print ('No mesh instance with id:%s currently available - may need a re-synch' %theMeshInstance)
        #return error
        return 1

    # lets show the correct tab
    mainWindowUI.tabWidget.setCurrentIndex(int(mainWindowUI.vtkDict[theMeshInstance]))
    #mainWindowUI.tabWidget.repaint()
    mainWindowUI.tabWidget.update()

    #lets get the original data
    the_smoother = targetVTKWindow.curr_smoother
    the_mapper = targetVTKWindow.curr_mapper

    if debug: print(targetVTKWindow.curr_actor.GetMapper().GetInput().GetPointData().GetScalars())
    if debug: print(targetVTKWindow.curr_actor.GetMapper().GetInput().GetPointData().GetScalars().GetTuple(1000))

    # expecting a string that reads something like 'iterations,200,relaxationfactor,1.2'
    # sanity check
    if ('iterations' in commandArgs[0]) and ('relaxationfactor' in commandArgs[0]):
        smoothingArgs = commandArgs[0].strip().split(',')
        iterations = int(smoothingArgs[1])
        relaxationfactor = float(smoothingArgs[3])
    else:
        # BUGFIX: the old message was copy-pasted from the mesh loader and
        # described the wrong expected arguments.
        return "error - expecting iterations, then relaxationfactor!"

    # NOTE: the legacy py2-only "print 'x'" statements below were normalized
    # to print() calls for consistency with the rest of this module.
    if debug: print('starting smoothing callback')
    newActor = VTK_smoothing(the_smoother, the_mapper, iterations, relaxationfactor)
    if debug: print('smoothing callback returned new actor')

    if debug: print('removing old actor')
    targetVTKWindow.ren.RemoveActor(targetVTKWindow.curr_actor)
    if debug: print('adding new actor')
    targetVTKWindow.ren.AddActor(newActor)
    if debug: print('added new actor - changing curr actor pointer')
    targetVTKWindow.curr_actor = newActor  # lets keep track
    if debug: print('trying to update ')

    # run mesh update to reset the color map (smoothing "messes" this up)
    updateMeshData(theMeshInstance, [], mainWindowUI, the_TCPserver)
    if debug: print('update completed')

    #return success
    return 0
def updateMeshData(theMeshInstance, commandArgs, mainWindowUI, the_TCPserver):
    """Recolor an already-loaded mesh with RGBA values sent directly from the
    VISTA session (no lookup table is applied).

    Expected payload order from the server: r, g, b, a per-vertex channels.
    When commandArgs is empty (an internal redraw, e.g. after smoothing) the
    stored deep-copied color scalars are reapplied instead. Returns 0 on
    success, 1 if the mesh instance does not exist.
    """
    # lets try to get the apt window
    try:
        targetVTKWindow = mainWindowUI.vtkInstances[mainWindowUI.vtkDict[theMeshInstance]]
    # narrowed from a bare except so SystemExit/KeyboardInterrupt pass through
    except Exception:
        print ('No mesh instance with id:%s currently available - may need a re-synch' %theMeshInstance)
        #return error
        return 1

    # lets show the correct tab (zero index)
    mainWindowUI.tabWidget.setCurrentIndex(int(mainWindowUI.vtkDict[theMeshInstance]))
    #mainWindowUI.tabWidget.repaint()
    mainWindowUI.tabWidget.update()

    if debug:
        print('received request for UPDATE DIRECT mesh with Args:')
        print(commandArgs)

    if len(commandArgs) != 0:  # new data has come from MATLAB so recompute
        # unpack the four uint8 colour channels; each arg is 'label,handle,count'
        rVecArgs = commandArgs[0].strip().split(',')
        r_vec = unpackData(rVecArgs[1], int(rVecArgs[2]), the_TCPserver)
        r_vec = array(r_vec, 'uint8')
        if debug: print(r_vec)

        gVecArgs = commandArgs[1].strip().split(',')
        g_vec = unpackData(gVecArgs[1], int(gVecArgs[2]), the_TCPserver)
        g_vec = array(g_vec, 'uint8')

        bVecArgs = commandArgs[2].strip().split(',')
        b_vec = unpackData(bVecArgs[1], int(bVecArgs[2]), the_TCPserver)
        b_vec = array(b_vec, 'uint8')

        aVecArgs = commandArgs[3].strip().split(',')
        a_vec = unpackData(aVecArgs[1], int(aVecArgs[2]), the_TCPserver)
        a_vec = array(a_vec, 'uint8')

        if debug:
            print(len(r_vec))
            print(len(g_vec))
            print(len(b_vec))
            print(len(a_vec))

        # combine into an (nVertices, 4) RGBA numpy array
        colorDat = squeeze(array(squeeze([r_vec, g_vec, b_vec, a_vec]), 'B', order='F').transpose())

        # convert this to a VTK unsigned char array
        vtkColorArray = numpy_support.numpy_to_vtk(colorDat, 0)

        # keep a "deep" copy - this is to workaround some artifacts generated
        # by vtk algorithms (e.g. smoothing) that also smooth the color data
        # on the surface and then automatically update the inherited color map
        # - we allow vtk to do this but then overwrite the recomputed color
        # map AFTER the algorithms have run
        deepCopyScalars = vtk.vtkUnsignedCharArray()
        deepCopyScalars.DeepCopy(vtkColorArray)
        targetVTKWindow.curr_scalars = deepCopyScalars
        #TODO - this may have impact on later processing - investigate
    else:
        # no new data from MATLAB, probably just an internal re-draw call
        # after something like smoothing - just grab the current deep
        # copy of the required scalars
        vtkColorArray = targetVTKWindow.curr_scalars

    # OK - we have the data - let's update the mesh and swap the actors
    newActor = VTK_updateMesh(targetVTKWindow, vtkColorArray, mainWindowUI)
    targetVTKWindow.ren.AddActor(newActor)
    targetVTKWindow.ren.RemoveActor(targetVTKWindow.curr_actor)
    targetVTKWindow.curr_actor = newActor  # lets keep track
    targetVTKWindow.ren.Render()
    targetVTKWindow.Render()
    print('success with direct mesh update routine')
    #return success
    return 0
## --------------------------------------------------------------------------------
# test example animation
def rotateMeshAnimation(currVTKInstance, commandArgs, mainWindowUI, the_TCPserver):
    """Spin the camera of a mesh window through a list of azimuth increments
    streamed from the server, redrawing between steps."""
    # unpack the rotation increments; commandArgs[0] is 'label,handle,count'
    rotSpec = commandArgs[0].strip().split(',')
    angles = unpackData(rotSpec[1], int(rotSpec[2]), the_TCPserver)
    if debug:
        print(angles)

    window = mainWindowUI.vtkInstances[int(currVTKInstance)]  # NB zero indexing
    cam = window.ren.GetActiveCamera()
    if debug:
        print(cam)

    for angle in angles:
        cam.Azimuth(angle)
        #window.ren.Render()
        window.iren.Render()
        # short pause so the rotation is visible as an animation
        time.sleep(0.02)

    the_TCPserver.socket.write(str('send useful message back here TODO'))
## --------------------------------------------------------------------------------
|
andregouws/mrMeshPy
|
mp_VTKRoutines.py
|
Python
|
mit
| 15,719
|
[
"VTK"
] |
d67ef568356621cb63575a7c1ddc164ee69e367658b4cb1d6e7dd4d126edb7cf
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from math import exp
import sys
import ConfigParser as cfg
import os
import numpy as n
import numpy.random as nr
from math import ceil, floor
from collections import OrderedDict
from os import linesep as NL
from python_util.options import OptionsParser
import re
class LayerParsingError(Exception):
    """Raised when a layer configuration contains a missing or invalid
    parameter definition."""
    pass
# A neuron that doesn't take parameters
class NeuronParser:
    """Parser for parameter-free neuron type strings (e.g. "relu")."""

    def __init__(self, type, func_str, uses_acts=True, uses_inputs=True):
        # NOTE: parameter is named "type" to preserve the public signature
        # (it shadows the builtin).
        self.type = type
        self.func_str = func_str
        self.uses_acts = uses_acts
        self.uses_inputs = uses_inputs

    def parse(self, type):
        """Return a descriptor dict if *type* matches exactly, else None."""
        if type != self.type:
            return None
        return {'type': self.type,
                'params': {},
                'usesActs': self.uses_acts,
                'usesInputs': self.uses_inputs}
# A neuron that takes parameters
class ParamNeuronParser(NeuronParser):
    """Parser for neuron strings of the form "name[p1,p2,...]" whose
    parameters are floats."""
    neuron_regex = re.compile(r'^\s*(\w+)\s*\[\s*(\w+(\s*,\w+)*)\s*\]\s*$')

    def __init__(self, type, func_str, uses_acts=True, uses_inputs=True):
        NeuronParser.__init__(self, type, func_str, uses_acts, uses_inputs)
        match = self.neuron_regex.match(type)
        self.base_type = match.group(1)
        self.param_names = match.group(2).split(',')
        # Declared parameter names must be unique.
        assert len(set(self.param_names)) == len(self.param_names)

    def parse(self, type):
        """Return a descriptor dict when *type* is "<base>[v1,v2,...]" with
        the right number of numeric values, else None."""
        m = re.match(r'^%s\s*\[([\d,\.\s\-]*)\]\s*$' % self.base_type, type)
        if not m:
            return None
        try:
            values = [float(tok.strip()) for tok in m.group(1).split(',')]
            if len(values) == len(self.param_names):
                return {'type': self.base_type,
                        'params': dict(zip(self.param_names, values)),
                        'usesActs': self.uses_acts,
                        'usesInputs': self.uses_inputs}
        except TypeError:
            pass
        return None
class AbsTanhNeuronParser(ParamNeuronParser):
    """Parser for the abstanh neuron; normalizes parameter b to be positive."""

    def __init__(self):
        ParamNeuronParser.__init__(self, 'abstanh[a,b]', 'f(x) = a * |tanh(b * x)|')

    def parse(self, type):
        parsed = ParamNeuronParser.parse(self, type)
        # abs(tanh(b*x)) == abs(tanh(-b*x)), and the C++ code assumes b >= 0.
        if parsed:
            parsed['params']['b'] = abs(parsed['params']['b'])
        return parsed
class ParamParser:
    """Parses declarations like "base[fdecay;imom]" where each parameter name
    is prefixed by a one-character type code ('f' float, 'i' int), and then
    matches concrete strings like "base[decay=0.5;mom=2]"."""
    lrs_regex = re.compile(r'^\s*(\w+)\s*(?:\[\s*(\w+(\s*;\w+)*)\s*\])?\s*$')
    param_converters = {'i': int,
                        'f': float}

    def __init__(self, type):
        m = self.lrs_regex.match(type)
        self.base_type = m.group(1)
        typed_names = m.group(2).split(';') if m.group(2) is not None else []
        # Strip the leading type-code character to get the bare names.
        self.param_names = [t[1:] for t in typed_names]
        self.param_types = [self.param_converters[t[0]] for t in typed_names]
        self.param_regex_inner = ";".join(r'\s*%s\s*=\s*[^;,\s=]+\s*' % name
                                          for name in self.param_names)
        self.regex_str = r'^%s\s*(?:\[(%s)\])?\s*$' % (self.base_type, self.param_regex_inner)
        assert len(set(self.param_names)) == len(self.param_names)

    def parse(self, type):
        """Return {'type': ..., 'params': {...}} on a full match, else None."""
        m = re.match(self.regex_str, type, flags=re.IGNORECASE)
        if not m:
            return None
        try:
            if m.group(1) is None:
                values = []
            else:
                pairs = m.group(1).split(';')
                values = [conv(pair.split('=')[1].strip())
                          for conv, pair in zip(self.param_types, pairs)]
            if len(values) == len(self.param_names):
                return {'type': self.base_type,
                        'params': dict(zip(self.param_names, values))}
        except TypeError:
            pass
        return None
# Subclass that throws more convnet-specific exceptions than the default
class MyConfigParser(cfg.SafeConfigParser):
    """SafeConfigParser subclass that translates lookup/conversion failures
    into LayerParsingError with layer-aware messages."""

    def safe_get(self, section, option, f=cfg.SafeConfigParser.get, typestr=None, default=None):
        """Fetch *option* via accessor *f*; return *default* when the option
        is absent (if given), and report conversion failures as *typestr*."""
        try:
            return f(self, section, option)
        # BUGFIX/modernization: legacy "except E, e" comma syntax replaced by
        # "except E as e" (valid on Python 2.6+ and required on Python 3).
        except cfg.NoOptionError:
            if default is not None:
                return default
            raise LayerParsingError("Layer '%s': required parameter '%s' missing" % (section, option))
        except ValueError as e:
            if typestr is None:
                raise e
            raise LayerParsingError("Layer '%s': parameter '%s' must be %s" % (section, option, typestr))

    def safe_get_list(self, section, option, f=str, typestr='strings', default=None):
        """Fetch a ','-delimited option and convert each item with *f*."""
        v = self.safe_get(section, option, default=default)
        if type(v) == list:
            return v
        try:
            return [f(x.strip()) for x in v.split(',')]
        # Narrowed from a bare except: still reports any split/convert failure
        # as a parsing error, but no longer swallows KeyboardInterrupt.
        except Exception:
            raise LayerParsingError("Layer '%s': parameter '%s' must be ','-delimited list of %s" % (section, option, typestr))

    def safe_get_int(self, section, option, default=None):
        return self.safe_get(section, option, f=cfg.SafeConfigParser.getint, typestr='int', default=default)

    def safe_get_float(self, section, option, default=None):
        return self.safe_get(section, option, f=cfg.SafeConfigParser.getfloat, typestr='float', default=default)

    def safe_get_bool(self, section, option, default=None):
        return self.safe_get(section, option, f=cfg.SafeConfigParser.getboolean, typestr='bool', default=default)

    def safe_get_float_list(self, section, option, default=None):
        return self.safe_get_list(section, option, float, typestr='floats', default=default)

    def safe_get_int_list(self, section, option, default=None):
        return self.safe_get_list(section, option, int, typestr='ints', default=default)

    def safe_get_bool_list(self, section, option, default=None):
        return self.safe_get_list(section, option, lambda x: x.lower() in ('true', '1'), typestr='bools', default=default)
# A class that implements part of the interface of MyConfigParser
class FakeConfigParser(object):
    """Dict-backed stand-in exposing the MyConfigParser lookup interface."""

    def __init__(self, dic):
        self.dic = dic

    def safe_get(self, section, option, default=None):
        # The section argument is ignored; options live in one flat dict.
        return self.dic[option] if option in self.dic else default

    def safe_get_int(self, section, option, default=None):
        return int(self.safe_get(section, option, default))

    def safe_get_int_list(self, section, option, default=None):
        return list(self.safe_get(section, option, default))
class LayerParser:
    def __init__(self):
        # Layer state accumulates in this dict; seeded by set_defaults() and
        # filled in further by parse().
        self.dic = {}
        self.set_defaults()
    # Post-processing step -- this is called after all layers have been initialized
    def optimize(self, layers):
        # -1 means "no sharing target assigned" (layer uses its own matrices).
        self.dic['actsTarget'] = -1
        self.dic['actsGradTarget'] = -1
        # All layers consuming this layer's output must run on the same number
        # of replicas (length of their 'gpu' list); mixed counts are an error.
        if len(set(len(l['gpu']) for l in layers.values() if 'inputs' in l and self.dic['name'] in l['inputs'])) > 1:
#            print set(len(l['gpu']) for l in layers.values())
            raise LayerParsingError("Layer '%s': all next layers must have equal number of replicas." % (self.dic['name']))
def parse_params(self, vals, parsers, param_name, human_name, num_params=1):
dic, name = self.dic, self.dic['name']
# print vals
if len(vals) != num_params and len(vals) != 1:
raise LayerParsingError("Layer '%s': expected list of length %d for %s but got list of length %d."% (name, num_params, param_name, len(vals)))
parsed = []
# print vals
for v in vals:
for p in parsers:
parsedv = p.parse(v)
if parsedv:
parsed += [parsedv]
break
if len(parsed) == 1 and num_params > 1:
parsed = parsed * num_params
if len(parsed) == num_params:
return parsed
# print parsed, vals
raise LayerParsingError("Layer '%s': unable to parse %s %s=%s." % (name, human_name, param_name, ",".join(vals)))
    # Add parameters from layer parameter file
    def add_params(self, mcp):
        # Base implementation is a no-op; subclasses override this to pull
        # layer-specific options out of the parameter file (mcp).
        pass
#        self.dic['conserveMem'] = mcp.convnet.op.get_value('conserve_mem') if mcp.convnet is not None else 0
    def init(self, dic):
        # Re-bind an existing layer dict to this parser (fluent: returns self).
        self.dic = dic
        return self
def set_defaults(self):
self.dic['outputs'] = 0
self.dic['parser'] = self
self.dic['requiresParams'] = False
# Does this layer use its own activity matrix
# for some purpose other than computing its output?
# Usually, this will only be true for layers that require their
# own activity matrix for gradient computations. For example, layers
# with logistic units must compute the gradient y * (1 - y), where y is
# the activity matrix.
#
# Layers that do not not use their own activity matrix should advertise
# this, since this will enable memory-saving matrix re-use optimizations.
#
# The default value of this property is True, for safety purposes.
# If a layer advertises that it does not use its own activity matrix when
# in fact it does, bad things will happen.
self.dic['usesActs'] = True
# Does this layer use the activity matrices of its input layers
# for some purpose other than computing its output?
#
# Again true by default for safety
self.dic['usesInputs'] = True
# Force this layer to use its own activity gradient matrix,
# instead of borrowing one from one of its inputs.
#
# This should be true for layers where the mapping from output
# gradient to input gradient is non-elementwise.
self.dic['forceOwnActs'] = True
# Does this layer need the gradient at all?
# Should only be true for layers with parameters (weights).
self.dic['gradConsumer'] = False
# The gpu indices on which this layer runs
self.dic['gpu'] = [-1]
def parse(self, name, mcp, prev_layers, model=None):
self.prev_layers = prev_layers
self.dic['name'] = name
self.dic['type'] = mcp.safe_get(name, 'type')
self.dic['id'] = len(prev_layers)
return self.dic
def verify_float_range(self, v, param_name, _min, _max):
    """Range-check a float (or list of floats), rendering the bounds with
    three decimal places in any error message."""
    fmt = lambda x: '%.3f' % x
    self.verify_num_range(v, param_name, _min, _max, strconv=fmt)
def verify_num_range(self, v, param_name, _min, _max, strconv=lambda x:'%d' % x):
    """Range-check a scalar, or every element of a list (elements are
    reported with their index in the error message)."""
    if type(v) != list:
        self._verify_num_range(v, param_name, _min, _max, strconv=strconv)
    else:
        for idx, item in enumerate(v):
            self._verify_num_range(item, param_name, _min, _max, idx, strconv=strconv)
def _verify_num_range(self, v, param_name, _min, _max, input=-1, strconv=lambda x:'%d' % x):
    """Raise LayerParsingError when *v* falls outside [_min, _max]; either
    bound may be None (unbounded). *input* >= 0 tags the message with the
    input index."""
    if input < 0:
        layer_name = self.dic['name']
    else:
        layer_name = '%s[%d]' % (self.dic['name'], input)
    have_min = _min is not None
    have_max = _max is not None
    if have_min and have_max and (v < _min or v > _max):
        raise LayerParsingError("Layer '%s': parameter '%s' must be in the range %s-%s" % (layer_name, param_name, strconv(_min), strconv(_max)))
    elif have_min and v < _min:
        raise LayerParsingError("Layer '%s': parameter '%s' must be greater than or equal to %s" % (layer_name, param_name, strconv(_min)))
    elif have_max and v > _max:
        raise LayerParsingError("Layer '%s': parameter '%s' must be smaller than or equal to %s" % (layer_name, param_name, strconv(_max)))
def verify_divisible(self, value, div, value_name, div_name=None, input_idx=0):
    """Raise unless *value* is an exact multiple of *div*."""
    if len(self.dic['inputs']) == 0:
        layer_name = self.dic['name']
    else:
        layer_name = '%s[%d]' % (self.dic['name'], input_idx)
    if value % div != 0:
        divisor_str = str(div) if div_name is None else "'%s'" % div_name
        raise LayerParsingError("Layer '%s': parameter '%s' must be divisible by %s" % (layer_name, value_name, divisor_str))
def verify_str_in(self, value, param_name, lst, input_idx=-1):
    """Raise unless string *value* is one of the allowed values in *lst*."""
    if input_idx == -1:
        lname = self.dic['name']
    else:
        lname = '%s[%d]' % (self.dic['name'], input_idx)
    if value not in lst:
        allowed = ", ".join("'%s'" % s for s in lst)
        raise LayerParsingError("Layer '%s': parameter '%s' must be one of %s" % (lname, param_name, allowed))
def verify_int_in(self, value, param_name, lst):
    """Raise unless integer *value* is a member of *lst*."""
    if value in lst:
        return
    allowed = ", ".join("'%d'" % s for s in lst)
    raise LayerParsingError("Layer '%s': parameter '%s' must be one of %s" % (self.dic['name'], param_name, allowed))
def verify_all_ints_in(self, values, param_name, lst):
    """Raise LayerParsingError unless every element of *values* is a member
    of *lst*.

    Idiom fix: use any() with a generator instead of building a filtered
    list just to test its length.
    """
    if any(v not in lst for v in values):
        raise LayerParsingError("Layer '%s': all parameters to '%s' must be among %s" % (self.dic['name'], param_name, ", ".join("'%d'" % s for s in lst)))
def verify_input_dims(self, dims):
    """Check each input's dimensionality against *dims*; a None entry means
    that input is unconstrained."""
    for idx, expected in enumerate(dims):
        if expected is None:
            continue
        if self.dic['numInputs'][idx] != expected:
            raise LayerParsingError("Layer '%s': dimensionality of input %d must be %d" % (self.dic['name'], idx, expected))
# This looks for neuron=x arguments in various layers, and creates
# separate layer definitions for them.
@staticmethod
def detach_neuron_layers(layers):
    """Split every inline 'neuron=...' declaration into a standalone neuron
    layer inserted above the declaring layer (mutates *layers*)."""
    # .items() snapshots the dict in Python 2, so inserting the new neuron
    # layers while looping is safe here.
    for name,l in layers.items():
        if l['type'] != 'neuron' and 'neuron' in l and l['neuron']:
            NeuronLayerParser().detach_neuron_layer(name, layers)
@staticmethod
def parse_layers(layer_cfg_path, param_cfg_path, model, layers={}):
try:
if not os.path.exists(layer_cfg_path):
raise LayerParsingError("Layer definition file '%s' does not exist" % layer_cfg_path)
if not os.path.exists(param_cfg_path):
raise LayerParsingError("Layer parameter file '%s' does not exist" % param_cfg_path)
if len(layers) == 0:
mcp = MyConfigParser(dict_type=OrderedDict)
mcp.readfp(open(layer_cfg_path))
for name in mcp.sections():
if not mcp.has_option(name, 'type'):
raise LayerParsingError("Layer '%s': no type given" % name)
ltype = mcp.safe_get(name, 'type')
if ltype not in layer_parsers:
raise LayerParsingError("Layer '%s': Unknown layer type: '%s'" % (name, ltype))
layers[name] = layer_parsers[ltype]().parse(name, mcp, layers, model)
LayerParser.detach_neuron_layers(layers)
for l in layers.values():
l['parser'].optimize(layers)
del l['parser']
for name,l in layers.items():
if not l['type'].startswith('cost.'):
found = max(name in l2['inputs'] for l2 in layers.values() if 'inputs' in l2)
if not found:
raise LayerParsingError("Layer '%s' of type '%s' is unused" % (name, l['type']))
mcp = MyConfigParser(dict_type=OrderedDict)
mcp.readfp(open(param_cfg_path))
# mcp.convnet = model
for name,l in layers.items():
if not mcp.has_section(name) and l['requiresParams']:
raise LayerParsingError("Layer '%s' of type '%s' requires extra parameters, but none given in file '%s'." % (name, l['type'], param_cfg_path))
lp = layer_parsers[l['type']]().init(l)
lp.add_params(mcp)
except LayerParsingError, e:
print e
sys.exit(1)
return layers
@staticmethod
def register_layer_parser(ltype, cls):
    """Register parser class *cls* for layer type *ltype*; each type may be
    registered exactly once."""
    if ltype not in layer_parsers:
        layer_parsers[ltype] = cls
    else:
        raise LayerParsingError("Layer type '%s' already registered" % ltype)
# Any layer that takes an input (i.e. non-data layer)
class LayerWithInputParser(LayerParser):
    """Base parser for layers that consume the output of other layers.

    Adds input wiring (layer names -> layer dicts), GPU/replica consistency
    checks, and acts/gradient matrix re-use optimizations.
    """
    def __init__(self, num_inputs=-1):
        # num_inputs: exact number of inputs this layer type requires,
        # or -1 to accept any number.
        LayerParser.__init__(self)
        self.num_inputs = num_inputs

    def verify_num_params(self, params, auto_expand=True):
        """Check that each named per-input parameter list has one entry per
        input; a single-element list is replicated across all inputs when
        auto_expand is True."""
        for param in params:
            if len(self.dic[param]) != len(self.dic['inputs']):
                if auto_expand and len(self.dic[param]) == 1:
                    self.dic[param] *= len(self.dic['inputs'])
                else:
                    raise LayerParsingError("Layer '%s': %s list length does not match number of inputs" % (self.dic['name'], param))

    # layers: dictionary: name -> layer
    def optimize(self, layers):
        """Let this layer share activity and/or gradient matrices with an
        input layer that has matching output size and no other consumers."""
        LayerParser.optimize(self, layers)
        dic = self.dic
        # Check if I have an input that no one else uses.
        #print "Layer %s optimizing" % dic['name']
        if not dic['forceOwnActs']:
            for i, inp in enumerate(dic['inputLayers']):
                if inp['outputs'] == dic['outputs'] and sum(('inputs' in ll) and (inp['name'] in ll['inputs']) for ll in layers.itervalues()) == 1:
                    # I can share my activity matrix with this layer
                    # if it does not use its activity matrix, and I
                    # do not need to remember my inputs.
                    # TODO: a dropout layer should always be able to overwrite
                    # its input. Make it so.
                    # print "Layer %s(uses inputs=%d), input %s(uses acts = %d)" % (dic['name'], dic['usesInputs'], inp['name'], inp['usesActs'])
                    if not inp['usesActs'] and not dic['usesInputs']:
                        dic['actsTarget'] = i
                        print "Layer %s using acts from layer %s" % (dic['name'], inp['name'])
                        # print "Layer '%s' sharing activity matrix with layer '%s'" % (dic['name'], l['name'])
                    # I can share my gradient matrix with this layer if we're on the same GPU.
                    # This is different from the logic for actsTarget because this guy doesn't
                    # have an actsGrad matrix on my GPU if our GPUs are different, so there's
                    # nothing to share.
                    if dic['gpu'] == inp['gpu']:
                        dic['actsGradTarget'] = i
                    # print "Layer '%s' sharing activity gradient matrix with layer '%s'" % (dic['name'], l['name'])

    def parse(self, name, mcp, prev_layers, model=None):
        """Resolve input layer names, set up replica/GPU bookkeeping, and
        verify input-layer consistency. Returns the layer dict."""
        dic = LayerParser.parse(self, name, mcp, prev_layers, model)
        dic['inputs'] = [inp.strip() for inp in mcp.safe_get(name, 'inputs').split(',')]
        for inp in dic['inputs']:
            if inp not in prev_layers:
                raise LayerParsingError("Layer '%s': input layer '%s' not defined" % (name, inp))
        dic['inputLayers'] = [prev_layers[inp] for inp in dic['inputs']]
        # When no explicit 'gpu' is given, inherit the first input's GPUs.
        dic['gpu'] = mcp.safe_get_int_list(name, 'gpu', default=dic['inputLayers'][0]['gpu'])
        dic['gpus'] = ", ".join('%s' % d for d in dic['gpu'])
        dic['numReplicas'] = len(dic['gpu'])
        if len(set(dic['gpu'])) != len(dic['gpu']):
            raise LayerParsingError("Layer '%s': all replicas must run on different GPUs." % (name))
        for inp in dic['inputs']:
            # Data layers do not explicitly define how many replicas they have.
            # The number of replicas for a data layer is given by the number of replicas
            # in the next layer(s). So we set that here.
            inpl = prev_layers[inp]
            if inpl['type'] == 'data':
                inpl['numReplicas'] = dic['numReplicas']
            if inpl['numReplicas'] % dic['numReplicas'] != 0:
                raise LayerParsingError("Layer '%s': number of replicas (%d) must divide number of replicas in all input layers (input %s has %d replicas)." % (name, dic['numReplicas'], inpl['name'], inpl['numReplicas']))
        if len(set(inp['numReplicas'] for inp in dic['inputLayers'])) != 1:
            raise LayerParsingError("Layer '%s': all input layers must have equal numbers of replicas." % (name))
        # Need to also assert that all *next* layers have equal number of replicas but this is hard so it's done in Layer.optimize
        for inp in dic['inputLayers']:
            if inp['outputs'] == 0:
                raise LayerParsingError("Layer '%s': input layer '%s' does not produce any output" % (name, inp['name']))
        dic['numInputs'] = [inp['outputs'] for inp in dic['inputLayers']]
        # Layers can declare a neuron activation function to apply to their output, as a shortcut
        # to avoid declaring a separate neuron layer above themselves.
        dic['neuron'] = mcp.safe_get(name, 'neuron', default="")
        if self.num_inputs > 0 and len(dic['numInputs']) != self.num_inputs:
            raise LayerParsingError("Layer '%s': number of inputs must be %d" % (name, self.num_inputs))
        if model:
            self.verify_all_ints_in(dic['gpu'], 'gpu', range(len(model.op.get_value('gpu'))))
        return dic

    def verify_img_size(self):
        """Check that the input dimensionality is consistent with square
        'channels'-channel images of side 'imgSize'."""
        dic = self.dic
        if dic['numInputs'][0] % dic['imgPixels'] != 0 or dic['imgSize'] * dic['imgSize'] != dic['imgPixels']:
            raise LayerParsingError("Layer '%s': has %-d dimensional input, not interpretable as %d-channel images" % (dic['name'], dic['numInputs'][0], dic['channels']))

    @staticmethod
    def grad_consumers_below(dic):
        """Return truthy if this layer, or any layer below it, consumes
        gradients (returns None -- falsy -- when neither applies)."""
        if dic['gradConsumer']:
            return True
        if 'inputLayers' in dic:
            return any(LayerWithInputParser.grad_consumers_below(l) for l in dic['inputLayers'])

    def verify_no_grads(self):
        """Raise if any layer below consumes gradients: this layer type
        cannot propagate a gradient through itself."""
        if LayerWithInputParser.grad_consumers_below(self.dic):
            raise LayerParsingError("Layer '%s': layers of type '%s' cannot propagate gradient and must not be placed over layers with parameters." % (self.dic['name'], self.dic['type']))
class NailbedLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['stride'] = mcp.safe_get_int(name, 'stride')
self.verify_num_range(dic['channels'], 'channels', 1, None)
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['outputsX'] = (dic['imgSize'] + dic['stride'] - 1) / dic['stride']
dic['start'] = (dic['imgSize'] - dic['stride'] * (dic['outputsX'] - 1)) / 2
dic['outputs'] = dic['channels'] * dic['outputsX']**2
self.verify_num_range(dic['outputsX'], 'outputsX', 0, None)
self.verify_img_size()
print "Initialized bed-of-nails layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (name, dic['gpus'], dic['outputsX'], dic['outputsX'], dic['channels'])
return dic
class GaussianBlurLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
dic['outputs'] = dic['numInputs'][0]
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['filterSize'] = mcp.safe_get_int(name, 'filterSize')
dic['stdev'] = mcp.safe_get_float(name, 'stdev')
self.verify_num_range(dic['channels'], 'channels', 1, None)
self.verify_int_in(dic['filterSize'], 'filterSize', [3, 5, 7, 9])
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['filter'] = n.array([exp(-(dic['filterSize']/2 - i)**2 / float(2 * dic['stdev']**2))
for i in xrange(dic['filterSize'])], dtype=n.float32).reshape(1, dic['filterSize'])
dic['filter'] /= dic['filter'].sum()
self.verify_img_size()
if dic['filterSize'] > dic['imgSize']:
raise LayerParsingError("Later '%s': filter size (%d) must be smaller than image size (%d)." % (dic['name'], dic['filterSize'], dic['imgSize']))
print "Initialized Gaussian blur layer '%s', producing %dx%d %d-channel output" % (name, dic['imgSize'], dic['imgSize'], dic['channels'])
return dic
class HorizontalReflectionLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['outputs'] = dic['numInputs'][0]
dic['channels'] = mcp.safe_get_int(name, 'channels')
self.verify_num_range(dic['channels'], 'channels', 1, 3)
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
self.verify_img_size()
print "Initialized horizontal reflection layer '%s', producing %dx%d %d-channel output" % (name, dic['imgSize'], dic['imgSize'], dic['channels'])
return dic
class ResizeLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['scale'] = mcp.safe_get_float(name, 'scale')
dic['tgtSize'] = int(floor(dic['imgSize'] / dic['scale']))
dic['tgtPixels'] = dic['tgtSize']**2
self.verify_num_range(dic['channels'], 'channels', 1, None)
# Really not recommended to use this for such severe scalings
self.verify_float_range(dic['scale'], 'scale', 0.5, 2)
dic['outputs'] = dic['channels'] * dic['tgtPixels']
self.verify_img_size()
self.verify_no_grads()
print "Initialized resize layer '%s', producing %dx%d %d-channel output" % (name, dic['tgtSize'], dic['tgtSize'], dic['channels'])
return dic
class RandomScaleLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
dic['channels'] = mcp.safe_get_int(name, 'channels')
self.verify_num_range(dic['channels'], 'channels', 1, None)
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['maxScale'] = mcp.safe_get_float(name, 'maxScale')
dic['tgtSize'] = mcp.safe_get_int(name, 'tgtSize')
min_size = int(floor(dic['imgSize'] / dic['maxScale']))
max_size = dic['imgSize'] #int(floor(dic['imgSize'] * dic['maxScale']))
if dic['tgtSize'] < min_size:
raise LayerParsingError("Layer '%s': target size must be greater than minimum image size after rescaling (%d)" % (name, min_size))
if dic['tgtSize'] > max_size:
raise LayerParsingError("Layer '%s': target size must be smaller than maximum image size after rescaling (%d)" % (name, max_size))
dic['tgtPixels'] = dic['tgtSize']**2
self.verify_float_range(dic['maxScale'], 'maxScale', 1, 2)
dic['outputs'] = dic['channels'] * dic['tgtPixels']
self.verify_img_size()
self.verify_no_grads()
print "Initialized random scale layer '%s', producing %dx%d %d-channel output" % (name, dic['tgtSize'], dic['tgtSize'], dic['channels'])
return dic
class CropLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
dic['channels'] = mcp.safe_get_int(name, 'channels')
self.verify_num_range(dic['channels'], 'channels', 1, None)
dic['startX'] = mcp.safe_get_int(name, 'startX')
dic['startY'] = mcp.safe_get_int(name, 'startY', default=dic['startX'])
dic['sizeX'] = mcp.safe_get_int(name, 'sizeX')
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['outputs'] = dic['channels'] * (dic['sizeX']**2)
self.verify_num_range(dic['startX'], 'startX', 0, dic['imgSize']-1)
self.verify_num_range(dic['sizeX'], 'sizeX', 1, dic['imgSize'])
self.verify_num_range(dic['startY'], 'startY', 0, dic['imgSize']-1)
self.verify_img_size()
self.verify_no_grads()
if dic['startX'] + dic['sizeX'] > dic['imgSize']:
raise LayerParsingError("Layer '%s': startX (%d) + sizeX (%d) > imgSize (%d)" % (name, dic['startX'], dic['sizeX'], dic['imgSize']))
print "Initialized cropping layer '%s', producing %dx%d %d-channel output" % (name, dic['sizeX'], dic['sizeX'], dic['channels'])
return dic
class ColorTransformLayerParser(LayerWithInputParser):
    """Shared base for color-space conversion layers; input is always
    treated as 3-channel square images."""
    def __init__(self):
        LayerWithInputParser.__init__(self, num_inputs=1)

    def parse(self, name, mcp, prev_layers, model=None):
        dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
        # Forward-only transformation: keeps nothing for backprop.
        dic['forceOwnActs'] = False
        dic['usesActs'] = False
        dic['usesInputs'] = False
        # Derived geometry: three channels are assumed.
        total_in = dic['numInputs'][0]
        dic['imgPixels'] = total_in / 3
        dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
        dic['channels'] = 3
        dic['outputs'] = total_in
        self.verify_img_size()
        self.verify_no_grads()
        return dic
class RGBToYUVLayerParser(ColorTransformLayerParser):
def __init__(self):
ColorTransformLayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model=None):
dic = ColorTransformLayerParser.parse(self, name, mcp, prev_layers, model)
print "Initialized RGB --> YUV layer '%s', producing %dx%d %d-channel output" % (name, dic['imgSize'], dic['imgSize'], dic['channels'])
return dic
class RGBToLABLayerParser(ColorTransformLayerParser):
def __init__(self):
ColorTransformLayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model=None):
dic = ColorTransformLayerParser.parse(self, name, mcp, prev_layers, model)
dic['center'] = mcp.safe_get_bool(name, 'center', default=False)
print "Initialized RGB --> LAB layer '%s', producing %dx%d %d-channel output" % (name, dic['imgSize'], dic['imgSize'], dic['channels'])
return dic
class NeuronLayerParser(LayerWithInputParser):
    """Parses explicit 'neuron' (activation function) layers; also used to
    detach inline neuron=... declarations into standalone layers."""
    def __init__(self):
        LayerWithInputParser.__init__(self, num_inputs=1)

    @staticmethod
    def get_unused_layer_name(layers, wish):
        """Return *wish* if not taken, else the first free name among
        wish.1 .. wish.99."""
        if wish not in layers:
            return wish
        for i in xrange(1, 100):
            name = '%s.%d' % (wish, i)
            if name not in layers:
                return name
        raise LayerParsingError("This is insane.")

    def parse_neuron(self, neuron_str):
        """Try each registered neuron parser on *neuron_str*; on success store
        the parsed neuron dict and its acts/inputs usage flags, on failure
        raise with a table of valid neuron types."""
        # Note: the loop variable 'n' shadows the module's numpy alias within
        # this method; no numpy call is made here so it is harmless.
        for n in neuron_parsers:
            p = n.parse(neuron_str)
            if p: # Successfully parsed neuron, return it
                self.dic['neuron'] = p
                self.dic['usesActs'] = self.dic['neuron']['usesActs']
                self.dic['usesInputs'] = self.dic['neuron']['usesInputs']
                return
        # Could not parse neuron
        # Print available neuron types
        colnames = ['Neuron type', 'Function']
        m = max(len(colnames[0]), OptionsParser._longest_value(neuron_parsers, key=lambda x:x.type)) + 2
        ntypes = [OptionsParser._bold(colnames[0].ljust(m))] + [n.type.ljust(m) for n in neuron_parsers]
        fnames = [OptionsParser._bold(colnames[1])] + [n.func_str for n in neuron_parsers]
        usage_lines = NL.join(ntype + fname for ntype,fname in zip(ntypes, fnames))
        raise LayerParsingError("Layer '%s': unable to parse neuron type '%s'. Valid neuron types: %sWhere neurons have parameters, they must be floats." % (self.dic['name'], neuron_str, NL + usage_lines + NL))

    def detach_neuron_layer(self, src_name, layers):
        """Create a standalone neuron layer above layer *src_name* from its
        inline neuron=... declaration, and rewire all consumers of
        *src_name* to the new layer (mutates *layers*)."""
        dic = self.dic
        # self.set_defaults()
        dic['name'] = NeuronLayerParser.get_unused_layer_name(layers, '%s_neuron' % src_name)
        dic['type'] = 'neuron'
        # 'inputs' is a bare string here; self.parse() below splits it on ','.
        dic['inputs'] = src_name
        dic['neuron'] = layers[src_name]['neuron']
        dic['gpu'] = layers[src_name]['gpu']
        # Yes it's not entirely correct to pass all of layers as prev_layers, but it's harmless
        dic = self.parse(dic['name'], FakeConfigParser(dic), layers)
        dic['src_layer'] = src_name
        # Link upper layers to this new one
        for l in layers.values():
            if 'inputs' in l:
                l['inputs'] = [inp if inp != src_name else dic['name'] for inp in l['inputs']]
                l['inputLayers'] = [inp if inp['name'] != src_name else dic for inp in l['inputLayers']]
        layers[dic['name']] = dic

    def parse(self, name, mcp, prev_layers, model=None):
        """Parse a neuron layer: output size equals input size; the neuron
        spec string is resolved by parse_neuron()."""
        dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
        dic['outputs'] = dic['numInputs'][0]
        self.parse_neuron(dic['neuron'])
        dic['forceOwnActs'] = False
        print "Initialized neuron layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
        return dic
class EltwiseSumLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self)
def add_params(self, mcp):
LayerWithInputParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['coeffs'] = mcp.safe_get_float_list(name, 'coeffs', default=[1.0] * len(dic['inputs']))
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
if len(set(dic['numInputs'])) != 1:
raise LayerParsingError("Layer '%s': all inputs must have the same dimensionality. Got dimensionalities: %s" % (name, ", ".join(str(s) for s in dic['numInputs'])))
dic['outputs'] = dic['numInputs'][0]
dic['usesInputs'] = False
dic['usesActs'] = False
dic['forceOwnActs'] = False
dic['requiresParams'] = True
print "Initialized elementwise sum layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class EltwiseMaxLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
if len(dic['inputs']) < 2:
raise LayerParsingError("Layer '%s': elementwise max layer must have at least 2 inputs, got %d." % (name, len(dic['inputs'])))
if len(set(dic['numInputs'])) != 1:
raise LayerParsingError("Layer '%s': all inputs must have the same dimensionality. Got dimensionalities: %s" % (name, ", ".join(str(s) for s in dic['numInputs'])))
dic['outputs'] = dic['numInputs'][0]
print "Initialized elementwise max layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class SumLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['stride'] = mcp.safe_get_int(name, 'stride', default=1)
self.verify_divisible(dic['numInputs'][0], dic['stride'], 'input dimensionality', 'stride')
dic['outputs'] = dic['numInputs'][0] / dic['stride']
print "Initialized sum layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class DropoutLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def add_params(self, mcp):
LayerWithInputParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['enable'] = mcp.safe_get_bool(name, 'enable', default=True)
dic['keep'] = mcp.safe_get_float(name, 'keep', default=0.5)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
dic['usesInputs'] = False
dic['usesActs'] = False
dic['forceOwnActs'] = False
dic['outputs'] = dic['numInputs'][0]
print "Initialized %s layer '%s' on GPUs %s, producing %d outputs" % (dic['type'], name, dic['gpus'], dic['outputs'])
return dic
class Dropout2LayerParser(DropoutLayerParser):
    """Parses 'dropout2' layers; parsing is identical to dropout -- only the
    layer type string differs."""
    def __init__(self):
        DropoutLayerParser.__init__(self)
class WeightLayerParser(LayerWithInputParser):
    """Base parser for layers with learnable weights and biases (fc, conv,
    local): reads learning hyperparameters, initializes weight/bias matrices
    (optionally via user-supplied init functions) and handles weight-matrix
    sharing between layers."""
    # Matches a weight-sharing source spec: "layername" or "layername[idx]".
    LAYER_PAT = re.compile(r'^\s*([^\s\[]+)(?:\[(\d+)\])?\s*$') # matches things like layername[5], etc
    def __init__(self, num_inputs=-1):
        LayerWithInputParser.__init__(self, num_inputs=num_inputs)

    @staticmethod
    def get_layer_name(name_str):
        """Split a sharing spec into (layer_name, index_string_or_None), or
        return None when *name_str* does not match LAYER_PAT."""
        m = WeightLayerParser.LAYER_PAT.match(name_str)
        if not m:
            return None
        return m.group(1), m.group(2)

    def add_params(self, mcp):
        """Read learning hyperparameters: momenta, weight costs, and
        learning-rate schedules (scalar values are promoted to constant
        schedules)."""
        LayerWithInputParser.add_params(self, mcp)
        dic, name = self.dic, self.dic['name']
        dic['momW'] = mcp.safe_get_float_list(name, 'momW')
        dic['momB'] = mcp.safe_get_float(name, 'momB')
        dic['superEps'] = mcp.safe_get_float(name, 'superEps', default=0.0)
        dic['superMom'] = mcp.safe_get_float(name, 'superMom', default=0.0)
        dic['wc'] = mcp.safe_get_float_list(name, 'wc', default=[0.0] * len(dic['inputs']))
        dic['wball'] = mcp.safe_get_float_list(name, 'wball', default=[0.0] * len(dic['inputs']))
        self.verify_num_params(['momW', 'wc', 'wball'])
        # dic['wballNormed'] = [wball * nweights for wball,nweights in zip(dic['wball'], dic['weightsPerFilter'])]
        dic['wballNormed'] = dic['wball']
        # Convert from old-style 0.001,0.02 hyperparam specification to new-stye
        # const[base=0.001],const[base=0.02] and so forth
        def convert_scalars_to_schedules(scalars):
            # A bare decimal number becomes a const schedule; anything else is
            # assumed to already be a schedule spec.
            parts = scalars.split(',')
            for i,p in enumerate(parts):
                p = p.strip()
                if re.match('(?:\d*\.)?\d+$', p):
                    parts[i] = 'const[base=%s]' % p
            return parts
        dic['epsW'] = self.parse_params(convert_scalars_to_schedules(mcp.safe_get(name, 'epsW')), lrs_parsers, 'epsW', 'learning rate schedule', num_params=len(dic['inputs']))
        dic['epsB'] = self.parse_params(convert_scalars_to_schedules(mcp.safe_get(name, 'epsB')), lrs_parsers, 'epsB', 'learning rate schedule', num_params=1)[0]
        dic['updatePeriod'] = mcp.safe_get_int(name, 'updatePeriod', default=0) # 0 means update as often as possible
        # TODO: assert that updatePeriod is a multiple of active pass period, which is unknown here.
        # the assert has to go in some post-processing step..
        # The layer consumes gradients only if some base learning rate is nonzero.
        dic['gradConsumer'] = dic['epsB']['params']['base'] > 0 or any(w['params']['base'] > 0 for w in dic['epsW'])

    @staticmethod
    def unshare_weights(layer, layers, matrix_idx=None):
        """Give *layer* private copies of its shared weight matrices -- all of
        them, or just the one at *matrix_idx*."""
        def unshare(layer, layers, indices):
            for i in indices:
                # NOTE(review): 'weightSourceLayers' holds layer-name strings
                # (see parse() below), so this >= 0 test relies on Python 2
                # mixed-type comparison (str >= int is always True) -- verify
                # this is the intended behavior.
                if layer['weightSourceLayers'][i] >= 0:
                    src_matrix_idx = layer['weightSourceMatrixIndices'][i]
                    layer['weightSourceLayers'][i] = ""
                    layer['weightSourceMatrixIndices'][i] = -1
                    layer['weights'][i] = layer['weights'][i].copy()
                    layer['weightsInc'][i] = n.zeros_like(layer['weights'][i])
                    # NOTE(review): the source-layer field was cleared two
                    # statements above, so this message prints "" rather than
                    # the old source name.
                    print "Unshared weight matrix %s[%d] from %s[%d]." % (layer['name'], i, layer['weightSourceLayers'][i], src_matrix_idx)
                else:
                    print "Weight matrix %s[%d] already unshared." % (layer['name'], i)
        if 'weightSourceLayers' in layer:
            unshare(layer, layers, range(len(layer['inputs'])) if matrix_idx is None else [matrix_idx])

    # Load weight/biases initialization module
    def call_init_func(self, param_name, shapes, input_idx=-1):
        """Invoke the user init function named by dic[param_name], given in
        'module.func(p1,p2,...)' form; returns whatever it returns."""
        dic = self.dic
        func_pat = re.compile('^([^\.]+)\.([^\(\)]+)\s*(?:\(([^,]+(?:,[^,]+)*)\))?$')
        m = func_pat.match(dic[param_name])
        if not m:
            # NOTE(review): the message always interpolates dic['initWFunc'],
            # even when param_name is 'initBFunc'.
            raise LayerParsingError("Layer '%s': '%s' parameter must have format 'moduleName.functionName(param1,param2,...)'; got: %s." % (dic['name'], param_name, dic['initWFunc']))
        module, func = m.group(1), m.group(2)
        params = m.group(3).split(',') if m.group(3) is not None else []
        try:
            mod = __import__(module)
            return getattr(mod, func)(dic['name'], input_idx, shapes, params=params) if input_idx >= 0 else getattr(mod, func)(dic['name'], shapes, params=params)
        except (ImportError, AttributeError, TypeError), e:
            raise LayerParsingError("Layer '%s': %s." % (dic['name'], e))

    def make_weights(self, initW, rows, cols, order='C'):
        """Create per-input weight and weight-increment matrices, in priority
        order: user init function, sharing source, random gaussian init
        scaled by initW."""
        dic = self.dic
        dic['weights'], dic['weightsInc'] = [], []
        if dic['initWFunc']: # Initialize weights from user-supplied python function
            # Initialization function is supplied in the format
            # module.func
            for i in xrange(len(dic['inputs'])):
                dic['weights'] += [self.call_init_func('initWFunc', (rows[i], cols[i]), input_idx=i)]
                if type(dic['weights'][i]) != n.ndarray:
                    raise LayerParsingError("Layer '%s[%d]': weight initialization function %s must return numpy.ndarray object. Got: %s." % (dic['name'], i, dic['initWFunc'], type(dic['weights'][i])))
                if dic['weights'][i].dtype != n.float32:
                    # NOTE(review): message text is missing the word "return".
                    raise LayerParsingError("Layer '%s[%d]': weight initialization function %s must weight matrices consisting of single-precision floats. Got: %s." % (dic['name'], i, dic['initWFunc'], dic['weights'][i].dtype))
                if dic['weights'][i].shape != (rows[i], cols[i]):
                    raise LayerParsingError("Layer '%s[%d]': weight matrix returned by weight initialization function %s has wrong shape. Should be: %s; got: %s." % (dic['name'], i, dic['initWFunc'], (rows[i], cols[i]), dic['weights'][i].shape))
                # Convert to desired order
                dic['weights'][i] = n.require(dic['weights'][i], requirements=order)
                dic['weightsInc'] += [n.zeros_like(dic['weights'][i])]
                print "Layer '%s[%d]' initialized weight matrices from function %s" % (dic['name'], i, dic['initWFunc'])
        else:
            for i in xrange(len(dic['inputs'])):
                if dic['weightSourceLayers'][i] != '': # Shared weight matrix
                    src_layer = self.prev_layers[dic['weightSourceLayers'][i]] if dic['weightSourceLayers'][i] != dic['name'] else dic
                    dic['weights'] += [src_layer['weights'][dic['weightSourceMatrixIndices'][i]]]
                    dic['weightsInc'] += [src_layer['weightsInc'][dic['weightSourceMatrixIndices'][i]]]
                    if dic['weights'][i].shape != (rows[i], cols[i]):
                        raise LayerParsingError("Layer '%s': weight sharing source matrix '%s' has shape %dx%d; should be %dx%d."
                                                % (dic['name'], dic['weightSource'][i], dic['weights'][i].shape[0], dic['weights'][i].shape[1], rows[i], cols[i]))
                    print "Layer '%s' initialized weight matrix %d from %s" % (dic['name'], i, dic['weightSource'][i])
                else:
                    dic['weights'] += [n.array(initW[i] * nr.randn(rows[i], cols[i]), dtype=n.single, order=order)]
                    dic['weightsInc'] += [n.zeros_like(dic['weights'][i])]

    def make_biases(self, rows, cols, order='C'):
        """Create the bias matrix (user init function or constant initB) and
        its zeroed increment matrix."""
        dic = self.dic
        if dic['initBFunc']:
            dic['biases'] = self.call_init_func('initBFunc', (rows, cols))
            if type(dic['biases']) != n.ndarray:
                raise LayerParsingError("Layer '%s': bias initialization function %s must return numpy.ndarray object. Got: %s." % (dic['name'], dic['initBFunc'], type(dic['biases'])))
            if dic['biases'].dtype != n.float32:
                raise LayerParsingError("Layer '%s': bias initialization function %s must return numpy.ndarray object consisting of single-precision floats. Got: %s." % (dic['name'], dic['initBFunc'], dic['biases'].dtype))
            if dic['biases'].shape != (rows, cols):
                raise LayerParsingError("Layer '%s': bias vector returned by bias initialization function %s has wrong shape. Should be: %s; got: %s." % (dic['name'], dic['initBFunc'], (rows, cols), dic['biases'].shape))
            dic['biases'] = n.require(dic['biases'], requirements=order)
            print "Layer '%s' initialized bias vector from function %s" % (dic['name'], dic['initBFunc'])
        else:
            dic['biases'] = dic['initB'] * n.ones((rows, cols), order=order, dtype=n.single)
        dic['biasesInc'] = n.zeros_like(dic['biases'])

    def parse(self, name, mcp, prev_layers, model):
        """Parse weight-layer fields common to all subclasses: init specs and
        weight-sharing sources. Fills 'weightSourceLayers' (layer-name
        strings, '' for unshared) and 'weightSourceMatrixIndices'."""
        dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
        dic['requiresParams'] = True
        dic['gradConsumer'] = True
        dic['usesActs'] = False
        dic['initW'] = mcp.safe_get_float_list(name, 'initW', default=0.01)
        dic['initB'] = mcp.safe_get_float(name, 'initB', default=0)
        dic['initWFunc'] = mcp.safe_get(name, 'initWFunc', default="")
        dic['initBFunc'] = mcp.safe_get(name, 'initBFunc', default="")
        # Find shared weight matrices
        dic['weightSource'] = mcp.safe_get_list(name, 'weightSource', default=[''] * len(dic['inputs']))
        self.verify_num_params(['initW'])
        self.verify_num_params(['weightSource'], auto_expand=False)
        dic['weightSourceLayers'] = []
        dic['weightSourceMatrixIndices'] = []
        for i, src_name in enumerate(dic['weightSource']):
            src_layer_matrix_idx = -1
            src_layer_name = ''
            if src_name != '':
                src_layer_match = WeightLayerParser.get_layer_name(src_name)
                if src_layer_match is None:
                    raise LayerParsingError("Layer '%s': unable to parse weight sharing source '%s'. Format is layer[idx] or just layer, in which case idx=0 is used." % (name, src_name))
                src_layer_name = src_layer_match[0]
                # Omitted index defaults to matrix 0 of the source layer.
                src_layer_matrix_idx = int(src_layer_match[1]) if src_layer_match[1] is not None else 0
                if src_layer_name not in prev_layers and src_layer_name != name:
                    raise LayerParsingError("Layer '%s': weight sharing source layer '%s' does not exist." % (name, src_layer_name))
                # src_layer_idx = prev_names.index(src_layer_name) if src_layer_name != name else len(prev_names)
                src_layer = prev_layers[src_layer_name] if src_layer_name != name else dic
                if src_layer['gpu'] != dic['gpu']:
                    raise LayerParsingError("Layer '%s': weight sharing source layer '%s' runs on GPUs %s, while '%s' runs on GPUs %s." % (name, src_layer_name, src_layer['gpu'], name, dic['gpu']))
                if src_layer['type'] != dic['type']:
                    raise LayerParsingError("Layer '%s': weight sharing source layer '%s' is of type '%s'; should be '%s'." % (name, src_layer_name, src_layer['type'], dic['type']))
                if src_layer_name != name and len(src_layer['weights']) <= src_layer_matrix_idx:
                    raise LayerParsingError("Layer '%s': weight sharing source layer '%s' has %d weight matrices, but '%s[%d]' requested." % (name, src_layer_name, len(src_layer['weights']), src_name, src_layer_matrix_idx))
                if src_layer_name == name and src_layer_matrix_idx >= i:
                    raise LayerParsingError("Layer '%s': weight sharing source '%s[%d]' not defined yet." % (name, name, src_layer_matrix_idx))
            dic['weightSourceLayers'] += [src_layer_name]
            dic['weightSourceMatrixIndices'] += [src_layer_matrix_idx]
        return dic
class FCLayerParser(WeightLayerParser):
def __init__(self):
WeightLayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = WeightLayerParser.parse(self, name, mcp, prev_layers, model)
dic['outputs'] = mcp.safe_get_int(name, 'outputs')
dic['weightsPerFilter'] = dic['numInputs']
self.verify_num_range(dic['outputs'], 'outputs', 1, None)
self.make_weights(dic['initW'], dic['numInputs'], [dic['outputs']] * len(dic['numInputs']), order='F')
self.make_biases(1, dic['outputs'], order='F')
print "Initialized fully-connected layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class SplitFCLayerParser(WeightLayerParser):
    """Parser for split fully-connected ('sfc') layers, whose outputs are
    partitioned into 'parts' equally-sized groups."""
    def __init__(self):
        WeightLayerParser.__init__(self)
    def parse(self, name, mcp, prev_layers, model):
        dic = WeightLayerParser.parse(self, name, mcp, prev_layers, model)
        dic['parts'] = mcp.safe_get_int(name, 'parts')
        # Total outputs = per-part outputs * number of parts.
        dic['outputs'] = mcp.safe_get_int(name, 'outputs') * dic['parts']
        dic['weightsPerFilter'] = dic['numInputs']
        self.verify_num_range(dic['parts'], 'parts', 1, None)
        # Each input gets a weight matrix with outputs/parts columns.
        self.make_weights(dic['initW'], dic['numInputs'], [dic['outputs']/dic['parts']] * len(dic['numInputs']), order='F')
        self.make_biases(1, dic['outputs'], order='F')
        # Every input's dimensionality must divide evenly among the parts.
        for i in xrange(len(dic['numInputs'])):
            self.verify_divisible(dic['numInputs'][i], dic['parts'], 'numInputs', 'parts', input_idx=i)
        print "Initialized split fully-connected layer '%s' on GPUs %s, producing %d outputs in %d parts" % (name, dic['gpus'], dic['outputs'], dic['parts'])
        return dic
class LocalLayerParser(WeightLayerParser):
def __init__(self):
WeightLayerParser.__init__(self)
# Convert convolutional layer to unshared, locally-connected layer
@staticmethod
def conv_to_local(layers, lname):
layer = layers[lname]
if layer['type'] == 'conv':
layer['type'] = 'local'
for inp,inpname in enumerate(layer['inputs']):
src_layer_name = layer['weightSourceLayers'][inp]
if src_layer_name != '':
src_layer = layers[src_layer_name]
src_matrix_idx = layer['weightSourceMatrixIndices'][inp]
LocalLayerParser.conv_to_local(layers, src_layer_name)
for w in ('weights', 'weightsInc'):
layer[w][inp] = src_layer[w][src_matrix_idx]
else:
layer['weights'][inp] = n.require(n.reshape(n.tile(n.reshape(layer['weights'][inp], (1, n.prod(layer['weights'][inp].shape))), (layer['modules'], 1)),
(layer['modules'] * layer['filterChannels'][inp] * layer['filterPixels'][inp], layer['filters'])),
requirements='C')
layer['weightsInc'][inp] = n.zeros_like(layer['weights'][inp])
if layer['sharedBiases']:
layer['biases'] = n.require(n.repeat(layer['biases'], layer['modules'], axis=0), requirements='C')
layer['biasesInc'] = n.zeros_like(layer['biases'])
print "Converted layer '%s' from convolutional to unshared, locally-connected" % layer['name']
# Also call this function on any layers sharing my weights
for l in layers:
if 'weightSourceLayers' in l and lname in l['weightSourceLayers']:
LocalLayerParser.conv_to_local(layers, l)
return layer
def parse(self, name, mcp, prev_layers, model):
dic = WeightLayerParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
dic['usesActs'] = False
# Supplied values
dic['channels'] = mcp.safe_get_int_list(name, 'channels')
dic['padding'] = mcp.safe_get_int_list(name, 'padding', default=[0]*len(dic['inputs']))
dic['stride'] = mcp.safe_get_int_list(name, 'stride', default=[1]*len(dic['inputs']))
dic['filterSize'] = mcp.safe_get_int_list(name, 'filterSize')
dic['filters'] = mcp.safe_get_int_list(name, 'filters')
dic['groups'] = mcp.safe_get_int_list(name, 'groups', default=[1]*len(dic['inputs']))
dic['initW'] = mcp.safe_get_float_list(name, 'initW')
dic['initCFunc'] = mcp.safe_get(name, 'initCFunc', default='')
dic['modulesX'] = mcp.safe_get_int(name, 'modulesX', default=0)
self.verify_num_params(['channels', 'padding', 'stride', 'filterSize', \
'filters', 'groups', 'initW'])
self.verify_num_range(dic['stride'], 'stride', 1, None)
self.verify_num_range(dic['filterSize'],'filterSize', 1, None)
self.verify_num_range(dic['padding'], 'padding', 0, None)
self.verify_num_range(dic['channels'], 'channels', 1, None)
self.verify_num_range(dic['groups'], 'groups', 1, None)
self.verify_num_range(dic['modulesX'], 'modulesX', 0, None)
for i in xrange(len(dic['filters'])):
self.verify_divisible(dic['filters'][i], 16, 'filters', input_idx=i)
# Computed values
dic['imgPixels'] = [numInputs/channels for numInputs,channels in zip(dic['numInputs'], dic['channels'])]
dic['imgSize'] = [int(n.sqrt(imgPixels)) for imgPixels in dic['imgPixels']]
self.verify_num_range(dic['imgSize'], 'imgSize', 1, None)
dic['filters'] = [filters*groups for filters,groups in zip(dic['filters'], dic['groups'])]
dic['filterPixels'] = [filterSize**2 for filterSize in dic['filterSize']]
if dic['modulesX'] <= 0:
dic['modulesX'] = [1 + int(ceil((2*padding + imgSize - filterSize) / float(stride))) for padding,imgSize,filterSize,stride in zip(dic['padding'], dic['imgSize'], dic['filterSize'], dic['stride'])]
else:
dic['modulesX'] = [dic['modulesX']] * len(dic['inputs'])
dic['filterChannels'] = [channels/groups for channels,groups in zip(dic['channels'], dic['groups'])]
if len(set(dic['modulesX'])) != 1 or len(set(dic['filters'])) != 1:
raise LayerParsingError("Layer '%s': all inputs must produce equally-dimensioned output. Dimensions are: %s." % (name, ", ".join("%dx%dx%d" % (filters, modulesX, modulesX) for filters,modulesX in zip(dic['filters'], dic['modulesX']))))
dic['modulesX'] = dic['modulesX'][0]
dic['modules'] = dic['modulesX']**2
dic['filters'] = dic['filters'][0]
dic['outputs'] = dic['modules'] * dic['filters']
# dic['filterConns'] = [[]] * len(dic['inputs'])
for i in xrange(len(dic['inputs'])):
if dic['numInputs'][i] % dic['imgPixels'][i] != 0 or dic['imgSize'][i] * dic['imgSize'][i] != dic['imgPixels'][i]:
raise LayerParsingError("Layer '%s[%d]': has %-d dimensional input, not interpretable as square %d-channel images" % (name, i, dic['numInputs'][i], dic['channels'][i]))
if dic['channels'][i] > 3 and dic['channels'][i] % 4 != 0:
raise LayerParsingError("Layer '%s[%d]': number of channels must be smaller than 4 or divisible by 4" % (name, i))
# if dic['filterSize'][i] > totalPadding[i] + dic['imgSize'][i]:
# raise LayerParsingError("Layer '%s[%d]': filter size (%d) greater than image size + padding (%d)" % (name, i, dic['filterSize'][i], dic['padding'][i] + dic['imgSize'][i]))
if -dic['padding'][i] + dic['stride'][i] * (dic['modulesX'] - 1) + dic['filterSize'][i] < dic['imgSize'][i]:
raise LayerParsingError("Layer '%s[%d]': %dx%d output map with padding=%d, stride=%d does not cover entire input image." % (name, i, dic['modulesX'], dic['outputsX'], dic['padding'][i], dic['stride'][i]))
if dic['groups'][i] > 1:
self.verify_divisible(dic['channels'][i], 4*dic['groups'][i], 'channels', '4 * groups', input_idx=i)
self.verify_divisible(dic['channels'][i], dic['groups'][i], 'channels', 'groups', input_idx=i)
self.verify_divisible(dic['filters'], 16*dic['groups'][i], 'filters * groups', input_idx=i)
dic['padding'][i] = -dic['padding'][i]
# dic['overSample'] = [groups*filterChannels/channels for groups,filterChannels,channels in zip(dic['groups'], dic['filterChannels'], dic['channels'])]
dic['weightsPerFilter'] = [fc * (fz**2) for fc, fz in zip(dic['filterChannels'], dic['filterSize'])]
return dic
class ConvLayerParser(LocalLayerParser):
def __init__(self):
LocalLayerParser.__init__(self)
def add_params(self, mcp):
LocalLayerParser.add_params(self, mcp)
self.dic['wcNormMax'] = mcp.safe_get_float_list(self.dic['name'], 'wcNormMax', default=[0.0] * len(self.dic['inputs']))
self.dic['wcNormMin'] = mcp.safe_get_float_list(self.dic['name'], 'wcNormMin', default=[0.0] * len(self.dic['inputs']))
self.verify_num_params(['wcNormMax', 'wcNormMin'])
for min,max in zip(self.dic['wcNormMin'], self.dic['wcNormMax']):
if min > max:
raise LayerParsingError("Layer '%s': wcNormMin must be <= wcNormMax." % (self.dic['name']))
def parse(self, name, mcp, prev_layers, model):
dic = LocalLayerParser.parse(self, name, mcp, prev_layers, model)
dic['sumWidth'] = mcp.safe_get_int(name, 'sumWidth')
dic['sharedBiases'] = mcp.safe_get_bool(name, 'sharedBiases', default=True)
num_biases = dic['filters'] if dic['sharedBiases'] else dic['modules']*dic['filters']
eltmult = lambda list1, list2: [l1 * l2 for l1,l2 in zip(list1, list2)]
self.make_weights(dic['initW'], eltmult(dic['filterPixels'], dic['filterChannels']), [dic['filters']] * len(dic['inputs']), order='C')
self.make_biases(num_biases, 1, order='C')
print "Initialized convolutional layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (name, dic['gpus'], dic['modulesX'], dic['modulesX'], dic['filters'])
return dic
class LocalUnsharedLayerParser(LocalLayerParser):
def __init__(self):
LocalLayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = LocalLayerParser.parse(self, name, mcp, prev_layers, model)
eltmult = lambda list1, list2: [l1 * l2 for l1,l2 in zip(list1, list2)]
scmult = lambda x, lst: [x * l for l in lst]
self.make_weights(dic['initW'], scmult(dic['modules'], eltmult(dic['filterPixels'], dic['filterChannels'])), [dic['filters']] * len(dic['inputs']), order='C')
self.make_biases(dic['modules'] * dic['filters'], 1, order='C')
print "Initialized locally-connected layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (name, dic['gpus'], dic['modulesX'], dic['modulesX'], dic['filters'])
return dic
class DataLayerParser(LayerParser):
def __init__(self):
LayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = LayerParser.parse(self, name, mcp, prev_layers, model)
dic['dataIdx'] = mcp.safe_get_int(name, 'dataIdx')
dic['start'] = mcp.safe_get_int(name, 'start', default=0)
dic['end'] = mcp.safe_get_int(name, 'end', default=model.train_data_provider.get_data_dims(idx=dic['dataIdx']))
dic['outputs'] = dic['end'] - dic['start']
# dic['usesActs'] = False
print "Initialized data layer '%s', producing %d outputs" % (name, dic['outputs'])
return dic
class SoftmaxLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['outputs'] = dic['inputLayers'][0]['outputs']
print "Initialized softmax layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class ConcatentionLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['outputs'] = sum(l['outputs'] for l in dic['inputLayers'])
dic['copyOffsets'] = [sum(dic['inputLayers'][j]['outputs'] for j in xrange(i)) for i in xrange(len(dic['inputLayers']))]
print "Initialized concatenation layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class PassThroughLayerParser(LayerWithInputParser):
    """Parser for 'pass' layers, which forward their inputs unchanged."""
    def __init__(self):
        LayerWithInputParser.__init__(self)
    # Note: this doesn't verify all the necessary constraints. Layer construction may still fail in C++ code.
    # For example, it does not verify that every layer only has one pass-through parent. Obviously having
    # two such parents is incoherent.
    def parse(self, name, mcp, prev_layers, model):
        dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
#        if len(dic['inputLayers']) == 1:
#            raise LayerParsingError("Layer %s: pass-through layer must have more than one input." % dic['name'])
        # Replica counts must match so activity can be forwarded one-to-one.
        if len(dic['gpu']) != len(dic['inputLayers'][0]['gpu']):
            raise LayerParsingError("Layer '%s': number of replicas in pass-through layer must be equivalent to number of replicas in input layers." % dic['name'])
        for inp in dic['inputLayers']:
            # Two pass-through layers may not consume the same input while
            # operating on overlapping GPU sets.
            conflicting_layers = [l for l in prev_layers.values() if l['type'] == 'pass' and inp['name'] in l['inputs'] and len(set(dic['gpu']).intersection(set(l['gpu']))) > 0]
            if len(conflicting_layers) > 0:
                raise LayerParsingError("Layer '%s' conflicts with layer '%s'. Both pass-through layers take layer '%s' as input and operate on an overlapping set of GPUs." % (dic['name'], conflicting_layers[0]['name'], inp['name']))
        dic['outputs'] = sum(l['outputs'] for l in dic['inputLayers'])
#        dic['copyOffsets'] = [sum(dic['inputLayers'][j]['outputs'] for j in xrange(i)) for i in xrange(len(dic['inputLayers']))]
        print "Initialized pass-through layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
        return dic
class PoolLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def add_params(self, mcp):
LayerWithInputParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['sizeX'] = mcp.safe_get_int(name, 'sizeX')
dic['start'] = mcp.safe_get_int(name, 'start', default=0)
dic['stride'] = mcp.safe_get_int(name, 'stride')
dic['outputsX'] = mcp.safe_get_int(name, 'outputsX', default=0)
dic['pool'] = mcp.safe_get(name, 'pool')
# Avg pooler does not use its acts or inputs
dic['usesActs'] = dic['pool'] != 'avg'
dic['usesInputs'] = dic['pool'] != 'avg'
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
self.verify_num_range(dic['sizeX'], 'sizeX', 1, dic['imgSize'])
self.verify_num_range(dic['stride'], 'stride', 1, dic['sizeX'])
self.verify_num_range(dic['outputsX'], 'outputsX', 0, None)
self.verify_num_range(dic['channels'], 'channels', 1, None)
if LayerWithInputParser.grad_consumers_below(dic):
self.verify_divisible(dic['channels'], 16, 'channels')
self.verify_str_in(dic['pool'], 'pool', ['max', 'maxabs', 'avg'])
self.verify_img_size()
if dic['outputsX'] <= 0:
dic['outputsX'] = int(ceil((dic['imgSize'] - dic['start'] - dic['sizeX']) / float(dic['stride']))) + 1;
dic['outputs'] = dic['outputsX']**2 * dic['channels']
print "Initialized %s-pooling layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (dic['pool'], name, dic['gpus'], dic['outputsX'], dic['outputsX'], dic['channels'])
return dic
class CrossMapPoolLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['size'] = mcp.safe_get_int(name, 'size')
dic['start'] = mcp.safe_get_int(name, 'start', default=0)
dic['stride'] = mcp.safe_get_int(name, 'stride')
dic['outputChannels'] = mcp.safe_get_int(name, 'outputs', default=0)
dic['pool'] = mcp.safe_get(name, 'pool')
dic['requiresParams'] = False
# Avg pooler does not use its acts or inputs
dic['usesActs'] = 'pool' != 'avg'
dic['usesInputs'] = 'pool' != 'avg'
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['outputs'] = dic['outputChannels'] * dic['imgPixels']
self.verify_num_range(dic['size'], 'size', 1, dic['channels'])
self.verify_num_range(dic['stride'], 'stride', 1, dic['size'])
self.verify_num_range(dic['outputChannels'], 'outputChannels', 0, None)
self.verify_num_range(dic['channels'], 'channels', 1, None)
self.verify_num_range(dic['start'], 'start', None, 0)
self.verify_str_in(dic['pool'], 'pool', ['max'])
self.verify_img_size()
covered_chans = dic['start'] + (dic['outputChannels'] - 1) * dic['stride'] + dic['size']
if covered_chans < dic['channels']:
raise LayerParsingError("Layer '%s': cross-map pooling with start=%d, stride=%d, size=%d, outputs=%d covers only %d of %d input channels." % \
(name, dic['start'], dic['stride'], dic['size'], dic['outputChannels'], covered_chans, dic['channels']))
print "Initialized cross-map %s-pooling layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (dic['pool'], name, dic['gpus'], dic['imgSize'], dic['imgSize'], dic['outputChannels'])
return dic
class NormLayerParser(LayerWithInputParser):
    """Parser for the normalization layer types ('rnorm', 'cnorm', 'cmrnorm');
    the variant is selected by the norm_type constructor argument."""
    RESPONSE_NORM = 'response'
    CONTRAST_NORM = 'contrast'
    CROSSMAP_RESPONSE_NORM = 'cross-map response'

    def __init__(self, norm_type):
        LayerWithInputParser.__init__(self, num_inputs=1)
        self.norm_type = norm_type

    def add_params(self, mcp):
        LayerWithInputParser.add_params(self, mcp)
        dic, name = self.dic, self.dic['name']
        dic['scale'] = mcp.safe_get_float(name, 'scale')
        # Normalize the scale by the neighborhood size: linear in 'size' for
        # cross-map norm, quadratic (size x size window) for spatial norms.
        dic['scale'] /= dic['size'] if self.norm_type == self.CROSSMAP_RESPONSE_NORM else dic['size']**2
        dic['pow'] = mcp.safe_get_float(name, 'pow')
        dic['minDiv'] = mcp.safe_get_float(name, 'minDiv', default=1.0)

    def parse(self, name, mcp, prev_layers, model):
        dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
        dic['requiresParams'] = True
        dic['channels'] = mcp.safe_get_int(name, 'channels')
        dic['size'] = mcp.safe_get_int(name, 'size')
        dic['blocked'] = mcp.safe_get_bool(name, 'blocked', default=False)
        dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
        dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
        # Contrast normalization layer does not use its inputs
        dic['usesInputs'] = self.norm_type != self.CONTRAST_NORM
        self.verify_num_range(dic['channels'], 'channels', 1, None)
        if self.norm_type == self.CROSSMAP_RESPONSE_NORM:
            self.verify_num_range(dic['size'], 'size', 2, dic['channels'])
            if dic['channels'] % 16 != 0:
                raise LayerParsingError("Layer '%s': number of channels must be divisible by 16 when using crossMap" % name)
        else:
            self.verify_num_range(dic['size'], 'size', 1, dic['imgSize'])
        if self.norm_type != self.CROSSMAP_RESPONSE_NORM and dic['channels'] > 3 and dic['channels'] % 4 != 0:
            raise LayerParsingError("Layer '%s': number of channels must be smaller than 4 or divisible by 4" % name)
        self.verify_img_size()
        # Normalization is shape-preserving: outputs equal inputs.
        dic['outputs'] = dic['imgPixels'] * dic['channels']
        print "Initialized %s-normalization layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (self.norm_type, name, dic['gpus'], dic['imgSize'], dic['imgSize'], dic['channels'])
        return dic
class CostParser(LayerWithInputParser):
    """Base parser for cost (objective) layers."""
    def __init__(self, num_inputs=-1):
        LayerWithInputParser.__init__(self, num_inputs=num_inputs)

    def parse(self, name, mcp, prev_layers, model):
        dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
        dic['requiresParams'] = True
        # Stored as string because python can't pickle lambda functions
        dic['outputFilter'] = 'lambda costs,num_cases: [c/num_cases for c in costs]'
        children = mcp.safe_get_list(name, 'children', default=[])
        dic['children'] = children
        # Aggregated costs only produce outputs which are additive.
        for child in children:
            if child not in prev_layers:
                raise LayerParsingError("Layer '%s': child cost layer '%s' not defined" % (name, child))
            if prev_layers[child]['type'] != dic['type']:
                raise LayerParsingError("Layer '%s': child cost layer '%s' must have same type as parent" % (name, child))
            prev_layers[child]['aggregated'] = 1
        dic['aggregated'] = len(children) > 0
        # Cost layers have no activation neuron.
        del dic['neuron']
        return dic

    def add_params(self, mcp):
        LayerWithInputParser.add_params(self, mcp)
        dic, name = self.dic, self.dic['name']
        dic['coeff'] = mcp.safe_get_float(name, 'coeff')
        # A zero coefficient means this cost contributes no gradient.
        dic['gradConsumer'] = dic['coeff'] > 0
class CrossEntCostParser(CostParser):
    """Parser for cross-entropy ('cost.ce') cost layers.

    Expects two inputs: (0) the labels and (1) a softmax layer, both sized to
    the number of classes in the dataset."""
    def __init__(self):
        CostParser.__init__(self, num_inputs=2)
    def parse(self, name, mcp, prev_layers, model):
        dic = CostParser.parse(self, name, mcp, prev_layers, model)
        if dic['numInputs'][0] != model.train_data_provider.get_num_classes(): # first input must be labels
            raise LayerParsingError("Layer '%s': Dimensionality of first input must be equal to number of labels" % name)
        if dic['inputLayers'][1]['type'] != 'softmax':
            raise LayerParsingError("Layer '%s': Second input must be softmax layer" % name)
        if dic['numInputs'][1] != model.train_data_provider.get_num_classes():
            raise LayerParsingError("Layer '%s': Softmax input '%s' must produce %d outputs, because that is the number of classes in the dataset" \
                                    % (name, dic['inputs'][1], model.train_data_provider.get_num_classes()))
        print "Initialized cross-entropy cost '%s' on GPUs %s" % (name, dic['gpus'])
        return dic
class LogregCostParser(CostParser):
def __init__(self):
CostParser.__init__(self, num_inputs=2)
def add_params(self, mcp):
CostParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['topk'] = mcp.safe_get_int(name, 'topk', default=1)
if dic['topk'] > dic['numInputs'][1]:
raise LayerParsingError("Layer '%s': parameter 'topk'must not have value greater than the number of classess." % (name))
def parse(self, name, mcp, prev_layers, model):
dic = CostParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
if dic['numInputs'][0] != 1: # first input must be labels
raise LayerParsingError("Layer '%s': dimensionality of first input must be 1" % name)
if dic['inputLayers'][1]['type'] != 'softmax':
raise LayerParsingError("Layer '%s': second input must be softmax layer" % name)
if dic['numInputs'][1] != model.train_data_provider.get_num_classes():
raise LayerParsingError("Layer '%s': softmax input '%s' must produce %d outputs, because that is the number of classes in the dataset" \
% (name, dic['inputs'][1], model.train_data_provider.get_num_classes()))
print "Initialized logistic regression cost '%s' on GPUs %s" % (name, dic['gpus'])
return dic
class BinomialCrossEntCostParser(CostParser):
    """Parser for binomial cross-entropy ('cost.bce') cost layers."""
    def __init__(self):
        CostParser.__init__(self, num_inputs=2)
    def add_params(self, mcp):
        CostParser.add_params(self, mcp)
        # Weight applied to the positive class when computing the cost.
        self.dic['posWeight'] = mcp.safe_get_float(self.dic['name'], 'posWeight', default=1.0)
    def parse(self, name, mcp, prev_layers, model):
        dic = CostParser.parse(self, name, mcp, prev_layers, model)
        # Targets and predictions must be the same size.
        if dic['numInputs'][0] != dic['numInputs'][1]:
            raise LayerParsingError("Layer '%s': both inputs must produce the same number of outputs" % (name))
        # Warn (don't fail) when the prediction input isn't squashed to (0,1).
        if 'neuron' not in dic['inputLayers'][1] or dic['inputLayers'][1]['neuron'] != 'logistic':
            print "WARNING: Layer '%s': input '%s' is not logistic, results may not be what you intend." % (dic['name'], dic['inputs'][1])
        # Subclasses (e.g. 'cost.dce') reuse this parse, so only announce when
        # the layer really is a plain bce cost.
        if dic['type'] == 'cost.bce':
            print "Initialized binomial cross-entropy cost '%s' on GPUs %s" % (name, dic['gpus'])
        dic['computeSoftmaxErrorRate'] = True
        return dic
class DetectionCrossEntCostParser(BinomialCrossEntCostParser):
    """Parser for detection cross-entropy ('cost.dce') cost layers."""
    def __init__(self):
        BinomialCrossEntCostParser.__init__(self)
    def parse(self, name, mcp, prev_layers, model):
        dic = BinomialCrossEntCostParser.parse(self, name, mcp, prev_layers, model)
        if dic['numInputs'][0] != model.train_data_provider.get_num_classes(): # first input must be labels
            raise LayerParsingError("Layer '%s': Dimensionality of first input must be equal to number of labels" % name)
        dic['computeSoftmaxErrorRate'] = False
        # Both filters are stored as lambda *source strings* (lambdas can't be
        # pickled); they post-process the raw cost vector into per-class ratios
        # and format it for display, respectively.
        dic['outputFilter'] = 'lambda costs,num_cases: [c/num_cases for c in costs[:2]] + [(class_cost[2] / class_cost[j] if class_cost[j] > 0 else n.inf) for class_cost in [costs[2:][i*3:(i+1)*3] for i in range(len(costs[2:])/3)] for j in range(2)]'
        dic['outputFilterFormatter'] = 'lambda self,costs: "(crossent) %.6f, (err) %.6f, " % (costs[0], costs[1]) + ", ".join("(%s) %.6f, %.6f" % (self.train_data_provider.batch_meta["label_names"][i/2-1],costs[i],costs[i+1]) for i in xrange(2, len(costs), 2))'
        print "Initialized detection cross-entropy cost '%s' on GPUs %s" % (name, dic['gpus'])
        return dic
class SumOfSquaresCostParser(CostParser):
def __init__(self):
CostParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model):
dic = CostParser.parse(self, name, mcp, prev_layers, model)
print "Initialized sum-of-squares cost '%s' on GPUs %s" % (name, dic['gpus'])
return dic
# All the layer parsers
# Maps the 'type' string from the layer config file to a factory that
# constructs a fresh parser instance for that layer type.
layer_parsers = {'data' : lambda : DataLayerParser(),
                 'fc': lambda : FCLayerParser(),
                 'sfc': lambda : SplitFCLayerParser(),
                 'conv': lambda : ConvLayerParser(),
                 'local': lambda : LocalUnsharedLayerParser(),
                 'softmax': lambda : SoftmaxLayerParser(),
                 'eltsum': lambda : EltwiseSumLayerParser(),
                 'eltmax': lambda : EltwiseMaxLayerParser(),
                 'sum': lambda : SumLayerParser(),
                 'neuron': lambda : NeuronLayerParser(),
                 'pool': lambda : PoolLayerParser(),
                 'cmpool': lambda : CrossMapPoolLayerParser(),
                 'rnorm': lambda : NormLayerParser(NormLayerParser.RESPONSE_NORM),
                 'cnorm': lambda : NormLayerParser(NormLayerParser.CONTRAST_NORM),
                 'cmrnorm': lambda : NormLayerParser(NormLayerParser.CROSSMAP_RESPONSE_NORM),
                 'nailbed': lambda : NailbedLayerParser(),
                 'blur': lambda : GaussianBlurLayerParser(),
                 'href': lambda : HorizontalReflectionLayerParser(),
                 'resize': lambda : ResizeLayerParser(),
                 'rgb2yuv': lambda : RGBToYUVLayerParser(),
                 'rgb2lab': lambda : RGBToLABLayerParser(),
                 'rscale': lambda : RandomScaleLayerParser(),
                 'crop': lambda : CropLayerParser(),
                 'concat': lambda : ConcatentionLayerParser(),
                 'pass': lambda : PassThroughLayerParser(),
                 'dropout': lambda : DropoutLayerParser(),
                 'dropout2': lambda : Dropout2LayerParser(),
                 'cost.logreg': lambda : LogregCostParser(),
                 'cost.ce': lambda : CrossEntCostParser(),
                 'cost.bce': lambda : BinomialCrossEntCostParser(),
                 'cost.dce': lambda : DetectionCrossEntCostParser(),
                 'cost.sum2': lambda : SumOfSquaresCostParser()}
# All the neuron parsers
# This isn't a name --> parser mapping as the layer parsers above because neurons don't have fixed names.
# A user may write tanh[0.5,0.25], etc.
# Each entry declares the neuron's name/signature, a human-readable formula,
# and its uses_acts/uses_inputs flags (presumably whether the backward pass
# needs the layer's outputs and/or inputs — confirm in NeuronParser).
neuron_parsers = sorted([NeuronParser('ident', 'f(x) = x', uses_acts=False, uses_inputs=False),
                         NeuronParser('logistic', 'f(x) = 1 / (1 + e^-x)', uses_acts=True, uses_inputs=False),
                         NeuronParser('abs', 'f(x) = |x|', uses_acts=False, uses_inputs=True),
                         NeuronParser('relu', 'f(x) = max(0, x)', uses_acts=True, uses_inputs=False),
                         NeuronParser('nrelu', 'f(x) = max(0, x) + noise', uses_acts=True, uses_inputs=False),
                         NeuronParser('softrelu', 'f(x) = log(1 + e^x)', uses_acts=True, uses_inputs=False),
                         NeuronParser('square', 'f(x) = x^2', uses_acts=False, uses_inputs=True),
                         NeuronParser('sqrt', 'f(x) = sqrt(x)', uses_acts=True, uses_inputs=False),
                         ParamNeuronParser('log[a]', 'f(x) = log(a + x)', uses_acts=False, uses_inputs=True),
                         ParamNeuronParser('tanh[a,b]', 'f(x) = a * tanh(b * x)', uses_acts=True, uses_inputs=False),
                         ParamNeuronParser('brelu[a]', 'f(x) = min(a, max(0, x))', uses_acts=True, uses_inputs=False),
                         ParamNeuronParser('linear[a,b]', 'f(x) = a * x + b', uses_acts=True, uses_inputs=False),
                         ParamNeuronParser('drelu[a]', 'f(x) = x - a * tanh(x / a)', uses_acts=False, uses_inputs=True)],
                        key=lambda x:x.type)
# Learning rate schedules
# Each entry names a schedule and its bracketed parameter list
# (e.g. const[fbase] takes a single 'fbase' parameter).
lrs_parsers = sorted([ParamParser('const[fbase]'),
                      ParamParser('linear[fbase;ftgtFactor]'),
                      ParamParser('exp[fbase;ftgtFactor]'),
                      ParamParser('dexp[fbase;ftgtFactor;inumSteps]')])
|
dennis910130/cuda-convnet2
|
layer.py
|
Python
|
apache-2.0
| 82,368
|
[
"Gaussian",
"NEURON"
] |
b9431e47ca572515a63ba630688429fbfe7126dfbd95c1f1865a2a86f8869d3a
|
from errorHandler import *
from phamerator_manage_db import *
from db_conf import db_conf
class workUnitSeq:
    """One sequence in a work unit: a gene id paired with its protein translation."""
    def __init__(self, id, translation):
        self.id, self.translation = id, translation
class alignmentWorkUnit(errorHandler):
    """Base alignment work unit; holds a mapping of query id -> alignments."""
    def __init__(self, c):
        errorHandler.__init__(self)
        # The database cursor is intentionally not retained on the instance.
        self.alignments = {}
#class clustalwWorkUnit(alignmentWorkUnit, errorHandler):
class clustalwWorkUnit(errorHandler):
    """A clustalw alignment work unit: claims one query gene from the MySQL
    'gene' table and builds the set of earlier genes to align it against.

    NOTE(review): every query below is built with string interpolation. The
    interpolated ids come from the database itself, but parameterized queries
    (PEP 249 placeholders) would be safer.
    """
    def __init__(self, c, query_id=None):
        errorHandler.__init__(self)
        # BUG FIX: get_matches() reads self.alignments, but nothing initialized
        # it (the alignmentWorkUnit base class that used to provide it was
        # dropped from the class declaration); start with an empty mapping.
        self.alignments = {}
        if not query_id:
            # Claim the next gene whose clustalw alignment is still available.
            sqlQuery = "SELECT id FROM gene WHERE clustalw_status = 'avail' LIMIT 1"
            try:
                c.execute(sqlQuery)
                try:
                    self.query_id = str(int(c.fetchall()[0][0]))
                except:
                    # Nothing left to do: signal by leaving query_id as None.
                    self.query_id = None
                    return
            except: self.show_sql_errors(c)
        else:
            # BUG FIX: a caller-supplied query_id was previously ignored,
            # leaving self.query_id unset and crashing _mark_pending().
            self.query_id = str(query_id)
        self._mark_pending(c)
        self.query_translation = get_translation_from_id(c, self.query_id)
        self.create_database(c)
    def set_cursor(self, c):
        # Cursors are passed per-call instead of being stored on the instance.
        pass
    def _mark_pending(self, c):
        """Mark the claimed gene 'pending' so other workers skip it."""
        sqlQuery = "update gene set clustalw_status = 'pending' where id = %s" % self.query_id
        try: c.execute(sqlQuery)
        except: self.show_sql_errors(c)
        sqlQuery = "COMMIT"
        try: c.execute(sqlQuery)
        except: self.show_sql_errors(c)
    def create_database(self,c):
        """Collect every gene with a smaller id than the query as workUnitSeqs."""
        self.database = []
        sqlQuery = "select id, translation from gene where id < %s" % self.query_id
        try: c.execute(sqlQuery)
        except: self.show_sql_errors(c)
        for result in c.fetchall():
            record = workUnitSeq(str(int(result[0])), result[1])
            self.database.append(record)
    def add_matches(self, matches, c):
        '''called by a compute node to keep track of a good clustalw alignment'''
        # matches = [(qid1, sid1, score1), (qid2, sid2, score2), ...]
        for qid, sid, score in matches:
            self._add_match(qid, sid, score, c)
        # All matches stored: mark the query gene finished and commit.
        sqlQuery = "UPDATE gene SET clustalw_status = 'done' WHERE id = %s" % self.query_id
        try: c.execute(sqlQuery)
        except: self.show_sql_errors(c)
        sqlQuery = "COMMIT"
        try: c.execute(sqlQuery)
        except: self.show_sql_errors(c)
    def _add_match(self, qid, sid, score, c):
        '''add a good alignment score to the database'''
        # qid and sid are gene table id's for the query and subject
        q = get_GeneID_from_id(c, qid)
        s = get_GeneID_from_id(c, sid)
        sqlQuery = """INSERT INTO scores_summary(query, subject, clustalw_score) VALUES('%s', '%s', ROUND(%s,4))""" % (q, s, score)
        try: c.execute(sqlQuery)
        except: self.show_sql_errors(c)
        sqlQuery = "COMMIT"
        try: c.execute(sqlQuery)
        except: self.show_sql_errors(c)
    def get_matches(self, qid):
        '''called by server when adding good clustalw alignment(s) to MySQL db'''
        # 'in' replaces dict.has_key(), which was removed in Python 3.
        if qid in self.alignments:
            return self.alignments[qid]
        return None
class blastWorkUnit(errorHandler):
def __init__(self, c, query_id=None):
errorHandler.__init__(self)
sqlQuery = "SELECT id FROM gene WHERE blast_status = 'avail' LIMIT 1"
try:
c.execute(sqlQuery)
try:
self.query_id = str(int(c.fetchall()[0][0]))
except:
self.query_id = None
return
except: self.show_sql_errors(c)
self._mark_pending(c)
self.query_translation = get_translation_from_id(c, self.query_id)
self.create_database(c)
def _mark_pending(self, c):
sqlQuery = "update gene set blast_status = 'pending' where id = %s" % self.query_id
try: c.execute(sqlQuery)
except: self.show_sql_errors(c)
sqlQuery = "COMMIT"
try: c.execute(sqlQuery)
except: self.show_sql_errors(c)
def create_database(self, c):
self.database = []
sqlQuery = "select id, translation from gene"
try: c.execute(sqlQuery)
except: self.show_sql_errors(c)
print 'rowcount: %s' % c.rowcount
for result in c.fetchall():
record = workUnitSeq(result[0], result[1])
self.database.append(record)
def get_as_fasta(self):
fasta = ""
for record in self.database:
fasta = "%s>%s\n%s\n" % (fasta, record.id, record.translation)
return fasta
def add_matches(self, matches, c):
'''called by a compute node to keep track of a good BLASTp alignment'''
# matches = [(qid1, sid1, score1), (qid2, sid2, score2), ...]
for qid, sid, e, bits in matches:
self._add_match(qid, sid, e, bits, c)
sqlQuery = "UPDATE gene SET blast_status = 'done' WHERE id = %s" % self.query_id
try: c.execute(sqlQuery)
except: self.show_sql_errors(c)
sqlQuery = "COMMIT"
try: c.execute(sqlQuery)
except: self.show_sql_errors(c)
def _add_match(self, qid, sid, e, bits, c):
'''called by a compute node to keep track of a good BLAST alignment'''
# qid and sid are gene table id's for the query and subject
q = get_GeneID_from_id(c, qid)
s = get_GeneID_from_id(c, sid)
sqlQuery = """INSERT INTO scores_summary(query, subject, blast_score, blast_bit_score)
VALUES('%s', '%s', '%s', '%s')""" % (q, s, e, bits)
try: c.execute(sqlQuery)
except: self.show_sql_errors(c)
sqlQuery = "COMMIT"
try: c.execute(sqlQuery)
except: self.show_sql_errors(c)
def get_matches(self, qid):
'''called by server when adding good BLAST alignment(s) to MySQL db'''
if self.alignments.has_key(qid):
return self.alignments[qid]
return None
|
byuphamerator/phamerator-dev
|
phamerator/alignmentDatabase.py
|
Python
|
gpl-2.0
| 5,333
|
[
"BLAST"
] |
f794a827aacf9e4c706579fd5c430969b01c4392de0115ffdd91bdf493996c87
|
# Copyright 2004 by James Casbon. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
Code to deal with COMPASS output, a program for profile/profile comparison.
Compass is described in:
Sadreyev R, Grishin N. COMPASS: a tool for comparison of multiple protein
alignments with assessment of statistical significance. J Mol Biol. 2003 Feb
7;326(1):317-36.
Tested with COMPASS 1.24.
Classes:
Record One result of a compass file
_Scanner Scan compass results
_Consumer Consume scanner events
RecordParser Parse one compass record
Iterator Iterate through a number of compass records
"""
from Bio import File
from Bio.ParserSupport import *
import re,string
class Record:
    """Hold information from one COMPASS hit.

    Ali1 is the query, Ali2 the hit.  "=" characters in the alignment
    strings denote gap/filtered positions and are excluded from the
    coverage counts.
    """

    def __init__(self):
        self.query = ''
        self.hit = ''
        self.gap_threshold = 0
        self.query_length = 0
        self.query_filtered_length = 0
        self.query_nseqs = 0
        self.query_neffseqs = 0
        self.hit_length = 0
        self.hit_filtered_length = 0
        self.hit_nseqs = 0
        self.hit_neffseqs = 0
        self.sw_score = 0
        self.evalue = -1
        self.query_start = -1
        self.hit_start = -1
        self.query_aln = ''
        self.hit_aln = ''
        self.positives = ''

    def query_coverage(self):
        """Return the length of the query covered in the alignment."""
        # str.replace replaces the removed Python-2 string.replace()
        # module function; the result is identical.
        return len(self.query_aln.replace("=", ""))

    def hit_coverage(self):
        """Return the length of the hit covered in the alignment."""
        return len(self.hit_aln.replace("=", ""))
class _Scanner:
    """Read COMPASS output and generate consumer events."""

    def feed(self, handle, consumer):
        """Feed in COMPASS output, firing events on *consumer*."""
        if not isinstance(handle, File.UndoHandle):
            handle = File.UndoHandle(handle)
        assert isinstance(handle, File.UndoHandle), \
            "handle must be an UndoHandle"
        # Only scan when there is actually data to read.
        if handle.peekline():
            self._scan_record(handle, consumer)

    def _scan_record(self, handle, consumer):
        # The sections of one COMPASS record, in file order.
        for section in (self._scan_names,
                        self._scan_threshold,
                        self._scan_lengths,
                        self._scan_profilewidth,
                        self._scan_scores,
                        self._scan_alignment):
            section(handle, consumer)

    def _scan_names(self, handle, consumer):
        # e.g. "Ali1: 60456.blo.gz.aln Ali2: allscop//14984.blo.gz.aln"
        read_and_call(handle, consumer.names, contains="Ali1:")

    def _scan_threshold(self, handle, consumer):
        # e.g. "Threshold of effective gap content in columns: 0.5"
        read_and_call(handle, consumer.threshold, start="Threshold")

    def _scan_lengths(self, handle, consumer):
        # e.g. "length1=388 filtered_length1=386 length2=145 filtered_length2=137"
        read_and_call(handle, consumer.lengths, start="length1=")

    def _scan_profilewidth(self, handle, consumer):
        # e.g. "Nseqs1=399 Neff1=12.972 Nseqs2=1 Neff2=6.099"
        read_and_call(handle, consumer.profilewidth, contains="Nseqs1")

    def _scan_scores(self, handle, consumer):
        # e.g. "Smith-Waterman score = 37 Evalue = 5.75e+02"
        read_and_call(handle, consumer.scores, start="Smith-Waterman")

    def _scan_alignment(self, handle, consumer):
        # Alignment blocks arrive as triples -- query line, positives
        # line, hit line -- possibly separated by blank lines, until EOF.
        while True:
            line = handle.readline()
            if not line:
                break
            if is_blank_line(line):
                continue
            consumer.query_alignment(line)
            read_and_call(handle, consumer.positive_alignment)
            read_and_call(handle, consumer.hit_alignment)
class _Consumer:
    """Consume scanner events and populate a Record (held in self.data)."""

    # All regular expressions used -- compiled only once.  Raw strings
    # fix the invalid escape sequences (\s, \S, \d) that raise
    # DeprecationWarning/SyntaxWarning on modern Pythons; the compiled
    # patterns are byte-identical.
    _re_names = re.compile(r"Ali1:\s+(\S+)\s+Ali2:\s+(\S+)\s+")
    _re_threshold = \
        re.compile(r"Threshold of effective gap content in columns: (\S+)")
    _re_lengths = \
        re.compile(r"length1=(\S+)\s+filtered_length1=(\S+)\s+length2=(\S+)"
                   r"\s+filtered_length2=(\S+)")
    _re_profilewidth = \
        re.compile(r"Nseqs1=(\S+)\s+Neff1=(\S+)\s+Nseqs2=(\S+)\s+Neff2=(\S+)")
    _re_scores = re.compile(r"Smith-Waterman score = (\S+)\s+Evalue = (\S+)")
    _re_start = re.compile(r"(\d+)")
    _re_align = re.compile(r"^.{15}(\S+)")
    _re_positive_alignment = re.compile(r"^.{15}(.+)")

    def __init__(self):
        self.data = None

    def names(self, line):
        """
        Ali1: 60456.blo.gz.aln Ali2: allscop//14984.blo.gz.aln
              ------query-----       -------hit-------------
        """
        # A names line starts a fresh Record.
        self.data = Record()
        m = self.__class__._re_names.search(line)
        self.data.query = m.group(1)
        self.data.hit = m.group(2)

    def threshold(self, line):
        m = self.__class__._re_threshold.search(line)
        self.data.gap_threshold = float(m.group(1))

    def lengths(self, line):
        m = self.__class__._re_lengths.search(line)
        self.data.query_length = int(m.group(1))
        self.data.query_filtered_length = float(m.group(2))
        self.data.hit_length = int(m.group(3))
        self.data.hit_filtered_length = float(m.group(4))

    def profilewidth(self, line):
        m = self.__class__._re_profilewidth.search(line)
        self.data.query_nseqs = int(m.group(1))
        self.data.query_neffseqs = float(m.group(2))
        self.data.hit_nseqs = int(m.group(3))
        self.data.hit_neffseqs = float(m.group(4))

    def scores(self, line):
        m = self.__class__._re_scores.search(line)
        if m:
            self.data.sw_score = int(m.group(1))
            self.data.evalue = float(m.group(2))
        else:
            # No parsable score line: leave the record marked unscored.
            self.data.sw_score = 0
            self.data.evalue = -1.0

    def query_alignment(self, line):
        # An optional leading number is the alignment start coordinate.
        m = self.__class__._re_start.search(line)
        if m:
            self.data.query_start = int(m.group(1))
        m = self.__class__._re_align.match(line)
        assert m is not None, "invalid match"
        self.data.query_aln = self.data.query_aln + m.group(1)

    def positive_alignment(self, line):
        m = self.__class__._re_positive_alignment.match(line)
        assert m is not None, "invalid match"
        self.data.positives = self.data.positives + m.group(1)

    def hit_alignment(self, line):
        m = self.__class__._re_start.search(line)
        if m:
            self.data.hit_start = int(m.group(1))
        m = self.__class__._re_align.match(line)
        assert m is not None, "invalid match"
        self.data.hit_aln = self.data.hit_aln + m.group(1)
class RecordParser(AbstractParser):
    """Parse COMPASS results into a single Record object."""

    def __init__(self):
        self._scanner = _Scanner()
        self._consumer = _Consumer()

    def parse(self, handle):
        """Parse one COMPASS record from *handle* and return it."""
        # Wrap plain handles so the scanner can push lines back.
        if not isinstance(handle, File.UndoHandle):
            handle = File.UndoHandle(handle)
        self._scanner.feed(handle, self._consumer)
        return self._consumer.data
class Iterator:
    """Iterate through a file of COMPASS results, one Record at a time."""

    def __init__(self, handle):
        self._uhandle = File.UndoHandle(handle)
        self._parser = RecordParser()

    def next(self):
        """Return the next Record, or None at end of file."""
        lines = []
        while True:
            line = self._uhandle.readline()
            if not line:
                break
            # An "Ali1" header begins the *next* record; push it back
            # so the following call starts from it.
            if line[0:4] == "Ali1" and lines:
                self._uhandle.saveline(line)
                break
            lines.append(line)
        if not lines:
            return None
        # "".join replaces the Python-2-only string.join() function.
        data = "".join(lines)
        return self._parser.parse(File.StringHandle(data))

    # Python 3 iteration protocol; next() is kept for old callers.
    __next__ = next

    def __iter__(self):
        # Yield records until next() returns its None sentinel.
        return iter(self.next, None)
|
dbmi-pitt/DIKB-Micropublication
|
scripts/mp-scripts/Bio/Compass/__init__.py
|
Python
|
apache-2.0
| 8,796
|
[
"Biopython"
] |
1824632c45711ded52b05d30c8629c7c0d1452d1cefe7be99c0c4da2cdf90b54
|
"""
Fit a line based on parameters output from a grid of RADEX models
"""
import numpy as np
from pyspeckit.mpfit import mpfit
from .. import units
from . import fitter,model
import matplotlib.cbook as mpcb
import copy
try:
import astropy.io.fits as pyfits
except ImportError:
import pyfits
class radex_model(object):
    def __init__(self, xarr,
                 grid_vwidth=1.0,
                 grid_vwidth_scale=False,
                 texgrid=None,
                 taugrid=None,
                 hdr=None,
                 path_to_texgrid='',
                 path_to_taugrid='',
                 temperature_gridnumber=3,
                 debug=False,
                 verbose=False,
                 modelfunc=None,
                 **kwargs):
        """
        Use a grid of RADEX-computed models to make a model line spectrum
        The RADEX models have to be available somewhere.
        OR they can be passed as arrays. If as arrays, the form should be:
        texgrid = ((minfreq1,maxfreq1,texgrid1),(minfreq2,maxfreq2,texgrid2))
        xarr must be a SpectroscopicAxis instance
        xoff_v, width are both in km/s. Width is 'sigma'
        grid_vwidth is the velocity assumed when computing the grid in km/s
        this is important because tau = modeltau / width (see, e.g.,
        Draine 2011 textbook pgs 219-230)
        grid_vwidth_scale is True or False: False for LVG, True for Sphere
        A modelfunc must be specified. Model functions should take an xarr and
        a series of keyword arguments corresponding to the line parameters
        (Tex, tau, xoff_v, and width (gaussian sigma, not FWHM))
        """
        self.modelfunc = modelfunc
        if self.modelfunc is None:
            raise ValueError("Must specify a spectral model function. See class help for form.")
        # Store the options __call__ needs.  The original referenced
        # these as bare names (verbose, debug, temperature_gridnumber,
        # xinds, yinds, kwargs) and would have raised NameError.
        self.grid_vwidth = grid_vwidth
        self.grid_vwidth_scale = grid_vwidth_scale
        self.temperature_gridnumber = temperature_gridnumber
        self.debug = debug
        self.verbose = verbose
        self.kwargs = kwargs

        if texgrid is None and taugrid is None:
            if path_to_texgrid == '' or path_to_taugrid == '':
                raise IOError("Must specify model grids to use.")
            # Load the tau/tex cubes from FITS files.
            self.taugrid = [pyfits.getdata(path_to_taugrid)]
            self.texgrid = [pyfits.getdata(path_to_texgrid)]
            hdr = pyfits.getheader(path_to_taugrid)
            self.yinds, self.xinds = np.indices(self.taugrid[0].shape[1:])
            self.minfreq = (4.8,)
            self.maxfreq = (5.0,)
        elif len(taugrid) == len(texgrid) and hdr is not None:
            # Grids passed in as ((minfreq, maxfreq, grid), ...) tuples.
            self.minfreq, self.maxfreq, self.texgrid = zip(*texgrid)
            self.minfreq, self.maxfreq, self.taugrid = zip(*taugrid)
            self.yinds, self.xinds = np.indices(self.taugrid[0].shape[1:])
        else:
            raise Exception("Must specify both texgrid and taugrid (with hdr) "
                            "or paths to both grids.")
        # Reconstruct the log(density) / log(column) axes from the FITS WCS.
        self.densityarr = (self.xinds + hdr['CRPIX1'] - 1) * hdr['CD1_1'] + hdr['CRVAL1']  # log density
        self.columnarr = (self.yinds + hdr['CRPIX2'] - 1) * hdr['CD2_2'] + hdr['CRVAL2']   # log column
        # Convert X-units to frequency (original comment said GHz but the
        # conversion target is 'Hz' -- preserved as-is; TODO confirm).
        self.xarr = copy.copy(xarr)
        self.xarr.convert_to_unit('Hz', quiet=True)
        if self.debug:
            import pdb
            pdb.set_trace()

    def __call__(self, density=4, column=13, xoff_v=0.0, width=1.0, **kwargs):
        """Interpolate tau/tex at (density, column) and evaluate modelfunc."""
        # Lazy import so the object can be built without scipy installed;
        # replaces the undefined `scipyOK` flag of the original.
        try:
            import scipy.ndimage
        except ImportError:
            raise ImportError("Couldn't import scipy, therefore cannot interpolate")
        # Map physical parameters onto fractional grid indices.
        self.gridval1 = np.interp(density, self.densityarr[0, :], self.xinds[0, :])
        self.gridval2 = np.interp(column, self.columnarr[:, 0], self.yinds[:, 0])
        if np.isnan(self.gridval1) or np.isnan(self.gridval2):
            raise ValueError("Invalid column/density")
        coords = np.array([[self.gridval2], [self.gridval1]])
        tau = [scipy.ndimage.map_coordinates(tg[self.temperature_gridnumber, :, :],
                                             coords, order=1)
               for tg in self.taugrid]
        tex = [scipy.ndimage.map_coordinates(tg[self.temperature_gridnumber, :, :],
                                             coords, order=1)
               for tg in self.texgrid]
        if self.verbose:
            # %s because tau/tex are lists (the original %g would raise).
            print("density %20.12g column %20.12g: tau %s tex %s"
                  % (density, column, tau, tex))
        if self.debug:
            import pdb
            pdb.set_trace()
        # Extra keywords given at call time override construction-time ones.
        kw = dict(self.kwargs)
        kw.update(kwargs)
        return self.modelfunc(self.xarr, Tex=tex, tau=tau,
                              xoff_v=xoff_v, width=width, **kw)
|
keflavich/pyspeckit-obsolete
|
pyspeckit/spectrum/models/radex_modelgrid.py
|
Python
|
mit
| 4,499
|
[
"Gaussian"
] |
7cebcd38b176fbf6fb8b2a05ef70edc164d90c001ca83794b2e96b95b103020c
|
# -*- coding: utf-8 -*-
try:
# Python 2.7
from collections import OrderedDict
except:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from datetime import timedelta
from gluon import current, Field, URL
from gluon.html import *
from gluon.storage import Storage
from gluon.validators import IS_NULL_OR, IS_NOT_EMPTY
from s3.s3fields import S3Represent
from s3.s3resource import S3FieldSelector
from s3.s3utils import S3DateTime, s3_auth_user_represent_name, s3_avatar_represent, s3_unicode
from s3.s3validators import IS_INT_AMOUNT, IS_LOCATION_SELECTOR2, IS_ONE_OF
from s3.s3widgets import S3LocationSelectorWidget2
from s3.s3forms import S3SQLCustomForm, S3SQLInlineComponent, S3SQLInlineComponentMultiSelectWidget
T = current.T
s3 = current.response.s3
settings = current.deployment_settings
"""
Template settings for Requests Management
- for Philippines
"""
datetime_represent = lambda dt: S3DateTime.datetime_represent(dt, utc=True)
# =============================================================================
# System Settings
# -----------------------------------------------------------------------------
# Authorization Settings
# Users can self-register
#settings.security.self_registration = False
# Users need to verify their email
settings.auth.registration_requires_verification = True
# Users don't need to be approved
#settings.auth.registration_requires_approval = True
# Organisation links are either done automatically
# - by registering with official domain of Org
# or Manually by Call Center staff
#settings.auth.registration_requests_organisation = True
#settings.auth.registration_organisation_required = True
settings.auth.registration_requests_site = False
# Uncomment this to allow Admin to see Organisations in user Admin even if the Registration doesn't request this
settings.auth.admin_sees_organisation = True
# Approval emails get sent to all admins
settings.mail.approver = "ADMIN"
settings.auth.registration_link_user_to = {"staff": T("Staff")}
settings.auth.registration_link_user_to_default = ["staff"]
settings.auth.registration_roles = {"organisation_id": ["USER"],
}
# Terms of Service to be able to Register on the system
# uses <template>/views/tos.html
settings.auth.terms_of_service = True
settings.auth.show_utc_offset = False
settings.auth.show_link = False
#settings.auth.record_approval = True
#settings.auth.record_approval_required_for = ["org_organisation"]
# -----------------------------------------------------------------------------
# Security Policy
settings.security.policy = 5 # Apply Controller, Function and Table ACLs
settings.security.map = True
# Owner Entity
settings.auth.person_realm_human_resource_site_then_org = False
# -----------------------------------------------------------------------------
# Pre-Populate
settings.base.prepopulate = ["Libya"]
settings.base.system_name = T("Sahana")
settings.base.system_name_short = T("Sahana")
# -----------------------------------------------------------------------------
# Theme (folder to use for views/layout.html)
settings.base.theme = "Libya"
settings.ui.formstyle_row = "bootstrap"
settings.ui.formstyle = "bootstrap"
#settings.gis.map_height = 600
#settings.gis.map_width = 854
# -----------------------------------------------------------------------------
# L10n (Localization) settings
settings.L10n.languages = OrderedDict([
("ar", "العربية"),
("en", "English"),
])
# Default Language
settings.L10n.default_language = "en"
# Default timezone for users
settings.L10n.utc_offset = "UTC +0200"
# Unsortable 'pretty' date format
settings.L10n.date_format = "%d %b %y"
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
settings.L10n.decimal_separator = "."
# Thousands separator for numbers (defaults to space)
settings.L10n.thousands_separator = ","
# Uncomment this to Translate CMS Series Names
# - we want this on when running s3translate but off in normal usage as we use the English names to lookup icons in render_posts
#settings.L10n.translate_cms_series = True
# Uncomment this to Translate Location Names
settings.L10n.translate_gis_location = True
# Restrict the Location Selector to just certain countries
settings.gis.countries = ["LY"]
# Until we add support to LocationSelector2 to set dropdowns from LatLons
#settings.gis.check_within_parent_boundaries = False
# Uncomment to hide Layer Properties tool
#settings.gis.layer_properties = False
# Hide unnecessary Toolbar items
settings.gis.nav_controls = False
# Uncomment to display the Map Legend as a floating DIV
settings.gis.legend = "float"
# -----------------------------------------------------------------------------
# Finance settings
settings.fin.currencies = {
"EUR" : T("Euros"),
#"GBP" : T("Great British Pounds"),
"LYD" : T("Libyan Dinars"),
#"CHF" : T("Swiss Francs"),
"USD" : T("United States Dollars"),
}
settings.fin.currency_default = "LYD"
# -----------------------------------------------------------------------------
# Enable this for a UN-style deployment
#settings.ui.cluster = True
# Enable this to use the label 'Camp' instead of 'Shelter'
settings.ui.camp = True
# -----------------------------------------------------------------------------
# Uncomment to restrict the export formats available
#settings.ui.export_formats = ["xls"]
settings.ui.update_label = "Edit"
# -----------------------------------------------------------------------------
# Summary Pages
settings.ui.summary = [#{"common": True,
# "name": "cms",
# "widgets": [{"method": "cms"}]
# },
{"name": "table",
"label": "Table",
"widgets": [{"method": "datatable"}]
},
{"name": "map",
"label": "Map",
"widgets": [{"method": "map", "ajax_init": True}],
},
{"name": "charts",
"label": "Charts",
"widgets": [{"method": "report2", "ajax_init": True}]
},
]
settings.search.filter_manager = False
# Filter forms - style for Summary pages
#def filter_formstyle(row_id, label, widget, comment, hidden=False):
# return DIV(label, widget, comment,
# _id=row_id,
# _class="horiz_filter_form")
# =============================================================================
# Module Settings
# -----------------------------------------------------------------------------
# Human Resource Management
#settings.hrm.staff_label = "Contacts"
# Uncomment to allow Staff & Volunteers to be registered without an organisation
settings.hrm.org_required = False
# Uncomment to allow Staff & Volunteers to be registered without an email address
settings.hrm.email_required = False
# Uncomment to show the Organisation name in HR represents
settings.hrm.show_organisation = True
# Uncomment to disable Staff experience
settings.hrm.staff_experience = False
# Uncomment to disable Volunteer experience
settings.hrm.vol_experience = False
# Uncomment to disable the use of HR Credentials
settings.hrm.use_credentials = False
# Uncomment to disable the use of HR Skills
settings.hrm.use_skills = False
# Uncomment to disable the use of HR Teams
settings.hrm.teams = False
# Uncomment to hide fields in S3AddPersonWidget[2]
settings.pr.request_dob = False
settings.pr.request_gender = False
# -----------------------------------------------------------------------------
# Org
#settings.org.site_label = "Office/Shelter/Hospital"
settings.org.site_label = "Site"
settings.org.site_autocomplete = True
# Extra fields to show in Autocomplete Representations
settings.org.site_autocomplete_fields = ["location_id$L1",
"location_id$L2",
#"location_id$L3",
#"location_id$L4",
]
#settings.org.site_address_autocomplete = True
# -----------------------------------------------------------------------------
# Project
# Uncomment this to use multiple Organisations per project
#organisations = True
# Links to Filtered Components for Donors & Partners
#settings.project.organisation_roles = {
# 1: T("Host National Society"),
# 2: T("Partner"),
# 3: T("Donor"),
# #4: T("Customer"), # T("Beneficiary")?
# #5: T("Supplier"),
# 9: T("Partner National Society"),
#}
# -----------------------------------------------------------------------------
# Notifications
# Template for the subject line in update notifications
#settings.msg.notify_subject = "$S %s" % T("Notification")
settings.msg.notify_subject = "$S Notification"
# -----------------------------------------------------------------------------
def currency_represent(v):
    """
    Custom Representation of Currencies

    Known ISO codes map to their symbol; anything else (e.g. CHF)
    is returned unchanged.
    """
    symbols = {"USD": "$",
               "EUR": "€",
               "GBP": "£",
               }
    return symbols.get(v, v)
# -----------------------------------------------------------------------------
def render_contacts(listid, resource, rfields, record, **attr):
    """
    Custom dataList item renderer for Contacts on the Profile pages
    @param listid: the HTML ID for this list
    @param resource: the S3Resource to render
    @param rfields: the S3ResourceFields to render
    @param record: the record as dict
    @param attr: additional HTML attributes for the item
    @return: a DIV (card) for one Human Resource record
    """
    pkey = "hrm_human_resource.id"
    # Construct the item ID
    if pkey in record:
        record_id = record[pkey]
        item_id = "%s-%s" % (listid, record_id)
    else:
        # template
        item_id = "%s-[id]" % listid
    item_class = "thumbnail"
    # raw holds unformatted field values; record holds represented ones
    raw = record._row
    #author = record["hrm_human_resource.modified_by"]
    date = record["hrm_human_resource.modified_on"]
    fullname = record["hrm_human_resource.person_id"]
    job_title = raw["hrm_human_resource.job_title_id"] or ""
    if job_title:
        job_title = "- %s" % record["hrm_human_resource.job_title_id"]
    #organisation = record["hrm_human_resource.organisation_id"]
    organisation_id = raw["hrm_human_resource.organisation_id"]
    #org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
    pe_id = raw["pr_person.pe_id"]
    person_id = raw["hrm_human_resource.person_id"]
    location = record["org_site.location_id"]
    location_id = raw["org_site.location_id"]
    location_url = URL(c="gis", f="location",
                       args=[location_id, "profile"])
    address = raw["gis_location.addr_street"] or T("no office assigned")
    email = raw["pr_email_contact.value"] or T("no email address")
    # Contact values may come back as lists; use the first entry
    if isinstance(email, list):
        email = email[0]
    phone = raw["pr_phone_contact.value"] or T("no phone number")
    if isinstance(phone, list):
        phone = phone[0]
    db = current.db
    s3db = current.s3db
    # Look up the linked user account (if any) for the avatar
    ltable = s3db.pr_person_user
    query = (ltable.pe_id == pe_id)
    row = db(query).select(ltable.user_id,
                           limitby=(0, 1)
                           ).first()
    if row:
        # Use Personal Avatar
        # @ToDo: Optimise by not doing DB lookups (especially duplicate) within render, but doing these in the bulk query
        avatar = s3_avatar_represent(row.user_id,
                                     _class="media-object")
    else:
        avatar = IMG(_src=URL(c="static", f="img", args="blank-user.gif"),
                     _class="media-object")
    # Edit Bar
    permit = current.auth.s3_has_permission
    table = db.pr_person
    if permit("update", table, record_id=person_id):
        vars = {"refresh": listid,
                "record": record_id,
                }
        f = current.request.function
        if f == "organisation" and organisation_id:
            vars["(organisation)"] = organisation_id
        edit_url = URL(c="hrm", f="person",
                       args=[person_id, "update.popup"],
                       vars=vars)
        title_update = current.response.s3.crud_strings.hrm_human_resource.title_update
        edit_btn = A(I(" ", _class="icon icon-edit"),
                     _href=edit_url,
                     _class="s3_modal",
                     _title=title_update,
                     )
    else:
        edit_btn = ""
        edit_url = "#"
        title_update = ""
    # Deletions failing due to Integrity Errors
    #if permit("delete", table, record_id=person_id):
    #    delete_btn = A(I(" ", _class="icon icon-trash"),
    #                   _class="dl-item-delete",
    #                   )
    #else:
    delete_btn = ""
    edit_bar = DIV(edit_btn,
                   delete_btn,
                   _class="edit-bar fright",
                   )
    # Avatar doubles as a link to the edit popup (or "#" when not permitted)
    avatar = A(avatar,
               _href=edit_url,
               _class="pull-left s3_modal",
               _title=title_update,
               )
    # Render the item
    body = TAG[""](P(fullname,
                     " ",
                     SPAN(job_title),
                     _class="person_pos",
                     ),
                   P(I(_class="icon-phone"),
                     " ",
                     SPAN(phone),
                     " ",
                     I(_class="icon-envelope-alt"),
                     " ",
                     SPAN(email),
                     _class="card_1_line",
                     ),
                   P(I(_class="icon-home"),
                     " ",
                     address,
                     _class="card_manylines",
                     ))
    item = DIV(DIV(SPAN(" ", _class="card-title"),
                   SPAN(A(location,
                          _href=location_url,
                          ),
                        _class="location-title",
                        ),
                   SPAN(date,
                        _class="date-title",
                        ),
                   edit_bar,
                   _class="card-header",
                   ),
               DIV(avatar,
                   DIV(DIV(body,
                           # Organisation only needed if displaying elsewhere than org profile
                           # Author confusing with main contact record
                           #DIV(#author,
                           #    #" - ",
                           #    A(organisation,
                           #      _href=org_url,
                           #      _class="card-organisation",
                           #      ),
                           #    _class="card-person",
                           #    ),
                           _class="media",
                           ),
                       _class="media-body",
                       ),
                   _class="media",
                   ),
               #docs,
               _class=item_class,
               _id=item_id,
               )
    return item
# -----------------------------------------------------------------------------
def quote_unicode(s):
    """
    Quote unicode strings for URLs for Rocket

    ASCII characters pass through unchanged; every other code point
    is replaced by its uppercase "%XX"-style hex escape.
    """
    encoded = []
    for ch in s:
        code = ord(ch)
        if code < 128:
            encoded.append(ch)
        else:
            encoded.append(hex(code).replace("0x", "%").upper())
    return "".join(encoded)
# -----------------------------------------------------------------------------
def render_locations(listid, resource, rfields, record, **attr):
    """
    Custom dataList item renderer for Locations on the Selection Page
    @param listid: the HTML ID for this list
    @param resource: the S3Resource to render
    @param rfields: the S3ResourceFields to render
    @param record: the record as dict
    @param attr: additional HTML attributes for the item
    @return: a DIV (card) for one gis_location, with tallies of
             sub-locations, sites, requests and donations
    """
    pkey = "gis_location.id"
    # Construct the item ID
    if pkey in record:
        record_id = record[pkey]
        item_id = "%s-%s" % (listid, record_id)
    else:
        # template
        item_id = "%s-[id]" % listid
    item_class = "thumbnail"
    raw = record._row
    name = raw["gis_location.name"]
    level = raw["gis_location.level"]
    L1 = raw["gis_location.L1"]
    L2 = raw["gis_location.L2"]
    #L3 = raw["gis_location.L3"]
    #L4 = raw["gis_location.L4"]
    location_url = URL(c="gis", f="location",
                       args=[record_id, "profile"])
    # Compose the display name per admin level
    # NOTE(review): the first assignment for L1 is immediately superseded
    # by the following if/else (whose else branch also assigns plain name),
    # so the net result is the same -- looks redundant; confirm intent.
    if level == "L1":
        represent = name
    if level == "L2":
        represent = "%s (%s)" % (name, L1)
    #elif level == "L3":
    #    represent = "%s (%s, %s)" % (name, L2, L1)
    #elif level == "L4":
    #    represent = "%s (%s, %s, %s)" % (name, L3, L2, L1)
    else:
        # L0 or specific
        represent = name
    # Users don't edit locations
    # permit = current.auth.s3_has_permission
    # table = current.db.gis_location
    # if permit("update", table, record_id=record_id):
    #     edit_btn = A(I(" ", _class="icon icon-edit"),
    #                  _href=URL(c="gis", f="location",
    #                            args=[record_id, "update.popup"],
    #                            vars={"refresh": listid,
    #                                  "record": record_id}),
    #                  _class="s3_modal",
    #                  _title=current.response.s3.crud_strings.gis_location.title_update,
    #                  )
    # else:
    #     edit_btn = ""
    # if permit("delete", table, record_id=record_id):
    #     delete_btn = A(I(" ", _class="icon icon-trash"),
    #                    _class="dl-item-delete",
    #                    )
    # else:
    #     delete_btn = ""
    # edit_bar = DIV(edit_btn,
    #                delete_btn,
    #                _class="edit-bar fright",
    #                )
    # Tallies
    # NB We assume that all records are readable here
    # Search all sub-locations
    locations = current.gis.get_children(record_id)
    locations = [l.id for l in locations]
    locations.append(record_id)
    db = current.db
    s3db = current.s3db
    # Count sites located within this location (or any sub-location)
    stable = s3db.org_site
    query = (stable.deleted == False) & \
            (stable.location_id.belongs(locations))
    tally_sites = db(query).count()
    # Count requests raised by those sites
    table = s3db.req_req
    query = (table.deleted == False) & \
            (stable.site_id == table.site_id) & \
            (stable.location_id.belongs(locations))
    tally_reqs = db(query).count()
    # Count donation commitments within this location
    table = s3db.req_commit
    query = (table.deleted == False) & \
            (table.location_id.belongs(locations))
    tally_commits = db(query).count()
    # Work out the next admin level down and count its locations
    # NOTE(review): for levels other than L0/L1/L4 (e.g. L2 or a specific
    # location), next_Lx is never assigned before being used in the query
    # below -- would raise NameError; confirm which levels reach here.
    if level == "L4":
        next_Lx = ""
        next_Lx_label = ""
    else:
        if level == "L0":
            next_Lx = "L1"
            next_Lx_label = "Districts"
        if level == "L1":
            next_Lx = "L2"
            next_Lx_label = "Cities and Towns"
        #elif level == "L2":
        #    next_Lx = "L3"
        #    next_Lx_label = "Municipalities / Cities"
        #elif level == "L3":
        #    next_Lx = "L4"
        #    next_Lx_label = "Barangays"
        table = db.gis_location
        query = (table.deleted == False) & \
                (table.level == next_Lx) & \
                (table.parent == record_id)
        tally_Lx = db(query).count()
        next_url = URL(c="gis", f="location",
                       args=["datalist"],
                       vars={"~.level": next_Lx,
                             "~.parent": record_id,
                             })
        next_Lx_label = A(next_Lx_label,
                          _href=next_url,
                          )
        next_Lx = SPAN(tally_Lx,
                       _class="badge",
                       )
    # Build the icon, if it doesn't already exist
    filename = "%s.svg" % record_id
    import os
    filepath = os.path.join(current.request.folder, "static", "cache", "svg", filename)
    if not os.path.exists(filepath):
        gtable = db.gis_location
        loc = db(gtable.id == record_id).select(gtable.wkt,
                                                limitby=(0, 1)
                                                ).first()
        if loc:
            # Render the location's boundary polygon to a cached SVG
            from s3.codecs.svg import S3SVG
            S3SVG.write_file(filename, loc.wkt)
    # Render the item
    item = DIV(DIV(A(IMG(_class="media-object",
                         _src=URL(c="static",
                                  f="cache",
                                  args=["svg", filename],
                                  )
                         ),
                     _class="pull-left",
                     _href=location_url,
                     ),
                   DIV(SPAN(A(represent,
                              _href=location_url,
                              _class="media-heading"
                              ),
                            ),
                       #edit_bar,
                       _class="card-header-select",
                       ),
                   DIV(P(next_Lx_label,
                         next_Lx,
                         T("Sites"),
                         SPAN(tally_sites,
                              _class="badge",
                              ),
                         T("Requests"),
                         SPAN(tally_reqs,
                              _class="badge",
                              ),
                         T("Donations"),
                         SPAN(tally_commits,
                              _class="badge",
                              ),
                         _class="tally",
                         ),
                       _class="media-body",
                       ),
                   _class="media",
                   ),
               _class=item_class,
               _id=item_id,
               )
    return item
# -----------------------------------------------------------------------------
def render_locations_profile(listid, resource, rfields, record, **attr):
    """
    Custom dataList item renderer for Locations on the Profile Page
    - UNUSED
    @param listid: the HTML ID for this list
    @param resource: the S3Resource to render
    @param rfields: the S3ResourceFields to render
    @param record: the record as dict
    @param attr: additional HTML attributes for the item
    @return: a minimal DIV (card) linking to the location's profile
    """
    pkey = "gis_location.id"
    # Construct the item ID
    if pkey in record:
        record_id = record[pkey]
        item_id = "%s-%s" % (listid, record_id)
    else:
        # template
        item_id = "%s-[id]" % listid
    item_class = "thumbnail"
    raw = record._row
    name = record["gis_location.name"]
    location_url = URL(c="gis", f="location",
                       args=[record_id, "profile"])
    # Placeholder to maintain style
    #logo = DIV(IMG(_class="media-object"),
    #           _class="pull-left")
    # We don't Edit Locations
    # Edit Bar
    # permit = current.auth.s3_has_permission
    # table = current.db.gis_location
    # if permit("update", table, record_id=record_id):
    #     vars = {"refresh": listid,
    #             "record": record_id,
    #             }
    #     f = current.request.function
    #     if f == "organisation" and organisation_id:
    #         vars["(organisation)"] = organisation_id
    #     edit_btn = A(I(" ", _class="icon icon-edit"),
    #                  _href=URL(c="gis", f="location",
    #                            args=[record_id, "update.popup"],
    #                            vars=vars),
    #                  _class="s3_modal",
    #                  _title=current.response.s3.crud_strings.gis_location.title_update,
    #                  )
    # else:
    #     edit_btn = ""
    # if permit("delete", table, record_id=record_id):
    #     delete_btn = A(I(" ", _class="icon icon-trash"),
    #                    _class="dl-item-delete",
    #                    )
    # else:
    #     delete_btn = ""
    # edit_bar = DIV(edit_btn,
    #                delete_btn,
    #                _class="edit-bar fright",
    #                )
    # Render the item
    item = DIV(DIV(DIV(#SPAN(A(name,
                       #       _href=location_url,
                       #       ),
                       #     _class="location-title"),
                       #" ",
                       #edit_bar,
                       P(A(name,
                           _href=location_url,
                           ),
                         _class="card_comments"),
                       _class="span5"), # card-details
                   _class="row",
                   ),
               )
    return item
# -----------------------------------------------------------------------------
def render_sites(listid, resource, rfields, record, **attr):
    """
    Custom dataList item renderer for Facilities on the Profile pages
    @param listid: the HTML ID for this list
    @param resource: the S3Resource to render
    @param rfields: the S3ResourceFields to render
    @param record: the record as dict
    @param attr: additional HTML attributes for the item
    @return: a DIV (card) for one org_facility, including status,
             power supply, contact details and an edit/delete bar
    """
    pkey = "org_facility.id"
    # Construct the item ID
    if pkey in record:
        record_id = record[pkey]
        item_id = "%s-%s" % (listid, record_id)
    else:
        # template
        item_id = "%s-[id]" % listid
    item_class = "thumbnail"
    # raw holds unformatted field values; record holds represented ones
    raw = record._row
    name = record["org_facility.name"]
    site_id = raw["org_facility.id"]
    opening_times = raw["org_facility.opening_times"] or ""
    author = record["org_facility.modified_by"]
    date = record["org_facility.modified_on"]
    organisation = record["org_facility.organisation_id"]
    organisation_id = raw["org_facility.organisation_id"]
    location = record["org_facility.location_id"]
    level = raw["gis_location.level"]
    # Link to the admin-level location: the location itself if it is an
    # Lx, otherwise its parent
    if level:
        location_id = raw["org_facility.location_id"]
    else:
        location_id = raw["gis_location.parent"]
    location_url = URL(c="gis", f="location",
                       args=[location_id, "profile"])
    address = raw["gis_location.addr_street"] or ""
    phone = raw["org_facility.phone1"] or ""
    facility_type = record["org_site_facility_type.facility_type_id"]
    comments = record["org_facility.comments"] or ""
    logo = raw["org_organisation.logo"]
    site_url = URL(c="org", f="facility", args=[site_id, "profile"])
    org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
    if logo:
        logo = A(IMG(_src=URL(c="default", f="download", args=[logo]),
                     _class="media-object",
                     ),
                 _href=org_url,
                 _class="pull-left",
                 )
    else:
        # Empty placeholder keeps the card layout aligned
        logo = DIV(IMG(_class="media-object"),
                   _class="pull-left")
    # Facility status badge (colour-coded)
    # NOTE(review): icon/colour are only assigned for status codes
    # 1-4 and 99 -- any other truthy code would leave them unbound
    # (NameError on the %-format below); confirm the code set is closed.
    facility_status = raw["org_site_status.facility_status"] or ""
    if facility_status:
        if facility_status == 1:
            icon = "thumbs-up-alt"
            colour = "green"
        elif facility_status == 2:
            icon = "thumbs-down-alt"
            colour = "amber"
        elif facility_status == 3:
            icon = "reply-all"
            colour = "red"
        elif facility_status == 4:
            icon = "remove"
            colour = "red"
        elif facility_status == 99:
            icon = "question"
            colour = ""
        facility_status = P(#I(_class="icon-%s" % icon),
                            #" ",
                            SPAN("%s: %s" % (T("Status"), record["org_site_status.facility_status"])),
                            " ",
                            _class="card_1_line %s" % colour,
                            )
    # Power supply badge (same unbound-name caveat as above)
    power_supply_type = raw["org_site_status.power_supply_type"] or ""
    if power_supply_type:
        if power_supply_type == 1:
            icon = "thumbs-up-alt"
            colour = "green"
        elif power_supply_type == 2:
            icon = "cogs"
            colour = "amber"
        elif power_supply_type == 98:
            icon = "question"
            colour = "amber"
        elif power_supply_type == 99:
            icon = "remove"
            colour = "red"
        power_supply_type = P(#I(_class="icon-%s" % icon),
                              #" ",
                              SPAN("%s: %s" % (T("Power"), record["org_site_status.power_supply_type"])),
                              " ",
                              _class="card_1_line %s" % colour,
                              )
    # Edit Bar
    permit = current.auth.s3_has_permission
    table = current.db.org_facility
    if permit("update", table, record_id=record_id):
        vars = {"refresh": listid,
                "record": record_id,
                }
        f = current.request.function
        if f == "organisation" and organisation_id:
            vars["(organisation)"] = organisation_id
        edit_btn = A(I(" ", _class="icon icon-edit"),
                     _href=URL(c="org", f="facility",
                               args=[record_id, "update.popup"],
                               vars=vars),
                     _class="s3_modal",
                     _title=current.response.s3.crud_strings.org_facility.title_update,
                     )
    else:
        edit_btn = ""
    if permit("delete", table, record_id=record_id):
        delete_btn = A(I(" ", _class="icon icon-trash"),
                       _class="dl-item-delete",
                       )
    else:
        delete_btn = ""
    edit_bar = DIV(edit_btn,
                   delete_btn,
                   _class="edit-bar fright",
                   )
    # Render the item
    body = TAG[""](P(I(_class="icon-flag"),
                     " ",
                     SPAN(facility_type),
                     " ",
                     _class="card_1_line",
                     ),
                   P(I(_class="icon-home"),
                     " ",
                     address,
                     _class="card_manylines",
                     ),
                   P(I(_class="icon-time"),
                     " ",
                     SPAN(opening_times),
                     " ",
                     _class="card_1_line",
                     ),
                   P(I(_class="icon-phone"),
                     " ",
                     SPAN(phone),
                     " ",
                     _class="card_1_line",
                     ),
                   facility_status,
                   power_supply_type,
                   P(comments,
                     _class="card_manylines s3-truncate",
                     ),
                   )
    item = DIV(DIV(SPAN(A(name,
                          _href=site_url,
                          ),
                        _class="card-title",
                        ),
                   SPAN(A(location,
                          _href=location_url,
                          ),
                        _class="location-title",
                        ),
                   SPAN(date,
                        _class="date-title",
                        ),
                   edit_bar,
                   _class="card-header",
                   ),
               DIV(logo,
                   DIV(DIV(body,
                           DIV(author,
                               " - ",
                               A(organisation,
                                 _href=org_url,
                                 _class="card-organisation",
                                 ),
                               _class="card-person",
                               ),
                           _class="media",
                           ),
                       _class="media-body",
                       ),
                   _class="media",
                   ),
               #docs,
               _class=item_class,
               _id=item_id,
               )
    return item
# -----------------------------------------------------------------------------
def render_organisations(listid, resource, rfields, record, **attr):
    """
    Custom dataList item renderer for Organisations on the Stakeholder
    Selection Page.

    Builds one "card" (a thumbnail DIV) per organisation, including its
    logo, contact details, money-needs summary and tallies of related
    Sites/Requests/Donations.

    @param listid: the HTML ID for this list
    @param resource: the S3Resource to render
    @param rfields: the S3ResourceFields to render
    @param record: the record as dict
    @param attr: additional HTML attributes for the item
    @return: a web2py DIV helper representing the list item
    """
    pkey = "org_organisation.id"
    # Construct the item ID; when no record is present we are rendering
    # the client-side template, so emit the "[id]" placeholder instead
    if pkey in record:
        record_id = record[pkey]
        item_id = "%s-%s" % (listid, record_id)
    else:
        # template
        item_id = "%s-[id]" % listid
    item_class = "thumbnail span6"
    # raw gives unformatted column values; record gives represented ones
    raw = record._row
    name = record["org_organisation.name"]
    logo = raw["org_organisation.logo"]
    phone = raw["org_organisation.phone"] or ""
    website = raw["org_organisation.website"] or ""
    if website:
        website = A(website, _href=website)
    money = raw["req_organisation_needs.money"]
    if money:
        money_details = record["req_organisation_needs.money_details"]
        money_details = SPAN(XML(money_details),
                             _class="s3-truncate")
        money_details = P(I(_class="icon icon-dollar"),
                          " ",
                          money_details,
                          _class="card_manylines",
                          )
    else:
        # Include anyway to make cards align
        money_details = P(I(_class="icon icon-dollar"),
                          " ",
                          _class="card_1_line",
                          )
    #time = raw["req_organisation_needs.vol"]
    #if time:
    #    time_details = record["req_organisation_needs.vol_details"]
    #    time_details = P(I(_class="icon icon-time"),
    #                     " ",
    #                     XML(time_details),
    #                     _class="card_1_line",
    #                     )
    #else:
    #    time_details = ""
    org_url = URL(c="org", f="organisation", args=[record_id, "profile"])
    if logo:
        logo = A(IMG(_src=URL(c="default", f="download", args=[logo]),
                     _class="media-object",
                     ),
                 _href=org_url,
                 _class="pull-left",
                 )
    else:
        # Empty placeholder keeps the card layout consistent
        logo = DIV(IMG(_class="media-object"),
                   _class="pull-left")
    db = current.db
    permit = current.auth.s3_has_permission
    table = db.org_organisation
    # Edit/delete buttons only for users with the matching permission
    if permit("update", table, record_id=record_id):
        edit_btn = A(I(" ", _class="icon icon-edit"),
                     _href=URL(c="org", f="organisation",
                               args=[record_id, "update.popup"],
                               vars={"refresh": listid,
                                     "record": record_id}),
                     _class="s3_modal",
                     _title=current.response.s3.crud_strings.org_organisation.title_update,
                     )
    else:
        edit_btn = ""
    if permit("delete", table, record_id=record_id):
        delete_btn = A(I(" ", _class="icon icon-trash"),
                       _class="dl-item-delete",
                       )
    else:
        delete_btn = ""
    edit_bar = DIV(edit_btn,
                   delete_btn,
                   _class="edit-bar fright",
                   )
    # Tallies
    # NB We assume that all records are readable here
    s3db = current.s3db
    stable = s3db.org_site
    query = (stable.deleted == False) & \
            (stable.obsolete == False) & \
            (stable.organisation_id == record_id)
    tally_sites = db(query).count()
    # Requests are linked to the org via their site
    table = s3db.req_req
    query = (table.deleted == False) & \
            (stable.site_id == table.site_id) & \
            (stable.organisation_id == record_id)
    tally_reqs = db(query).count()
    table = s3db.req_commit
    query = (table.deleted == False) & \
            (table.organisation_id == record_id)
    tally_commits = db(query).count()
    # Render the item
    item = DIV(DIV(logo,
                   DIV(SPAN(A(name,
                              _href=org_url,
                              _class="media-heading"
                              ),
                            ),
                       edit_bar,
                       _class="card-header-select",
                       ),
                   DIV(P(I(_class="icon icon-phone"),
                         " ",
                         phone,
                         _class="card_1_line",
                         ),
                       P(I(_class="icon icon-map"),
                         " ",
                         website,
                         _class="card_1_line",
                         ),
                       money_details,
                       #time_details,
                       P(T("Sites"),
                         SPAN(tally_sites,
                              _class="badge",
                              ),
                         T("Requests"),
                         SPAN(tally_reqs,
                              _class="badge",
                              ),
                         T("Donations"),
                         SPAN(tally_commits,
                              _class="badge",
                              ),
                         _class="tally",
                         ),
                       _class="media-body",
                       ),
                   _class="media",
                   ),
               _class=item_class,
               _id=item_id,
               )
    return item
# -----------------------------------------------------------------------------
def render_org_needs(listid, resource, rfields, record, **attr):
    """
    Custom dataList item renderer for Organisation Needs
    - UNUSED

    Builds one card per req_organisation_needs record showing the
    organisation's contact details plus money/time needs.

    @param listid: the HTML ID for this list
    @param resource: the S3Resource to render
    @param rfields: the S3ResourceFields to render
    @param record: the record as dict
    @param attr: additional HTML attributes for the item
    @return: a web2py DIV helper representing the list item
    """
    pkey = "req_organisation_needs.id"
    # Construct the item ID ("[id]" placeholder for the client-side template)
    if pkey in record:
        record_id = record[pkey]
        item_id = "%s-%s" % (listid, record_id)
    else:
        # template
        item_id = "%s-[id]" % listid
    item_class = "thumbnail span6"
    # raw gives unformatted column values; record gives represented ones
    raw = record._row
    logo = raw["org_organisation.logo"]
    phone = raw["org_organisation.phone"] or ""
    website = raw["org_organisation.website"] or ""
    if website:
        website = A(website, _href=website)
    author = record["req_organisation_needs.modified_by"]
    date = record["req_organisation_needs.modified_on"]
    money = raw["req_organisation_needs.money"]
    if money:
        money_details = record["req_organisation_needs.money_details"]
        money_details = P(I(_class="icon icon-dollar"),
                          " ",
                          XML(money_details),
                          _class="card_manylines",
                          )
    else:
        money_details = ""
    # NOTE: 'time' shadows the builtin here; harmless in this local scope
    time = raw["req_organisation_needs.vol"]
    if time:
        time_details = record["req_organisation_needs.vol_details"]
        time_details = P(I(_class="icon icon-time"),
                         " ",
                         XML(time_details),
                         _class="card_manylines",
                         )
    else:
        time_details = ""
    org_id = raw["org_organisation.id"]
    org_url = URL(c="org", f="organisation", args=[org_id, "profile"])
    if logo:
        logo = A(IMG(_src=URL(c="default", f="download", args=[logo]),
                     _class="media-object",
                     ),
                 _href=org_url,
                 _class="pull-left",
                 )
    else:
        # Empty placeholder keeps the card layout consistent
        logo = DIV(IMG(_class="media-object"),
                   _class="pull-left")
    # Edit/delete buttons only for users with the matching permission
    permit = current.auth.s3_has_permission
    table = current.db.req_organisation_needs
    if permit("update", table, record_id=record_id):
        edit_btn = A(I(" ", _class="icon icon-edit"),
                     _href=URL(c="req", f="organisation_needs",
                               args=[record_id, "update.popup"],
                               vars={"refresh": listid,
                                     "record": record_id}),
                     _class="s3_modal",
                     _title=current.response.s3.crud_strings.req_organisation_needs.title_update,
                     )
    else:
        edit_btn = ""
    if permit("delete", table, record_id=record_id):
        delete_btn = A(I(" ", _class="icon icon-trash"),
                       _class="dl-item-delete",
                       )
    else:
        delete_btn = ""
    edit_bar = DIV(edit_btn,
                   delete_btn,
                   _class="edit-bar fright",
                   )
    if current.request.controller == "org":
        # Org Profile page - no need to repeat Org Name
        title = " "
    else:
        title = raw["org_organisation.name"]
    # Render the item
    item = DIV(DIV(SPAN(title, _class="card-title"),
                   SPAN(author, _class="location-title"),
                   SPAN(date, _class="date-title"),
                   edit_bar,
                   _class="card-header",
                   ),
               DIV(logo,
                   DIV(P(I(_class="icon icon-phone"),
                         " ",
                         phone,
                         _class="card_1_line",
                         ),
                       P(I(_class="icon icon-map"),
                         " ",
                         website,
                         _class="card_1_line",
                         ),
                       money_details,
                       time_details,
                       _class="media-body",
                       ),
                   _class="media",
                   ),
               _class=item_class,
               _id=item_id,
               )
    return item

s3.render_org_needs = render_org_needs
# -----------------------------------------------------------------------------
def render_site_needs(listid, resource, rfields, record, **attr):
    """
    Custom dataList item renderer for Site Needs
    - UNUSED

    Builds one card per req_site_needs record showing the facility's
    address, opening times, phone, website and focal-point contact.

    @param listid: the HTML ID for this list
    @param resource: the S3Resource to render
    @param rfields: the S3ResourceFields to render
    @param record: the record as dict
    @param attr: additional HTML attributes for the item
    @return: a web2py DIV helper representing the list item
    """
    pkey = "req_site_needs.id"
    # Construct the item ID ("[id]" placeholder for the client-side template)
    if pkey in record:
        record_id = record[pkey]
        item_id = "%s-%s" % (listid, record_id)
    else:
        # template
        item_id = "%s-[id]" % listid
    item_class = "thumbnail span6"
    # raw gives unformatted column values; record gives represented ones
    raw = record._row
    logo = raw["org_organisation.logo"]
    addresses = raw["gis_location.addr_street"]
    if addresses:
        if isinstance(addresses, list):
            address = addresses[0]
        else:
            address = addresses
    else:
        address = ""
    # FIX: 'contact' was referenced in the card body below, but its
    # extraction had been commented out, causing a NameError whenever
    # this renderer ran. Use .get() so a field missing from the
    # extracted row resolves to "" instead of raising.
    contact = raw.get("org_facility.contact") or ""
    opening_times = raw["org_facility.opening_times"] or ""
    phone = raw["org_facility.phone1"] or ""
    website = raw["org_organisation.website"] or ""
    if website:
        website = A(website, _href=website)
    author = record["req_site_needs.modified_by"]
    date = record["req_site_needs.modified_on"]
    #goods = raw["req_site_needs.goods"]
    #if goods:
    #    goods_details = record["req_site_needs.goods_details"]
    #    goods_details = P(I(_class="icon icon-truck"),
    #                      " ",
    #                      XML(goods_details),
    #                      _class="card_1_line",
    #                      )
    #else:
    #    goods_details = ""
    #time = raw["req_site_needs.vol"]
    #if time:
    #    time_details = record["req_site_needs.vol_details"]
    #    time_details = P(I(_class="icon icon-time"),
    #                     " ",
    #                     XML(time_details),
    #                     _class="card_1_line",
    #                     )
    #else:
    #    time_details = ""
    site_url = URL(c="org", f="facility", args=[record_id, "profile"])
    if logo:
        logo = A(IMG(_src=URL(c="default", f="download", args=[logo]),
                     _class="media-object",
                     ),
                 _href=site_url,
                 _class="pull-left",
                 )
    else:
        # Empty placeholder keeps the card layout consistent
        logo = DIV(IMG(_class="media-object"),
                   _class="pull-left")
    # Edit/delete buttons only for users with the matching permission
    permit = current.auth.s3_has_permission
    table = current.db.req_site_needs
    if permit("update", table, record_id=record_id):
        edit_btn = A(I(" ", _class="icon icon-edit"),
                     _href=URL(c="req", f="site_needs",
                               args=[record_id, "update.popup"],
                               vars={"refresh": listid,
                                     "record": record_id}),
                     _class="s3_modal",
                     _title=current.response.s3.crud_strings.req_site_needs.title_update,
                     )
    else:
        edit_btn = ""
    if permit("delete", table, record_id=record_id):
        delete_btn = A(I(" ", _class="icon icon-trash"),
                       _class="dl-item-delete",
                       )
    else:
        delete_btn = ""
    edit_bar = DIV(edit_btn,
                   delete_btn,
                   _class="edit-bar fright",
                   )
    if current.request.controller == "org":
        # Site Profile page - no need to repeat Site Name
        title = " "
    else:
        title = raw["org_facility.name"]
    # Render the item
    item = DIV(DIV(SPAN(title, _class="card-title"),
                   SPAN(author, _class="location-title"),
                   SPAN(date, _class="date-title"),
                   edit_bar,
                   _class="card-header",
                   ),
               DIV(logo,
                   DIV(#goods_details,
                       #time_details,
                       P(I(_class="icon icon-home"),
                         " ",
                         address,
                         _class="card_manylines",
                         ),
                       P(I(_class="icon-time"),
                         " ",
                         SPAN(opening_times),
                         " ",
                         _class="card_1_line",
                         ),
                       P(I(_class="icon icon-phone"),
                         " ",
                         phone,
                         _class="card_1_line",
                         ),
                       P(I(_class="icon icon-map"),
                         " ",
                         website,
                         _class="card_1_line",
                         ),
                       P(I(_class="icon icon-user"),
                         " ",
                         contact,
                         _class="card_1_line",
                         ),
                       _class="media-body",
                       ),
                   _class="media",
                   ),
               _class=item_class,
               _id=item_id,
               )
    return item

s3.render_site_needs = render_site_needs
# -----------------------------------------------------------------------------
def customize_gis_location(**attr):
    """
    Customize gis_location controller
    - Profile Page

    Installs a custom prep that configures the Lx-selection datalist
    and the location profile page (map bbox, request/donation/site
    widgets, cached SVG icon, profile header).

    @param attr: controller attributes (returned unchanged)
    """
    db = current.db
    s3 = current.response.s3
    # Custom PreP: chain onto whatever prep is already installed
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
            if not result:
                return False
        if r.interactive:
            s3db = current.s3db
            table = s3db.gis_location
            if r.method == "datalist":
                # Lx selection page
                # 2-column datalist, 6 rows per page
                s3.dl_pagelength = 12
                s3.dl_rowsize = 2
                # Default 5 triggers an AJAX call, we should load all by default
                # NOTE(review): this overrides the dl_pagelength=12 set above
                s3.dl_pagelength = 17
                level = current.request.get_vars.get("~.level", None)
                if not level:
                    # No level requested: default to Libya's L1s (Districts)
                    level = "L1"
                    s3.filter = (table.L0 == "Libya") & (table.level == "L1")
                parent = current.request.get_vars.get("~.parent", None)
                if level == "L1":
                    s3.crud_strings["gis_location"].title_list = T("Districts")
                elif level == "L2":
                    if parent:
                        # Resolve the parent id to its name for the title
                        parent = db(table.id == parent).select(table.name,
                                                               limitby=(0, 1)
                                                               ).first().name
                        s3.crud_strings["gis_location"].title_list = T("Cities and Towns in %s") % parent
                    else:
                        s3.crud_strings["gis_location"].title_list = T("Cities and Towns")
                list_fields = ["name",
                               "level",
                               "L1",
                               "L2",
                               #"L3",
                               #"L4",
                               ]
                s3db.configure("gis_location",
                               filter_widgets = None,
                               list_fields = list_fields,
                               list_layout = render_locations,
                               )
            elif r.method == "profile":
                # Customise tables used by widgets
                #customize_hrm_human_resource_fields()
                customize_org_facility_fields()
                s3db.req_customize_req_fields()
                s3db.req_customize_commit_fields()
                # gis_location table (Sub-Locations)
                table.parent.represent = s3db.gis_LocationRepresent(sep=" | ")
                list_fields = ["name",
                               "id",
                               ]
                location = r.record
                record_id = location.id
                # Override context as that's a Path
                default = "~.(location)=%s" % record_id
                map_widget = dict(label = "Map",
                                  type = "map",
                                  context = "location",
                                  icon = "icon-map",
                                  height = 383,
                                  width = 568,
                                  bbox = {"lat_max" : location.lat_max,
                                          "lon_max" : location.lon_max,
                                          "lat_min" : location.lat_min,
                                          "lon_min" : location.lon_min
                                          },
                                  )
                #locations_widget = dict(label = "Locations",
                #                        insert = False,
                #                        #title_create = "Add New Location",
                #                        type = "datalist",
                #                        tablename = "gis_location",
                #                        context = "location",
                #                        icon = "icon-globe",
                #                        # @ToDo: Show as Polygons?
                #                        show_on_map = False,
                #                        list_layout = render_locations_profile,
                #                        )
                #needs_widget = dict(label = "Needs",
                #                    title_create = "Add New Need",
                #                    type = "datalist",
                #                    tablename = "req_site_needs",
                #                    context = "location",
                #                    icon = "icon-hand-up",
                #                    multiple = False,
                #                    # Would just show up on Sites
                #                    show_on_map = False,
                #                    list_layout = render_site_needs,
                #                    )
                reqs_widget = dict(label = "Requests",
                                   title_create = "Add New Request",
                                   type = "datalist",
                                   tablename = "req_req",
                                   context = "location",
                                   default = default,
                                   # Only open/committed requests
                                   filter = S3FieldSelector("req_status").belongs([0, 1]),
                                   icon = "icon-flag",
                                   layer = "Requests",
                                   # provided by Catalogue Layer
                                   #marker = "request",
                                   list_layout = s3db.req_render_reqs,
                                   )
                commits_widget = dict(label = "Donations",
                                      title_create = "Add New Donation",
                                      type = "datalist",
                                      tablename = "req_commit",
                                      context = "location",
                                      default = default,
                                      filter = S3FieldSelector("cancel") == False,
                                      icon = "icon-truck",
                                      layer = "Donations",
                                      # provided by Catalogue Layer
                                      #marker = "donation",
                                      list_layout = s3db.req_render_commits,
                                      )
                sites_widget = dict(label = "Sites",
                                    title_create = "Add New Site",
                                    type = "datalist",
                                    tablename = "org_facility",
                                    context = "location",
                                    default = default,
                                    filter = S3FieldSelector("obsolete") == False,
                                    icon = "icon-home",
                                    layer = "Facilities",
                                    # provided by Catalogue Layer
                                    #marker = "office",
                                    list_layout = render_sites,
                                    )
                # Build the icon (an SVG of the location's boundary),
                # if it doesn't already exist in the static cache
                filename = "%s.svg" % record_id
                import os
                filepath = os.path.join(current.request.folder, "static", "cache", "svg", filename)
                if not os.path.exists(filepath):
                    gtable = db.gis_location
                    loc = db(gtable.id == record_id).select(gtable.wkt,
                                                            limitby=(0, 1)
                                                            ).first()
                    if loc and loc.wkt:
                        from s3.codecs.svg import S3SVG
                        S3SVG.write_file(filename, loc.wkt)
                if current.auth.s3_has_permission("update", table, record_id=record_id):
                    edit_btn = A(I(_class="icon icon-edit"),
                                 _href=URL(c="gis", f="location",
                                           args=[record_id, "update.popup"],
                                           vars={"refresh": "datalist"}),
                                 _class="s3_modal",
                                 _title=s3.crud_strings["gis_location"].title_update,
                                 )
                else:
                    edit_btn = ""
                name = location.name
                s3db.configure("gis_location",
                               list_fields = list_fields,
                               profile_title = "%s : %s" % (s3.crud_strings["gis_location"].title_list,
                                                            name),
                               profile_header = DIV(edit_btn,
                                                    A(IMG(_class="media-object",
                                                          _src=URL(c="static",
                                                                   f="cache",
                                                                   args=["svg", filename],
                                                                   ),
                                                          ),
                                                      _class="pull-left",
                                                      #_href=location_url,
                                                      ),
                                                    H2(name),
                                                    _class="profile_header",
                                                    ),
                               profile_widgets = [reqs_widget,
                                                  map_widget,
                                                  commits_widget,
                                                  sites_widget,
                                                  #locations_widget,
                                                  ],
                               )
        return True
    s3.prep = custom_prep
    return attr

settings.ui.customize_gis_location = customize_gis_location
# -----------------------------------------------------------------------------
def customize_hrm_human_resource_fields():
    """
    Apply the field-level customizations which hrm_human_resource needs
    for Profile widgets and 'more' popups.
    """
    s3db = current.s3db
    htable = s3db.hrm_human_resource

    # Human-friendly representations for list/profile views
    htable.site_id.represent = S3Represent(lookup="org_site")
    htable.modified_on.represent = datetime_represent
    s3db.org_site.location_id.represent = s3db.gis_LocationRepresent(sep=" | ")

    if current.request.controller == "vol":
        # Volunteer context: collect DoB/gender and use the person widget
        settings.pr.request_dob = True
        settings.pr.request_gender = True
        from s3.s3validators import IS_ADD_PERSON_WIDGET2
        from s3.s3widgets import S3AddPersonWidget2
        person_field = htable.person_id
        person_field.requires = IS_ADD_PERSON_WIDGET2()
        person_field.widget = S3AddPersonWidget2(controller="vol")

    s3db.configure("hrm_human_resource",
                   list_fields=["person_id",
                                "person_id$pe_id",
                                "organisation_id",
                                "site_id$location_id",
                                "site_id$location_id$addr_street",
                                "job_title_id",
                                "email.value",
                                "phone.value",
                                "modified_on",
                                ],
                   )
# -----------------------------------------------------------------------------
def customize_hrm_human_resource(**attr):
    """
    Customize the hrm_human_resource controller
    - used for 'more' popups

    @param attr: controller attributes (returned unchanged)
    """
    s3 = current.response.s3

    # Chain our prep onto whatever prep is already installed
    prev_prep = s3.prep

    def custom_prep(r):
        # Run the standard prep first; abort if it vetoes the request
        if callable(prev_prep) and not prev_prep(r):
            return False
        customize_hrm_human_resource_fields()
        if r.method == "datalist":
            # 'More' popups should not embed a create form
            current.s3db.configure("hrm_human_resource",
                                   listadd=False,
                                   list_layout=render_contacts,
                                   )
        return True

    s3.prep = custom_prep
    return attr

settings.ui.customize_hrm_human_resource = customize_hrm_human_resource
# -----------------------------------------------------------------------------
def customize_hrm_job_title(**attr):
    """
    Customize hrm_job_title controller

    Hides the organisation field and installs a custom postp which adds
    Open/Edit/Delete row actions (restricted to accessible records) and
    tags the form/item with a CSS class.

    @param attr: controller attributes (returned unchanged)
    """
    s3 = current.response.s3
    table = current.s3db.hrm_job_title
    # Configure fields: job titles are global here, not per-organisation
    field = table.organisation_id
    field.readable = field.writable = False
    field.default = None
    # Custom postp: chain onto whatever postp is already installed
    standard_postp = s3.postp
    def custom_postp(r, output):
        if r.interactive:
            actions = [dict(label=str(T("Open")),
                            _class="action-btn",
                            url=URL(c="hrm", f="job_title",
                                    args=["[id]", "read"]))
                       ]
            db = current.db
            auth = current.auth
            has_permission = auth.s3_has_permission
            ownership_required = auth.permission.ownership_required
            s3_accessible_query = auth.s3_accessible_query
            if has_permission("update", table):
                action = dict(label=str(T("Edit")),
                              _class="action-btn",
                              url=URL(c="hrm", f="job_title",
                                      args=["[id]", "update"]),
                              )
                if ownership_required("update", table):
                    # Check which records can be updated
                    query = s3_accessible_query("update", table)
                    rows = db(query).select(table._id)
                    restrict = []
                    rappend = restrict.append
                    for row in rows:
                        row_id = row.get("id", None)
                        if row_id:
                            rappend(str(row_id))
                    action["restrict"] = restrict
                actions.append(action)
            if has_permission("delete", table):
                action = dict(label=str(T("Delete")),
                              _class="action-btn",
                              url=URL(c="hrm", f="job_title",
                                      args=["[id]", "delete"]),
                              )
                if ownership_required("delete", table):
                    # Check which records can be deleted
                    query = s3_accessible_query("delete", table)
                    rows = db(query).select(table._id)
                    restrict = []
                    rappend = restrict.append
                    for row in rows:
                        row_id = row.get("id", None)
                        if row_id:
                            rappend(str(row_id))
                    action["restrict"] = restrict
                actions.append(action)
            s3.actions = actions
            # Tag the rendered form/item so CSS can target it
            if isinstance(output, dict):
                if "form" in output:
                    output["form"].add_class("hrm_job_title")
                elif "item" in output and hasattr(output["item"], "add_class"):
                    output["item"].add_class("hrm_job_title")
        # Call standard postp
        if callable(standard_postp):
            output = standard_postp(r, output)
        return output
    s3.postp = custom_postp
    return attr

settings.ui.customize_hrm_job_title = customize_hrm_job_title
# -----------------------------------------------------------------------------
def customize_org_facility_fields():
    """
    Customize org_facility for Profile widgets and 'more' popups.

    Sets representations, CRUD strings ("Site" terminology), the list
    fields and a custom CRUD form (with inline facility types and
    status component).
    """
    # Truncate comments fields
    from s3.s3utils import s3_trunk8
    s3_trunk8(lines=2)
    s3db = current.s3db
    tablename = "org_facility"
    table = s3db.org_facility
    table.location_id.represent = s3db.gis_LocationRepresent(sep=" | ")
    table.modified_by.represent = s3_auth_user_represent_name
    table.modified_on.represent = datetime_represent
    field = table.comments
    # Render URLs inside comments as clickable links
    field.represent = lambda body: XML(s3_URLise(body))
    field.comment = None
    table.phone1.label = T("Phone")
    # CRUD strings
    ADD_FAC = T("Add Site")
    current.response.s3.crud_strings[tablename] = Storage(
        title_create = ADD_FAC,
        title_display = T("Site Details"),
        title_list = T("Sites"),
        title_update = T("Edit Site"),
        title_search = T("Search Sites"),
        subtitle_create = ADD_FAC,
        label_list_button = T("List Sites"),
        label_create_button = ADD_FAC,
        label_delete_button = T("Delete Site"),
        msg_record_created = T("Site Added"),
        msg_record_modified = T("Site Updated"),
        msg_record_deleted = T("Site Canceled"),
        msg_list_empty = T("No Sites registered"))
    list_fields = ["name",
                   "code",
                   "site_facility_type.facility_type_id",
                   "organisation_id",
                   "location_id",
                   "location_id$addr_street",
                   "location_id$level",
                   "location_id$parent",
                   "modified_by",
                   "modified_on",
                   "organisation_id$logo",
                   "opening_times",
                   "human_resource.person_id",
                   #"contact",
                   "phone1",
                   "status.facility_status",
                   "status.power_supply_type",
                   "comments",
                   ]
    #from s3.s3validators import IS_ADD_PERSON_WIDGET2
    #from s3.s3widgets import S3AddPersonWidget2
    #field = s3db.hrm_human_resource_site.human_resource_id
    #field.requires = IS_ADD_PERSON_WIDGET2()
    #field.widget = S3AddPersonWidget2(controller="pr")
    crud_form = S3SQLCustomForm("name",
                                "code",
                                S3SQLInlineComponentMultiSelectWidget(
                                    "facility_type",
                                    label = T("Facility Type"),
                                    field = "facility_type_id",
                                    widget = "multiselect",
                                ),
                                "organisation_id",
                                "location_id",
                                "opening_times",
                                # This is too Ugly right now!
                                #S3SQLInlineComponent(
                                #    "human_resource_site",
                                #    label = T("Focal Point"),
                                #    field = ["human_resource_id"],
                                #    multiple = False,
                                #),
                                #"contact",
                                "phone1",
                                # This is too Ugly right now!
                                #S3SQLInlineComponent(
                                #    "needs",
                                #    label = T("Needs"),
                                #    multiple = False,
                                #),
                                S3SQLInlineComponent(
                                    "status",
                                    label = T("Status"),
                                    multiple = False,
                                ),
                                "comments",
                                )
    s3db.configure(tablename,
                   crud_form = crud_form,
                   list_fields = list_fields,
                   )
# -----------------------------------------------------------------------------
def customize_org_facility(**attr):
    """
    Customize org_facility controller.

    Installs a custom prep (field setup, datalist filters, profile page
    widgets) and a custom postp (add button, row actions, CSS class).

    @param attr: controller attributes (returned with hide_filter=False)
    """
    s3 = current.response.s3
    s3db = current.s3db
    table = s3db.org_facility
    # Custom PreP: chain onto whatever prep is already installed
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
            if not result:
                return False
        if r.interactive:
            customize_org_facility_fields()
            # Which levels of Hierarchy are we using?
            hierarchy = current.gis.get_location_hierarchy()
            # NOTE(review): keys().remove() requires Python 2 (list);
            # under Python 3 this would need list(hierarchy.keys())
            levels = hierarchy.keys()
            if len(current.deployment_settings.gis.countries) == 1:
                levels.remove("L0")
            # Filter from a Profile page?
            # If so, then default the fields we know
            get_vars = current.request.get_vars
            location_id = get_vars.get("~.(location)", None)
            organisation_id = get_vars.get("~.(organisation)", None)
            if organisation_id:
                org_field = table.organisation_id
                org_field.default = organisation_id
                org_field.readable = org_field.writable = False
            location_field = table.location_id
            if location_id:
                location_field.default = location_id
                location_field.readable = location_field.writable = False
            else:
                # Don't add new Locations here
                location_field.comment = None
                location_field.requires = IS_LOCATION_SELECTOR2(levels=levels)
                location_field.widget = S3LocationSelectorWidget2(levels=levels,
                                                                  show_address=True,
                                                                  show_map=True)
            # @ToDo: Proper button if we want this & amend functionality for Bootstrap)
            #s3.cancel = True
            if r.method == "datalist":
                # Site selection page
                # 2-column datalist, 6 rows per page
                #s3.dl_pagelength = 12
                #s3.dl_rowsize = 2
                from s3.s3filter import S3TextFilter, S3OptionsFilter, S3LocationFilter
                filter_widgets = [
                    S3LocationFilter("location_id",
                                     levels=levels,
                                     widget="multiselect",
                                     hidden=True,
                                     ),
                    S3OptionsFilter(name="facility_search_type",
                                    label=T("Type"),
                                    field="site_facility_type.facility_type_id",
                                    widget="multiselect",
                                    hidden=True,
                                    ),
                    ]
                #get_vars = current.request.get_vars
                #goods = get_vars.get("needs.goods", None)
                #vol = get_vars.get("needs.vol", None)
                #if goods:
                #    needs_fields = ["needs.goods_details"]
                #    s3.crud_strings["org_facility"].title_list = T("Sites where you can Drop-off Goods")
                #elif vol:
                #    needs_fields = ["needs.vol_details"]
                #    s3.crud_strings["org_facility"].title_list = T("Sites where you can Volunteer your time")
                #else:
                #    yesno = {True: T("Yes"), False: T("No")}
                #    needs_fields = ["needs.goods_details", "needs.vol_details"]
                #    filter_widgets.insert(0, S3OptionsFilter("needs.goods",
                #                                             label = T("Drop-off Goods"),
                #                                             options = yesno,
                #                                             multiple=False,
                #                                             widget="groupedopts",
                #                                             hidden=True,
                #                                             ))
                #    filter_widgets.insert(1, S3OptionsFilter("needs.vol",
                #                                             label = T("Volunteer Time"),
                #                                             options = yesno,
                #                                             multiple=False,
                #                                             widget="groupedopts",
                #                                             hidden=True,
                #                                             ))
                filter_widgets.insert(0, S3TextFilter(["name",
                                                       "code",
                                                       "comments",
                                                       ], #+ needs_fields,
                                                      label = T("Search")))
                s3db.configure("org_facility",
                               # Don't include a Create form in 'More' popups
                               listadd = False,
                               list_layout = render_sites,
                               filter_widgets = filter_widgets,
                               )
            elif r.method == "profile":
                # Customise tables used by widgets
                customize_hrm_human_resource_fields()
                customize_site_needs_fields(profile=True)
                s3db.req_customize_req_fields()
                list_fields = ["name",
                               "id",
                               ]
                record = r.record
                record_id = record.id
                # @ToDo: Add this Site on the Map
                map_widget = dict(label = "Map",
                                  type = "map",
                                  context = "site",
                                  icon = "icon-map",
                                  height = 383,
                                  width = 568,
                                  )
                contacts_widget = dict(label = "Contacts",
                                       title_create = "Add New Contact",
                                       type = "datalist",
                                       tablename = "hrm_human_resource",
                                       context = "site",
                                       create_controller = "pr",
                                       create_function = "person",
                                       icon = "icon-contact",
                                       show_on_map = False, # Since they will show within Sites
                                       list_layout = render_contacts,
                                       )
                reqs_widget = dict(label = "Requests",
                                   title_create = "Add New Request",
                                   type = "datalist",
                                   tablename = "req_req",
                                   context = "site",
                                   # Only open/committed requests
                                   filter = S3FieldSelector("req_status").belongs([0, 1]),
                                   icon = "icon-flag",
                                   show_on_map = False, # Since they will show within Sites
                                   list_layout = s3db.req_render_reqs,
                                   )
                commits_widget = dict(label = "Donations",
                                      #title_create = "Add New Donation",
                                      type = "datalist",
                                      tablename = "req_commit",
                                      context = "site",
                                      filter = S3FieldSelector("cancel") == False,
                                      icon = "icon-truck",
                                      layer = "Donations",
                                      # provided by Catalogue Layer
                                      #marker = "donation",
                                      list_layout = s3db.req_render_commits,
                                      )
                if current.auth.s3_has_permission("update", table, record_id=record_id):
                    edit_btn = A(I(_class = "icon icon-edit"),
                                 _href=URL(c="org", f="facility",
                                           args=[record_id, "update.popup"],
                                           vars={"refresh": "datalist"}),
                                 _class="s3_modal",
                                 _title=s3.crud_strings["org_facility"].title_update,
                                 )
                else:
                    edit_btn = ""
                name = record.name
                code = record.code
                if code:
                    name_code = "%s - %s" % (name, code)
                else:
                    # FIX: previously fell back to `code` (falsy here),
                    # which inverted the intent; fall back to the name.
                    # NOTE(review): name_code is currently unused below.
                    name_code = name
                location = table.location_id.represent(record.location_id)
                organisation_id = record.organisation_id
                db = current.db
                otable = db.org_organisation
                query = (otable.id == organisation_id)
                org = db(query).select(otable.name,
                                       otable.logo,
                                       limitby=(0, 1)).first()
                if org and org.logo:
                    logo = URL(c="default", f="download", args=[org.logo])
                else:
                    # @ToDo: Placeholder
                    logo = "#"
                s3db.configure("org_facility",
                               list_fields = list_fields,
                               profile_title = "%s : %s" % (s3.crud_strings["org_facility"].title_list,
                                                            name),
                               profile_header = DIV(edit_btn,
                                                    IMG(_class="media-object",
                                                        _src=logo,
                                                        ),
                                                    H2(name),
                                                    record.code and P(record.code) or "",
                                                    P(I(_class="icon-sitemap"),
                                                      " ",
                                                      SPAN(org and org.name or current.messages.NONE),
                                                      " ",
                                                      _class="card_1_line",
                                                      ),
                                                    P(I(_class="icon-globe"),
                                                      " ",
                                                      SPAN(location),
                                                      " ",
                                                      _class="card_1_line",
                                                      ),
                                                    P(record.comments,
                                                      _class="s3-truncate"),
                                                    _class="profile_header",
                                                    ),
                               profile_widgets = [reqs_widget,
                                                  map_widget,
                                                  commits_widget,
                                                  contacts_widget,
                                                  ],
                               )
        if r.interactive or r.representation == "aadata":
            # Configure fields
            #table.code.readable = table.code.writable = False
            #table.phone1.readable = table.phone1.writable = False
            table.phone2.readable = table.phone2.writable = False
            table.email.readable = table.email.writable = False
        return True
    s3.prep = custom_prep
    # Custom postp: chain onto whatever postp is already installed
    standard_postp = s3.postp
    def custom_postp(r, output):
        if r.interactive:
            if isinstance(output, dict) and \
               current.auth.s3_has_permission("create", r.table):
                # Insert a Button to Create New in Modal
                output["showadd_btn"] = A(I(_class="icon icon-plus-sign big-add"),
                                          _href=URL(c="org", f="facility",
                                                    args=["create.popup"],
                                                    vars={"refresh": "datalist"}),
                                          _class="btn btn-primary s3_modal",
                                          _role="button",
                                          _title=T("Add New Site"),
                                          )
            actions = [dict(label=str(T("Open")),
                            _class="action-btn",
                            url=URL(c="org", f="facility",
                                    args=["[id]", "read"]))
                       ]
            db = current.db
            auth = current.auth
            has_permission = auth.s3_has_permission
            ownership_required = auth.permission.ownership_required
            s3_accessible_query = auth.s3_accessible_query
            if has_permission("update", table):
                action = dict(label=str(T("Edit")),
                              _class="action-btn",
                              url=URL(c="org", f="facility",
                                      args=["[id]", "update"]),
                              )
                if ownership_required("update", table):
                    # Check which records can be updated
                    query = s3_accessible_query("update", table)
                    rows = db(query).select(table._id)
                    restrict = []
                    rappend = restrict.append
                    for row in rows:
                        row_id = row.get("id", None)
                        if row_id:
                            rappend(str(row_id))
                    action["restrict"] = restrict
                actions.append(action)
            if has_permission("delete", table):
                action = dict(label=str(T("Delete")),
                              _class="action-btn",
                              url=URL(c="org", f="facility",
                                      args=["[id]", "delete"]),
                              )
                if ownership_required("delete", table):
                    # Check which records can be deleted
                    query = s3_accessible_query("delete", table)
                    rows = db(query).select(table._id)
                    restrict = []
                    rappend = restrict.append
                    for row in rows:
                        row_id = row.get("id", None)
                        if row_id:
                            rappend(str(row_id))
                    action["restrict"] = restrict
                actions.append(action)
            s3.actions = actions
            # Tag the rendered form/item so CSS can target it
            if isinstance(output, dict):
                if "form" in output:
                    output["form"].add_class("org_facility")
                elif "item" in output and hasattr(output["item"], "add_class"):
                    output["item"].add_class("org_facility")
        # Call standard postp
        if callable(standard_postp):
            output = standard_postp(r, output)
        return output
    s3.postp = custom_postp
    attr["hide_filter"] = False
    # @ToDo: Don't just hide but prevent building
    #attr["rheader"] = None
    return attr

settings.ui.customize_org_facility = customize_org_facility
# -----------------------------------------------------------------------------
def customize_org_needs_fields(profile=False):
    """
    Field customizations for req_organisation_needs.

    @param profile: True when configuring for a Profile widget, in which
                    case the organisation name column is omitted
    """
    # Truncate the details field(s) in list views
    from s3.s3utils import s3_trunk8
    s3_trunk8(lines=2)

    s3db = current.s3db
    needs_table = s3db.req_organisation_needs
    needs_table.modified_by.represent = s3_auth_user_represent_name
    needs_table.modified_on.represent = datetime_represent
    # Volunteer-time needs are not used in this template
    needs_table.vol.readable = needs_table.vol.writable = False
    needs_table.vol_details.readable = needs_table.vol_details.writable = False

    fields = ["id",
              "organisation_id",
              # @ToDo: Are these better displayed elsewhere in Profile view?
              "organisation_id$logo",
              "organisation_id$phone",
              "organisation_id$website",
              "money",
              "money_details",
              "modified_on",
              "modified_by",
              ]
    if not profile:
        fields.append("organisation_id$name")

    s3db.configure("req_organisation_needs",
                   list_fields=fields,
                   )
    return
# -----------------------------------------------------------------------------
def customize_req_organisation_needs(**attr):
    """
    Customize the req_organisation_needs controller.

    Applies the shared field customizations and passes the controller
    attributes through unchanged.
    """
    customize_org_needs_fields()
    return attr

settings.ui.customize_req_organisation_needs = customize_req_organisation_needs
# -----------------------------------------------------------------------------
def customize_org_organisation(**attr):
"""
Customize org_organisation controller
- Profile Page
- Requests
"""
s3 = current.response.s3
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
if r.interactive:
# Load normal Model
s3db = current.s3db
table = s3db.org_organisation
list_fields = ["id",
"name",
"logo",
"phone",
"website",
"needs.money",
"needs.money_details",
#"needs.vol",
#"needs.vol_details",
]
if r.method == "profile":
# Customise tables used by widgets
customize_hrm_human_resource_fields()
customize_org_facility_fields()
customize_org_needs_fields(profile=True)
contacts_widget = dict(label = "Contacts",
title_create = "Add New Contact",
type = "datalist",
tablename = "hrm_human_resource",
context = "organisation",
create_controller = "pr",
create_function = "person",
icon = "icon-contact",
show_on_map = False, # Since they will show within Offices
list_layout = render_contacts,
)
map_widget = dict(label = "Map",
type = "map",
context = "organisation",
icon = "icon-map",
height = 383,
width = 568,
)
needs_widget = dict(label = "Needs",
title_create = "Add New Need",
type = "datalist",
tablename = "req_organisation_needs",
multiple = False,
context = "organisation",
icon = "icon-hand-up",
show_on_map = False,
list_layout = render_org_needs,
)
reqs_widget = dict(label = "Requests",
title_create = "Add New Request",
type = "datalist",
tablename = "req_req",
context = "organisation",
filter = S3FieldSelector("req_status").belongs([0, 1]),
icon = "icon-flag",
layer = "Requests",
# provided by Catalogue Layer
#marker = "request",
list_layout = s3db.req_render_reqs,
)
commits_widget = dict(label = "Donations",
#title_create = "Add New Donation",
type = "datalist",
tablename = "req_commit",
context = "organisation",
filter = S3FieldSelector("cancel") == False,
icon = "icon-truck",
layer = "Donations",
# provided by Catalogue Layer
#marker = "donation",
list_layout = s3db.req_render_commits,
)
sites_widget = dict(label = "Sites",
title_create = "Add New Site",
type = "datalist",
tablename = "org_facility",
context = "organisation",
filter = S3FieldSelector("obsolete") == False,
icon = "icon-home",
layer = "Facilities",
# provided by Catalogue Layer
#marker = "office",
list_layout = render_sites,
)
record = r.record
record_id = record.id
if current.auth.s3_has_permission("update", table, record_id=record_id):
edit_btn = A(I(_class = "icon icon-edit"),
_href=URL(c="org", f="organisation",
args=[record_id, "update.popup"],
vars={"refresh": "datalist"}),
_class="s3_modal",
_title=s3.crud_strings["org_organisation"].title_update,
)
else:
edit_btn = ""
s3db.configure("org_organisation",
profile_title = "%s : %s" % (s3.crud_strings["org_organisation"].title_list,
record.name),
profile_header = DIV(edit_btn,
IMG(_class="media-object",
_src=URL(c="default", f="download",
args=[record.logo]),
),
H2(record.name),
_class="profile_header",
),
profile_widgets = [reqs_widget,
map_widget,
commits_widget,
needs_widget,
contacts_widget,
sites_widget,
]
)
elif r.method == "datalist":
# Stakeholder selection page
# 2-column datalist, 6 rows per page
s3.dl_pagelength = 12
s3.dl_rowsize = 2
from s3.s3filter import S3TextFilter, S3OptionsFilter
filter_widgets = [
# no other filter widgets here yet?
]
# Needs page
# Truncate details field(s)
from s3.s3utils import s3_trunk8
s3_trunk8(lines=2)
get_vars = current.request.get_vars
money = get_vars.get("needs.money", None)
#vol = get_vars.get("needs.vol", None)
if money:
needs_fields = ["needs.money_details"]
s3.crud_strings["org_organisation"].title_list = T("Organizations soliciting Money")
#elif vol:
# needs_fields = ["needs.vol_details"]
# s3.crud_strings["org_organisation"].title_list = T("Organizations with remote Volunteer opportunities")
else:
yesno = {True: T("Yes"), False: T("No")}
needs_fields = ["needs.money_details", "needs.vol_details"]
filter_widgets.insert(0, S3OptionsFilter("needs.money",
options = yesno,
multiple=False,
hidden=True,
))
#filter_widgets.insert(1, S3OptionsFilter("needs.vol",
# options = yesno,
# multiple=False,
# hidden=True,
# ))
filter_widgets.insert(0, S3TextFilter(["name",
"acronym",
"website",
"comments",
] + needs_fields,
label = T("Search")))
ntable = s3db.req_organisation_needs
s3db.configure("org_organisation",
filter_widgets=filter_widgets
)
# Represent used in rendering
current.auth.settings.table_user.organisation_id.represent = s3db.org_organisation_represent
# Hide fields
table.organisation_type_id.readable = table.organisation_type_id.writable = False
table.region_id.readable = table.region_id.writable = False
table.country.readable = table.country.writable = False
table.year.readable = table.year.writable = False
# Return to List view after create/update/delete (unless done via Modal)
url_next = URL(c="org", f="organisation", args="datalist")
s3db.configure("org_organisation",
create_next = url_next,
delete_next = url_next,
update_next = url_next,
# We want the Create form to be in a modal, not inline, for consistency
listadd = False,
list_fields = list_fields,
list_layout = render_organisations,
)
return True
s3.prep = custom_prep
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
if r.interactive and \
isinstance(output, dict) and \
current.auth.s3_has_permission("create", r.table):
# Insert a Button to Create New in Modal
output["showadd_btn"] = A(I(_class="icon icon-plus-sign big-add"),
_href=URL(c="org", f="organisation",
args=["create.popup"],
vars={"refresh": "datalist"}),
_class="btn btn-primary s3_modal",
_role="button",
_title=T("Add New Organization"),
)
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
return output
s3.postp = custom_postp
attr["hide_filter"] = False
return attr
settings.ui.customize_org_organisation = customize_org_organisation
# -----------------------------------------------------------------------------
def customize_site_needs_fields(profile=False):
s3db = current.s3db
table = s3db.req_site_needs
table.modified_by.represent = s3_auth_user_represent_name
table.modified_on.represent = datetime_represent
list_fields = ["id",
"organisation_id$id",
# @ToDo: Are these better displayed elsewhere in Profile view?
"organisation_id$name",
"organisation_id$logo",
"organisation_id$website",
"location_id$L1",
"location_id$L2",
"location_id$L3",
"location_id$L4",
"location_id$addr_street",
"phone1",
#"goods",
#"goods_details",
#"vol",
#"vol_details",
"modified_on",
"modified_by",
]
if not profile:
list_fields += ["site_id$name"]
s3db.configure("req_site_needs",
list_fields=list_fields,
)
return
s3.customize_site_needs_fields = customize_site_needs_fields
# -----------------------------------------------------------------------------
def customize_pr_person(**attr):
"""
Customize pr_person controller
"""
s3db = current.s3db
s3 = current.response.s3
tablename = "pr_person"
table = s3db.pr_person
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
controller = r.controller
if controller == "pr":
# Call standard prep for 'contacts' view but not 'vol' view
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
if r.method == "validate":
# Can't validate image without the file
image_field = s3db.pr_image.image
image_field.requires = None
if r.interactive or r.representation == "aadata":
if controller == "vol":
# CRUD Strings
ADD_CONTACT = T("Add New Volunteer")
s3.crud_strings[tablename] = Storage(
title_create = T("Add Volunteer"),
title_display = T("Volunteer Details"),
title_list = T("Volunteer Directory"),
title_update = T("Edit Volunteer Details"),
title_search = T("Search Volunteers"),
subtitle_create = ADD_CONTACT,
label_list_button = T("List Volunteers"),
label_create_button = ADD_CONTACT,
label_delete_button = T("Delete Volunteer"),
msg_record_created = T("Volunteer added"),
msg_record_modified = T("Volunteer details updated"),
msg_record_deleted = T("Volunteer deleted"),
msg_list_empty = T("No Volunteers currently registered"))
field = s3db.pr_person_details.place_of_birth
field.label = ""
field.readable = field.writable = True
from gluon.sqlhtml import StringWidget
s3db.hrm_competency.comments.widget = StringWidget().widget
elif controller != "default":
# CRUD Strings
ADD_CONTACT = T("Add New Contact")
s3.crud_strings[tablename] = Storage(
title_create = T("Add Contact"),
title_display = T("Contact Details"),
title_list = T("Contact Directory"),
title_update = T("Edit Contact Details"),
title_search = T("Search Contacts"),
subtitle_create = ADD_CONTACT,
label_list_button = T("List Contacts"),
label_create_button = ADD_CONTACT,
label_delete_button = T("Delete Contact"),
msg_record_created = T("Contact added"),
msg_record_modified = T("Contact details updated"),
msg_record_deleted = T("Contact deleted"),
msg_list_empty = T("No Contacts currently registered"))
MOBILE = settings.get_ui_label_mobile_phone()
EMAIL = T("Email")
htable = s3db.hrm_human_resource
htable.organisation_id.widget = None
site_field = htable.site_id
represent = S3Represent(lookup="org_site")
site_field.represent = represent
site_field.requires = IS_ONE_OF(current.db, "org_site.site_id",
represent,
orderby = "org_site.name")
from s3layouts import S3AddResourceLink
site_field.comment = S3AddResourceLink(c="org", f="facility",
vars={"child": "site_id"},
label=T("Add New Site"),
title=T("Site"),
tooltip=T("If you don't see the Site in the list, you can add a new one by clicking link 'Add New Site'."))
# Best to have no labels when only 1 field in the row
s3db.pr_contact.value.label = ""
image_field = s3db.pr_image.image
image_field.label = ""
# ImageCrop widget doesn't currently work within an Inline Form
image_field.widget = None
hr_fields = ["organisation_id",
"job_title_id",
"site_id",
"site_contact",
]
if r.method in ("create", "update"):
# Context from a Profile page?"
organisation_id = r.get_vars.get("(organisation)", None)
if organisation_id:
field = s3db.hrm_human_resource.organisation_id
field.default = organisation_id
field.readable = field.writable = False
hr_fields.remove("organisation_id")
s3_sql_custom_fields = [
"first_name",
"middle_name",
"last_name",
"gender",
"date_of_birth",
S3SQLInlineComponent(
"person_details",
name = "person_details",
label = T("Place of Birth"),
multiple = False,
fields = ["place_of_birth"],
),
S3SQLInlineComponent(
"competency",
name = "competency",
label = T("Skills"),
fields = ["skill_id",
"comments",
],
),
#S3SQLInlineComponent(
# "human_resource",
# name = "human_resource",
# label = "",
# multiple = False,
# fields = hr_fields,
#),
#S3SQLInlineComponent(
# "image",
# name = "image",
# label = T("Photo"),
# multiple = False,
# fields = ["image"],
# filterby = dict(field = "profile",
# options=[True]
# )
#),
"comments",
]
list_fields = [#(current.messages.ORGANISATION, "human_resource.organisation_id"),
"first_name",
"middle_name",
"last_name",
#(T("Job Title"), "human_resource.job_title_id"),
#(T("Site"), "human_resource.site_id"),
#(T("Site Contact"), "human_resource.site_contact"),
]
# Don't include Email/Phone for unauthenticated users
if current.auth.is_logged_in():
list_fields += [(MOBILE, "phone.value"),
(EMAIL, "email.value"),
]
s3_sql_custom_fields.insert(6,
S3SQLInlineComponent(
"contact",
name = "phone",
label = MOBILE,
multiple = False,
fields = ["value"],
filterby = dict(field = "contact_method",
options = "SMS")),
)
s3_sql_custom_fields.insert(6,
S3SQLInlineComponent(
"contact",
name = "email",
label = EMAIL,
multiple = False,
fields = ["value"],
filterby = dict(field = "contact_method",
options = "EMAIL")),
)
crud_form = S3SQLCustomForm(*s3_sql_custom_fields)
if r.id and controller == "default":
url_next = URL(c="default", f="person", args=[r.id, "read"])
else:
# Return to List view after create/update/delete (unless done via Modal)
url_next = URL(c=controller, f="person")
s3db.configure(tablename,
create_next = url_next,
delete_next = url_next,
update_next = url_next,
crud_form = crud_form,
list_fields = list_fields,
# Don't include a Create form in 'More' popups
listadd = False if r.method=="datalist" else True,
list_layout = render_contacts,
)
# Move fields to their desired Locations
# Disabled as breaks submission of inline_component
#i18n = []
#iappend = i18n.append
#iappend('''i18n.office="%s"''' % T("Office"))
#iappend('''i18n.organisation="%s"''' % T("Organization"))
#iappend('''i18n.job_title="%s"''' % T("Job Title"))
#i18n = '''\n'''.join(i18n)
#s3.js_global.append(i18n)
#s3.scripts.append('/%s/static/themes/DRMP/js/contacts.js' % r.application)
return True
s3.prep = custom_prep
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
if r.interactive and isinstance(output, dict):
output["rheader"] = ""
actions = [dict(label=str(T("Open")),
_class="action-btn",
url=URL(c=r.controller, f="person",
args=["[id]", "read"]))
]
s3.actions = actions
if "form" in output:
output["form"].add_class("pr_person")
elif "item" in output and hasattr(output["item"], "add_class"):
output["item"].add_class("pr_person")
return output
s3.postp = custom_postp
return attr
settings.ui.customize_pr_person = customize_pr_person
# -----------------------------------------------------------------------------
def customize_doc_document(**attr):
"""
Customize doc_document controller
"""
s3 = current.response.s3
s3db = current.s3db
tablename = "doc_document"
table = s3db.doc_document
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
# Filter Out Docs from Newsfeed
current.response.s3.filter = (table.name != None)
if r.interactive:
s3.crud_strings[tablename] = Storage(
title_create = T("Add Document"),
title_display = T("Document"),
title_list = T("Documents"),
title_update = T("Edit Document"),
title_search = T("Search Documents"),
subtitle_create = T("Add Document"),
label_list_button = T("List New Documents"),
label_create_button = T("Add Documents"),
label_delete_button = T("Remove Documents"),
msg_record_created = T("Documents added"),
msg_record_modified = T("Documents updated"),
msg_record_deleted = T("Documents removed"),
msg_list_empty = T("No Documents currently recorded"))
# Force added docs to have a name
table.name.requires = IS_NOT_EMPTY()
list_fields = ["name",
"file",
"url",
"organisation_id",
"comments",
]
crud_form = S3SQLCustomForm(*list_fields)
s3db.configure(tablename,
list_fields = list_fields,
crud_form = crud_form,
)
return True
s3.prep = custom_prep
return attr
settings.ui.customize_doc_document = customize_doc_document
# -----------------------------------------------------------------------------
settings.req.req_type = ["Other"]
settings.req.requester_label = "Contact"
# Uncomment if the User Account logging the Request is NOT normally the Requester
settings.req.requester_is_author = False
# Uncomment to have Donations include a 'Value' field
settings.req.commit_value = True
# Uncomment if the User Account logging the Commitment is NOT normally the Committer
#settings.req.comittter_is_author = False
# Uncomment to allow Donations to be made without a matching Request
#settings.req.commit_without_request = True
# Set the Requester as being an HR for the Site if no HR record yet & as Site contact if none yet exists
settings.req.requester_to_site = True
def customize_req_req(**attr):
"""
Customize req_req controller
"""
s3 = current.response.s3
# Custom PreP
#standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
#if callable(standard_prep):
# result = standard_prep(r)
s3db = current.s3db
if r.component_name == "commit":
s3db.req_customize_commit_fields()
else:
s3db.req_customize_req_fields()
if r.method in ("datalist", "datalist.dl"):
s3.filter = (r.table.req_status.belongs([0, 1]))
elif r.method == "profile":
# Customise tables used by widgets
s3db.req_customize_commit_fields()
customize_org_facility_fields()
record = r.record
record_id = record.id
commits_widget = dict(label = "Donations",
title_create = "Add New Donation",
type = "datalist",
tablename = "req_commit",
context = "request",
default = "req_id=%s" % record_id,
filter = S3FieldSelector("cancel") == False,
icon = "icon-truck",
layer = "Donations",
# provided by Catalogue Layer
#marker = "donation",
list_layout = s3db.req_render_commits,
)
filter = (S3FieldSelector("obsolete") == False)
sites_widget = dict(label = "Sites",
#title_create = "Add New Site",
type = "datalist",
tablename = "org_facility",
multiple = False,
context = "request",
filter = filter,
icon = "icon-home",
layer = "Facilities",
# provided by Catalogue Layer
#marker = "office",
list_layout = render_sites,
)
if current.auth.s3_has_permission("update", r.table, record_id=record_id):
edit_btn = A(I(_class = "icon icon-edit"),
_href=URL(c="req", f="req",
args=[record_id, "update.popup"],
vars={"refresh": "datalist"}),
_class="s3_modal",
_title=s3.crud_strings["req_req"].title_update,
)
else:
edit_btn = ""
db = current.db
stable = db.org_site
query = (stable.site_id == record.site_id)
site = db(query).select(stable.name,
stable.location_id,
stable.organisation_id,
limitby=(0, 1)
).first()
location = s3db.gis_LocationRepresent(sep=" | ")(site.location_id)
otable = db.org_organisation
org = db(otable.id == site.organisation_id).select(otable.name,
otable.logo,
limitby=(0, 1)
).first()
if org and org.logo:
logo = URL(c="default", f="download", args=[org.logo])
else:
# @ToDo: Placeholder
logo = "#"
s3db.configure("req_req",
profile_title = s3.crud_strings["req_req"].title_list,
profile_header = DIV(edit_btn,
A(IMG(_class="media-object",
_src=logo,
),
_class="pull-left",
#_href=org_url,
),
H2(site.name),
P(I(_class="icon-sitemap"),
" ",
SPAN(org and org.name or current.messages.NONE),
" ",
_class="card_1_line",
),
P(I(_class="icon-globe"),
" ",
SPAN(location),
" ",
_class="card_1_line",
),
P(record.purpose,
_class="s3-truncate"),
_class="profile_header",
),
profile_widgets = [commits_widget,
sites_widget,
],
)
return True
s3.prep = custom_prep
# Disable postp
s3.postp = None
attr["hide_filter"] = False
return attr
settings.ui.customize_req_req = customize_req_req
# -----------------------------------------------------------------------------
def customize_req_commit(**attr):
"""
Customize req_commit controller
"""
s3 = current.response.s3
# Custom PreP
#standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
#if callable(standard_prep):
# result = standard_prep(r)
current.s3db.req_customize_commit_fields()
if r.method in ("datalist", "datalist.dl"):
s3.filter = (r.table.cancel != True)
return True
s3.prep = custom_prep
# Disable postp
s3.postp = None
attr["hide_filter"] = False
return attr
settings.ui.customize_req_commit = customize_req_commit
# =============================================================================
# Template Modules
# Comment/uncomment modules here to disable/enable them
settings.modules = OrderedDict([
# Core modules which shouldn't be disabled
("default", Storage(
name_nice = "Home",
restricted = False, # Use ACLs to control access to this module
access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
module_type = None # This item is not shown in the menu
)),
("admin", Storage(
name_nice = "Administration",
#description = "Site Administration",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("appadmin", Storage(
name_nice = "Administration",
#description = "Site Administration",
restricted = True,
module_type = None # No Menu
)),
("errors", Storage(
name_nice = "Ticket Viewer",
#description = "Needed for Breadcrumbs",
restricted = False,
module_type = None # No Menu
)),
("sync", Storage(
name_nice = "Synchronization",
#description = "Synchronization",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("translate", Storage(
name_nice = "Translation Functionality",
#description = "Selective translation of strings based on module.",
module_type = None,
)),
("gis", Storage(
name_nice = "Map",
#description = "Situation Awareness & Geospatial Analysis",
restricted = True,
module_type = 1, # 1st item in the menu
)),
("pr", Storage(
name_nice = "Persons",
#description = "Central point to record details on People",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still)
module_type = None
)),
("org", Storage(
name_nice = "Organizations",
#description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
restricted = True,
module_type = None
)),
# All modules below here should be possible to disable safely
("hrm", Storage(
name_nice = "Contacts",
#description = "Human Resources Management",
restricted = True,
module_type = None,
)),
("vol", Storage(
name_nice = "Volunteers",
#description = "Human Resources Management",
restricted = True,
module_type = None,
)),
("cms", Storage(
name_nice = "Content Management",
restricted = True,
module_type = None,
)),
("doc", Storage(
name_nice = "Documents",
#description = "A library of digital resources, such as photos, documents and reports",
restricted = True,
module_type = None,
)),
("msg", Storage(
name_nice = "Messaging",
#description = "Sends & Receives Alerts via Email & SMS",
restricted = True,
# The user-visible functionality of this module isn't normally required. Rather it's main purpose is to be accessed from other modules.
module_type = None,
)),
#("event", Storage(
# name_nice = "Disasters",
# #description = "Events",
# restricted = True,
# module_type = None
#)),
("req", Storage(
name_nice = "Requests",
#description = "Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.",
restricted = True,
module_type = None,
)),
#("project", Storage(
# name_nice = "Projects",
# restricted = True,
# module_type = None
#)),
("stats", Storage(
name_nice = "Statistics",
restricted = True,
module_type = None
)),
#("vulnerability", Storage(
# name_nice = "Vulnerability",
# restricted = True,
# module_type = None
#)),
#("transport", Storage(
# name_nice = "Transport",
# restricted = True,
# module_type = None
#)),
#("hms", Storage(
# name_nice = "Hospitals",
# restricted = True,
# module_type = None
#)),
#("cr", Storage(
# name_nice = "Shelters",
# restricted = True,
# module_type = None
#)),
("supply", Storage(
name_nice = "Supply Chain Management",
restricted = True,
module_type = None
)),
])
|
flavour/tldrmp
|
private/templates/Libya/config.py
|
Python
|
mit
| 124,696
|
[
"Amber"
] |
d564753ef457d7a37b41192341273bb17c197a639bc2c7e4016f42309e473a40
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from .elastic import *
from .stress import *
from .strain import *
|
dongsenfo/pymatgen
|
pymatgen/analysis/elasticity/__init__.py
|
Python
|
mit
| 177
|
[
"pymatgen"
] |
3d2dd80babbb640fa7d588ab1f060c3a068d461085f853af0dd642391ac68cb2
|
import qt
# Importing vtk initializes vtkPythonMap owned by vtkPythonUtil and prevent
# call to vtkPythonUtil::GetObjectFromPointer() from segfaulting.
# PythonQt internally uses vtkPythonUtil to properly wrap/unwrap VTK objects
from vtk import *
t = _testWrappedVTKQInvokableInstance.getTable()
print t.GetClassName()
t2 = vtkTable()
_testWrappedVTKQInvokableInstance.setTable(t2)
if _testWrappedVTKQInvokableInstance.getTable() != t2:
qt.QApplication.exit(1)
qt.QApplication.exit(0)
|
pieper/CTK
|
Applications/ctkSimplePythonShell/Testing/Python/wrappedVTKQInvokableTest.py
|
Python
|
apache-2.0
| 492
|
[
"VTK"
] |
ab30d610e5eaa8b538d4f33b2e67495c6ca50675d35af25656bc57126d647ec2
|
# encoding: utf-8
from mock import patch
import copy
from django.test import SimpleTestCase
from corehq.apps.app_manager.models import (
DetailColumn,
Application,
FormSchedule,
ScheduleVisit,
SchedulePhase,
SchedulePhaseForm,
FormActionCondition,
)
from corehq.apps.app_manager.exceptions import ScheduleError
from corehq.apps.app_manager.tests.util import TestXmlMixin
class ScheduleTest(SimpleTestCase, TestXmlMixin):
file_path = ('data', 'suite')
def setUp(self):
self.is_usercase_in_use_patch = patch('corehq.apps.app_manager.models.is_usercase_in_use')
self.is_usercase_in_use_mock = self.is_usercase_in_use_patch.start()
self.is_usercase_in_use_mock.return_value = True
self.app = Application.wrap(self.get_json('suite-advanced'))
self.module = self.app.get_module(1)
self.module.has_schedule = True
self.form_1 = self.module.get_form(0)
self.form_2 = self.module.get_form(1)
self.form_3 = self.module.get_form(2)
self._add_form_abbreviations()
self._add_form_schedules()
self._add_form_detail_variable()
def _add_form_abbreviations(self):
self.form_1.schedule_form_id = u"हिन्दी"
self.form_2.schedule_form_id = u"a983e9"
self.form_3.schedule_form_id = u"39f0ea"
def _add_form_schedules(self):
self.form_1.schedule = FormSchedule(
expires=120,
starts=-5,
visits=[
ScheduleVisit(due=5, expires=4, starts=-5),
ScheduleVisit(due=10, expires=9),
ScheduleVisit(starts=5, expires=100, repeats=True, increment=15)
]
)
self.form_2.schedule = FormSchedule(
allow_unscheduled=True,
visits=[
ScheduleVisit(due=7, expires=4),
ScheduleVisit(due=15)
]
)
self.form_3.schedule = FormSchedule(
visits=[
ScheduleVisit(due=9, expires=1),
ScheduleVisit(due=11)
]
)
def _add_form_detail_variable(self):
self.module.case_details.short.columns.append(
DetailColumn(
header={'en': 'Next due'},
model='case',
field='schedule:nextdue',
format='plain',
)
)
def tearDown(self):
self.is_usercase_in_use_patch.stop()
def _apply_schedule_phases(self):
self.module.schedule_phases = [
SchedulePhase( # phase 1
anchor='edd',
forms=[SchedulePhaseForm(form_id=self.form_1.unique_id),
SchedulePhaseForm(form_id=self.form_2.unique_id)],
),
SchedulePhase( # phase 2
anchor='dob',
forms=[SchedulePhaseForm(form_id=self.form_3.unique_id)]
),
]
def test_get_phase(self):
phase = SchedulePhase(
anchor='some_case_property',
forms=[SchedulePhaseForm(form_id=self.form_1.unique_id),
SchedulePhaseForm(form_id=self.form_2.unique_id)],
)
self.module.schedule_phases = [phase]
self.assertEqual(self.form_1.get_phase(), phase)
self.assertEqual(self.form_3.get_phase(), None)
def test_phase_requires_anchor(self):
self.module.schedule_phases = [
SchedulePhase(
forms=[SchedulePhaseForm(form_id=self.form_3.unique_id)]
),
]
with self.assertRaises(ScheduleError):
self.app.create_suite()
def test_get_or_create_schedule_phase(self):
pre_made_phase = SchedulePhase(anchor='sea-floor')
self.module.schedule_phases = [pre_made_phase]
phase, created = self.module.get_or_create_schedule_phase(anchor='hook')
self.assertTrue(created)
phase_2, created = self.module.get_or_create_schedule_phase(anchor='sea-floor')
self.assertFalse(created)
self.assertEqual(phase_2, pre_made_phase)
with self.assertRaises(ScheduleError):
self.module.get_or_create_schedule_phase(anchor=' \n\n\n\t\t')
with self.assertRaises(ScheduleError):
self.module.get_or_create_schedule_phase(anchor=None)
def test_update_schedule_phases(self):
pre_made_phase = SchedulePhase(anchor='foo')
pre_made_phase_2 = SchedulePhase(anchor='bar')
self.module.schedule_phases = [pre_made_phase, pre_made_phase_2]
new_phase = SchedulePhase(anchor='baz')
updated_phases = [new_phase.anchor, pre_made_phase.anchor]
self.module.update_schedule_phases(updated_phases)
self.assertEqual([phase.anchor for phase in self.module.get_schedule_phases()], updated_phases)
# Test reordering
self.module.schedule_phases = [pre_made_phase, pre_made_phase_2]
updated_phases = [pre_made_phase_2.anchor, pre_made_phase.anchor]
self.module.update_schedule_phases(updated_phases)
self.assertEqual([phase.anchor for phase in self.module.get_schedule_phases()], updated_phases)
# Test deletion
self.module.schedule_phases = [pre_made_phase, pre_made_phase_2]
updated_phases = []
self.module.update_schedule_phases(updated_phases)
self.assertEqual([phase.anchor for phase in self.module.get_schedule_phases()], updated_phases)
# Test deletion with forms
phase_with_forms = SchedulePhase(
anchor='edd',
forms=[SchedulePhaseForm(form_id=self.form_1.unique_id)],
)
self.module.schedule_phases = [phase_with_forms]
updated_phases = []
with self.assertRaises(ScheduleError):
self.module.update_schedule_phases(updated_phases)
def test_update_schedule_phase_anchors(self):
pre_made_phase = SchedulePhase(anchor='foo',
forms=[SchedulePhaseForm(form_id=self.form_1.unique_id)],)
pre_made_phase_2 = SchedulePhase(anchor='bar',
forms=[SchedulePhaseForm(form_id=self.form_2.unique_id)])
pre_made_phase_3 = SchedulePhase(anchor='burp')
self.module.schedule_phases = [pre_made_phase, pre_made_phase_2, pre_made_phase_3]
new_anchors = [(2, 'baz'), (1, 'quux')]
self.module.update_schedule_phase_anchors(new_anchors)
self.assertEqual(self.module.schedule_phases[0].forms[0].form_id, self.form_1.unique_id)
self.assertEqual(self.module.schedule_phases[0].anchor, new_anchors[1][1])
self.assertEqual(self.module.schedule_phases[1].forms[0].form_id, self.form_2.unique_id)
self.assertEqual(self.module.schedule_phases[1].anchor, new_anchors[0][1])
self.assertEqual(self.module.schedule_phases[2].anchor, 'burp')
def test_form_in_phase_requires_schedule(self):
self._apply_schedule_phases()
self.form_3.schedule = None
with self.assertRaises(ScheduleError):
self.app.create_suite()
self.module.schedule_phases.pop()
self.app.create_suite()
def test_remove_form_from_phase(self):
form_1 = self.form_1
form_2 = self.form_2
self.module.schedule_phases = [
SchedulePhase(
anchor='dob',
forms=[SchedulePhaseForm(form_id=form_1.unique_id),
SchedulePhaseForm(form_id=form_2.unique_id)]
)
]
phase = next(self.module.get_schedule_phases()) # get the phase through the module so we have a _parent
phase.remove_form(form_1)
self.assertEqual(len(phase.forms), 1)
self.assertEqual([form_2], list(phase.get_forms()))
with self.assertRaises(ScheduleError):
phase.remove_form(form_1)
def test_add_form_to_phase(self):
self.module.schedule_phases = [
SchedulePhase(
anchor='dob',
forms=[SchedulePhaseForm(form_id=self.form_1.unique_id),
SchedulePhaseForm(form_id=self.form_2.unique_id)]
),
SchedulePhase(anchor='second_phase', forms=[]),
]
phases = list(self.module.get_schedule_phases())
phase1 = phases[0]
phase1.add_form(self.form_3)
self.assertEqual(phase1.get_phase_form_index(self.form_3), 2)
# adding a form to a different phase removes it from the first phase
phase2 = phases[1]
phase2.add_form(self.form_3)
self.assertEqual(phase2.get_phase_form_index(self.form_3), 0)
self.assertIsNone(phase1.get_form(self.form_3))
def test_schedule_detail(self):
self._apply_schedule_phases()
suite = self.app.create_suite()
self.assertXmlPartialEqual(self.get_xml('schedule-entry'), suite, "./detail[@id='m1_case_short']")
def test_schedule_fixture(self):
self._apply_schedule_phases()
suite = self.app.create_suite()
self.assertXmlPartialEqual(self.get_xml('schedule-fixture'), suite, './fixture')
def test_multiple_modules(self):
self._apply_schedule_phases()
other_module = self.app.get_module(2)
other_module.has_schedule = True
scheduled_form = other_module.get_form(0)
scheduled_form.schedule = FormSchedule(
visits=[
ScheduleVisit(due=9),
ScheduleVisit(due=11)
]
)
other_module.forms.append(copy.copy(scheduled_form))
other_module.schedule_phases = [
SchedulePhase(
anchor='case_property',
forms=[SchedulePhaseForm(form_id=scheduled_form.unique_id)]
)
]
expected_fixture = u"""
<partial>
<fixture id="schedule:m2:p1:f0">
<schedule expires="" allow_unscheduled="False">
<visit id="1" due="9" repeats="False"/>
<visit id="2" due="11" repeats="False"/>
</schedule>
</fixture>
</partial>
"""
suite = self.app.create_suite()
self.assertXmlPartialEqual(expected_fixture, suite, './fixture[@id="schedule:m2:p1:f0"]')
self.assertXmlHasXpath(suite, './fixture[@id="schedule:m1:p1:f0"]')
def test_form_filtering(self):
    """Each scheduled form's menu command is gated by a `relevant` XPath
    built from the case's schedule state: the current phase must match
    (or be unset), the anchor date must be set and inside the schedule
    window, and either unscheduled visits are allowed or at least one
    upcoming visit exists.
    """
    self._apply_schedule_phases()
    suite = self.app.create_suite()
    form_ids = (self.form_1.schedule_form_id, self.form_2.schedule_form_id)
    case_load_actions = ["case_id_case_clinic", "case_id_load_clinic0"]
    # casedb lookup for the case selected in the current session
    case = ["instance('casedb')/casedb/case[@case_id=instance('commcaresession')/session/data/{}]"
            .format(action) for action in case_load_actions]
    for form_num, form_id in enumerate(form_ids):
        anchor = "{case}/edd".format(case=case[form_num])
        current_schedule_phase = "{case}/current_schedule_phase".format(case=case[form_num])
        visit = "instance('schedule:m1:p1:f{form_num}')/schedule/visit".format(form_num=form_num)
        schedule = "instance('schedule:m1:p1:f{form_num}')/schedule".format(form_num=form_num)
        # phase unset (fresh case) or equal to this form's phase (1)
        current_phase_query = (
            "({current_schedule_phase} = '' or {current_schedule_phase} = 1)"
        ).format(current_schedule_phase=current_schedule_phase)
        # today() within the schedule-level @starts/@expires window
        within_form_relevancy = (
            u"today() >= (date({anchor}) + int({schedule}/@starts)) and "
            u"({schedule}/@expires = '' or today() <= (date({anchor}) + int({schedule}/@expires)))"
        ).format(schedule=schedule, anchor=anchor)
        next_valid_schedules = (
            u"{current_phase_query} and "
            "{anchor} != '' and "
            "({within_form_relevancy})"
        ).format(current_phase_query=current_phase_query, anchor=anchor,
                 within_form_relevancy=within_form_relevancy)
        allow_unscheduled = (
            u"{schedule}/@allow_unscheduled = 'True'"
        ).format(schedule=schedule)
        # visits not yet completed, whose per-visit window contains today();
        # repeating visits key off the last visit date, others off the anchor
        upcoming_scheduled_visits = (
            u"{visit}"
            "[{case}/last_visit_number_{form_id} = '' or "
            "if(@repeats = 'True', @id >= {case}/last_visit_number_{form_id},"
            " @id > {case}/last_visit_number_{form_id})]"
            "[if(@repeats = 'True', "
            "today() >= (date({case}/last_visit_date_{form_id}) + int(@increment) + int(@starts)) and " # noqa
            "(@expires = '' or today() <= (date({case}/last_visit_date_{form_id}) + int(@increment)" # noqa
            " + int(@expires))), "
            "today() >= (date({anchor}) + int(@due) + int(@starts)) and "
            "(@expires = '' or today() <= (date({anchor}) + int(@due) + int(@expires)))"
            ")]"
        ).format(visit=visit, case=case[form_num], form_id=form_id, anchor=anchor)
        visit_allowed = (
            u"{allow_unscheduled} or "
            "count({upcoming_scheduled_visits}) > 0"
        ).format(allow_unscheduled=allow_unscheduled, upcoming_scheduled_visits=upcoming_scheduled_visits)
        filter_condition = (
            u"({next_valid_schedules}) and ({visit_allowed})"
        ).format(next_valid_schedules=next_valid_schedules, visit_allowed=visit_allowed)
        partial = u"""
        <partial>
            <command id='m1-f{form_num}' relevant="{filter_condition}" />
        </partial>
        """.format(form_num=form_num, filter_condition=filter_condition)
        self.assertXmlPartialEqual(partial, suite, './menu/command[@id="m1-f{}"]'.format(form_num))
def _fetch_sources(self):
    """Populate each form's .source from the app's stored XML attachments,
    so the XForm-manipulation tests below have real sources to work on.
    """
    for form in self.module.forms:
        name = '{}.xml'.format(form.unique_id)
        form.source = self.app.lazy_fetch_attachment(name)

# xmlns is added because I needed to use WrappedNode.find() in the next few tests
xmlns = ("xmlns='http://www.w3.org/2002/xforms' "
         "xmlns:h='http://www.w3.org/1999/xhtml' "
         "xmlns:jr='http://openrosa.org/javarosa' "
         "xmlns:orx='http://openrosa.org/jr/xforms' "
         "xmlns:xsd='http://www.w3.org/2001/XMLSchema'")
def test_current_schedule_phase(self):
    """ Current Schedule Phase is set depending on transition and termination conditions """
    self._fetch_sources()
    current_schedule_phase_partial = """
    <partial>
        <bind type="xs:integer"
            nodeset="/data/case_case_clinic/case/update/current_schedule_phase"
            calculate="{value}"
            {xmlns}/>
    </partial>
    """
    transition_question = '/data/successful_birth'
    transition_answer = 'yes'
    self.form_1.schedule.transition_condition = FormActionCondition(
        type='if',
        question=transition_question,
        answer=transition_answer,
    )
    termination_question = '/data/passed_away'
    termination_answer = 'yes'
    self.form_1.schedule.termination_condition = FormActionCondition(
        type='if',
        question=termination_question,
        answer=termination_answer,
    )
    self._apply_schedule_phases()
    xform_1 = self.form_1.wrapped_xform()
    self.form_1.add_stuff_to_xform(xform_1)
    # termination sets the phase to -1; transition advances it to 2;
    # otherwise it stays at this form's phase (1)
    value = "if({termination_condition}, -1, if({transition_condition}, 2, 1))".format(
        termination_condition="{} = '{}'".format(termination_question, termination_answer),
        transition_condition="{} = '{}'".format(transition_question, transition_answer),
    )
    self.assertXmlPartialEqual(
        current_schedule_phase_partial.format(value=value, xmlns=self.xmlns),
        (xform_1.model_node.find(
            './bind[@nodeset="/data/case_case_clinic/case/update/current_schedule_phase"]')
         .render()),
        '.'
    )

def test_current_schedule_phase_no_transitions(self):
    """The current_schedule_phase is set to the phase of the current form"""
    self._fetch_sources()
    self._apply_schedule_phases()
    current_schedule_phase_partial = u"""
    <partial>
        <bind type="xs:integer"
            nodeset="/data/case_load_clinic0/case/update/current_schedule_phase"
            calculate="{value}"
            {xmlns}/>
    </partial>
    """
    # with no transition/termination conditions both guards are false(),
    # so the calculate collapses to the form's own phase id
    value = "if(false(), -1, if(false(), 2, {}))".format(self.form_2.get_phase().id)
    xform_2 = self.form_2.wrapped_xform()
    self.form_2.add_stuff_to_xform(xform_2)
    self.assertXmlPartialEqual(
        current_schedule_phase_partial.format(value=value, xmlns=self.xmlns),
        (xform_2.model_node.find(
            './bind[@nodeset="/data/case_load_clinic0/case/update/current_schedule_phase"]')
         .render()),
        '.'
    )
def test_last_visit_number(self):
    """ Increment the visit number for that particular form. If it is empty, set it to 1 """
    last_visit_number_partial = u"""
    <partial>
        <bind nodeset="/data/case_case_clinic/case/update/last_visit_number_{form_id}"
            calculate="/data/current_visit_number"
            relevant="not(/data/unscheduled_visit)"
            {xmlns}/>
    </partial>
    """
    self._fetch_sources()
    self._apply_schedule_phases()
    xform_1 = self.form_1.wrapped_xform()
    form_id = self.form_1.schedule_form_id
    self.form_1.add_stuff_to_xform(xform_1)
    self.assertXmlPartialEqual(
        last_visit_number_partial.format(form_id=form_id, xmlns=self.xmlns),
        (xform_1.model_node.find(u'./bind[@nodeset="/data/case_case_clinic/case/update/last_visit_number_{}"]'
                                 .format(form_id)).render()),
        '.'
    )

def test_last_visit_date(self):
    """ Set the date of the last visit when a form gets submitted """
    last_visit_date_partial = u"""
    <partial>
        <bind nodeset="/data/case_case_clinic/case/update/last_visit_date_{form_id}"
            type="xsd:dateTime"
            calculate="/data/meta/timeEnd"
            relevant="not(/data/unscheduled_visit)"
            {xmlns}/>
    </partial>
    """
    self._fetch_sources()
    self._apply_schedule_phases()
    xform_1 = self.form_1.wrapped_xform()
    form_id = self.form_1.schedule_form_id
    self.form_1.add_stuff_to_xform(xform_1)
    self.assertXmlPartialEqual(
        last_visit_date_partial.format(form_id=form_id, xmlns=self.xmlns),
        (xform_1.model_node.find(u'./bind[@nodeset="/data/case_case_clinic/case/update/last_visit_date_{}"]'
                                 .format(form_id)).render()),
        '.'
    )

def test_next_visit_date(self):
    """ add next_visit_date to each form """
    next_visit_date_partial = u"""
    <partial>
        <bind nodeset="/data/next_visit_date"
            calculate="date(min({form_names}))"
            {xmlns}/>
    </partial>
    """
    self._fetch_sources()
    self._apply_schedule_phases()
    phase_forms = [self.form_1, self.form_2]
    xform_1 = self.form_1.wrapped_xform()
    self.form_1.add_stuff_to_xform(xform_1)
    # next_visit_date is the earliest of the per-form next-visit dates
    form_names = [u"/data/next_{}".format(f.schedule_form_id) for f in phase_forms]
    self.assertXmlPartialEqual(
        next_visit_date_partial.format(form_names=",".join(form_names), xmlns=self.xmlns),
        xform_1.model_node.find(u'./bind[@nodeset="/data/next_visit_date"]').render(),
        '.'
    )
    # and a /data/next_<form_id> bind exists for every form in the phase
    for form in phase_forms:
        self.assertTrue(
            len(xform_1.model_node.find(u"./bind[@nodeset='/data/next_{}']"
                                        .format(form.schedule_form_id)).render())
            > 0)
|
qedsoftware/commcare-hq
|
corehq/apps/app_manager/tests/test_schedule.py
|
Python
|
bsd-3-clause
| 20,173
|
[
"VisIt"
] |
620a52d0bb63027bce3c75af4fb18f34bf33215dbbf38cce032c414f3689e6da
|
"""
sphinx.writers.html
~~~~~~~~~~~~~~~~~~~
docutils writers handling Sphinx' custom nodes.
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import copy
import os
import posixpath
import re
import warnings
from typing import TYPE_CHECKING, Iterable, Tuple, cast
from docutils import nodes
from docutils.nodes import Element, Node, Text
from docutils.writers.html4css1 import HTMLTranslator as BaseTranslator
from docutils.writers.html4css1 import Writer
from sphinx import addnodes
from sphinx.builders import Builder
from sphinx.deprecation import RemovedInSphinx50Warning
from sphinx.locale import _, __, admonitionlabels
from sphinx.util import logging
from sphinx.util.docutils import SphinxTranslator
from sphinx.util.images import get_image_size
if TYPE_CHECKING:
from sphinx.builders.html import StandaloneHTMLBuilder
logger = logging.getLogger(__name__)
# A good overview of the purpose behind these classes can be found here:
# http://www.arnebrodowski.de/blog/write-your-own-restructuredtext-writer.html
def multiply_length(length: str, scale: int) -> str:
    """Multiply *length* (width or height) by *scale* percent.

    *length* is a CSS-style dimension such as ``"100px"`` or ``"1.5in"``:
    the numeric part is scaled and truncated to an integer and the unit
    suffix is preserved.  Unrecognised values, values with no numeric part
    (e.g. ``"auto"``), and a *scale* of 100 are returned unchanged.
    """
    matched = re.match(r'^(\d*\.?\d*)\s*(\S*)$', length)
    if not matched or scale == 100:
        # unparsable value, or a no-op scale: leave it untouched
        return length
    amount, unit = matched.groups()
    if not amount or amount == '.':
        # The pattern also matches strings with an empty numeric part
        # (e.g. "auto" -> amount ''); float('') / float('.') would raise
        # ValueError, so treat those values as unscalable.
        return length
    result = float(amount) * scale / 100
    # truncate to int, matching the original output format (e.g. "50px")
    return "%s%s" % (int(result), unit)
class HTMLWriter(Writer):
    """docutils Writer that delegates translation to the builder's translator."""

    # override embed-stylesheet default value to 0.
    settings_spec = copy.deepcopy(Writer.settings_spec)
    for _setting in settings_spec[2]:
        if '--embed-stylesheet' in _setting[1]:
            _setting[2]['default'] = 0

    def __init__(self, builder: "StandaloneHTMLBuilder") -> None:
        super().__init__()
        self.builder = builder

    def translate(self) -> None:
        # sadly, this is mostly copied from parent class
        visitor = self.builder.create_translator(self.document, self.builder)
        self.visitor = cast(HTMLTranslator, visitor)
        self.document.walkabout(visitor)
        self.output = self.visitor.astext()
        # copy the translator's document parts onto the writer so that
        # Writer.assemble_parts() exposes them as usual
        for attr in ('head_prefix', 'stylesheet', 'head', 'body_prefix',
                     'body_pre_docinfo', 'docinfo', 'body', 'fragment',
                     'body_suffix', 'meta', 'title', 'subtitle', 'header',
                     'footer', 'html_prolog', 'html_head', 'html_title',
                     'html_subtitle', 'html_body', ):
            setattr(self, attr, getattr(visitor, attr, None))
        # meta tags minus the leading content-type/generator entries
        self.clean_meta = ''.join(self.visitor.meta[2:])
class HTMLTranslator(SphinxTranslator, BaseTranslator):
    """
    Our custom HTML translator.
    """

    # set by the builder via create_translator()
    builder: "StandaloneHTMLBuilder" = None

    def __init__(self, document: nodes.document, builder: Builder) -> None:
        super().__init__(document, builder)
        self.highlighter = self.builder.highlighter
        self.docnames = [self.builder.current_docname]  # for singlehtml builder
        self.manpages_url = self.config.manpages_url
        # >0 while inside literal text that must not be line-wrapped
        self.protect_literal_text = 0
        self.secnumber_suffix = self.config.html_secnumber_suffix
        self.param_separator = ''
        # nesting depth of desc_optional ([...]) parameter groups
        self.optional_param_level = 0
        # per-table / per-field-list row counters for even/odd CSS classes
        self._table_row_index = 0
        self._fieldlist_row_index = 0
        self.required_params_left = 0
    def visit_start_of_file(self, node: Element) -> None:
        # only occurs in the single-file builder
        self.docnames.append(node['docname'])
        # anchor so intra-document links to the merged file still resolve
        self.body.append('<span id="document-%s"></span>' % node['docname'])

    def depart_start_of_file(self, node: Element) -> None:
        self.docnames.pop()

    #############################################################
    # Domain-specific object descriptions
    #############################################################

    # Top-level nodes for descriptions
    ##################################

    def visit_desc(self, node: Element) -> None:
        # an object description renders as a definition list
        self.body.append(self.starttag(node, 'dl'))

    def depart_desc(self, node: Element) -> None:
        self.body.append('</dl>\n\n')

    def visit_desc_signature(self, node: Element) -> None:
        # the id is set automatically
        self.body.append(self.starttag(node, 'dt'))
        # signatures must not be broken by line wrapping
        self.protect_literal_text += 1

    def depart_desc_signature(self, node: Element) -> None:
        self.protect_literal_text -= 1
        if not node.get('is_multiline'):
            # multiline signatures get their permalink per-line instead
            self.add_permalink_ref(node, _('Permalink to this definition'))
        self.body.append('</dt>\n')

    def visit_desc_signature_line(self, node: Element) -> None:
        pass

    def depart_desc_signature_line(self, node: Element) -> None:
        if node.get('add_permalink'):
            # the permalink info is on the parent desc_signature node
            self.add_permalink_ref(node.parent, _('Permalink to this definition'))
        self.body.append('<br />')

    def visit_desc_content(self, node: Element) -> None:
        self.body.append(self.starttag(node, 'dd', ''))

    def depart_desc_content(self, node: Element) -> None:
        self.body.append('</dd>')

    def visit_desc_inline(self, node: Element) -> None:
        self.body.append(self.starttag(node, 'span', ''))

    def depart_desc_inline(self, node: Element) -> None:
        self.body.append('</span>')

    # Nodes for high-level structure in signatures
    ##############################################

    def visit_desc_name(self, node: Element) -> None:
        self.body.append(self.starttag(node, 'code', ''))

    def depart_desc_name(self, node: Element) -> None:
        self.body.append('</code>')

    def visit_desc_addname(self, node: Element) -> None:
        self.body.append(self.starttag(node, 'code', ''))

    def depart_desc_addname(self, node: Element) -> None:
        self.body.append('</code>')

    def visit_desc_type(self, node: Element) -> None:
        pass

    def depart_desc_type(self, node: Element) -> None:
        pass

    def visit_desc_returns(self, node: Element) -> None:
        # rendered as an arrow followed by the return type hint
        self.body.append(' <span class="sig-return">')
        self.body.append('<span class="sig-return-icon">&#x2192;</span>')
        self.body.append(' <span class="sig-return-typehint">')

    def depart_desc_returns(self, node: Element) -> None:
        self.body.append('</span></span>')

    def visit_desc_parameterlist(self, node: Element) -> None:
        self.body.append('<span class="sig-paren">(</span>')
        self.first_param = 1
        self.optional_param_level = 0
        # How many required parameters are left.
        self.required_params_left = sum([isinstance(c, addnodes.desc_parameter)
                                         for c in node.children])
        self.param_separator = node.child_text_separator

    def depart_desc_parameterlist(self, node: Element) -> None:
        self.body.append('<span class="sig-paren">)</span>')

    # If required parameters are still to come, then put the comma after
    # the parameter.  Otherwise, put the comma before.  This ensures that
    # signatures like the following render correctly (see issue #1001):
    #
    #     foo([a, ]b, c[, d])
    #
    def visit_desc_parameter(self, node: Element) -> None:
        if self.first_param:
            self.first_param = 0
        elif not self.required_params_left:
            self.body.append(self.param_separator)
        if self.optional_param_level == 0:
            self.required_params_left -= 1
        if not node.hasattr('noemph'):
            self.body.append('<em>')

    def depart_desc_parameter(self, node: Element) -> None:
        if not node.hasattr('noemph'):
            self.body.append('</em>')
        if self.required_params_left:
            self.body.append(self.param_separator)

    def visit_desc_optional(self, node: Element) -> None:
        self.optional_param_level += 1
        self.body.append('<span class="optional">[</span>')

    def depart_desc_optional(self, node: Element) -> None:
        self.optional_param_level -= 1
        self.body.append('<span class="optional">]</span>')

    def visit_desc_annotation(self, node: Element) -> None:
        self.body.append(self.starttag(node, 'em', '', CLASS='property'))

    def depart_desc_annotation(self, node: Element) -> None:
        self.body.append('</em>')

    ##############################################

    def visit_versionmodified(self, node: Element) -> None:
        # node['type'] is e.g. 'versionadded' / 'versionchanged' / 'deprecated'
        self.body.append(self.starttag(node, 'div', CLASS=node['type']))

    def depart_versionmodified(self, node: Element) -> None:
        self.body.append('</div>\n')

    # overwritten
    def visit_reference(self, node: Element) -> None:
        atts = {'class': 'reference'}
        if node.get('internal') or 'refuri' not in node:
            atts['class'] += ' internal'
        else:
            atts['class'] += ' external'
        if 'refuri' in node:
            # empty refuri falls back to '#' so the <a> stays clickable
            atts['href'] = node['refuri'] or '#'
            if self.settings.cloak_email_addresses and atts['href'].startswith('mailto:'):
                atts['href'] = self.cloak_mailto(atts['href'])
                self.in_mailto = True
        else:
            assert 'refid' in node, \
                   'References must have "refuri" or "refid" attribute.'
            atts['href'] = '#' + node['refid']
        if not isinstance(node.parent, nodes.TextElement):
            # a non-text parent means this is an image reference
            assert len(node) == 1 and isinstance(node[0], nodes.image)
            atts['class'] += ' image-reference'
        if 'reftitle' in node:
            atts['title'] = node['reftitle']
        if 'target' in node:
            atts['target'] = node['target']
        self.body.append(self.starttag(node, 'a', '', **atts))
        if node.get('secnumber'):
            self.body.append(('%s' + self.secnumber_suffix) %
                             '.'.join(map(str, node['secnumber'])))

    def visit_number_reference(self, node: Element) -> None:
        self.visit_reference(node)

    def depart_number_reference(self, node: Element) -> None:
        self.depart_reference(node)

    # overwritten -- we don't want source comments to show up in the HTML
    def visit_comment(self, node: Element) -> None:  # type: ignore
        raise nodes.SkipNode
# overwritten
    # overwritten
    def visit_admonition(self, node: Element, name: str = '') -> None:
        self.body.append(self.starttag(
            node, 'div', CLASS=('admonition ' + name)))
        if name:
            # prepend the localized label ("Note", "Warning", ...) as a title
            node.insert(0, nodes.title(name, admonitionlabels[name]))
        self.set_first_last(node)

    def visit_seealso(self, node: Element) -> None:
        self.visit_admonition(node, 'seealso')

    def depart_seealso(self, node: Element) -> None:
        self.depart_admonition(node)

    def get_secnumber(self, node: Element) -> Tuple[int, ...]:
        """Return the section number tuple for *node*, or None if unnumbered."""
        if node.get('secnumber'):
            return node['secnumber']
        elif isinstance(node.parent, nodes.section):
            if self.builder.name == 'singlehtml':
                # singlehtml keys secnumbers by "docname/#anchor"
                docname = self.docnames[-1]
                anchorname = "%s/#%s" % (docname, node.parent['ids'][0])
                if anchorname not in self.builder.secnumbers:
                    anchorname = "%s/" % docname  # try first heading which has no anchor
            else:
                anchorname = '#' + node.parent['ids'][0]
                if anchorname not in self.builder.secnumbers:
                    anchorname = ''  # try first heading which has no anchor
            if self.builder.secnumbers.get(anchorname):
                return self.builder.secnumbers[anchorname]
        return None

    def add_secnumber(self, node: Element) -> None:
        # emit the section number span in front of the heading text
        secnumber = self.get_secnumber(node)
        if secnumber:
            self.body.append('<span class="section-number">%s</span>' %
                             ('.'.join(map(str, secnumber)) + self.secnumber_suffix))

    def add_fignumber(self, node: Element) -> None:
        # emit the "Fig. N" / "Table N" caption-number span for *node*
        def append_fignumber(figtype: str, figure_id: str) -> None:
            if self.builder.name == 'singlehtml':
                # fignumbers are keyed per source document in singlehtml
                key = "%s/%s" % (self.docnames[-1], figtype)
            else:
                key = figtype
            if figure_id in self.builder.fignumbers.get(key, {}):
                self.body.append('<span class="caption-number">')
                prefix = self.config.numfig_format.get(figtype)
                if prefix is None:
                    msg = __('numfig_format is not defined for %s') % figtype
                    logger.warning(msg)
                else:
                    numbers = self.builder.fignumbers[key][figure_id]
                    self.body.append(prefix % '.'.join(map(str, numbers)) + ' ')
                    self.body.append('</span>')

        figtype = self.builder.env.domains['std'].get_enumerable_node_type(node)
        if figtype:
            if len(node['ids']) == 0:
                msg = __('Any IDs not assigned for %s node') % node.tagname
                logger.warning(msg, location=node)
            else:
                append_fignumber(figtype, node['ids'][0])

    def add_permalink_ref(self, node: Element, title: str) -> None:
        # paragraph-sign style permalink anchor, honoring html_permalinks config
        if node['ids'] and self.config.html_permalinks and self.builder.add_permalinks:
            format = '<a class="headerlink" href="#%s" title="%s">%s</a>'
            self.body.append(format % (node['ids'][0], title,
                                       self.config.html_permalinks_icon))

    def generate_targets_for_listing(self, node: Element) -> None:
        """Generate hyperlink targets for listings.

        Original visit_bullet_list(), visit_definition_list() and visit_enumerated_list()
        generates hyperlink targets inside listing tags (<ul>, <ol> and <dl>) if multiple
        IDs are assigned to listings.  That is invalid DOM structure.
        (This is a bug of docutils <= 0.12)

        This exports hyperlink targets before listings to make valid DOM structure.
        """
        for id in node['ids'][1:]:
            self.body.append('<span id="%s"></span>' % id)
            node['ids'].remove(id)

    # overwritten
    def visit_bullet_list(self, node: Element) -> None:
        if len(node) == 1 and isinstance(node[0], addnodes.toctree):
            # avoid emitting empty <ul></ul>
            raise nodes.SkipNode
        self.generate_targets_for_listing(node)
        super().visit_bullet_list(node)

    # overwritten
    def visit_enumerated_list(self, node: Element) -> None:
        self.generate_targets_for_listing(node)
        super().visit_enumerated_list(node)

    # overwritten
    def visit_definition(self, node: Element) -> None:
        # don't insert </dt> here.
        self.body.append(self.starttag(node, 'dd', ''))

    # overwritten
    def depart_definition(self, node: Element) -> None:
        self.body.append('</dd>\n')

    # overwritten
    def visit_classifier(self, node: Element) -> None:
        self.body.append(self.starttag(node, 'span', '', CLASS='classifier'))

    # overwritten
    def depart_classifier(self, node: Element) -> None:
        self.body.append('</span>')

        next_node: Node = node.next_node(descend=False, siblings=True)
        if not isinstance(next_node, nodes.classifier):
            # close `<dt>` tag at the tail of classifiers
            self.body.append('</dt>')

    # overwritten
    def visit_term(self, node: Element) -> None:
        self.body.append(self.starttag(node, 'dt', ''))

    # overwritten
    def depart_term(self, node: Element) -> None:
        next_node: Node = node.next_node(descend=False, siblings=True)
        if isinstance(next_node, nodes.classifier):
            # Leave the end tag to `self.depart_classifier()`, in case
            # there's a classifier.
            pass
        else:
            if isinstance(node.parent.parent.parent, addnodes.glossary):
                # add permalink if glossary terms
                self.add_permalink_ref(node, _('Permalink to this term'))
            self.body.append('</dt>')
# overwritten
    # overwritten
    def visit_title(self, node: Element) -> None:
        if isinstance(node.parent, addnodes.compact_paragraph) and node.parent.get('toctree'):
            # toctree captions become <p class="caption"> rather than headings
            self.body.append(self.starttag(node, 'p', '', CLASS='caption', ROLE='heading'))
            self.body.append('<span class="caption-text">')
            self.context.append('</span></p>\n')
        else:
            super().visit_title(node)
        self.add_secnumber(node)
        self.add_fignumber(node.parent)
        if isinstance(node.parent, nodes.table):
            self.body.append('<span class="caption-text">')

    def depart_title(self, node: Element) -> None:
        # context holds the close tag pushed by visit_title / the base class
        close_tag = self.context[-1]
        if (self.config.html_permalinks and self.builder.add_permalinks and
                node.parent.hasattr('ids') and node.parent['ids']):
            # add permalink anchor
            if close_tag.startswith('</h'):
                self.add_permalink_ref(node.parent, _('Permalink to this headline'))
            elif close_tag.startswith('</a></h'):
                self.body.append('</a><a class="headerlink" href="#%s" ' %
                                 node.parent['ids'][0] +
                                 'title="%s">%s' % (
                                     _('Permalink to this headline'),
                                     self.config.html_permalinks_icon))
            elif isinstance(node.parent, nodes.table):
                self.body.append('</span>')
                self.add_permalink_ref(node.parent, _('Permalink to this table'))
        elif isinstance(node.parent, nodes.table):
            # permalinks disabled: still close the caption-text span
            self.body.append('</span>')

        super().depart_title(node)
    # overwritten
    def visit_literal_block(self, node: Element) -> None:
        if node.rawsource != node.astext():
            # most probably a parsed-literal block -- don't highlight
            return super().visit_literal_block(node)
        lang = node.get('language', 'default')
        linenos = node.get('linenos', False)
        highlight_args = node.get('highlight_args', {})
        highlight_args['force'] = node.get('force', False)
        opts = self.config.highlight_options.get(lang, {})

        if linenos and self.config.html_codeblock_linenos_style:
            linenos = self.config.html_codeblock_linenos_style

        highlighted = self.highlighter.highlight_block(
            node.rawsource, lang, opts=opts, linenos=linenos,
            location=node, **highlight_args
        )
        starttag = self.starttag(node, 'div', suffix='',
                                 CLASS='highlight-%s notranslate' % lang)
        self.body.append(starttag + highlighted + '</div>\n')
        # the highlighted HTML already contains the block's text
        raise nodes.SkipNode

    def visit_caption(self, node: Element) -> None:
        if isinstance(node.parent, nodes.container) and node.parent.get('literal_block'):
            self.body.append('<div class="code-block-caption">')
        else:
            super().visit_caption(node)
        self.add_fignumber(node.parent)
        self.body.append(self.starttag(node, 'span', '', CLASS='caption-text'))

    def depart_caption(self, node: Element) -> None:
        self.body.append('</span>')

        # append permalink if available
        if isinstance(node.parent, nodes.container) and node.parent.get('literal_block'):
            self.add_permalink_ref(node.parent, _('Permalink to this code'))
        elif isinstance(node.parent, nodes.figure):
            self.add_permalink_ref(node.parent, _('Permalink to this image'))
        elif node.parent.get('toctree'):
            self.add_permalink_ref(node.parent.parent, _('Permalink to this toctree'))

        if isinstance(node.parent, nodes.container) and node.parent.get('literal_block'):
            self.body.append('</div>\n')
        else:
            super().depart_caption(node)

    def visit_doctest_block(self, node: Element) -> None:
        # doctests are highlighted just like literal blocks
        self.visit_literal_block(node)

    # overwritten to add the <div> (for XHTML compliance)
    def visit_block_quote(self, node: Element) -> None:
        self.body.append(self.starttag(node, 'blockquote') + '<div>')

    def depart_block_quote(self, node: Element) -> None:
        self.body.append('</div></blockquote>\n')

    # overwritten
    def visit_literal(self, node: Element) -> None:
        if 'kbd' in node['classes']:
            self.body.append(self.starttag(node, 'kbd', '',
                                           CLASS='docutils literal notranslate'))
        else:
            self.body.append(self.starttag(node, 'code', '',
                                           CLASS='docutils literal notranslate'))
            # only <code> content is wrap-protected (see visit_Text)
            self.protect_literal_text += 1

    def depart_literal(self, node: Element) -> None:
        if 'kbd' in node['classes']:
            self.body.append('</kbd>')
        else:
            self.protect_literal_text -= 1
            self.body.append('</code>')

    def visit_productionlist(self, node: Element) -> None:
        # grammar production lists render as an aligned <pre> block
        self.body.append(self.starttag(node, 'pre'))
        names = []
        productionlist = cast(Iterable[addnodes.production], node)
        for production in productionlist:
            names.append(production['tokenname'])
        maxlen = max(len(name) for name in names)
        lastname = None
        for production in productionlist:
            if production['tokenname']:
                # left-justify so the "::=" columns line up
                lastname = production['tokenname'].ljust(maxlen)
                self.body.append(self.starttag(production, 'strong', ''))
                self.body.append(lastname + '</strong> ::= ')
            elif lastname is not None:
                # continuation line: pad to the width of the previous name
                self.body.append('%s     ' % (' ' * len(lastname)))
            production.walkabout(self)
            self.body.append('\n')
        self.body.append('</pre>\n')
        raise nodes.SkipNode

    def depart_productionlist(self, node: Element) -> None:
        pass

    def visit_production(self, node: Element) -> None:
        pass

    def depart_production(self, node: Element) -> None:
        pass

    def visit_centered(self, node: Element) -> None:
        self.body.append(self.starttag(node, 'p', CLASS="centered") +
                         '<strong>')

    def depart_centered(self, node: Element) -> None:
        self.body.append('</strong></p>')

    # overwritten
    def should_be_compact_paragraph(self, node: Node) -> bool:
        """Determine if the <p> tags around paragraph can be omitted."""
        if isinstance(node.parent, addnodes.desc_content):
            # Never compact desc_content items.
            return False
        if isinstance(node.parent, addnodes.versionmodified):
            # Never compact versionmodified nodes.
            return False
        return super().should_be_compact_paragraph(node)

    def visit_compact_paragraph(self, node: Element) -> None:
        pass

    def depart_compact_paragraph(self, node: Element) -> None:
        pass

    def visit_download_reference(self, node: Element) -> None:
        atts = {'class': 'reference download',
                'download': ''}

        if not self.builder.download_support:
            # no <a>: push an empty close tag for depart_download_reference
            self.context.append('')
        elif 'refuri' in node:
            atts['class'] += ' external'
            atts['href'] = node['refuri']
            self.body.append(self.starttag(node, 'a', '', **atts))
            self.context.append('</a>')
        elif 'filename' in node:
            atts['class'] += ' internal'
            atts['href'] = posixpath.join(self.builder.dlpath, node['filename'])
            self.body.append(self.starttag(node, 'a', '', **atts))
            self.context.append('</a>')
        else:
            self.context.append('')

    def depart_download_reference(self, node: Element) -> None:
        self.body.append(self.context.pop())
    # overwritten
    def visit_figure(self, node: Element) -> None:
        # set align=default if align not specified to give a default style
        node.setdefault('align', 'default')

        return super().visit_figure(node)

    # overwritten
    def visit_image(self, node: Element) -> None:
        olduri = node['uri']
        # rewrite the URI if the environment knows about it
        if olduri in self.builder.images:
            node['uri'] = posixpath.join(self.builder.imgpath,
                                         self.builder.images[olduri])

        if 'scale' in node:
            # Try to figure out image height and width.  Docutils does that too,
            # but it tries the final file name, which does not necessarily exist
            # yet at the time the HTML file is written.
            if not ('width' in node and 'height' in node):
                size = get_image_size(os.path.join(self.builder.srcdir, olduri))
                if size is None:
                    logger.warning(__('Could not obtain image size. :scale: option is ignored.'),  # NOQA
                                   location=node)
                else:
                    if 'width' not in node:
                        node['width'] = str(size[0])
                    if 'height' not in node:
                        node['height'] = str(size[1])

        uri = node['uri']
        if uri.lower().endswith(('svg', 'svgz')):
            # docutils' <img> handling mangles SVGs, so emit the tag ourselves
            atts = {'src': uri}
            if 'width' in node:
                atts['width'] = node['width']
            if 'height' in node:
                atts['height'] = node['height']
            if 'scale' in node:
                if 'width' in atts:
                    atts['width'] = multiply_length(atts['width'], node['scale'])
                if 'height' in atts:
                    atts['height'] = multiply_length(atts['height'], node['scale'])
            atts['alt'] = node.get('alt', uri)
            if 'align' in node:
                atts['class'] = 'align-%s' % node['align']
            self.body.append(self.emptytag(node, 'img', '', **atts))
            return

        super().visit_image(node)

    # overwritten
    def depart_image(self, node: Element) -> None:
        if node['uri'].lower().endswith(('svg', 'svgz')):
            # the SVG <img> was already fully emitted in visit_image
            pass
        else:
            super().depart_image(node)

    def visit_toctree(self, node: Element) -> None:
        # this only happens when formatting a toc from env.tocs -- in this
        # case we don't want to include the subtree
        raise nodes.SkipNode

    def visit_index(self, node: Element) -> None:
        # index entries produce no HTML output
        raise nodes.SkipNode

    def visit_tabular_col_spec(self, node: Element) -> None:
        # LaTeX-only directive; ignored in HTML output
        raise nodes.SkipNode

    def visit_glossary(self, node: Element) -> None:
        pass

    def depart_glossary(self, node: Element) -> None:
        pass

    def visit_acks(self, node: Element) -> None:
        pass

    def depart_acks(self, node: Element) -> None:
        pass

    def visit_hlist(self, node: Element) -> None:
        # horizontal lists render as a single-row table of columns
        self.body.append('<table class="hlist"><tr>')

    def depart_hlist(self, node: Element) -> None:
        self.body.append('</tr></table>\n')

    def visit_hlistcol(self, node: Element) -> None:
        self.body.append('<td>')

    def depart_hlistcol(self, node: Element) -> None:
        self.body.append('</td>')

    def visit_option_group(self, node: Element) -> None:
        super().visit_option_group(node)
        # replace the base class's plain spaces with non-breaking ones
        self.context[-2] = self.context[-2].replace('&nbsp;', '&#160;')

    # overwritten
    def visit_Text(self, node: Text) -> None:
        text = node.astext()
        encoded = self.encode(text)
        if self.protect_literal_text:
            # moved here from base class's visit_literal to support
            # more formatting in literal nodes
            for token in self.words_and_spaces.findall(encoded):
                if token.strip():
                    # protect literal text from line wrapping
                    self.body.append('<span class="pre">%s</span>' % token)
                elif token in ' \n':
                    # allow breaks at whitespace
                    self.body.append(token)
                else:
                    # protect runs of multiple spaces; the last one can wrap
                    self.body.append('&#160;' * (len(token) - 1) + ' ')
        else:
            if self.in_mailto and self.settings.cloak_email_addresses:
                encoded = self.cloak_email(encoded)
            self.body.append(encoded)
def visit_note(self, node: Element) -> None:
self.visit_admonition(node, 'note')
def depart_note(self, node: Element) -> None:
self.depart_admonition(node)
def visit_warning(self, node: Element) -> None:
self.visit_admonition(node, 'warning')
def depart_warning(self, node: Element) -> None:
self.depart_admonition(node)
def visit_attention(self, node: Element) -> None:
self.visit_admonition(node, 'attention')
def depart_attention(self, node: Element) -> None:
self.depart_admonition(node)
def visit_caution(self, node: Element) -> None:
self.visit_admonition(node, 'caution')
def depart_caution(self, node: Element) -> None:
self.depart_admonition(node)
def visit_danger(self, node: Element) -> None:
self.visit_admonition(node, 'danger')
def depart_danger(self, node: Element) -> None:
self.depart_admonition(node)
def visit_error(self, node: Element) -> None:
self.visit_admonition(node, 'error')
def depart_error(self, node: Element) -> None:
self.depart_admonition(node)
def visit_hint(self, node: Element) -> None:
self.visit_admonition(node, 'hint')
def depart_hint(self, node: Element) -> None:
self.depart_admonition(node)
def visit_important(self, node: Element) -> None:
self.visit_admonition(node, 'important')
def depart_important(self, node: Element) -> None:
self.depart_admonition(node)
def visit_tip(self, node: Element) -> None:
self.visit_admonition(node, 'tip')
def depart_tip(self, node: Element) -> None:
self.depart_admonition(node)
# literal_emphasis / literal_strong (Sphinx-specific nodes) are rendered
# exactly like plain emphasis / strong text.
def visit_literal_emphasis(self, node: Element) -> None:
    return self.visit_emphasis(node)
def depart_literal_emphasis(self, node: Element) -> None:
    return self.depart_emphasis(node)
def visit_literal_strong(self, node: Element) -> None:
    return self.visit_strong(node)
def depart_literal_strong(self, node: Element) -> None:
    return self.depart_strong(node)
def visit_abbreviation(self, node: Element) -> None:
    """Open an ``<abbr>`` tag, using the explanation (if any) as its title."""
    if node.hasattr('explanation'):
        attrs = {'title': node['explanation']}
    else:
        attrs = {}
    self.body.append(self.starttag(node, 'abbr', '', **attrs))
def depart_abbreviation(self, node: Element) -> None:
    """Close the ``<abbr>`` tag."""
    self.body.append('</abbr>')
def visit_manpage(self, node: Element) -> None:
    # man page references render as literal emphasis; when the
    # manpages_url setting is present, additionally wrap them in a
    # reference whose URI is built from the node's attributes.
    self.visit_literal_emphasis(node)
    if self.manpages_url:
        node['refuri'] = self.manpages_url.format(**node.attributes)
        self.visit_reference(node)
def depart_manpage(self, node: Element) -> None:
    # close the reference first (if one was opened), then the emphasis
    if self.manpages_url:
        self.depart_reference(node)
    self.depart_literal_emphasis(node)
# overwritten to add even/odd classes
def visit_table(self, node: Element) -> None:
    # reset the row counter used by visit_row for even/odd row striping
    self._table_row_index = 0
    # set align=default if align not specified to give a default style
    node.setdefault('align', 'default')
    return super().visit_table(node)
def visit_row(self, node: Element) -> None:
    """Open a table row (<tr>), tagging it with an alternating even/odd class."""
    self._table_row_index += 1
    parity = 'row-even' if self._table_row_index % 2 == 0 else 'row-odd'
    node['classes'].append(parity)
    self.body.append(self.starttag(node, 'tr', ''))
    # start the per-row column counter at zero
    node.column = 0  # type: ignore
def visit_entry(self, node: Element) -> None:
super().visit_entry(node)
if self.body[-1] == ' ':
self.body[-1] = ' '
def visit_field_list(self, node: Element) -> None:
    # reset the row counter used by visit_field for even/odd striping
    self._fieldlist_row_index = 0
    return super().visit_field_list(node)
def visit_field(self, node: Element) -> None:
    """Open a field-list row (<tr>), tagging it with an alternating even/odd class."""
    self._fieldlist_row_index += 1
    parity = 'field-even' if self._fieldlist_row_index % 2 == 0 else 'field-odd'
    node['classes'].append(parity)
    self.body.append(self.starttag(node, 'tr', '', CLASS='field'))
def visit_field_name(self, node: Element) -> None:
context_count = len(self.context)
super().visit_field_name(node)
if context_count != len(self.context):
self.context[-1] = self.context[-1].replace(' ', ' ')
# Math nodes are delegated to the renderer registered in the application
# registry under the configured math renderer name; inline and block math
# use separate renderer tables. A renderer may omit its depart callback.
def visit_math(self, node: Element, math_env: str = '') -> None:
    name = self.builder.math_renderer_name
    visit, _ = self.builder.app.registry.html_inline_math_renderers[name]
    visit(self, node)
def depart_math(self, node: Element, math_env: str = '') -> None:
    name = self.builder.math_renderer_name
    _, depart = self.builder.app.registry.html_inline_math_renderers[name]
    if depart:
        depart(self, node)
def visit_math_block(self, node: Element, math_env: str = '') -> None:
    name = self.builder.math_renderer_name
    visit, _ = self.builder.app.registry.html_block_math_renderers[name]
    visit(self, node)
def depart_math_block(self, node: Element, math_env: str = '') -> None:
    name = self.builder.math_renderer_name
    _, depart = self.builder.app.registry.html_block_math_renderers[name]
    if depart:
        depart(self, node)
def unknown_visit(self, node: Node) -> None:
    """Fail loudly when a node type has no visitor defined here."""
    node_type = node.__class__.__name__
    raise NotImplementedError('Unknown node: ' + node_type)
@property
def permalink_text(self) -> str:
    # Deprecated alias kept for backwards compatibility; reads the
    # html_permalinks_icon config value and warns on access.
    warnings.warn('HTMLTranslator.permalink_text is deprecated.',
                  RemovedInSphinx50Warning, stacklevel=2)
    return self.config.html_permalinks_icon
|
sonntagsgesicht/regtest
|
.aux/venv/lib/python3.9/site-packages/sphinx/writers/html.py
|
Python
|
apache-2.0
| 34,032
|
[
"VisIt"
] |
b9c5f566e7fce9840c91b678801ed56f2b43416b67330fdfc7560dac84f0f93d
|
#! /usr/bin/env python
"""
put files in the FileCatalog (and Storage Element)
When destination SE is not specified, dput will use COMDIRAC configuration option "default_se".
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import DIRAC
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities.DIRACScript import DIRACScript as Script
@Script()
def main():
    """Entry point for ``dput``: upload local files to the grid.

    Builds a list of ``(local path, LFN)`` pairs from the positional
    arguments, optionally expanding local directories recursively, then
    uploads and registers each file with ``Dirac.addFile``.  Exits with
    a non-zero code if any upload fails.
    """
    from COMDIRAC.Interfaces import error
    from COMDIRAC.Interfaces import critical
    from COMDIRAC.Interfaces import DSession
    from COMDIRAC.Interfaces import DCatalog
    from COMDIRAC.Interfaces import pathFromArgument
    from COMDIRAC.Interfaces import ConfigCache

    class Params(object):
        """Container for the values of the command line switches."""

        def __init__(self):
            self.destinationSE = False
            self.recursive = False

        def setDestinationSE(self, arg):
            self.destinationSE = arg
            return S_OK()

        def getDestinationSE(self):
            return self.destinationSE

        def setRecursive(self, arg=None):
            self.recursive = True
            # BUGFIX: switch callbacks are expected to return S_OK(), as
            # setDestinationSE does; the original returned None.
            return S_OK()

        def getRecursive(self):
            return self.recursive

    params = Params()
    Script.setUsageMessage(
        "\n".join(
            [
                __doc__.split("\n")[1],
                "Usage:",
                " %s [options] local_path[... lfn]" % Script.scriptName,
                "Arguments:",
                " local_path: local file",
                " lfn: file or directory entry in the file catalog",
                "",
                "Examples:",
                " $ dput some_local_file ./some_lfn_file",
                " $ dput local_file1 local_file2 ./some_lfn_dir/",
            ]
        )
    )
    Script.registerSwitch(
        "D:",
        "destination-se=",
        "Storage Element where to put replica",
        params.setDestinationSE,
    )
    Script.registerSwitch(
        "r", "recursive", "recursively put contents of local_path", params.setRecursive
    )
    configCache = ConfigCache()
    Script.parseCommandLine(ignoreErrors=True)
    configCache.cacheConfig()
    args = Script.getPositionalArgs()
    session = DSession()
    catalog = DCatalog()
    from DIRAC.Interfaces.API.Dirac import Dirac

    dirac = Dirac()
    if len(args) < 1:
        error("Error: No argument provided\n%s:" % Script.scriptName)
        Script.showHelp()
        DIRAC.exit(0)
    # default: a single local file uploaded under the same file name
    localPath = args[0]
    lfn = pathFromArgument(session, os.path.basename(localPath))
    pairs = [(localPath, lfn)]
    if len(args) > 1:
        # an explicit destination LFN must be the last argument
        lfn = pathFromArgument(session, args[-1])
        localPaths = args[:-1]
        pairs = []
        if catalog.isDir(lfn):
            # destination is a catalog directory: accept one or more local files
            for lp in localPaths:
                pairs.append((lp, os.path.join(lfn, os.path.basename(lp))))
        else:
            if len(localPaths) > 1:
                # NOTE(review): critical() is assumed to print and exit here
                critical(
                    "Error: Destination LFN must be a directory when registering multiple local files"
                )
            # lfn filename replaces the local filename
            pairs.append((localPath, lfn))
    # destination SE: explicit switch value or configured default
    se = params.getDestinationSE()
    if not se:
        retVal = session.getEnv("default_se", "DIRAC-USER")
        if not retVal["OK"]:
            # BUGFIX: abort on a failed lookup; the original fell through
            # and raised KeyError reading retVal["Value"]
            critical(retVal["Message"])
        se = retVal["Value"]
    exitCode = 0
    if params.getRecursive():
        # expand local directories into one (file, LFN) pair per contained
        # file, preserving the directory layout under the destination LFN
        newPairs = []
        for localPath, lfn in pairs:
            if os.path.isdir(localPath):
                for path, _subdirs, files in os.walk(localPath):
                    newLFNDir = os.path.normpath(
                        os.path.join(lfn, os.path.relpath(path, localPath))
                    )
                    for f in files:
                        # BUGFIX: collect into newPairs; the original
                        # appended to "pairs" while iterating over it
                        newPairs.append(
                            (os.path.join(path, f), os.path.join(newLFNDir, f))
                        )
            else:
                newPairs.append((localPath, lfn))
        pairs = newPairs
    for localPath, lfn in pairs:
        ret = dirac.addFile(lfn, localPath, se, printOutput=False)
        if not ret["OK"]:
            exitCode = -2
            error(lfn + ": " + ret["Message"])
    DIRAC.exit(exitCode)
# standard script entry point
if __name__ == "__main__":
    main()
|
DIRACGrid/COMDIRAC
|
src/COMDIRAC/Interfaces/scripts/dput.py
|
Python
|
gpl-3.0
| 4,412
|
[
"DIRAC"
] |
3f759d9c119798c628fdc81234a689b60e3219fd241f4eee3c891f97137dac76
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from lib.meos import MEoS
from lib import unidades
class Ar(MEoS):
    """Multiparameter equation of state for argon"""
    # substance identification
    name = "argon"
    CASNumber = "7440-37-1"
    formula = "Ar"
    synonym = "R-740"
    # critical point parameters
    rhoc = unidades.Density(535.6)
    Tc = unidades.Temperature(150.687)
    Pc = unidades.Pressure(4863, "kPa")
    M = 39.948 # g/mol
    # triple point and normal boiling temperatures
    Tt = unidades.Temperature(83.8058)
    Tb = unidades.Temperature(87.302)
    # acentric factor and dipole moment
    f_acent = -0.00219
    momentoDipolar = unidades.DipoleMoment(0.0, "Debye")
    id = 98
    # reducing parameters used by the corresponding-states routines
    _Tr = unidades.Temperature(147.707801)
    _rhor = unidades.Density(540.014968)
    _w = 0.000305675
Fi1 = {"ao_log": [1, 1.5],
"pow": [0, 1],
"ao_pow": [8.31666243, -4.94651164],
"ao_exp": [], "titao": []}
CP1 = {"ao": 2.5,
"an": [], "pow": [], "ao_exp": [], "exp": [],
"ao_hyp": [], "hyp": []}
Fi2 = {"ao_log": [1, 1.5],
"pow": [0, 1],
"ao_pow": [8.3166315, -4.9465026],
"ao_exp": [], "titao": [],
"ao_hyp": [], "hyp": []}
helmholtz1 = {
"__type__": "Helmholtz",
"__name__": "FEQ Helmholtz equation of state for argon of Tegeler et al. (1999).",
"__doi__": {"autor": "Tegeler, Ch., Span, R., Wagner, W.",
"title": "A New Equation of State for Argon Covering the Fluid Region for Temperatures From the Melting Line to 700 K at Pressures up to 1000 MPa",
"ref": "J. Phys. Chem. Ref. Data 28, 779 (1999)",
"doi": "10.1063/1.556037"},
"__test__":
#Table 33, Pag 828
"""
>>> st=Ar(T=83.8058, x=0.5)
>>> print "%0.6g %0.5g %0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Gas.rho, st.Liquido.h.kJkg, st.Gas.h.kJkg, \
st.Liquido.s.kJkgK, st.Gas.s.kJkgK, st.Liquido.cv.kJkgK, st.Gas.cv.kJkgK, \
st.Liquido.cp.kJkgK, st.Gas.cp.kJkgK, st.Liquido.w, st.Gas.w)
83.8058 0.068891 1416.77 4.0546 -276.56 -112.85 -2.544 -0.59044 0.5496 0.32471 1.1157 0.55503 862.43 168.12
>>> st=Ar(T=90, x=0.5)
>>> print "%0.6g %0.5g %0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Gas.rho, st.Liquido.h.kJkg, st.Gas.h.kJkg, \
st.Liquido.s.kJkgK, st.Gas.s.kJkgK, st.Liquido.cv.kJkgK, st.Gas.cv.kJkgK, \
st.Liquido.cp.kJkgK, st.Gas.cp.kJkgK, st.Liquido.w, st.Gas.w)
90 0.13351 1378.63 7.4362 -269.61 -110.55 -2.4645 -0.69718 0.52677 0.33094 1.1212 0.57569 819.45 172.83
>>> st=Ar(T=120, x=0.5)
>>> print "%0.6g %0.5g %0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Gas.rho, st.Liquido.h.kJkg, st.Gas.h.kJkg, \
st.Liquido.s.kJkgK, st.Gas.s.kJkgK, st.Liquido.cv.kJkgK, st.Gas.cv.kJkgK, \
st.Liquido.cp.kJkgK, st.Gas.cp.kJkgK, st.Liquido.w, st.Gas.w)
120 1.213 1162.82 60.144 -233.48 -106.71 -2.1274 -1.071 0.45763 0.38934 1.3324 0.86265 584.19 185.09
>>> st=Ar(T=130, x=0.5)
>>> print "%0.6g %0.5g %0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Gas.rho, st.Liquido.h.kJkg, st.Gas.h.kJkg, \
st.Liquido.s.kJkgK, st.Gas.s.kJkgK, st.Liquido.cv.kJkgK, st.Gas.cv.kJkgK, \
st.Liquido.cp.kJkgK, st.Gas.cp.kJkgK, st.Liquido.w, st.Gas.w)
130 2.0255 1068.13 103.56 -219.29 -109.83 -2.0197 -1.1777 0.4492 0.42745 1.5638 1.1717 487.88 184.85
>>> st=Ar(T=140, x=0.5)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Gas.rho, st.Liquido.h.kJkg, st.Gas.h.kJkg, \
st.Liquido.s.kJkgK, st.Gas.s.kJkgK, st.Liquido.cv.kJkgK, st.Gas.cv.kJkgK, \
st.Liquido.cp.kJkgK, st.Gas.cp.kJkgK, st.Liquido.w, st.Gas.w)
140 3.1682 943.71 178.86 -202.29 -117.65 -1.9023 -1.2978 0.45984 0.49404 2.2247 2.1036 371.63 181.5
>>> st=Ar(T=142, x=0.5)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Gas.rho, st.Liquido.h.kJkg, st.Gas.h.kJkg, \
st.Liquido.s.kJkgK, st.Gas.s.kJkgK, st.Liquido.cv.kJkgK, st.Gas.cv.kJkgK, \
st.Liquido.cp.kJkgK, st.Gas.cp.kJkgK, st.Liquido.w, st.Gas.w)
142 3.4435 911.61 201.37 -198.23 -120.25 -1.8756 -1.3265 0.46729 0.51706 2.5349 2.5648 344.14 180
>>> st=Ar(T=144, x=0.5)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Gas.rho, st.Liquido.h.kJkg, st.Gas.h.kJkg, \
st.Liquido.s.kJkgK, st.Gas.s.kJkgK, st.Liquido.cv.kJkgK, st.Gas.cv.kJkgK, \
st.Liquido.cp.kJkgK, st.Gas.cp.kJkgK, st.Liquido.w, st.Gas.w)
144 3.7363 874.98 228.48 -193.76 -123.46 -1.8467 -1.3584 0.47972 0.54719 3.0262 3.3149 313.8 177.93
>>> st=Ar(T=146, x=0.5)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Gas.rho, st.Liquido.h.kJkg, st.Gas.h.kJkg, \
st.Liquido.s.kJkgK, st.Gas.s.kJkgK, st.Liquido.cv.kJkgK, st.Gas.cv.kJkgK, \
st.Liquido.cp.kJkgK, st.Gas.cp.kJkgK, st.Liquido.w, st.Gas.w)
146 4.0479 831.38 262.63 -188.68 -127.57 -1.8142 -1.3956 0.50257 0.58923 3.9312 4.7346 278.88 174.89
>>> st=Ar(T=148, x=0.5)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Gas.rho, st.Liquido.h.kJkg, st.Gas.h.kJkg, \
st.Liquido.s.kJkgK, st.Gas.s.kJkgK, st.Liquido.cv.kJkgK, st.Gas.cv.kJkgK, \
st.Liquido.cp.kJkgK, st.Gas.cp.kJkgK, st.Liquido.w, st.Gas.w)
148 4.3797 775.03 309.6 -182.49 -133.29 -1.7749 -1.4424 0.55094 0.6568 6.2097 8.383 236.08 169.81
>>> st=Ar(T=150, x=0.5)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Gas.rho, st.Liquido.h.kJkg, st.Gas.h.kJkg, \
st.Liquido.s.kJkgK, st.Gas.s.kJkgK, st.Liquido.cv.kJkgK, st.Gas.cv.kJkgK, \
st.Liquido.cp.kJkgK, st.Gas.cp.kJkgK, st.Liquido.w, st.Gas.w)
150 4.7346 680.43 394.5 -173.01 -143.6 -1.7145 -1.5185 0.70603 0.82182 23.582 35.468 174.74 157.01
"""
#Table 33, Pag 828
"""
>>> st=Ar(T=83.814, P=1e5)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.4g %0.5g %0.5g %0.5g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
83.814 1416.8 -276.61 -276.54 -2.544 0.54961 1.1156 862.52
>>> st=Ar(T=700, P=1e5)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
700 0.68619 63.355 209.09 0.44677 0.31223 0.5205 492.95
>>> st=Ar(T=150, P=5e5)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
150 16.605 -110.45 -80.334 -0.70404 0.32098 0.55987 224.97
>>> st=Ar(T=150, P=5e5)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
150 16.605 -110.45 -80.334 -0.70404 0.32098 0.55987 224.97
>>> st=Ar(T=170, P=1e6)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
170 29.723 -105.62 -71.972 -0.78987 0.32356 0.57801 238.88
>>> st=Ar(T=125, P=2e6)
>>> print "%0.6g %0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
125 1122.34 -228.45 -226.66 -2.0773 0.45179 1.4048 544.65
>>> st=Ar(T=135, P=3e6)
>>> print "%0.6g %0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
135 1020.52 -214.63 -211.69 -1.9694 0.44845 1.7159 445.83
>>> st=Ar(T=150, P=3e6)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
150 128.93 -124.64 -101.37 -1.1772 0.39203 1.0311 205.67
>>> st=Ar(T=140, P=4e6)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
140 968.76 -207.81 -203.68 -1.9185 0.45035 1.9268 403.8
>>> st=Ar(T=145, P=4e6)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
145 862.44 -196.53 -191.89 -1.8358 0.48302 3.1513 306.38
>>> st=Ar(T=150, P=4e6)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
150 209.45 -134.47 -115.38 -1.3116 0.46106 1.8982 193.39
>>> st=Ar(T=125, P=5e6)
>>> print "%0.6g %0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
125 1150.27 -231.07 -226.72 -2.0989 0.45236 1.2969 586.37
>>> st=Ar(T=150, P=5e6)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
150 765.37 -186.33 -179.79 -1.7622 0.52622 5.1511 248.19
>>> st=Ar(T=150, P=1e7)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
150 964.88 -203.07 -192.7 -1.8855 0.43203 1.5594 445.1
""",
"R": 8.31451,
"cp": Fi1,
"ref": "OTO",
"Tmin": Tt, "Tmax": 2000., "Pmax": 1000000.0, "rhomax": 50.65,
"Pmin": 68.891, "rhomin": 35.465,
"nr1": [0.887223049900e-1, 0.705148051673, -0.168201156541e1,
-0.149090144315, -0.120248046009, -0.121649787986,
0.400359336268, -0.271360626991, 0.242119245796,
0.578895831856e-2, -0.410973356153e-1, 0.247107615416e-1],
"d1": [1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 4],
"t1": [0., 0.25, 1., 2.75, 4.0, 0., 0.25, 0.75, 2.75, 0.0, 2.0, 0.75],
"nr2": [-0.321813917507, 0.332300176958, 0.310199862873e-1,
-0.307770860024e-1, 0.938911374196e-1, -0.906432106820e-1,
-0.457783492767e-3, -0.826597290252e-4, 0.130134156031e-3,
-0.113978400020e-1, -0.244551699605e-1, -0.643240671760e-1,
0.588894710937e-1, -0.649335521130e-3, -0.138898621584e-1,
0.404898392969, -0.386125195947, -0.188171423322,
0.159776475965, 0.539855185139e-1, -0.289534179580e-1,
-0.130254133814e-1, 0.289486967758e-2, -0.226471343048e-2,
0.176164561964e-2],
"c2": [1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3,
3, 3, 4, 4],
"d2": [1, 1, 3, 4, 4, 5, 7, 10, 10, 2, 2, 4, 4, 8, 3, 5, 5, 6, 6, 7, 7,
8, 9, 5, 6],
"t2": [3., 3.5, 1., 2., 4., 3., 0., 0.5, 1., 1., 7., 5., 6., 6., 10.,
13., 14., 11., 14., 8., 14., 6., 7., 24., 22.],
"gamma2": [1]*25,
"nr3": [0.585524544828e-2, -0.6925190827, 0.153154900305e1,
-0.273804474498e-2],
"d3": [2, 1, 2, 3],
"t3": [3, 1, 0, 0],
"alfa3": [20]*4,
"beta3": [250, 375, 300, 225],
"gamma3": [1.11, 1.14, 1.17, 1.11],
"epsilon3": [1, 1, 1, 1],
"nr4": []}
GERG = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for argon of Kunz and Wagner (2004).",
"__doi__": {"autor": "Kunz, O., Wagner, W.",
"title": "The GERG-2008 Wide-Range Equation of State for Natural Gases and Other Mixtures: An Expansion of GERG-2004",
"ref": "J. Chem. Eng. Data, 2012, 57 (11), pp 3032–3091",
"doi": "10.1021/je300655b"},
"R": 8.314472,
"cp": Fi2,
"ref": "OTO",
"Tmin": Tt, "Tmax": 700., "Pmax": 1000000.0, "rhomax": 50.65,
"Pmin": 68.891, "rhomin": 35.465,
"nr1": [0.85095714803969, -0.24003222943480e1, 0.54127841476466,
0.16919770692538e-1, 0.68825965019035e-1, 0.21428032815338e-3],
"d1": [1, 1, 1, 2, 3, 7],
"t1": [0.25, 1.125, 1.5, 1.375, 0.25, 0.875],
"nr2": [0.17429895321992, -0.33654495604194e-1, -0.13526799857691,
-0.16387350791552e-1, -0.24987666851475e-1, 0.88769204815709e-2],
"c2": [1, 1, 2, 2, 3, 3],
"d2": [2, 5, 1, 4, 3, 4],
"t2": [0.625, 1.75, 3.625, 3.625, 14.5, 12],
"gamma2": [1]*6,
"nr3": [],
"nr4": []}
helmholtz3 = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for argon of Stewart and Jacobsen (1989).",
"__doi__": {"autor": "Stewart, R.B. and Jacobsen, R.T.",
"title": "Thermodynamic Properties of Argon from the Triple Point to 1200 K at Pressures to 1000 MPa",
"ref": "J. Phys. Chem. Ref. Data, 18(2):639-798, 1989",
"doi": "10.1063/1.555829"},
"__test__":
#Table 14, Pag 379
"""
>>> st=Ar(T=83.804, x=0.5, eq=3)
>>> print "%0.6g %0.4g %0.5g %0.5g %0.5g %0.5g %0.4g %0.5g %0.4g %0.4g %0.3g %0.3g" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.hM.Jmol, st.Gas.hM.Jmol, \
st.Liquido.sM.JmolK, st.Gas.sM.JmolK, st.Liquido.cvM.JmolK, \
st.Liquido.cpM.JmolK, st.Liquido.w, st.Gas.w)
83.804 0.06895 35.475 0.10152 -4835.9 1701.4 53.29 131.3 21.34 42.61 853 208
>>> st=Ar(T=90, x=0.5, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.4g %0.5g %0.4g %0.4g %0.3g %0.3g" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.hM.Jmol, st.Gas.hM.Jmol, \
st.Liquido.sM.JmolK, st.Gas.sM.JmolK, st.Liquido.cvM.JmolK, \
st.Liquido.cpM.JmolK, st.Liquido.w, st.Gas.w)
90 0.13362 34.538 0.18651 -4568.1 1777.5 56.35 126.86 20.59 43.49 811 186
>>> st=Ar(T=100, x=0.5, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.4g %0.5g %0.4g %0.4g %0.3g %0.3g" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.hM.Jmol, st.Gas.hM.Jmol, \
st.Liquido.sM.JmolK, st.Gas.sM.JmolK, st.Liquido.cvM.JmolK, \
st.Liquido.cpM.JmolK, st.Liquido.w, st.Gas.w)
100 0.32401 32.918 0.42327 -4120.9 1876.3 61 120.98 19.55 45.48 742 180
>>> st=Ar(T=110, x=0.5, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.4g %0.5g %0.4g %0.4g %0.3g %0.3g" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.hM.Jmol, st.Gas.hM.Jmol, \
st.Liquido.sM.JmolK, st.Gas.sM.JmolK, st.Liquido.cvM.JmolK, \
st.Liquido.cpM.JmolK, st.Liquido.w, st.Gas.w)
110 0.66575 31.133 0.83561 -3648.1 1931.3 65.41 116.13 18.75 48.45 668 181
>>> st=Ar(T=120, x=0.5, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.1f %0.1f %0.2f %0.2f %0.2f %0.2f %0.2f %0.2f %0.0f %0.0f" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.hM.Jmol, st.Gas.hM.Jmol, \
st.Liquido.sM.JmolK, st.Gas.sM.JmolK, st.Liquido.cvM.JmolK, st.Gas.cvM.JmolK,\
st.Liquido.cpM.JmolK, st.Gas.cpM.JmolK, st.Liquido.w, st.Gas.w)
120 1.2139 29.123 1.5090 -3138.4 1917.7 69.68 111.81 18.16 16.75 53.27 36.15 586 182
>>> st=Ar(T=130, x=0.5, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.1f %0.1f %0.2f %0.2f %0.2f %0.2f %0.2f %0.2f %0.0f %0.0f" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.hM.Jmol, st.Gas.hM.Jmol, \
st.Liquido.sM.JmolK, st.Gas.sM.JmolK, st.Liquido.cvM.JmolK, st.Gas.cvM.JmolK,\
st.Liquido.cpM.JmolK, st.Gas.cpM.JmolK, st.Liquido.w, st.Gas.w)
130 2.027 26.748 2.597 -2570 1798.1 73.99 107.59 17.88 18.05 62.79 48.3 490 182
>>> st=Ar(T=140, x=0.5, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.1f %0.1f %0.2f %0.2f %0.2f %0.2f %0.2f %0.2f %0.0f %0.0f" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.hM.Jmol, st.Gas.hM.Jmol, \
st.Liquido.sM.JmolK, st.Gas.sM.JmolK, st.Liquido.cvM.JmolK, st.Gas.cvM.JmolK,\
st.Liquido.cpM.JmolK, st.Gas.cpM.JmolK, st.Liquido.w, st.Gas.w)
140 3.1704 23.59 4.4877 -1883.5 1489.2 78.74 102.83 18.44 20.15 90.97 84.59 368 179
>>> st=Ar(T=150, x=0.5, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.1f %0.1f %0.2f %0.2f %0.2f %0.2f %0.2f %0.2f %0.0f %0.0f" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.hM.Jmol, st.Gas.hM.Jmol, \
st.Liquido.sM.JmolK, st.Gas.sM.JmolK, st.Liquido.cvM.JmolK, st.Gas.cvM.JmolK,\
st.Liquido.cpM.JmolK, st.Gas.cpM.JmolK, st.Liquido.w, st.Gas.w)
150 4.7363 16.973 9.7709 -707.3 487.7 86.28 94.25 23.74 26 762.2 1098.27 198 171
"""
#Table 15, Pag 684
"""
>>> st=Ar(T=84, P=8e4, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.2f %0.2f %0.2f %0.0f" % (\
st.T, st.rhoM, st.uM.Jmol, st.hM.Jmol, st.sM.JmolK, st.cvM.JmolK, st.cpM.JmolK, st.w)
84 35.447 -4829.6 -4827.3 53.39 21.31 42.64 852
>>> st=Ar(T=300, P=8e4, eq=3)
>>> print "%0.6g %0.5f %0.5g %0.5g %0.2f %0.2f %0.2f %0.0f" % (\
st.T, st.rhoM, st.uM.Jmol, st.hM.Jmol, st.sM.JmolK, st.cvM.JmolK, st.cpM.JmolK, st.w)
300 0.03209 3736.4 6229.5 156.81 12.48 20.82 323
>>> st=Ar(T=1200, P=8e4, eq=3)
>>> print "%0.6g %0.5f %0.5g %0.5g %0.2f %0.2f %0.2f %0.0f" % (\
st.T, st.rhoM, st.uM.Jmol, st.hM.Jmol, st.sM.JmolK, st.cvM.JmolK, st.cpM.JmolK, st.w)
1200 0.00802 14965 24944 185.64 12.47 20.79 645
>>> st=Ar(T=84, P=1e5, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.2f %0.2f %0.2f %0.0f" % (\
st.T, st.rhoM, st.uM.Jmol, st.hM.Jmol, st.sM.JmolK, st.cvM.JmolK, st.cpM.JmolK, st.w)
84 35.448 -4829.8 -4827 53.39 21.31 42.63 852
>>> st=Ar(T=150, P=1.5e5, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.2f %0.2f %0.2f %0.0f" % (\
st.T, st.rhoM, st.uM.Jmol, st.hM.Jmol, st.sM.JmolK, st.cvM.JmolK, st.cpM.JmolK, st.w)
150 0.12154 1845.3 3079.4 137.02 12.58 21.23 227
>>> st=Ar(T=116, P=1e6, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.2f %0.2f %0.2f %0.0f" % (\
st.T, st.rhoM, st.uM.Jmol, st.hM.Jmol, st.sM.JmolK, st.cvM.JmolK, st.cpM.JmolK, st.w)
116 29.967 -3380.9 -3347.5 67.97 18.37 50.98 621
>>> st=Ar(T=150, P=2e6, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.2f %0.2f %0.2f %0.0f" % (\
st.T, st.rhoM, st.uM.Jmol, st.hM.Jmol, st.sM.JmolK, st.cvM.JmolK, st.cpM.JmolK, st.w)
150 1.8988 1482.8 2536.2 112.99 14.26 30.07 214
>>> st=Ar(T=138, P=3e6, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.2f %0.2f %0.2f %0.0f" % (\
st.T, st.rhoM, st.uM.Jmol, st.hM.Jmol, st.sM.JmolK, st.cvM.JmolK, st.cpM.JmolK, st.w)
138 24.393 -2164.9 -2041.9 77.65 18.16 80.50 400
>>> st=Ar(T=150, P=3e6, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.2f %0.2f %0.2f %0.0f" % (\
st.T, st.rhoM, st.uM.Jmol, st.hM.Jmol, st.sM.JmolK, st.cvM.JmolK, st.cpM.JmolK, st.w)
150 3.2266 1217.3 2147.1 107.70 15.78 41.36 205
>>> st=Ar(T=144, P=4e6, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.2f %0.4g %0.4g %0.0f" % (\
st.T, st.rhoM, st.uM.Jmol, st.hM.Jmol, st.sM.JmolK, st.cvM.JmolK, st.cpM.JmolK, st.w)
144 22.202 -1757.7 -1577.5 80.64 18.97 110.7 326
>>> st=Ar(T=150, P=4e6, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.2f %0.4g %0.4g %0.0f" % (\
st.T, st.rhoM, st.uM.Jmol, st.hM.Jmol, st.sM.JmolK, st.cvM.JmolK, st.cpM.JmolK, st.w)
150 5.2481 822.44 1584.6 102.31 18.61 76.39 193
>>> st=Ar(T=150, P=5e6, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.2f %0.4g %0.4g %0.0f" % (\
st.T, st.rhoM, st.uM.Jmol, st.hM.Jmol, st.sM.JmolK, st.cvM.JmolK, st.cpM.JmolK, st.w)
150 18.975 -1229.6 -966.11 84.46 21.06 209.4 245
>>> st=Ar(T=100, P=1e7, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.2f %0.4g %0.4g %0.0f" % (\
st.T, st.rhoM, st.uM.Jmol, st.hM.Jmol, st.sM.JmolK, st.cvM.JmolK, st.cpM.JmolK, st.w)
100 33.825 -4265.2 -3969.5 59.62 19.92 42.91 802
>>> st=Ar(T=1200, P=1e7, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.2f %0.4g %0.4g %0.0f" % (\
st.T, st.rhoM, st.uM.Jmol, st.hM.Jmol, st.sM.JmolK, st.cvM.JmolK, st.cpM.JmolK, st.w)
1200 0.98207 14899 25082 145.44 12.53 20.95 660
>>> st=Ar(T=150, P=1e8, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.2f %0.4g %0.4g %0.0f" % (\
st.T, st.rhoM, st.uM.Jmol, st.hM.Jmol, st.sM.JmolK, st.cvM.JmolK, st.cpM.JmolK, st.w)
150 34.219 -3331.3 -408.95 67.05 19.55 36.23 956
>>> st=Ar(T=450, P=1e9, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.2f %0.4g %0.4g %0.0f" % (\
st.T, st.rhoM, st.uM.Jmol, st.hM.Jmol, st.sM.JmolK, st.cvM.JmolK, st.cpM.JmolK, st.w)
450 41.651 2711.6 26720 80.63 20.67 28.69 1815
""",
"R": 8.31434,
"cp": CP1,
"ref": {"Tref": 298.15, "Pref": 101.325, "ho": 6197, "so": 154.732},
"Tc": 150.6633, "Pc": 4860, "rhoc": 13.29, "Tt": 83.804,
"Tmin": 83.804, "Tmax": 1200., "Pmax": 1000000.0, "rhomax": 45.814,
"Pmin": 68.961, "rhomin": 35.475,
"nr1": [0.7918675715, -0.1633346151e1, -0.439530293, 0.1033899999,
0.2061801664, -0.2888681776, 0.439801055, -0.8429550391e-1,
-0.2155658654, 0.4786509099, -0.3525884593, 0.3015073692e-1,
0.2987679059e-1, -0.1522568583e-1, 0.7435785786e-3],
"d1": [1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 6],
"t1": [0.25, 1, 3, 4, 0.25, 1, 2.5, 3.5, 0.75, 1, 1.5, 2.5, 1, 2, 2],
"nr2": [0.7099541624e-1, -0.2904237185e-1, -0.6223078525e-1,
0.1410895187e-3, -0.1481241783e-2, 0.3023342784e-1,
-0.6126784685e-1, 0.270996709e-1, 0.9411034405e-1,
-0.7291645114e-2, -0.1586314976e-2, 0.9510948813e-3,
0.7786181844e-3],
"c2": [3, 3, 2, 4, 6, 3, 3, 3, 2, 2, 4, 2, 2],
"d2": [1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 8, 8],
"t2": [5, 7, 5, 22, 16, 10, 14, 16, 4, 8, 10, 5, 6],
"gamma2": [1]*13,
"nr3": [],
"nr4": []}
helmholtz4 = {
"__type__": "Helmholtz",
"__name__": "short Helmholtz equation of state for argon of Span and Wagner (2003).",
"__doi__": {"autor": "Span, R., Wagner, W.",
"title": "Equations of state for technical applications. II. Results for nonpolar fluids.",
"ref": "Int. J. Thermophys. 24 (2003), 41 – 109.",
"doi": "10.1023/A:1022310214958"},
"__test__": """
>>> st=Ar(T=700, rho=200, eq=4)
>>> print "%0.4f %0.3f %0.4f" % (st.cp0.kJkgK, st.P.MPa, st.cp.kJkgK)
0.5203 31.922 0.5630
>>> st2=Ar(T=750, rho=100, eq=4)
>>> print "%0.2f %0.5f" % (st2.h.kJkg-st.h.kJkg, st2.s.kJkgK-st.s.kJkgK)
25.97 0.18479
""", # Table III, Pag 46
"R": 8.31451,
"cp": Fi1,
"ref": "OTO",
"Tmin": Tt, "Tmax": 750., "Pmax": 100000.0, "rhomax": 50.65,
"Pmin": 69.026, "rhomin": 35.498,
"nr1": [0.85095715, -0.24003223e1, 0.54127841, 0.16919771e-1,
0.68825965e-1, 0.21428033e-3],
"d1": [1, 1, 1, 2, 3, 7],
"t1": [0.25, 1.125, 1.5, 1.375, 0.25, 0.875],
"nr2": [0.17429895, -0.33654496e-1, -0.135268, -0.16387351e-1,
-0.24987667e-1, 0.88769205e-2],
"c2": [1, 1, 2, 2, 3, 3],
"d2": [2, 5, 1, 4, 3, 4],
"t2": [0.625, 1.75, 3.625, 3.625, 14.5, 12],
"gamma2": [1]*6,
"nr3": [],
"nr4": []}
MBWR = {
"__type__": "MBWR",
"__name__": "BWR MBWR equation of state for argon of Younglove (1982).",
"__doi__": {"autor": "Younglove, B.A.",
"title": "Thermophysical Properties of Fluids. I. Argon, Ethylene, Parahydrogen, Nitrogen, Nitrogen Trifluoride, and Oxygen",
"ref": "J. Phys. Chem. Ref. Data, Vol. 11, Suppl. 1, pp. 1-11, 1982.",
"doi": ""},
"R": 8.31434,
"cp": CP1,
"ref": {"Tref": 298.15, "Pref": 101.325, "ho": 6197, "so": 154.732},
"Tmin": 83.80, "Tmax": 400., "Pmax": 101000.0, "rhomax": 50.65,
"Pmin": 68.906, "rhomin": 35.4,
"b": [None, -0.6569731294e-3, 0.1822957801, -0.3649470141e1,
0.1232012107e3, -0.8613578274e4, 0.7978579691e-4, -0.2911489110e-1,
0.7581821758e1, 0.8780488169e4, 0.1423145989e-6, 0.1674146131e-2,
-0.3200447909, 0.2561766372e-4, -0.5475934941e-3, -0.4505032058,
0.2013254653e-4, -0.1678941273e-6, 0.4207329271e-3, -0.5444212996e-5,
-0.8004855011e4, -0.1319304201e6, -0.4954923930e2, 0.8092132177e5,
-0.9870104061e-1, 0.2020441562e1, -0.1637417205e-3, -0.7038944136,
-0.1154324539e-6, 0.1555990117e-4, -0.1492178536e-9,
-0.1001356071e-7, 0.2933963216e-6]}
eq = helmholtz1, MBWR, GERG, helmholtz3, helmholtz4
_PR = -0.0034
_dielectric = {
"eq": 3, "Tref": 273.16, "rhoref": 1000.,
"a0": [], "expt0": [], "expd0": [],
"a1": [4.1414], "expt1": [0], "expd1": [1],
"a2": [1.597, 0.262, -117.9], "expt2": [0, 1, 0], "expd2": [2, 2, 3.1]}
_melting = {
"eq": 1, "Tref": Tt, "Pref": 68.891,
"Tmin": 83.8058, "Tmax": 700.0,
"a1": [1, -7476.26651, 9959.06125, 7476.26651, -9959.06125],
"exp1": [0, 1.05, 1.275, 0, 0],
"a2": [], "exp2": [], "a3": [], "exp3": []}
_sublimation = {
"eq": 3, "Tref": Tt, "Pref": 68.891,
"Tmin": 83.8058, "Tmax": 83.8058,
"a1": [], "exp1": [],
"a2": [-11.1307], "exp2": [1],
"a3": [], "exp3": []}
_surface = {"sigma": [0.037], "exp": [1.25]}
_vapor_Pressure = {
"eq": 5,
"ao": [-5.9409785, 1.3553888, -0.4649761, -1.5399043],
"exp": [1., 1.5, 2., 4.5]}
_liquid_Density = {
"eq": 3,
"ao": [1.5004264, -0.3138129, 0.086461622, -0.041477525],
"exp": [0.334, 2./3, 7./3, 4]}
_vapor_Density = {
"eq": 5,
"ao": [-0.29182e1, 0.97930e-1, -0.13721e1, -0.22898e1],
"exp": [0.72, 1.25, 0.32, 4.34]}
visco0 = {"eq": 1, "omega": 1,
"__name__": "Lemmon (2004)",
"__doi__": {"autor": "Lemmon, E.W. and Jacobsen, R.T.",
"title": "Viscosity and Thermal Conductivity Equations for Nitrogen, Oxygen, Argon, and Air",
"ref": "Int. J. Thermophys., 25:21-69, 2004.",
"doi": "10.1023/B:IJOT.0000022327.04529.f3"},
"__test__": """
>>> st=Ar(T=100, rhom=0)
>>> print "%0.5f" % st.mu.muPas
8.18940
>>> st=Ar(T=300, rhom=0)
>>> print "%0.4f" % st.mu.muPas
22.7241
>>> st=Ar(T=100, rhom=33)
>>> print "%0.3f" % st.mu.muPas
184.232
>>> st=Ar(T=200, rhom=10)
>>> print "%0.4f" % st.mu.muPas
25.5662
>>> st=Ar(T=300, rhom=5)
>>> print "%0.4f" % st.mu.muPas
26.3706
>>> st=Ar(T=150.69, rhom=13.4)
>>> print "%0.4f" % st.mu.muPas
27.6101
""", # Table V, Pag 28
"ek": 143.2, "sigma": 0.335,
"n_poly": [12.19, 13.99, 0.005027, -18.93, -6.698, -3.827],
"t_poly": [0.42, 0.0, 0.95, 0.5, 0.9, 0.8],
"d_poly": [1, 2, 10, 5, 1, 2],
"g_poly": [0, 0, 0, 1, 1, 1],
"c_poly": [0, 0, 0, 2, 4, 4]}
visco1 = {"eq": 3,
"__name__": "Younglove (1986)",
"__doi__": {"autor": "Younglove, B.A. and Hanley, H.J.M.",
"title": "The Viscosity and Thermal Conductivity Coefficients of Gaseous and Liquid Argon",
"ref": "J. Phys. Chem. Ref. Data 15, 1323 (1986)",
"doi": "10.1063/1.555765"},
"__test__": """
>>> st=Ar(T=86, P=1e5, visco=1)
>>> print "%0.1f" % st.mu.muPas
270.0
""", # Table V, Pag 28
"Tref": 1, "muref": 1.0,
"n_poly": [-0.8973188257e5, 0.8259113473e5, -0.2766475915e5,
0.3068539784e4, 0.4553103615e3, -0.1793443839e3,
0.2272225106e2, -0.1350672796e1, 0.3183693230e-1],
"t_poly": [-1., -2./3, -1./3, 0, 1./3, 2./3, 1., 4./3, 5./3],
"n_num": [0.5927733783, -0.4251221169e2, -0.2698477165e-1,
0.3727762288e2, -0.3958508720e4, 0.3636730841e-2,
-0.2633471347e1, 0.2936563322e3, -0.3811869019e-4,
0.4451947464e-1, -0.5385874487e1, 1, -0.1115054926e-1,
-0.1328893444e1],
"t_num": [0, -1, 0, -1, -2, 0, -1, -2, 0, -1, -2, 0, 0, -1],
"d_num": [1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 0, 1, 1],
"n_den": [1.0, -0.1115054926e-1, -0.1328893444e1],
"t_den": [0, 0, -1],
"d_den": [0, 1, 1]}
visco2 = {"eq": 2, "omega": 2,
"collision": [25.7830291943396, -234.320222858983, 814.636688705024,
-1452.04353466585, 1467.17535558104, -870.164951237067,
313.024934147423, -61.2072628957372, 5.07700488990665],
"__name__": "Younglove (1982)",
"__doi__": {"autor": "Younglove, B.A.",
"title": "Thermophysical Properties of Fluids. I. Argon, Ethylene, Parahydrogen, Nitrogen, Nitrogen Trifluoride, and Oxygen",
"ref": "J. Phys. Chem. Ref. Data, Vol. 11, Suppl. 1, pp. 1-11, 1982.",
"doi": ""},
"ek": 152.8, "sigma": 0.3297,
"n_chapman": 0.16871158559818,
"t_chapman": 0,
"F": [5.85384107393e-3, -3.09546765250e-3, 1.4, 152.8],
"E": [-12.313579086, 40.136071933, 11.6160872385243,
-413.04094973717, 4.13624595833e-2, 7.96883967907912,
234.196850483958],
"rhoc": 13.4424752177831}
_viscosity = visco0, visco1, visco2
thermo0 = {"eq": 1,
"__name__": "Lemmon (2004)",
"__doi__": {"autor": "Lemmon, E.W. and Jacobsen, R.T.",
"title": "Viscosity and Thermal Conductivity Equations for Nitrogen, Oxygen, Argon, and Air",
"ref": "Int. J. Thermophys., 25:21-69, 2004.",
"doi": "10.1023/B:IJOT.0000022327.04529.f3"},
"__test__": """
>>> st=Ar(T=100, rhom=0)
>>> print "%0.5f" % st.k.mWmK
6.36587
>>> st=Ar(T=300, rhom=0)
>>> print "%0.4f" % st.k.mWmK
17.8042
>>> st=Ar(T=100, rhom=33)
>>> print "%0.3f" % st.k.mWmK
111.266
>>> st=Ar(T=200, rhom=10)
>>> print "%0.4f" % st.k.mWmK
26.1377
>>> st=Ar(T=300, rhom=5)
>>> print "%0.4f" % st.k.mWmK
23.2302
>>> st=Ar(T=150.69, rhom=13.4)
>>> print "%0.4f" % st.k.mWmK
856.793
""", # Table V, Pag 28
"Tref": 150.687, "kref": 1e-3,
"no": [0.8158, -0.432],
"co": [-97, -0.77],
"Trefb": 150.687, "rhorefb": 13.40742965, "krefb": 1e-3,
"nb": [13.73, 10.07, 0.7375, -33.96, 20.47, -2.274, -3.973],
"tb": [0.0, 0.0, 0.0, 0.8, 1.2, 0.8, 0.5],
"db": [1, 2, 4, 5, 6, 9, 1],
"cb": [0, 0, 0, 2, 2, 2, 4],
"critical": 3,
"gnu": 0.63, "gamma": 1.2415, "R0": 1.01,
"Xio": 0.13e-9, "gam0": 0.55e-1, "qd": 0.32e-9, "Tcref": 301.374}
thermo1 = {"eq": 3,
"__name__": "Younglove (1982)",
"__doi__": {"autor": "Younglove, B.A.",
"title": "Thermophysical Properties of Fluids. I. Argon, Ethylene, Parahydrogen, Nitrogen, Nitrogen Trifluoride, and Oxygen",
"ref": "J. Phys. Chem. Ref. Data, Vol. 11, Suppl. 1, pp. 1-11, 1982.",
"doi": ""},
"ek": 152.8, "sigma": 0.3297,
"Nchapman": 0.16871158559818,
"tchapman": 0,
"b": [2.64712871543e-2, -.216629583011974, 0.709700888884514,
-1.21908891344223, 1.20168985706305, -.700084760049098,
0.24816605762696, -4.79479287295e-2, 3.93679190444e-3],
"F": [9.64428741429e-4, 3.02391316601e-4, 1, 152.8],
"E": [-33.327027332, -355.59415848, 22.2441164817987,
1663.62775376509, 0, 0, 0],
"rhoc": 25.0325423049965,
"ff": 1.7124,
"rm": 0.00000003669}
thermo2 = {"eq": 1,
"__name__": "Perkins (1991)",
"__doi__": {"autor": "Perkins, R.A., Friend, D.G., Roder, H.M., and Nieto de Castro, C.A.",
"title": "Thermal Conductivity Surface of Argon: A Fresh Analysis",
"ref": "Int. J. Thermophys., 12(6):965-984, 1991.",
"doi": "10.1007/BF00503513"},
"Tref": 1.0, "kref": 1e-3,
"no": [.1225067272e5, -.9096222831e4, .2744958263e4,
-.4170419051e3, .2527591169e2, .1604421067e1,
-.2618841031, .1381696924e-1, -.2463115922e-3],
"co": [-1, -2./3, -1./3, 0, 1./3, 2./3, 1., 4./3, 5./3],
"Trefb": 1., "rhorefb": 1., "krefb": 1.,
"nb": [0.757894e-3, 0.612624e-4, -0.205353e-5, 0.745621e-7],
"tb": [0, 0, 0, 0],
"db": [1, 2, 3, 4],
"cb": [0, 0, 0, 0],
"critical": 4,
"Tcref": 150.86, "Pcref": 4905.8, "rhocref": 13.41, "kcref": 1e-3,
"gamma": 1.02,
"expo": 0.46807, "alfa": 39.8, "beta": 5.45, "Xio": 6.0795e-1}
_thermal = thermo0, thermo1, thermo2
|
edusegzy/pychemqt
|
lib/mEoS/Ar.py
|
Python
|
gpl-3.0
| 37,016
|
[
"Jmol"
] |
646bff3678e3c6243d32085a494c490f0a2d63f7da801b2d9ad9e2c1d54d942b
|
from common import Modules, data_strings, load_yara_rules, PEParseModule, ModuleMetadata, is_ip_or_domain
from pefile import PE
class backoff(PEParseModule):
    """Detection and configuration-extraction module for the Backoff
    point-of-sale malware family."""

    def __init__(self):
        # Static metadata describing this detection module.
        md = ModuleMetadata(
            module_name="backoff",
            bot_name="Backoff",
            description="Point of sale malware designed to extract credit card information from RAM",
            authors=["Brian Wallace (@botnet_hunter)"],
            version="1.0.1",
            date="August 24, 2014",
            references=[]
        )
        PEParseModule.__init__(self, md)
        # Rules are loaded lazily on first use; see _generate_yara_rules().
        self.yara_rules = None

    def _generate_yara_rules(self):
        # Load and cache the yara rule set so repeated scans do not
        # re-read the rule file from disk.
        if self.yara_rules is None:
            self.yara_rules = load_yara_rules("backoff.yara")
        return self.yara_rules

    def get_bot_information(self, file_data):
        """Extract candidate C2 URIs from a Backoff sample.

        Scans the printable strings of every PE section, collecting
        gate paths (strings containing ".php") and server names
        (strings that look like an IP or domain), then returns their
        cross product as candidate C2 URIs.

        Args:
            file_data: raw bytes of the PE file to analyse.

        Returns:
            dict containing a "c2s" key (list of {"c2_uri": uri}) when
            both a gate path and a server were found, otherwise {}.
        """
        results = {}
        gate = None
        server = None
        pe = PE(data=file_data)
        for section in pe.sections:
            for s in data_strings(pe.get_data(section.VirtualAddress)):
                if s.find(".php") != -1:
                    # Normalise gate paths to begin with a slash.
                    if s[0] != "/":
                        s = "/" + s
                    if gate is None:
                        gate = set()
                    gate.add(s)
                if is_ip_or_domain(s):
                    if server is None:
                        server = set()
                    server.add(s)
        if server is not None and gate is not None:
            # Every (server, gate-path) combination is a potential C2.
            results["c2s"] = [{"c2_uri": "%s%s" % (ip, path)}
                              for ip in server
                              for path in gate]
        return results
# Register an instance of this module with the global module registry so
# the framework dispatches samples to it.
Modules.list.append(backoff())
|
bwall/bamfdetect
|
BAMF_Detect/modules/backoff.py
|
Python
|
mit
| 1,726
|
[
"Brian"
] |
51d2e73190571ae6ff29a180c1cbb33dbfc7ab4e774c464b0a2cd473999b7cd8
|
"""
Deadlink crawler - https://github.com/taikano/deadlink-crawler
Copyright 2013- taikano and other contributors at GitHub
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from bs4 import BeautifulSoup
import time
import urllib.request
import urllib.error
import urllib.parse
import http.client
import re
import argparse
import socket
import frontier
class Crawler(object):
    """Crawl a website starting from ``init_url`` and report dead links.

    The crawl is limited to URLs matching the ``restrict`` regex
    (defaults to the start URL's domain) and may be narrowed further
    with ``exclude``.  Dead links are collected per referring page and
    printed at the end of the crawl.
    """

    def __init__(self, init_url):
        self.init_url = init_url
        self.init_domain = urllib.parse.urlparse(init_url).netloc

        # Manages the URLs we want to visit or have visited.
        self.frontier = frontier.Frontier()
        self.frontier.add(init_url, None)

        # List of deadlinks for each URL we have,
        # i.e. url1: [deadlink1, deadlink2]
        self.deadlinks = {}

        # Regular expression for URLs we are interested in (our
        # internal URLs).
        self._url_match = None

        # Regular expression for URLs to skip entirely.
        self._exclude = None

        # Timeout in seconds to wait between fetches, so that we do not
        # kill our server.
        self._wait_time = 0

        # Output verbosity flags.
        self._verbose = True
        self._debug = False

        # Whether 40x responses (401/403/407/415) count as deadlinks.
        self._report40x = False

        # Counters for progress reporting.
        self._pages = 0
        self._links = 0
        self._via = 0
        self._dead = 0

    @property
    def restrict(self):
        return self._url_match

    @restrict.setter
    def restrict(self, url_match):
        self._url_match = re.compile(url_match)

    @property
    def exclude(self):
        return self._exclude

    @exclude.setter
    def exclude(self, exclude):
        self._exclude = re.compile(exclude)

    @property
    def verbose(self):
        return self._verbose

    @verbose.setter
    def verbose(self, verbose):
        self._verbose = verbose

    @property
    def debug(self):
        return self._debug

    @debug.setter
    def debug(self, debug):
        # Debug mode implies verbose output.
        self._verbose = debug
        self._debug = debug

    @property
    def report40x(self):
        return self._report40x

    @report40x.setter
    def report40x(self, report40x):
        self._report40x = report40x

    @property
    def wait_time(self):
        return self._wait_time

    @wait_time.setter
    def wait_time(self, seconds):
        if seconds >= 0:
            self._wait_time = seconds

    @property
    def polite_time(self):
        return self.frontier.polite_time

    @polite_time.setter
    def polite_time(self, seconds):
        if seconds >= 0:
            self.frontier.polite_time = seconds

    def crawl(self):
        """Run the crawl until the frontier is exhausted, print a
        summary, and exit (status 0 if no deadlinks, 2 otherwise)."""
        _starttime = time.time()

        if self.restrict is None:
            self.restrict = "https?://%s.*" % self.init_domain

        print("Deadlink-crawler version 1.2")
        print("Starting crawl from URL %s at %s with restriction %s\n"
              % (self.init_url,
                 time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()),
                 self.restrict.pattern))

        while len(self.frontier) > 0:
            time.sleep(self.wait_time)

            next_time, next_url = self.frontier.next()

            # Honour the frontier's per-domain politeness schedule.
            while time.time() < next_time:
                time.sleep(0.5)

            try:
                self.visit_url(next_url[0], next_url[1])
            except urllib.error.URLError:
                continue

        self.print_deadlinks(self.deadlinks)

        _elapsed = time.time() - _starttime
        print("\nSummary:\n--------")
        print("Crawled %d pages and checked %d links in %s time."
              % (self._pages, self._links,
                 time.strftime("%H:%M:%S", time.gmtime(_elapsed))))
        print("Found a total of %d deadlinks in %d different pages"
              % (self._dead, self._via))

        if len(self.deadlinks) == 0:
            exit(0)
        else:
            exit(2)

    def visit_url(self, url, found_via):
        """Check one URL and, if it is a live internal page, queue the
        links found on it."""
        response = self.check_url(url, found_via)

        self.frontier.notify_visit(url)

        if response is not None and not self.excluded(url):
            self.collect_new_urls(url, response.read())

    def collect_new_urls(self, url, html):
        """Extract links from *html* and add them to the frontier."""
        if self._verbose:
            print("Processing %s" % url)

        # Keep track of how many of our site's pages we have crawled,
        # and print status now and then
        self._pages += 1
        if self._pages % 100 == 0:
            print("Processed %s links from %s pages"
                  % (self._links, self._pages))

        try:
            for page in self.extract_urls(html):
                if page is not None:
                    page = page.strip()  # Handle some malformed links
                    page = urllib.parse.urljoin(url, page)
                    if self._exclude is not None and self._exclude.search(page):
                        if self._debug:
                            # Fixed: the two literals were joined with '+'
                            # outside the '%' operator, which raised
                            # TypeError instead of printing.
                            print("Not adding link %s to crawl backlog "
                                  "(excluded by --exclude rule)" % page)
                    else:
                        if self.frontier.add(page, url):
                            if self._debug:
                                print("Adding link %s to crawl backlog" % page)
        except UnicodeEncodeError:
            pass

    def check_url(self, url, found_via):
        """Fetch *url*, recording it as dead when appropriate.

        Returns the open response for live URLs, or None when the URL
        was excluded, dead, or unreadable.
        """
        if self._exclude is not None and self._exclude.search(url):
            if self._debug:
                print("Not checking URL %s (excluded by --exclude rule)" % url)
            return None

        if self._debug:
            print("Checking URL: %s" % url)
        self._links += 1

        request = urllib.request.Request(url)
        try:
            response = urllib.request.urlopen(request, timeout=10)
        except urllib.error.HTTPError as e:
            # We receive an exception in case of 404
            if (e.code == 403 or e.code == 401 or e.code == 407
                    or e.code == 415) and not self._report40x:
                if self._debug:
                    # Fixed operator-precedence bug ('+' outside '%').
                    print("Got HTTP %s - not adding to deadlinks list "
                          "(control with --report40x=True)" % e.code)
            else:
                if self._debug:
                    print("Got HTTP %s - Adding to deadlinks list" % (e.code))
                self.add_to_deadlinks(url, found_via)
            return None
        except http.client.BadStatusLine:
            if self._verbose:
                # Fixed operator-precedence bug ('+' outside '%').
                print("Got Exception BadStatusLine for url %s - Adding to "
                      "deadlinks list" % url)
            self.add_to_deadlinks(url, found_via)
            return None
        except UnicodeEncodeError:
            if self._verbose:
                print("Got UnicodeEncodeError for url %s, skipping" % url)
            return None
        except urllib.error.URLError:
            if self._verbose:
                print("Got URLError for page %s" % url)
            return None
        except socket.timeout:
            # Removed stray leftover debug print of the exception type.
            if self._verbose:
                print("Got timeout reading page %s, skipping" % url)
            return None

        status = response.getcode()
        redirurl = response.geturl()
        if url != redirurl:
            if self._debug:
                print("Followed redirect from %s to %s" % (url, redirurl))
            url = redirurl

        if status is not None and status >= 400:
            self.add_to_deadlinks(url, found_via)

        return response

    def add_to_deadlinks(self, url, found_via):
        """Record *url* as a deadlink found on page *found_via*."""
        self.deadlinks.setdefault(found_via, [])
        self.deadlinks[found_via].append(url)

        self._dead += 1

        if self._verbose:
            print("    Found deadlink: %s" % url)

    def extract_urls(self, page):
        """Return the href of every <a> element in *page*."""
        soup = BeautifulSoup(page, "html.parser")
        return [link.get('href') for link in soup.findAll('a')]

    def excluded(self, url):
        """True when *url* is outside the restrict regex or matched by
        the exclude regex."""
        outside = self._url_match is not None \
            and not self._url_match.search(url)
        excluded = self._exclude is not None and self._exclude.search(url)
        if excluded and self._debug:
            print("Not following URL %s which is excluded by --exclude rule"
                  % url)
        return outside or excluded

    def print_deadlinks(self, deadlinks):
        """Print all deadlinks grouped by the page they were found on."""
        if len(deadlinks) == 0:
            print("\nNo deadlinks were found. Hooray!")
        else:
            print("\nThe following deadlinks were found\n")
            for via in deadlinks:
                self._via += 1
                print("%s" % via)
                for target in deadlinks[via]:
                    print("\t%s" % target)
if __name__ == "__main__":
    # Command-line entry point: parse options, configure a Crawler and run it.
    parser = argparse.ArgumentParser(
        description="Search a website for deadlinks")
    parser.add_argument('url', metavar='URL', type=str,
                        help="The starting point for your crawl")
    parser.add_argument('--restrict', dest='restrict',
                        help="Restrict the crawl to specific URLs via a "
                        + "regular expression (usually your own domain)")
    parser.add_argument('--wait', dest='wait_time', type=float,
                        help="Set some waiting time between each URL "
                        + "fetch (default=0)")
    parser.add_argument('--politeness', dest='polite_time', type=float,
                        help="Set the time to wait between calling two URLs "
                        + "of the same domain (default=1)")
    parser.add_argument('--exclude', dest='exclude',
                        help="Exclude URLs matching the given regex from "
                        + "crawl and deadlink-checking")
    parser.add_argument('--silent', dest='silent', action='store_true',
                        default=False, help="Turn off verbose output")
    parser.add_argument('--debug', dest='debug', action='store_true',
                        default=False, help="Be super-verbose")
    parser.add_argument('--report40x', dest='report40x', action='store_true',
                        default=False,
                        # Fixed: the old help text described the opposite of
                        # what the flag does (it ENABLES reporting 40x codes).
                        help="Also report HTTP 401/403/407/415 responses "
                        + "as deadlinks (by default they are not counted "
                        + "as dead)")

    args = parser.parse_args()

    c = Crawler(args.url)
    if args.restrict:
        c.restrict = args.restrict
    if args.wait_time:
        c.wait_time = args.wait_time
    if args.polite_time:
        c.polite_time = args.polite_time
    if args.silent:
        c.verbose = not args.silent
    if args.debug:
        c.debug = args.debug
    if args.report40x:
        c.report40x = args.report40x
    if args.exclude:
        c.exclude = args.exclude

    c.crawl()
|
aufziehvogel/deadlink-crawler
|
crawler.py
|
Python
|
apache-2.0
| 11,173
|
[
"VisIt"
] |
9b13afbab447318da930002f0d4897178252d1786459787bbe951842de34bf53
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import rdkit
from rdkit.Chem import AllChem
from rdkit import DataStructs
# License identifier for this scoring-method plugin.
__license__ = "X11"

# Descriptor consumed by the benchmarking framework: identifies the
# method, the fingerprint representation and the similarity measure.
METADATA = {
    "id": "method_rdkit_fcfp6_1024_tanimoto",
    "representation": "fcfp6_1024",
    "similarity": "tanimoto"
}
def _compute_fingerprint(molecule):
    """Return the FCFP6 fingerprint of *molecule*: a feature-based
    Morgan fingerprint of radius 3, folded to a 1024-bit vector."""
    fingerprint = AllChem.GetMorganFingerprintAsBitVect(
        molecule, 3, nBits=1024, useFeatures=True)
    return fingerprint
def _compute_similarity(left, right):
    """Tanimoto similarity between two bit-vector fingerprints."""
    score = DataStructs.TanimotoSimilarity(left, right)
    return score
def create_model(train_ligands, train_decoys):
    """Build the similarity model: one (name, fingerprint) record per
    training ligand.  Decoys are accepted for interface compatibility
    but are not used by this method.

    Returns a (model, model_information) tuple.
    """
    model = [{"name": ligand.GetProp("_Name"),
              "fingerprint": _compute_fingerprint(ligand)}
             for ligand in train_ligands]
    model_information = {}
    return model, model_information
def compute_score(model, molecule):
fingerprint = _compute_fingerprint(molecule)
similarities = [_compute_similarity(fingerprint, item["fingerprint"])
for item in model]
max_score = max(similarities)
index_of_max_score = similarities.index(max_score)
closest_molecule = model[index_of_max_score]
return {
"value": max_score,
"info": {
"closest": closest_molecule["name"]
}
}
def compute_similarity(left, right):
    """Similarity between two molecules; fingerprints are computed on
    the fly from the molecule objects."""
    left_fp = _compute_fingerprint(left)
    right_fp = _compute_fingerprint(right)
    return _compute_similarity(left_fp, right_fp)
|
skodapetr/lbvs-environment
|
methods/fcfp/fcfp6_1024_tanimoto.py
|
Python
|
mit
| 1,423
|
[
"RDKit"
] |
0a2187d39f0bacfe4bcd40c7bbc2aa9d6bb008194cedad4388ebf68a4b6c6114
|
from PIL import Image
from pyimage import PyImage
from gausspyramid import GaussPyramid
from laplacepyramid import LaplacePyramid
import numpy as np
class LaplaceBlender(object):
'''This class represents the Laplacian Pyramid Blending process in a data
structure containing two laplacian pyramids, for two given images, each of
those with a gaussian pyramid as well, a third gaussian pyramid for a given
mask that will be used to blend, and a third laplacian pyramid for the
blending of the other two laplacian pyramids, given the mask.'''
def __init__(self, reduce_default=4):
self.l_pyramid_a = LaplacePyramid(reduce_default)
self.l_pyramid_b = LaplacePyramid(reduce_default)
self.g_pyramid_mask = GaussPyramid(reduce_default)
self.l_pyramid_blend = None
# ------------------------------------------------------------------------
# Helper functions
# ------------------------------------------------------------------------
def defaultMask(self, size, mode):
'''This function should generate a default mask image and return it.
The image generated has size and mode specified as arguments. The
default mask is a binary mask that is white in the left half and black
in the right half.'''
# Invert x,y for numpy
size = (size[1], size[0])
# Set matrix of zeros
mask = np.zeros(size)
# Fill rightmost half with ones
mask[:, :size[1]/2] = np.ones((size[0],
size[1]/2))
# Multiply everything by 255
mask *= 255
# Convert image to grayscale mode, then the desired mode, and return
return Image.fromarray(mask.astype("uint8"), 'L').convert(mode)
# ------------------------------------------------------------------------
# Input and Output functions
# ------------------------------------------------------------------------
def loadFiles(self, filepath_a, filepath_b, filepath_mask=None):
'''This function should load at least two files that contain images to
be used in the blending. A third file can be passed in to be used as
mask. This function should initiallize laplacian pyramids for both
mandatory images, and a gaussian pyramid for the mask. If a mask is not
given, a default one is generated.'''
# Load files into laplacian pyramids
self.l_pyramid_a.loadFile(filepath_a)
self.l_pyramid_b.loadFile(filepath_b)
# Get size and mode for both of them
size_a = self.l_pyramid_a.gauss_pyramid.pyramid[0].img.size
size_b = self.l_pyramid_b.gauss_pyramid.pyramid[0].img.size
mode_a = self.l_pyramid_a.gauss_pyramid.pyramid[0].img.mode
mode_b = self.l_pyramid_b.gauss_pyramid.pyramid[0].img.mode
# Assert images have same size and pixel mode
if size_a != size_b:
print "\nERROR: The two images do not have the same size\n"
return
if mode_a != mode_b:
print "\nERROR: The images do not have same number of channels\n"
return
# Build the laplacian pyramid for both images
self.l_pyramid_a.buildPyramid()
self.l_pyramid_b.buildPyramid()
if filepath_mask is None:
# Generate default mask and generate gaussian pyramid for it
img = self.defaultMask(size_a, mode_a)
self.g_pyramid_mask.loadImage(img)
self.g_pyramid_mask.reduceMax()
else:
# Read mask from disk and generate gaussian pyramid for it
self.g_pyramid_mask.loadFile(filepath_mask)
self.g_pyramid_mask.pyramid[-1].img = \
self.g_pyramid_mask.pyramid[-1].img.convert(mode_a)
self.g_pyramid_mask.pyramid[-1].updatePixels()
self.g_pyramid_mask.reduceMax()
def loadImages(self, image_a, image_b, image_mask=None):
'''This function should load at least two images that contain images to
be used in the blending. A third image can be passed in to be used as
mask. This function should initiallize laplacian pyramids for both
mandatory images, and a gaussian pyramid for the mask. If a mask is not
given, a default one is generated.'''
# Load files into laplacian pyramids
self.l_pyramid_a.loadImage(image_a)
self.l_pyramid_b.loadImage(image_b)
# Get size and mode for both of them
size_a = self.l_pyramid_a.gauss_pyramid.pyramid[0].img.size
size_b = self.l_pyramid_b.gauss_pyramid.pyramid[0].img.size
mode_a = self.l_pyramid_a.gauss_pyramid.pyramid[0].img.mode
mode_b = self.l_pyramid_b.gauss_pyramid.pyramid[0].img.mode
# Assert images have same size and pixel mode
if size_a != size_b:
print "\nERROR: The two images do not have the same size\n"
return
if mode_a != mode_b:
print "\nERROR: The images do not have same number of channels\n"
return
# Build the laplacian pyramid for both images
self.l_pyramid_a.buildPyramid()
self.l_pyramid_b.buildPyramid()
if filepath_mask is None:
# Generate default mask and generate gaussian pyramid for it
img = self.defaultMask(size_a, mode_a)
self.g_pyramid_mask.loadImage(img)
self.g_pyramid_mask.reduceMax()
else:
# Read mask and generate gaussian pyramid for it
image_mask.convert(mode_a)
self.g_pyramid_mask.loadImage(image_mask)
self.g_pyramid_mask.reduceMax()
def saveFile(self, filepath):
'''This function should save every pyramid it has into files.'''
# Separate filename from extension
path = filepath.split('.')
extension = '.' + path[-1]
path = "".join(path[:-1]) + '-'
# Save all pyramids
self.l_pyramid_a.savePyramid(path+'lpyramid-A'+extension)
self.l_pyramid_b.savePyramid(path+'lpyramid-B'+extension)
self.g_pyramid_mask.savePyramid(path+'gpyramid-Mask'+extension)
if self.l_pyramid_blend is not None:
self.l_pyramid_blend.savePyramid(path+'lpyramid-blend'+extension)
# ------------------------------------------------------------------------
# Blending functions
# ------------------------------------------------------------------------
def blendPyramids(self):
'''This function should blend pyramids A and B into pyramid BLEND.'''
# Assert pyramids have been loaded
if not self.g_pyramid_mask.pyramid:
print "\nERROR: Please load images onto the blender\n"
# Get image to start new pyramid's gaussian pyramid from pyramid A
img = self.l_pyramid_a.gauss_pyramid.pyramid[-1]
# Blend it with the one from pyramid B using the corresponding mask
img = img.blend(self.l_pyramid_b.gauss_pyramid.pyramid[-1],
self.g_pyramid_mask.pyramid[-1])
# Generate new laplace pyramid blending A and B
self.l_pyramid_blend = self.l_pyramid_a.blend(self.l_pyramid_b,
self.g_pyramid_mask,
img)
def collapse(self, filepath):
'''This function should collapse the blended pyramid.'''
# Assert blended image is laoded
if not self.l_pyramid_blend.pyramid:
print "\nERROR: Please blend the image pyramids first\n"
# Collapse it
self.l_pyramid_blend.collapsePyramid(filepath,
self.g_pyramid_mask.info_loss)
|
Thurler/imgprocess
|
laplaceblender.py
|
Python
|
gpl-3.0
| 7,754
|
[
"Gaussian"
] |
8d1d3ee1178a4451eee8b09ffb470ff76ae24ec655393294059179f5ed2d00b0
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements the Quasi-harmonic Debye approximation that can
be used to compute thermal properties.
See the following papers for more info:
http://doi.org/10.1016/j.comphy.2003.12.001 (2004)
http://doi.org/10.1103/PhysRevB.90.174107 (2014)
"""
import logging
from collections import defaultdict
import numpy as np
from scipy.constants import physical_constants
from scipy.integrate import quadrature
from scipy.misc import derivative
from scipy.optimize import minimize
from pymatgen.analysis.eos import EOS, PolynomialEOS
from pymatgen.core.units import FloatWithUnit
__author__ = "Kiran Mathew, Brandon Bocklund"
__credits__ = "Cormac Toher"
logger = logging.getLogger(__name__)
class QuasiharmonicDebyeApprox:
    """
    Quasiharmonic approximation.
    """

    def __init__(
        self,
        energies,
        volumes,
        structure,
        t_min=300.0,
        t_step=100,
        t_max=300.0,
        eos="vinet",
        pressure=0.0,
        poisson=0.25,
        use_mie_gruneisen=False,
        anharmonic_contribution=False,
    ):
        """
        Args:
            energies (list): list of DFT energies in eV
            volumes (list): list of volumes in Ang^3
            structure (Structure):
            t_min (float): min temperature
            t_step (float): temperature step
            t_max (float): max temperature
            eos (str): equation of state used for fitting the energies and the
                volumes.
                options supported by pymatgen: "quadratic", "murnaghan",
                "birch", "birch_murnaghan", "pourier_tarantola", "vinet",
                "deltafactor", "numerical_eos"
            pressure (float): in GPa, optional.
            poisson (float): poisson ratio.
            use_mie_gruneisen (bool): whether or not to use the mie-gruneisen
                formulation to compute the gruneisen parameter.
                The default is the slater-gamma formulation.
            anharmonic_contribution (bool): whether or not to consider the
                anharmonic contribution to the Debye temperature. Cannot be
                used with use_mie_gruneisen. Defaults to False.
        """
        self.energies = energies
        self.volumes = volumes
        self.structure = structure
        self.temperature_min = t_min
        self.temperature_max = t_max
        self.temperature_step = t_step
        self.eos_name = eos
        self.pressure = pressure
        self.poisson = poisson
        self.use_mie_gruneisen = use_mie_gruneisen
        self.anharmonic_contribution = anharmonic_contribution
        if self.use_mie_gruneisen and self.anharmonic_contribution:
            raise ValueError(
                "The Mie-Gruneisen formulation and anharmonic contribution are circular referenced and "
                "cannot be used together."
            )
        self.mass = sum(e.atomic_mass for e in self.structure.species)
        self.natoms = self.structure.composition.num_atoms
        self.avg_mass = physical_constants["atomic mass constant"][0] * self.mass / self.natoms  # kg
        self.kb = physical_constants["Boltzmann constant in eV/K"][0]
        self.hbar = physical_constants["Planck constant over 2 pi in eV s"][0]
        self.gpa_to_ev_ang = 1.0 / 160.21766208  # 1 GPa in ev/Ang^3
        self.gibbs_free_energy = []  # optimized values, eV
        # list of temperatures for which the optimized values are available, K
        self.temperatures = []
        self.optimum_volumes = []  # in Ang^3
        # fit E and V and get the bulk modulus(used to compute the Debye
        # temperature)
        logger.info("Fitting E and V")
        self.eos = EOS(eos)
        self.ev_eos_fit = self.eos.fit(volumes, energies)
        self.bulk_modulus = self.ev_eos_fit.b0_GPa  # in GPa
        self.optimize_gibbs_free_energy()

    def optimize_gibbs_free_energy(self):
        """
        Evaluate the gibbs free energy as a function of V, T and P i.e
        G(V, T, P), minimize G(V, T, P) wrt V for each T and store the
        optimum values.

        Note: The data points for which the equation of state fitting fails
        are skipped.
        """
        temperatures = np.linspace(
            self.temperature_min,
            self.temperature_max,
            int(np.ceil((self.temperature_max - self.temperature_min) / self.temperature_step) + 1),
        )

        for t in temperatures:
            try:
                G_opt, V_opt = self.optimizer(t)
            except Exception:
                if len(temperatures) <= 1:
                    raise
                logger.info("EOS fitting failed, so skipping this data point, {}".format(t))
                # Fixed: without this `continue` the appends below ran
                # with stale values from a previous iteration (or raised
                # NameError on the first one).
                continue
            self.gibbs_free_energy.append(G_opt)
            self.temperatures.append(t)
            self.optimum_volumes.append(V_opt)

    def optimizer(self, temperature):
        """
        Evaluate G(V, T, P) at the given temperature(and pressure) and
        minimize it wrt V.

        1. Compute the vibrational helmholtz free energy, A_vib.
        2. Compute the gibbs free energy as a function of volume, temperature
            and pressure, G(V,T,P).
        3. Perform an equation of state fit to get the functional form of
            gibbs free energy:G(V, T, P).
        4. Finally G(V, P, T) is minimized with respect to V.

        Args:
            temperature (float): temperature in K

        Returns:
            float, float: G_opt(V_opt, T, P) in eV and V_opt in Ang^3.
        """
        G_V = []  # G for each volume

        # G = E(V) + PV + A_vib(V, T)
        for i, v in enumerate(self.volumes):
            G_V.append(
                self.energies[i] + self.pressure * v * self.gpa_to_ev_ang + self.vibrational_free_energy(temperature, v)
            )

        # fit equation of state, G(V, T, P)
        eos_fit = self.eos.fit(self.volumes, G_V)
        # minimize the fit eos wrt volume
        # Note: the ref energy and the ref volume(E0 and V0) not necessarily
        # the same as minimum energy and min volume.
        volume_guess = eos_fit.volumes[np.argmin(eos_fit.energies)]
        min_wrt_vol = minimize(eos_fit.func, volume_guess)
        # G_opt=G(V_opt, T, P), V_opt
        return min_wrt_vol.fun, min_wrt_vol.x[0]

    def vibrational_free_energy(self, temperature, volume):
        """
        Vibrational Helmholtz free energy, A_vib(V, T).
        Eq(4) in doi.org/10.1016/j.comphy.2003.12.001

        Args:
            temperature (float): temperature in K
            volume (float)

        Returns:
            float: vibrational free energy in eV
        """
        y = self.debye_temperature(volume) / temperature
        return (
            self.kb * self.natoms * temperature * (9.0 / 8.0 * y + 3 * np.log(1 - np.exp(-y)) - self.debye_integral(y))
        )

    def vibrational_internal_energy(self, temperature, volume):
        """
        Vibrational internal energy, U_vib(V, T).
        Eq(4) in doi.org/10.1016/j.comphy.2003.12.001

        Args:
            temperature (float): temperature in K
            volume (float): in Ang^3

        Returns:
            float: vibrational internal energy in eV
        """
        y = self.debye_temperature(volume) / temperature
        return self.kb * self.natoms * temperature * (9.0 / 8.0 * y + 3 * self.debye_integral(y))

    def debye_temperature(self, volume):
        """
        Calculates the debye temperature.
        Eq(6) in doi.org/10.1016/j.comphy.2003.12.001. Thanks to Joey.

        Eq(6) above is equivalent to Eq(3) in doi.org/10.1103/PhysRevB.37.790
        which does not consider anharmonic effects. Eq(20) in the same paper
        and Eq(18) in doi.org/10.1016/j.commatsci.2009.12.006 both consider
        anharmonic contributions to the Debye temperature through the Gruneisen
        parameter at 0K (Gruneisen constant).

        The anharmonic contribution is toggled by setting the anharmonic_contribution
        to True or False in the QuasiharmonicDebyeApprox constructor.

        Args:
            volume (float): in Ang^3

        Returns:
            float: debye temperature in K
        """
        term1 = (2.0 / 3.0 * (1.0 + self.poisson) / (1.0 - 2.0 * self.poisson)) ** 1.5
        term2 = (1.0 / 3.0 * (1.0 + self.poisson) / (1.0 - self.poisson)) ** 1.5
        f = (3.0 / (2.0 * term1 + term2)) ** (1.0 / 3.0)
        debye = 2.9772e-11 * (volume / self.natoms) ** (-1.0 / 6.0) * f * np.sqrt(self.bulk_modulus / self.avg_mass)
        if self.anharmonic_contribution:
            gamma = self.gruneisen_parameter(0, self.ev_eos_fit.v0)  # 0K equilibrium Gruneisen parameter
            return debye * (self.ev_eos_fit.v0 / volume) ** (gamma)
        return debye

    @staticmethod
    def debye_integral(y):
        """
        Debye integral. Eq(5) in doi.org/10.1016/j.comphy.2003.12.001

        Args:
            y (float): debye temperature/T, upper limit

        Returns:
            float: unitless
        """
        # floating point limit is reached around y=155, so values beyond that
        # are set to the limiting value(T-->0, y --> \infty) of
        # 6.4939394 (from wolfram alpha).
        factor = 3.0 / y ** 3
        if y < 155:
            integral = quadrature(lambda x: x ** 3 / (np.exp(x) - 1.0), 0, y)
            return list(integral)[0] * factor
        return 6.493939 * factor

    def gruneisen_parameter(self, temperature, volume):
        """
        Slater-gamma formulation(the default):
            gruneisen paramter = - d log(theta)/ d log(V)
                               = - ( 1/6 + 0.5 d log(B)/ d log(V) )
                               = - (1/6 + 0.5 V/B dB/dV),
                                   where dB/dV = d^2E/dV^2 + V * d^3E/dV^3

        Mie-gruneisen formulation:
            Eq(31) in doi.org/10.1016/j.comphy.2003.12.001
            Eq(7) in Blanco et. al. Joumal of Molecular Structure (Theochem)
                368 (1996) 245-255
            Also se J.P. Poirier, Introduction to the Physics of the Earth's
                Interior, 2nd ed. (Cambridge University Press, Cambridge,
                2000) Eq(3.53)

        Args:
            temperature (float): temperature in K
            volume (float): in Ang^3

        Returns:
            float: unitless
        """
        if isinstance(self.eos, PolynomialEOS):
            p = np.poly1d(self.eos.eos_params)  # pylint: disable=E1101
            # first derivative of energy at 0K wrt volume evaluated at the
            # given volume, in eV/Ang^3
            dEdV = np.polyder(p, 1)(volume)
            # second derivative of energy at 0K wrt volume evaluated at the
            # given volume, in eV/Ang^6
            d2EdV2 = np.polyder(p, 2)(volume)
            # third derivative of energy at 0K wrt volume evaluated at the
            # given volume, in eV/Ang^9
            d3EdV3 = np.polyder(p, 3)(volume)
        else:
            func = self.ev_eos_fit.func
            dEdV = derivative(func, volume, dx=1e-3)
            d2EdV2 = derivative(func, volume, dx=1e-3, n=2, order=5)
            d3EdV3 = derivative(func, volume, dx=1e-3, n=3, order=7)

        # Mie-gruneisen formulation
        if self.use_mie_gruneisen:
            p0 = dEdV
            return (
                self.gpa_to_ev_ang
                * volume
                * (self.pressure + p0 / self.gpa_to_ev_ang)
                / self.vibrational_internal_energy(temperature, volume)
            )

        # Slater-gamma formulation
        # first derivative of bulk modulus wrt volume, eV/Ang^6
        dBdV = d2EdV2 + d3EdV3 * volume
        return -(1.0 / 6.0 + 0.5 * volume * dBdV / FloatWithUnit(self.ev_eos_fit.b0_GPa, "GPa").to("eV ang^-3"))

    def thermal_conductivity(self, temperature, volume):
        """
        Eq(17) in 10.1103/PhysRevB.90.174107

        Args:
            temperature (float): temperature in K
            volume (float): in Ang^3

        Returns:
            float: thermal conductivity in W/K/m
        """
        gamma = self.gruneisen_parameter(temperature, volume)
        theta_d = self.debye_temperature(volume)  # K
        theta_a = theta_d * self.natoms ** (-1.0 / 3.0)  # K
        prefactor = (0.849 * 3 * 4 ** (1.0 / 3.0)) / (20.0 * np.pi ** 3)
        # kg/K^3/s^3
        prefactor = prefactor * (self.kb / self.hbar) ** 3 * self.avg_mass
        kappa = prefactor / (gamma ** 2 - 0.514 * gamma + 0.228)
        # kg/K/s^3 * Ang = (kg m/s^2)/(Ks)*1e-10
        # = N/(Ks)*1e-10 = Nm/(Kms)*1e-10 = W/K/m*1e-10
        kappa = kappa * theta_a ** 2 * volume ** (1.0 / 3.0) * 1e-10
        return kappa

    def get_summary_dict(self):
        """
        Returns a dict with a summary of the computed properties.
        """
        d = defaultdict(list)
        d["pressure"] = self.pressure
        d["poisson"] = self.poisson
        d["mass"] = self.mass
        d["natoms"] = int(self.natoms)
        d["bulk_modulus"] = self.bulk_modulus
        d["gibbs_free_energy"] = self.gibbs_free_energy
        d["temperatures"] = self.temperatures
        d["optimum_volumes"] = self.optimum_volumes
        for v, t in zip(self.optimum_volumes, self.temperatures):
            d["debye_temperature"].append(self.debye_temperature(v))
            d["gruneisen_parameter"].append(self.gruneisen_parameter(t, v))
            d["thermal_conductivity"].append(self.thermal_conductivity(t, v))
        return d
|
richardtran415/pymatgen
|
pymatgen/analysis/quasiharmonic.py
|
Python
|
mit
| 13,572
|
[
"pymatgen"
] |
8fd6140ec9892a33674dcca0603f04c059520d4e77d5045cce1c32b7063d7b86
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RMlinterfaces(RPackage):
    """This package provides uniform interfaces to machine learning
    code for data in R and Bioconductor containers."""

    homepage = "https://www.bioconductor.org/packages/MLInterfaces/"
    git = "https://git.bioconductor.org/packages/MLInterfaces.git"

    # Pinned to the Bioconductor release commit for version 1.56.0.
    version('1.56.0', commit='31fe6fb20d859fcb01d5552f42bca6bab16cc67f')

    # This Bioconductor release only supports the R 3.4 series.
    depends_on('r@3.4.0:3.4.9', when='@1.56.0')
    # R package dependencies, needed both at build and at run time.
    depends_on('r-biocgenerics', type=('build', 'run'))
    depends_on('r-biobase', type=('build', 'run'))
    depends_on('r-gdata', type=('build', 'run'))
    depends_on('r-pls', type=('build', 'run'))
    depends_on('r-sfsmisc', type=('build', 'run'))
    depends_on('r-rda', type=('build', 'run'))
    depends_on('r-genefilter', type=('build', 'run'))
    depends_on('r-fpc', type=('build', 'run'))
    depends_on('r-ggvis', type=('build', 'run'))
    depends_on('r-shiny', type=('build', 'run'))
    depends_on('r-gbm', type=('build', 'run'))
    depends_on('r-rcolorbrewer', type=('build', 'run'))
    depends_on('r-hwriter', type=('build', 'run'))
    depends_on('r-threejs', type=('build', 'run'))
    depends_on('r-mlbench', type=('build', 'run'))
|
krafczyk/spack
|
var/spack/repos/builtin/packages/r-mlinterfaces/package.py
|
Python
|
lgpl-2.1
| 2,432
|
[
"Bioconductor"
] |
02e0e3387242a8c8ea422b033f3b878c8e4ac084ea54a7a79c045a2dd50f72f4
|
from collections import defaultdict
import matplotlib.pyplot as plt
plt.rcParams['pdf.fonttype'] = 42
import scipy.stats as stats
import subprocess
import os
import cPickle
import tps_utils
import numpy as np
import itertools
import pysam
import math
import gzip
import bzUtils
class TPS_qc:
    def __init__(self, tpse, experiment_settings, threads):
        """
        Constructor for the TPS_qc quality-control helper.

        Args (inferred from usage; confirm against callers):
            tpse: parent TPS experiment object (provides rdir_path).
            experiment_settings: settings object exposing get_property,
                get_rdir, get_wdir and iter_lib_settings.
            threads: number of worker threads to use.
        """
        self.threads = threads
        self.tpse = tpse
        self.experiment_settings = experiment_settings
        # Shortcuts to frequently used settings accessors.
        self.get_property = self.experiment_settings.get_property
        self.get_rdir = experiment_settings.get_rdir
        self.get_wdir = experiment_settings.get_wdir
        # Ensure the QC output directory exists under the results dir.
        tps_utils.make_dir(self.tpse.rdir_path('QC'))
def plot_pcr_bias(self):
tps_utils.make_dir(os.path.join(
self.experiment_settings.get_rdir(),
'QC','collapsed_fracs'))
collapsed_read_fractions = map(lambda lib_settings: self.get_collapsed_read_fractions(lib_settings),
self.experiment_settings.iter_lib_settings())
fig = plt.figure(figsize=(8,8))
plot = fig.add_subplot(111)
color_index = 0
for col_tuple in collapsed_read_fractions:
sample_name, read_fractions = col_tuple
read_fractions = sorted(read_fractions, reverse = True)
cumulative_read_fractions = read_fractions[:1]
for read_frac in read_fractions[1:]:
cumulative_read_fractions.append(cumulative_read_fractions[-1]+read_frac)
cumulative_seq_fractions = np.array(range(1, len(cumulative_read_fractions)+1))/float(len(cumulative_read_fractions))
plot.plot(cumulative_read_fractions, cumulative_seq_fractions,color=bzUtils.rainbow[color_index/2],
linestyle = bzUtils.line_styles[color_index%2], label=sample_name, lw=1)
color_index +=1
plot.plot(cumulative_seq_fractions, cumulative_seq_fractions,color=bzUtils.rainbow[color_index/2],
linestyle = bzUtils.line_styles[2], label='expected', lw=1)
plot.set_xlabel("fraction of reads")
plot.set_ylabel("fraction of sequences")
plot.set_xlim(0,1)
plot.set_ylim(0,1)
lg=plt.legend(loc=2,prop={'size':10}, labelspacing=0.2)
lg.draw_frame(False)
out_name = os.path.join(
self.experiment_settings.get_rdir(),
'QC',
'pcr_bias.pdf')
plt.savefig(out_name, transparent='True', format='pdf')
plt.clf()
def identify_contaminating_sequences(self):
for lib_settings in self.experiment_settings.iter_lib_settings():
self.map_for_contaminating_sequences_one_lib(lib_settings)
for lib_settings in self.experiment_settings.iter_lib_settings():
self.write_mapping_summary(lib_settings.get_rRNA_mapping_stats(), lib_settings.get_pool_mapping_stats(), lib_settings.get_genome_mapping_stats(), lib_settings.get_overall_contamination_summary())
def map_for_contaminating_sequences_one_lib(self, lib_settings):
#first, take unmapped sequences and map them to yeast rRNA, counting mapping stats
if not tps_utils.file_exists(lib_settings.get_rRNA_unmapped_reads()):
subprocess.Popen('bowtie2 -f -D 20 -R 3 -N 1 -L 15 -i S,1,0.50 -x %s -p %d -U %s --un-gz %s 2>>%s | samtools view -bS - > %s 2>>%s ' % (self.experiment_settings.get_rRNA_bowtie_index(), self.threads,
lib_settings.get_unmappable_reads(), lib_settings.get_rRNA_unmapped_reads(), lib_settings.get_rRNA_mapping_stats(),
lib_settings.get_rRNA_mapped_reads(), lib_settings.get_log(),
), shell=True).wait()
if not tps_utils.file_exists(lib_settings.get_genome_unmapped_reads()):
#take still unmapped sequences and map them to the rest of the yeast genome, counting mapping stats
subprocess.Popen('bowtie2 -f -D 20 -R 3 -N 1 -L 15 -i S,1,0.50 -x %s -p %d -U %s --un-gz %s 2>>%s | samtools view -bS - > %s 2>>%s ' % (self.experiment_settings.get_genome_bowtie_index(), self.threads,
lib_settings.get_rRNA_unmapped_reads(), lib_settings.get_genome_unmapped_reads(), lib_settings.get_genome_mapping_stats(),
lib_settings.get_genome_mapped_reads(), lib_settings.get_log(),
), shell=True).wait()
def write_mapping_summary(self, rRNA_file, pool_file, genome_file, output_file):
pool_stats = self.parse_mapping_stats(pool_file)
rRNA_stats = self.parse_mapping_stats(rRNA_file)
genome_stats = self.parse_mapping_stats(genome_file)
f = open(output_file, 'w')
f.write('\tunique_pool\tmultiple_pool\tunique_rRNA\tmultiple_rRNA\tunique_genome\tmultiple_genome\n')
f.write('total\t%d\t%d\t%d\t%d\t%d\t%d\n' % (pool_stats[2], pool_stats[3], rRNA_stats[2], rRNA_stats[3],
genome_stats[2], genome_stats[3]))
f.close()
def parse_mapping_stats(self, alignment_summary_file):
'''
example alignment summary:
8333978 reads; of these:
8333978 (100.00%) were unpaired; of these:
7905371 (94.86%) aligned 0 times
276859 (3.32%) aligned exactly 1 time
151748 (1.82%) aligned >1 times
5.14% overall alignment rate
'''
f = open(alignment_summary_file)
lines = f.readlines()
total_reads = int(lines[0].strip().split()[0])
unaligned_reads = int(lines[2].strip().split()[0])
uniquely_aligned_reads = int(lines[3].strip().split()[0])
multiply_aligned_reads = int(lines[4].strip().split()[0])
overall_alignment_percent = float(lines[5].strip().split()[0][:-1])
f.close()
return total_reads, unaligned_reads, uniquely_aligned_reads, multiply_aligned_reads, overall_alignment_percent
def get_collapsed_read_fractions(self, lib_settings):
out_name = os.path.join(
self.experiment_settings.get_rdir(),
'QC','collapsed_fracs',
'%(sample_name)s.collapsed_read_fractions.pkl' % {'sample_name': lib_settings.sample_name})
if not tps_utils.file_exists(out_name) and not self.experiment_settings.get_property('force_recollapse'):
collapsed_reads_file = lib_settings.get_collapsed_reads()
read_counts = []
f = gzip.open(collapsed_reads_file)
for line in f:
if not line.strip() == '' and not line.startswith('#'):#ignore empty lines and commented out lines
if line.startswith('>'):#> marks the start of a new sequence
num_reads = int(line[1:].strip().split('-')[1])
read_counts.append(num_reads)
else:
continue
f.close()
read_fractions = np.array(read_counts)/float(sum(read_counts))
bzUtils.makePickle(read_fractions, out_name)
else:
read_fractions = bzUtils.unPickle(out_name)
return (lib_settings.sample_name, read_fractions)
def get_library_enrichment_correlation(self, lib1, lib2):
lib1_enrichments = []
lib2_enrichments = []
for sequence in lib1.pool_sequence_mappings:
lib1_enrichments.append(lib1.pool_sequence_mappings[sequence].enrichment)
lib2_enrichments.append(lib2.pool_sequence_mappings[sequence].enrichment)
spearmanR, spearmanP = stats.spearmanr(lib1_enrichments, lib2_enrichments)
pearsonR, pearsonP = stats.pearsonr(lib1_enrichments, lib2_enrichments)
return pearsonR, spearmanR, pearsonP, spearmanP
def get_library_count_correlation(self, lib1, lib2):
lib1_counts = []
lib2_counts = []
for sequence in lib1.pool_sequence_mappings:
lib1_counts.append(lib1.pool_sequence_mappings[sequence].total_passing_reads)
lib2_counts.append(lib2.pool_sequence_mappings[sequence].total_passing_reads)
spearmanR, spearmanP = stats.spearmanr(lib1_counts, lib2_counts)
pearsonR, pearsonP = stats.pearsonr(lib1_counts, lib2_counts)
return pearsonR, spearmanR, pearsonP, spearmanP
def get_library_count_distribution(self, lib):
return [lib.pool_sequence_mappings[sequence].total_passing_reads for sequence in lib.pool_sequence_mappings]
def print_library_count_concordances(self):
out_name = os.path.join(self.experiment_settings.get_rdir(), 'QC',
'count_concordances.txt')
f = open(out_name, 'w')
header = 'sample1\tsample2\tpearson r\t pearson p\t spearman r\t spearman p\n'
f.write(header)
for libi, libj in itertools.combinations(self.tpse.libs, 2):
pearsonR, spearmanR, pearsonP, spearmanP = self.get_library_count_correlation(libi, libj)
line = '%s\t%s\t%f\t%f\t%f\t%f\n' % (libi.get_sample_name(), libj.get_sample_name(),
pearsonR, pearsonP, spearmanR, spearmanP)
f.write(line)
f.close()
def plot_average_read_positions(self):
for lib in self.tpse.libs:
self.plot_average_read_positions_one_lib(lib)
def plot_average_read_positions_one_lib(self, lib, min_x = 0, max_x = 150):
positions = np.array(range(min_x, max_x+1))
averages = [np.average([pool_sequence_mapping.fraction_at_position(position) for pool_sequence_mapping in lib.pool_sequence_mappings.values() if pool_sequence_mapping.total_passing_reads>0]) for position in positions]
fig = plt.figure(figsize=(8,8))
plot = fig.add_subplot(111)
plot.bar(positions , averages,color=bzUtils.rainbow[0], lw=0)
plot.set_xticks(positions[::10]+0.5)
plot.set_xticklabels(positions[::10])
plot.set_xlabel("position of read 5' end from RNA end")
plot.set_ylabel("average read fraction")
out_name = os.path.join(
self.experiment_settings.get_rdir(),
'QC',
'%(sample_name)s.read_positions.pdf' % {'sample_name': lib.get_sample_name ()})
plt.savefig(out_name, transparent='True', format='pdf')
plt.clf()
def plot_count_distributions(self):
num_libs = len(self.tpse.libs)
fig = plt.figure(figsize=(16,16))
plot_index = 1
cutoff = 100
hbins = np.arange(0, 400, 10)
hbins = np.append(hbins, 10000000)
for lib in self.tpse.libs:
plot = fig.add_subplot(math.sqrt(bzUtils.next_square_number(num_libs)), math.sqrt(bzUtils.next_square_number(num_libs)), plot_index)
sample_name = lib.lib_settings.sample_name
dist = self.get_library_count_distribution(lib)
plot.hist(dist, bins = hbins, color=bzUtils.skyBlue, histtype='stepfilled', edgecolor = None, lw = 0)
plot.set_xlabel("# reads", fontsize = 10)
plot.set_ylabel("# genes (%d have >= %d reads)" % (bzUtils.number_passing_cutoff(dist, cutoff), cutoff), fontsize = 10)
plot.set_xlim(0, 400)
#plot.set_ylim(0,1)
plot.axvline(cutoff, ls = 'dashed')
plot.set_title(sample_name, fontsize = 8)
plot_index += 1
plt.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.15, wspace=0.4, hspace=0.6)
out_name = os.path.join(
self.experiment_settings.get_rdir(),
'QC',
'count_distributions.pdf')
plt.savefig(out_name, transparent='True', format='pdf')
plt.clf()
"""
def plot_insert_size_distributions(self):
#plot distribution of insert sizes from cutadapt output
TODO - need to parse log file to get this info
num_libs = len(self.tpse.libs)
fig = plt.figure(figsize=(16,16))
plot_index = 1
cutoff = 100
hbins = np.arange(0, 51, 1)
for lib in self.tpse.libs:
plot = fig.add_subplot(math.sqrt(bzUtils.next_square_number(num_libs)), math.sqrt(bzUtils.next_square_number(num_libs)), plot_index)
sample_name = lib.lib_settings.sample_name
dist = self.get_insert_sizes(lib)
plot.hist(dist, bins = hbins, color=bzUtils.skyBlue, histtype='stepfilled', edgecolor = None, lw = 0)
plot.set_xlabel("insert size", fontsize = 10)
plot.set_ylabel("fraction of reads" % (bzUtils.number_passing_cutoff(dist, cutoff), cutoff), fontsize = 10)
plot.set_xlim(0, 400)
#plot.set_ylim(0,1)
plot.axvline(cutoff, ls = 'dashed')
plot.set_title(sample_name, fontsize = 8)
plot_index += 1
plt.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.15, wspace=0.4, hspace=0.6)
out_name = os.path.join(
self.experiment_settings.get_rdir(),
'QC',
'insetrt_size_distributions.pdf')
plt.savefig(out_name, transparent='True', format='pdf')
plt.clf()
"""
|
borisz264/toeprint_seq
|
tps_qc.py
|
Python
|
mit
| 13,680
|
[
"pysam"
] |
64b6868baa2a9185880b70fe6b7f6583eff8d60111cf00f65d52b701aaf99ddb
|
# coding: utf-8
"""
Vericred API
Vericred's API allows you to search for Health Plans that a specific doctor
accepts.
## Getting Started
Visit our [Developer Portal](https://developers.vericred.com) to
create an account.
Once you have created an account, you can create one Application for
Production and another for our Sandbox (select the appropriate Plan when
you create the Application).
## SDKs
Our API follows standard REST conventions, so you can use any HTTP client
to integrate with us. You will likely find it easier to use one of our
[autogenerated SDKs](https://github.com/vericred/?query=vericred-),
which we make available for several common programming languages.
## Authentication
To authenticate, pass the API Key you created in the Developer Portal as
a `Vericred-Api-Key` header.
`curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Versioning
Vericred's API default to the latest version. However, if you need a specific
version, you can request it with an `Accept-Version` header.
The current version is `v3`. Previous versions are `v1` and `v2`.
`curl -H 'Vericred-Api-Key: YOUR_KEY' -H 'Accept-Version: v2' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Pagination
Endpoints that accept `page` and `per_page` parameters are paginated. They expose
four additional fields that contain data about your position in the response,
namely `Total`, `Per-Page`, `Link`, and `Page` as described in [RFC-5988](https://tools.ietf.org/html/rfc5988).
For example, to display 5 results per page and view the second page of a
`GET` to `/networks`, your final request would be `GET /networks?....page=2&per_page=5`.
## Sideloading
When we return multiple levels of an object graph (e.g. `Provider`s and their `State`s
we sideload the associated data. In this example, we would provide an Array of
`State`s and a `state_id` for each provider. This is done primarily to reduce the
payload size since many of the `Provider`s will share a `State`
```
{
providers: [{ id: 1, state_id: 1}, { id: 2, state_id: 1 }],
states: [{ id: 1, code: 'NY' }]
}
```
If you need the second level of the object graph, you can just match the
corresponding id.
## Selecting specific data
All endpoints allow you to specify which fields you would like to return.
This allows you to limit the response to contain only the data you need.
For example, let's take a request that returns the following JSON by default
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890',
field_we_dont_care_about: 'value_we_dont_care_about'
},
states: [{
id: 1,
name: 'New York',
code: 'NY',
field_we_dont_care_about: 'value_we_dont_care_about'
}]
}
```
To limit our results to only return the fields we care about, we specify the
`select` query string parameter for the corresponding fields in the JSON
document.
In this case, we want to select `name` and `phone` from the `provider` key,
so we would add the parameters `select=provider.name,provider.phone`.
We also want the `name` and `code` from the `states` key, so we would
add the parameters `select=states.name,states.code`. The id field of
each document is always returned whether or not it is requested.
Our final request would be `GET /providers/12345?select=provider.name,provider.phone,states.name,states.code`
The response would be
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890'
},
states: [{
id: 1,
name: 'New York',
code: 'NY'
}]
}
```
## Benefits summary format
Benefit cost-share strings are formatted to capture:
* Network tiers
* Compound or conditional cost-share
* Limits on the cost-share
* Benefit-specific maximum out-of-pocket costs
**Example #1**
As an example, we would represent [this Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/33602TX0780032.pdf) as:
* **Hospital stay facility fees**:
- Network Provider: `$400 copay/admit plus 20% coinsurance`
- Out-of-Network Provider: `$1,500 copay/admit plus 50% coinsurance`
- Vericred's format for this benefit: `In-Network: $400 before deductible then 20% after deductible / Out-of-Network: $1,500 before deductible then 50% after deductible`
* **Rehabilitation services:**
- Network Provider: `20% coinsurance`
- Out-of-Network Provider: `50% coinsurance`
- Limitations & Exceptions: `35 visit maximum per benefit period combined with Chiropractic care.`
- Vericred's format for this benefit: `In-Network: 20% after deductible / Out-of-Network: 50% after deductible | limit: 35 visit(s) per Benefit Period`
**Example #2**
In [this other Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/40733CA0110568.pdf), the **specialty_drugs** cost-share has a maximum out-of-pocket for in-network pharmacies.
* **Specialty drugs:**
- Network Provider: `40% coinsurance up to a $500 maximum for up to a 30 day supply`
- Out-of-Network Provider `Not covered`
- Vericred's format for this benefit: `In-Network: 40% after deductible, up to $500 per script / Out-of-Network: 100%`
**BNF**
Here's a description of the benefits summary string, represented as a context-free grammar:
```
root ::= coverage
coverage ::= (simple_coverage | tiered_coverage) (space pipe space coverage_modifier)?
tiered_coverage ::= tier (space slash space tier)*
tier ::= tier_name colon space (tier_coverage | not_applicable)
tier_coverage ::= simple_coverage (space (then | or | and) space simple_coverage)* tier_limitation?
simple_coverage ::= (pre_coverage_limitation space)? coverage_amount (space post_coverage_limitation)? (comma? space coverage_condition)?
coverage_modifier ::= limit_condition colon space (((simple_coverage | simple_limitation) (semicolon space see_carrier_documentation)?) | see_carrier_documentation | waived_if_admitted | shared_across_tiers)
waived_if_admitted ::= ("copay" space)? "waived if admitted"
simple_limitation ::= pre_coverage_limitation space "copay applies"
tier_name ::= "In-Network-Tier-2" | "Out-of-Network" | "In-Network"
limit_condition ::= "limit" | "condition"
tier_limitation ::= comma space "up to" space (currency | (integer space time_unit plural?)) (space post_coverage_limitation)?
coverage_amount ::= currency | unlimited | included | unknown | percentage | (digits space (treatment_unit | time_unit) plural?)
pre_coverage_limitation ::= first space digits space time_unit plural?
post_coverage_limitation ::= (((then space currency) | "per condition") space)? "per" space (treatment_unit | (integer space time_unit) | time_unit) plural?
coverage_condition ::= ("before deductible" | "after deductible" | "penalty" | allowance | "in-state" | "out-of-state") (space allowance)?
allowance ::= upto_allowance | after_allowance
upto_allowance ::= "up to" space (currency space)? "allowance"
after_allowance ::= "after" space (currency space)? "allowance"
see_carrier_documentation ::= "see carrier documentation for more information"
shared_across_tiers ::= "shared across all tiers"
unknown ::= "unknown"
unlimited ::= /[uU]nlimited/
included ::= /[iI]ncluded in [mM]edical/
time_unit ::= /[hH]our/ | (((/[cC]alendar/ | /[cC]ontract/) space)? /[yY]ear/) | /[mM]onth/ | /[dD]ay/ | /[wW]eek/ | /[vV]isit/ | /[lL]ifetime/ | ((((/[bB]enefit/ plural?) | /[eE]ligibility/) space)? /[pP]eriod/)
treatment_unit ::= /[pP]erson/ | /[gG]roup/ | /[cC]ondition/ | /[sS]cript/ | /[vV]isit/ | /[eE]xam/ | /[iI]tem/ | /[sS]tay/ | /[tT]reatment/ | /[aA]dmission/ | /[eE]pisode/
comma ::= ","
colon ::= ":"
semicolon ::= ";"
pipe ::= "|"
slash ::= "/"
plural ::= "(s)" | "s"
then ::= "then" | ("," space) | space
or ::= "or"
and ::= "and"
not_applicable ::= "Not Applicable" | "N/A" | "NA"
first ::= "first"
currency ::= "$" number
percentage ::= number "%"
number ::= float | integer
float ::= digits "." digits
integer ::= /[0-9]/+ (comma_int | under_int)*
comma_int ::= ("," /[0-9]/*3) !"_"
under_int ::= ("_" /[0-9]/*3) !","
digits ::= /[0-9]/+ ("_" /[0-9]/+)*
space ::= /[ \t]/+
```
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class ZipCode(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Model for a ZIP code: a 5-digit `code` string and its integer `id`.
    """
    def __init__(self, code=None, id=None):
        """
        ZipCode - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
        and the value is attribute type.
        :param dict attributeMap: The key is attribute name
        and the value is json key in definition.
        """
        # attribute name -> declared swagger type (drives to_dict())
        self.swagger_types = {
            'code': 'str',
            'id': 'int'
        }
        # attribute name -> JSON key in the API payload
        self.attribute_map = {
            'code': 'code',
            'id': 'id'
        }
        self._code = code
        self._id = id

    @property
    def code(self):
        """
        Gets the code of this ZipCode.
        5 digit code (e.g. 11215)

        :return: The code of this ZipCode.
        :rtype: str
        """
        return self._code

    @code.setter
    def code(self, code):
        """
        Sets the code of this ZipCode.
        5 digit code (e.g. 11215)

        :param code: The code of this ZipCode.
        :type: str
        """
        self._code = code

    @property
    def id(self):
        """
        Gets the id of this ZipCode.
        Primary key

        :return: The id of this ZipCode.
        :rtype: int
        """
        return self._id

    @id.setter
    def id(self, id):
        """
        Sets the id of this ZipCode.
        Primary key

        :param id: The id of this ZipCode.
        :type: int
        """
        self._id = id

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # dict.items() replaces six.iteritems: identical behavior on both
        # Python 2 and 3, and removes the runtime dependence on `six`.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [x.to_dict() if hasattr(x, "to_dict") else x
                                for x in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: v.to_dict() if hasattr(v, "to_dict") else v
                                for k, v in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal.

        Bug fix: comparing against an unrelated type (e.g. an int, which has
        no __dict__) previously raised AttributeError; equality comparisons
        must not raise, so non-ZipCode operands now compare unequal.
        """
        if not isinstance(other, ZipCode):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
vericred/vericred-python
|
vericred_client/models/zip_code.py
|
Python
|
apache-2.0
| 12,506
|
[
"VisIt"
] |
1bb7587f8a8979e712cab6403a3256841cc3171931775fc6318a566c20ef995f
|
# coding=utf-8
"""
.. module:: wolk
This module provides connection to WolkAbout IoT Platform.
To start publishing data to the platform
create an instance of Device class with credentials obtained from the platform
and pass it to an instance of WolkConnect class.
For more information about module features visit:
https://github.com/Wolkabout/WolkConnect-Python/tree/master/examples/full_feature_set
"""
from .models.ActuatorCommand import ActuatorCommand
from .models.ActuatorCommandType import ActuatorCommandType
from .models.ActuatorState import ActuatorState
from .models.ActuatorStatus import ActuatorStatus
from .models.Alarm import Alarm
from .models.ConfigurationCommand import ConfigurationCommand
from .models.ConfigurationCommandType import ConfigurationCommandType
from .models.Device import Device
from .models.FileTransferPacket import FileTransferPacket
from .models.FirmwareCommand import FirmwareCommand
from .models.FirmwareCommandType import FirmwareCommandType
from .models.FirmwareErrorType import FirmwareErrorType
from .models.FirmwareStatus import FirmwareStatus
from .models.FirmwareStatusType import FirmwareStatusType
from .models.FirmwareUpdateStateType import FirmwareUpdateStateType
from .models.InboundMessage import InboundMessage
from .models.OutboundMessage import OutboundMessage
from .models.Protocol import Protocol
from .models.SensorReading import SensorReading
from .interfaces.ActuationHandler import ActuationHandler
from .interfaces.ActuatorStatusProvider import ActuatorStatusProvider
from .interfaces.ConfigurationHandler import ConfigurationHandler
from .interfaces.ConfigurationProvider import ConfigurationProvider
from .interfaces.ConnectivityService import ConnectivityService
from .interfaces.FirmwareInstaller import FirmwareInstaller
from .interfaces.FirmwareURLDownloadHandler import FirmwareURLDownloadHandler
from .interfaces.InboundMessageDeserializer import InboundMessageDeserializer
from .interfaces.OutboundMessageFactory import OutboundMessageFactory
from .interfaces.OutboundMessageQueue import OutboundMessageQueue
from .FileSystemFirmwareHandler import FileSystemFirmwareHandler
from .LoggerFactory import logging_config
from .WolkConnect import WolkConnect
# Public API of the wolk package: re-exported models, interfaces, the
# firmware handler, logging configuration, and the WolkConnect entry point.
__all__ = [
    "ActuatorCommand",
    "ActuatorCommandType",
    "ActuatorState",
    "ActuatorStatus",
    "Alarm",
    "ConfigurationCommand",
    "ConfigurationCommandType",
    "Device",
    "FileTransferPacket",
    "FirmwareCommand",
    "FirmwareCommandType",
    "FirmwareErrorType",
    "FirmwareStatus",
    "FirmwareStatusType",
    "FirmwareUpdateStateType",
    "InboundMessage",
    "OutboundMessage",
    "Protocol",
    "SensorReading",
    "ActuationHandler",
    "ActuatorStatusProvider",
    "ConfigurationHandler",
    "ConfigurationProvider",
    "ConnectivityService",
    "FileSystemFirmwareHandler",
    "FirmwareInstaller",
    "FirmwareURLDownloadHandler",
    "logging_config",
    "InboundMessageDeserializer",
    "OutboundMessageFactory",
    "OutboundMessageQueue",
    "WolkConnect",
]
|
Wolkabout/WolkConnect-Python-
|
wolk/__init__.py
|
Python
|
apache-2.0
| 3,048
|
[
"VisIt"
] |
0c37cbc4a1ab6b3745c14b7de0a5de1010651eec91f4c112eff85b10e2fa7919
|
# -*- coding: utf-8 -*-
"""
Jinja2
~~~~~~
Jinja2 is a template engine written in pure Python. It provides a
`Django`_ inspired non-XML syntax but supports inline expressions and
an optional `sandboxed`_ environment.
Nutshell
--------
Here a small example of a Jinja template::
{% extends 'base.html' %}
{% block title %}Memberlist{% endblock %}
{% block content %}
<ul>
{% for user in users %}
<li><a href="{{ user.url }}">{{ user.username }}</a></li>
{% endfor %}
</ul>
{% endblock %}
Philosophy
----------
Application logic is for the controller but don't try to make the life
for the template designer too hard by giving him too few functionality.
For more information visit the new `Jinja2 webpage`_ and `documentation`_.
.. _sandboxed: http://en.wikipedia.org/wiki/Sandbox_(computer_security)
.. _Django: http://www.djangoproject.com/
.. _Jinja2 webpage: http://jinja.pocoo.org/
.. _documentation: http://jinja.pocoo.org/2/documentation/
"""
import re
import ast
from setuptools import setup
# Extract __version__ from the package source without importing it
# (importing jinja2 at build time could fail before its deps exist).
_version_re = re.compile(r'__version__\s+=\s+(.*)')

with open('jinja2/__init__.py', 'rb') as f:
    # literal_eval safely parses the quoted version string
    version = str(ast.literal_eval(_version_re.search(
        f.read().decode('utf-8')).group(1)))

setup(
    name='Jinja2',
    version=version,
    url='http://jinja.pocoo.org/',
    license='BSD',
    author='Armin Ronacher',
    author_email='armin.ronacher@active-4.com',
    description='A small but fast and easy to use stand-alone template '
                'engine written in pure python.',
    # the module docstring above doubles as the PyPI long description
    long_description=__doc__,
    # jinja is egg safe. But we hate eggs
    zip_safe=False,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Text Processing :: Markup :: HTML'
    ],
    packages=['jinja2'],
    install_requires=['MarkupSafe'],
    extras_require={'i18n': ['Babel>=0.8']},
    include_package_data=True,
    entry_points="""
    [babel.extractors]
    jinja2 = jinja2.ext:babel_extract[i18n]
    """
)
|
dstufft/jinja2
|
setup.py
|
Python
|
bsd-3-clause
| 2,420
|
[
"VisIt"
] |
f35ce4a1c342e6747676f6349aae93a0797643cc881735132dc1e15e85d31f9c
|
# Copied from the mdtraj project, commit 3fddb5d (Mar 10, 2016)
from __future__ import print_function
import os
import shutil
from sphinx.util.compat import Directive
from docutils import nodes
from docutils.parsers.rst import directives
import nbformat
from nbconvert import HTMLExporter, PythonExporter
def _read(wd, name):
    """Load ``<wd>/<name>.ipynb`` from disk as a version-4 notebook node."""
    nb_path = "{}/{}.ipynb".format(wd, name)
    with open(nb_path) as fh:
        return nbformat.read(fh, as_version=4)
def export_html(wd, name):
    """Convert notebook ``<wd>/<name>.ipynb`` to an embeddable HTML body.

    Extracted outputs (images etc.) are written next to the notebook in
    *wd*. Returns the HTML body string; on any conversion failure, returns
    the exception text instead.
    """
    nb = _read(wd, name)
    config = {
        'Exporter': {'template_file': 'embed',
                     'template_path': ['./sphinxext/']},
        # 'ExecutePreprocessor': {'enabled': True},
        'ExecutePreprocessor': {'enabled': False},
        'ExtractOutputPreprocessor': {'enabled': True},
        'CSSHTMLHeaderPreprocessor': {'enabled': True}
    }
    exporter = HTMLExporter(config)
    try:
        body, resources = exporter.from_notebook_node(nb)
        # write the extracted output assets alongside the notebook copy
        for fn, data in resources['outputs'].items():
            with open("{}/{}".format(wd, fn), 'wb') as f:
                f.write(data)
        return body
    except Exception as e:
        # NOTE(review): any failure is swallowed and its message embedded
        # in the page instead of failing the docs build — confirm intended.
        return str(e)
def export_python(wd, name):
    """Convert notebook ``<wd>/<name>.ipynb`` to a ``.py`` script in *wd*."""
    notebook = _read(wd, name)
    script_body, _resources = PythonExporter().from_notebook_node(notebook)
    out_path = "{}/{}.py".format(wd, name)
    with open(out_path, 'w') as fh:
        fh.write(script_body)
class NotebookDirective(Directive):
    """Insert an evaluated notebook into a document
    """
    required_arguments = 1   # path to the notebook, relative to confdir
    optional_arguments = 1
    option_spec = {'skip_exceptions': directives.flag}
    final_argument_whitespace = True
    def run(self):
        """Copy the notebook into the build dir, render it to HTML and a
        .py script, insert download links, and return a raw-HTML node."""
        # check if raw html is supported
        if not self.state.document.settings.raw_enabled:
            raise self.warning('"%s" directive disabled.' % self.name)
        # get path to notebook
        nb_rel_path = self.arguments[0]
        # setup.confdir / setup.app are attributes stashed on the module's
        # setup() function when Sphinx initializes the extension
        nb_abs_path = "{}/../{}".format(setup.confdir, nb_rel_path)
        nb_abs_path = os.path.abspath(nb_abs_path)
        nb_name = os.path.basename(nb_rel_path).split(".")[0]
        dest_dir = "{}/{}/{}".format(
            setup.app.builder.outdir,
            os.path.dirname(nb_rel_path),
            nb_name)
        fmt = {'wd': dest_dir, 'name': nb_name}
        if not os.path.exists(dest_dir):
            os.makedirs(dest_dir)
        shutil.copyfile(nb_abs_path, "{wd}/{name}.ipynb".format(**fmt))
        # TODO: Actually save evaluated notebook
        # (currently the "_eval" copy is identical to the source notebook)
        shutil.copyfile(nb_abs_path, "{wd}/{name}_eval.ipynb".format(**fmt))
        html = export_html(**fmt)
        export_python(**fmt)
        # Create link to notebook and script files
        link_rst = "({uneval}; {eval}; {py})".format(
            uneval=formatted_link("{wd}/{name}.ipynb".format(**fmt)),
            eval=formatted_link("{wd}/{name}_eval.ipynb".format(**fmt)),
            py=formatted_link("{wd}/{name}.py".format(**fmt)),
        )
        rst_file = self.state_machine.document.attributes['source']
        self.state_machine.insert_input([link_rst], rst_file)
        # create notebook node
        attributes = {'format': 'html', 'source': 'nb_path'}
        nb_node = notebook_node('', html, **attributes)
        nb_node.source, nb_node.line = self.state_machine \
            .get_source_and_line(self.lineno)
        # add dependency so Sphinx rebuilds when the notebook changes
        self.state.document.settings.record_dependencies.add(nb_abs_path)
        return [nb_node]
# Marker docutils node carrying the pre-rendered notebook HTML; behaves
# exactly like nodes.raw but lets us register dedicated visitors below.
class notebook_node(nodes.raw):
    pass
def formatted_link(path):
    """Return an anonymous RST hyperlink to *path*, labeled with its basename."""
    label = os.path.basename(path)
    return "`{0} <{1}>`__".format(label, path)
def visit_notebook_node(self, node):
    # delegate to the raw-node visitor: the node already holds final HTML
    self.visit_raw(node)
def depart_notebook_node(self, node):
    # mirror visit_notebook_node: delegate to the raw-node departure hook
    self.depart_raw(node)
def setup(app):
    """Sphinx extension entry point: register the notebook node/directive."""
    # stash app/config/confdir on the function object so that
    # NotebookDirective.run() can reach them later
    setup.app = app
    setup.config = app.config
    setup.confdir = app.confdir
    app.add_node(notebook_node,
                 html=(visit_notebook_node, depart_notebook_node))
    app.add_directive('notebook', NotebookDirective)
|
sallai/mbuild
|
docs/sphinxext/notebook_sphinxext.py
|
Python
|
mit
| 3,947
|
[
"MDTraj"
] |
9572f92107c8c2969b49644864a8852063d11e5e741b15e7d843ae31bfdf10f9
|
#!/usr/bin/env python
# encoding: utf-8
"""
The IPython engine application
Authors:
* Brian Granger
* MinRK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import json
import os
import sys
import time
import zmq
from zmq.eventloop import ioloop
from IPython.core.profiledir import ProfileDir
from IPython.parallel.apps.baseapp import (
BaseParallelApplication,
base_aliases,
base_flags,
catch_config_error,
)
from IPython.kernel.zmq.log import EnginePUBHandler
from IPython.kernel.zmq.ipkernel import Kernel
from IPython.kernel.zmq.kernelapp import IPKernelApp
from IPython.kernel.zmq.session import (
Session, session_aliases, session_flags
)
from IPython.kernel.zmq.zmqshell import ZMQInteractiveShell
from IPython.config.configurable import Configurable
from IPython.parallel.engine.engine import EngineFactory
from IPython.parallel.util import disambiguate_ip_address
from IPython.utils.importstring import import_item
from IPython.utils.py3compat import cast_bytes
from IPython.utils.traitlets import Bool, Unicode, Dict, List, Float, Instance
#-----------------------------------------------------------------------------
# Module level variables
#-----------------------------------------------------------------------------
_description = """Start an IPython engine for parallel computing.
IPython engines run in parallel and perform computations on behalf of a client
and controller. A controller needs to be started before the engines. The
engine can be configured using command line options or using a cluster
directory. Cluster directories contain config, log and security files and are
usually located in your ipython directory and named as "profile_name".
See the `profile` and `profile-dir` options for details.
"""
_examples = """
ipengine --ip=192.168.0.1 --port=1000 # connect to hub at ip and port
ipengine --log-to-file --log-level=DEBUG # log to a file with DEBUG verbosity
"""
#-----------------------------------------------------------------------------
# MPI configuration
#-----------------------------------------------------------------------------
mpi4py_init = """from mpi4py import MPI as mpi
mpi.size = mpi.COMM_WORLD.Get_size()
mpi.rank = mpi.COMM_WORLD.Get_rank()
"""
pytrilinos_init = """from PyTrilinos import Epetra
class SimpleStruct:
pass
mpi = SimpleStruct()
mpi.rank = 0
mpi.size = 0
"""
class MPI(Configurable):
"""Configurable for MPI initialization"""
use = Unicode('', config=True,
help='How to enable MPI (mpi4py, pytrilinos, or empty string to disable).'
)
def _use_changed(self, name, old, new):
# load default init script if it's not set
if not self.init_script:
self.init_script = self.default_inits.get(new, '')
init_script = Unicode('', config=True,
help="Initialization code for MPI")
default_inits = Dict({'mpi4py' : mpi4py_init, 'pytrilinos':pytrilinos_init},
config=True)
#-----------------------------------------------------------------------------
# Main application
#-----------------------------------------------------------------------------
aliases = dict(
file = 'IPEngineApp.url_file',
c = 'IPEngineApp.startup_command',
s = 'IPEngineApp.startup_script',
url = 'EngineFactory.url',
ssh = 'EngineFactory.sshserver',
sshkey = 'EngineFactory.sshkey',
ip = 'EngineFactory.ip',
transport = 'EngineFactory.transport',
port = 'EngineFactory.regport',
location = 'EngineFactory.location',
timeout = 'EngineFactory.timeout',
mpi = 'MPI.use',
)
aliases.update(base_aliases)
aliases.update(session_aliases)
flags = {}
flags.update(base_flags)
flags.update(session_flags)
class IPEngineApp(BaseParallelApplication):
name = 'ipengine'
description = _description
examples = _examples
classes = List([ZMQInteractiveShell, ProfileDir, Session, EngineFactory, Kernel, MPI])
startup_script = Unicode(u'', config=True,
help='specify a script to be run at startup')
startup_command = Unicode('', config=True,
help='specify a command to be run at startup')
url_file = Unicode(u'', config=True,
help="""The full location of the file containing the connection information for
the controller. If this is not given, the file must be in the
security directory of the cluster directory. This location is
resolved using the `profile` or `profile_dir` options.""",
)
wait_for_url_file = Float(5, config=True,
help="""The maximum number of seconds to wait for url_file to exist.
This is useful for batch-systems and shared-filesystems where the
controller and engine are started at the same time and it
may take a moment for the controller to write the connector files.""")
url_file_name = Unicode(u'ipcontroller-engine.json', config=True)
def _cluster_id_changed(self, name, old, new):
if new:
base = 'ipcontroller-%s' % new
else:
base = 'ipcontroller'
self.url_file_name = "%s-engine.json" % base
log_url = Unicode('', config=True,
help="""The URL for the iploggerapp instance, for forwarding
logging to a central location.""")
# an IPKernelApp instance, used to setup listening for shell frontends
kernel_app = Instance(IPKernelApp)
aliases = Dict(aliases)
flags = Dict(flags)
@property
def kernel(self):
"""allow access to the Kernel object, so I look like IPKernelApp"""
return self.engine.kernel
def find_url_file(self):
"""Set the url file.
Here we don't try to actually see if it exists for is valid as that
is hadled by the connection logic.
"""
config = self.config
# Find the actual controller key file
if not self.url_file:
self.url_file = os.path.join(
self.profile_dir.security_dir,
self.url_file_name
)
def load_connector_file(self):
"""load config from a JSON connector file,
at a *lower* priority than command-line/config files.
"""
self.log.info("Loading url_file %r", self.url_file)
config = self.config
with open(self.url_file) as f:
num_tries = 0
max_tries = 5
d = ""
while not d:
try:
d = json.loads(f.read())
except ValueError:
if num_tries > max_tries:
raise
num_tries += 1
time.sleep(0.5)
# allow hand-override of location for disambiguation
# and ssh-server
if 'EngineFactory.location' not in config:
config.EngineFactory.location = d['location']
if 'EngineFactory.sshserver' not in config:
config.EngineFactory.sshserver = d.get('ssh')
location = config.EngineFactory.location
proto, ip = d['interface'].split('://')
ip = disambiguate_ip_address(ip, location)
d['interface'] = '%s://%s' % (proto, ip)
# DO NOT allow override of basic URLs, serialization, or key
# JSON file takes top priority there
config.Session.key = cast_bytes(d['key'])
config.Session.signature_scheme = d['signature_scheme']
config.EngineFactory.url = d['interface'] + ':%i' % d['registration']
config.Session.packer = d['pack']
config.Session.unpacker = d['unpack']
self.log.debug("Config changed:")
self.log.debug("%r", config)
self.connection_info = d
def bind_kernel(self, **kwargs):
"""Promote engine to listening kernel, accessible to frontends."""
if self.kernel_app is not None:
return
self.log.info("Opening ports for direct connections as an IPython kernel")
kernel = self.kernel
kwargs.setdefault('config', self.config)
kwargs.setdefault('log', self.log)
kwargs.setdefault('profile_dir', self.profile_dir)
kwargs.setdefault('session', self.engine.session)
app = self.kernel_app = IPKernelApp(**kwargs)
# allow IPKernelApp.instance():
IPKernelApp._instance = app
app.init_connection_file()
# relevant contents of init_sockets:
app.shell_port = app._bind_socket(kernel.shell_streams[0], app.shell_port)
app.log.debug("shell ROUTER Channel on port: %i", app.shell_port)
app.iopub_port = app._bind_socket(kernel.iopub_socket, app.iopub_port)
app.log.debug("iopub PUB Channel on port: %i", app.iopub_port)
kernel.stdin_socket = self.engine.context.socket(zmq.ROUTER)
app.stdin_port = app._bind_socket(kernel.stdin_socket, app.stdin_port)
app.log.debug("stdin ROUTER Channel on port: %i", app.stdin_port)
# start the heartbeat, and log connection info:
app.init_heartbeat()
app.log_connection_info()
app.write_connection_file()
def init_engine(self):
# This is the working dir by now.
sys.path.insert(0, '')
config = self.config
# print config
self.find_url_file()
# was the url manually specified?
keys = set(self.config.EngineFactory.keys())
keys = keys.union(set(self.config.RegistrationFactory.keys()))
if keys.intersection(set(['ip', 'url', 'port'])):
# Connection info was specified, don't wait for the file
url_specified = True
self.wait_for_url_file = 0
else:
url_specified = False
if self.wait_for_url_file and not os.path.exists(self.url_file):
self.log.warn("url_file %r not found", self.url_file)
self.log.warn("Waiting up to %.1f seconds for it to arrive.", self.wait_for_url_file)
tic = time.time()
while not os.path.exists(self.url_file) and (time.time()-tic < self.wait_for_url_file):
# wait for url_file to exist, or until time limit
time.sleep(0.1)
if os.path.exists(self.url_file):
self.load_connector_file()
elif not url_specified:
self.log.fatal("Fatal: url file never arrived: %s", self.url_file)
self.exit(1)
exec_lines = []
for app in ('IPKernelApp', 'InteractiveShellApp'):
if '%s.exec_lines' % app in config:
exec_lines = config[app].exec_lines
break
exec_files = []
for app in ('IPKernelApp', 'InteractiveShellApp'):
if '%s.exec_files' % app in config:
exec_files = config[app].exec_files
break
config.IPKernelApp.exec_lines = exec_lines
config.IPKernelApp.exec_files = exec_files
if self.startup_script:
exec_files.append(self.startup_script)
if self.startup_command:
exec_lines.append(self.startup_command)
# Create the underlying shell class and Engine
# shell_class = import_item(self.master_config.Global.shell_class)
# print self.config
try:
self.engine = EngineFactory(config=config, log=self.log,
connection_info=self.connection_info,
)
except:
self.log.error("Couldn't start the Engine", exc_info=True)
self.exit(1)
def forward_logging(self):
if self.log_url:
self.log.info("Forwarding logging to %s", self.log_url)
context = self.engine.context
lsock = context.socket(zmq.PUB)
lsock.connect(self.log_url)
handler = EnginePUBHandler(self.engine, lsock)
handler.setLevel(self.log_level)
self.log.addHandler(handler)
def init_mpi(self):
global mpi
self.mpi = MPI(parent=self)
mpi_import_statement = self.mpi.init_script
if mpi_import_statement:
try:
self.log.info("Initializing MPI:")
self.log.info(mpi_import_statement)
exec(mpi_import_statement, globals())
except:
mpi = None
else:
mpi = None
@catch_config_error
def initialize(self, argv=None):
super(IPEngineApp, self).initialize(argv)
self.init_mpi()
self.init_engine()
self.forward_logging()
def start(self):
self.engine.start()
try:
self.engine.loop.start()
except KeyboardInterrupt:
self.log.critical("Engine Interrupted, shutting down...\n")
launch_new_instance = IPEngineApp.launch_instance
if __name__ == '__main__':
launch_new_instance()
|
WillisXChen/django-oscar
|
oscar/lib/python2.7/site-packages/IPython/parallel/apps/ipengineapp.py
|
Python
|
bsd-3-clause
| 13,537
|
[
"Brian"
] |
bd4a6ed5e001c4b6c38cd66e06719c2b58678d26c139eeeb4009465b8e6ed158
|
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import unittest
from app.bookmark import Bookmark
from app.curses_util import *
import app.fake_curses_testing
import app.prefs
import app.text_buffer
import app.window
kTestFile = "#bookmarks_test_file_with_unlikely_file_name~"
class BookmarkTestCases(app.fake_curses_testing.FakeCursesTestCase):
def setUp(self):
self.prg = app.ci_program.CiProgram()
self.fakeHost = app.window.ViewWindow(self.prg, None)
self.textBuffer = app.text_buffer.TextBuffer(self.prg)
self.textBuffer.lines = 50
self.lineNumbers = app.window.LineNumbers(self.prg, self.fakeHost)
self.lineNumbers.rows = 30
self.lineNumbers.parent = self.fakeHost
self.fakeHost.lineNumberColumn = self.lineNumbers
self.fakeHost.textBuffer = self.textBuffer
self.fakeHost.scrollRow = self.fakeHost.cursorRow = 0
app.fake_curses_testing.FakeCursesTestCase.set_up(self)
def tearDown(self):
app.fake_curses_testing.FakeCursesTestCase.tear_down(self)
def test_bookmark_comparisons(self):
b1 = Bookmark(1, 5, {})
b2 = Bookmark(1, 3, {})
self.assertTrue(b1 > b2)
self.assertTrue(b1 >= b2)
self.assertFalse(b1 < b2)
self.assertFalse(b1 <= b2)
self.assertTrue(b2 < b1)
self.assertTrue(b2 <= b1)
self.assertFalse(b2 > b1)
self.assertFalse(b2 >= b1)
self.assertTrue(b1 != b2)
self.assertFalse(b1 == b2)
self.assertFalse(hash(b1) == hash(b2))
b1 = Bookmark(2, 5, {})
# b2 = Bookmark(1, 3, {})
self.assertTrue(b1 > b2)
self.assertTrue(b1 >= b2)
self.assertFalse(b1 < b2)
self.assertFalse(b1 <= b2)
self.assertTrue(b2 < b1)
self.assertTrue(b2 <= b1)
self.assertFalse(b2 > b1)
self.assertFalse(b2 >= b1)
self.assertTrue(b1 != b2)
self.assertFalse(b1 == b2)
self.assertFalse(hash(b1) == hash(b2))
# b1 = Bookmark(2, 5, {})
b2 = Bookmark(1, 10, {})
self.assertTrue(b1 > b2)
self.assertTrue(b1 >= b2)
self.assertFalse(b1 < b2)
self.assertFalse(b1 <= b2)
self.assertTrue(b2 < b1)
self.assertTrue(b2 <= b1)
self.assertFalse(b2 > b1)
self.assertFalse(b2 >= b1)
self.assertTrue(b1 != b2)
self.assertFalse(b1 == b2)
self.assertFalse(hash(b1) == hash(b2))
b1 = Bookmark(1, 10, {})
# b2 = Bookmark(1, 10, {})
self.assertFalse(b1 > b2)
self.assertTrue(b1 >= b2)
self.assertFalse(b1 < b2)
self.assertTrue(b1 <= b2)
self.assertFalse(b2 < b1)
self.assertTrue(b2 <= b1)
self.assertFalse(b2 > b1)
self.assertTrue(b2 >= b1)
self.assertFalse(b1 != b2)
self.assertTrue(b1 == b2)
self.assertTrue(hash(b1) == hash(b2))
# b1 - Bookmark(1, 10, {})
b2 = Bookmark(-10, 10, {})
self.assertTrue(b1 > b2)
self.assertTrue(b1 >= b2)
self.assertFalse(b1 < b2)
self.assertFalse(b1 <= b2)
self.assertTrue(b2 < b1)
self.assertTrue(b2 <= b1)
self.assertFalse(b2 > b1)
self.assertFalse(b2 >= b1)
self.assertTrue(b1 != b2)
self.assertFalse(b1 == b2)
self.assertFalse(hash(b1) == hash(b2))
def test_bookmark_contains(self):
def check_ranges(bookmark):
"""
Checks that every integer between the bookmark's interval is 'in'
the bookmark. It also checks if the two integers outside of the
bookmark's range on both sides of its interval are NOT 'in' the
bookmark.
"""
begin = bookmark.begin
end = bookmark.end
for i in range(begin, end + 1):
self.assertTrue(i in bookmark)
self.assertFalse(begin - 2 in bookmark)
self.assertFalse(begin - 1 in bookmark)
self.assertFalse(end + 1 in bookmark)
self.assertFalse(end + 2 in bookmark)
check_ranges(Bookmark(1, 5, {}))
check_ranges(Bookmark(-3, 3, {}))
check_ranges(Bookmark(-5000, -4990, {}))
# Check intervals of length 0.
check_ranges(Bookmark(0, 0, {}))
check_ranges(Bookmark(5000, 5000, {}))
check_ranges(Bookmark(-5000, 5000, {}))
b = Bookmark(-3.99, 3.99, {}) # Floats get cast to int (rounds towards zero).
self.assertFalse(-4 in b)
self.assertTrue(-3 in b)
self.assertFalse(4 in b)
self.assertTrue(3 in b)
def test_bookmark_overlap(self):
b1 = Bookmark(1, 5, {})
b2 = Bookmark(1, 5, {})
self.assertTrue(b1.overlaps(b2))
self.assertTrue(b2.overlaps(b1))
b1 = Bookmark(2, 5, {})
b2 = Bookmark(1, 5, {})
self.assertTrue(b1.overlaps(b2))
self.assertTrue(b2.overlaps(b1))
b1 = Bookmark(1, 3, {})
b2 = Bookmark(1, 5, {})
self.assertTrue(b1.overlaps(b2))
self.assertTrue(b2.overlaps(b1))
b1 = Bookmark(3, 4, {})
b2 = Bookmark(1, 5, {})
self.assertTrue(b1.overlaps(b2))
self.assertTrue(b2.overlaps(b1))
b1 = Bookmark(3, 10, {})
b2 = Bookmark(1, 5, {})
self.assertTrue(b1.overlaps(b2))
self.assertTrue(b2.overlaps(b1))
b1 = Bookmark(5, 10, {})
b2 = Bookmark(1, 5, {})
self.assertTrue(b1.overlaps(b2))
self.assertTrue(b2.overlaps(b1))
b1 = Bookmark(-5, 0, {})
b2 = Bookmark(-5, 5, {})
self.assertTrue(b1.overlaps(b2))
self.assertTrue(b2.overlaps(b1))
b1 = Bookmark(0, 0, {})
b2 = Bookmark(0, 0, {})
self.assertTrue(b1.overlaps(b2))
self.assertTrue(b2.overlaps(b1))
b1 = Bookmark(0, 0, {})
b2 = Bookmark(-5, 99, {})
self.assertTrue(b1.overlaps(b2))
self.assertTrue(b2.overlaps(b1))
b1 = Bookmark(0, 0, {})
b2 = Bookmark(-5, -1, {})
self.assertFalse(b1.overlaps(b2))
self.assertFalse(b2.overlaps(b1))
b1 = Bookmark(5, 5, {})
b2 = Bookmark(6, 9, {})
self.assertFalse(b1.overlaps(b2))
self.assertFalse(b2.overlaps(b1))
b1 = Bookmark(3, 5, {})
b2 = Bookmark(5, 8, {})
self.assertTrue(b1.overlaps(b2))
self.assertTrue(b2.overlaps(b1))
b1 = Bookmark(-3.999, 3.999, {}) # Rounds to range (-3, 3).
b2 = Bookmark(-5, -4, {})
self.assertFalse(b1.overlaps(b2))
self.assertFalse(b2.overlaps(b1))
b1 = Bookmark(-3.001, 3.001, {}) # Rounds to range (-3, 3).
b2 = Bookmark(3.99, 9.0, {}) # Rounds to range (3, 9).
self.assertTrue(b1.overlaps(b2))
self.assertTrue(b2.overlaps(b1))
def test_bookmark_properties(self):
b = Bookmark(3, 5, {})
self.assertTrue(b.begin == 3)
self.assertTrue(b.end == 5)
self.assertTrue(b.range == (3, 5))
b = Bookmark(-5.99, 5.99, {}) # Test constructor
self.assertTrue(b.begin == -5)
self.assertTrue(b.end == 5)
self.assertTrue(b.range == (-5, 5))
b.range = (10, 20)
self.assertTrue(b.begin == 10)
self.assertTrue(b.end == 20)
self.assertTrue(b.range == (10, 20))
b.range = (20, 10)
self.assertTrue(b.begin == 10)
self.assertTrue(b.end == 20)
self.assertTrue(b.range == (10, 20))
b.range = (3, 3)
self.assertTrue(b.begin == 3)
self.assertTrue(b.end == 3)
self.assertTrue(b.range == (3, 3))
b.begin = -3
self.assertTrue(b.begin == -3)
self.assertTrue(b.end == 3)
self.assertTrue(b.range == (-3, 3))
b.begin = 10
self.assertTrue(b.begin == 3)
self.assertTrue(b.end == 10)
self.assertTrue(b.range == (3, 10))
b.end = 15
self.assertTrue(b.begin == 3)
self.assertTrue(b.end == 15)
self.assertTrue(b.range == (3, 15))
b.end = -5
self.assertTrue(b.begin == -5)
self.assertTrue(b.end == 3)
self.assertTrue(b.range == (-5, 3))
b.begin = 3.9
self.assertTrue(b.begin == 3)
self.assertTrue(b.end == 3)
self.assertTrue(b.range == (3, 3))
b.end = 2.99
self.assertTrue(b.begin == 2)
self.assertTrue(b.end == 3)
self.assertTrue(b.range == (2, 3))
b.range = (-9.99, 9.99)
self.assertTrue(b.begin == -9)
self.assertTrue(b.end == 9)
self.assertTrue(b.range == (-9, 9))
def test_get_next_bookmark_color(self):
try:
import mock
except ImportError:
startYellow = "\033[93m"
disableColor = "\033[0m"
startBlue = "\033[94m"
exceptionMessage = (
startYellow
+ "This test could "
+ "not execute because the 'mock' module could not be found. If "
+ "you would like to run this test, please install the mock "
+ "module for python 2.7. You can visit their website at "
+ startBlue
+ "https://pypi.python.org/pypi/mock "
+ startYellow
+ "or you can "
+ "try running "
+ startBlue
+ "pip install mock."
+ disableColor
)
# raise Exception(exceptionMessage)
print(exceptionMessage)
return
def test_with_an_x_colored_terminal(x):
mock.patch.dict(self.prg.prefs.startup, {"numColors": x}, clear=True)
colors = set()
expectedNumberOfColors = 5
for _ in range(expectedNumberOfColors):
color = self.textBuffer.get_bookmark_color()
# Make sure that a color index is returned.
self.assertEqual(type(color), int)
colors.add(color)
# Test that all colors were different.
self.assertEqual(len(colors), expectedNumberOfColors)
color = self.textBuffer.get_bookmark_color()
colors.add(color)
# Test that the function rotates 5 colors.
self.assertEqual(len(colors), expectedNumberOfColors)
# Test for 8-colored mode
test_with_an_x_colored_terminal(8)
# Test for 256-colored mode
test_with_an_x_colored_terminal(256)
def test_get_visible_bookmarks(self):
# Set up the fake objects to test the LineNumbers methods.
self.textBuffer.bookmarks = [
Bookmark(0, 0, {}),
Bookmark(10, 10, {}),
Bookmark(20, 20, {}),
Bookmark(30, 30, {}),
Bookmark(40, 40, {}),
]
visibleBookmarks = self.lineNumbers.get_visible_bookmarks(
self.fakeHost.scrollRow, self.fakeHost.scrollRow + self.lineNumbers.rows
)
expectedBookmarks = {
Bookmark(0, 0, {}),
Bookmark(10, 10, {}),
Bookmark(20, 20, {}),
}
# Check that visibleBookmarks contains all the correct bookmarks
self.assertEqual(set(visibleBookmarks), expectedBookmarks)
# Check that the number of bookmarks is the same, as set removes
# duplicates.
self.assertEqual(len(visibleBookmarks), len(expectedBookmarks))
self.fakeHost.scrollRow = 20
visibleBookmarks = self.lineNumbers.get_visible_bookmarks(
self.fakeHost.scrollRow, 20 + self.lineNumbers.rows
)
expectedBookmarks = {
Bookmark(20, 20, {}),
Bookmark(30, 30, {}),
Bookmark(40, 40, {}),
}
self.assertEqual(set(visibleBookmarks), expectedBookmarks)
self.assertEqual(len(visibleBookmarks), len(expectedBookmarks))
self.fakeHost.scrollRow = 21
visibleBookmarks = self.lineNumbers.get_visible_bookmarks(
self.fakeHost.scrollRow, self.fakeHost.scrollRow + self.lineNumbers.rows
)
expectedBookmarks = {Bookmark(30, 30, {}), Bookmark(40, 40, {})}
self.assertEqual(set(visibleBookmarks), expectedBookmarks)
self.assertEqual(len(visibleBookmarks), len(expectedBookmarks))
self.fakeHost.scrollRow = 21
self.lineNumbers.rows = 10
visibleBookmarks = self.lineNumbers.get_visible_bookmarks(
self.fakeHost.scrollRow, self.fakeHost.scrollRow + self.lineNumbers.rows
)
expectedBookmarks = {Bookmark(30, 30, {})}
self.assertEqual(set(visibleBookmarks), expectedBookmarks)
self.assertEqual(len(visibleBookmarks), len(expectedBookmarks))
self.lineNumbers.rows = 9
visibleBookmarks = self.lineNumbers.get_visible_bookmarks(
self.fakeHost.scrollRow, self.fakeHost.scrollRow + self.lineNumbers.rows
)
expectedBookmarks = {}
self.assertEqual(visibleBookmarks, [])
self.fakeHost.scrollRow = 10
self.textBuffer.bookmarks = [
Bookmark(0, 10, {}),
Bookmark(11, 29, {}),
Bookmark(30, 45, {}),
Bookmark(46, 49, {}),
]
self.lineNumbers.rows = 15
visibleBookmarks = self.lineNumbers.get_visible_bookmarks(
self.fakeHost.scrollRow, self.fakeHost.scrollRow + self.lineNumbers.rows
)
expectedBookmarks = {Bookmark(0, 10, {}), Bookmark(11, 29, {})}
self.assertEqual(set(visibleBookmarks), expectedBookmarks)
self.assertEqual(len(visibleBookmarks), len(expectedBookmarks))
def test_bookmarks_jump(self):
# self.set_movie_mode(True)
self.run_with_test_file(
kTestFile,
[
self.display_check(
0,
0,
[
u" ci _file_with_unlikely_file_name~ . ",
u" ",
u" 1 ",
u" ",
u" ",
u" ",
u" ",
u" ",
u" ",
u" ",
u" ",
u" ",
u" ",
u"Creating new file | 1, 1 | 0%, 0%",
u" ",
],
),
self.write_text(u"one"),
CTRL_E,
"b",
"m",
CTRL_J,
CTRL_J, # Create bookmark and go to next line.
CTRL_E,
"b",
"m",
CTRL_J, # Create bookmark.
self.write_text(u"two"),
CTRL_J,
self.write_text(u"three"),
CTRL_E,
"b",
"m",
CTRL_J,
CTRL_J, # Create bookmark and go to next line.
self.write_text(u"four"),
CTRL_J,
self.write_text(u"five"),
CTRL_J,
self.write_text(u"six"),
CTRL_J,
self.write_text(u"seven"),
CTRL_J,
self.write_text(u"eight"),
CTRL_J,
CTRL_E,
"b",
"m",
CTRL_J, # Create a new bookmark.
self.write_text(u"nine"),
CTRL_J,
self.write_text(u"ten"),
CTRL_J,
self.write_text(u"eleven"),
CTRL_J,
self.write_text(u"twelve"),
CTRL_J,
self.write_text(u"thirteen"),
CTRL_J,
self.write_text(u"fourteen"),
CTRL_J,
self.write_text(u"fifteen"),
CTRL_J,
self.write_text(u"sixteen"),
CTRL_J,
self.write_text(u"seventeen"),
CTRL_J,
self.write_text(u"eighteen"),
CTRL_J,
self.write_text(u"nineteen"),
CTRL_J,
self.write_text(u"twenty"),
CTRL_J,
self.write_text(u"twenty-one"),
CTRL_J,
self.write_text(u"twenty-two"),
CTRL_J,
self.write_text(u"twenty-three"),
CTRL_E,
"b",
"m",
CTRL_J, # Create a new bookmark.
# Bookmarks are at positions (1, 4), (2, 1), (3, 6) (9, 1),
# (23, 13).
# Note that rows here start at 1, so 1 is the first row.
self.display_check(
0,
0,
[
u" ci _file_with_unlikely_file_name~ * ",
u" ",
u" 13 thirteen ",
u" 14 fourteen ",
u" 15 fifteen ",
u" 16 sixteen ",
u" 17 seventeen ",
u" 18 eighteen ",
u" 19 nineteen ",
u" 20 twenty ",
u" 21 twenty-one ",
u" 22 twenty-two ",
u" 23 twenty-three ",
u"Added bookmark | 23,13 | 95%,100%",
u" ",
],
),
KEY_F2, # Jump to the first bookmark (1, 4).
self.display_check(
0,
0,
[
u" ci _file_with_unlikely_file_name~ * ",
u" ",
u" 1 one ",
u" 2 two ",
u" 3 three ",
u" 4 four ",
u" 5 five ",
u" 6 six ",
u" 7 seven ",
u" 8 eight ",
u" 9 nine ",
u" 10 ten ",
u" 11 eleven ",
u" 1, 4 | 0%,100%",
u" ",
],
),
KEY_F2, # Jump to the second bookmark (2, 1).
# The display doesn't move because the bookmark is already in
# the optimal position.
self.display_check(
0,
0,
[
u" ci _file_with_unlikely_file_name~ * ",
u" ",
u" 1 one ",
u" 2 two ",
u" 3 three ",
u" 4 four ",
u" 5 five ",
u" 6 six ",
u" 7 seven ",
u" 8 eight ",
u" 9 nine ",
u" 10 ten ",
u" 11 eleven ",
u" 2, 1 | 4%, 0%",
u" ",
],
),
KEY_F2, # Jump to the third bookmark (3, 6).
self.display_check(
0,
0,
[
u" ci _file_with_unlikely_file_name~ * ",
u" ",
u" 1 one ",
u" 2 two ",
u" 3 three ",
u" 4 four ",
u" 5 five ",
u" 6 six ",
u" 7 seven ",
u" 8 eight ",
u" 9 nine ",
u" 10 ten ",
u" 11 eleven ",
u" 3, 6 | 8%,100%",
u" ",
],
),
KEY_F2, # Jump to the third bookmark (9, 6).
# This moves the bookmark to the optimal scroll position.
self.display_check(
0,
0,
[
u" ci _file_with_unlikely_file_name~ * ",
u" ",
u" 7 seven ",
u" 8 eight ",
u" 9 nine ",
u" 10 ten ",
u" 11 eleven ",
u" 12 twelve ",
u" 13 thirteen ",
u" 14 fourteen ",
u" 15 fifteen ",
u" 16 sixteen ",
u" 17 seventeen ",
u" 9, 1 | 34%, 0%",
u" ",
],
),
KEY_F2, # Jump to the fourth bookmark (23, 13).
# This moves the bookmark to the optimal scroll position.
self.display_check(
0,
0,
[
u" ci _file_with_unlikely_file_name~ * ",
u" ",
u" 21 twenty-one ",
u" 22 twenty-two ",
u" 23 twenty-three ",
u" ",
u" ",
u" ",
u" ",
u" ",
u" ",
u" ",
u" ",
u" 23,13 | 95%,100%",
u" ",
],
),
KEY_F2, # Jump to the first bookmark (1, 4).
# This moves the bookmark to the optimal scroll position.
self.display_check(
0,
0,
[
u" ci _file_with_unlikely_file_name~ * ",
u" ",
u" 1 one ",
u" 2 two ",
u" 3 three ",
u" 4 four ",
u" 5 five ",
u" 6 six ",
u" 7 seven ",
u" 8 eight ",
u" 9 nine ",
u" 10 ten ",
u" 11 eleven ",
u" 1, 4 | 0%,100%",
u" ",
],
),
KEY_SHIFT_F2, # Go back to the fourth bookmark (23, 13).
# This moves the bookmark to the optimal scroll position.
self.display_check(
0,
0,
[
u" ci _file_with_unlikely_file_name~ * ",
u" ",
u" 21 twenty-one ",
u" 22 twenty-two ",
u" 23 twenty-three ",
u" ",
u" ",
u" ",
u" ",
u" ",
u" ",
u" ",
u" ",
u" 23,13 | 95%,100%",
u" ",
],
),
KEY_SHIFT_F2, # Go back to the third bookmark (8, 6).
# This moves the bookmark to the optimal scroll position.
self.display_check(
0,
0,
[
u" ci _file_with_unlikely_file_name~ * ",
u" ",
u" 7 seven ",
u" 8 eight ",
u" 9 nine ",
u" 10 ten ",
u" 11 eleven ",
u" 12 twelve ",
u" 13 thirteen ",
u" 14 fourteen ",
u" 15 fifteen ",
u" 16 sixteen ",
u" 17 seventeen ",
u" 9, 1 | 34%, 0%",
u" ",
],
),
KEY_SHIFT_F2, # Go back to the second bookmark (2, 1).
# This moves the bookmark to the optimal scroll position.
self.display_check(
0,
0,
[
u" ci _file_with_unlikely_file_name~ * ",
u" ",
u" 1 one ",
u" 2 two ",
u" 3 three ",
u" 4 four ",
u" 5 five ",
u" 6 six ",
u" 7 seven ",
u" 8 eight ",
u" 9 nine ",
u" 10 ten ",
u" 11 eleven ",
u" 3, 6 | 8%,100%",
u" ",
],
),
KEY_SHIFT_F2, # Go back to the first bookmark (1, 4).
# The display doesn't move because the bookmark is already in
# the optimal position.
self.display_check(
0,
0,
[
u" ci _file_with_unlikely_file_name~ * ",
u" ",
u" 1 one ",
u" 2 two ",
u" 3 three ",
u" 4 four ",
u" 5 five ",
u" 6 six ",
u" 7 seven ",
u" 8 eight ",
u" 9 nine ",
u" 10 ten ",
u" 11 eleven ",
u" 2, 1 | 4%, 0%",
u" ",
],
),
CTRL_Q,
"n",
],
)
|
google/ci_edit
|
app/unit_test_bookmarks.py
|
Python
|
apache-2.0
| 32,396
|
[
"VisIt"
] |
7d79f103966f2653160b64a6df71595a4595a5c7b3f73b63ad17640a1c44e3ea
|
#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
from os import mkdir
from os.path import isdir
from LoLIM.utilities import processed_data_dir, logger
from LoLIM.noise_analysis import get_noise_std, to_file
## these lines are anachronistic and should be fixed at some point
from LoLIM import utilities
utilities.default_raw_data_loc = "/exp_app2/appexp1/lightning_data"
utilities.default_processed_data_loc = "/home/brian/processed_files"
if __name__=="__main__":
timeID = "D20180813T153001.413Z"
out_folder = 'noise_std'
processed_data_folder = processed_data_dir(timeID)
output_fpath = processed_data_folder + '/' + out_folder
if not isdir(output_fpath):
mkdir(output_fpath)
log = logger()
log.set(output_fpath+'/log.txt')
log.take_stdout()
result_dict = get_noise_std(
timeID = timeID,
initial_block = 5500,
max_num_blocks = 500
)
to_file(result_dict, output_fpath)
|
Bhare8972/LOFAR-LIM
|
LIM_scripts/examples/get_noise_std.py
|
Python
|
mit
| 1,011
|
[
"Brian"
] |
82053a36ea501db3ad8e31147d91f0481c19a484fdd4413012d48ad3c2a60521
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from six import string_types, text_type
import io
import gzip
import bz2file
from tempfile import gettempdir
import itertools
import requests
from cachecontrol import CacheControl
from cachecontrol.caches import FileCache
from skbio.io import IOSourceError
from ._fileobject import (IterableStringWriterIO, IterableStringReaderIO,
WrappedBufferedRandom)
def get_io_sources():
return (
# The order of these source is significant as they will short-circuit
HTTPSource,
FilePathSource,
BytesIOSource,
BufferedIOSource,
TextIOSource,
IterableSource
)
def _compressors():
return (
GzipCompressor,
BZ2Compressor
)
def get_compression_handler(name):
compressors = {c.name: c for c in _compressors()}
compressors['auto'] = AutoCompressor
return compressors.get(name, False)
class IOSource(object):
closeable = True
def __init__(self, file, options):
self.file = file
self.options = options
def can_read(self):
return False
def can_write(self):
return False
def get_reader(self):
raise NotImplementedError()
def get_writer(self):
raise NotImplementedError()
class Compressor(IOSource):
streamable = True
name = ''
def can_write(self):
return True
class FilePathSource(IOSource):
def can_read(self):
return isinstance(self.file, string_types)
def can_write(self):
return self.can_read()
def get_reader(self):
return io.open(self.file, mode='rb')
def get_writer(self):
return io.open(self.file, mode='wb')
class HTTPSource(IOSource):
def can_read(self):
return (
isinstance(self.file, string_types) and
requests.compat.urlparse(self.file).scheme in {'http', 'https'})
def get_reader(self):
sess = CacheControl(requests.Session(),
cache=FileCache(gettempdir()))
req = sess.get(self.file)
# if the response is not 200, an exception will be raised
req.raise_for_status()
return io.BufferedReader(io.BytesIO(req.content))
class BytesIOSource(IOSource):
closeable = False
def can_read(self):
return isinstance(self.file, io.BytesIO)
def can_write(self):
return self.can_read()
def get_reader(self):
return WrappedBufferedRandom(self.file)
def get_writer(self):
return self.get_reader()
class BufferedIOSource(IOSource):
closeable = False
def can_read(self):
# `peek` is part of the API we want to guarantee, so we can't just look
# for io.BufferedIOBase. Despite the fact that the C implementation of
# io.BufferedRandom inherits io.BufferedReader/Writer it is not
# reflected in an isinstance check, so we need to check for it manually
return isinstance(self.file, (io.BufferedReader, io.BufferedRandom))
def can_write(self):
return isinstance(self.file, (io.BufferedWriter, io.BufferedRandom))
def get_reader(self):
return self.file
def get_writer(self):
return self.file
class TextIOSource(IOSource):
closeable = False
def can_read(self):
return isinstance(self.file, io.TextIOBase) and self.file.readable()
def can_write(self):
return isinstance(self.file, io.TextIOBase) and self.file.writable()
def get_reader(self):
return self.file
def get_writer(self):
return self.file
class IterableSource(IOSource):
def can_read(self):
if hasattr(self.file, '__iter__'):
iterator = iter(self.file)
head = next(iterator, None)
if head is None:
self.repaired = []
return True
if isinstance(head, text_type):
self.repaired = itertools.chain([head], iterator)
return True
else:
# We may have mangled a generator at this point, so just abort
raise IOSourceError(
"Could not open source: %r (mode: %r)" %
(self.file, self.options['mode']))
return False
def can_write(self):
return hasattr(self.file, 'append') and hasattr(self.file, '__iter__')
def get_reader(self):
return IterableStringReaderIO(self.repaired,
newline=self.options['newline'])
def get_writer(self):
return IterableStringWriterIO(self.file,
newline=self.options['newline'])
class GzipCompressor(Compressor):
    """Gzip codec: detects, decompresses, and compresses gzip streams."""
    name = 'gzip'
    streamable = True

    def can_read(self):
        """A gzip stream always begins with the two magic bytes 1f 8b."""
        return self.file.peek(2).startswith(b'\x1f\x8b')

    def get_reader(self):
        """Decompressing file object layered over the raw stream."""
        return gzip.GzipFile(fileobj=self.file)

    def get_writer(self):
        """Compressing file object honoring the configured level."""
        level = self.options['compresslevel']
        return gzip.GzipFile(fileobj=self.file, mode='wb',
                             compresslevel=level)
class BZ2Compressor(Compressor):
    """Bzip2 codec: detects, decompresses, and compresses bz2 streams."""
    name = 'bz2'
    streamable = False

    def can_read(self):
        """A bzip2 stream always begins with the ASCII magic 'BZh'."""
        return self.file.peek(3).startswith(b'BZh')

    def get_reader(self):
        """Decompressing file object layered over the raw stream."""
        return bz2file.BZ2File(self.file, mode='rb')

    def get_writer(self):
        """Compressing file object honoring the configured level."""
        level = self.options['compresslevel']
        return bz2file.BZ2File(self.file, mode='wb',
                               compresslevel=level)
class AutoCompressor(Compressor):
    """Codec that sniffs the stream and delegates to whichever compressor
    recognizes its magic bytes, passing the stream through unchanged
    otherwise."""
    streamable = True  # never writes compressed data, so streamability is moot
    name = 'auto'

    def get_reader(self):
        """Return a decompressing reader from the first matching codec."""
        for handler_cls in _compressors():
            candidate = handler_cls(self.file, self.options)
            if candidate.can_read():
                return candidate.get_reader()
        # No codec matched: hand back the raw stream untouched.
        return self.file

    def get_writer(self):
        """Auto mode never compresses on write; pass the stream through."""
        return self.file
|
xguse/scikit-bio
|
skbio/io/_iosources.py
|
Python
|
bsd-3-clause
| 6,272
|
[
"scikit-bio"
] |
cb89c9df7974142daa811dd718640dc7bfb360699de55fd2456af5c017cff2a8
|
"""
Constants needed for parsing binary GeoIP databases. It is part of the pygeoip
package.
@author: Jennifer Ennis <zaylea at gmail dot com>
@license:
Copyright(C) 2004 MaxMind LLC
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/lgpl.txt>.
"""
# Public access flags for opening a database; they mirror the values of
# STANDARD / MEMORY_CACHE defined further below.
GEOIP_STANDARD = 0
GEOIP_MEMORY_CACHE = 1
DMA_MAP = {
500 : 'Portland-Auburn, ME',
501 : 'New York, NY',
502 : 'Binghamton, NY',
503 : 'Macon, GA',
504 : 'Philadelphia, PA',
505 : 'Detroit, MI',
506 : 'Boston, MA',
507 : 'Savannah, GA',
508 : 'Pittsburgh, PA',
509 : 'Ft Wayne, IN',
510 : 'Cleveland, OH',
511 : 'Washington, DC',
512 : 'Baltimore, MD',
513 : 'Flint, MI',
514 : 'Buffalo, NY',
515 : 'Cincinnati, OH',
516 : 'Erie, PA',
517 : 'Charlotte, NC',
518 : 'Greensboro, NC',
519 : 'Charleston, SC',
520 : 'Augusta, GA',
521 : 'Providence, RI',
522 : 'Columbus, GA',
523 : 'Burlington, VT',
524 : 'Atlanta, GA',
525 : 'Albany, GA',
526 : 'Utica-Rome, NY',
527 : 'Indianapolis, IN',
528 : 'Miami, FL',
529 : 'Louisville, KY',
530 : 'Tallahassee, FL',
531 : 'Tri-Cities, TN',
532 : 'Albany-Schenectady-Troy, NY',
533 : 'Hartford, CT',
534 : 'Orlando, FL',
535 : 'Columbus, OH',
536 : 'Youngstown-Warren, OH',
537 : 'Bangor, ME',
538 : 'Rochester, NY',
539 : 'Tampa, FL',
540 : 'Traverse City-Cadillac, MI',
541 : 'Lexington, KY',
542 : 'Dayton, OH',
543 : 'Springfield-Holyoke, MA',
544 : 'Norfolk-Portsmouth, VA',
545 : 'Greenville-New Bern-Washington, NC',
546 : 'Columbia, SC',
547 : 'Toledo, OH',
548 : 'West Palm Beach, FL',
549 : 'Watertown, NY',
550 : 'Wilmington, NC',
551 : 'Lansing, MI',
552 : 'Presque Isle, ME',
553 : 'Marquette, MI',
554 : 'Wheeling, WV',
555 : 'Syracuse, NY',
556 : 'Richmond-Petersburg, VA',
557 : 'Knoxville, TN',
558 : 'Lima, OH',
559 : 'Bluefield-Beckley-Oak Hill, WV',
560 : 'Raleigh-Durham, NC',
561 : 'Jacksonville, FL',
563 : 'Grand Rapids, MI',
564 : 'Charleston-Huntington, WV',
565 : 'Elmira, NY',
566 : 'Harrisburg-Lancaster-Lebanon-York, PA',
567 : 'Greenville-Spartenburg, SC',
569 : 'Harrisonburg, VA',
570 : 'Florence-Myrtle Beach, SC',
571 : 'Ft Myers, FL',
573 : 'Roanoke-Lynchburg, VA',
574 : 'Johnstown-Altoona, PA',
575 : 'Chattanooga, TN',
576 : 'Salisbury, MD',
577 : 'Wilkes Barre-Scranton, PA',
581 : 'Terre Haute, IN',
582 : 'Lafayette, IN',
583 : 'Alpena, MI',
584 : 'Charlottesville, VA',
588 : 'South Bend, IN',
592 : 'Gainesville, FL',
596 : 'Zanesville, OH',
597 : 'Parkersburg, WV',
598 : 'Clarksburg-Weston, WV',
600 : 'Corpus Christi, TX',
602 : 'Chicago, IL',
603 : 'Joplin-Pittsburg, MO',
604 : 'Columbia-Jefferson City, MO',
605 : 'Topeka, KS',
606 : 'Dothan, AL',
609 : 'St Louis, MO',
610 : 'Rockford, IL',
611 : 'Rochester-Mason City-Austin, MN',
612 : 'Shreveport, LA',
613 : 'Minneapolis-St Paul, MN',
616 : 'Kansas City, MO',
617 : 'Milwaukee, WI',
618 : 'Houston, TX',
619 : 'Springfield, MO',
620 : 'Tuscaloosa, AL',
622 : 'New Orleans, LA',
623 : 'Dallas-Fort Worth, TX',
624 : 'Sioux City, IA',
625 : 'Waco-Temple-Bryan, TX',
626 : 'Victoria, TX',
627 : 'Wichita Falls, TX',
628 : 'Monroe, LA',
630 : 'Birmingham, AL',
631 : 'Ottumwa-Kirksville, IA',
632 : 'Paducah, KY',
633 : 'Odessa-Midland, TX',
634 : 'Amarillo, TX',
635 : 'Austin, TX',
636 : 'Harlingen, TX',
637 : 'Cedar Rapids-Waterloo, IA',
638 : 'St Joseph, MO',
639 : 'Jackson, TN',
640 : 'Memphis, TN',
641 : 'San Antonio, TX',
642 : 'Lafayette, LA',
643 : 'Lake Charles, LA',
644 : 'Alexandria, LA',
646 : 'Anniston, AL',
647 : 'Greenwood-Greenville, MS',
648 : 'Champaign-Springfield-Decatur, IL',
649 : 'Evansville, IN',
650 : 'Oklahoma City, OK',
651 : 'Lubbock, TX',
652 : 'Omaha, NE',
656 : 'Panama City, FL',
657 : 'Sherman, TX',
658 : 'Green Bay-Appleton, WI',
659 : 'Nashville, TN',
661 : 'San Angelo, TX',
662 : 'Abilene-Sweetwater, TX',
669 : 'Madison, WI',
670 : 'Ft Smith-Fay-Springfield, AR',
671 : 'Tulsa, OK',
673 : 'Columbus-Tupelo-West Point, MS',
675 : 'Peoria-Bloomington, IL',
676 : 'Duluth, MN',
678 : 'Wichita, KS',
679 : 'Des Moines, IA',
682 : 'Davenport-Rock Island-Moline, IL',
686 : 'Mobile, AL',
687 : 'Minot-Bismarck-Dickinson, ND',
691 : 'Huntsville, AL',
692 : 'Beaumont-Port Author, TX',
693 : 'Little Rock-Pine Bluff, AR',
698 : 'Montgomery, AL',
702 : 'La Crosse-Eau Claire, WI',
705 : 'Wausau-Rhinelander, WI',
709 : 'Tyler-Longview, TX',
710 : 'Hattiesburg-Laurel, MS',
711 : 'Meridian, MS',
716 : 'Baton Rouge, LA',
717 : 'Quincy, IL',
718 : 'Jackson, MS',
722 : 'Lincoln-Hastings, NE',
724 : 'Fargo-Valley City, ND',
725 : 'Sioux Falls, SD',
734 : 'Jonesboro, AR',
736 : 'Bowling Green, KY',
737 : 'Mankato, MN',
740 : 'North Platte, NE',
743 : 'Anchorage, AK',
744 : 'Honolulu, HI',
745 : 'Fairbanks, AK',
746 : 'Biloxi-Gulfport, MS',
747 : 'Juneau, AK',
749 : 'Laredo, TX',
751 : 'Denver, CO',
752 : 'Colorado Springs, CO',
753 : 'Phoenix, AZ',
754 : 'Butte-Bozeman, MT',
755 : 'Great Falls, MT',
756 : 'Billings, MT',
757 : 'Boise, ID',
758 : 'Idaho Falls-Pocatello, ID',
759 : 'Cheyenne, WY',
760 : 'Twin Falls, ID',
762 : 'Missoula, MT',
764 : 'Rapid City, SD',
765 : 'El Paso, TX',
766 : 'Helena, MT',
767 : 'Casper-Riverton, WY',
770 : 'Salt Lake City, UT',
771 : 'Yuma, AZ',
773 : 'Grand Junction, CO',
789 : 'Tucson, AZ',
790 : 'Albuquerque, NM',
798 : 'Glendive, MT',
800 : 'Bakersfield, CA',
801 : 'Eugene, OR',
802 : 'Eureka, CA',
803 : 'Los Angeles, CA',
804 : 'Palm Springs, CA',
807 : 'San Francisco, CA',
810 : 'Yakima-Pasco, WA',
811 : 'Reno, NV',
813 : 'Medford-Klamath Falls, OR',
819 : 'Seattle-Tacoma, WA',
820 : 'Portland, OR',
821 : 'Bend, OR',
825 : 'San Diego, CA',
828 : 'Monterey-Salinas, CA',
839 : 'Las Vegas, NV',
855 : 'Santa Barbara, CA',
862 : 'Sacramento, CA',
866 : 'Fresno, CA',
868 : 'Chico-Redding, CA',
881 : 'Spokane, WA'
}
COUNTRY_CODES = (
'', 'AP', 'EU', 'AD', 'AE', 'AF', 'AG', 'AI', 'AL', 'AM', 'AN', 'AO', 'AQ',
'AR', 'AS', 'AT', 'AU', 'AW', 'AZ', 'BA', 'BB', 'BD', 'BE', 'BF', 'BG', 'BH',
'BI', 'BJ', 'BM', 'BN', 'BO', 'BR', 'BS', 'BT', 'BV', 'BW', 'BY', 'BZ', 'CA',
'CC', 'CD', 'CF', 'CG', 'CH', 'CI', 'CK', 'CL', 'CM', 'CN', 'CO', 'CR', 'CU',
'CV', 'CX', 'CY', 'CZ', 'DE', 'DJ', 'DK', 'DM', 'DO', 'DZ', 'EC', 'EE', 'EG',
'EH', 'ER', 'ES', 'ET', 'FI', 'FJ', 'FK', 'FM', 'FO', 'FR', 'FX', 'GA', 'GB',
'GD', 'GE', 'GF', 'GH', 'GI', 'GL', 'GM', 'GN', 'GP', 'GQ', 'GR', 'GS', 'GT',
'GU', 'GW', 'GY', 'HK', 'HM', 'HN', 'HR', 'HT', 'HU', 'ID', 'IE', 'IL', 'IN',
'IO', 'IQ', 'IR', 'IS', 'IT', 'JM', 'JO', 'JP', 'KE', 'KG', 'KH', 'KI', 'KM',
'KN', 'KP', 'KR', 'KW', 'KY', 'KZ', 'LA', 'LB', 'LC', 'LI', 'LK', 'LR', 'LS',
'LT', 'LU', 'LV', 'LY', 'MA', 'MC', 'MD', 'MG', 'MH', 'MK', 'ML', 'MM', 'MN',
'MO', 'MP', 'MQ', 'MR', 'MS', 'MT', 'MU', 'MV', 'MW', 'MX', 'MY', 'MZ', 'NA',
'NC', 'NE', 'NF', 'NG', 'NI', 'NL', 'NO', 'NP', 'NR', 'NU', 'NZ', 'OM', 'PA',
'PE', 'PF', 'PG', 'PH', 'PK', 'PL', 'PM', 'PN', 'PR', 'PS', 'PT', 'PW', 'PY',
'QA', 'RE', 'RO', 'RU', 'RW', 'SA', 'SB', 'SC', 'SD', 'SE', 'SG', 'SH', 'SI',
'SJ', 'SK', 'SL', 'SM', 'SN', 'SO', 'SR', 'ST', 'SV', 'SY', 'SZ', 'TC', 'TD',
'TF', 'TG', 'TH', 'TJ', 'TK', 'TM', 'TN', 'TO', 'TL', 'TR', 'TT', 'TV', 'TW',
'TZ', 'UA', 'UG', 'UM', 'US', 'UY', 'UZ', 'VA', 'VC', 'VE', 'VG', 'VI', 'VN',
'VU', 'WF', 'WS', 'YE', 'YT', 'RS', 'ZA', 'ZM', 'ME', 'ZW', 'A1', 'A2', 'O1',
'AX', 'GG', 'IM', 'JE', 'BL', 'MF'
)
COUNTRY_CODES3 = (
'','AP','EU','AND','ARE','AFG','ATG','AIA','ALB','ARM','ANT','AGO','AQ','ARG',
'ASM','AUT','AUS','ABW','AZE','BIH','BRB','BGD','BEL','BFA','BGR','BHR','BDI',
'BEN','BMU','BRN','BOL','BRA','BHS','BTN','BV','BWA','BLR','BLZ','CAN','CC',
'COD','CAF','COG','CHE','CIV','COK','CHL','CMR','CHN','COL','CRI','CUB','CPV',
'CX','CYP','CZE','DEU','DJI','DNK','DMA','DOM','DZA','ECU','EST','EGY','ESH',
'ERI','ESP','ETH','FIN','FJI','FLK','FSM','FRO','FRA','FX','GAB','GBR','GRD',
'GEO','GUF','GHA','GIB','GRL','GMB','GIN','GLP','GNQ','GRC','GS','GTM','GUM',
'GNB','GUY','HKG','HM','HND','HRV','HTI','HUN','IDN','IRL','ISR','IND','IO',
'IRQ','IRN','ISL','ITA','JAM','JOR','JPN','KEN','KGZ','KHM','KIR','COM','KNA',
'PRK','KOR','KWT','CYM','KAZ','LAO','LBN','LCA','LIE','LKA','LBR','LSO','LTU',
'LUX','LVA','LBY','MAR','MCO','MDA','MDG','MHL','MKD','MLI','MMR','MNG','MAC',
'MNP','MTQ','MRT','MSR','MLT','MUS','MDV','MWI','MEX','MYS','MOZ','NAM','NCL',
'NER','NFK','NGA','NIC','NLD','NOR','NPL','NRU','NIU','NZL','OMN','PAN','PER',
'PYF','PNG','PHL','PAK','POL','SPM','PCN','PRI','PSE','PRT','PLW','PRY','QAT',
'REU','ROU','RUS','RWA','SAU','SLB','SYC','SDN','SWE','SGP','SHN','SVN','SJM',
'SVK','SLE','SMR','SEN','SOM','SUR','STP','SLV','SYR','SWZ','TCA','TCD','TF',
'TGO','THA','TJK','TKL','TLS','TKM','TUN','TON','TUR','TTO','TUV','TWN','TZA',
'UKR','UGA','UM','USA','URY','UZB','VAT','VCT','VEN','VGB','VIR','VNM','VUT',
'WLF','WSM','YEM','YT','SRB','ZAF','ZMB','MNE','ZWE','A1','A2','O1',
'ALA','GGY','IMN','JEY','BLM','MAF'
)
COUNTRY_NAMES = (
"", "Asia/Pacific Region", "Europe", "Andorra", "United Arab Emirates",
"Afghanistan", "Antigua and Barbuda", "Anguilla", "Albania", "Armenia",
"Netherlands Antilles", "Angola", "Antarctica", "Argentina", "American Samoa",
"Austria", "Australia", "Aruba", "Azerbaijan", "Bosnia and Herzegovina",
"Barbados", "Bangladesh", "Belgium", "Burkina Faso", "Bulgaria", "Bahrain",
"Burundi", "Benin", "Bermuda", "Brunei Darussalam", "Bolivia", "Brazil",
"Bahamas", "Bhutan", "Bouvet Island", "Botswana", "Belarus", "Belize",
"Canada", "Cocos (Keeling) Islands", "Congo, The Democratic Republic of the",
"Central African Republic", "Congo", "Switzerland", "Cote D'Ivoire", "Cook Islands",
"Chile", "Cameroon", "China", "Colombia", "Costa Rica", "Cuba", "Cape Verde",
"Christmas Island", "Cyprus", "Czech Republic", "Germany", "Djibouti",
"Denmark", "Dominica", "Dominican Republic", "Algeria", "Ecuador", "Estonia",
"Egypt", "Western Sahara", "Eritrea", "Spain", "Ethiopia", "Finland", "Fiji",
"Falkland Islands (Malvinas)", "Micronesia, Federated States of", "Faroe Islands",
"France", "France, Metropolitan", "Gabon", "United Kingdom",
"Grenada", "Georgia", "French Guiana", "Ghana", "Gibraltar", "Greenland",
"Gambia", "Guinea", "Guadeloupe", "Equatorial Guinea", "Greece",
"South Georgia and the South Sandwich Islands",
"Guatemala", "Guam", "Guinea-Bissau",
"Guyana", "Hong Kong", "Heard Island and McDonald Islands", "Honduras",
"Croatia", "Haiti", "Hungary", "Indonesia", "Ireland", "Israel", "India",
"British Indian Ocean Territory", "Iraq", "Iran, Islamic Republic of",
"Iceland", "Italy", "Jamaica", "Jordan", "Japan", "Kenya", "Kyrgyzstan",
"Cambodia", "Kiribati", "Comoros", "Saint Kitts and Nevis",
"Korea, Democratic People's Republic of",
"Korea, Republic of", "Kuwait", "Cayman Islands",
"Kazakstan", "Lao People's Democratic Republic", "Lebanon", "Saint Lucia",
"Liechtenstein", "Sri Lanka", "Liberia", "Lesotho", "Lithuania", "Luxembourg",
"Latvia", "Libyan Arab Jamahiriya", "Morocco", "Monaco", "Moldova, Republic of",
"Madagascar", "Marshall Islands", "Macedonia",
"Mali", "Myanmar", "Mongolia", "Macau", "Northern Mariana Islands",
"Martinique", "Mauritania", "Montserrat", "Malta", "Mauritius", "Maldives",
"Malawi", "Mexico", "Malaysia", "Mozambique", "Namibia", "New Caledonia",
"Niger", "Norfolk Island", "Nigeria", "Nicaragua", "Netherlands", "Norway",
"Nepal", "Nauru", "Niue", "New Zealand", "Oman", "Panama", "Peru", "French Polynesia",
"Papua New Guinea", "Philippines", "Pakistan", "Poland", "Saint Pierre and Miquelon",
"Pitcairn Islands", "Puerto Rico", "Palestinian Territory",
"Portugal", "Palau", "Paraguay", "Qatar", "Reunion", "Romania",
"Russian Federation", "Rwanda", "Saudi Arabia", "Solomon Islands",
"Seychelles", "Sudan", "Sweden", "Singapore", "Saint Helena", "Slovenia",
"Svalbard and Jan Mayen", "Slovakia", "Sierra Leone", "San Marino", "Senegal",
"Somalia", "Suriname", "Sao Tome and Principe", "El Salvador", "Syrian Arab Republic",
"Swaziland", "Turks and Caicos Islands", "Chad", "French Southern Territories",
"Togo", "Thailand", "Tajikistan", "Tokelau", "Turkmenistan",
"Tunisia", "Tonga", "Timor-Leste", "Turkey", "Trinidad and Tobago", "Tuvalu",
"Taiwan", "Tanzania, United Republic of", "Ukraine",
"Uganda", "United States Minor Outlying Islands", "United States", "Uruguay",
"Uzbekistan", "Holy See (Vatican City State)", "Saint Vincent and the Grenadines",
"Venezuela", "Virgin Islands, British", "Virgin Islands, U.S.",
"Vietnam", "Vanuatu", "Wallis and Futuna", "Samoa", "Yemen", "Mayotte",
"Serbia", "South Africa", "Zambia", "Montenegro", "Zimbabwe",
"Anonymous Proxy","Satellite Provider","Other",
"Aland Islands","Guernsey","Isle of Man","Jersey","Saint Barthelemy","Saint Martin"
)
# Storage / caching flags (how the database file is accessed).
STANDARD = 0      # read from disk on each lookup
MEMORY_CACHE = 1  # load the whole database file into memory
MMAP_CACHE = 8    # memory-map the database file
# Database structure constants
# NOTE(review): the *_BEGIN values are offsets into the binary search tree
# where each record class starts -- inferred from names; confirm against the
# pygeoip reader code.
COUNTRY_BEGIN = 16776960
STATE_BEGIN_REV0 = 16700000
STATE_BEGIN_REV1 = 16000000
STRUCTURE_INFO_MAX_SIZE = 20
DATABASE_INFO_MAX_SIZE = 100
# Database editions: numeric tags identifying which MaxMind product a
# database file contains.
COUNTRY_EDITION = 1
REGION_EDITION_REV0 = 7
REGION_EDITION_REV1 = 3
CITY_EDITION_REV0 = 6
CITY_EDITION_REV1 = 2
ORG_EDITION = 5
ISP_EDITION = 4
PROXY_EDITION = 8
ASNUM_EDITION = 9
NETSPEED_EDITION = 11
COUNTRY_EDITION_V6 = 12
# Record sizes (presumably in bytes) used when walking the tree.
SEGMENT_RECORD_LENGTH = 3
STANDARD_RECORD_LENGTH = 3
ORG_RECORD_LENGTH = 4
MAX_RECORD_LENGTH = 4
MAX_ORG_RECORD_LENGTH = 300
FULL_RECORD_LENGTH = 50
# Region-name lookup offsets -- NOTE(review): semantics inferred from the
# names only; verify against the region lookup implementation.
US_OFFSET = 1
CANADA_OFFSET = 677
WORLD_OFFSET = 1353
FIPS_RANGE = 360
|
mzdaniel/oh-mainline
|
vendor/packages/pygeoip/pygeoip/const.py
|
Python
|
agpl-3.0
| 15,440
|
[
"BWA",
"COLUMBUS"
] |
9f9a9ccbc43707f8c36e7b3d2f954029dfa7dd3a5c83bfe3855e255c29cf82f9
|
#
# Copyright (C) 2006-2016 Greg Landrum
# All Rights Reserved
#
import os
import re
from rdkit.Chem.Draw import rdMolDraw2D
from rdkit.Chem.Draw.MolDrawing import MolDrawing, DrawingOptions
from rdkit.Chem.Draw.rdMolDraw2D import *
from rdkit.six import iteritems
def _getCanvas():
  """Select the 2D drawing canvas backend.

  Honors the RDKIT_CANVAS environment variable ('cairo', 'agg', anything
  else selects sping); when unset, tries cairo first, then agg, then sping.

  Returns a (useAGG, useCairo, Canvas) tuple where Canvas is the chosen
  canvas class.
  """
  use_agg = False
  use_cairo = False
  use_sping = False
  Canvas = None
  requested = os.environ.get('RDKIT_CANVAS', '')
  if requested:
    backend = requested.lower()
    if backend == 'cairo':
      from rdkit.Chem.Draw.cairoCanvas import Canvas
      use_cairo = True
    elif backend == 'agg':
      from rdkit.Chem.Draw.aggCanvas import Canvas
      use_agg = True
    else:
      from rdkit.Chem.Draw.spingCanvas import Canvas
      use_sping = True
  else:
    # No explicit request: fall through the preference order.
    try:
      from rdkit.Chem.Draw.cairoCanvas import Canvas
      use_cairo = True
    except ImportError:
      try:
        from rdkit.Chem.Draw.aggCanvas import Canvas
        use_agg = True
      except ImportError:
        from rdkit.Chem.Draw.spingCanvas import Canvas
        use_sping = True
  if use_sping:
    # the sping canvas doesn't support unicode well
    DrawingOptions.radicalSymbol = '.'
  return use_agg, use_cairo, Canvas
def _createCanvas(size):
  """Create a fresh drawing canvas of ``size`` (width, height) pixels.

  Returns an (image, canvas) pair: for the cairo/agg backends the image is a
  transparent PIL RGBA image the canvas draws onto; for sping it is the
  canvas's own internal image.
  """
  useAGG, useCairo, Canvas = _getCanvas()
  if not (useAGG or useCairo):
    # sping manages its own image object.
    from rdkit.Chem.Draw.spingCanvas import Canvas
    canvas = Canvas(size=size, name='MolToImageFile')
    return canvas._image, canvas
  try:
    import Image
  except ImportError:
    from PIL import Image
  img = Image.new("RGBA", size, (0, 0, 0, 0))
  return img, Canvas(img)
def MolToImage(mol, size=(300, 300), kekulize=True, wedgeBonds=True, fitImage=False, options=None,
               canvas=None, **kwargs):
  """Returns a PIL image containing a drawing of the molecule

  ARGUMENTS:

    - kekulize: run kekulization routine on input `mol` (default True)

    - size: final image size, in pixel (default (300,300))

    - wedgeBonds: draw wedge (stereo) bonds (default True)

    - highlightAtoms: list of atoms to highlight (default [])

    - highlightMap: dictionary of (atom, color) pairs (default None)

    - highlightBonds: list of bonds to highlight (default [])

    - highlightColor: RGB color as tuple (default [1, 0, 0])

  NOTE:

        use 'matplotlib.colors.to_rgb()' to convert string and
        HTML color codes into the RGB tuple representation, eg.

          from matplotlib.colors import ColorConverter
          img = Draw.MolToImage(m, highlightAtoms=[1,2], highlightColor=ColorConverter().to_rgb('aqua'))
          img.save("molecule.png")

  RETURNS:

    a PIL Image object
  """
  if not mol:
    raise ValueError('Null molecule provided')
  # When the caller supplies their own canvas we draw onto it and cannot
  # hand back a PIL image (img stays None unless returnCanvas is set).
  if canvas is None:
    img, canvas = _createCanvas(size)
  else:
    img = None

  options = options or DrawingOptions()
  if fitImage:
    options.dotsPerAngstrom = int(min(size) / 10)
  options.wedgeDashedBonds = wedgeBonds
  # highlightColor is consumed here so it does not reach drawer.AddMol below.
  if 'highlightColor' in kwargs:
    color = kwargs.pop('highlightColor', (1, 0, 0))
    options.selectColor = color

  drawer = MolDrawing(canvas=canvas, drawingOptions=options)

  if kekulize:
    from rdkit import Chem
    # Kekulize a copy so the caller's molecule is left untouched.
    mol = Chem.Mol(mol.ToBinary())
    Chem.Kekulize(mol)

  if not mol.GetNumConformers():
    # No coordinates yet: generate a 2D depiction.
    from rdkit.Chem import AllChem
    AllChem.Compute2DCoords(mol)

  # legend is likewise stripped from kwargs before AddMol sees them.
  if 'legend' in kwargs:
    legend = kwargs['legend']
    del kwargs['legend']
  else:
    legend = ''

  drawer.AddMol(mol, **kwargs)

  if legend:
    from rdkit.Chem.Draw.MolDrawing import Font
    # NOTE(review): bbox is computed but only used in the commented-out
    # debug polygons below.
    bbox = drawer.boundingBoxes[mol]
    pos = size[0] / 2, int(.94 * size[1]), 0  # the 0.94 is extremely empirical
    # canvas.addCanvasPolygon(((bbox[0],bbox[1]),(bbox[2],bbox[1]),(bbox[2],bbox[3]),(bbox[0],bbox[3])),
    #                          color=(1,0,0),fill=False,stroke=True)
    # canvas.addCanvasPolygon(((0,0),(0,size[1]),(size[0],size[1]),(size[0],0)   ),
    #                          color=(0,0,1),fill=False,stroke=True)
    font = Font(face='sans', size=12)
    canvas.addCanvasText(legend, pos, font)

  if kwargs.get('returnCanvas', False):
    return img, canvas, drawer
  else:
    canvas.flush()
    return img
def MolToFile(mol, fileName, size=(300, 300), kekulize=True, wedgeBonds=True, imageType=None,
              fitImage=False, options=None, **kwargs):
  """ Generates a drawing of a molecule and writes it to a file
  """
  # original contribution from Uwe Hoffmann
  if not fileName:
    raise ValueError('no fileName provided')
  if not mol:
    raise ValueError('Null molecule provided')

  # Infer the output format from the file extension when not given.
  if imageType is None:
    imageType = os.path.splitext(fileName)[1][1:]

  options = options if options is not None else DrawingOptions()
  useAGG, useCairo, Canvas = _getCanvas()
  if fitImage:
    options.dotsPerAngstrom = int(min(size) / 10)
  options.wedgeDashedBonds = wedgeBonds
  if useCairo or useAGG:
    canvas = Canvas(size=size, imageType=imageType, fileName=fileName)
  else:
    # the sping canvas doesn't support unicode well
    options.radicalSymbol = '.'
    canvas = Canvas(size=size, name=fileName, imageType=imageType)
  drawer = MolDrawing(canvas=canvas, drawingOptions=options)
  if kekulize:
    from rdkit import Chem
    # Kekulize a copy so the caller's molecule is left untouched.
    mol = Chem.Mol(mol.ToBinary())
    Chem.Kekulize(mol)
  if not mol.GetNumConformers():
    from rdkit.Chem import AllChem
    AllChem.Compute2DCoords(mol)
  drawer.AddMol(mol, **kwargs)
  # cairo/agg canvases flush to the already-open file; sping saves explicitly.
  if useCairo or useAGG:
    canvas.flush()
  else:
    canvas.save()
def MolToImageFile(mol, filename, size=(300, 300), kekulize=True, wedgeBonds=True, **kwargs):
  """ DEPRECATED: please use MolToFile instead
  """
  image = MolToImage(mol, size=size, kekulize=kekulize, wedgeBonds=wedgeBonds,
                     **kwargs)
  image.save(filename)
# Module-level Tkinter state shared across ShowMol() calls so that repeated
# calls update a single window instead of spawning a new one each time.
tkRoot = None
tkLabel = None
tkPI = None
def ShowMol(mol, size=(300, 300), kekulize=True, wedgeBonds=True, title='RDKit Molecule', **kwargs):
  """ Generates a picture of a molecule and displays it in a Tkinter window

  Reuses the module-level tkRoot/tkLabel/tkPI window state: the first call
  creates the window, later calls paste the new image into it.
  """
  global tkRoot, tkLabel, tkPI
  # Support both Python 2 (Tkinter) and Python 3 (tkinter) module names.
  try:
    import Tkinter
  except ImportError:
    import tkinter as Tkinter
  try:
    import ImageTk
  except ImportError:
    from PIL import ImageTk
  img = MolToImage(mol, size, kekulize, wedgeBonds, **kwargs)
  if not tkRoot:
    # First call: build the window and label holding the image.
    tkRoot = Tkinter.Tk()
    tkRoot.title(title)
    tkPI = ImageTk.PhotoImage(img)
    tkLabel = Tkinter.Label(tkRoot, image=tkPI)
    tkLabel.place(x=0, y=0, width=img.size[0], height=img.size[1])
  else:
    # Subsequent calls: replace the displayed image in place.
    tkPI.paste(img)
  tkRoot.geometry('%dx%d' % (img.size))
def MolToMPL(mol, size=(300, 300), kekulize=True, wedgeBonds=True, imageType=None, fitImage=False,
             options=None, **kwargs):
  """ Generates a drawing of a molecule on a matplotlib canvas

  Side effect: stores the rescaled atom positions on the *caller's* molecule
  as ``mol._atomPs`` (used e.g. by calcAtomGaussians).
  Returns the matplotlib Figure.
  """
  if not mol:
    raise ValueError('Null molecule provided')
  from rdkit.Chem.Draw.mplCanvas import Canvas
  canvas = Canvas(size)
  if options is None:
    options = DrawingOptions()
    options.bgColor = None
  if fitImage:
    options.dotsPerAngstrom = int(min(size) / 10)
  options.wedgeDashedBonds = wedgeBonds
  drawer = MolDrawing(canvas=canvas, drawingOptions=options)
  # Keep a handle on the original molecule: atom positions are attached to it
  # below even though drawing may happen on a kekulized copy.
  omol = mol
  if kekulize:
    from rdkit import Chem
    mol = Chem.Mol(mol.ToBinary())
    Chem.Kekulize(mol)
  if not mol.GetNumConformers():
    from rdkit.Chem import AllChem
    AllChem.Compute2DCoords(mol)
  drawer.AddMol(mol, **kwargs)
  omol._atomPs = drawer.atomPs[mol]
  # Rescale drawing coordinates into the canvas's normalized frame.
  for k, v in iteritems(omol._atomPs):
    omol._atomPs[k] = canvas.rescalePt(v)
  canvas._figure.set_size_inches(float(size[0]) / 100, float(size[1]) / 100)
  return canvas._figure
def calcAtomGaussians(mol, a=0.03, step=0.02, weights=None):
  """Compute a weighted sum of 2D Gaussians centered on the atom positions.

  ARGUMENTS:

    - mol: molecule with normalized atom positions in ``mol._atomPs``
      (as set by MolToMPL)
    - a: Gaussian width (same sigma for x and y)
    - step: grid spacing on the unit square
    - weights: per-atom weights (default: 1.0 for every atom)

  RETURNS: (X, Y, Z) meshgrid arrays over [0, 1) suitable for
  ``contour``/``imshow``.

  useful things to do with these:
  fig.axes[0].imshow(z,cmap=cm.gray,interpolation='bilinear',origin='lower',extent=(0,1,0,1))
  fig.axes[0].contour(x,y,z,20,colors='k')

  fig=Draw.MolToMPL(m);
  contribs=Crippen.rdMolDescriptors._CalcCrippenContribs(m)
  logps,mrs=zip(*contribs)
  x,y,z=Draw.calcAtomGaussians(m,0.03,step=0.01,weights=logps)
  fig.axes[0].imshow(z,cmap=cm.jet,interpolation='bilinear',origin='lower',extent=(0,1,0,1))
  fig.axes[0].contour(x,y,z,20,colors='k',alpha=0.5)
  fig.savefig('coumlogps.colored.png',bbox_inches='tight')
  """
  import numpy
  x = numpy.arange(0, 1, step)
  y = numpy.arange(0, 1, step)
  X, Y = numpy.meshgrid(x, y)
  if weights is None:
    weights = [1.] * mol.GetNumAtoms()

  def _gauss2d(mux, muy):
    # Isotropic bivariate normal PDF with sigma_x = sigma_y = a and zero
    # covariance; replaces matplotlib.mlab.bivariate_normal, which was
    # removed in matplotlib >= 3.1 (identical values for these arguments).
    return (numpy.exp(-((X - mux) ** 2 + (Y - muy) ** 2) / (2 * a ** 2)) /
            (2 * numpy.pi * a ** 2))

  Z = _gauss2d(mol._atomPs[0][0], mol._atomPs[0][1]) * weights[0]
  for i in range(1, mol.GetNumAtoms()):
    Zp = _gauss2d(mol._atomPs[i][0], mol._atomPs[i][1])
    Z += Zp * weights[i]
  return X, Y, Z
def MolsToImage(mols, subImgSize=(200, 200), legends=None, **kwargs):
  """Draw several molecules side by side into one PIL image, one tile each.
  """
  try:
    import Image
  except ImportError:
    from PIL import Image
  if legends is None:
    legends = [None] * len(mols)
  totalWidth = subImgSize[0] * len(mols)
  res = Image.new("RGBA", (totalWidth, subImgSize[1]))
  for i, (m, legend) in enumerate(zip(mols, legends)):
    tile = MolToImage(m, subImgSize, legend=legend, **kwargs)
    res.paste(tile, (i * subImgSize[0], 0))
  return res
def _moltoimg(mol, sz, highlights, legend, **kwargs):
  # Render a single molecule to a PIL image, preferring the native
  # MolDraw2DCairo drawer when this rdkit build provides it.
  try:
    import Image
  except ImportError:
    from PIL import Image
  if not hasattr(rdMolDraw2D, 'MolDraw2DCairo'):
    # Fallback: legacy Python-side drawing code.
    img = MolToImage(mol, sz, legend=legend, highlightAtoms=highlights, **kwargs)
  else:
    nmol = rdMolDraw2D.PrepareMolForDrawing(mol, kekulize=kwargs.get('kekulize', True))
    d2d = rdMolDraw2D.MolDraw2DCairo(sz[0], sz[1])
    d2d.DrawMolecule(nmol, legend=legend, highlightAtoms=highlights)
    from io import BytesIO
    d2d.FinishDrawing()
    # The drawer emits PNG bytes; load them back as a PIL image.
    sio = BytesIO(d2d.GetDrawingText())
    img = Image.open(sio)
  return img
def _MolsToGridImage(mols, molsPerRow=3, subImgSize=(200, 200), legends=None,
                     highlightAtomLists=None, **kwargs):
  """ returns a PIL Image of the grid
  """
  try:
    import Image
  except ImportError:
    from PIL import Image
  if legends is None:
    legends = [''] * len(mols)
  # Row count, rounded up to fit a partially filled last row.
  nRows = -(-len(mols) // molsPerRow)
  fullSize = (molsPerRow * subImgSize[0], nRows * subImgSize[1])
  res = Image.new("RGBA", fullSize, (255, 255, 255, 0))
  for i, mol in enumerate(mols):
    if mol is None:
      # Empty grid cell: leave it transparent.
      continue
    highlights = None
    if highlightAtomLists and highlightAtomLists[i]:
      highlights = highlightAtomLists[i]
    tile = _moltoimg(mol, subImgSize, highlights, legends[i], **kwargs)
    row, col = divmod(i, molsPerRow)
    res.paste(tile, (col * subImgSize[0], row * subImgSize[1]))
  return res
def _MolsToGridSVG(mols, molsPerRow=3, subImgSize=(200, 200), legends=None, highlightAtomLists=None,
                   stripSVGNamespace=True, **kwargs):
  """ returns an SVG of the grid

  Each molecule is drawn into its own sub-SVG; the header, background rect,
  and body of each are split apart with a regex and re-assembled into one
  full-size SVG with per-tile translate transforms.
  """
  # Splits a sub-SVG into (header)(background rect)(body); depends on the
  # exact text MolDraw2DSVG emits.
  matcher = re.compile(r'^(<.*>\n)(<svg:rect .*</svg\:rect>\n)(.*)</svg\:svg>', re.DOTALL)

  if legends is None:
    legends = [''] * len(mols)

  hdr = ''
  ftr = '</svg:svg>'
  rect = ''

  # Row count, rounded up for a partially filled last row.
  nRows = len(mols) // molsPerRow
  if len(mols) % molsPerRow:
    nRows += 1

  blocks = [''] * (nRows * molsPerRow)

  fullSize = (molsPerRow * subImgSize[0], nRows * subImgSize[1])

  for i, mol in enumerate(mols):
    highlights = None
    if highlightAtomLists and highlightAtomLists[i]:
      highlights = highlightAtomLists[i]
    if mol is not None:
      nmol = rdMolDraw2D.PrepareMolForDrawing(mol, kekulize=kwargs.get('kekulize', True))
      d2d = rdMolDraw2D.MolDraw2DSVG(subImgSize[0], subImgSize[1])
      d2d.DrawMolecule(nmol, legend=legends[i], highlightAtoms=highlights)
      d2d.FinishDrawing()
      txt = d2d.GetDrawingText()
      h, r, b = matcher.match(txt).groups()
      # Reuse the first tile's header (rewritten to the full grid size) and
      # background rect for the assembled document.
      if not hdr:
        hdr = h.replace("width='%dpx' height='%dpx' >" % subImgSize,
                        "width='%dpx' height='%dpx' >" % fullSize)
      if not rect:
        rect = r
      blocks[i] = b

  # Wrap each tile body in a translate so it lands in its grid cell.
  for i, elem in enumerate(blocks):
    row = i // molsPerRow
    col = i % molsPerRow
    elem = rect + elem
    blocks[i] = '<g transform="translate(%d,%d)" >%s</g>' % (col * subImgSize[0],
                                                             row * subImgSize[1], elem)
  res = hdr + '\n'.join(blocks) + ftr
  if stripSVGNamespace:
    res = res.replace('svg:', '')
  return res
def MolsToGridImage(mols, molsPerRow=3, subImgSize=(200, 200), legends=None,
                    highlightAtomLists=None, useSVG=False, **kwargs):
  """Draw molecules in a grid; dispatches to the SVG or PIL implementation."""
  renderer = _MolsToGridSVG if useSVG else _MolsToGridImage
  return renderer(mols, molsPerRow=molsPerRow, subImgSize=subImgSize, legends=legends,
                  highlightAtomLists=highlightAtomLists, **kwargs)
def ReactionToImage(rxn, subImgSize=(200, 200), **kwargs):
  """Draw a reaction as a row of tiles: reactants, an arrow, then products.

  The ``None`` placeholder in the molecule list marks where the reaction
  arrow is drawn. Returns a PIL image.
  """
  try:
    import Image
  except ImportError:
    from PIL import Image
  mols = []
  for i in range(rxn.GetNumReactantTemplates()):
    tmpl = rxn.GetReactantTemplate(i)
    tmpl.UpdatePropertyCache(False)
    mols.append(tmpl)
  # Placeholder tile between reactants and products: the arrow goes here.
  mols.append(None)
  for i in range(rxn.GetNumProductTemplates()):
    tmpl = rxn.GetProductTemplate(i)
    tmpl.UpdatePropertyCache(False)
    mols.append(tmpl)
  res = Image.new("RGBA", (subImgSize[0] * len(mols), subImgSize[1]), (255, 255, 255, 0))
  for i, mol in enumerate(mols):
    if mol is not None:
      nimg = MolToImage(mol, subImgSize, kekulize=False, **kwargs)
    else:
      # Draw the arrow: a horizontal shaft plus two head strokes.
      nimg, canvas = _createCanvas(subImgSize)
      p0 = (10, subImgSize[1] // 2)
      p1 = (subImgSize[0] - 10, subImgSize[1] // 2)
      p3 = (subImgSize[0] - 20, subImgSize[1] // 2 - 10)
      p4 = (subImgSize[0] - 20, subImgSize[1] // 2 + 10)
      canvas.addCanvasLine(p0, p1, lineWidth=2, color=(0, 0, 0))
      canvas.addCanvasLine(p3, p1, lineWidth=2, color=(0, 0, 0))
      canvas.addCanvasLine(p4, p1, lineWidth=2, color=(0, 0, 0))
      # cairo/agg canvases flush; the sping canvas saves instead.
      if hasattr(canvas, 'flush'):
        canvas.flush()
      else:
        canvas.save()
    res.paste(nimg, (i * subImgSize[0], 0))
  return res
def MolToQPixmap(mol, size=(300, 300), kekulize=True, wedgeBonds=True, fitImage=False, options=None,
                 **kwargs):
  """ Generates a drawing of a molecule on a Qt QPixmap

  Returns the canvas's QPixmap; requires a Qt-enabled build (qtCanvas).
  """
  if not mol:
    raise ValueError('Null molecule provided')
  from rdkit.Chem.Draw.qtCanvas import Canvas
  canvas = Canvas(size)
  if options is None:
    options = DrawingOptions()
  # Qt draws on a widget background, so no explicit background fill.
  options.bgColor = None
  if fitImage:
    options.dotsPerAngstrom = int(min(size) / 10)
  options.wedgeDashedBonds = wedgeBonds
  if kekulize:
    from rdkit import Chem
    # Kekulize a copy so the caller's molecule is left untouched.
    mol = Chem.Mol(mol.ToBinary())
    Chem.Kekulize(mol)
  if not mol.GetNumConformers():
    from rdkit.Chem import AllChem
    AllChem.Compute2DCoords(mol)
  drawer = MolDrawing(canvas=canvas, drawingOptions=options)
  drawer.AddMol(mol, **kwargs)
  canvas.flush()
  return canvas.pixmap
|
rvianello/rdkit
|
rdkit/Chem/Draw/__init__.py
|
Python
|
bsd-3-clause
| 14,820
|
[
"RDKit"
] |
5234e03475294bfc4f7bf98ae0dbfaff729b33a011bd1f4f849b38dc77d3d489
|
"""
Ammonia inversion transition TKIN fitter translated from Erik Rosolowsky's
http://svn.ok.ubc.ca/svn/signals/nh3fit/
"""
import numpy as np
from pyspeckit.mpfit import mpfit
from .. import units
import matplotlib.cbook as mpcb
# NOTE(review): this first assignment is dead -- it is immediately shadowed
# by the hyperfine-component list on the next line, so only the oneone_f*
# names are ever iterated by formaldehyde() below. Confirm whether the
# twotwo/threethree lines were meant to be modeled too.
line_names = ['oneone','twotwo','threethree']
line_names = ['oneone_f10','oneone_f01','oneone_f22','oneone_f21','oneone_f12','oneone_f11']
# http://articles.adsabs.harvard.edu/abs/1971ApJ...169..429T has the most accurate freqs
# Rest frequencies (Hz) per line name. NOTE(review): the module docstring
# says "ammonia" (copied from nh3fit) but the class below models
# formaldehyde -- confirm which species these frequencies belong to.
freq_dict = {
    'oneone': 4.82965996e9,
    'twotwo': 14.48848e9,
    'threethree': 28.97480e9,
    }
relative_strength_theory={
'oneone_f10': 4,
'oneone_f01': 4,
'oneone_f22':15,
'oneone_f21': 5,
'oneone_f12': 5,
'oneone_f11': 3,
'twotwo_f11':1,
'twotwo_f12':1,
'twotwo_f21':1,
'twotwo_f32':1,
'twotwo_f33':1,
'twotwo_f22':1,
'twotwo_f23':1,
'threethree_f22':1,
'threethree_f44':1,
'threethree_f33':1,
}
hf_freq_dict={
'oneone_f10':4.82965996e9 - 18.53e3,
'oneone_f01':4.82965996e9 - 1.34e3,
'oneone_f22':4.82965996e9 - 0.35e3,
'oneone_f21':4.82965996e9 + 4.05e3,
'oneone_f12':4.82965996e9 + 6.48e3,
'oneone_f11':4.82965996e9 + 11.08e3,
'twotwo_f11':14.48846e9,
'twotwo_f12':14.48847e9,
'twotwo_f21':14.48848e9,
'twotwo_f32':14.48848e9,
'twotwo_f33':14.48848e9,
'twotwo_f22':14.48849e9,
'twotwo_f23':14.48849e9,
'threethree_f22':28.97478e9,
'threethree_f44':28.97480e9,
'threethree_f33':28.97481e9,
}
freq_dict.update(hf_freq_dict)
aval_dict = {
'oneone': 10**-8.44801, #64*!pi**4/(3*h*c**3)*nu11**3*mu0**2*(1/2.)
'twotwo': 10**-7.49373, #64*!pi**4/(3*h*c**3)*nu22**3*mu0**2*(2/3.)
'threethree': 10**-6.89179, #64*!pi**4/(3*h*c**3)*nu33**3*mu0**2*(3/4.)
}
hf_aval_dict={
'oneone_f10':10**-8.92509,
'oneone_f01':10**-8.44797,
'oneone_f22':10**-8.57294,
'oneone_f21':10**-9.05004,
'oneone_f12':10**-8.82819,
'oneone_f11':10**-9.05009,
'twotwo_f11':10**-7.61876,
'twotwo_f12':10**-8.09586,
'twotwo_f21':10**-8.31771,
'twotwo_f32':10**-8.44804,
'twotwo_f33':10**-7.54494,
'twotwo_f22':10**-7.65221,
'twotwo_f23':10**-8.30191,
'threethree_f22':10**-6.94294,
'threethree_f44':10**-6.91981,
'threethree_f33':10**-6.96736,
}
ortho_dict = {
'oneone': False,
'twotwo': False,
'threethree': False,
}
n_ortho = np.arange(0,28,3) # 0..3..27
n_para = np.array([x for x in range(28) if x % 3 != 0])
# NOTE(review): this first, list-valued definition is immediately shadowed by
# the scalar-valued dict below; only the second definition is in effect.
voff_lines_dict = {
    'oneone': [(hf_freq_dict[f]-freq_dict['oneone'])/freq_dict['oneone']*units.speedoflight_ms for f in hf_freq_dict.keys() if "oneone" in f],
    'twotwo': [(hf_freq_dict[f]-freq_dict['twotwo'])/freq_dict['twotwo']*units.speedoflight_ms for f in hf_freq_dict.keys() if "twotwo" in f],
    'threethree': [(hf_freq_dict[f]-freq_dict['threethree'])/freq_dict['threethree']*units.speedoflight_ms for f in hf_freq_dict.keys() if "threethree" in f],
    }
# Velocity offset of each hyperfine component (km/s for the oneone_f*
# entries). NOTE(review): the twotwo_*/threethree_* values below are the raw
# frequencies in Hz copied from hf_freq_dict, not velocity offsets -- almost
# certainly inconsistent with the oneone entries; harmless only as long as
# line_names contains just the oneone_f* keys. Confirm and fix upstream.
voff_lines_dict={
    'oneone_f10': 18.53e3/4.82965996e9 * units.speedoflight_ms / 1000.0,
    'oneone_f01': 1.34e3 /4.82965996e9 * units.speedoflight_ms / 1000.0,
    'oneone_f22': 0.35e3 /4.82965996e9 * units.speedoflight_ms / 1000.0,
    'oneone_f21': 4.05e3 /4.82965996e9 * units.speedoflight_ms / 1000.0,
    'oneone_f12': 6.48e3 /4.82965996e9 * units.speedoflight_ms / 1000.0,
    'oneone_f11': 11.08e3/4.82965996e9 * units.speedoflight_ms / 1000.0,
    'twotwo_f11':14.48846e9,
    'twotwo_f12':14.48847e9,
    'twotwo_f21':14.48848e9,
    'twotwo_f32':14.48848e9,
    'twotwo_f33':14.48848e9,
    'twotwo_f22':14.48849e9,
    'twotwo_f23':14.48849e9,
    'threethree_f22':28.97478e9,
    'threethree_f44':28.97480e9,
    'threethree_f33':28.97481e9,
    }
class formaldehyde_model(object):
    """Spectral model + fitter scaffold for H2CO (formaldehyde) lines.

    NOTE(review): large parts of this class were evidently copied from the
    ammonia model: docstrings mention ammonia/NH3, the fit parameters are
    tkin/tex/Ntot/width/xoff_v/fortho, and ``n_formaldehyde``/``multinh3fit``
    call ``self.ammonia``/``self.n_ammonia``, which are NOT defined on this
    class.  Confirm against the ammonia module before relying on the fitting
    entry points; only ``formaldehyde()`` itself is self-contained.
    This is Python 2 code (print statements, xrange).
    """
    def __init__(self):
        # One spectral component by default; six parameters per component
        # (TKIN, TEX, NTOT, WIDTH, XOFF_V, FORTHO -- see multinh3fit).
        self.npeaks = 1
        self.npars = 6
        pass
    def formaldehyde(self, xarr, xunits='GHz', amp=1.0, width=1.0,
            xoff_v=0.0, line='oneone'):
        """
        Generate a model Formaldehyde spectrum based on input temperatures, column, and
        gaussian parameters
        (not implemented) if tau11 is specified, Ntot is ignored

        xarr    - spectral axis; converted to GHz internally
        xunits  - key of units.frequency_dict or units.velocity_dict
        amp     - line amplitude
        width   - velocity width (km/s)
        xoff_v  - velocity offset (km/s)
        line    - which transition defines the velocity->frequency
                  conversion (only used for velocity-type xunits)
        """
        # Convert X-units to frequency in GHz
        if xunits in units.frequency_dict:
            xarr = np.copy(xarr) * units.frequency_dict[xunits] / units.frequency_dict['GHz']
        elif xunits in units.velocity_dict:
            if line in freq_dict:
                # Doppler-shift the requested line's rest frequency.
                xarr = (freq_dict[line] - (np.copy(xarr) *
                        (units.velocity_dict[xunits] / units.velocity_dict['m/s'] / units.speedoflight_ms) *
                        freq_dict[line]) ) / units.frequency_dict['GHz']
            else:
                raise Exception("Xunits is velocity-type (%s) but line %s is not in the list." % (xunits,line))
        else:
            raise Exception("xunits not recognized: %s" % (xunits))
        ckms = 2.99792458e5    # speed of light [km/s]
        ccms = ckms*1e5        # speed of light [cm/s] (unused below)
        g1 = 1                 # statistical weights (unused below)
        g2 = 1
        h = 6.6260693e-27      # Planck constant [erg s] (unused below)
        kb = 1.3806505e-16     # Boltzmann constant [erg/K] (unused below)
        runspec = np.zeros(len(xarr))
        for linename in line_names:
            # Per-component velocity offsets -> absolute component frequencies.
            voff_lines = np.array(voff_lines_dict[linename])
            lines = (1-voff_lines/ckms)*freq_dict[linename]
            nuwidth = np.abs(width/ckms*lines)
            nuoff = xoff_v/ckms*lines
            # strength array
            # NOTE(review): the gaussian is centred on freq_dict[linename]
            # rather than on the shifted `lines`, and the model accumulates
            # (1 - gaussian), i.e. an absorption-like profile per component;
            # confirm this is the intended shape.
            runspec += (1-relative_strength_theory[linename]*amp*\
                    np.exp(-(xarr+nuoff-freq_dict[linename])**2/(2*nuwidth**2)))
        return runspec
    def n_formaldehyde(self, pars=None, fittau=False, **kwargs):
        """
        Returns a function that sums over N ammonia line profiles, where N is the length of
        tkin,tex,Ntot,width,xoff_v,fortho *OR* N = len(pars) / 6
        The background "height" is assumed to be zero (you must "baseline" your
        spectrum before fitting)
        pars  - a list with len(pars) = 6n, assuming tkin,tex,Ntot,width,xoff_v,fortho repeated

        NOTE(review): the elif branch references tkin/tex/... before they are
        ever assigned (NameError if len(pars) % 6 != 0), and the returned
        closure calls self.ammonia, which this class does not define --
        copied-from-ammonia artifacts.
        """
        if len(pars) % 6 == 0:
            # De-interleave the flat parameter list into per-component lists.
            tkin = [pars[ii] for ii in xrange(0,len(pars),6)]
            tex = [pars[ii] for ii in xrange(1,len(pars),6)]
            Ntot = [pars[ii] for ii in xrange(2,len(pars),6)]
            width = [pars[ii] for ii in xrange(3,len(pars),6)]
            xoff_v = [pars[ii] for ii in xrange(4,len(pars),6)]
            fortho = [pars[ii] for ii in xrange(5,len(pars),6)]
        elif not(len(tkin) == len(tex) == len(Ntot) == len(xoff_v) == len(width) == len(fortho)):
            raise ValueError("Wrong array lengths!")
        modelkwargs = kwargs.copy()
        def L(x):
            # Sum one profile per component over the shared axis x.
            v = np.zeros(len(x))
            for i in range(len(tkin)):
                modelkwargs.update({'tkin':tkin[i], 'tex':tex[i],
                        'width':width[i], 'xoff_v':xoff_v[i],
                        'fortho':fortho[i]})
                if fittau:
                    modelkwargs.update({'tau11':Ntot[i]})
                else:
                    modelkwargs.update({'Ntot':Ntot[i]})
                v += self.ammonia(x,**modelkwargs)
            return v
        return L
    def multinh3fit(self, xax, data, npeaks=1, err=None, params=[20,20,1e10,1.0,0.0,0.5],
            fixed=[False,False,False,False,False,False],
            limitedmin=[True,True,True,True,False,True],
            limitedmax=[False,False,False,False,False,True], minpars=[2.73,2.73,0,0,0,0],
            maxpars=[0,0,0,0,0,1], quiet=True, shh=True, veryverbose=False, **kwargs):
        """
        Fit multiple nh3 profiles
        Inputs:
           xax - x axis
           data - y axis
           npeaks - How many nh3 profiles to fit?  Default 1 (this could supersede onedgaussfit)
           err - error corresponding to data
         These parameters need to have length = 6*npeaks.  If npeaks > 1 and length = 6, they will
         be replicated npeaks times, otherwise they will be reset to defaults:
           params - Fit parameters: [amplitude, offset, Gfwhm, Lfwhm] * npeaks
                  If len(params) % 6 == 0, npeaks will be set to len(params) / 6
           fixed - Is parameter fixed?
           limitedmin/minpars - set lower limits on each parameter (default: width>0, Tex and Tkin > Tcmb)
           limitedmax/maxpars - set upper limits on each parameter
           quiet - should MPFIT output each iteration?
           shh - output final parameters?
        Returns:
           Fit parameters
           Model
           Fit errors
           chi2

        NOTE(review): mutable default arguments (params=[...], etc.) are
        shared across calls and are mutated in-place below; a caller relying
        on the defaults twice in a row will see replicated lists.
        """
        self.npars = 6
        if len(params) != npeaks and (len(params) / self.npars) > npeaks:
            npeaks = len(params) / self.npars
        self.npeaks = npeaks
        if isinstance(params,np.ndarray): params=params.tolist()
        # make sure all various things are the right length; if they're not, fix them using the defaults
        for parlist in (params,fixed,limitedmin,limitedmax,minpars,maxpars):
            if len(parlist) != self.npars*self.npeaks:
                # if you leave the defaults, or enter something that can be multiplied by 3 to get to the
                # right number of gaussians, it will just replicate
                if len(parlist) == self.npars:
                    parlist *= npeaks
                elif parlist==params:
                    parlist[:] = [20,20,1e10,1.0,0.0,0.5] * npeaks
                elif parlist==fixed:
                    parlist[:] = [False,False,False,False,False,False] * npeaks
                elif parlist==limitedmax:
                    parlist[:] = [False,False,False,False,False,True] * npeaks
                elif parlist==limitedmin:
                    parlist[:] = [True,True,True,True,False,True] * npeaks
                elif parlist==minpars:
                    parlist[:] = [2.73,0,0,0,0,0] * npeaks
                elif parlist==maxpars:
                    parlist[:] = [0,0,0,0,0,1] * npeaks
        def mpfitfun(x,y,err):
            # Residual function in the form mpfit expects.
            # NOTE(review): calls self.n_ammonia (undefined here) -- see class note.
            if err is None:
                def f(p,fjac=None): return [0,(y-self.n_ammonia(pars=p, **kwargs)(x))]
            else:
                def f(p,fjac=None): return [0,(y-self.n_ammonia(pars=p, **kwargs)(x))/err]
            return f
        parnames = {0:"TKIN",1:"TEX",2:"NTOT",3:"WIDTH",4:"XOFF_V",5:"FORTHO"}
        parinfo = [ {'n':ii, 'value':params[ii],
            'limits':[minpars[ii],maxpars[ii]],
            'limited':[limitedmin[ii],limitedmax[ii]], 'fixed':fixed[ii],
            'parname':parnames[ii%self.npars]+str(ii/self.npars),
            'mpmaxstep':0,'error':ii}
            for ii in xrange(len(params)) ]
        # Damp the temperature steps so the optimizer does not jump wildly.
        parinfo[0]['mpmaxstep'] = 1.0
        parinfo[1]['mpmaxstep'] = 1.0
        if veryverbose:
            print "GUESSES: "
            print "\n".join(["%s: %s" % (p['parname'],p['value']) for p in parinfo])
        mp = mpfit(mpfitfun(xax,data,err),parinfo=parinfo,quiet=quiet)
        mpp = mp.params
        if mp.perror is not None: mpperr = mp.perror
        else: mpperr = mpp*0
        chi2 = mp.fnorm
        if mp.status == 0:
            raise Exception(mp.errmsg)
        if not shh:
            print "Fit message: ",mp.errmsg
            print "Final fit values: "
            for i,p in enumerate(mpp):
                parinfo[i]['value'] = p
                print parinfo[i]['parname'],p," +/- ",mpperr[i]
            print "Chi2: ",mp.fnorm," Reduced Chi2: ",mp.fnorm/len(data)," DOF:",len(data)-len(mpp)
        if mpp[1] > mpp[0]: mpp[1] = mpp[0]  # force Tex>Tkin to Tex=Tkin (already done in n_ammonia)
        self.mp = mp
        self.mpp = mpp
        self.mpperr = mpperr
        self.model = self.n_ammonia(pars=mpp,**kwargs)(xax)
        return mpp,self.n_ammonia(pars=mpp,**kwargs)(xax),mpperr,chi2
    # Instances are callable as fitters.
    __call__ = multinh3fit
    def moments(self, Xax, data, negamp=None, veryverbose=False, **kwargs):
        """
        Returns a very simple and likely incorrect guess
        """
        # TKIN, TEX, NTOT, width, center, ortho fraction
        return [20,10, 1e15, 1.0, 0.0, 1.0]
    def annotations(self):
        # Build LaTeX-formatted "value +/- error" labels, one tuple per peak,
        # then flatten for plot annotation.  Requires a previous fit
        # (self.mpp / self.mpperr set by multinh3fit).
        label_list = [ (
                "$T_K(%i)$=%6.4g $\\pm$ %6.4g" % (jj,self.mpp[0+jj*self.npars],self.mpperr[0+jj*self.npars]),
                "$T_{ex}(%i)$=%6.4g $\\pm$ %6.4g" % (jj,self.mpp[1+jj*self.npars],self.mpperr[1+jj*self.npars]),
                "$N$(%i)=%6.4g $\\pm$ %6.4g" % (jj,self.mpp[2+jj*self.npars],self.mpperr[2+jj*self.npars]),
                "$\\sigma(%i)$=%6.4g $\\pm$ %6.4g" % (jj,self.mpp[3+jj*self.npars],self.mpperr[3+jj*self.npars]),
                "$v(%i)$=%6.4g $\\pm$ %6.4g" % (jj,self.mpp[4+jj*self.npars],self.mpperr[4+jj*self.npars]),
                "$F_o(%i)$=%6.4g $\\pm$ %6.4g" % (jj,self.mpp[5+jj*self.npars],self.mpperr[5+jj*self.npars])
                ) for jj in range(self.npeaks)]
        labels = tuple(mpcb.flatten(label_list))
        return labels
|
bsipocz/pyspeckit
|
pyspeckit/spectrum/models/_formaldehyde.py
|
Python
|
mit
| 13,178
|
[
"Gaussian"
] |
e62efbab30d6f79831c1e63416fa63c9a1c9e59ae348ba3719557ef3745343a9
|
#!/usr/bin/env python
# python modules
import sys
import os
import numpy as np
import math
# qt modules
from omg.gaussian.gaussian import GaussianCom
from omg.molecules import Molecule
# autodock
import bhtree
def usage():
    """Print command-line usage for this script to stdout."""
    print('usage:')
    print(' residel_charges.py gaussian_input.com')
    print(' dist to delete [always: 15 Angstroms ]')
    print(' new_folder_name [always: argv[1]/ ]')
def min_dist(setA, setB, CUTOFF = 8.0):
    """Return the minimum Euclidean distance between point sets setA and setB.

    Uses an autodock BHtree over setB for fast neighbour queries.  Only
    neighbours within CUTOFF of each setA point are examined, so the result
    is only meaningful when the true minimum is below CUTOFF (otherwise the
    9999 sentinel leaks through and sqrt(9999) ~ 100 is returned).
    """
    bht = bhtree.bhtreelib.BHtree(setB, None, 10)
    indx = np.zeros(len(setB)).astype('i')
    mindist = float('+inf')
    for a in setA:
        # closePointsDist2 fills `dist` with squared distances of setB points
        # within CUTOFF of `a` (name + final sqrt imply squared -- confirm);
        # untouched slots keep the 9999 sentinel.
        dist = np.ones(len(setB)).astype('f')*9999
        bht.closePointsDist2(tuple(a), CUTOFF, indx, dist)
        mindist = min(dist.min(), mindist)
    bhtree.freeBHtree(bht) # or memory builds up
    return math.sqrt(mindist)
def get_resID(atom):
    """Return the (residue name, residue number) pair identifying *atom*'s residue."""
    info = atom.resinfo
    return (info.resname, info.resnum)
def build_resID_dict(atoms_list):
    """Map each residue ID (resname, resnum) to the list of atom indices in it."""
    grouped = {}
    for idx, atom in enumerate(atoms_list):
        grouped.setdefault(get_resID(atom), []).append(idx)
    return grouped
def byres(resID_dict, mask_vec):
    """if one false, all false"""
    # Propagate False over whole residues: a residue with any un-set atom
    # gets every one of its atoms cleared.  Mutates and returns mask_vec.
    for indices in resID_dict.values():
        if not all(mask_vec[i] for i in indices):
            for i in indices:
                mask_vec[i] = False
    return mask_vec
def byresTrue(resID_dict, mask_vec):
    """if one True, all True (for del wat)"""
    # Propagate True over whole residues: a residue with any set atom gets
    # every one of its atoms set.  Mutates and returns mask_vec.
    for indices in resID_dict.values():
        if any(mask_vec[i] for i in indices):
            for i in indices:
                mask_vec[i] = True
    return mask_vec
def zerate_res_charge(atomslist, res_id):
    """Zero the MM charge of every atom belonging to one residue.

    Args:
        atomslist: iterable of atoms exposing .resinfo (resname, resnum)
            and .mm.charge attributes.
        res_id: (resname, resnum) tuple identifying the residue.

    Returns:
        List of the original charges in atom order, suitable for restoring
        later via reset_res_charge().
    """
    # The original signature used tuple-parameter unpacking
    # `(resname, resid)`, which is Python-2-only syntax (removed by
    # PEP 3113).  Callers still pass a single (resname, resnum) tuple;
    # unpack it explicitly so the function also runs under Python 3.
    resname, resid = res_id
    original_charges = []
    for atomo in atomslist:
        if atomo.resinfo.resname == resname and atomo.resinfo.resnum == resid:
            original_charges.append(atomo.mm.charge)
            atomo.mm.charge = 0
    return original_charges
def reset_res_charge(atomslist, res_id, original_charges):
    """Restore charges previously saved/zeroed by zerate_res_charge().

    Args:
        atomslist: same atom list passed to zerate_res_charge().
        res_id: (resname, resnum) tuple identifying the residue.
        original_charges: charges returned by zerate_res_charge(), in the
            same atom order.

    Returns:
        None.
    """
    # Tuple-parameter unpacking in the signature was Python-2-only syntax
    # (removed by PEP 3113); unpack explicitly for Python 3 compatibility.
    # Call sites are unchanged -- they already pass one tuple.
    resname, resid = res_id
    i = 0
    for a in atomslist:
        if a.resinfo.resname == resname and a.resinfo.resnum == resid:
            a.mm.charge = original_charges[i]
            i += 1
    return None
def main():
    """Generate one Gaussian input per selected residue with its charges zeroed.

    Reads the .com file named on the command line, selects residues by a
    15-Angstrom distance test against the ONIOM high layer (waters and
    high-layer atoms are never selected themselves), and for each selected
    residue writes a copy of the input in which that residue's MM charges
    are set to zero, into a folder named after the input file.
    """
    gaussian_com_filename = sys.argv[1]
    new_folder_name = os.path.splitext(sys.argv[1])[0]
    freeze_angs = 15
    if not os.path.exists("./{}".format(new_folder_name)):
        os.makedirs(new_folder_name)
    # input
    gaussian_file = GaussianCom(gaussian_com_filename)
    resID_dict = build_resID_dict(gaussian_file.atoms_list)
    # get xyz tuples
    highlayer_xyz = []
    all_xyz = []
    non_wat_xyz = [] #all MM and non waters
    for i,atom in enumerate(gaussian_file.atoms_list):
        if atom.oniom.layer == 'H':
            highlayer_xyz.append((atom.GetX(), atom.GetY(), atom.GetZ()))
        if not atom.resinfo.resname == 'WAT' or atom.oniom.layer == 'H':
            non_wat_xyz.append((atom.GetX(), atom.GetY(), atom.GetZ()))
        all_xyz.append((atom.GetX(), atom.GetY(), atom.GetZ()))
    if not len(highlayer_xyz):
        print('WELL WELL...')
        print(' no highlayer defined, freezing nothing')
    else:
        # indexes to freeze [list of atoms to "freeze"]
        CUTOFF = freeze_angs + 1.0
        freeze_idx = [False for _ in all_xyz]
        for (i,xyz) in enumerate(all_xyz):
            atom = gaussian_file.atoms_list[i]
            tofreeze = min_dist( [xyz] , highlayer_xyz, CUTOFF) <= freeze_angs
            freeze_idx[i] = tofreeze
            # Waters and high-layer atoms are excluded from selection.
            if atom.resinfo.resname == 'WAT' or atom.oniom.layer == 'H':
                freeze_idx[i] = False
        # byres - returns the same freeze_idx list, but if at least one atom is true, then changes all to True
        # NOTE(review): byres() actually propagates *False* across a residue
        # (any cleared atom clears the whole residue); byresTrue() is the
        # one that propagates True.  Confirm which behaviour is intended.
        freeze_idx = byres(resID_dict, freeze_idx)
        # residue selection [list of tuples (resn, resid)]
        res_2_go = list(set(get_resID(gaussian_file.atoms_list[i]) for i in np.where(freeze_idx)[0]))
        old_atoms_list = gaussian_file.atoms_list
        residues_list = Molecule("protein", old_atoms_list).make_residues_list()
        # do the magic
        for no, residue in enumerate(residues_list):
            if get_resID(residue[0]) not in res_2_go:
                continue
            # NOTE (translated from Portuguese): residues_list looks like a
            # [list of residues [list of atoms]]; this could probably be
            # unpacked more directly.
            resname = residue[0].resinfo.resname
            resid = residue[0].resinfo.resnum
            residue_number_name = "{0}_{1}".format(resname,resid)
            # Temporarily zero this residue's charges, write the input file,
            # then restore the charges for the next iteration.
            original_charges = zerate_res_charge(old_atoms_list, (resname, resid))
            #print(residue_number_name)
            gaussian_name = "{0}_{1}.com".format(
                gaussian_com_filename[:-4],
                residue_number_name
                )
            gaussian_file.write_to_file("{0}/{1}".format(
                new_folder_name,
                gaussian_name)
                )
            reset_res_charge(old_atoms_list, (resname, resid), original_charges)
    #        input_list.append(gaussian_name)
    #    script_file_name = "{0}/{1}".format(new_folder_name,"run.sh")
    #    with open(script_file_name, 'w') as run_script:
    #        for job in input_list:
    #            out_name = job[:-4]+".log"
    #            run_script.write("g09 {0} {1}\n".format(job,out_name))
    #
# Runs on import as well as execution; NOTE(review): consider guarding with
# `if __name__ == "__main__":` (behaviour deliberately left unchanged here).
main()
|
eduardoftoliveira/oniomMacGyver
|
scripts/residel_charges.py
|
Python
|
gpl-3.0
| 5,453
|
[
"Gaussian"
] |
4dd622b5647e69b5779918d3d990d45114f3e45767714c565f92b2730f28e875
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
# Project URL routing table (Django pre-2.0 `url()` regex syntax).
urlpatterns = [
    url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name='home'),
    url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
    # Django Admin, use {% url 'admin:index' %}
    url(settings.ADMIN_URL, include(admin.site.urls)),
    # User management
    url(r'^users/', include('Fango.users.urls', namespace='users')),
    url(r'^accounts/', include('allauth.urls')),
    # Your stuff: custom urls includes go here
    # NOTE(review): this second '^accounts/' include is shadowed for any
    # path that allauth also matches, since Django uses the first matching
    # pattern -- confirm the intended ordering.
    url(r'^accounts/', include('Fango.accounts.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    urlpatterns += [
        url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
        url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
        url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
        url(r'^500/$', default_views.server_error),
    ]
|
Niemzok/fango
|
config/urls.py
|
Python
|
mit
| 1,484
|
[
"VisIt"
] |
927af91c77b7ec07021d34b6c9978c671945b67d8171ef9e7c770f510b250ac4
|
'''
Created on 30 Sep 2015
@author: Max Zwiessele
'''
import unittest, numpy as np, pandas as pd # @UnresolvedImport
import GPy, GPy.kern as kern
from applygpy.model_selection import cross_validate
from GPy.models.sparse_gp_regression import SparseGPRegression
from GPy.models.sparse_gp_classification import SparseGPClassification
from GPy.core.gp import GP
from GPy.likelihoods.gaussian import Gaussian
from GPy.inference.latent_function_inference.exact_gaussian_inference import ExactGaussianInference
class Test(unittest.TestCase):
    """Cross-validation model-selection tests for applygpy (requires GPy).

    A 1-D data set is simulated from a Matern32 GP plus a linear mean, and
    the tests check that cross_validate() ranks the matching 'Mat+Lin'
    kernel best under RMSE / multivariate log-likelihood.
    """
    def setUp(self):
        # Fixed seed so the simulated data set is reproducible across runs.
        np.random.seed(11111)
        self.X = np.linspace(-1, 1, 20)[:,None]
        k = GPy.kern.Matern32(1, lengthscale=1, variance=1)
        # Name of the model that generated the data (expected winner).
        self.sim_model = 'Mat+Lin'
        # Linear mean function with a small slope; randomized afterwards.
        self.mf = GPy.mappings.Linear(1, 1)
        self.mf[:] = .01
        self.mu = self.mf.f(self.X)
        # One sample from the zero-mean GP with Matern32 covariance.
        self.Y = np.random.multivariate_normal(np.zeros(self.X.shape[0]), k.K(self.X))[:,None]
        self.mf.randomize()
        # Candidate (name, kernel) pairs handed to cross_validate().
        self.test_models = [
            ['Mat+Lin', kern.Matern32(self.X.shape[1]) + kern.Linear(self.X.shape[1], variances=.01) + kern.Bias(self.X.shape[1])],
            ['Lin', kern.Linear(self.X.shape[1], variances=.01) + kern.Bias(self.X.shape[1])],
            ]
        self.verbose = True
    def testCrossval(self):
        # Local model_builder kept for reference; the call below uses the
        # cross_validate defaults (kernels_models/model_builder commented out).
        def model_builder(X, Y, kernel):
            return GP(X, Y, kernel=kernel, likelihood=Gaussian(), mean_function=self.mf.copy(), inference_method=ExactGaussianInference())
        res = cross_validate(self.X, self.Y+self.mu, verbose=self.verbose)#, kernels_models=self.test_models)#, model_builder=model_builder)
        tmp = (res['error'] / res['test_size'])
        # The generating model should win on both error metrics.
        self.assertEqual(tmp.loc['RMSE'].mean().argmin(), self.sim_model)
        self.assertEqual(tmp.loc['log likelihood multivariate'].mean().argmax(), self.sim_model)
    def testCrossvalSparse(self):
        # Unused here (model_builder commented out in the call below).
        def model_builder(X, Y, kernel):
            m = SparseGPRegression(X, Y, kernel=kernel)
            m.Z.fix()
            return m
        import scipy.sparse as sparse
        # Regression with a scipy.sparse design matrix, 2-fold CV.
        res = cross_validate(sparse.csr_matrix(self.X), self.Y, sparse=True, verbose=self.verbose,
                             kernels_models=self.test_models,
                             k=2,
                             #model_builder=model_builder
                             )
        tmp = (res['error'] / res['test_size'])
        self.assertEqual(tmp.loc['RMSE'].mean().argmin(), self.sim_model)
        self.assertEqual(tmp.loc['log likelihood multivariate'].mean().argmax(), self.sim_model)
    def testCrossvalClass(self):
        # Classification on thresholded targets, 2-fold CV.
        res = cross_validate(self.X, self.Y>self.Y.mean(), verbose=self.verbose,
                             kernels_models=self.test_models,
                             #, model_builder=model_builder
                             k=2,
                             )
        tmp = (res['error'] / res['test_size'])
        self.assertEqual(tmp.loc['RMSE'].mean().argmin(), self.sim_model)
    def testCrossvalSparseClass(self):
        # Sparse classification variant, 2-fold CV.
        res = cross_validate(self.X, self.Y>self.Y.mean(), sparse=True, verbose=self.verbose,
                             kernels_models=self.test_models,
                             #model_builder=model_builder,
                             k=2,
                             )
        tmp = (res['error'] / res['test_size'])
        self.assertEqual(tmp.loc['RMSE'].mean().argmin(), self.sim_model)
if __name__ == "__main__":
    # Run the whole suite; uncomment the line below to run a single case.
    #import sys;sys.argv = ['', 'Test.testCrossval']
    unittest.main()
|
mzwiessele/applygpy
|
applygpy/tests/test_modelselection.py
|
Python
|
bsd-3-clause
| 3,575
|
[
"Gaussian"
] |
1c7e8db48a481ff6bfe5d93d42a2e172dd08f7367352f7348c653e28f358f2e4
|
import numpy as np
import pandas as pd
import pastas as ps
def acf_func(**kwargs):
    """Estimate the ACF of a sampled sine wave and return it with the analytic value.

    kwargs are forwarded to ps.stats.acf (e.g. bin_method).
    """
    idx = pd.to_datetime(np.arange(0, 100, 1), unit="D", origin="2000")
    series = pd.Series(data=np.sin(np.linspace(0, 10 * np.pi, 100)), index=idx)
    # Analytic autocorrelation of a sine: cosine over the first 10 lags.
    expected = np.cos(np.linspace(0.0, np.pi, 11))[1:]
    estimated = ps.stats.acf(series, lags=np.arange(1.0, 11.), min_obs=1, **kwargs).values
    return estimated, expected
def test_acf_rectangle():
    """Rectangle-binned ACF estimate must track the analytic cosine."""
    estimated, expected = acf_func(bin_method="rectangle")
    worst = np.max(np.abs(estimated - expected))
    assert worst < 0.05
def test_acf_gaussian():
    """Gaussian-binned ACF estimate must track the analytic cosine."""
    estimated, expected = acf_func(bin_method="gaussian")
    worst = np.max(np.abs(estimated - expected))
    assert worst < 0.05
def test_runs_test():
    """
    http://www.itl.nist.gov/div898/handbook/eda/section3/eda35d.htm
    True Z-statistic = 2.69
    Read NIST test data
    """
    data = pd.read_csv("tests/data/nist.csv")
    test, _ = ps.stats.runs_test(data)
    # Compare against the published NIST value.  The original assertion was
    # `test[0] - 2.69 < 0.02`, which also passes for any statistic SMALLER
    # than 2.69 (e.g. 0.0); assert on the absolute deviation instead.
    assert abs(test[0] - 2.69) < 0.02
def test_stoffer_toloi():
    """Stoffer-Toloi test on white noise should not report significance."""
    idx = pd.date_range(start=0, periods=1000, freq="D")
    noise = pd.Series(index=idx, data=np.random.rand(1000))
    _, pval = ps.stats.stoffer_toloi(noise)
    assert pval > 1e-10
|
pastas/pasta
|
tests/test_stats.py
|
Python
|
mit
| 1,157
|
[
"Gaussian"
] |
2cc516eff0655f17d208e5f0bb23f159aa1539f7b9dec36a1134cd78935b2608
|
# Copyright (c) 2014 ETH Zurich, Institute of Astronomy, Lukas Gamper <lukas.gamper@usystems.ch>
from __future__ import print_function, division, absolute_import, unicode_literals
import sys
import pickle
from hope import config
from hope._ast import *
from hope._const import *
from hope._library import *
from hope._dump import Dumper
class CPPGenerator(NodeVisitor):
"""
Generates the C code from the given :py:class:`hope._ast.Module` token
by traversing the AST.
"""
def __init__(self):
self.next_loopid, self.merged, self.slicemap, self.library, self.dumper = 0, None, {}, {}, Dumper()
def getVariableExtent(self, node):
extent = ""
for ind, segment in enumerate(node.shape):
segmentstr = self.get_segmentstr(*segment)
if not segment[0] is None:
raise Exception("Variable slices needs to start with None: {0}".format(node.name))
if not segmentstr in self.merged:
raise Exception("Unknown slice {0} in variable {1}".format(segmentstr, node.name))
if ind > 0:
extent = "({0})*{1}".format(extent, self.visit(segment[1]))
extent += "{0}{1}".format(" + " if len(extent) > 0 else "", self.slicemap[self.get_slicemap_key(ind, *segment)])
return extent
def visit_Number(self, node):
if node.dtype is bool:
return "true" if node.value else "false"
if config._readablecxx:
return "{1!r}".format(PY_C_TYPE[node.dtype], node.value)
else:
return "({0}){1!r}".format(PY_C_TYPE[node.dtype], node.value)
def visit_Variable(self, node):
if len(node.shape) == 0 or node.scope == "block":
return "c{0}".format(node.name)
else:
return "c{0}[{1}]".format(node.name, self.getVariableExtent(node));
def visit_ObjectAttr(self, node):
trace = node.getTrace()
if len(node.shape) == 0:
return "c" + ".c".join(trace)
else:
return "{0}[{1}]".format("c" + ".c".join(trace), self.getVariableExtent(node));
def visit_Dimension(self, node):
if isinstance(node.variable, ObjectAttr):
parent = node.variable.parent
trace = ["s{0}".format(node.variable.name)]
while not parent is None:
trace.insert(0, "c{0}".format(parent.name))
parent = parent.parent
return "{0}[{1}]".format(".".join(trace), node.dim)
elif isinstance(node.variable, Variable):
return "s{0}[{1}]".format(node.variable.name, node.dim)
else:
raise Exception("Unknown type {0}".format(node.variable.name))
def visit_DimensionSlice(self, node):
return "{0}+{1}".format(self.visit_Dimension(node), self.visit(node.slice))
def visit_View(self, node):
subscript = ""
extent_ind = 0
for ind, (extent, segment) in enumerate(zip(node.extents, node.variable.shape)):
if ind > 0:
subscript = "(int)({0})*{1}".format(subscript, self.visit(segment[1]))
subscript += " + " if len(subscript) > 0 else ""
if isinstance(extent, tuple):
lower, upper = extent
if lower is None: lower = segment[0]
if isinstance(lower, Number) and lower.value == 0: lower = None
if upper is None: upper = segment[1]
seg = "{0} + ".format(self.visit(lower)) if not lower is None else ""
key = self.get_slicemap_key(extent_ind, lower, upper)
seg += self.slicemap[key]
# seg += self.slicemap[self.merged["{0}:{1}".format("" if lower is None else self.dumper.visit(lower), self.dumper.visit(upper))]]
segstr = "{0}:{1}".format("" if lower is None else self.dumper.visit(lower), self.dumper.visit(upper))
extent_ind += 1
else:
seg = self.visit(extent)
if isinstance(extent, Number) and extent.value < 0:
seg = "{0}+{1}".format(self.visit_Dimension(segment[1]), seg)
segstr = self.dumper.visit(extent)
if config.rangecheck:
subscript += "native_rangecheck({0}".format(seg)
subscript += ", {0}, {1}".format(self.visit(segment[0]) if not segment[0] is None else "0", self.visit(segment[1]))
subscript += ", std::string(\"{0}\"), std::string(\"{1}\"))".format(segstr, node.variable.name)
self.library["native_rangecheck"] = LIBRARY_NATIVE_RANGECHECK
else:
subscript += seg
shape = node.variable.shape
node.variable.shape = []
# TODO: if double, cast to int ...
ret = "{0}[(int)({1})]".format(self.visit(node.variable), subscript);
node.variable.shape = shape
return ret
def visit_Expr(self, node):
return "{0};".format(self.visit(node.value))
def visit_NumpyContraction(self, node):
if node.op != "sum":
raise Exception("Only the numpy.sum contraction is implemented!")
if len(node.value.shape):
ret = "{0} = ({1})0;\n".format(self.visit(node.variable), PY_C_TYPE[node.dtype])
keys = []
for ind, segment in enumerate(node.value.shape):
ret += "{0}for (npy_intp i{1} = 0; i{1} < {2} - {3}; ++i{1}) {{\n".format( \
"\t" * ind \
, self.next_loopid \
, self.visit(segment[1]) \
, 0 if segment[0] is None else self.visit(segment[0]) \
)
segmentstr = self.get_segmentstr(*segment)
if not segmentstr in self.merged:
self.merged[segmentstr] = segmentstr
keys.append(self.get_slicemap_key(ind, *segment))
self.slicemap[keys[-1]] = "i{0}".format(self.next_loopid)
self.next_loopid += 1
ret += "{0}{1} += {2};".format("\t" * len(node.value.shape), self.visit(node.variable), self.visit(node.value))
for ind, (key, segment) in enumerate(zip(keys, node.value.shape)):
del self.slicemap[key]
ret += "\n{0}}}".format("\t" * (len(node.value.shape) - 1 - ind))
return ret
else:
return "\n{0} = {1};".format(self.visit(node.variable), self.visit(node.value))
def visit_Assign(self, node):
# TODO: capture type
if isinstance(node.target, Variable) and node.target.allocated == False:
node.target.allocated = True
return "auto {0} = {1};".format(self.visit(node.target), self.visit(node.value))
else:
return "{0} = {1};".format(self.visit(node.target), self.visit(node.value))
def visit_Reference(self, node):
target = node.target
trace = node.value.getTrace()
if isinstance(target, ObjectAttr): # self.x = self.y
return "c{0} = c{1};".format(".c".join(target.getTrace()), ".c".join(trace))
if len(target.shape) == 0: # [int] x = self.y
return "{0} c{1} = c{2};".format(PY_C_TYPE[target.dtype], target.name, ".c".join(trace))
else: # [array] x = self.y
return "PyObject * p{0} = (PyObject *)PyArray_GETCONTIGUOUS((PyArrayObject *)c{1});\n".format(target.name, ".p".join(trace)) \
+ "npy_intp * s{0} = c{1};\n".format(target.name, ".s".join(trace)) \
+ "{0} * c{1} = c{2};".format(PY_C_TYPE[target.dtype], target.name, ".c".join(trace))
def visit_AugAssign(self, node):
if node.op == "**=":
return "{0} = std::pow({0}, {1});".format(self.visit(node.target), self.visit(node.value))
elif node.op == "//=":
if type(node.target.dtype(1) // node.value.dtype(1)) in [float, np.float32, np.float64]:
return "{0} = std::floor({0} / {1});".format(self.visit(node.target), self.visit(node.value))
else:
return "{0} /= {1};".format(self.visit(node.target), self.visit(node.value))
elif node.op == "%=":
self.library["native_mod"] = LIBRARY_NATIVE_MOD
return "{0} = native_mod({0}, {1});".format(self.visit(node.target), self.visit(node.value))
else:
return "{0} {1} {2};".format(self.visit(node.target), node.op, self.visit(node.value))
def visit_UnaryOp(self, node):
if config._readablecxx:
return "{0}{1}".format(node.op, self.visit(node.operand))
else:
return "({0}{1})".format(node.op, self.visit(node.operand))
def visit_BinOp(self, node):
cast = "" if node.dtype is None or config._readablecxx else "({0})".format(PY_C_TYPE[node.dtype])
if node.op == "**":
return "{0}std::pow({1}, {2})".format(cast, self.visit(node.left), self.visit(node.right))
elif node.op == "//" and node.dtype in [float, np.float32, np.float64]:
return "{0}std::floor({1} / {2})".format(cast, self.visit(node.left), self.visit(node.right))
elif node.op == "//":
return "{0}({1} / {2})".format(cast, self.visit(node.left), self.visit(node.right))
else:
left = self.visit(node.left) if node.dtype == node.left else "{0}{1}".format(cast, self.visit(node.left))
right = self.visit(node.right) if node.dtype == node.right else "{0}{1}".format(cast, self.visit(node.right))
if node.op == "%":
self.library["native_mod"] = LIBRARY_NATIVE_MOD
return "native_mod({0}, {1})".format(left, right)
else:
return "{0}({1} {2} {3})".format(cast, left, node.op, right)
def visit_BoolOp(self, node):
values = [self.visit(value) for value in node.values]
return "({0})".format(" {0} ".format(node.op).join(values))
def visit_Compare(self, node):
cast = "" if node.dtype is None or config._readablecxx else "({0})".format(PY_C_TYPE[node.dtype])
return "{0}({1} {2} {3})".format(cast, self.visit(node.left), node.op, self.visit(node.comparator))
def visit_If(self, node):
ret = "if ({0}) {{\n\t{1}\n}}".format(self.visit(node.test), "\n\t".join(self.visit(node.body).split("\n")))
if not node.orelse is None:
ret += " else {{\n\t{0}\n}}".format("\n\t".join(self.visit(node.orelse).split("\n")))
return ret
def visit_For(self, node):
ret = "for (npy_intp {0} = {1}; {0} < {2}; ++{0}) {{\n\t".format(self.visit(node.iter), self.visit(node.lower), self.visit(node.upper))
ret += "\n\t".join(self.visit(node.body).split("\n"))
ret += "\n}"
return ret
def visit_While(self, node):
ret = "while ({0}) {{\n\t".format(self.visit(node.test))
ret += "\n\t".join(self.visit(node.body).split("\n"))
ret += "\n}"
return ret
def visit_Call(self, node):
# TODO: generalize
if isinstance(node.name, GlobalFunction):
args = []
for arg in node.args:
if not isinstance(arg, Object) and len(arg.shape) > 0 and (not isinstance(arg, Variable) or arg.scope == "block"):
raise Exception("Only variables can be passed to funtions!")
elif isinstance(arg, Object):
args.append("c{0}".format(arg.name))
elif len(arg.shape) > 0:
args.append("p{0}, s{0}, c{0}".format(arg.name))
else:
args.append(self.visit(arg))
return "{0}_{1}({2})".format( \
node.name.name \
, "".join([arg.getId() for arg in node.args]) \
, ", ".join(args) \
)
elif isinstance(node.name, HopeAttr) and node.name.name == "exp":
self.library["hope_exp"] = LIBRARY_HOPE_EXP
return "hope_exp({0})".format(self.visit(node.args[0]))
elif isinstance(node.name, NumpyAttr) and node.name.name in ["empty", "zeros", "ones"]:
return "1" if node.name.name == "ones" else "0"
elif isinstance(node.name, NumpyAttr) and node.name.name == "interp":
self.library["numpy_interp"] = LIBRARY_NUMPY_INTERP
args = [[] for _ in range(3)]
for i in range(1, 3):
if isinstance(node.args[i], ObjectAttr):
parent, trace = node.args[i].parent, [node.args[i].name]
while not parent is None:
trace.insert(0, parent.name)
parent = parent.parent
args[i] = ".c".join(trace)
else:
args[i] = node.args[i].name
# TODO: make sure node.args[1].shape == node.args[2].shape using an assert
((lower, upper),) = node.args[1].shape
left_val = "c{0}[0]".format(node.args[1].name) if lower is None else self.visit(lower)
size = self.visit(upper) + ("" if lower is None else "-{0}".format(self.visit(lower)))
right_val = "c{0}[{1}]".format(node.args[1].name, size + "-1")
ret = "numpy_interp({0}, c{1}, c{2}, {3})".format(self.visit(node.args[0]), args[1], args[2], size)
if "left" in node.keywords:
left_ret = self.visit(node.keywords["left"])
else:
left_ret = "c{0}[0]".format(args[2])
ret = "{0} < {1} ? {2} : ({3})".format(self.visit(node.args[0]), left_val, left_ret, ret)
if "right" in node.keywords:
right_ret = self.visit(node.keywords["right"])
else:
right_ret = "c{0}[{1}]".format(args[2], size + "-1")
ret = "{0} > {1} ? {2} : ({3})".format(self.visit(node.args[0]), right_val, right_ret, ret)
return "({0})".format(ret)
elif isinstance(node.name, NumpyAttr) and node.name.name == "sign":
self.library["native_sign"] = LIBRARY_NATIVE_SIGN
return "native_sign({0})".format(self.visit(node.args[0]))
elif isinstance(node.name, NumpyAttr) and node.name.name in NPY_UNARY_FUNCTIONS:
return "{0}({1})".format(NPY_UNARY_FUNCTIONS[node.name.name], self.visit(node.args[0]))
elif isinstance(node.name, NumpyAttr) and node.name.name in NPY_CAST_FUNCTIONS:
return "({0})({1})".format(PY_C_TYPE[NPY_CAST_FUNCTIONS[node.name.name]], self.visit(node.args[0]))
def visit_Allocate(self, node):
shape, variable = [], node.variable
if variable.dtype is None:
raise Exception("Unknown dtype: {0}".format(variable.dtype))
for segment in variable.shape:
if not segment[0] is None:
raise Exception("Allocate need to have (:len)* in shape: {0}".format(",".join([str(sgment) for sgment in variable.shape])))
shape.append(self.visit(segment[1]))
if len(shape) == 0:
return "{0} c{1} = {0}();".format(PY_C_TYPE[variable.dtype], variable.name)
else:
return "npy_intp d{0}[] = {{(npy_intp){1}}};\n".format(variable.name, ", (npy_intp)".join(shape)) \
+ "PyObject * p{0} = PyArray_EMPTY({1}, d{0}, {2}, 0);\n".format(variable.name, len(shape), NPY_TYPEENUM[variable.dtype]) \
+ "npy_intp * s{0} = PyArray_SHAPE((PyArrayObject *)p{0});\n".format(variable.name) \
+ "{0} * c{1} = ({0} *)PyArray_DATA((PyArrayObject *)p{1});".format(PY_C_TYPE[variable.dtype], variable.name)
def visit_Return(self, node):
# TODO: implement expressions
if len(node.value.shape) == 0:
return "return {0};".format(self.visit(node.value));
elif not isinstance(node.value, Variable):
raise Exception("TODO: implement!")
else:
return "return std::make_tuple((PyObject *)p{0}, s{0}, c{0});".format(node.value.name);
def visit_Block(self, node):
if len(node.shape):
ret = "";
keys = []
for ind, segment in enumerate(node.shape):
ret += "{0}for (npy_intp i{1} = 0; i{1} < {2} - {3}; ++i{1}) {{\n".format(
"\t" * ind,
self.next_loopid,
self.visit(segment[1]),
0 if segment[0] is None else self.visit(segment[0])
)
keys.append(self.get_slicemap_key(ind, *segment))
self.slicemap[keys[-1]] = "i{0}".format(self.next_loopid)
self.next_loopid += 1
ret += "{0}".format("\t" * len(node.shape))
ret += "\n{0}".format("\t" * len(node.shape)).join("\n".join([self.visit(expr) for expr in node.body]).split("\n"))
for ind, key in enumerate(keys):
del self.slicemap[key]
ret += "\n{0}}}".format("\t" * (len(node.shape) - 1 - ind))
return ret
else:
return "\n".join(["{0}".format(self.visit(expr)) for expr in node.body])
def visit_Body(self, node):
return "\n".join([self.visit(block) for block in node.blocks])
def visit_FunctionDef(self, node):
self.merged, code, firstSegment = {}, "", None
for merged in node.merged:
if len(merged) == 1:
self.merged[merged[0]] = merged[0]
else:
for segment in merged:
self.merged[segment] = merged[0]
if isinstance(node.shapes[segment], tuple):
lower, upper = node.shapes[segment]
if isinstance(lower, Number) and lower.value == 0: lower = None
if upper is None:
raise Exception("Unbound shapes cannot be merged: {0}".format(segment))
if lower is None:
shape = self.visit(upper)
else:
shape = "({1} - {0})".format(self.visit(lower), self.visit(upper))
else:
raise Exception("Indexes can not be merged: {0!s}".format(segment))
if firstSegment is None:
firstSegment = shape
# TODO: this fails if variable is defined in loop, maybe it makes sense to visit lower and upper und check if we can check ...
elif config.rangecheck:
code += "\n\tif ({0} - {1} != 0) {{".format(firstSegment, shape)
code += "\n\t\tPyErr_SetString(PyExc_ValueError, \"Shapes {0} and {1} do not match!\");".format(firstSegment, shape)
code += "\n\t\tthrow std::exception();"
code += "\n\t}"
code += "\n\t" + "\n\t".join(self.visit(node.body).split("\n"))
if not node.dtype is None:
code += "\n\tPyErr_SetString(PyExc_ValueError, \"No return type passed!\");"
code += "\n\tthrow std::exception();"
return code
def visit_Module(self, node):
    """Generate C++ for a whole module.

    First pass computes and stores each function's declaration string on
    the token (``fkt.decl``); then all forward declarations are emitted,
    followed by every definition, prefixed by the accumulated library
    snippets in ``self.library``.
    """
    for fktname, fktlist in list(node.functions.items()):
        for fkt in fktlist:
            fkt.decl, sig = "inline ", []
            if fkt.dtype is None:
                fkt.decl += "void"
            elif len(fkt.shape) == 0:
                fkt.decl += PY_C_TYPE[fkt.dtype]
            else:
                # array results travel as (owning PyObject, shape ptr, data ptr)
                fkt.decl += "std::tuple<PyObject *, npy_intp const *, {0} *>".format(PY_C_TYPE[fkt.dtype])
            fkt.decl += " {0}_{1}(\n\t ".format(fktname, fkt.getId())
            for arg in fkt.signature:
                if isinstance(arg, Object):
                    sig.append("{0} & c{1}\n".format(arg.getId("t"), arg.name))
                elif len(arg.shape) > 0:
                    # array args pass the PyObject, the shape array, and the raw data
                    sig.append("PyObject * p{1}, npy_intp const * __restrict__ s{1}, {0} * __restrict__ c{1}\n".format(PY_C_TYPE[arg.dtype], arg.name))
                else:
                    sig.append("{0} c{1}\n".format(PY_C_TYPE[arg.dtype], arg.name))
            fkt.decl += "\t, ".join(sig) + ")"
    # forward declarations first so the functions may call each other
    code = "".join([fkt.decl + ";\n" for fktname, fktlist in list(node.functions.items()) for fkt in fktlist])
    for fktname, fktlist in list(node.functions.items()):
        for fkt in fktlist:
            code += fkt.decl + " {"
            code += self.visit(fkt)
            code += "\n}\n"
    return "\n".join(list(self.library.values())) + code
def get_segmentstr(self, lower, upper):
    """Render a slice segment as ``lower:upper`` (empty lower bound when it is None)."""
    if lower is None:
        low = ""
    else:
        low = self.dumper.visit(lower)
    return "{0}:{1}".format(low, self.dumper.visit(upper))
def get_slicemap_key(self, ind, lower, upper):
    """Build the slicemap key for loop index *ind*, using the merge
    representative of the (lower, upper) shape segment."""
    representative = self.merged[self.get_segmentstr(lower, upper)]
    return "i{0}>{1}".format(ind, representative)
def generate(modtoken, localfilename):
    """
    Generates the C code from the given :py:class:`hope._ast.Module` token
    :param modtoken: Module to use
    :param localfilename: name of the function incl. signature
    :return code: the generated C code
    """
    objects = []
    def findObjects(obj):
        # depth-first collection of every distinct Object reachable from the
        # signatures; insert at the front so nested objects precede their users
        if obj.getId() not in [arg.getId() for arg in objects]:
            objects.insert(0, obj)
        for variable in list(obj.attrs.values()):
            if isinstance(variable, Object):
                findObjects(variable)
    for fktlist in list(modtoken.functions.values()):
        for fkt in fktlist:
            for arg in fkt.signature:
                if isinstance(arg, Object):
                    findObjects(arg)
    code = LIBRARY_IMPORTS
    code += LIBRARY_PYOBJ_DEF
    code += _obj_init_code(objects)
    generator = CPPGenerator()
    # deepcopy: the generator mutates tokens while visiting (e.g. it stores
    # declaration strings on fkt.decl in visit_Module)
    code += generator.visit(copy.deepcopy(modtoken))
    code += LIBRARY_SIGHANDLER
    code += "\n"
    code += "extern \"C\" {\n"
    code += LIBRARY_CREATE_SIGNATURE
    code += _run_fkt_code(modtoken)
    code += "\t}\n"
    # end of extern block
    if sys.version_info[0] == 2:
        code += LIBRARY_METHODS_DECL_PY2.format(fktname=modtoken.main)
        code += LIBRARY_INIT_DECL_PY2.format(filename=localfilename, fktname=modtoken.main)
    else:
        code += LIBRARY_METHODS_DECL_PY3.format(fktname=modtoken.main)
        code += LIBRARY_MODULE_DECL_PY3.format(fktname=modtoken.main)
        code += LIBRARY_INIT_DECL_PY3.format(filename=localfilename, fktname=modtoken.main)
    code += "}\n"
    return code
def _obj_init_code(objects):
    """Emit one C++ struct per collected Object.

    Each struct carries a member triple per attribute (PyObj wrapper,
    shape pointer for arrays, typed C value/pointer) and an
    ``initialize(PyObject *)`` method that type-checks and unpacks the
    Python object's attributes into those members.
    """
    code = ""
    for obj in objects:
        code += "struct {0} {{\n".format(obj.getId("t"))
        code += "\tbool initialize(PyObject * obj) {"
        if len(obj.attrs) == 0:
            code += "\n\t\treturn true;"
        else:
            # one big 'and'-chained condition: every attribute must exist and
            # pass its type check before any unpacking happens
            code += "\n\t\tif ("
            for pos, (name, variable) in enumerate(obj.attrs.items()):
                code += "\n\t\t\t"
                if pos > 0:
                    code += "and "
                code += "PyObject_HasAttrString(obj, \"{0}\") and p{0}.incref(PyObject_GetAttrString(obj, \"{0}\")) ".format(name)
                if isinstance(variable, Object):
                    code += "and c{0}.initialize(p{0})".format(name)
                elif len(variable.shape) > 0:
                    code += "and PyArray_CheckExact(p{0})".format(name)
                    code += "\n\t\t\tand PyArray_TYPE((PyArrayObject *)p{0}) == {1} and PyArray_NDIM((PyArrayObject *)p{0}) == {2}".format(name, NPY_TYPEENUM[variable.dtype], len(variable.shape))
                elif variable.dtype is int:
                    if sys.version_info[0] == 2:
                        code += "and PyInt_CheckExact((PyObject *)p{0})".format(name)
                    else:
                        code += "and PyLong_CheckExact((PyObject *)p{0})".format(name)
                elif variable.dtype is float:
                    code += "and PyFloat_CheckExact((PyObject *)p{0})".format(name)
                elif variable.dtype in NPY_SCALAR_TAG:
                    code += "and PyArray_IsScalar((PyArrayObject *)p{0}, {1})".format(name, NPY_SCALAR_TAG[variable.dtype])
                else:
                    raise Exception("Unknown type: {0!s}".format(variable.dtype))
            code += "\n\t\t) {\n"
            # unpacking: arrays are made contiguous, scalars converted to C values
            for name, variable in list(obj.attrs.items()):
                if isinstance(variable, Object):
                    pass
                elif len(variable.shape) > 0:
                    code += "\t\t\tif (!(p{0}.incref((PyObject *)PyArray_GETCONTIGUOUS((PyArrayObject *)p{0})))) {{\n".format(name)
                    code += "\t\t\t\tPyErr_SetString(PyExc_ValueError, \"Invalid Argument type on {0}!\");\n".format(name)
                    code += "\t\t\t\tthrow std::exception();\n"
                    code += "\t\t\t}\n"
                    code += "\t\t\ts{0} = PyArray_SHAPE((PyArrayObject *)p{0});\n".format(name)
                    code += "\t\t\tc{1} = ({0} *)PyArray_DATA((PyArrayObject *)p{1});\n".format(PY_C_TYPE[variable.dtype], name)
                elif variable.dtype is int:
                    if sys.version_info[0] == 2:
                        code += "\t\t\tc{0} = PyInt_AS_LONG((PyObject *)p{0});\n".format(name)
                    else:
                        code += "\t\t\tc{0} = PyLong_AS_LONG((PyObject *)p{0});\n".format(name)
                elif variable.dtype is float:
                    code += "\t\t\tc{0} = PyFloat_AS_DOUBLE((PyObject *)p{0});\n".format(name)
                elif variable.dtype in NPY_SCALAR_TAG:
                    code += "\t\t\tc{0} = PyArrayScalar_VAL((PyArrayObject *)p{0}, {1});\n".format(name, NPY_SCALAR_TAG[variable.dtype])
            code += "\t\t\treturn true;\n"
            code += "\t\t} else\n"
            code += "\t\t\treturn false;\n"
        code += "\t}\n"
        # member declarations for every attribute
        for name, variable in list(obj.attrs.items()):
            if isinstance(variable, Object):
                code += "\tPyObj p{1};\n\t{0} c{1};\n".format(variable.getId("t"), name)
            elif len(variable.shape) > 0:
                code += "\tPyObj p{1};\n\tnpy_intp * s{1};\n\t{0} * __restrict__ c{1};\n".format(PY_C_TYPE[variable.dtype], name)
            else:
                code += "\tPyObj p{1};\n\t{0} c{1};\n".format(PY_C_TYPE[variable.dtype], name)
        code += "};\n"
    return code
def _run_fkt_code(modtoken):
    """Emit the C++ ``run`` entry point for the module's main function.

    For each specialization, generates code that checks the Python
    argument tuple against that specialization's signature, unpacks the
    arguments, calls the compiled function, and converts the result back
    to a Python object. When no specialization matches, a pickled
    description of the known signatures is handed to ``create_signature``
    so a new specialization can be generated.
    """
    code = ""
    code += "\tPyObject * run(PyObject * self, PyObject * args) {\n"
    for fkt in modtoken.functions[modtoken.main]:
        code += "\t\t{"
        # declare one p{name} (Python object) / c{name} (C value) pair per argument
        for arg in fkt.signature:
            if isinstance(arg, Object):
                code += "\n\t\t\tPyObject * p{0};".format(arg.name)
                code += "\n\t\t\t{0} c{1};".format(arg.getId("t"), arg.name)
            elif len(arg.shape) > 0:
                code += "\n\t\t\tPyObj p{0};".format(arg.name)
            else:
                code += "\n\t\t\tPyObject * p{0};".format(arg.name)
                code += " {0} c{1};".format(PY_C_TYPE[arg.dtype], arg.name)
        if len(fkt.signature) > 0:
            # the whole signature check is a single short-circuiting condition
            code += "\n\t\t\tif ("
            code += "\n\t\t\t\tPyTuple_CheckExact(args) and PyTuple_GET_SIZE(args) == {0}".format(len(fkt.signature))
            for idx, arg in enumerate(fkt.signature):
                code += "\n\t\t\t\tand (p{0} = PyTuple_GET_ITEM(args, {1})) ".format(arg.name, idx)
                if isinstance(arg, Object):
                    code += "and c{0}.initialize(p{0})".format(arg.name)
                elif len(arg.shape) > 0:
                    code += "and PyArray_CheckExact(p{0})".format(arg.name)
                    code += "\n\t\t\t\tand PyArray_TYPE((PyArrayObject *)p{0}) == {1} and PyArray_NDIM((PyArrayObject *)p{0}) == {2}".format(arg.name, NPY_TYPEENUM[arg.dtype], len(arg.shape))
                elif arg.dtype is int:
                    if sys.version_info[0] == 2:
                        code += "and PyInt_CheckExact(p{0})".format(arg.name)
                    else:
                        code += "and PyLong_CheckExact(p{0})".format(arg.name)
                elif arg.dtype is float:
                    code += "and PyFloat_CheckExact(p{0})".format(arg.name)
                elif arg.dtype in NPY_SCALAR_TAG:
                    code += "and PyArray_IsScalar(p{0}, {1})".format(arg.name, NPY_SCALAR_TAG[arg.dtype])
                else:
                    raise Exception("Unknown type: {0!s}".format(arg.dtype))
            code += "\n\t\t\t) {\n"
            # argument unpacking: arrays are made contiguous, scalars converted
            for arg in fkt.signature:
                if isinstance(arg, Object): pass
                elif len(arg.shape) > 0:
                    code += "\t\t\t\tif (!(p{0}.incref((PyObject *)PyArray_GETCONTIGUOUS((PyArrayObject *)p{0})))) {{\n".format(arg.name)
                    code += "\t\t\t\t\tPyErr_SetString(PyExc_ValueError, \"Invalid Argument type on {0}!\");\n".format(arg.name)
                    code += "\t\t\t\t\treturn NULL;\n"
                    code += "\t\t\t\t}\n"
                elif arg.dtype is int:
                    if sys.version_info[0] == 2:
                        code += "\t\t\t\tc{0} = PyInt_AS_LONG(p{0});\n".format(arg.name)
                    else:
                        code += "\t\t\t\tc{0} = PyLong_AS_LONG(p{0});\n".format(arg.name)
                elif arg.dtype is float:
                    code += "\t\t\t\tc{0} = PyFloat_AS_DOUBLE(p{0});\n".format(arg.name)
                elif arg.dtype in NPY_SCALAR_TAG:
                    code += "\t\t\t\tc{0} = PyArrayScalar_VAL(p{0}, {1});\n".format(arg.name, NPY_SCALAR_TAG[arg.dtype])
            # build the actual argument list of the call expression
            args = []
            for arg in fkt.signature:
                if not isinstance(arg, Object) and len(arg.shape) > 0:
                    args.append("p{1}, PyArray_SHAPE((PyArrayObject *)p{1}), ({0} *)PyArray_DATA((PyArrayObject *)p{1})".format(PY_C_TYPE[arg.dtype], arg.name))
                else:
                    args.append("c{0}".format(arg.name))
            call = "{0}_{1}(".format(modtoken.main, fkt.getId())
            call += "\n\t\t\t\t\t\t {0}".format("\n\t\t\t\t\t\t, ".join(args))
            call += "\n\t\t\t\t\t)"
        else:
            call = "{0}_{1}()".format(modtoken.main, fkt.getId())
        # invoke and convert the result back to a Python object
        code += "\t\t\t\ttry {\n"
        if fkt.dtype is None:
            code += "\t\t\t\t\t{0};\n".format(call)
            code += "\t\t\t\t\tPy_INCREF(Py_None);\n"
            code += "\t\t\t\t\treturn Py_None;\n"
        elif len(fkt.shape) == 0 and fkt.dtype is bool:
            code += "\t\t\t\t\tPyObject* res = {0} ? Py_True : Py_False;\n".format(call)
            code += "\t\t\t\t\tPy_INCREF(res);\n"
            code += "\t\t\t\t\treturn res;\n"
        elif len(fkt.shape) == 0 and fkt.dtype is int:
            code += "\t\t\t\t\treturn Py_BuildValue(\"{0}\", {1});\n".format(PY_TYPE_CHAR[np.int_], call)
        elif len(fkt.shape) == 0 and fkt.dtype is float:
            code += "\t\t\t\t\treturn Py_BuildValue(\"{0}\", {1});\n".format(PY_TYPE_CHAR[np.float_], call)
        elif len(fkt.shape) == 0 and fkt.dtype in NPY_SCALAR_TAG:
            code += "\t\t\t\t\tPyObject* res = PyArrayScalar_New({0});\n".format(NPY_SCALAR_TAG[fkt.dtype])
            code += "\t\t\t\t\tPyArrayScalar_ASSIGN(res, {0}, {1});\n".format(NPY_SCALAR_TAG[fkt.dtype], call)
            code += "\t\t\t\t\treturn res;\n"
        else:
            # array result: first element of the returned tuple is the owner object
            code += "\t\t\t\t\tPyObject * res = std::get<0>({0});\n".format(call)
            if fkt.return_allocated:
                # to avoid mem leak or segfault
                code += "\n\t\t\t\t\tPy_INCREF(res);\n"
            code += "\t\t\t\t\treturn res;\n"
        code += "\t\t\t\t} catch (...) {\n"
        code += "\t\t\t\t\treturn NULL;\n"
        code += "\t\t\t\t}\n"
        if len(fkt.signature) > 0:
            # a non-matching signature falls through to the next specialization
            code += "\t\t\t} else\n"
            code += "\t\t\t\tPyErr_Clear();\n"
        code += "\t\t}\n"
    def stripArg(arg):
        # prune token attributes that must not be pickled into the
        # signature description (parents, scopes, instances)
        if isinstance(arg, Object):
            delattr(arg, "parent")
            if hasattr(arg, "instance"):
                delattr(arg, "instance")
            for name, value in list(arg.attrs.items()):
                arg.attrs[name] = stripArg(value)
        else:
            if not arg.dtype in [bool, int, float]:
                arg.dtype = NPY_TYPE[arg.dtype]
            arg.dims = len(arg.shape)
            delattr(arg, "shape")
            if not isinstance(arg, ObjectAttr):
                delattr(arg, "scope")
                delattr(arg, "allocated")
            else:
                delattr(arg, "parent")
        return arg
    signatures = []
    for fkt in modtoken.functions[modtoken.main]:
        signatures.append([stripArg(copy.deepcopy(arg)) for arg in fkt.signature])
    if sys.version_info[0] == 2:
        pickled = pickle.dumps(signatures).replace("\n", "\\n")
    else:
        # py3 pickles are bytes; base64 keeps the embedded C string literal valid
        import base64
        pickled = base64.encodebytes(pickle.dumps(signatures)).decode('ascii').replace("\n", "\\n")
    code += "\t\tPyObject * signatures = Py_BuildValue(\"(sO)\", \"{0}\", args);\n".format(pickled)
    code += "\t\tif (!signatures) {\n"
    # TODO: make all exceptions reasonable: http://docs.python.org/2/c-api/exceptions.html
    code += "\t\t\tPyErr_SetString(PyExc_ValueError, \"Error building signature string for {0}\");\n".format(modtoken.main)
    code += "\t\t\treturn NULL;\n"
    code += "\t\t}\n"
    code += "\t\treturn PyObject_Call(create_signature, signatures, NULL);\n"
    return code
|
cosmo-ethz/hope
|
hope/_generator.py
|
Python
|
gpl-3.0
| 33,664
|
[
"VisIt"
] |
a19b90fc3290a858bfe56830b2c245101b0b0fe9fe962df0dd630591e8a2458b
|
"""
provide a generic structure to support window functions,
similar to how we have a Groupby object
"""
from __future__ import division
import warnings
import numpy as np
from collections import defaultdict
from datetime import timedelta
from pandas.core.dtypes.generic import (
ABCSeries,
ABCDataFrame,
ABCDatetimeIndex,
ABCTimedeltaIndex,
ABCPeriodIndex,
ABCDateOffset)
from pandas.core.dtypes.common import (
is_integer,
is_bool,
is_float_dtype,
is_integer_dtype,
needs_i8_conversion,
is_timedelta64_dtype,
is_list_like,
_ensure_float64,
is_scalar)
from pandas.core.base import (PandasObject, SelectionMixin,
GroupByMixin)
import pandas.core.common as com
import pandas._libs.window as _window
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (Substitution, Appender,
cache_readonly)
from pandas.core.generic import _shared_docs
from textwrap import dedent
# Work on a local copy so additions below do not leak back into
# pandas.core.generic._shared_docs.
_shared_docs = dict(**_shared_docs)
# Boilerplate appended to the docstring of dispatched window methods.
_doc_template = """
Returns
-------
same type as input
See also
--------
pandas.Series.%(name)s
pandas.DataFrame.%(name)s
"""
class _Window(PandasObject, SelectionMixin):
    """Shared base class for window objects (rolling / expanding / weighted).

    Holds the window configuration, validates it, and provides the
    block-splitting, value-coercion and result-wrapping machinery used by
    the concrete subclasses.
    """
    # configuration attributes reproduced by __unicode__ and _shallow_copy
    _attributes = ['window', 'min_periods', 'center', 'win_type',
                   'axis', 'on', 'closed']
    exclusions = set()

    def __init__(self, obj, window=None, min_periods=None,
                 center=False, win_type=None, axis=0, on=None, closed=None,
                 **kwargs):
        self.__dict__.update(kwargs)
        self.blocks = []
        self.obj = obj
        self.on = on
        self.closed = closed
        self.window = window
        self.min_periods = min_periods
        self.center = center
        self.win_type = win_type
        self.win_freq = None
        self.axis = obj._get_axis_number(axis) if axis is not None else None
        self.validate()

    @property
    def _constructor(self):
        # subclasses override; default builds a Window
        return Window

    @property
    def is_datetimelike(self):
        # overridden by subclasses that know their index type
        return None

    @property
    def _on(self):
        # overridden by subclasses; the index/column the window rolls over
        return None

    @property
    def is_freq_type(self):
        return self.win_type == 'freq'

    def validate(self):
        """Validate the common constructor arguments; raises ValueError."""
        if self.center is not None and not is_bool(self.center):
            raise ValueError("center must be a boolean")
        if self.min_periods is not None and not \
           is_integer(self.min_periods):
            raise ValueError("min_periods must be an integer")
        if self.closed is not None and self.closed not in \
           ['right', 'both', 'left', 'neither']:
            raise ValueError("closed must be 'right', 'left', 'both' or "
                             "'neither'")

    def _convert_freq(self):
        """ resample according to the how, return a new object """
        obj = self._selected_obj
        index = None
        return obj, index

    def _create_blocks(self):
        """ split data into blocks & return conformed data """
        obj, index = self._convert_freq()
        if index is not None:
            index = self._on
        # filter out the on from the object
        if self.on is not None:
            if obj.ndim == 2:
                obj = obj.reindex(columns=obj.columns.difference([self.on]),
                                  copy=False)
        blocks = obj._to_dict_of_blocks(copy=False).values()
        return blocks, obj, index

    def _gotitem(self, key, ndim, subset=None):
        """
        sub-classes to define
        return a sliced object
        Parameters
        ----------
        key : string / list of selections
        ndim : 1,2
            requested ndim of result
        subset : object, default None
            subset to act on
        """
        # create a new object to prevent aliasing
        if subset is None:
            subset = self.obj
        self = self._shallow_copy(subset)
        self._reset_cache()
        if subset.ndim == 2:
            if is_scalar(key) and key in subset or is_list_like(key):
                self._selection = key
        return self

    def __getattr__(self, attr):
        # delegate unknown attributes to columns of the underlying object
        if attr in self._internal_names_set:
            return object.__getattribute__(self, attr)
        if attr in self.obj:
            return self[attr]
        raise AttributeError("%r object has no attribute %r" %
                             (type(self).__name__, attr))

    def _dir_additions(self):
        return self.obj._dir_additions()

    def _get_window(self, other=None):
        # subclasses may compute a window from `other` (e.g. for cov/corr)
        return self.window

    @property
    def _window_type(self):
        return self.__class__.__name__

    def __unicode__(self):
        """ provide a nice str repr of our rolling object """
        attrs = ["{k}={v}".format(k=k, v=getattr(self, k))
                 for k in self._attributes
                 if getattr(self, k, None) is not None]
        return "{klass} [{attrs}]".format(klass=self._window_type,
                                          attrs=','.join(attrs))

    def __iter__(self):
        url = 'https://github.com/pandas-dev/pandas/issues/11704'
        raise NotImplementedError('See issue #11704 {url}'.format(url=url))

    def _get_index(self, index=None):
        """
        Return index as ndarrays
        Returns
        -------
        tuple of (index, index_as_ndarray)
        """
        if self.is_freq_type:
            if index is None:
                index = self._on
            return index, index.asi8
        return index, index

    def _prep_values(self, values=None, kill_inf=True):
        """Coerce input values to float64 and (optionally) replace inf with NaN."""
        if values is None:
            values = getattr(self._selected_obj, 'values', self._selected_obj)
        # GH #12373 : rolling functions error on float32 data
        # make sure the data is coerced to float64
        if is_float_dtype(values.dtype):
            values = _ensure_float64(values)
        elif is_integer_dtype(values.dtype):
            values = _ensure_float64(values)
        elif needs_i8_conversion(values.dtype):
            raise NotImplementedError("ops for {action} for this "
                                      "dtype {dtype} are not "
                                      "implemented".format(
                                          action=self._window_type,
                                          dtype=values.dtype))
        else:
            try:
                values = _ensure_float64(values)
            except (ValueError, TypeError):
                raise TypeError("cannot handle this type -> {0}"
                                "".format(values.dtype))
        if kill_inf:
            # copy first so the caller's array is not mutated
            values = values.copy()
            values[np.isinf(values)] = np.NaN
        return values

    def _wrap_result(self, result, block=None, obj=None):
        """ wrap a single result """
        if obj is None:
            obj = self._selected_obj
        index = obj.index
        if isinstance(result, np.ndarray):
            # coerce if necessary
            if block is not None:
                if is_timedelta64_dtype(block.values.dtype):
                    from pandas import to_timedelta
                    result = to_timedelta(
                        result.ravel(), unit='ns').values.reshape(result.shape)
            if result.ndim == 1:
                from pandas import Series
                return Series(result, index, name=obj.name)
            return type(obj)(result, index=index, columns=block.columns)
        return result

    def _wrap_results(self, results, blocks, obj):
        """
        wrap the results
        Parameters
        ----------
        results : list of ndarrays
        blocks : list of blocks
        obj : conformed data (may be resampled)
        """
        from pandas import Series, concat
        from pandas.core.index import _ensure_index
        final = []
        for result, block in zip(results, blocks):
            result = self._wrap_result(result, block=block, obj=obj)
            if result.ndim == 1:
                return result
            final.append(result)
        # if we have an 'on' column
        # we want to put it back into the results
        # in the same location
        columns = self._selected_obj.columns
        if self.on is not None and not self._on.equals(obj.index):
            name = self._on.name
            final.append(Series(self._on, index=obj.index, name=name))
            if self._selection is not None:
                selection = _ensure_index(self._selection)
                # need to reorder to include original location of
                # the on column (if its not already there)
                if name not in selection:
                    columns = self.obj.columns
                    indexer = columns.get_indexer(selection.tolist() + [name])
                    columns = columns.take(sorted(indexer))
        if not len(final):
            return obj.astype('float64')
        return concat(final, axis=1).reindex(columns=columns, copy=False)

    def _center_window(self, result, window):
        """ center the result in the window """
        if self.axis > result.ndim - 1:
            raise ValueError("Requested axis is larger then no. of argument "
                             "dimensions")
        offset = _offset(window, True)
        if offset > 0:
            if isinstance(result, (ABCSeries, ABCDataFrame)):
                result = result.slice_shift(-offset, axis=self.axis)
            else:
                lead_indexer = [slice(None)] * result.ndim
                lead_indexer[self.axis] = slice(offset, None)
                result = np.copy(result[tuple(lead_indexer)])
        return result

    def aggregate(self, arg, *args, **kwargs):
        """Aggregate via SelectionMixin._aggregate; falls back to apply."""
        result, how = self._aggregate(arg, *args, **kwargs)
        if result is None:
            return self.apply(arg, raw=False, args=args, kwargs=kwargs)
        return result

    agg = aggregate

    # shared docstrings rendered into subclasses via Substitution/Appender
    _shared_docs['sum'] = dedent("""
    Calculate %(name)s sum of given DataFrame or Series.
    Parameters
    ----------
    *args, **kwargs
    For compatibility with other %(name)s methods. Has no effect
    on the computed value.
    Returns
    -------
    Series or DataFrame
    Same type as the input, with the same index, containing the
    %(name)s sum.
    See Also
    --------
    Series.sum : Reducing sum for Series.
    DataFrame.sum : Reducing sum for DataFrame.
    Examples
    --------
    >>> s = pd.Series([1, 2, 3, 4, 5])
    >>> s
    0 1
    1 2
    2 3
    3 4
    4 5
    dtype: int64
    >>> s.rolling(3).sum()
    0 NaN
    1 NaN
    2 6.0
    3 9.0
    4 12.0
    dtype: float64
    >>> s.expanding(3).sum()
    0 NaN
    1 NaN
    2 6.0
    3 10.0
    4 15.0
    dtype: float64
    >>> s.rolling(3, center=True).sum()
    0 NaN
    1 6.0
    2 9.0
    3 12.0
    4 NaN
    dtype: float64
    For DataFrame, each %(name)s sum is computed column-wise.
    >>> df = pd.DataFrame({"A": s, "B": s ** 2})
    >>> df
    A B
    0 1 1
    1 2 4
    2 3 9
    3 4 16
    4 5 25
    >>> df.rolling(3).sum()
    A B
    0 NaN NaN
    1 NaN NaN
    2 6.0 14.0
    3 9.0 29.0
    4 12.0 50.0
    """)

    _shared_docs['mean'] = dedent("""
    Calculate the %(name)s mean of the values.
    Parameters
    ----------
    *args
    Under Review.
    **kwargs
    Under Review.
    Returns
    -------
    Series or DataFrame
    Returned object type is determined by the caller of the %(name)s
    calculation.
    See Also
    --------
    Series.%(name)s : Calling object with Series data
    DataFrame.%(name)s : Calling object with DataFrames
    Series.mean : Equivalent method for Series
    DataFrame.mean : Equivalent method for DataFrame
    Examples
    --------
    The below examples will show rolling mean calculations with window sizes of
    two and three, respectively.
    >>> s = pd.Series([1, 2, 3, 4])
    >>> s.rolling(2).mean()
    0 NaN
    1 1.5
    2 2.5
    3 3.5
    dtype: float64
    >>> s.rolling(3).mean()
    0 NaN
    1 NaN
    2 2.0
    3 3.0
    dtype: float64
    """)
class Window(_Window):
    """
    Provides rolling window calculations.
    .. versionadded:: 0.18.0
    Parameters
    ----------
    window : int, or offset
        Size of the moving window. This is the number of observations used for
        calculating the statistic. Each window will be a fixed size.
        If its an offset then this will be the time period of each window. Each
        window will be a variable sized based on the observations included in
        the time-period. This is only valid for datetimelike indexes. This is
        new in 0.19.0
    min_periods : int, default None
        Minimum number of observations in window required to have a value
        (otherwise result is NA). For a window that is specified by an offset,
        this will default to 1.
    center : boolean, default False
        Set the labels at the center of the window.
    win_type : string, default None
        Provide a window type. If ``None``, all points are evenly weighted.
        See the notes below for further information.
    on : string, optional
        For a DataFrame, column on which to calculate
        the rolling window, rather than the index
    closed : string, default None
        Make the interval closed on the 'right', 'left', 'both' or
        'neither' endpoints.
        For offset-based windows, it defaults to 'right'.
        For fixed windows, defaults to 'both'. Remaining cases not implemented
        for fixed windows.
        .. versionadded:: 0.20.0
    axis : int or string, default 0
    Returns
    -------
    a Window or Rolling sub-classed for the particular operation
    Examples
    --------
    >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
    >>> df
    B
    0 0.0
    1 1.0
    2 2.0
    3 NaN
    4 4.0
    Rolling sum with a window length of 2, using the 'triang'
    window type.
    >>> df.rolling(2, win_type='triang').sum()
    B
    0 NaN
    1 1.0
    2 2.5
    3 NaN
    4 NaN
    Rolling sum with a window length of 2, min_periods defaults
    to the window length.
    >>> df.rolling(2).sum()
    B
    0 NaN
    1 1.0
    2 3.0
    3 NaN
    4 NaN
    Same as above, but explicitly set the min_periods
    >>> df.rolling(2, min_periods=1).sum()
    B
    0 0.0
    1 1.0
    2 3.0
    3 2.0
    4 4.0
    A ragged (meaning not-a-regular frequency), time-indexed DataFrame
    >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
    ...                   index = [pd.Timestamp('20130101 09:00:00'),
    ...                            pd.Timestamp('20130101 09:00:02'),
    ...                            pd.Timestamp('20130101 09:00:03'),
    ...                            pd.Timestamp('20130101 09:00:05'),
    ...                            pd.Timestamp('20130101 09:00:06')])
    >>> df
    B
    2013-01-01 09:00:00 0.0
    2013-01-01 09:00:02 1.0
    2013-01-01 09:00:03 2.0
    2013-01-01 09:00:05 NaN
    2013-01-01 09:00:06 4.0
    Contrasting to an integer rolling window, this will roll a variable
    length window corresponding to the time period.
    The default for min_periods is 1.
    >>> df.rolling('2s').sum()
    B
    2013-01-01 09:00:00 0.0
    2013-01-01 09:00:02 1.0
    2013-01-01 09:00:03 3.0
    2013-01-01 09:00:05 NaN
    2013-01-01 09:00:06 4.0
    Notes
    -----
    By default, the result is set to the right edge of the window. This can be
    changed to the center of the window by setting ``center=True``.
    To learn more about the offsets & frequency strings, please see `this link
    <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
    The recognized win_types are:
    * ``boxcar``
    * ``triang``
    * ``blackman``
    * ``hamming``
    * ``bartlett``
    * ``parzen``
    * ``bohman``
    * ``blackmanharris``
    * ``nuttall``
    * ``barthann``
    * ``kaiser`` (needs beta)
    * ``gaussian`` (needs std)
    * ``general_gaussian`` (needs power, width)
    * ``slepian`` (needs width).
    If ``win_type=None`` all points are evenly weighted. To learn more about
    different window types see `scipy.signal window functions
    <https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__.
    See Also
    --------
    expanding : Provides expanding transformations.
    ewm : Provides exponential weighted functions
    """

    def validate(self):
        """Additionally validate win_type against scipy.signal's windows."""
        super(Window, self).validate()
        window = self.window
        if isinstance(window, (list, tuple, np.ndarray)):
            # explicit weights: nothing further to check
            pass
        elif is_integer(window):
            if window <= 0:
                raise ValueError("window must be > 0 ")
            try:
                import scipy.signal as sig
            except ImportError:
                raise ImportError('Please install scipy to generate window '
                                  'weight')
            if not isinstance(self.win_type, compat.string_types):
                raise ValueError('Invalid win_type {0}'.format(self.win_type))
            if getattr(sig, self.win_type, None) is None:
                raise ValueError('Invalid win_type {0}'.format(self.win_type))
        else:
            raise ValueError('Invalid window {0}'.format(window))

    def _prep_window(self, **kwargs):
        """
        provide validation for our window type, return the window
        we have already been validated
        """
        window = self._get_window()
        if isinstance(window, (list, tuple, np.ndarray)):
            return com._asarray_tuplesafe(window).astype(float)
        elif is_integer(window):
            import scipy.signal as sig

            # the below may pop from kwargs
            def _validate_win_type(win_type, kwargs):
                # scipy windows with required parameters are passed as tuples
                arg_map = {'kaiser': ['beta'],
                           'gaussian': ['std'],
                           'general_gaussian': ['power', 'width'],
                           'slepian': ['width']}
                if win_type in arg_map:
                    return tuple([win_type] + _pop_args(win_type,
                                                        arg_map[win_type],
                                                        kwargs))
                return win_type

            def _pop_args(win_type, arg_names, kwargs):
                msg = '%s window requires %%s' % win_type
                all_args = []
                for n in arg_names:
                    if n not in kwargs:
                        raise ValueError(msg % n)
                    all_args.append(kwargs.pop(n))
                return all_args

            win_type = _validate_win_type(self.win_type, kwargs)
            # GH #15662. `False` makes symmetric window, rather than periodic.
            return sig.get_window(win_type, window, False).astype(float)

    def _apply_window(self, mean=True, **kwargs):
        """
        Applies a moving window of type ``window_type`` on the data.
        Parameters
        ----------
        mean : boolean, default True
            If True computes weighted mean, else weighted sum
        Returns
        -------
        y : type of input argument
        """
        window = self._prep_window(**kwargs)
        center = self.center
        blocks, obj, index = self._create_blocks()
        results = []
        for b in blocks:
            try:
                values = self._prep_values(b.values)
            except TypeError:
                # non-coercible block: pass it through unchanged
                results.append(b.values.copy())
                continue
            if values.size == 0:
                results.append(values.copy())
                continue
            offset = _offset(window, center)
            additional_nans = np.array([np.NaN] * offset)

            def f(arg, *args, **kwargs):
                minp = _use_window(self.min_periods, len(window))
                return _window.roll_window(np.concatenate((arg,
                                                           additional_nans))
                                           if center else arg, window, minp,
                                           avg=mean)

            result = np.apply_along_axis(f, self.axis, values)
            if center:
                result = self._center_window(result, window)
            results.append(result)
        return self._wrap_results(results, blocks, obj)

    _agg_doc = dedent("""
    Examples
    --------
    >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
    >>> df
    A B C
    0 -2.385977 -0.102758 0.438822
    1 -1.004295 0.905829 -0.954544
    2 0.735167 -0.165272 -1.619346
    3 -0.702657 -1.340923 -0.706334
    4 -0.246845 0.211596 -0.901819
    5 2.463718 3.157577 -1.380906
    6 -1.142255 2.340594 -0.039875
    7 1.396598 -1.647453 1.677227
    8 -0.543425 1.761277 -0.220481
    9 -0.640505 0.289374 -1.550670
    >>> df.rolling(3, win_type='boxcar').agg('mean')
    A B C
    0 NaN NaN NaN
    1 NaN NaN NaN
    2 -0.885035 0.212600 -0.711689
    3 -0.323928 -0.200122 -1.093408
    4 -0.071445 -0.431533 -1.075833
    5 0.504739 0.676083 -0.996353
    6 0.358206 1.903256 -0.774200
    7 0.906020 1.283573 0.085482
    8 -0.096361 0.818139 0.472290
    9 0.070889 0.134399 -0.031308
    See also
    --------
    pandas.DataFrame.rolling.aggregate
    pandas.DataFrame.aggregate
    """)

    @Appender(_agg_doc)
    @Appender(_shared_docs['aggregate'] % dict(
        versionadded='',
        klass='Series/DataFrame',
        axis=''))
    def aggregate(self, arg, *args, **kwargs):
        result, how = self._aggregate(arg, *args, **kwargs)
        if result is None:
            # these must apply directly
            result = arg(self)
        return result

    agg = aggregate

    @Substitution(name='window')
    @Appender(_shared_docs['sum'])
    def sum(self, *args, **kwargs):
        nv.validate_window_func('sum', args, kwargs)
        return self._apply_window(mean=False, **kwargs)

    @Substitution(name='window')
    @Appender(_shared_docs['mean'])
    def mean(self, *args, **kwargs):
        nv.validate_window_func('mean', args, kwargs)
        return self._apply_window(mean=True, **kwargs)
class _GroupByMixin(GroupByMixin):
    """ provide the groupby facilities """

    def __init__(self, obj, *args, **kwargs):
        parent = kwargs.pop('parent', None)  # noqa
        groupby = kwargs.pop('groupby', None)
        if groupby is None:
            groupby, obj = obj, obj.obj
        self._groupby = groupby
        self._groupby.mutated = True
        self._groupby.grouper.mutated = True
        # NOTE(review): super(GroupByMixin, ...) deliberately skips
        # GroupByMixin in the MRO and initializes the window base directly
        # — presumably intentional; confirm before changing.
        super(GroupByMixin, self).__init__(obj, *args, **kwargs)

    # methods forwarded verbatim to the underlying groupby object
    count = GroupByMixin._dispatch('count')
    corr = GroupByMixin._dispatch('corr', other=None, pairwise=None)
    cov = GroupByMixin._dispatch('cov', other=None, pairwise=None)

    def _apply(self, func, name, window=None, center=None,
               check_minp=None, **kwargs):
        """
        dispatch to apply; we are stripping all of the _apply kwargs and
        performing the original function call on the grouped object
        """

        def f(x, name=name, *args):
            x = self._shallow_copy(x)
            if isinstance(name, compat.string_types):
                return getattr(x, name)(*args, **kwargs)
            return x.apply(name, *args, **kwargs)

        return self._groupby.apply(f)
class _Rolling(_Window):
    @property
    def _constructor(self):
        return Rolling

    def _apply(self, func, name=None, window=None, center=None,
               check_minp=None, **kwargs):
        """
        Rolling statistical measure using supplied function. Designed to be
        used with passed-in Cython array-based functions.
        Parameters
        ----------
        func : string/callable to apply
        name : string, optional
            name of this function
        window : int/array, default to _get_window()
        center : boolean, default to self.center
        check_minp : function, default to _use_window
        Returns
        -------
        y : type of input
        """
        if center is None:
            center = self.center
        if window is None:
            window = self._get_window()
        if check_minp is None:
            check_minp = _use_window
        blocks, obj, index = self._create_blocks()
        index, indexi = self._get_index(index=index)
        results = []
        for b in blocks:
            values = self._prep_values(b.values)
            if values.size == 0:
                results.append(values.copy())
                continue
            # if we have a string function name, wrap it
            if isinstance(func, compat.string_types):
                cfunc = getattr(_window, func, None)
                if cfunc is None:
                    raise ValueError("we do not support this function "
                                     "in _window.{0}".format(func))

                # rebinds `func` so later iterations use the wrapper directly
                def func(arg, window, min_periods=None, closed=None):
                    minp = check_minp(min_periods, window)
                    # ensure we are only rolling on floats
                    arg = _ensure_float64(arg)
                    return cfunc(arg,
                                 window, minp, indexi, closed, **kwargs)

            # calculation function
            if center:
                offset = _offset(window, center)
                additional_nans = np.array([np.NaN] * offset)

                def calc(x):
                    return func(np.concatenate((x, additional_nans)),
                                window, min_periods=self.min_periods,
                                closed=self.closed)
            else:

                def calc(x):
                    return func(x, window, min_periods=self.min_periods,
                                closed=self.closed)

            with np.errstate(all='ignore'):
                if values.ndim > 1:
                    result = np.apply_along_axis(calc, self.axis, values)
                else:
                    result = calc(values)
            if center:
                result = self._center_window(result, window)
            results.append(result)
        return self._wrap_results(results, blocks, obj)
class _Rolling_and_Expanding(_Rolling):
    # Shared implementation layer for Rolling and Expanding windows.
    # Each statistical reduction below either dispatches to a Cython kernel
    # in ``_window`` via ``self._apply`` or is computed through the pairwise
    # helper ``_flex_binary_moment`` (cov/corr).
    _shared_docs['count'] = dedent(r"""
    The %(name)s count of any non-NaN observations inside the window.
    Returns
    -------
    Series or DataFrame
        Returned object type is determined by the caller of the %(name)s
        calculation.
    See Also
    --------
    pandas.Series.%(name)s : Calling object with Series data
    pandas.DataFrame.%(name)s : Calling object with DataFrames
    pandas.DataFrame.count : Count of the full DataFrame
    Examples
    --------
    >>> s = pd.Series([2, 3, np.nan, 10])
    >>> s.rolling(2).count()
    0    1.0
    1    2.0
    2    1.0
    3    1.0
    dtype: float64
    >>> s.rolling(3).count()
    0    1.0
    1    2.0
    2    2.0
    3    2.0
    dtype: float64
    >>> s.rolling(4).count()
    0    1.0
    1    2.0
    2    2.0
    3    3.0
    dtype: float64
    """)

    def count(self):
        # Window-wise count of non-NaN values: turn each block into a 0/1
        # indicator and sum that indicator over the same window spec.
        blocks, obj, index = self._create_blocks()
        index, indexi = self._get_index(index=index)
        window = self._get_window()
        # Cap the window at the object length so short inputs still produce
        # counts; centered windows keep their full width.
        window = min(window, len(obj)) if not self.center else window
        results = []
        for b in blocks:
            result = b.notna().astype(int)
            # min_periods=0 so leading, partially-filled windows yield
            # counts instead of NaN.
            result = self._constructor(result, window=window, min_periods=0,
                                       center=self.center,
                                       closed=self.closed).sum()
            results.append(result)
        return self._wrap_results(results, blocks, obj)
    _shared_docs['apply'] = dedent(r"""
    %(name)s function apply
    Parameters
    ----------
    func : function
        Must produce a single value from an ndarray input if ``raw=True``
        or a Series if ``raw=False``
    raw : bool, default None
        * ``False`` : passes each row or column as a Series to the
          function.
        * ``True`` or ``None`` : the passed function will receive ndarray
          objects instead.
          If you are just applying a NumPy reduction function this will
          achieve much better performance.
        The `raw` parameter is required and will show a FutureWarning if
        not passed. In the future `raw` will default to False.
        .. versionadded:: 0.23.0
    \*args and \*\*kwargs are passed to the function""")

    def apply(self, func, raw=None, args=(), kwargs={}):
        # Apply an arbitrary ``func`` per window through roll_generic.
        # NOTE(review): mutable default ``kwargs={}`` is shared across calls;
        # it is popped below (``_level``) so state can leak between calls if
        # the same default dict is reused -- verify intent.
        from pandas import Series
        # TODO: _level is unused?
        _level = kwargs.pop('_level', None)  # noqa
        window = self._get_window()
        offset = _offset(window, self.center)
        index, indexi = self._get_index()
        # TODO: default is for backward compat
        # change to False in the future
        if raw is None:
            warnings.warn(
                "Currently, 'apply' passes the values as ndarrays to the "
                "applied function. In the future, this will change to passing "
                "it as Series objects. You need to specify 'raw=True' to keep "
                "the current behaviour, and you can pass 'raw=False' to "
                "silence this warning", FutureWarning, stacklevel=3)
            raw = True

        def f(arg, window, min_periods, closed):
            minp = _use_window(min_periods, window)
            if not raw:
                # Re-wrap the raw ndarray as a Series so ``func`` sees labels.
                arg = Series(arg, index=self.obj.index)
            return _window.roll_generic(
                arg, window, minp, indexi,
                closed, offset, func, raw, args, kwargs)

        # center=False here: roll_generic already applies ``offset`` itself.
        return self._apply(f, func, args=args, kwargs=kwargs,
                           center=False, raw=raw)

    def sum(self, *args, **kwargs):
        # Windowed sum; *args/**kwargs validated only for numpy compat.
        nv.validate_window_func('sum', args, kwargs)
        return self._apply('roll_sum', 'sum', **kwargs)
    _shared_docs['max'] = dedent("""
    %(name)s maximum
    """)

    def max(self, *args, **kwargs):
        # Windowed maximum via the Cython kernel.
        nv.validate_window_func('max', args, kwargs)
        return self._apply('roll_max', 'max', **kwargs)
    _shared_docs['min'] = dedent("""
    Calculate the %(name)s minimum.
    Parameters
    ----------
    **kwargs
        Under Review.
    Returns
    -------
    Series or DataFrame
        Returned object type is determined by the caller of the %(name)s
        calculation.
    See Also
    --------
    Series.%(name)s : Calling object with a Series
    DataFrame.%(name)s : Calling object with a DataFrame
    Series.min : Similar method for Series
    DataFrame.min : Similar method for DataFrame
    Examples
    --------
    Performing a rolling minimum with a window size of 3.
    >>> s = pd.Series([4, 3, 5, 2, 6])
    >>> s.rolling(3).min()
    0    NaN
    1    NaN
    2    3.0
    3    2.0
    4    2.0
    dtype: float64
    """)

    def min(self, *args, **kwargs):
        # Windowed minimum via the Cython kernel.
        nv.validate_window_func('min', args, kwargs)
        return self._apply('roll_min', 'min', **kwargs)

    def mean(self, *args, **kwargs):
        # Windowed mean via the Cython kernel.
        nv.validate_window_func('mean', args, kwargs)
        return self._apply('roll_mean', 'mean', **kwargs)
    _shared_docs['median'] = dedent("""
    Calculate the %(name)s median.
    Parameters
    ----------
    **kwargs
        For compatibility with other %(name)s methods. Has no effect
        on the computed median.
    Returns
    -------
    Series or DataFrame
        Returned type is the same as the original object.
    See Also
    --------
    Series.%(name)s : Calling object with Series data
    DataFrame.%(name)s : Calling object with DataFrames
    Series.median : Equivalent method for Series
    DataFrame.median : Equivalent method for DataFrame
    Examples
    --------
    Compute the rolling median of a series with a window size of 3.
    >>> s = pd.Series([0, 1, 2, 3, 4])
    >>> s.rolling(3).median()
    0    NaN
    1    NaN
    2    1.0
    3    2.0
    4    3.0
    dtype: float64
    """)

    def median(self, **kwargs):
        # Windowed median via the C median kernel.
        return self._apply('roll_median_c', 'median', **kwargs)
    _shared_docs['std'] = dedent("""
    Calculate %(name)s standard deviation.
    Normalized by N-1 by default. This can be changed using the `ddof`
    argument.
    Parameters
    ----------
    ddof : int, default 1
        Delta Degrees of Freedom.  The divisor used in calculations
        is ``N - ddof``, where ``N`` represents the number of elements.
    *args, **kwargs
        For NumPy compatibility. No additional arguments are used.
    Returns
    -------
    Series or DataFrame
        Returns the same object type as the caller of the %(name)s calculation.
    See Also
    --------
    Series.%(name)s : Calling object with Series data
    DataFrame.%(name)s : Calling object with DataFrames
    Series.std : Equivalent method for Series
    DataFrame.std : Equivalent method for DataFrame
    numpy.std : Equivalent method for Numpy array
    Notes
    -----
    The default `ddof` of 1 used in Series.std is different than the default
    `ddof` of 0 in numpy.std.
    A minimum of one period is required for the rolling calculation.
    Examples
    --------
    >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
    >>> s.rolling(3).std()
    0         NaN
    1         NaN
    2    0.577350
    3    1.000000
    4    1.000000
    5    1.154701
    6    0.000000
    dtype: float64
    >>> s.expanding(3).std()
    0         NaN
    1         NaN
    2    0.577350
    3    0.957427
    4    0.894427
    5    0.836660
    6    0.786796
    dtype: float64
    """)

    def std(self, ddof=1, *args, **kwargs):
        # Windowed standard deviation: sqrt of the variance kernel.
        nv.validate_window_func('std', args, kwargs)
        window = self._get_window()
        index, indexi = self._get_index()

        def f(arg, *args, **kwargs):
            # std requires at least one observation per window.
            minp = _require_min_periods(1)(self.min_periods, window)
            return _zsqrt(_window.roll_var(arg, window, minp, indexi,
                                           self.closed, ddof))

        return self._apply(f, 'std', check_minp=_require_min_periods(1),
                           ddof=ddof, **kwargs)
    _shared_docs['var'] = dedent("""
    Calculate unbiased %(name)s variance.
    Normalized by N-1 by default. This can be changed using the `ddof`
    argument.
    Parameters
    ----------
    ddof : int, default 1
        Delta Degrees of Freedom.  The divisor used in calculations
        is ``N - ddof``, where ``N`` represents the number of elements.
    *args, **kwargs
        For NumPy compatibility. No additional arguments are used.
    Returns
    -------
    Series or DataFrame
        Returns the same object type as the caller of the %(name)s calculation.
    See Also
    --------
    Series.%(name)s : Calling object with Series data
    DataFrame.%(name)s : Calling object with DataFrames
    Series.var : Equivalent method for Series
    DataFrame.var : Equivalent method for DataFrame
    numpy.var : Equivalent method for Numpy array
    Notes
    -----
    The default `ddof` of 1 used in :meth:`Series.var` is different than the
    default `ddof` of 0 in :func:`numpy.var`.
    A minimum of 1 period is required for the rolling calculation.
    Examples
    --------
    >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
    >>> s.rolling(3).var()
    0         NaN
    1         NaN
    2    0.333333
    3    1.000000
    4    1.000000
    5    1.333333
    6    0.000000
    dtype: float64
    >>> s.expanding(3).var()
    0         NaN
    1         NaN
    2    0.333333
    3    0.916667
    4    0.800000
    5    0.700000
    6    0.619048
    dtype: float64
    """)

    def var(self, ddof=1, *args, **kwargs):
        # Windowed (unbiased by default) variance.
        nv.validate_window_func('var', args, kwargs)
        return self._apply('roll_var', 'var',
                           check_minp=_require_min_periods(1), ddof=ddof,
                           **kwargs)
    _shared_docs['skew'] = """Unbiased %(name)s skewness"""

    def skew(self, **kwargs):
        # Windowed skewness; needs >= 3 observations per window.
        return self._apply('roll_skew', 'skew',
                           check_minp=_require_min_periods(3), **kwargs)
    _shared_docs['kurt'] = dedent("""
    Calculate unbiased %(name)s kurtosis.
    This function uses Fisher's definition of kurtosis without bias.
    Parameters
    ----------
    **kwargs
        Under Review.
    Returns
    -------
    Series or DataFrame
        Returned object type is determined by the caller of the %(name)s
        calculation
    See Also
    --------
    Series.%(name)s : Calling object with Series data
    DataFrame.%(name)s : Calling object with DataFrames
    Series.kurt : Equivalent method for Series
    DataFrame.kurt : Equivalent method for DataFrame
    scipy.stats.skew : Third moment of a probability density
    scipy.stats.kurtosis : Reference SciPy method
    Notes
    -----
    A minimum of 4 periods is required for the %(name)s calculation.
    """)

    def kurt(self, **kwargs):
        # Windowed kurtosis; needs >= 4 observations per window.
        return self._apply('roll_kurt', 'kurt',
                           check_minp=_require_min_periods(4), **kwargs)
    _shared_docs['quantile'] = dedent("""
    %(name)s quantile.
    Parameters
    ----------
    quantile : float
        Quantile to compute. 0 <= quantile <= 1.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        .. versionadded:: 0.23.0
        This optional parameter specifies the interpolation method to use,
        when the desired quantile lies between two data points `i` and `j`:
        * linear: `i + (j - i) * fraction`, where `fraction` is the
          fractional part of the index surrounded by `i` and `j`.
        * lower: `i`.
        * higher: `j`.
        * nearest: `i` or `j` whichever is nearest.
        * midpoint: (`i` + `j`) / 2.
    **kwargs:
        For compatibility with other %(name)s methods. Has no effect on
        the result.
    Returns
    -------
    Series or DataFrame
        Returned object type is determined by the caller of the %(name)s
        calculation.
    Examples
    --------
    >>> s = pd.Series([1, 2, 3, 4])
    >>> s.rolling(2).quantile(.4, interpolation='lower')
    0    NaN
    1    1.0
    2    2.0
    3    3.0
    dtype: float64
    >>> s.rolling(2).quantile(.4, interpolation='midpoint')
    0    NaN
    1    1.5
    2    2.5
    3    3.5
    dtype: float64
    See Also
    --------
    pandas.Series.quantile : Computes value at the given quantile over all data
        in Series.
    pandas.DataFrame.quantile : Computes values at the given quantile over
        requested axis in DataFrame.
    """)

    def quantile(self, quantile, interpolation='linear', **kwargs):
        # Windowed quantile with fast paths for the two extremes.
        window = self._get_window()
        index, indexi = self._get_index()

        def f(arg, *args, **kwargs):
            minp = _use_window(self.min_periods, window)
            # quantile 1.0 / 0.0 reduce exactly to the max / min kernels.
            if quantile == 1.0:
                return _window.roll_max(arg, window, minp, indexi,
                                        self.closed)
            elif quantile == 0.0:
                return _window.roll_min(arg, window, minp, indexi,
                                        self.closed)
            else:
                return _window.roll_quantile(arg, window, minp, indexi,
                                             self.closed, quantile,
                                             interpolation)

        return self._apply(f, 'quantile', quantile=quantile,
                           **kwargs)
    _shared_docs['cov'] = dedent("""
    %(name)s sample covariance
    Parameters
    ----------
    other : Series, DataFrame, or ndarray, optional
        if not supplied then will default to self and produce pairwise output
    pairwise : bool, default None
        If False then only matching columns between self and other will be used
        and the output will be a DataFrame.
        If True then all pairwise combinations will be calculated and the
        output will be a MultiIndexed DataFrame in the case of DataFrame
        inputs. In the case of missing elements, only complete pairwise
        observations will be used.
    ddof : int, default 1
        Delta Degrees of Freedom.  The divisor used in calculations
        is ``N - ddof``, where ``N`` represents the number of elements.""")

    def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
        # Windowed sample covariance, computed as E[XY] - E[X]E[Y] with a
        # bias adjustment based on the per-window pair count.
        if other is None:
            other = self._selected_obj
            # only default unset
            pairwise = True if pairwise is None else pairwise
        other = self._shallow_copy(other)
        # GH 16058: offset window
        if self.is_freq_type:
            window = self.win_freq
        else:
            window = self._get_window(other)

        def _get_cov(X, Y):
            # GH #12373 : rolling functions error on float32 data
            # to avoid potential overflow, cast the data to float64
            X = X.astype('float64')
            Y = Y.astype('float64')
            mean = lambda x: x.rolling(window, self.min_periods,
                                       center=self.center).mean(**kwargs)
            # (X + Y).count() counts positions where BOTH inputs are non-NaN.
            count = (X + Y).rolling(window=window,
                                    center=self.center).count(**kwargs)
            bias_adj = count / (count - ddof)
            return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj

        return _flex_binary_moment(self._selected_obj, other._selected_obj,
                                   _get_cov, pairwise=bool(pairwise))
    _shared_docs['corr'] = dedent("""
    %(name)s sample correlation
    Parameters
    ----------
    other : Series, DataFrame, or ndarray, optional
        if not supplied then will default to self and produce pairwise output
    pairwise : bool, default None
        If False then only matching columns between self and other will be
        used and the output will be a DataFrame.
        If True then all pairwise combinations will be calculated and the
        output will be a MultiIndex DataFrame in the case of DataFrame inputs.
        In the case of missing elements, only complete pairwise observations
        will be used.""")

    def corr(self, other=None, pairwise=None, **kwargs):
        # Windowed Pearson correlation: cov(a, b) / (std(a) * std(b)).
        if other is None:
            other = self._selected_obj
            # only default unset
            pairwise = True if pairwise is None else pairwise
        other = self._shallow_copy(other)
        window = self._get_window(other)

        def _get_corr(a, b):
            a = a.rolling(window=window, min_periods=self.min_periods,
                          center=self.center)
            b = b.rolling(window=window, min_periods=self.min_periods,
                          center=self.center)
            return a.cov(b, **kwargs) / (a.std(**kwargs) * b.std(**kwargs))

        return _flex_binary_moment(self._selected_obj, other._selected_obj,
                                   _get_corr, pairwise=bool(pairwise))
class Rolling(_Rolling_and_Expanding):
    # Public fixed-width / frequency-based rolling window. Adds handling of
    # the ``on`` column and datetimelike-window validation on top of the
    # shared implementation layer; the statistical methods below mostly
    # validate numpy-compat arguments and delegate to the superclass.
    @cache_readonly
    def is_datetimelike(self):
        # True when the axis being rolled over is a datetime-, timedelta-
        # or period-like index.
        return isinstance(self._on,
                          (ABCDatetimeIndex,
                           ABCTimedeltaIndex,
                           ABCPeriodIndex))

    @cache_readonly
    def _on(self):
        # The axis the window slides along: the index by default, or the
        # DataFrame column named by ``self.on``.
        if self.on is None:
            return self.obj.index
        elif (isinstance(self.obj, ABCDataFrame) and
                self.on in self.obj.columns):
            from pandas import Index
            return Index(self.obj[self.on])
        else:
            raise ValueError("invalid on specified as {0}, "
                             "must be a column (if DataFrame) "
                             "or None".format(self.on))

    def validate(self):
        # Extends base validation: converts offset/string windows over a
        # datetimelike index into nanosecond-based 'freq' windows (mutating
        # self.window / self.win_freq / self.win_type), otherwise enforces
        # an integer, non-negative window.
        super(Rolling, self).validate()
        # we allow rolling on a datetimelike index
        if ((self.obj.empty or self.is_datetimelike) and
                isinstance(self.window, (compat.string_types, ABCDateOffset,
                                         timedelta))):
            self._validate_monotonic()
            freq = self._validate_freq()
            # we don't allow center
            if self.center:
                raise NotImplementedError("center is not implemented "
                                          "for datetimelike and offset "
                                          "based windows")
            # this will raise ValueError on non-fixed freqs
            self.win_freq = self.window
            self.window = freq.nanos
            self.win_type = 'freq'
            # min_periods must be an integer
            if self.min_periods is None:
                self.min_periods = 1
        elif not is_integer(self.window):
            raise ValueError("window must be an integer")
        elif self.window < 0:
            raise ValueError("window must be non-negative")
        if not self.is_datetimelike and self.closed is not None:
            raise ValueError("closed only implemented for datetimelike "
                             "and offset based windows")

    def _validate_monotonic(self):
        """ validate on is monotonic """
        # Offset-based windows require a sorted axis to be meaningful.
        if not self._on.is_monotonic:
            formatted = self.on or 'index'
            raise ValueError("{0} must be "
                             "monotonic".format(formatted))

    def _validate_freq(self):
        """ validate & return window frequency """
        # Converts the string/offset window into a DateOffset; failure means
        # the window spec is incompatible with a datetimelike index.
        from pandas.tseries.frequencies import to_offset
        try:
            return to_offset(self.window)
        except (TypeError, ValueError):
            raise ValueError("passed window {0} is not "
                             "compatible with a datetimelike "
                             "index".format(self.window))
    _agg_doc = dedent("""
    Examples
    --------
    >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
    >>> df
              A         B         C
    0 -2.385977 -0.102758  0.438822
    1 -1.004295  0.905829 -0.954544
    2  0.735167 -0.165272 -1.619346
    3 -0.702657 -1.340923 -0.706334
    4 -0.246845  0.211596 -0.901819
    5  2.463718  3.157577 -1.380906
    6 -1.142255  2.340594 -0.039875
    7  1.396598 -1.647453  1.677227
    8 -0.543425  1.761277 -0.220481
    9 -0.640505  0.289374 -1.550670
    >>> df.rolling(3).sum()
              A         B         C
    0       NaN       NaN       NaN
    1       NaN       NaN       NaN
    2 -2.655105  0.637799 -2.135068
    3 -0.971785 -0.600366 -3.280224
    4 -0.214334 -1.294599 -3.227500
    5  1.514216  2.028250 -2.989060
    6  1.074618  5.709767 -2.322600
    7  2.718061  3.850718  0.256446
    8 -0.289082  2.454418  1.416871
    9  0.212668  0.403198 -0.093924
    >>> df.rolling(3).agg({'A':'sum', 'B':'min'})
              A         B
    0       NaN       NaN
    1       NaN       NaN
    2 -2.655105 -0.165272
    3 -0.971785 -1.340923
    4 -0.214334 -1.340923
    5  1.514216 -1.340923
    6  1.074618  0.211596
    7  2.718061 -1.647453
    8 -0.289082 -1.647453
    9  0.212668 -1.647453
    See also
    --------
    pandas.Series.rolling
    pandas.DataFrame.rolling
    """)

    @Appender(_agg_doc)
    @Appender(_shared_docs['aggregate'] % dict(
        versionadded='',
        klass='Series/DataFrame',
        axis=''))
    def aggregate(self, arg, *args, **kwargs):
        return super(Rolling, self).aggregate(arg, *args, **kwargs)

    agg = aggregate  # alias, matching the Series/DataFrame convention

    @Substitution(name='rolling')
    @Appender(_shared_docs['count'])
    def count(self):
        # different impl for freq counting
        if self.is_freq_type:
            return self._apply('roll_count', 'count')
        return super(Rolling, self).count()

    @Substitution(name='rolling')
    @Appender(_doc_template)
    @Appender(_shared_docs['apply'])
    def apply(self, func, raw=None, args=(), kwargs={}):
        # NOTE(review): mutable default ``kwargs={}`` mirrors the base class
        # signature; kept for signature compatibility.
        return super(Rolling, self).apply(
            func, raw=raw, args=args, kwargs=kwargs)

    @Substitution(name='rolling')
    @Appender(_shared_docs['sum'])
    def sum(self, *args, **kwargs):
        nv.validate_rolling_func('sum', args, kwargs)
        return super(Rolling, self).sum(*args, **kwargs)

    @Substitution(name='rolling')
    @Appender(_doc_template)
    @Appender(_shared_docs['max'])
    def max(self, *args, **kwargs):
        nv.validate_rolling_func('max', args, kwargs)
        return super(Rolling, self).max(*args, **kwargs)

    @Substitution(name='rolling')
    @Appender(_shared_docs['min'])
    def min(self, *args, **kwargs):
        nv.validate_rolling_func('min', args, kwargs)
        return super(Rolling, self).min(*args, **kwargs)

    @Substitution(name='rolling')
    @Appender(_shared_docs['mean'])
    def mean(self, *args, **kwargs):
        nv.validate_rolling_func('mean', args, kwargs)
        return super(Rolling, self).mean(*args, **kwargs)

    @Substitution(name='rolling')
    @Appender(_shared_docs['median'])
    def median(self, **kwargs):
        return super(Rolling, self).median(**kwargs)

    @Substitution(name='rolling')
    @Appender(_shared_docs['std'])
    def std(self, ddof=1, *args, **kwargs):
        nv.validate_rolling_func('std', args, kwargs)
        return super(Rolling, self).std(ddof=ddof, **kwargs)

    @Substitution(name='rolling')
    @Appender(_shared_docs['var'])
    def var(self, ddof=1, *args, **kwargs):
        nv.validate_rolling_func('var', args, kwargs)
        return super(Rolling, self).var(ddof=ddof, **kwargs)

    @Substitution(name='rolling')
    @Appender(_doc_template)
    @Appender(_shared_docs['skew'])
    def skew(self, **kwargs):
        return super(Rolling, self).skew(**kwargs)
    _agg_doc = dedent("""
    Examples
    --------
    The example below will show a rolling calculation with a window size of
    four matching the equivalent function call using `scipy.stats`.
    >>> arr = [1, 2, 3, 4, 999]
    >>> fmt = "{0:.6f}"  # limit the printed precision to 6 digits
    >>> import scipy.stats
    >>> print(fmt.format(scipy.stats.kurtosis(arr[:-1], bias=False)))
    -1.200000
    >>> print(fmt.format(scipy.stats.kurtosis(arr[1:], bias=False)))
    3.999946
    >>> s = pd.Series(arr)
    >>> s.rolling(4).kurt()
    0         NaN
    1         NaN
    2         NaN
    3   -1.200000
    4    3.999946
    dtype: float64
    """)

    @Appender(_agg_doc)
    @Substitution(name='rolling')
    @Appender(_shared_docs['kurt'])
    def kurt(self, **kwargs):
        return super(Rolling, self).kurt(**kwargs)

    @Substitution(name='rolling')
    @Appender(_shared_docs['quantile'])
    def quantile(self, quantile, interpolation='linear', **kwargs):
        return super(Rolling, self).quantile(quantile=quantile,
                                             interpolation=interpolation,
                                             **kwargs)

    @Substitution(name='rolling')
    @Appender(_doc_template)
    @Appender(_shared_docs['cov'])
    def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
        return super(Rolling, self).cov(other=other, pairwise=pairwise,
                                        ddof=ddof, **kwargs)

    @Substitution(name='rolling')
    @Appender(_doc_template)
    @Appender(_shared_docs['corr'])
    def corr(self, other=None, pairwise=None, **kwargs):
        return super(Rolling, self).corr(other=other, pairwise=pairwise,
                                         **kwargs)
class RollingGroupby(_GroupByMixin, Rolling):
    """
    A rolling-window transformation applied per group of a groupby.
    .. versionadded:: 0.18.1
    """

    @property
    def _constructor(self):
        # Each individual group is rolled with a plain Rolling object.
        return Rolling

    def _gotitem(self, key, ndim, subset=None):
        # Move the ``on`` column into the index up front, so the index is
        # carried through to the selected object when the groupby machinery
        # splits the frame into per-group pieces.
        if self.on is not None:
            grouped = self._groupby.obj
            self._groupby.obj = grouped.set_index(self._on)
            self.on = None
        return super(RollingGroupby, self)._gotitem(key, ndim, subset=subset)

    def _validate_monotonic(self):
        """
        Skip the monotonic-``on`` check for groupby.rolling: validation has
        already happened at a higher level.
        """
        pass
class Expanding(_Rolling_and_Expanding):
    """
    Provides expanding transformations.
    .. versionadded:: 0.18.0
    Parameters
    ----------
    min_periods : int, default 1
        Minimum number of observations in window required to have a value
        (otherwise result is NA).
    center : boolean, default False
        Set the labels at the center of the window.
    axis : int or string, default 0
    Returns
    -------
    a Window sub-classed for the particular operation
    Examples
    --------
    >>> df = DataFrame({'B': [0, 1, 2, np.nan, 4]})
         B
    0  0.0
    1  1.0
    2  2.0
    3  NaN
    4  4.0
    >>> df.expanding(2).sum()
         B
    0  NaN
    1  1.0
    2  3.0
    3  3.0
    4  7.0
    Notes
    -----
    By default, the result is set to the right edge of the window. This can be
    changed to the center of the window by setting ``center=True``.
    See Also
    --------
    rolling : Provides rolling window calculations
    ewm : Provides exponential weighted functions
    """
    # Attributes copied when the object is re-constructed (_shallow_copy).
    _attributes = ['min_periods', 'center', 'axis']

    def __init__(self, obj, min_periods=1, center=False, axis=0,
                 **kwargs):
        # NOTE(review): extra **kwargs are accepted but silently dropped
        # here -- verify that this is intentional.
        super(Expanding, self).__init__(obj=obj, min_periods=min_periods,
                                        center=center, axis=axis)

    @property
    def _constructor(self):
        return Expanding

    def _get_window(self, other=None):
        # An expanding window spans everything seen so far, so the effective
        # window is the full object length (never below min_periods).
        obj = self._selected_obj
        if other is None:
            return (max(len(obj), self.min_periods) if self.min_periods
                    else len(obj))
        # NOTE(review): ``len(obj) + len(obj)`` ignores ``other``'s length;
        # later pandas uses ``len(obj) + len(other)`` here -- confirm whether
        # this is the intended behavior or a transcription defect.
        return (max((len(obj) + len(obj)), self.min_periods)
                if self.min_periods else (len(obj) + len(obj)))
    # NOTE(review): the example below uses ``df.ewm(...)`` although this is
    # the Expanding aggregate doc -- looks copied from EWM; verify.
    _agg_doc = dedent("""
    Examples
    --------
    >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
    >>> df
              A         B         C
    0 -2.385977 -0.102758  0.438822
    1 -1.004295  0.905829 -0.954544
    2  0.735167 -0.165272 -1.619346
    3 -0.702657 -1.340923 -0.706334
    4 -0.246845  0.211596 -0.901819
    5  2.463718  3.157577 -1.380906
    6 -1.142255  2.340594 -0.039875
    7  1.396598 -1.647453  1.677227
    8 -0.543425  1.761277 -0.220481
    9 -0.640505  0.289374 -1.550670
    >>> df.ewm(alpha=0.5).mean()
              A         B         C
    0 -2.385977 -0.102758  0.438822
    1 -1.464856  0.569633 -0.490089
    2 -0.207700  0.149687 -1.135379
    3 -0.471677 -0.645305 -0.906555
    4 -0.355635 -0.203033 -0.904111
    5  1.076417  1.503943 -1.146293
    6 -0.041654  1.925562 -0.588728
    7  0.680292  0.132049  0.548693
    8  0.067236  0.948257  0.163353
    9 -0.286980  0.618493 -0.694496
    See also
    --------
    pandas.DataFrame.expanding.aggregate
    pandas.DataFrame.rolling.aggregate
    pandas.DataFrame.aggregate
    """)

    @Appender(_agg_doc)
    @Appender(_shared_docs['aggregate'] % dict(
        versionadded='',
        klass='Series/DataFrame',
        axis=''))
    def aggregate(self, arg, *args, **kwargs):
        return super(Expanding, self).aggregate(arg, *args, **kwargs)

    agg = aggregate  # alias, matching the Series/DataFrame convention

    @Substitution(name='expanding')
    @Appender(_shared_docs['count'])
    def count(self, **kwargs):
        return super(Expanding, self).count(**kwargs)

    @Substitution(name='expanding')
    @Appender(_doc_template)
    @Appender(_shared_docs['apply'])
    def apply(self, func, raw=None, args=(), kwargs={}):
        # NOTE(review): mutable default ``kwargs={}`` mirrors the base class
        # signature; kept for signature compatibility.
        return super(Expanding, self).apply(
            func, raw=raw, args=args, kwargs=kwargs)

    @Substitution(name='expanding')
    @Appender(_shared_docs['sum'])
    def sum(self, *args, **kwargs):
        nv.validate_expanding_func('sum', args, kwargs)
        return super(Expanding, self).sum(*args, **kwargs)

    @Substitution(name='expanding')
    @Appender(_doc_template)
    @Appender(_shared_docs['max'])
    def max(self, *args, **kwargs):
        nv.validate_expanding_func('max', args, kwargs)
        return super(Expanding, self).max(*args, **kwargs)

    @Substitution(name='expanding')
    @Appender(_shared_docs['min'])
    def min(self, *args, **kwargs):
        nv.validate_expanding_func('min', args, kwargs)
        return super(Expanding, self).min(*args, **kwargs)

    @Substitution(name='expanding')
    @Appender(_shared_docs['mean'])
    def mean(self, *args, **kwargs):
        nv.validate_expanding_func('mean', args, kwargs)
        return super(Expanding, self).mean(*args, **kwargs)

    @Substitution(name='expanding')
    @Appender(_shared_docs['median'])
    def median(self, **kwargs):
        return super(Expanding, self).median(**kwargs)

    @Substitution(name='expanding')
    @Appender(_shared_docs['std'])
    def std(self, ddof=1, *args, **kwargs):
        nv.validate_expanding_func('std', args, kwargs)
        return super(Expanding, self).std(ddof=ddof, **kwargs)

    @Substitution(name='expanding')
    @Appender(_shared_docs['var'])
    def var(self, ddof=1, *args, **kwargs):
        nv.validate_expanding_func('var', args, kwargs)
        return super(Expanding, self).var(ddof=ddof, **kwargs)

    @Substitution(name='expanding')
    @Appender(_doc_template)
    @Appender(_shared_docs['skew'])
    def skew(self, **kwargs):
        return super(Expanding, self).skew(**kwargs)
    _agg_doc = dedent("""
    Examples
    --------
    The example below will show an expanding calculation with a window size of
    four matching the equivalent function call using `scipy.stats`.
    >>> arr = [1, 2, 3, 4, 999]
    >>> import scipy.stats
    >>> fmt = "{0:.6f}"  # limit the printed precision to 6 digits
    >>> print(fmt.format(scipy.stats.kurtosis(arr[:-1], bias=False)))
    -1.200000
    >>> print(fmt.format(scipy.stats.kurtosis(arr, bias=False)))
    4.999874
    >>> s = pd.Series(arr)
    >>> s.expanding(4).kurt()
    0         NaN
    1         NaN
    2         NaN
    3   -1.200000
    4    4.999874
    dtype: float64
    """)

    @Appender(_agg_doc)
    @Substitution(name='expanding')
    @Appender(_shared_docs['kurt'])
    def kurt(self, **kwargs):
        return super(Expanding, self).kurt(**kwargs)

    @Substitution(name='expanding')
    @Appender(_shared_docs['quantile'])
    def quantile(self, quantile, interpolation='linear', **kwargs):
        return super(Expanding, self).quantile(quantile=quantile,
                                               interpolation=interpolation,
                                               **kwargs)

    @Substitution(name='expanding')
    @Appender(_doc_template)
    @Appender(_shared_docs['cov'])
    def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
        return super(Expanding, self).cov(other=other, pairwise=pairwise,
                                          ddof=ddof, **kwargs)

    @Substitution(name='expanding')
    @Appender(_doc_template)
    @Appender(_shared_docs['corr'])
    def corr(self, other=None, pairwise=None, **kwargs):
        return super(Expanding, self).corr(other=other, pairwise=pairwise,
                                           **kwargs)
class ExpandingGroupby(_GroupByMixin, Expanding):
    """
    An expanding-window transformation applied per group of a groupby.
    .. versionadded:: 0.18.1
    """

    @property
    def _constructor(self):
        # Each individual group expands with a plain Expanding object.
        return Expanding
_bias_template = """
Parameters
----------
bias : boolean, default False
Use a standard estimation bias correction
"""
_pairwise_template = """
Parameters
----------
other : Series, DataFrame, or ndarray, optional
if not supplied then will default to self and produce pairwise output
pairwise : bool, default None
If False then only matching columns between self and other will be used and
the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the output
will be a MultiIndex DataFrame in the case of DataFrame inputs.
In the case of missing elements, only complete pairwise observations will
be used.
bias : boolean, default False
Use a standard estimation bias correction
"""
class EWM(_Rolling):
r"""
Provides exponential weighted functions
.. versionadded:: 0.18.0
Parameters
----------
com : float, optional
Specify decay in terms of center of mass,
:math:`\alpha = 1 / (1 + com),\text{ for } com \geq 0`
span : float, optional
Specify decay in terms of span,
:math:`\alpha = 2 / (span + 1),\text{ for } span \geq 1`
halflife : float, optional
Specify decay in terms of half-life,
:math:`\alpha = 1 - exp(log(0.5) / halflife),\text{ for } halflife > 0`
alpha : float, optional
Specify smoothing factor :math:`\alpha` directly,
:math:`0 < \alpha \leq 1`
.. versionadded:: 0.18.0
min_periods : int, default 0
Minimum number of observations in window required to have a value
(otherwise result is NA).
adjust : boolean, default True
Divide by decaying adjustment factor in beginning periods to account
for imbalance in relative weightings (viewing EWMA as a moving average)
ignore_na : boolean, default False
Ignore missing values when calculating weights;
specify True to reproduce pre-0.15.0 behavior
Returns
-------
a Window sub-classed for the particular operation
Examples
--------
>>> df = DataFrame({'B': [0, 1, 2, np.nan, 4]})
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
>>> df.ewm(com=0.5).mean()
B
0 0.000000
1 0.750000
2 1.615385
3 1.615385
4 3.670213
Notes
-----
Exactly one of center of mass, span, half-life, and alpha must be provided.
Allowed values and relationship between the parameters are specified in the
parameter descriptions above; see the link at the end of this section for
a detailed explanation.
When adjust is True (default), weighted averages are calculated using
weights (1-alpha)**(n-1), (1-alpha)**(n-2), ..., 1-alpha, 1.
When adjust is False, weighted averages are calculated recursively as:
weighted_average[0] = arg[0];
weighted_average[i] = (1-alpha)*weighted_average[i-1] + alpha*arg[i].
When ignore_na is False (default), weights are based on absolute positions.
For example, the weights of x and y used in calculating the final weighted
average of [x, None, y] are (1-alpha)**2 and 1 (if adjust is True), and
(1-alpha)**2 and alpha (if adjust is False).
When ignore_na is True (reproducing pre-0.15.0 behavior), weights are based
on relative positions. For example, the weights of x and y used in
calculating the final weighted average of [x, None, y] are 1-alpha and 1
(if adjust is True), and 1-alpha and alpha (if adjust is False).
More details can be found at
http://pandas.pydata.org/pandas-docs/stable/computation.html#exponentially-weighted-windows
See Also
--------
rolling : Provides rolling window calculations
expanding : Provides expanding transformations.
"""
_attributes = ['com', 'min_periods', 'adjust', 'ignore_na', 'axis']
def __init__(self, obj, com=None, span=None, halflife=None, alpha=None,
min_periods=0, adjust=True, ignore_na=False,
axis=0):
self.obj = obj
self.com = _get_center_of_mass(com, span, halflife, alpha)
self.min_periods = min_periods
self.adjust = adjust
self.ignore_na = ignore_na
self.axis = axis
self.on = None
@property
def _constructor(self):
return EWM
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.ewm(alpha=0.5).mean()
A B C
0 -2.385977 -0.102758 0.438822
1 -1.464856 0.569633 -0.490089
2 -0.207700 0.149687 -1.135379
3 -0.471677 -0.645305 -0.906555
4 -0.355635 -0.203033 -0.904111
5 1.076417 1.503943 -1.146293
6 -0.041654 1.925562 -0.588728
7 0.680292 0.132049 0.548693
8 0.067236 0.948257 0.163353
9 -0.286980 0.618493 -0.694496
See also
--------
pandas.DataFrame.rolling.aggregate
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='',
klass='Series/DataFrame',
axis=''))
def aggregate(self, arg, *args, **kwargs):
return super(EWM, self).aggregate(arg, *args, **kwargs)
agg = aggregate
def _apply(self, func, **kwargs):
"""Rolling statistical measure using supplied function. Designed to be
used with passed-in Cython array-based functions.
Parameters
----------
func : string/callable to apply
Returns
-------
y : type of input argument
"""
blocks, obj, index = self._create_blocks()
results = []
for b in blocks:
try:
values = self._prep_values(b.values)
except TypeError:
results.append(b.values.copy())
continue
if values.size == 0:
results.append(values.copy())
continue
# if we have a string function name, wrap it
if isinstance(func, compat.string_types):
cfunc = getattr(_window, func, None)
if cfunc is None:
raise ValueError("we do not support this function "
"in _window.{0}".format(func))
def func(arg):
return cfunc(arg, self.com, int(self.adjust),
int(self.ignore_na), int(self.min_periods))
results.append(np.apply_along_axis(func, self.axis, values))
return self._wrap_results(results, blocks, obj)
@Substitution(name='ewm')
@Appender(_doc_template)
def mean(self, *args, **kwargs):
"""exponential weighted moving average"""
nv.validate_window_func('mean', args, kwargs)
return self._apply('ewma', **kwargs)
@Substitution(name='ewm')
@Appender(_doc_template)
@Appender(_bias_template)
def std(self, bias=False, *args, **kwargs):
"""exponential weighted moving stddev"""
nv.validate_window_func('std', args, kwargs)
return _zsqrt(self.var(bias=bias, **kwargs))
vol = std
@Substitution(name='ewm')
@Appender(_doc_template)
@Appender(_bias_template)
def var(self, bias=False, *args, **kwargs):
"""exponential weighted moving variance"""
nv.validate_window_func('var', args, kwargs)
def f(arg):
return _window.ewmcov(arg, arg, self.com, int(self.adjust),
int(self.ignore_na), int(self.min_periods),
int(bias))
return self._apply(f, **kwargs)
@Substitution(name='ewm')
@Appender(_doc_template)
@Appender(_pairwise_template)
def cov(self, other=None, pairwise=None, bias=False, **kwargs):
"""exponential weighted sample covariance"""
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
def _get_cov(X, Y):
X = self._shallow_copy(X)
Y = self._shallow_copy(Y)
cov = _window.ewmcov(X._prep_values(), Y._prep_values(), self.com,
int(self.adjust), int(self.ignore_na),
int(self.min_periods), int(bias))
return X._wrap_result(cov)
return _flex_binary_moment(self._selected_obj, other._selected_obj,
_get_cov, pairwise=bool(pairwise))
    @Substitution(name='ewm')
    @Appender(_doc_template)
    @Appender(_pairwise_template)
    def corr(self, other=None, pairwise=None, **kwargs):
        """Exponentially weighted sample correlation.

        Computed as ewmcov(x, y) / sqrt(ewmcov(x, x) * ewmcov(y, y)),
        always with the biased estimator so the bias factors cancel.
        """
        if other is None:
            other = self._selected_obj
            # only default unset
            pairwise = True if pairwise is None else pairwise
        other = self._shallow_copy(other)

        def _get_corr(X, Y):
            X = self._shallow_copy(X)
            Y = self._shallow_copy(Y)

            def _cov(x, y):
                # Final argument 1 == bias=True; biased terms cancel in
                # the correlation ratio below.
                return _window.ewmcov(x, y, self.com, int(self.adjust),
                                      int(self.ignore_na),
                                      int(self.min_periods),
                                      1)

            x_values = X._prep_values()
            y_values = Y._prep_values()
            # Suppress divide-by-zero / invalid warnings from 0-variance
            # windows; _zsqrt maps negative variances to 0 (-> NaN corr).
            with np.errstate(all='ignore'):
                cov = _cov(x_values, y_values)
                x_var = _cov(x_values, x_values)
                y_var = _cov(y_values, y_values)
                corr = cov / _zsqrt(x_var * y_var)
            return X._wrap_result(corr)

        return _flex_binary_moment(self._selected_obj, other._selected_obj,
                                   _get_corr, pairwise=bool(pairwise))
# Helper Funcs
def _flex_binary_moment(arg1, arg2, f, pairwise=False):
    """Apply binary moment function ``f`` across two pandas/numpy objects.

    Dispatches on the argument types:
    - Series/ndarray vs Series/ndarray: mask to a common index, apply ``f``.
    - DataFrame vs DataFrame: columnwise (pairwise=False) or full pairwise
      matrix (pairwise=True, returned with a MultiIndex).
    - DataFrame vs Series: broadcast the Series against each column.
    Arguments are swapped and retried when only ``arg2`` is a DataFrame.
    """
    if not (isinstance(arg1, (np.ndarray, ABCSeries, ABCDataFrame)) and
            isinstance(arg2, (np.ndarray, ABCSeries, ABCDataFrame))):
        raise TypeError("arguments to moment function must be of type "
                        "np.ndarray/Series/DataFrame")
    if (isinstance(arg1, (np.ndarray, ABCSeries)) and
            isinstance(arg2, (np.ndarray, ABCSeries))):
        X, Y = _prep_binary(arg1, arg2)
        return f(X, Y)
    elif isinstance(arg1, ABCDataFrame):
        from pandas import DataFrame

        def dataframe_from_int_dict(data, frame_template):
            # Keys of ``data`` are positional column indices; map them
            # back to the template frame's column labels.
            result = DataFrame(data, index=frame_template.index)
            if len(result.columns) > 0:
                result.columns = frame_template.columns[result.columns]
            return result

        results = {}
        if isinstance(arg2, ABCDataFrame):
            if pairwise is False:
                if arg1 is arg2:
                    # special case in order to handle duplicate column names
                    for i, col in enumerate(arg1.columns):
                        results[i] = f(arg1.iloc[:, i], arg2.iloc[:, i])
                    return dataframe_from_int_dict(results, arg1)
                else:
                    if not arg1.columns.is_unique:
                        raise ValueError("'arg1' columns are not unique")
                    if not arg2.columns.is_unique:
                        raise ValueError("'arg2' columns are not unique")
                    with warnings.catch_warnings(record=True):
                        X, Y = arg1.align(arg2, join='outer')
                    # 0 * other propagates NaNs so both sides share the
                    # same missing-value mask.
                    X = X + 0 * Y
                    Y = Y + 0 * X
                    with warnings.catch_warnings(record=True):
                        res_columns = arg1.columns.union(arg2.columns)
                    for col in res_columns:
                        if col in X and col in Y:
                            results[col] = f(X[col], Y[col])
                    return DataFrame(results, index=X.index,
                                     columns=res_columns)
            elif pairwise is True:
                results = defaultdict(dict)
                for i, k1 in enumerate(arg1.columns):
                    for j, k2 in enumerate(arg2.columns):
                        if j < i and arg2 is arg1:
                            # Symmetric case: reuse the transposed entry.
                            results[i][j] = results[j][i]
                        else:
                            results[i][j] = f(*_prep_binary(arg1.iloc[:, i],
                                                            arg2.iloc[:, j]))

                from pandas import MultiIndex, concat

                result_index = arg1.index.union(arg2.index)
                if len(result_index):
                    # construct result frame
                    result = concat(
                        [concat([results[i][j]
                                 for j, c in enumerate(arg2.columns)],
                                ignore_index=True)
                         for i, c in enumerate(arg1.columns)],
                        ignore_index=True,
                        axis=1)
                    result.columns = arg1.columns

                    # set the index and reorder so rows are
                    # (original index, arg2 column) pairs
                    if arg2.columns.nlevels > 1:
                        result.index = MultiIndex.from_product(
                            arg2.columns.levels + [result_index])
                        result = result.reorder_levels([2, 0, 1]).sort_index()
                    else:
                        result.index = MultiIndex.from_product(
                            [range(len(arg2.columns)),
                             range(len(result_index))])
                        result = result.swaplevel(1, 0).sort_index()
                        result.index = MultiIndex.from_product(
                            [result_index] + [arg2.columns])
                else:
                    # empty result
                    result = DataFrame(
                        index=MultiIndex(levels=[arg1.index, arg2.columns],
                                         labels=[[], []]),
                        columns=arg2.columns,
                        dtype='float64')

                    # reset our index names to arg1 names
                    # reset our column names to arg2 names
                    # careful not to mutate the original names
                    result.columns = result.columns.set_names(
                        arg1.columns.names)
                    result.index = result.index.set_names(
                        result_index.names + arg2.columns.names)

                return result
            else:
                raise ValueError("'pairwise' is not True/False")
        else:
            results = {}
            for i, col in enumerate(arg1.columns):
                results[i] = f(*_prep_binary(arg1.iloc[:, i], arg2))
            return dataframe_from_int_dict(results, arg1)
    else:
        # Only arg2 is a DataFrame: swap and recurse.
        return _flex_binary_moment(arg2, arg1, f)
def _get_center_of_mass(comass, span, halflife, alpha):
    """Convert one of the EWM decay specifications to a center of mass.

    Exactly one of ``comass``, ``span``, ``halflife``, ``alpha`` may be
    given; each is validated and mapped onto the equivalent center of
    mass. Raises ValueError on conflicting, missing, or out-of-range
    specifications.
    """
    if com._count_not_none(comass, span, halflife, alpha) > 1:
        raise ValueError("comass, span, halflife, and alpha "
                         "are mutually exclusive")

    if comass is not None:
        if comass < 0:
            raise ValueError("comass must satisfy: comass >= 0")
        return float(comass)
    if span is not None:
        if span < 1:
            raise ValueError("span must satisfy: span >= 1")
        return float((span - 1) / 2.)
    if halflife is not None:
        if halflife <= 0:
            raise ValueError("halflife must satisfy: halflife > 0")
        # Decay per period that halves the weight after `halflife` periods.
        decay_rate = 1 - np.exp(np.log(0.5) / halflife)
        return float(1 / decay_rate - 1)
    if alpha is not None:
        if alpha <= 0 or alpha > 1:
            raise ValueError("alpha must satisfy: 0 < alpha <= 1")
        return float((1.0 - alpha) / alpha)

    raise ValueError("Must pass one of comass, span, halflife, or alpha")
def _offset(window, center):
    """Return the number of periods results are shifted by when centering.

    Parameters
    ----------
    window : int or array-like
        Window size, or the window weights themselves (in which case
        their length is used).
    center : bool
        Whether results are centered within the window.

    Returns
    -------
    int, or an integer ndarray when the offset is array-valued.
    """
    if not is_integer(window):
        window = len(window)
    offset = (window - 1) / 2. if center else 0
    try:
        return int(offset)
    # Was a bare ``except:`` which also swallowed KeyboardInterrupt /
    # SystemExit; only int() failing on an array is expected here.
    except TypeError:
        return offset.astype(int)
def _require_min_periods(p):
def _check_func(minp, window):
if minp is None:
return window
else:
return max(p, minp)
return _check_func
def _use_window(minp, window):
if minp is None:
return window
else:
return minp
def _zsqrt(x):
    """Square root with negative inputs clamped to 0 instead of NaN."""
    with np.errstate(all='ignore'):
        result = np.sqrt(x)
        negative = x < 0

    # DataFrames need .values to reduce the boolean frame to a scalar.
    if isinstance(x, ABCDataFrame):
        any_negative = negative.values.any()
    else:
        any_negative = negative.any()
    if any_negative:
        result[negative] = 0

    return result
def _prep_binary(arg1, arg2):
if not isinstance(arg2, type(arg1)):
raise Exception('Input arrays must be of the same type!')
# mask out values, this also makes a common index...
X = arg1 + 0 * arg2
Y = arg2 + 0 * arg1
return X, Y
# Top-level exports
def rolling(obj, win_type=None, **kwds):
    # Top-level constructor: dispatch to Window when a weighting window
    # type (e.g. 'gaussian') is requested, otherwise to plain Rolling.
    if not isinstance(obj, (ABCSeries, ABCDataFrame)):
        raise TypeError('invalid type: %s' % type(obj))

    if win_type is not None:
        return Window(obj, win_type=win_type, **kwds)

    return Rolling(obj, **kwds)


# Reuse the class docstring for the functional entry point.
rolling.__doc__ = Window.__doc__
def expanding(obj, **kwds):
    # Top-level constructor for an expanding-window computation.
    if not isinstance(obj, (ABCSeries, ABCDataFrame)):
        raise TypeError('invalid type: %s' % type(obj))

    return Expanding(obj, **kwds)


# Reuse the class docstring for the functional entry point.
expanding.__doc__ = Expanding.__doc__
def ewm(obj, **kwds):
    # Top-level constructor for exponentially weighted computations.
    if not isinstance(obj, (ABCSeries, ABCDataFrame)):
        raise TypeError('invalid type: %s' % type(obj))

    return EWM(obj, **kwds)


# Reuse the class docstring for the functional entry point.
ewm.__doc__ = EWM.__doc__
|
louispotok/pandas
|
pandas/core/window.py
|
Python
|
bsd-3-clause
| 78,055
|
[
"Gaussian"
] |
5065288e40f1e2718cac83ea55f23a9e4f6e688188cf0a3e1e6814c707c70e83
|
"""
Support and standalone functions for Robust Linear Models
References
----------
PJ Huber. 'Robust Statistics' John Wiley and Sons, Inc., New York, 1981.
R Venables, B Ripley. 'Modern Applied Statistics in S'
Springer, New York, 2002.
C Croux, PJ Rousseeuw, 'Time-efficient algorithms for two highly robust
estimators of scale' Computational statistics. Physica, Heidelberg, 1992.
"""
import numpy as np
from scipy.stats import norm as Gaussian
from statsmodels.tools import tools
from statsmodels.tools.validation import array_like, float_like
from . import norms
from ._qn import _qn
def mad(a, c=Gaussian.ppf(3 / 4.0), axis=0, center=np.median):
    """
    Median absolute deviation (MAD) of an array along the given axis.

    Parameters
    ----------
    a : array_like
        Input array.
    c : float, optional
        Normalization constant, defaults to scipy.stats.norm.ppf(3/4.)
        (~0.6745) so the MAD is a consistent estimator of the standard
        deviation at the normal distribution.
    axis : int, optional
        Axis to reduce over. The default is 0; may be None.
    center : callable or float
        Either a callable applied to ``a`` to locate the center (default
        ``np.median``; applied via np.apply_over_axes when an axis is
        given) or an explicit numeric center.

    Returns
    -------
    mad : float
        median(abs(`a` - center)) / `c`
    """
    a = array_like(a, "a", ndim=None)
    c = float_like(c, "c")

    if not a.size:
        loc = 0.0
    elif not callable(center):
        loc = float_like(center, "center")
    elif axis is None:
        loc = center(a.ravel())
    else:
        loc = np.apply_over_axes(center, a, axis)

    return np.median(np.abs(a - loc) / c, axis=axis)
def iqr(a, c=Gaussian.ppf(3 / 4) - Gaussian.ppf(1 / 4), axis=0):
    """
    Normalized interquartile range along the given axis of an array.

    Parameters
    ----------
    a : array_like
        Input array.
    c : float, optional
        Normalization constant giving a consistent estimate of the
        standard deviation at the normal distribution. Defaults to
        scipy.stats.norm.ppf(3/4.) - scipy.stats.norm.ppf(1/4.) (~1.349).
    axis : int, optional
        Axis to reduce over. The default is 0; may be None.

    Returns
    -------
    The normalized interquartile range
    """
    a = array_like(a, "a", ndim=None)
    c = float_like(c, "c")

    if a.ndim == 0:
        raise ValueError("a should have at least one dimension")
    if a.size == 0:
        return np.nan
    quantiles = np.quantile(a, [0.25, 0.75], axis=axis)
    # diff along the stacked-quantile axis yields Q3 - Q1.
    return np.squeeze(np.diff(quantiles, axis=0) / c)
def qn_scale(a, c=1 / (np.sqrt(2) * Gaussian.ppf(5 / 8)), axis=0):
    """
    Qn robust estimator of scale.

    A more efficient alternative to the MAD: for an array of length n it
    is c * {abs(a[i] - a[j]): i<j}_(k) with k = ([n/2] + 1 choose 2),
    i.e. the k-th order statistic of the pairwise absolute differences.
    Follows the algorithm of Croux and Rousseeuw (1992).

    Parameters
    ----------
    a : array_like
        Input array.
    c : float, optional
        Normalization constant; the default gives consistent estimates
        of the standard deviation at the normal distribution.
    axis : int, optional
        The default is 0.

    Returns
    -------
    {float, ndarray}
        The Qn robust estimator of scale
    """
    a = array_like(
        a, "a", ndim=None, dtype=np.float64, contiguous=True, order="C"
    )
    c = float_like(c, "c")

    if a.ndim == 0:
        raise ValueError("a should have at least one dimension")
    if a.size == 0:
        return np.nan
    # Delegate to the Cython kernel per 1-d slice.
    result = np.apply_along_axis(_qn, axis=axis, arr=a, c=c)
    return float(result) if result.ndim == 0 else result
def _qn_naive(a, c=1 / (np.sqrt(2) * Gaussian.ppf(5 / 8))):
"""
A naive implementation of the Qn robust estimator of scale, used solely
to test the faster, more involved one
Parameters
----------
a : array_like
Input array.
c : float, optional
The normalization constant, used to get consistent estimates of the
standard deviation at the normal distribution. Defined as
1/(np.sqrt(2) * scipy.stats.norm.ppf(5/8)), which is 2.219144.
Returns
-------
The Qn robust estimator of scale
"""
a = np.squeeze(a)
n = a.shape[0]
if a.size == 0:
return np.nan
else:
h = int(n // 2 + 1)
k = int(h * (h - 1) / 2)
idx = np.triu_indices(n, k=1)
diffs = np.abs(a[idx[0]] - a[idx[1]])
output = np.partition(diffs, kth=k - 1)[k - 1]
output = c * output
return output
class Huber(object):
    """
    Huber's proposal 2 for estimating location and scale jointly.

    Parameters
    ----------
    c : float, optional
        Threshold used in threshold for chi=psi**2. Default value is 1.5.
    tol : float, optional
        Tolerance for convergence. Default value is 1e-08.
    maxiter : int, optional
        Maximum number of iterations. Default value is 30.
    norm : statsmodels.robust.norms.RobustNorm, optional
        A robust norm used in M estimator of location. If None,
        the location estimator defaults to a one-step
        fixed point version of the M-estimator using Huber's T.

    call
        Return joint estimates of Huber's scale and location.

    Examples
    --------
    >>> import numpy as np
    >>> import statsmodels.api as sm
    >>> chem_data = np.array([2.20, 2.20, 2.4, 2.4, 2.5, 2.7, 2.8, 2.9, 3.03,
    ...        3.03, 3.10, 3.37, 3.4, 3.4, 3.4, 3.5, 3.6, 3.7, 3.7, 3.7, 3.7,
    ...        3.77, 5.28, 28.95])
    >>> sm.robust.scale.huber(chem_data)
    (array(3.2054980819923693), array(0.67365260010478967))
    """

    def __init__(self, c=1.5, tol=1.0e-08, maxiter=30, norm=None):
        self.c = c
        self.maxiter = maxiter
        self.tol = tol
        self.norm = norm
        # Consistency constant: E[psi(Z)^2] for Z ~ N(0, 1) with psi
        # clipping at +/- c; makes the scale estimate Fisher-consistent.
        tmp = 2 * Gaussian.cdf(c) - 1
        self.gamma = tmp + c ** 2 * (1 - tmp) - 2 * c * Gaussian.pdf(c)

    def __call__(self, a, mu=None, initscale=None, axis=0):
        """
        Compute Huber's proposal 2 estimate of scale, using an optional
        initial value of scale and an optional estimate of mu. If mu
        is supplied, it is not reestimated.

        Parameters
        ----------
        a : ndarray
            1d array
        mu : float or None, optional
            If the location mu is supplied then it is not reestimated.
            Default is None, which means that it is estimated.
        initscale : float or None, optional
            A first guess on scale.  If initscale is None then the
            standardized median absolute deviation of a is used.

        Notes
        -----
        `Huber` minimizes the function

        sum(psi((a[i]-mu)/scale)**2)

        as a function of (mu, scale), where

        psi(x) = np.clip(x, -self.c, self.c)
        """
        a = np.asarray(a)
        if mu is None:
            # One degree of freedom is spent estimating the location.
            n = a.shape[0] - 1
            mu = np.median(a, axis=axis)
            est_mu = True
        else:
            n = a.shape[0]
            est_mu = False

        if initscale is None:
            scale = mad(a, axis=axis)
        else:
            scale = initscale
        scale = tools.unsqueeze(scale, axis, a.shape)
        mu = tools.unsqueeze(mu, axis, a.shape)
        return self._estimate_both(a, scale, mu, axis, est_mu, n)

    def _estimate_both(self, a, scale, mu, axis, est_mu, n):
        """
        Estimate scale and location simultaneously with the following
        pseudo_loop:

        while not_converged:
            mu, scale = estimate_location(a, scale, mu), estimate_scale(a, scale, mu)

        where estimate_location is an M-estimator and estimate_scale implements
        the check used in Section 5.5 of Venables & Ripley
        """  # noqa:E501
        for _ in range(self.maxiter):
            # Estimate the mean along a given axis
            if est_mu:
                if self.norm is None:
                    # This is a one-step fixed-point estimator
                    # if self.norm == norms.HuberT
                    # It should be faster than using norms.HuberT
                    nmu = (
                        np.clip(
                            a, mu - self.c * scale, mu + self.c * scale
                        ).sum(axis)
                        / a.shape[axis]
                    )
                else:
                    nmu = norms.estimate_location(
                        a, scale, self.norm, axis, mu, self.maxiter, self.tol
                    )
            else:
                # Effectively, do nothing
                nmu = mu.squeeze()
            nmu = tools.unsqueeze(nmu, axis, a.shape)

            # Winsorized scale update (Venables & Ripley, Section 5.5).
            subset = np.less_equal(np.abs((a - mu) / scale), self.c)
            card = subset.sum(axis)
            scale_num = np.sum(subset * (a - nmu) ** 2, axis)
            scale_denom = n * self.gamma - (a.shape[axis] - card) * self.c ** 2
            nscale = np.sqrt(scale_num / scale_denom)
            nscale = tools.unsqueeze(nscale, axis, a.shape)

            # np.alltrue was removed in NumPy 2.0; np.all is equivalent.
            test1 = np.all(
                np.less_equal(np.abs(scale - nscale), nscale * self.tol)
            )
            test2 = np.all(
                np.less_equal(np.abs(mu - nmu), nscale * self.tol)
            )
            if not (test1 and test2):
                mu = nmu
                scale = nscale
            else:
                return nmu.squeeze(), nscale.squeeze()
        raise ValueError(
            "joint estimation of location and scale failed "
            "to converge in %d iterations" % self.maxiter
        )
huber = Huber()  # module-level convenience instance with default settings
class HuberScale(object):
    r"""
    Huber's scaling for fitting robust linear models.

    Huber's scale is intended to be used as the scale estimate in the
    IRLS algorithm and is slightly different than the `Huber` class.

    Parameters
    ----------
    d : float, optional
        d is the tuning constant for Huber's scale. Default is 2.5
    tol : float, optional
        The convergence tolerance
    maxiter : int, optional
        The maximum number of iterations. The default is 30.

    Methods
    -------
    call
        Return's Huber's scale computed as below

    Notes
    -----
    Huber's scale is the iterative solution to

    scale_(i+1)**2 = 1/(n*h)*sum(chi(r/sigma_i)*sigma_i**2

    where the Huber function is

    chi(x) = (x**2)/2 for \|x\| < d
    chi(x) = (d**2)/2 for \|x\| >= d

    and the Huber constant h = (n-p)/n*(d**2 + (1-d**2)*\
        scipy.stats.norm.cdf(d) - .5 - d*sqrt(2*pi)*exp(-0.5*d**2)
    """

    def __init__(self, d=2.5, tol=1e-08, maxiter=30):
        self.d = d
        self.tol = tol
        self.maxiter = maxiter

    def __call__(self, df_resid, nobs, resid):
        # Huber's consistency constant h (see class Notes).
        h = (
            df_resid
            / nobs
            * (
                self.d ** 2
                + (1 - self.d ** 2) * Gaussian.cdf(self.d)
                - 0.5
                - self.d / (np.sqrt(2 * np.pi)) * np.exp(-0.5 * self.d ** 2)
            )
        )
        # Initial scale: standardized MAD of the residuals.
        s = mad(resid)

        def subset(x):
            # Boolean mask of residuals inside the +/- d*x band.
            return np.less(np.abs(resid / x), self.d)

        def chi(s):
            # Huber's chi: quadratic inside the band, constant outside.
            return subset(s) * (resid / s) ** 2 / 2 + (1 - subset(s)) * (
                self.d ** 2 / 2
            )

        # Fixed-point iteration on the scale (history keeps the last two
        # values so the convergence test can compare them).
        scalehist = [np.inf, s]
        niter = 1
        while (
            np.abs(scalehist[niter - 1] - scalehist[niter]) > self.tol
            and niter < self.maxiter
        ):
            nscale = np.sqrt(
                1
                / (nobs * h)
                * np.sum(chi(scalehist[-1]))
                * scalehist[-1] ** 2
            )
            scalehist.append(nscale)
            niter += 1
            # TODO: raise on convergence failure?
        return scalehist[-1]
hubers_scale = HuberScale()  # module-level convenience instance with default settings
|
jseabold/statsmodels
|
statsmodels/robust/scale.py
|
Python
|
bsd-3-clause
| 12,267
|
[
"Gaussian"
] |
6a565c072d577479b2d89c86e7085472a040efce67808af4835a8c671d03f091
|
from setuptools import setup, find_packages

with open('README.md') as readme_file:
    readme = readme_file.read()

# Load __version__ without importing the package; the context manager
# closes the file (the old exec(open(...).read()) leaked the handle).
with open('graftm/version.py') as version_file:
    exec(version_file.read())  # loads __version__

setup(name='graftm',
      version=__version__,
      author='Joel Boyd, Ben Woodcroft',
      description='GraftM is a pipeline used for identifying and classifying marker gene reads from metagenomic datasets',
      long_description=readme,
      # NOTE: the former 'description_content_type' kwarg was dropped;
      # it is not a setuptools keyword (only long_description_content_type is).
      long_description_content_type="text/markdown",
      license='GPL3+',
      keywords="",
      packages=find_packages(exclude='docs'),
      install_requires=('biopython >=1.64',
                        'biom-format >=2.1.4',
                        'extern >=0.0.4',
                        'taxtastic >=0.5.4',
                        'tempdir >=0.6',
                        'DendroPy >= 4.1.0'),
      setup_requires=['nose>=1.0'],
      test_suite='nose.collector',
      url='http://geronimp.github.io/graftM',
      scripts=['bin/graftM'],
      data_files=[
          ('share', ['share/18S.hmm']),
      ],
      )
|
wwood/graftM
|
setup.py
|
Python
|
gpl-3.0
| 1,091
|
[
"Biopython"
] |
0681ebf846dd07e053b2ce3e38a23debd8c650c029967024321981cb4dcf52dc
|
# -*- coding: utf-8 -*-
""" Tests for user authn views. """
from __future__ import absolute_import
import logging
import re
from http.cookies import SimpleCookie
from unittest import skipUnless
import ddt
import mock
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.contrib.messages.middleware import MessageMiddleware
from django.contrib.sessions.middleware import SessionMiddleware
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase
from django.test.client import RequestFactory
from django.test.utils import override_settings
from django.urls import reverse
from django.utils.translation import ugettext as _
from edx_oauth2_provider.tests.factories import AccessTokenFactory, ClientFactory, RefreshTokenFactory
from oauth2_provider.models import AccessToken as dot_access_token
from oauth2_provider.models import RefreshToken as dot_refresh_token
from provider.oauth2.models import AccessToken as dop_access_token
from provider.oauth2.models import RefreshToken as dop_refresh_token
from six.moves import range
from six.moves.urllib.parse import urlencode # pylint: disable=import-error
from testfixtures import LogCapture
from waffle.models import Switch
from course_modes.models import CourseMode
from openedx.core.djangoapps.oauth_dispatch.tests import factories as dot_factories
from openedx.core.djangoapps.site_configuration.tests.mixins import SiteMixin
from openedx.core.djangoapps.theming.tests.test_util import with_comprehensive_theme_context
from openedx.core.djangoapps.user_api.accounts.api import activate_account, create_account
from openedx.core.djangoapps.user_api.accounts.utils import ENABLE_SECONDARY_EMAIL_FEATURE_SWITCH
from openedx.core.djangoapps.user_api.errors import UserAPIInternalError
from openedx.core.djangoapps.user_authn.views.login_form import login_and_registration_form
from openedx.core.djangolib.js_utils import dump_js_escaped_json
from openedx.core.djangolib.markup import HTML, Text
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase, skip_unless_lms
from student.tests.factories import AccountRecoveryFactory
from third_party_auth.tests.testutil import ThirdPartyAuthTestMixin, simulate_running_pipeline
from util.testing import UrlResetMixin
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
# Logger name checked by LogCapture assertions below.
LOGGER_NAME = 'audit'
User = get_user_model()  # pylint:disable=invalid-name
# Copy of FEATURES with password-reset failure emails enabled; applied
# per-test via @override_settings.
FEATURES_WITH_FAILED_PASSWORD_RESET_EMAIL = settings.FEATURES.copy()
FEATURES_WITH_FAILED_PASSWORD_RESET_EMAIL['ENABLE_PASSWORD_RESET_FAILURE_EMAIL'] = True
@skip_unless_lms
@ddt.ddt
class UserAccountUpdateTest(CacheIsolationTestCase, UrlResetMixin):
    """ Tests for views that update the user's account information. """
    USERNAME = u"heisenberg"
    ALTERNATE_USERNAME = u"walt"
    OLD_PASSWORD = u"ḅḷüëṡḳÿ"
    NEW_PASSWORD = u"B🄸🄶B🄻🅄🄴"
    OLD_EMAIL = u"walter@graymattertech.com"
    NEW_EMAIL = u"walt@savewalterwhite.com"
    # Number of bad requests issued to trip the rate limiter.
    INVALID_ATTEMPTS = 100
    INVALID_KEY = u"123abc"
    URLCONF_MODULES = ['student_accounts.urls']
    ENABLED_CACHES = ['default']
    def setUp(self):
        super(UserAccountUpdateTest, self).setUp()
        # Create/activate a new account
        activation_key = create_account(self.USERNAME, self.OLD_PASSWORD, self.OLD_EMAIL)
        activate_account(activation_key)
        self.account_recovery = AccountRecoveryFactory.create(user=User.objects.get(email=self.OLD_EMAIL))
        # Enable the secondary-email (account recovery) feature switch.
        self.enable_account_recovery_switch = Switch.objects.create(
            name=ENABLE_SECONDARY_EMAIL_FEATURE_SWITCH,
            active=True
        )
        # Login
        result = self.client.login(username=self.USERNAME, password=self.OLD_PASSWORD)
        self.assertTrue(result)
    @skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in LMS')
    def test_password_change(self):
        # Request a password change while logged in, simulating
        # use of the password reset link from the account page
        response = self._change_password()
        self.assertEqual(response.status_code, 200)
        # Check that an email was sent
        self.assertEqual(len(mail.outbox), 1)
        # Retrieve the activation link from the email body
        email_body = mail.outbox[0].body
        result = re.search(r'(?P<url>https?://[^\s]+)', email_body)
        self.assertIsNot(result, None)
        activation_link = result.group('url')
        # Visit the activation link
        response = self.client.get(activation_link)
        self.assertEqual(response.status_code, 200)
        # Submit a new password and follow the redirect to the success page
        response = self.client.post(
            activation_link,
            # These keys are from the form on the current password reset confirmation page.
            {'new_password1': self.NEW_PASSWORD, 'new_password2': self.NEW_PASSWORD},
            follow=True
        )
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Your password has been reset.")
        # Log the user out to clear session data
        self.client.logout()
        # Verify that the new password can be used to log in
        result = self.client.login(username=self.USERNAME, password=self.NEW_PASSWORD)
        self.assertTrue(result)
        # Try reusing the activation link to change the password again
        # Visit the activation link again.
        response = self.client.get(activation_link)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "This password reset link is invalid. It may have been used already.")
        self.client.logout()
        # Verify that the old password cannot be used to log in
        result = self.client.login(username=self.USERNAME, password=self.OLD_PASSWORD)
        self.assertFalse(result)
        # Verify that the new password continues to be valid
        result = self.client.login(username=self.USERNAME, password=self.NEW_PASSWORD)
        self.assertTrue(result)
    def test_password_change_failure(self):
        with mock.patch('openedx.core.djangoapps.user_api.accounts.api.request_password_change',
                        side_effect=UserAPIInternalError):
            self._change_password()
            # NOTE(review): assertRaises called without a callable/context
            # manager asserts nothing here — it just returns an unused
            # context object. If the intent is to verify the error
            # propagates, this should be `with self.assertRaises(...):`;
            # confirm whether the view swallows the exception first.
            self.assertRaises(UserAPIInternalError)
    @override_settings(FEATURES=FEATURES_WITH_FAILED_PASSWORD_RESET_EMAIL)
    def test_password_reset_failure_email(self):
        """Test that a password reset failure email notification is sent, when enabled."""
        # Log the user out
        self.client.logout()
        bad_email = 'doesnotexist@example.com'
        response = self._change_password(email=bad_email)
        self.assertEqual(response.status_code, 200)
        # Check that an email was sent
        self.assertEqual(len(mail.outbox), 1)
        # Verify that the body contains the failed password reset message
        sent_message = mail.outbox[0]
        text_body = sent_message.body
        html_body = sent_message.alternatives[0][0]
        # Both the plain-text and HTML bodies must carry the failure copy.
        for email_body in [text_body, html_body]:
            msg = u'However, there is currently no user account associated with your email address: {email}'.format(
                email=bad_email
            )
            assert u'reset for your user account at {}'.format(settings.PLATFORM_NAME) in email_body
            assert 'password_reset_confirm' not in email_body, 'The link should not be added if user was not found'
            assert msg in email_body
    @ddt.data(True, False)
    def test_password_change_logged_out(self, send_email):
        # Log the user out
        self.client.logout()
        # Request a password change while logged out, simulating
        # use of the password reset link from the login page
        if send_email:
            response = self._change_password(email=self.OLD_EMAIL)
            self.assertEqual(response.status_code, 200)
        else:
            # Don't send an email in the POST data, simulating
            # its (potentially accidental) omission in the POST
            # data sent from the login page
            response = self._change_password()
            self.assertEqual(response.status_code, 400)
    def test_access_token_invalidation_logged_out(self):
        self.client.logout()
        user = User.objects.get(email=self.OLD_EMAIL)
        self._create_dop_tokens(user)
        self._create_dot_tokens(user)
        response = self._change_password(email=self.OLD_EMAIL)
        self.assertEqual(response.status_code, 200)
        self.assert_access_token_destroyed(user)
    def test_access_token_invalidation_logged_in(self):
        user = User.objects.get(email=self.OLD_EMAIL)
        self._create_dop_tokens(user)
        self._create_dot_tokens(user)
        response = self._change_password()
        self.assertEqual(response.status_code, 200)
        self.assert_access_token_destroyed(user)
    def test_password_change_inactive_user(self):
        # Log out the user created during test setup
        self.client.logout()
        # Create a second user, but do not activate it
        create_account(self.ALTERNATE_USERNAME, self.OLD_PASSWORD, self.NEW_EMAIL)
        # Send the view the email address tied to the inactive user
        response = self._change_password(email=self.NEW_EMAIL)
        # Expect that the activation email is still sent,
        # since the user may have lost the original activation email.
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(mail.outbox), 1)
    def test_password_change_no_user(self):
        # Log out the user created during test setup
        self.client.logout()
        with LogCapture(LOGGER_NAME, level=logging.INFO) as logger:
            # Send the view an email address not tied to any user
            response = self._change_password(email=self.NEW_EMAIL)
            self.assertEqual(response.status_code, 200)
        # The view must not leak whether the address exists; it only logs.
        logger.check((LOGGER_NAME, 'INFO', 'Invalid password reset attempt'))
    def test_password_change_rate_limited(self):
        # Log out the user created during test setup, to prevent the view from
        # selecting the logged-in user's email address over the email provided
        # in the POST data
        self.client.logout()
        # Make many consecutive bad requests in an attempt to trigger the rate limiter
        for __ in range(self.INVALID_ATTEMPTS):
            self._change_password(email=self.NEW_EMAIL)
        response = self._change_password(email=self.NEW_EMAIL)
        self.assertEqual(response.status_code, 403)
    @ddt.data(
        ('post', 'password_change_request', []),
    )
    @ddt.unpack
    def test_require_http_method(self, correct_method, url_name, args):
        wrong_methods = {'get', 'put', 'post', 'head', 'options', 'delete'} - {correct_method}
        url = reverse(url_name, args=args)
        for method in wrong_methods:
            response = getattr(self.client, method)(url)
            self.assertEqual(response.status_code, 405)
    def _change_password(self, email=None):
        """Request to change the user's password. """
        data = {}
        if email:
            data['email'] = email
        return self.client.post(path=reverse('password_change_request'), data=data)
    def _create_dop_tokens(self, user=None):
        """Create dop access token for given user if user provided else for default user."""
        if not user:
            user = User.objects.get(email=self.OLD_EMAIL)
        client = ClientFactory()
        access_token = AccessTokenFactory(user=user, client=client)
        RefreshTokenFactory(user=user, client=client, access_token=access_token)
    def _create_dot_tokens(self, user=None):
        """Create dot access token for given user if user provided else for default user."""
        if not user:
            user = User.objects.get(email=self.OLD_EMAIL)
        application = dot_factories.ApplicationFactory(user=user)
        access_token = dot_factories.AccessTokenFactory(user=user, application=application)
        dot_factories.RefreshTokenFactory(user=user, application=application, access_token=access_token)
    def assert_access_token_destroyed(self, user):
        """Assert all access tokens are destroyed."""
        self.assertFalse(dot_access_token.objects.filter(user=user).exists())
        self.assertFalse(dot_refresh_token.objects.filter(user=user).exists())
        self.assertFalse(dop_access_token.objects.filter(user=user).exists())
        self.assertFalse(dop_refresh_token.objects.filter(user=user).exists())
@skip_unless_lms
@ddt.ddt
class LoginAndRegistrationTest(ThirdPartyAuthTestMixin, UrlResetMixin, ModuleStoreTestCase):
""" Tests for the student account views that update the user's account information. """
USERNAME = "bob"
EMAIL = "bob@example.com"
PASSWORD = u"password"
URLCONF_MODULES = ['openedx.core.djangoapps.embargo']
@mock.patch.dict(settings.FEATURES, {'EMBARGO': True})
def setUp(self): # pylint: disable=arguments-differ
super(LoginAndRegistrationTest, self).setUp()
# Several third party auth providers are created for these tests:
self.google_provider = self.configure_google_provider(enabled=True, visible=True)
self.configure_facebook_provider(enabled=True, visible=True)
self.configure_dummy_provider(
visible=True,
enabled=True,
icon_class='',
icon_image=SimpleUploadedFile('icon.svg', '<svg><rect width="50" height="100"/></svg>'),
)
self.hidden_enabled_provider = self.configure_linkedin_provider(
visible=False,
enabled=True,
)
self.hidden_disabled_provider = self.configure_azure_ad_provider()
@ddt.data(
("signin_user", "login"),
("register_user", "register"),
)
@ddt.unpack
def test_login_and_registration_form(self, url_name, initial_mode):
response = self.client.get(reverse(url_name))
expected_data = u'"initial_mode": "{mode}"'.format(mode=initial_mode)
self.assertContains(response, expected_data)
@ddt.data("signin_user", "register_user")
def test_login_and_registration_form_already_authenticated(self, url_name):
# call the account registration api that sets the login cookies
url = reverse('user_api_registration')
request_data = {
'username': self.USERNAME,
'password': self.PASSWORD,
'email': self.EMAIL,
'name': self.USERNAME,
'terms_of_service': 'true',
'honor_code': 'true',
}
result = self.client.post(url, data=request_data)
self.assertEqual(result.status_code, 200)
result = self.client.login(username=self.USERNAME, password=self.PASSWORD)
self.assertTrue(result)
# Verify that we're redirected to the dashboard
response = self.client.get(reverse(url_name))
self.assertRedirects(response, reverse("dashboard"))
@ddt.data(
(None, "signin_user"),
(None, "register_user"),
("edx.org", "signin_user"),
("edx.org", "register_user"),
)
@ddt.unpack
def test_login_and_registration_form_signin_not_preserves_params(self, theme, url_name):
params = [
('course_id', 'edX/DemoX/Demo_Course'),
('enrollment_action', 'enroll'),
]
# The response should not have a "Sign In" button with the URL
# that preserves the querystring params
with with_comprehensive_theme_context(theme):
response = self.client.get(reverse(url_name), params, HTTP_ACCEPT="text/html")
expected_url = '/login?{}'.format(self._finish_auth_url_param(params + [('next', '/dashboard')]))
self.assertNotContains(response, expected_url)
# Add additional parameters:
params = [
('course_id', 'edX/DemoX/Demo_Course'),
('enrollment_action', 'enroll'),
('course_mode', CourseMode.DEFAULT_MODE_SLUG),
('email_opt_in', 'true'),
('next', '/custom/final/destination')
]
# Verify that this parameter is also preserved
with with_comprehensive_theme_context(theme):
response = self.client.get(reverse(url_name), params, HTTP_ACCEPT="text/html")
expected_url = '/login?{}'.format(self._finish_auth_url_param(params))
self.assertNotContains(response, expected_url)
@mock.patch.dict(settings.FEATURES, {"ENABLE_THIRD_PARTY_AUTH": False})
@ddt.data("signin_user", "register_user")
def test_third_party_auth_disabled(self, url_name):
response = self.client.get(reverse(url_name))
self._assert_third_party_auth_data(response, None, None, [], None)
@mock.patch('openedx.core.djangoapps.user_authn.views.login_form.enterprise_customer_for_request')
@mock.patch('openedx.core.djangoapps.user_api.api.enterprise_customer_for_request')
@ddt.data(
("signin_user", None, None, None, False),
("register_user", None, None, None, False),
("signin_user", "google-oauth2", "Google", None, False),
("register_user", "google-oauth2", "Google", None, False),
("signin_user", "facebook", "Facebook", None, False),
("register_user", "facebook", "Facebook", None, False),
("signin_user", "dummy", "Dummy", None, False),
("register_user", "dummy", "Dummy", None, False),
(
"signin_user",
"google-oauth2",
"Google",
{
'name': 'FakeName',
'logo': 'https://host.com/logo.jpg',
'welcome_msg': 'No message'
},
True
)
)
@ddt.unpack
def test_third_party_auth(
self,
url_name,
current_backend,
current_provider,
expected_enterprise_customer_mock_attrs,
add_user_details,
enterprise_customer_mock_1,
enterprise_customer_mock_2
):
params = [
('course_id', 'course-v1:Org+Course+Run'),
('enrollment_action', 'enroll'),
('course_mode', CourseMode.DEFAULT_MODE_SLUG),
('email_opt_in', 'true'),
('next', '/custom/final/destination'),
]
if expected_enterprise_customer_mock_attrs:
expected_ec = {
'name': expected_enterprise_customer_mock_attrs['name'],
'branding_configuration': {
'logo': 'https://host.com/logo.jpg',
'welcome_message': expected_enterprise_customer_mock_attrs['welcome_msg']
}
}
else:
expected_ec = None
email = None
if add_user_details:
email = 'test@test.com'
enterprise_customer_mock_1.return_value = expected_ec
enterprise_customer_mock_2.return_value = expected_ec
# Simulate a running pipeline
if current_backend is not None:
pipeline_target = "openedx.core.djangoapps.user_authn.views.login_form.third_party_auth.pipeline"
with simulate_running_pipeline(pipeline_target, current_backend, email=email):
response = self.client.get(reverse(url_name), params, HTTP_ACCEPT="text/html")
# Do NOT simulate a running pipeline
else:
response = self.client.get(reverse(url_name), params, HTTP_ACCEPT="text/html")
# This relies on the THIRD_PARTY_AUTH configuration in the test settings
expected_providers = [
{
"id": "oa2-dummy",
"name": "Dummy",
"iconClass": None,
"iconImage": settings.MEDIA_URL + "icon.svg",
"loginUrl": self._third_party_login_url("dummy", "login", params),
"registerUrl": self._third_party_login_url("dummy", "register", params)
},
{
"id": "oa2-facebook",
"name": "Facebook",
"iconClass": "fa-facebook",
"iconImage": None,
"loginUrl": self._third_party_login_url("facebook", "login", params),
"registerUrl": self._third_party_login_url("facebook", "register", params)
},
{
"id": "oa2-google-oauth2",
"name": "Google",
"iconClass": "fa-google-plus",
"iconImage": None,
"loginUrl": self._third_party_login_url("google-oauth2", "login", params),
"registerUrl": self._third_party_login_url("google-oauth2", "register", params)
},
]
self._assert_third_party_auth_data(
response,
current_backend,
current_provider,
expected_providers,
expected_ec,
add_user_details
)
def _configure_testshib_provider(self, provider_name, idp_slug):
    """
    Enable and configure the TestShib SAML IdP as a third_party_auth provider.

    ``provider_name`` becomes the display name and ``idp_slug`` the provider
    slug; everything else is fixed TestShib configuration.
    """
    # The original filled an empty dict via setdefault(); a literal says
    # the same thing directly.
    provider_config = {
        'name': provider_name,
        'enabled': True,
        'visible': True,
        'slug': idp_slug,
        'entity_id': 'https://idp.testshib.org/idp/shibboleth',
        'metadata_source': 'https://mock.testshib.org/metadata/testshib-providers.xml',
        'icon_class': 'fa-university',
        'attr_email': 'dummy-email-attr',
        'max_session_length': None,
    }
    self.configure_saml_provider(**provider_config)
@mock.patch('django.conf.settings.MESSAGE_STORAGE', 'django.contrib.messages.storage.cookie.CookieStorage')
@mock.patch('openedx.core.djangoapps.user_authn.views.login_form.enterprise_customer_for_request')
@ddt.data(
(
'signin_user',
'tpa-saml',
'TestShib',
)
)
@ddt.unpack
def test_saml_auth_with_error(
self,
url_name,
current_backend,
current_provider,
enterprise_customer_mock,
):
params = []
request = RequestFactory().get(reverse(url_name), params, HTTP_ACCEPT='text/html')
SessionMiddleware().process_request(request)
request.user = AnonymousUser()
self.enable_saml()
dummy_idp = 'testshib'
self._configure_testshib_provider(current_provider, dummy_idp)
enterprise_customer_data = {
'uuid': '72416e52-8c77-4860-9584-15e5b06220fb',
'name': 'Dummy Enterprise',
'identity_provider': dummy_idp,
}
enterprise_customer_mock.return_value = enterprise_customer_data
dummy_error_message = 'Authentication failed: SAML login failed ' \
'["invalid_response"] [SAML Response must contain 1 assertion]'
# Add error message for error in auth pipeline
MessageMiddleware().process_request(request)
messages.error(request, dummy_error_message, extra_tags='social-auth')
# Simulate a running pipeline
pipeline_response = {
'response': {
'idp_name': dummy_idp
}
}
pipeline_target = 'openedx.core.djangoapps.user_authn.views.login_form.third_party_auth.pipeline'
with simulate_running_pipeline(pipeline_target, current_backend, **pipeline_response):
with mock.patch('edxmako.request_context.get_current_request', return_value=request):
response = login_and_registration_form(request)
expected_error_message = Text(_(
u'We are sorry, you are not authorized to access {platform_name} via this channel. '
u'Please contact your learning administrator or manager in order to access {platform_name}.'
u'{line_break}{line_break}'
u'Error Details:{line_break}{error_message}')
).format(
platform_name=settings.PLATFORM_NAME,
error_message=dummy_error_message,
line_break=HTML('<br/>')
)
self._assert_saml_auth_data_with_error(
response,
current_backend,
current_provider,
expected_error_message
)
def test_hinted_login(self):
    """
    Verify the login page honors a ``?tpa_hint=`` provider hint.

    Three cases: a hard-coded enabled provider, a hidden-but-enabled
    provider, and a hidden *disabled* provider (whose id must not appear
    in the rendered page).
    """
    params = [("next", "/courses/something/?tpa_hint=oa2-google-oauth2")]
    response = self.client.get(reverse('signin_user'), params, HTTP_ACCEPT="text/html")
    self.assertContains(response, '"third_party_auth_hint": "oa2-google-oauth2"')

    tpa_hint = self.hidden_enabled_provider.provider_id
    params = [("next", "/courses/something/?tpa_hint={0}".format(tpa_hint))]
    response = self.client.get(reverse('signin_user'), params, HTTP_ACCEPT="text/html")
    self.assertContains(response, u'"third_party_auth_hint": "{0}"'.format(tpa_hint))

    tpa_hint = self.hidden_disabled_provider.provider_id
    params = [("next", "/courses/something/?tpa_hint={0}".format(tpa_hint))]
    response = self.client.get(reverse('signin_user'), params, HTTP_ACCEPT="text/html")
    # Bug fix: the original called assertNotIn(response.content, tpa_hint)
    # — arguments reversed (member, container), so it passed vacuously.
    # Assert the disabled provider's id is genuinely absent from the page.
    self.assertNotContains(response, tpa_hint)
@ddt.data(
('signin_user', 'login'),
('register_user', 'register'),
)
@ddt.unpack
def test_hinted_login_dialog_disabled(self, url_name, auth_entry):
"""Test that the dialog doesn't show up for hinted logins when disabled. """
self.google_provider.skip_hinted_login_dialog = True
self.google_provider.save()
params = [("next", "/courses/something/?tpa_hint=oa2-google-oauth2")]
response = self.client.get(reverse(url_name), params, HTTP_ACCEPT="text/html")
expected_url = '/auth/login/google-oauth2/?auth_entry={}&next=%2Fcourses'\
'%2Fsomething%2F%3Ftpa_hint%3Doa2-google-oauth2'.format(auth_entry)
self.assertRedirects(
response,
expected_url,
target_status_code=302
)
@override_settings(FEATURES=dict(settings.FEATURES, THIRD_PARTY_AUTH_HINT='oa2-google-oauth2'))
@ddt.data(
    'signin_user',
    'register_user',
)
def test_settings_tpa_hinted_login(self, url_name):
    """
    Ensure that settings.FEATURES['THIRD_PARTY_AUTH_HINT'] can set third_party_auth_hint.
    """
    params = [("next", "/courses/something/")]
    response = self.client.get(reverse(url_name), params, HTTP_ACCEPT="text/html")
    self.assertContains(response, '"third_party_auth_hint": "oa2-google-oauth2"')

    # THIRD_PARTY_AUTH_HINT can be overridden via the query string
    tpa_hint = self.hidden_enabled_provider.provider_id
    params = [("next", "/courses/something/?tpa_hint={0}".format(tpa_hint))]
    response = self.client.get(reverse(url_name), params, HTTP_ACCEPT="text/html")
    self.assertContains(response, u'"third_party_auth_hint": "{0}"'.format(tpa_hint))

    # Even disabled providers in the query string will override THIRD_PARTY_AUTH_HINT
    tpa_hint = self.hidden_disabled_provider.provider_id
    params = [("next", "/courses/something/?tpa_hint={0}".format(tpa_hint))]
    response = self.client.get(reverse(url_name), params, HTTP_ACCEPT="text/html")
    # Bug fix: the original called assertNotIn(response.content, tpa_hint)
    # — arguments reversed (member, container), so it passed vacuously.
    # Assert the disabled provider's id is genuinely absent from the page.
    self.assertNotContains(response, tpa_hint)
@override_settings(FEATURES=dict(settings.FEATURES, THIRD_PARTY_AUTH_HINT='oa2-google-oauth2'))
@ddt.data(
('signin_user', 'login'),
('register_user', 'register'),
)
@ddt.unpack
def test_settings_tpa_hinted_login_dialog_disabled(self, url_name, auth_entry):
"""Test that the dialog doesn't show up for hinted logins when disabled via settings.THIRD_PARTY_AUTH_HINT. """
self.google_provider.skip_hinted_login_dialog = True
self.google_provider.save()
params = [("next", "/courses/something/")]
response = self.client.get(reverse(url_name), params, HTTP_ACCEPT="text/html")
expected_url = '/auth/login/google-oauth2/?auth_entry={}&next=%2Fcourses'\
'%2Fsomething%2F%3Ftpa_hint%3Doa2-google-oauth2'.format(auth_entry)
self.assertRedirects(
response,
expected_url,
target_status_code=302
)
@mock.patch('openedx.core.djangoapps.user_authn.views.login_form.enterprise_customer_for_request')
@ddt.data(
('signin_user', False, None, None),
('register_user', False, None, None),
('signin_user', True, 'Fake EC', 'http://logo.com/logo.jpg'),
('register_user', True, 'Fake EC', 'http://logo.com/logo.jpg'),
('signin_user', True, 'Fake EC', None),
('register_user', True, 'Fake EC', None),
)
@ddt.unpack
def test_enterprise_register(self, url_name, ec_present, ec_name, logo_url, mock_get_ec):
"""
Verify that when an EnterpriseCustomer is received on the login and register views,
the appropriate sidebar is rendered.
"""
if ec_present:
mock_get_ec.return_value = {
'name': ec_name,
'branding_configuration': {'logo': logo_url}
}
else:
mock_get_ec.return_value = None
response = self.client.get(reverse(url_name), HTTP_ACCEPT="text/html")
enterprise_sidebar_div_id = u'enterprise-content-container'
if not ec_present:
self.assertNotContains(response, text=enterprise_sidebar_div_id)
else:
self.assertContains(response, text=enterprise_sidebar_div_id)
welcome_message = settings.ENTERPRISE_SPECIFIC_BRANDED_WELCOME_TEMPLATE
expected_message = Text(welcome_message).format(
start_bold=HTML('<b>'),
end_bold=HTML('</b>'),
line_break=HTML('<br/>'),
enterprise_name=ec_name,
platform_name=settings.PLATFORM_NAME,
privacy_policy_link_start=HTML(u"<a href='{pp_url}' target='_blank'>").format(
pp_url=settings.MKTG_URLS.get('PRIVACY', 'https://www.edx.org/edx-privacy-policy')
),
privacy_policy_link_end=HTML("</a>"),
)
self.assertContains(response, expected_message)
if logo_url:
self.assertContains(response, logo_url)
def test_enterprise_cookie_delete(self):
"""
Test that enterprise cookies are deleted in login/registration views.
Cookies must be deleted in login/registration views so that *default* login/registration branding
is displayed to subsequent requests from non-enterprise customers.
"""
cookies = SimpleCookie()
cookies[settings.ENTERPRISE_CUSTOMER_COOKIE_NAME] = 'test-enterprise-customer'
response = self.client.get(reverse('signin_user'), HTTP_ACCEPT="text/html", cookies=cookies)
self.assertIn(settings.ENTERPRISE_CUSTOMER_COOKIE_NAME, response.cookies)
enterprise_cookie = response.cookies[settings.ENTERPRISE_CUSTOMER_COOKIE_NAME]
self.assertEqual(enterprise_cookie['domain'], settings.BASE_COOKIE_DOMAIN)
self.assertEqual(enterprise_cookie.value, '')
def test_login_registration_xframe_protected(self):
resp = self.client.get(
reverse("register_user"),
{},
HTTP_REFERER="http://localhost/iframe"
)
self.assertEqual(resp['X-Frame-Options'], 'DENY')
self.configure_lti_provider(name='Test', lti_hostname='localhost', lti_consumer_key='test_key', enabled=True)
resp = self.client.get(
reverse("register_user"),
HTTP_REFERER="http://localhost/iframe"
)
self.assertEqual(resp['X-Frame-Options'], 'ALLOW')
def _assert_third_party_auth_data(self, response, current_backend, current_provider, providers, expected_ec,
add_user_details=False):
"""Verify that third party auth info is rendered correctly in a DOM data attribute. """
finish_auth_url = None
if current_backend:
finish_auth_url = reverse("social:complete", kwargs={"backend": current_backend}) + "?"
auth_info = {
"currentProvider": current_provider,
"providers": providers,
"secondaryProviders": [],
"finishAuthUrl": finish_auth_url,
"errorMessage": None,
"registerFormSubmitButtonText": "Create Account",
"syncLearnerProfileData": False,
"pipeline_user_details": {"email": "test@test.com"} if add_user_details else {}
}
if expected_ec is not None:
# If we set an EnterpriseCustomer, third-party auth providers ought to be hidden.
auth_info['providers'] = []
auth_info = dump_js_escaped_json(auth_info)
expected_data = u'"third_party_auth": {auth_info}'.format(
auth_info=auth_info
)
self.assertContains(response, expected_data)
def _assert_saml_auth_data_with_error(
self, response, current_backend, current_provider, expected_error_message
):
"""
Verify that third party auth info is rendered correctly in a DOM data attribute.
"""
finish_auth_url = None
if current_backend:
finish_auth_url = reverse('social:complete', kwargs={'backend': current_backend}) + '?'
auth_info = {
'currentProvider': current_provider,
'providers': [],
'secondaryProviders': [],
'finishAuthUrl': finish_auth_url,
'errorMessage': expected_error_message,
'registerFormSubmitButtonText': 'Create Account',
'syncLearnerProfileData': False,
'pipeline_user_details': {'response': {'idp_name': 'testshib'}}
}
auth_info = dump_js_escaped_json(auth_info)
expected_data = u'"third_party_auth": {auth_info}'.format(
auth_info=auth_info
)
self.assertContains(response, expected_data)
def _third_party_login_url(self, backend_name, auth_entry, login_params):
"""Construct the login URL to start third party authentication. """
return u"{url}?auth_entry={auth_entry}&{param_str}".format(
url=reverse("social:begin", kwargs={"backend": backend_name}),
auth_entry=auth_entry,
param_str=self._finish_auth_url_param(login_params),
)
def _finish_auth_url_param(self, params):
"""
Make the next=... URL parameter that indicates where the user should go next.
>>> _finish_auth_url_param([('next', '/dashboard')])
'/account/finish_auth?next=%2Fdashboard'
"""
return urlencode({
'next': '/account/finish_auth?{}'.format(urlencode(params))
})
def test_english_by_default(self):
response = self.client.get(reverse('signin_user'), [], HTTP_ACCEPT="text/html")
self.assertEqual(response['Content-Language'], 'en')
def test_unsupported_language(self):
response = self.client.get(reverse('signin_user'), [], HTTP_ACCEPT="text/html", HTTP_ACCEPT_LANGUAGE="ts-zx")
self.assertEqual(response['Content-Language'], 'en')
def test_browser_language(self):
response = self.client.get(reverse('signin_user'), [], HTTP_ACCEPT="text/html", HTTP_ACCEPT_LANGUAGE="es")
self.assertEqual(response['Content-Language'], 'es-419')
def test_browser_language_dialent(self):
    # NOTE(review): "dialent" is a typo for "dialect"; kept to avoid
    # renaming a test that runners discover by name.
    # A regional Accept-Language ("es-es") is served as-is rather than
    # falling back to the generic "es-419" used for plain "es".
    response = self.client.get(reverse('signin_user'), [], HTTP_ACCEPT="text/html", HTTP_ACCEPT_LANGUAGE="es-es")
    self.assertEqual(response['Content-Language'], 'es-es')
@skip_unless_lms
class AccountCreationTestCaseWithSiteOverrides(SiteMixin, TestCase):
"""
Test cases for Feature flag ALLOW_PUBLIC_ACCOUNT_CREATION which when
turned off disables the account creation options in lms
"""
def setUp(self):
"""Set up the tests"""
super(AccountCreationTestCaseWithSiteOverrides, self).setUp()
# Set the feature flag ALLOW_PUBLIC_ACCOUNT_CREATION to False
self.site_configuration_values = {
'ALLOW_PUBLIC_ACCOUNT_CREATION': False
}
self.site_domain = 'testserver1.com'
self.set_up_site(self.site_domain, self.site_configuration_values)
def test_register_option_login_page(self):
"""
Navigate to the login page and check the Register option is hidden when
ALLOW_PUBLIC_ACCOUNT_CREATION flag is turned off
"""
response = self.client.get(reverse('signin_user'))
self.assertNotIn(u'<a class="btn-neutral" href="/register?next=%2Fdashboard">Register</a>',
response.content.decode(response.charset))
|
jolyonb/edx-platform
|
openedx/core/djangoapps/user_authn/views/tests/test_views.py
|
Python
|
agpl-3.0
| 37,200
|
[
"VisIt"
] |
4dc4c513c1e42ea13585bc4a71a0eaa8b8e5a4910d1d782c54ab4f6a1c992351
|
"""
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
ident = '$Id: SOAPBuilder.py 1498 2010-03-12 02:13:19Z pooryorick $'
from version import __version__
import cgi
from wstools.XMLname import toXMLname, fromXMLname
# SOAPpy modules
from Config import Config
from NS import NS
from Types import *
# Test whether this Python version has Types.BooleanType
# If it doesn't have it, then False and True are serialized as integers
try:
BooleanType
pythonHasBooleanType = 1
except NameError:
pythonHasBooleanType = 0
################################################################################
# SOAP Builder
################################################################################
class SOAPBuilder:
_xml_top = '<?xml version="1.0"?>\n'
_xml_enc_top = '<?xml version="1.0" encoding="%s"?>\n'
_env_top = ( '%(ENV_T)s:Envelope\n' + \
' %(ENV_T)s:encodingStyle="%(ENC)s"\n' ) % \
NS.__dict__
_env_bot = '</%(ENV_T)s:Envelope>\n' % NS.__dict__
# Namespaces potentially defined in the Envelope tag.
_env_ns = {NS.ENC: NS.ENC_T, NS.ENV: NS.ENV_T,
NS.XSD: NS.XSD_T, NS.XSD2: NS.XSD2_T, NS.XSD3: NS.XSD3_T,
NS.XSI: NS.XSI_T, NS.XSI2: NS.XSI2_T, NS.XSI3: NS.XSI3_T}
def __init__(self, args = (), kw = {}, method = None, namespace = None,
header = None, methodattrs = None, envelope = 1, encoding = 'UTF-8',
use_refs = 0, config = Config, noroot = 0):
# Test the encoding, raising an exception if it's not known
if encoding != None:
''.encode(encoding)
self.args = args
self.kw = kw
self.envelope = envelope
self.encoding = encoding
self.method = method
self.namespace = namespace
self.header = header
self.methodattrs= methodattrs
self.use_refs = use_refs
self.config = config
self.out = []
self.tcounter = 0
self.ncounter = 1
self.icounter = 1
self.envns = {}
self.ids = {}
self.depth = 0
self.multirefs = []
self.multis = 0
self.body = not isinstance(args, bodyType)
self.noroot = noroot
def build(self):
if self.config.debug: print "In build."
ns_map = {}
# Cache whether typing is on or not
typed = self.config.typed
if self.header:
# Create a header.
self.dump(self.header, "Header", typed = typed)
#self.header = None # Wipe it out so no one is using it.
if self.body:
# Call genns to record that we've used SOAP-ENV.
self.depth += 1
body_ns = self.genns(ns_map, NS.ENV)[0]
self.out.append("<%sBody>\n" % body_ns)
if self.method:
# Save the NS map so that it can be restored when we
# fall out of the scope of the method definition
save_ns_map = ns_map.copy()
self.depth += 1
a = ''
if self.methodattrs:
for (k, v) in self.methodattrs.items():
a += ' %s="%s"' % (k, v)
if self.namespace: # Use the namespace info handed to us
methodns, n = self.genns(ns_map, self.namespace)
else:
methodns, n = '', ''
self.out.append('<%s%s%s%s%s>\n' % (
methodns, self.method, n, a, self.genroot(ns_map)))
try:
if type(self.args) != TupleType:
args = (self.args,)
else:
args = self.args
for i in args:
self.dump(i, typed = typed, ns_map = ns_map)
if hasattr(self.config, "argsOrdering") and self.config.argsOrdering.has_key(self.method):
for k in self.config.argsOrdering.get(self.method):
self.dump(self.kw.get(k), k, typed = typed, ns_map = ns_map)
else:
for (k, v) in self.kw.items():
self.dump(v, k, typed = typed, ns_map = ns_map)
except RecursionError:
if self.use_refs == 0:
# restart
b = SOAPBuilder(args = self.args, kw = self.kw,
method = self.method, namespace = self.namespace,
header = self.header, methodattrs = self.methodattrs,
envelope = self.envelope, encoding = self.encoding,
use_refs = 1, config = self.config)
return b.build()
raise
if self.method:
self.out.append("</%s%s>\n" % (methodns, self.method))
# End of the method definition; drop any local namespaces
ns_map = save_ns_map
self.depth -= 1
if self.body:
# dump may add to self.multirefs, but the for loop will keep
# going until it has used all of self.multirefs, even those
# entries added while in the loop.
self.multis = 1
for obj, tag in self.multirefs:
self.dump(obj, tag, typed = typed, ns_map = ns_map)
self.out.append("</%sBody>\n" % body_ns)
self.depth -= 1
if self.envelope:
e = map (lambda ns: ' xmlns:%s="%s"\n' % (ns[1], ns[0]),
self.envns.items())
self.out = ['<', self._env_top] + e + ['>\n'] + \
self.out + \
[self._env_bot]
if self.encoding != None:
self.out.insert(0, self._xml_enc_top % self.encoding)
return ''.join(self.out).encode(self.encoding)
self.out.insert(0, self._xml_top)
return ''.join(self.out)
def gentag(self):
    # Generate a fresh element name ("v1", "v2", ...) for values that
    # were passed without an explicit tag.
    if self.config.debug: print "In gentag."

    self.tcounter += 1
    return "v%d" % self.tcounter
def genns(self, ns_map, nsURI):
    """Return a (prefix, declaration) pair for namespace *nsURI*.

    The prefix includes a trailing ':' (or is '' for a default xmlns);
    the declaration is the ' xmlns...' attribute text to splice into the
    current tag, or '' when the namespace is already in scope.
    """
    if nsURI == None:
        return ('', '')

    # nsURI may be a (prefix, uri) tuple requesting a specific prefix,
    # or a 1-tuple carrying just the URI.
    if type(nsURI) == TupleType: # already a tuple
        if len(nsURI) == 2:
            ns, nsURI = nsURI
        else:
            ns, nsURI = None, nsURI[0]
    else:
        ns = None

    if ns_map.has_key(nsURI):
        # Already declared in this scope; reuse the existing prefix.
        return (ns_map[nsURI] + ':', '')

    if self._env_ns.has_key(nsURI):
        # Well-known SOAP/XSD/XSI namespace: record it for declaration
        # on the Envelope tag and reuse its canonical prefix.
        ns = self.envns[nsURI] = ns_map[nsURI] = self._env_ns[nsURI]
        return (ns + ':', '')

    if not ns:
        # Mint a fresh prefix ("ns1", "ns2", ...).
        ns = "ns%d" % self.ncounter
        self.ncounter += 1
    ns_map[nsURI] = ns
    if self.config.buildWithNamespacePrefix:
        return (ns + ':', ' xmlns:%s="%s"' % (ns, nsURI))
    elif self.config.buildWithGlobalNamespacePrefix:
        # Declare once, globally, on the Envelope instead of inline.
        self.envns[nsURI] = ns
        return (ns + ':', '')
    else:
        # Unprefixed mode: emit a default-namespace declaration inline.
        return ('', ' xmlns="%s"' % (nsURI))
def genroot(self, ns_map):
    """Return the SOAP-ENC root attribute for a Body-level element.

    root="1" marks an independent element, root="0" one emitted during
    the multi-reference pass; elements deeper than depth 2 (or when
    noroot is set) get no attribute at all.
    """
    if self.noroot:
        return ''

    if self.depth != 2:
        return ''

    ns, n = self.genns(ns_map, NS.ENC)
    return ' %sroot="%d"%s' % (ns, not self.multis, n)
# checkref checks an element to see if it needs to be encoded as a
# multi-reference element or not. If it returns None, the element has
# been handled and the caller can continue with subsequent elements.
# If it returns a string, the string should be included in the opening
# tag of the marshaled element.
def checkref(self, obj, tag, ns_map):
    """Multi-reference bookkeeping for *obj*.

    Returns '' or an ' id="iN"' attribute string to include in the
    element's opening tag, or None when an href stub was emitted here
    and the caller should skip serializing the element itself.
    """
    if self.depth < 2:
        return ''

    if not self.ids.has_key(id(obj)):
        # First sighting of this object: assign it the next id number.
        n = self.ids[id(obj)] = self.icounter
        self.icounter = n + 1

        if self.use_refs == 0:
            return ''

        if self.depth == 2:
            return ' id="i%d"' % n

        # Nested occurrence: defer the real serialization to the
        # multi-reference pass at the end of the Body.
        self.multirefs.append((obj, tag))
    else:
        # Seen before: without use_refs this can only mean a cycle.
        if self.use_refs == 0:
            raise RecursionError, "Cannot serialize recursive object"

        n = self.ids[id(obj)]

        if self.multis and self.depth == 2:
            # We are in the multi-reference pass itself: emit the real
            # element, carrying its id.
            return ' id="i%d"' % n

    # Both remaining paths emit only an href stub pointing at the
    # element serialized (or to be serialized) elsewhere.
    self.out.append('<%s href="#i%d"%s/>\n' %
                    (tag, n, self.genroot(ns_map)))
    return None
# dumpers
def dump(self, obj, tag = None, typed = 1, ns_map = {}):
if self.config.debug: print "In dump.", "obj=", obj
ns_map = ns_map.copy()
self.depth += 1
if type(tag) not in (NoneType, StringType, UnicodeType):
raise KeyError, "tag must be a string or None"
self.dump_dispatch(obj, tag, typed, ns_map)
self.depth -= 1
# generic dumper
def dumper(self, nsURI, obj_type, obj, tag, typed = 1, ns_map = {},
rootattr = '', id = '',
xml = '<%(tag)s%(type)s%(id)s%(attrs)s%(root)s>%(data)s</%(tag)s>\n'):
if self.config.debug: print "In dumper."
if nsURI == None:
nsURI = self.config.typesNamespaceURI
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
a = n = t = ''
if typed and obj_type:
ns, n = self.genns(ns_map, nsURI)
ins = self.genns(ns_map, self.config.schemaNamespaceURI)[0]
t = ' %stype="%s%s"%s' % (ins, ns, obj_type, n)
try: a = obj._marshalAttrs(ns_map, self)
except: pass
try: data = obj._marshalData()
except:
if (obj_type != "string"): # strings are already encoded
data = cgi.escape(str(obj))
else:
data = obj
return xml % {"tag": tag, "type": t, "data": data, "root": rootattr,
"id": id, "attrs": a}
def dump_float(self, obj, tag, typed = 1, ns_map = {}):
if self.config.debug: print "In dump_float."
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
if self.config.strict_range:
doubleType(obj)
if PosInf == obj:
obj = "INF"
elif NegInf == obj:
obj = "-INF"
elif NaN == obj:
obj = "NaN"
else:
obj = repr(obj)
# Note: python 'float' is actually a SOAP 'double'.
self.out.append(self.dumper(
None, "double", obj, tag, typed, ns_map, self.genroot(ns_map)))
def dump_int(self, obj, tag, typed = 1, ns_map = {}):
if self.config.debug: print "In dump_int."
# fix error "Bad types (class java.math.BigInteger -> class java.lang.Integer)"
if isinstance(obj, LongType):
obj_type = "integer"
else:
obj_type = "int"
self.out.append(self.dumper(None, obj_type, obj, tag, typed,
ns_map, self.genroot(ns_map)))
def dump_bool(self, obj, tag, typed = 1, ns_map = {}):
if self.config.debug: print "In dump_bool."
self.out.append(self.dumper(None, 'boolean', obj, tag, typed,
ns_map, self.genroot(ns_map)))
def dump_string(self, obj, tag, typed = 0, ns_map = {}):
if self.config.debug: print "In dump_string."
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
id = self.checkref(obj, tag, ns_map)
if id == None:
return
try: data = obj._marshalData()
except: data = obj
self.out.append(self.dumper(None, "string", cgi.escape(data), tag,
typed, ns_map, self.genroot(ns_map), id))
dump_str = dump_string # For Python 2.2+
dump_unicode = dump_string
def dump_None(self, obj, tag, typed = 0, ns_map = {}):
if self.config.debug: print "In dump_None."
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
ns = self.genns(ns_map, self.config.schemaNamespaceURI)[0]
self.out.append('<%s %snull="1"%s/>\n' %
(tag, ns, self.genroot(ns_map)))
dump_NoneType = dump_None # For Python 2.2+
def dump_list(self, obj, tag, typed = 1, ns_map = {}):
if self.config.debug: print "In dump_list.", "obj=", obj
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
if type(obj) == InstanceType:
data = obj.data
else:
data = obj
if typed:
id = self.checkref(obj, tag, ns_map)
if id == None:
return
try:
sample = data[0]
empty = 0
except:
# preserve type if present
if getattr(obj,"_typed",None) and getattr(obj,"_type",None):
if getattr(obj, "_complexType", None):
sample = typedArrayType(typed=obj._type,
complexType = obj._complexType)
sample._typename = obj._type
if not getattr(obj,"_ns",None): obj._ns = NS.URN
else:
sample = typedArrayType(typed=obj._type)
else:
sample = structType()
empty = 1
# First scan list to see if all are the same type
same_type = 1
if not empty:
for i in data[1:]:
if type(sample) != type(i) or \
(type(sample) == InstanceType and \
sample.__class__ != i.__class__):
same_type = 0
break
ndecl = ''
if same_type:
if (isinstance(sample, structType)) or \
type(sample) == DictType or \
(isinstance(sample, anyType) and \
(getattr(sample, "_complexType", None) and \
sample._complexType)): # force to urn struct
try:
tns = obj._ns or NS.URN
except:
tns = NS.URN
ns, ndecl = self.genns(ns_map, tns)
try:
typename = sample._typename
except:
typename = "SOAPStruct"
t = ns + typename
elif isinstance(sample, anyType):
ns = sample._validNamespaceURI(self.config.typesNamespaceURI,
self.config.strictNamespaces)
if ns:
ns, ndecl = self.genns(ns_map, ns)
t = ns + str(sample._type)
else:
t = 'ur-type'
else:
typename = type(sample).__name__
# For Python 2.2+
if type(sample) == StringType: typename = 'string'
# HACK: unicode is a SOAP string
if type(sample) == UnicodeType: typename = 'string'
# HACK: python 'float' is actually a SOAP 'double'.
if typename=="float": typename="double"
t = self.genns(
ns_map, self.config.typesNamespaceURI)[0] + typename
else:
t = self.genns(ns_map, self.config.typesNamespaceURI)[0] + \
"ur-type"
try: a = obj._marshalAttrs(ns_map, self)
except: a = ''
ens, edecl = self.genns(ns_map, NS.ENC)
ins, idecl = self.genns(ns_map, self.config.schemaNamespaceURI)
if typed:
self.out.append(
'<%s %sarrayType="%s[%d]" %stype="%sArray"%s%s%s%s%s%s>\n' %
(tag, ens, t, len(data), ins, ens, ndecl, edecl, idecl,
self.genroot(ns_map), id, a))
if typed:
try: elemsname = obj._elemsname
except: elemsname = "item"
else:
elemsname = tag
if isinstance(data, (list, tuple, arrayType)):
should_drill = True
else:
should_drill = not same_type
for i in data:
self.dump(i, elemsname, should_drill, ns_map)
if typed: self.out.append('</%s>\n' % tag)
dump_tuple = dump_list
def dump_map(self, obj, tag, typed = 1, ns_map = {}):
if self.config.debug: print "In dump_map.", "obj=", obj
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
if type(obj) == InstanceType:
data = obj.data
else:
data = obj
if typed:
id = self.checkref(obj, tag, ns_map)
if id == None:
return
try: a = obj._marshalAttrs(ns_map, self)
except: a = ''
ndecl = ''
ens, edecl = self.genns(ns_map, 'http://xml.apache.org/xml-soap')
ins, idecl = self.genns(ns_map, self.config.schemaNamespaceURI)
if typed:
self.out.append(
'<%s %stype="%sMap"%s%s%s%s%s%s>\n' %
(tag, ins, ens, ndecl, edecl, idecl,
self.genroot(ns_map), id, a))
try: elemsname = obj._elemsname
except: elemsname = "item"
else:
elemsname = tag
if isinstance(data, (list, tuple, arrayType)):
should_drill = True
else:
should_drill = not same_type
for i in data:
self.dump(i, elemsname, should_drill, ns_map)
if typed: self.out.append('</%s>\n' % tag)
def dump_exception(self, obj, tag, typed = 0, ns_map = {}):
if isinstance(obj, faultType): # Fault
cns, cdecl = self.genns(ns_map, NS.ENC)
vns, vdecl = self.genns(ns_map, NS.ENV)
self.out.append('<%sFault %sroot="1"%s%s>' % (vns, cns, vdecl, cdecl))
self.dump(obj.faultcode, "faultcode", typed, ns_map)
self.dump(obj.faultstring, "faultstring", typed, ns_map)
if hasattr(obj, "detail"):
self.dump(obj.detail, "detail", typed, ns_map)
self.out.append("</%sFault>\n" % vns)
def dump_dictionary(self, obj, tag, typed = 1, ns_map = {}):
    """Serialize a plain dict as a SOAP struct: one child element per key.

    Keys beginning with '_' are treated as private metadata and skipped.
    """
    if self.config.debug: print "In dump_dictionary."

    tag = tag or self.gentag()
    tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding

    # None return means checkref already emitted an href stub for us.
    id = self.checkref(obj, tag, ns_map)
    if id == None:
        return

    try: a = obj._marshalAttrs(ns_map, self)
    except: a = ''

    self.out.append('<%s%s%s%s>\n' %
                    (tag, id, a, self.genroot(ns_map)))

    for (k, v) in obj.items():
        if k[0] != "_":
            self.dump(v, k, 1, ns_map)

    self.out.append('</%s>\n' % tag)

dump_dict = dump_dictionary # For Python 2.2+
def dump_dispatch(self, obj, tag, typed = 1, ns_map = {}):
    """Central dispatcher: route *obj* to the type-specific dump_* method.

    Lookup order: user-configured dumpmap overrides, then the built-in
    dispatch table, then special-cased SOAPpy types (voidType, structType,
    anyType), and finally a generic object serializer.
    """
    if not tag:
        # If it has a name use it.
        if isinstance(obj, anyType) and obj._name:
            tag = obj._name
        else:
            tag = self.gentag()
    # Apply additional types, override built-in types
    for dtype, func in self.config.dumpmap:
        if isinstance(obj, dtype):
            func(self, obj, tag, typed, ns_map)
            return
    # watch out for order!  (e.g. bool before int, since bool is an int
    # subclass; SOAPpy wrapper types before the plain builtins)
    dumpmap = (
        (Exception, self.dump_exception),
        (mapType, self.dump_map),
        (arrayType, self.dump_list),
        (basestring, self.dump_string),
        (NoneType, self.dump_None),
        (bool, self.dump_bool),
        (int, self.dump_int),
        (long, self.dump_int),
        (list, self.dump_list),
        (tuple, self.dump_list),
        (dict, self.dump_dictionary),
        (float, self.dump_float),
    )
    for dtype, func in dumpmap:
        if isinstance(obj, dtype):
            func(obj, tag, typed, ns_map)
            return
    r = self.genroot(ns_map)
    try: a = obj._marshalAttrs(ns_map, self)
    except: a = ''
    if isinstance(obj, voidType):  # void
        self.out.append("<%s%s%s></%s>\n" % (tag, a, r, tag))
    else:
        # Multi-reference bookkeeping; None means a reference was emitted.
        id = self.checkref(obj, tag, ns_map)
        if id == None:
            return
        if isinstance(obj, structType):
            # Check for namespace
            ndecl = ''
            ns = obj._validNamespaceURI(self.config.typesNamespaceURI,
                                        self.config.strictNamespaces)
            if ns:
                ns, ndecl = self.genns(ns_map, ns)
                tag = ns + tag
            self.out.append("<%s%s%s%s%s>\n" % (tag, ndecl, id, a, r))
            # py2: keys() returns a real list we can mutate below.
            keylist = obj.__dict__.keys()
            # first write out items with order information
            if hasattr(obj, '_keyord'):
                for i in range(len(obj._keyord)):
                    self.dump(obj._aslist(i), obj._keyord[i], 1, ns_map)
                    keylist.remove(obj._keyord[i])
            # now write out the rest
            for k in keylist:
                if (k[0] != "_"):
                    self.dump(getattr(obj,k), k, 1, ns_map)
            if isinstance(obj, bodyType):
                # Body element: flush accumulated multi-referenced values.
                self.multis = 1
                for v, k in self.multirefs:
                    self.dump(v, k, typed = typed, ns_map = ns_map)
            self.out.append('</%s>\n' % tag)
        elif isinstance(obj, anyType):
            t = ''
            if typed:
                ns = obj._validNamespaceURI(self.config.typesNamespaceURI,
                                            self.config.strictNamespaces)
                if ns:
                    ons, ondecl = self.genns(ns_map, ns)
                    ins, indecl = self.genns(ns_map,
                                             self.config.schemaNamespaceURI)
                    t = ' %stype="%s%s"%s%s' % \
                        (ins, ons, obj._type, ondecl, indecl)
            self.out.append('<%s%s%s%s%s>%s</%s>\n' %
                            (tag, t, id, a, r, obj._marshalData(), tag))
        else:  # Some Class
            self.out.append('<%s%s%s>\n' % (tag, id, r))
            # Prefer __dict__; fall back to __slots__ for slotted classes.
            d1 = getattr(obj, '__dict__', None)
            if d1 is None and hasattr(obj, "__slots__"):
                d1 = dict(((k, getattr(obj, k)) for k in obj.__slots__))
            if d1 is not None:
                for (k, v) in d1.items():
                    if k[0] != "_":
                        self.dump(v, k, 1, ns_map)
            self.out.append('</%s>\n' % tag)
################################################################################
# SOAPBuilder's more public interface
################################################################################
def buildSOAP(args=(), kw={}, method=None, namespace=None,
              header=None, methodattrs=None, envelope=1, encoding='UTF-8',
              config=Config, noroot = 0):
    """One-shot convenience wrapper: construct a SOAPBuilder with the given
    call parameters and return the fully built SOAP message string."""
    builder = SOAPBuilder(args=args, kw=kw, method=method,
                          namespace=namespace, header=header,
                          methodattrs=methodattrs, envelope=envelope,
                          encoding=encoding, config=config, noroot=noroot)
    return builder.build()
|
burzillibus/RobHome
|
venv/lib/python2.7/site-packages/SOAPpy/SOAPBuilder.py
|
Python
|
mit
| 24,922
|
[
"Brian"
] |
55c2a62a7be995cbc87cc1628d1134164071aa9997eba2dd0cd677ea3a645ddc
|
# Version: 0.12
"""
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, and pypy
[![Build Status]
(https://travis-ci.org/warner/python-versioneer.png?branch=master)]
(https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere on your $PATH
* run `versioneer-installer` in your source tree: this installs `versioneer.py`
* follow the instructions below (also in the `versioneer.py` docstring)
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example 'git describe --tags --dirty --always' reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time. However,
when you use "setup.py build" or "setup.py sdist", `_version.py` in the new
copy is replaced by a small static file that contains just the generated
version data.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the "git archive" command. As a result, generated tarballs will
contain enough information to get the proper version.
## Installation
First, decide on values for the following configuration variables:
* `VCS`: the version control system you use. Currently accepts "git".
* `versionfile_source`:
A project-relative pathname into which the generated version strings should
be written. This is usually a `_version.py` next to your project's main
`__init__.py` file, so it can be imported at runtime. If your project uses
`src/myproject/__init__.py`, this should be `src/myproject/_version.py`.
This file should be checked in to your VCS as usual: the copy created below
by `setup.py versioneer` will include code that parses expanded VCS
keywords in generated tarballs. The 'build' and 'sdist' commands will
replace it with a copy that has just the calculated version string.
This must be set even if your project does not have any modules (and will
therefore never import `_version.py`), since "setup.py sdist" -based trees
still need somewhere to record the pre-calculated version strings. Anywhere
in the source tree should do. If there is a `__init__.py` next to your
`_version.py`, the `setup.py versioneer` command (described below) will
append some `__version__`-setting assignments, if they aren't already
present.
* `versionfile_build`:
Like `versionfile_source`, but relative to the build directory instead of
the source directory. These will differ when your setup.py uses
'package_dir='. If you have `package_dir={'myproject': 'src/myproject'}`,
then you will probably have `versionfile_build='myproject/_version.py'` and
`versionfile_source='src/myproject/_version.py'`.
If this is set to None, then `setup.py build` will not attempt to rewrite
any `_version.py` in the built tree. If your project does not have any
libraries (e.g. if it only builds a script), then you should use
`versionfile_build = None` and override `distutils.command.build_scripts`
to explicitly insert a copy of `versioneer.get_version()` into your
generated script.
* `tag_prefix`:
a string, like 'PROJECTNAME-', which appears at the start of all VCS tags.
If your tags look like 'myproject-1.2.0', then you should use
tag_prefix='myproject-'. If you use unprefixed tags like '1.2.0', this
should be an empty string.
* `parentdir_prefix`:
a string, frequently the same as tag_prefix, which appears at the start of
all unpacked tarball filenames. If your tarball unpacks into
'myproject-1.2.0', this should be 'myproject-'.
This tool provides one script, named `versioneer-installer`. That script does
one thing: write a copy of `versioneer.py` into the current directory.
To versioneer-enable your project:
* 1: Run `versioneer-installer` to copy `versioneer.py` into the top of your
source tree.
* 2: add the following lines to the top of your `setup.py`, with the
configuration values you decided earlier:
import versioneer
versioneer.VCS = 'git'
versioneer.versionfile_source = 'src/myproject/_version.py'
versioneer.versionfile_build = 'myproject/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = (
'myproject-' # dirname like 'myproject-1.2.0')
* 3: add the following arguments to the setup() call in your setup.py:
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
* 4: now run `setup.py versioneer`, which will create `_version.py`, and will
modify your `__init__.py` (if one exists next to `_version.py`) to define
`__version__` (by calling a function from `_version.py`). It will also
modify your `MANIFEST.in` to include both `versioneer.py` and the generated
`_version.py` in sdist tarballs.
* 5: commit these changes to your VCS. To make sure you won't forget,
`setup.py versioneer` will mark everything it touched for addition.
## Post-Installation Usage
Once established, all uses of your tree from a VCS checkout should get the
current version string. All generated tarballs should include an embedded
version string (so users who unpack them will not need a VCS tool installed).
If you distribute your project through PyPI, then the release process should
boil down to two steps:
* 1: git tag 1.0
* 2: python setup.py register sdist upload
If you distribute it through github (i.e. users use github to generate
tarballs with `git archive`), the process is:
* 1: git tag 1.0
* 2: git push; git push --tags
Currently, all version strings must be based upon a tag. Versioneer will
report "unknown" until your tree has at least one tag in its history. This
restriction will be fixed eventually (see issue #12).
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different keys for different flavors
of the version string:
* `['version']`: condensed tag+distance+shortid+dirty identifier. For git,
this uses the output of `git describe --tags --dirty --always` but strips
the tag_prefix. For example "0.11-2-g1076c97-dirty" indicates that the tree
is like the "1076c97" commit but has uncommitted changes ("-dirty"), and
that this commit is two revisions ("-2-") beyond the "0.11" tag. For
released software (exactly equal to a known tag), the identifier will only
contain the stripped tag, e.g. "0.11".
* `['full']`: detailed revision identifier. For Git, this is the full SHA1
commit id, followed by "-dirty" if the tree contains uncommitted changes,
e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac-dirty".
Some variants are more useful than others. Including `full` in a bug report
should allow developers to reconstruct the exact code being tested (or
indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
In the future, this will also include a
[PEP-0440](http://legacy.python.org/dev/peps/pep-0440/) -compatible flavor
(e.g. `1.2.post0.dev123`). This loses a lot of information (and has no room
for a hash-based revision id), but is safe to use in a `setup.py`
"`version=`" argument. It also enables tools like *pip* to compare version
strings and evaluate compatibility constraint declarations.
The `setup.py versioneer` command adds the following text to your
`__init__.py` to place a basic version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* re-run `versioneer-installer` in your source tree to replace your copy of
`versioneer.py`
* edit `setup.py`, if necessary, to include any new configuration settings
indicated by the release notes
* re-run `setup.py versioneer` to replace `SRC/_version.py`
* commit any changed files
### Upgrading from 0.10 to 0.11
You must add a `versioneer.VCS = "git"` to your `setup.py` before re-running
`setup.py versioneer`. This will enable the use of additional version-control
systems (SVN, etc) in the future.
### Upgrading from 0.11 to 0.12
Nothing special.
## Future Directions
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## License
To make Versioneer easier to embed, all its code is hereby released into the
public domain. The `_version.py` that it creates is also in the public
domain.
"""
import os
import sys
import re
import subprocess
import errno
from distutils.core import Command
from distutils.command.sdist import sdist as _sdist
from distutils.command.build import build as _build
# these configuration settings will be overridden by setup.py after it
# imports us
versionfile_source = None   # project-relative path of the generated _version.py
versionfile_build = None    # same file, relative to the build directory (or None)
tag_prefix = None           # prefix shared by all release tags (may be "")
parentdir_prefix = None     # prefix of unpacked source-tarball directory names
VCS = None                  # name of the version-control system, e.g. "git"
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}        # maps VCS name -> full _version.py template text
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
    """Run the first available executable from *commands* with *args*.

    Each candidate spelling in *commands* is tried until one launches
    (ENOENT falls through to the next).  Returns the stripped stdout text,
    or None when no command could be found, launching failed, or the
    process exited non-zero.
    """
    assert isinstance(commands, list)
    p = None
    for candidate in commands:
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen(
                [candidate] + args, cwd=cwd, stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None))
        except EnvironmentError:
            err = sys.exc_info()[1]  # py2.6-compatible "except ... as e"
            if err.errno == errno.ENOENT:
                continue  # this spelling is not installed; try the next
            if verbose:
                print("unable to run %s" % args[0])
                print(err)
            return None
        else:
            break
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None
    stdout = p.communicate()[0].strip()
    if sys.version >= '3':
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % args[0])
        return None
    return stdout
# Template for the VCS-aware _version.py embedded into projects.  It is
# rendered with %-substitution (the %(DOLLAR)s / %(TAG_PREFIX)s / ...
# placeholders), which is why every literal "%" inside it is doubled.
# FIX: the verbose print() calls in versions_from_parentdir and
# git_versions_from_vcs contained unterminated string literals (the message
# was split across lines without continuation), so the generated
# _version.py raised SyntaxError; they now use adjacent string literals
# with an explicit continuation.
LONG_VERSION_PY['git'] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.12 (https://github.com/warner/python-versioneer)
# these strings will be replaced by git during git-archive
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
# these strings are filled in when 'setup.py versioneer' creates _version.py
tag_prefix = "%(TAG_PREFIX)s"
parentdir_prefix = "%(PARENTDIR_PREFIX)s"
versionfile_source = "%(VERSIONFILE_SOURCE)s"
import os, sys, re, subprocess, errno
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except EnvironmentError:
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                continue
            if verbose:
                print("unable to run %%s" %% args[0])
                print(e)
            return None
    else:
        if verbose:
            print("unable to find command, tried %%s" %% (commands,))
        return None
    stdout = p.communicate()[0].strip()
    if sys.version >= '3':
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %%s (error)" %% args[0])
        return None
    return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
    # Source tarballs conventionally unpack into a directory that includes
    # both the project name and a version string.
    dirname = os.path.basename(root)
    if not dirname.startswith(parentdir_prefix):
        if verbose:
            print("guessing rootdir is '%%s', "
                  "but '%%s' doesn't start with prefix '%%s'" %%
                  (root, dirname, parentdir_prefix))
        return None
    return {"version": dirname[len(parentdir_prefix):], "full": ""}
def git_get_keywords(versionfile_abs):
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        f = open(versionfile_abs,"r")
        for line in f.readlines():
            if line.strip().startswith("git_refnames ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    keywords["refnames"] = mo.group(1)
            if line.strip().startswith("git_full ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    keywords["full"] = mo.group(1)
        f.close()
    except EnvironmentError:
        pass
    return keywords
def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
    if not keywords:
        return {} # keyword-finding function failed to find keywords
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        return {} # unexpanded, so not in an unpacked git-archive tarball
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %%d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r'\d', r)])
        if verbose:
            print("discarding '%%s', no digits" %% ",".join(refs-tags))
    if verbose:
        print("likely tags: %%s" %% ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %%s" %% r)
            return { "version": r,
                     "full": keywords["full"].strip() }
    # no suitable tags, so we use the full revision id
    if verbose:
        print("no suitable tags, using full revision id")
    return { "version": keywords["full"].strip(),
             "full": keywords["full"].strip() }
def git_versions_from_vcs(tag_prefix, root, verbose=False):
    # this runs 'git' from the root of the source tree. This only gets called
    # if the git-archive 'subst' keywords were *not* expanded, and
    # _version.py hasn't already been rewritten with a short version string,
    # meaning we're inside a checked out source tree.
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %%s" %% root)
        return {}
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"],
                         cwd=root)
    if stdout is None:
        return {}
    if not stdout.startswith(tag_prefix):
        if verbose:
            print("tag '%%s' doesn't start with prefix '%%s'" %%
                  (stdout, tag_prefix))
        return {}
    tag = stdout[len(tag_prefix):]
    stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if stdout is None:
        return {}
    full = stdout.strip()
    if tag.endswith("-dirty"):
        full += "-dirty"
    return {"version": tag, "full": full}
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    keywords = { "refnames": git_refnames, "full": git_full }
    ver = git_versions_from_keywords(keywords, tag_prefix, verbose)
    if ver:
        return ver
    try:
        root = os.path.abspath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in range(len(versionfile_source.split(os.sep))):
            root = os.path.dirname(root)
    except NameError:
        return default
    return (git_versions_from_vcs(tag_prefix, root, verbose)
            or versions_from_parentdir(parentdir_prefix, root, verbose)
            or default)
'''
def git_get_keywords(versionfile_abs):
    """Extract the expanded git-archive keyword strings from a _version.py.

    Scans the file textually (rather than importing it, which setup.py must
    avoid) and returns a dict possibly containing 'refnames' and 'full'.
    Unreadable files yield whatever was collected so far (usually {}).
    """
    keywords = {}
    quoted = re.compile(r'=\s*"(.*)"')
    try:
        fobj = open(versionfile_abs, "r")
        for line in fobj.readlines():
            stripped = line.strip()
            if stripped.startswith("git_refnames ="):
                match = quoted.search(line)
                if match:
                    keywords["refnames"] = match.group(1)
            if stripped.startswith("git_full ="):
                match = quoted.search(line)
                if match:
                    keywords["full"] = match.group(1)
        fobj.close()
    except EnvironmentError:
        pass
    return keywords
def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
    """Derive {'version', 'full'} from expanded git-archive keywords.

    Returns {} when the keywords are missing or were never expanded (i.e.
    this is not an unpacked git-archive tarball).  Prefers the smallest
    matching tag; falls back to the full revision id.
    """
    if not keywords:
        return {}  # keyword-finding function failed to find keywords
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        # Never expanded: not inside a git-archive tarball.
        if verbose:
            print("keywords are unexpanded, not using")
        return {}
    refs = set(name.strip() for name in refnames.strip("()").split(","))
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set(name[len(TAG):] for name in refs if name.startswith(TAG))
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags.
        # Heuristic: assume all version tags have a digit, which filters out
        # common branch names like "release", "stabilization", "HEAD" and
        # "master" (the old git %d expansion strips the refs/heads/ and
        # refs/tags/ prefixes that would let us distinguish them).
        tags = set(name for name in refs if re.search(r'\d', name))
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    full = keywords["full"].strip()
    for ref in sorted(tags):  # sorting will prefer e.g. "2.0" over "2.0rc1"
        if not ref.startswith(tag_prefix):
            continue
        version = ref[len(tag_prefix):]
        if verbose:
            print("picking %s" % version)
        return {"version": version, "full": full}
    # no suitable tags, so we use the full revision id
    if verbose:
        print("no suitable tags, using full revision id")
    return {"version": full, "full": full}
def git_versions_from_vcs(tag_prefix, root, verbose=False):
    """Ask the git tool itself for version information.

    Only meaningful inside a checked-out source tree (i.e. *root* contains
    a .git directory); otherwise, or when git fails, returns {}.
    """
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %s" % root)
        return {}
    # shell=False, so on Windows we must name the launchable spellings.
    GITS = ["git.cmd", "git.exe"] if sys.platform == "win32" else ["git"]
    described = run_command(
        GITS, ["describe", "--tags", "--dirty", "--always"], cwd=root)
    if described is None:
        return {}
    if not described.startswith(tag_prefix):
        if verbose:
            print("tag '%s' doesn't start with prefix '%s'"
                  % (described, tag_prefix))
        return {}
    tag = described[len(tag_prefix):]
    sha = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if sha is None:
        return {}
    full = sha.strip()
    if tag.endswith("-dirty"):
        full += "-dirty"
    return {"version": tag, "full": full}
def do_vcs_install(manifest_in, versionfile_source, ipy):
    """git-add every file versioneer touched and make sure .gitattributes
    marks *versionfile_source* export-subst so 'git archive' expands the
    embedded keywords."""
    git_cmds = ["git"]
    if sys.platform == "win32":
        git_cmds = ["git.cmd", "git.exe"]
    to_add = [manifest_in, versionfile_source]
    if ipy:
        to_add.append(ipy)
    # Add versioneer.py itself, in source form (not a compiled .pyc/.pyo).
    try:
        me = __file__
    except NameError:
        to_add.append("versioneer.py")
    else:
        if me.endswith(".pyc") or me.endswith(".pyo"):
            me = os.path.splitext(me)[0] + ".py"
        to_add.append(os.path.relpath(me))
    # Is the export-subst attribute already present?
    present = False
    try:
        with open(".gitattributes", "r") as fobj:
            for line in fobj.readlines():
                stripped = line.strip()
                if stripped.startswith(versionfile_source):
                    if "export-subst" in stripped.split()[1:]:
                        present = True
    except EnvironmentError:
        pass
    if not present:
        with open(".gitattributes", "a+") as fobj:
            fobj.write("%s export-subst\n" % versionfile_source)
        to_add.append(".gitattributes")
    run_command(git_cmds, ["add", "--"] + to_add)
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
    """Infer the version from the directory name *root* was unpacked into.

    Source tarballs conventionally unpack into '<prefix><version>'; returns
    {'version': ..., 'full': ''} on a match, else None.
    """
    dirname = os.path.basename(root)
    if dirname.startswith(parentdir_prefix):
        return {"version": dirname[len(parentdir_prefix):], "full": ""}
    if verbose:
        print("guessing rootdir is '%s',"
              "but '%s' doesn't start with prefix '%s'" %
              (root, dirname, parentdir_prefix))
    return None
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.12) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
version_version = '%(version)s'
version_full = '%(full)s'
def get_versions(default={}, verbose=False):
return {'version': version_version, 'full': version_full}
"""
DEFAULT = {"version": "unknown", "full": "unknown"}
def versions_from_file(filename):
    """Parse a SHORT_VERSION_PY-style file into {'version', 'full'}.

    Missing/unreadable files yield {}; only lines that begin with the
    expected assignments are recognized.
    """
    try:
        with open(filename) as fobj:
            lines = fobj.readlines()
    except EnvironmentError:
        return {}
    found = {}
    patterns = (("version", "version_version = '([^']+)'"),
                ("full", "version_full = '([^']+)'"))
    for line in lines:
        for key, pattern in patterns:
            mo = re.match(pattern, line)
            if mo:
                found[key] = mo.group(1)
    return found
def write_to_version_file(filename, versions):
    """Overwrite *filename* with SHORT_VERSION_PY rendered from *versions*."""
    contents = SHORT_VERSION_PY % versions
    with open(filename, "w") as fobj:
        fobj.write(contents)
    print("set %s to '%s'" % (filename, versions["version"]))
def get_root():
    """Return the directory containing this file (the project root).

    Falls back to sys.argv[0] on interpreters without __file__
    (py2exe/bbfreeze/non-CPython).
    """
    try:
        here = __file__
    except NameError:
        here = sys.argv[0]
    return os.path.dirname(os.path.abspath(here))
def vcs_function(vcs, suffix):
    """Look up the VCS-specific helper named '<vcs>_<suffix>' in this
    module, or None when that VCS does not provide it."""
    name = "%s_%s" % (vcs, suffix)
    return getattr(sys.modules[__name__], name, None)
def get_versions(default=DEFAULT, verbose=False):
    """Return a dict with two keys, 'version' and 'full'.

    Sources are tried from most to least reliable: expanded VCS keywords in
    _version.py, a static (already rewritten) _version.py, the VCS tool
    itself, then the parent-directory name; *default* is the last resort.
    """
    # FIX: the versionfile_source / parentdir_prefix checks used to be
    # written as ``assert (condition, "message")`` -- an assert on a
    # two-element tuple, which is always true and could never fire.
    assert versionfile_source is not None, \
        "please set versioneer.versionfile_source"
    assert tag_prefix is not None, "please set versioneer.tag_prefix"
    assert parentdir_prefix is not None, \
        "please set versioneer.parentdir_prefix"
    assert VCS is not None, "please set versioneer.VCS"
    # I am in versioneer.py, which must live at the top of the source tree,
    # which we use to compute the root directory. py2exe/bbfreeze/non-CPython
    # don't have __file__, in which case we fall back to sys.argv[0] (which
    # ought to be the setup.py script). We prefer __file__ since that's more
    # robust in cases where setup.py was invoked in some weird way (e.g. pip)
    root = get_root()
    versionfile_abs = os.path.join(root, versionfile_source)
    # extract version from first of _version.py, VCS command (e.g. 'git
    # describe'), parentdir. This is meant to work for developers using a
    # source checkout, for users of a tarball created by 'setup.py sdist',
    # and for users of a tarball/zipball created by 'git archive' or github's
    # download-from-tag feature or the equivalent in other VCSes.
    get_keywords_f = vcs_function(VCS, "get_keywords")
    versions_from_keywords_f = vcs_function(VCS, "versions_from_keywords")
    if get_keywords_f and versions_from_keywords_f:
        vcs_keywords = get_keywords_f(versionfile_abs)
        ver = versions_from_keywords_f(vcs_keywords, tag_prefix)
        if ver:
            if verbose:
                print("got version from expanded keyword %s" % ver)
            return ver
    ver = versions_from_file(versionfile_abs)
    if ver:
        if verbose:
            print("got version from file %s %s"
                  % (versionfile_abs, ver))
        return ver
    versions_from_vcs_f = vcs_function(VCS, "versions_from_vcs")
    if versions_from_vcs_f:
        ver = versions_from_vcs_f(tag_prefix, root, verbose)
        if ver:
            if verbose:
                print("got version from VCS %s" % ver)
            return ver
    ver = versions_from_parentdir(parentdir_prefix, root, verbose)
    if ver:
        if verbose:
            print("got version from parentdir %s" % ver)
        return ver
    if verbose:
        print("got version from default %s" % default)
    return default
def get_version(verbose=False):
    """Convenience wrapper returning only the short version string."""
    info = get_versions(verbose=verbose)
    return info["version"]
class cmd_version(Command):
    """setup.py command: print the currently computed version string."""
    description = "report generated version string"
    user_options = []
    boolean_options = []
    def initialize_options(self):
        # no options to prepare
        pass
    def finalize_options(self):
        # no options to validate
        pass
    def run(self):
        ver = get_version(verbose=True)
        print("Version is currently: %s" % ver)
class cmd_build(_build):
    """'build' command that rewrites the built tree's _version.py with a
    static copy containing the pre-computed version strings."""
    def run(self):
        versions = get_versions(verbose=True)
        _build.run(self)
        # now locate _version.py in the new build/ directory and replace it
        # with an updated value
        if versionfile_build:
            target_versionfile = os.path.join(self.build_lib,
                                              versionfile_build)
            print("UPDATING %s" % target_versionfile)
            # unlink first: the file may be a hardlink back into the source
            os.unlink(target_versionfile)
            with open(target_versionfile, "w") as f:
                f.write(SHORT_VERSION_PY % versions)
if 'cx_Freeze' in sys.modules:  # cx_freeze enabled?
    from cx_Freeze.dist import build_exe as _build_exe
    class cmd_build_exe(_build_exe):
        """cx_Freeze 'build_exe' that freezes with a static _version.py,
        then restores the full VCS-aware template into the source tree."""
        def run(self):
            versions = get_versions(verbose=True)
            target_versionfile = versionfile_source
            print("UPDATING %s" % target_versionfile)
            # temporarily replace the source _version.py with the static
            # copy so the frozen executable carries fixed version strings
            os.unlink(target_versionfile)
            with open(target_versionfile, "w") as f:
                f.write(SHORT_VERSION_PY % versions)
            _build_exe.run(self)
            # put the dynamic template back so the checkout keeps working
            os.unlink(target_versionfile)
            with open(versionfile_source, "w") as f:
                assert VCS is not None, "please set versioneer.VCS"
                LONG = LONG_VERSION_PY[VCS]
                f.write(LONG % {"DOLLAR": "$",
                                "TAG_PREFIX": tag_prefix,
                                "PARENTDIR_PREFIX": parentdir_prefix,
                                "VERSIONFILE_SOURCE": versionfile_source,
                                })
class cmd_sdist(_sdist):
    """'sdist' command that embeds a static _version.py in the release
    tree so tarball users get the right version without any VCS."""
    def run(self):
        versions = get_versions(verbose=True)
        # stash for make_release_tree(), which runs later
        self._versioneer_generated_versions = versions
        # unless we update this, the command will keep using the old version
        self.distribution.metadata.version = versions["version"]
        return _sdist.run(self)
    def make_release_tree(self, base_dir, files):
        _sdist.make_release_tree(self, base_dir, files)
        # now locate _version.py in the new base_dir directory (remembering
        # that it may be a hardlink) and replace it with an updated value
        target_versionfile = os.path.join(base_dir, versionfile_source)
        print("UPDATING %s" % target_versionfile)
        os.unlink(target_versionfile)
        with open(target_versionfile, "w") as f:
            f.write(SHORT_VERSION_PY % self._versioneer_generated_versions)
# Snippet appended to the package __init__.py so that runtime code can do
# `import mypackage; mypackage.__version__`.
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
class cmd_update_files(Command):
    # One-shot setup command: installs/refreshes the versioneer support
    # files (_version.py, __init__.py snippet, MANIFEST.in entries) and
    # makes the VCS-specific changes (e.g. .gitattributes).
    description = ("install/upgrade Versioneer files:"
                   "__init__.py SRC/_version.py")
    user_options = []
    boolean_options = []
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def run(self):
        print(" creating %s" % versionfile_source)
        with open(versionfile_source, "w") as f:
            assert VCS is not None, "please set versioneer.VCS"
            LONG = LONG_VERSION_PY[VCS]
            f.write(LONG % {"DOLLAR": "$",
                            "TAG_PREFIX": tag_prefix,
                            "PARENTDIR_PREFIX": parentdir_prefix,
                            "VERSIONFILE_SOURCE": versionfile_source,
                            })
        ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py")
        if os.path.exists(ipy):
            try:
                with open(ipy, "r") as f:
                    old = f.read()
            except EnvironmentError:
                old = ""
            # append the __version__ snippet only once
            if INIT_PY_SNIPPET not in old:
                print(" appending to %s" % ipy)
                with open(ipy, "a") as f:
                    f.write(INIT_PY_SNIPPET)
            else:
                print(" %s unmodified" % ipy)
        else:
            print(" %s doesn't exist, ok" % ipy)
            ipy = None
        # Make sure both the top-level "versioneer.py" and versionfile_source
        # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
        # they'll be copied into source distributions. Pip won't be able to
        # install the package without this.
        manifest_in = os.path.join(get_root(), "MANIFEST.in")
        simple_includes = set()
        try:
            with open(manifest_in, "r") as f:
                for line in f:
                    if line.startswith("include "):
                        for include in line.split()[1:]:
                            simple_includes.add(include)
        except EnvironmentError:
            pass
        # That doesn't cover everything MANIFEST.in can do
        # (http://docs.python.org/2/distutils/sourcedist.html#commands), so
        # it might give some false negatives. Appending redundant 'include'
        # lines is safe, though.
        if "versioneer.py" not in simple_includes:
            print(" appending 'versioneer.py' to MANIFEST.in")
            with open(manifest_in, "a") as f:
                f.write("include versioneer.py\n")
        else:
            print(" 'versioneer.py' already in MANIFEST.in")
        if versionfile_source not in simple_includes:
            print(" appending versionfile_source ('%s') to MANIFEST.in" %
                  versionfile_source)
            with open(manifest_in, "a") as f:
                f.write("include %s\n" % versionfile_source)
        else:
            print(" versionfile_source already in MANIFEST.in")
        # Make VCS-specific changes. For git, this means creating/changing
        # .gitattributes to mark _version.py for export-time keyword
        # substitution.
        do_vcs_install(manifest_in, versionfile_source, ipy)
def get_cmdclass():
    """Return the versioneer-aware distutils command classes."""
    commands = {
        'version': cmd_version,
        'versioneer': cmd_update_files,
        'build': cmd_build,
        'sdist': cmd_sdist,
    }
    if 'cx_Freeze' in sys.modules:
        # Under cx_Freeze the plain 'build' is replaced by 'build_exe'.
        commands['build_exe'] = cmd_build_exe
        del commands['build']
    return commands
|
mcdeaton13/Tax-Calculator
|
versioneer.py
|
Python
|
mit
| 36,841
|
[
"Brian"
] |
e056d5489ecf07716364fba580b05b9f7b947da8bbecde4730f8c17916ce3b70
|
"""
API operations for Workflows
"""
from __future__ import absolute_import
import uuid
import logging
from sqlalchemy import desc, or_, and_
from galaxy import exceptions, util
from galaxy.model.item_attrs import UsesAnnotations
from galaxy.managers import histories
from galaxy.managers import workflows
from galaxy.web import _future_expose_api as expose_api
from galaxy.web.base.controller import BaseAPIController, url_for, UsesStoredWorkflowMixin
from galaxy.web.base.controller import UsesHistoryMixin
from galaxy.web.base.controller import SharableMixin
from galaxy.workflow.extract import extract_workflow
from galaxy.workflow.run import invoke
from galaxy.workflow.run_request import build_workflow_run_config
log = logging.getLogger(__name__)
class WorkflowsAPIController(BaseAPIController, UsesStoredWorkflowMixin, UsesHistoryMixin, UsesAnnotations, SharableMixin):
    """
    API operations on stored workflows: listing, showing, running/creating,
    downloading, deleting and importing workflows, plus invocation queries.
    (Python 2 code: uses `except X, e` and dict.itervalues().)
    """
    def __init__( self, app ):
        # NOTE(review): super( BaseAPIController, self ) starts the MRO search
        # *after* BaseAPIController, so BaseAPIController.__init__ itself is
        # skipped here; the usual form would name this class. Confirm intended.
        super( BaseAPIController, self ).__init__( app )
        self.history_manager = histories.HistoryManager()
        self.workflow_manager = workflows.WorkflowsManager( app )
    @expose_api
    def index(self, trans, **kwd):
        """
        GET /api/workflows
        Displays a collection of workflows.
        :param show_published: if True, show also published workflows
        :type show_published: boolean
        """
        show_published = util.string_as_bool( kwd.get( 'show_published', 'False' ) )
        rval = []
        # workflows owned by the current user (optionally plus published ones)
        filter1 = ( trans.app.model.StoredWorkflow.user == trans.user )
        if show_published:
            filter1 = or_( filter1, ( trans.app.model.StoredWorkflow.published == True ) ) #noqa -- sqlalchemy comparison
        for wf in trans.sa_session.query( trans.app.model.StoredWorkflow ).filter(
                filter1, trans.app.model.StoredWorkflow.table.c.deleted == False ).order_by( #noqa -- sqlalchemy comparison
                desc( trans.app.model.StoredWorkflow.table.c.update_time ) ).all():
            item = wf.to_dict( value_mapper={ 'id': trans.security.encode_id } )
            encoded_id = trans.security.encode_id(wf.id)
            item['url'] = url_for('workflow', id=encoded_id)
            item['owner'] = wf.user.username
            rval.append(item)
        # workflows shared with the current user by other users
        for wf_sa in trans.sa_session.query( trans.app.model.StoredWorkflowUserShareAssociation ).filter_by(
                user=trans.user ).join( 'stored_workflow' ).filter(
                trans.app.model.StoredWorkflow.deleted == False ).order_by( #noqa -- sqlalchemy comparison
                desc( trans.app.model.StoredWorkflow.update_time ) ).all():
            item = wf_sa.stored_workflow.to_dict( value_mapper={ 'id': trans.security.encode_id } )
            encoded_id = trans.security.encode_id(wf_sa.stored_workflow.id)
            item['url'] = url_for( 'workflow', id=encoded_id )
            item['owner'] = wf_sa.stored_workflow.user.username
            rval.append(item)
        return rval
    @expose_api
    def show(self, trans, id, **kwd):
        """
        GET /api/workflows/{encoded_workflow_id}
        Displays information needed to run a workflow from the command line.
        """
        stored_workflow = self.__get_stored_workflow( trans, id )
        # accessibility: importable, owned, admin, or explicitly shared
        if stored_workflow.importable is False and stored_workflow.user != trans.user and not trans.user_is_admin():
            if trans.sa_session.query(trans.app.model.StoredWorkflowUserShareAssociation).filter_by(user=trans.user, stored_workflow=stored_workflow).count() == 0:
                message = "Workflow is neither importable, nor owned by or shared with current user"
                raise exceptions.ItemAccessibilityException( message )
        item = stored_workflow.to_dict( view='element', value_mapper={ 'id': trans.security.encode_id } )
        item['url'] = url_for('workflow', id=id)
        item['owner'] = stored_workflow.user.username
        latest_workflow = stored_workflow.latest_workflow
        inputs = {}
        # collect the data-input steps (these are what a runner must supply)
        for step in latest_workflow.steps:
            step_type = step.type
            if step_type in ['data_input', 'data_collection_input']:
                if step.tool_inputs and "name" in step.tool_inputs:
                    label = step.tool_inputs['name']
                elif step_type == "data_input":
                    label = "Input Dataset"
                elif step_type == "data_collection_input":
                    label = "Input Dataset Collection"
                else:
                    raise ValueError("Invalid step_type %s" % step_type)
                inputs[step.id] = {'label': label, 'value': ""}
            else:
                pass
                # Eventually, allow regular tool parameters to be inserted and modified at runtime.
                # p = step.get_required_parameters()
        item['inputs'] = inputs
        item['annotation'] = self.get_item_annotation_str( trans.sa_session, stored_workflow.user, stored_workflow )
        steps = {}
        # serialize every step along with its incoming connections
        for step in latest_workflow.steps:
            steps[step.id] = {'id': step.id,
                              'type': step.type,
                              'tool_id': step.tool_id,
                              'tool_version': step.tool_version,
                              'annotation': self.get_item_annotation_str( trans.sa_session, stored_workflow.user, step ),
                              'tool_inputs': step.tool_inputs,
                              'input_steps': {}}
            for conn in step.input_connections:
                steps[step.id]['input_steps'][conn.input_name] = {'source_step': conn.output_step_id,
                                                                  'step_output': conn.output_name}
        item['steps'] = steps
        return item
    @expose_api
    def create(self, trans, payload, **kwd):
        """
        POST /api/workflows
        Run or create workflows from the api.
        If installed_repository_file or from_history_id is specified a new
        workflow will be created for this user. Otherwise, workflow_id must be
        specified and this API method will cause a workflow to execute.
        :param installed_repository_file The path of a workflow to import. Either workflow_id, installed_repository_file or from_history_id must be specified
        :type installed_repository_file str
        :param workflow_id: An existing workflow id. Either workflow_id, installed_repository_file or from_history_id must be specified
        :type workflow_id: str
        :param parameters: If workflow_id is set - see _update_step_parameters()
        :type parameters: dict
        :param ds_map: If workflow_id is set - a dictionary mapping each input step id to a dictionary with 2 keys: 'src' (which can be 'ldda', 'ld' or 'hda') and 'id' (which should be the id of a LibraryDatasetDatasetAssociation, LibraryDataset or HistoryDatasetAssociation respectively)
        :type ds_map: dict
        :param no_add_to_history: If workflow_id is set - if present in the payload with any value, the input datasets will not be added to the selected history
        :type no_add_to_history: str
        :param history: If workflow_id is set - optional history where to run the workflow, either the name of a new history or "hist_id=HIST_ID" where HIST_ID is the id of an existing history. If not specified, the workflow will be run a new unnamed history
        :type history: str
        :param replacement_params: If workflow_id is set - an optional dictionary used when renaming datasets
        :type replacement_params: dict
        :param from_history_id: Id of history to extract a workflow from. Either workflow_id, installed_repository_file or from_history_id must be specified
        :type from_history_id: str
        :param job_ids: If from_history_id is set - optional list of jobs to include when extracting a workflow from history
        :type job_ids: str
        :param dataset_ids: If from_history_id is set - optional list of HDA `hid`s corresponding to workflow inputs when extracting a workflow from history
        :type dataset_ids: str
        :param dataset_collection_ids: If from_history_id is set - optional list of HDCA `hid`s corresponding to workflow inputs when extracting a workflow from history
        :type dataset_collection_ids: str
        :param workflow_name: If from_history_id is set - name of the workflow to create when extracting a workflow from history
        :type workflow_name: str
        """
        # exactly one creation/execution mode must be present in the payload
        ways_to_create = set( [
            'workflow_id',
            'installed_repository_file',
            'from_history_id',
            'shared_workflow_id',
            'workflow',
        ] ).intersection( payload )
        if len( ways_to_create ) == 0:
            message = "One parameter among - %s - must be specified" % ", ".join( ways_to_create )
            raise exceptions.RequestParameterMissingException( message )
        if len( ways_to_create ) > 1:
            message = "Only one parameter among - %s - must be specified" % ", ".join( ways_to_create )
            raise exceptions.RequestParameterInvalidException( message )
        if 'installed_repository_file' in payload:
            # delegate to the legacy web controller's importer
            workflow_controller = trans.webapp.controllers[ 'workflow' ]
            result = workflow_controller.import_workflow( trans=trans,
                                                          cntrller='api',
                                                          **payload)
            return result
        if 'from_history_id' in payload:
            # build a brand new workflow by extracting jobs from a history
            from_history_id = payload.get( 'from_history_id' )
            history = self.get_history( trans, from_history_id, check_ownership=False, check_accessible=True )
            job_ids = map( trans.security.decode_id, payload.get( 'job_ids', [] ) )
            dataset_ids = payload.get( 'dataset_ids', [] )
            dataset_collection_ids = payload.get( 'dataset_collection_ids', [] )
            workflow_name = payload[ 'workflow_name' ]
            stored_workflow = extract_workflow(
                trans=trans,
                user=trans.get_user(),
                history=history,
                job_ids=job_ids,
                dataset_ids=dataset_ids,
                dataset_collection_ids=dataset_collection_ids,
                workflow_name=workflow_name,
            )
            item = stored_workflow.to_dict( value_mapper={ 'id': trans.security.encode_id } )
            item[ 'url' ] = url_for( 'workflow', id=item[ 'id' ] )
            return item
        if 'shared_workflow_id' in payload:
            workflow_id = payload[ 'shared_workflow_id' ]
            return self.__api_import_shared_workflow( trans, workflow_id, payload )
        if 'workflow' in payload:
            return self.__api_import_new_workflow( trans, payload, **kwd )
        # remaining mode: execute an existing workflow
        workflow_id = payload.get( 'workflow_id', None )
        if not workflow_id:
            message = "Invalid workflow_id specified."
            raise exceptions.RequestParameterInvalidException( message )
        # Get workflow + accessibility check.
        stored_workflow = self.__get_stored_accessible_workflow( trans, workflow_id )
        workflow = stored_workflow.latest_workflow
        run_config = build_workflow_run_config( trans, workflow, payload )
        history = run_config.target_history
        # invoke may throw MessageExceptions on tool erors, failure
        # to match up inputs, etc...
        outputs = invoke(
            trans=trans,
            workflow=workflow,
            workflow_run_config=run_config,
            populate_state=True,
        )
        trans.sa_session.flush()
        # Build legacy output - should probably include more information from
        # outputs.
        rval = {}
        rval['history'] = trans.security.encode_id(history.id)
        rval['outputs'] = []
        for step in workflow.steps:
            if step.type == 'tool' or step.type is None:
                for v in outputs[ step.id ].itervalues():
                    rval[ 'outputs' ].append( trans.security.encode_id( v.id ) )
        return rval
    @expose_api
    def workflow_dict( self, trans, workflow_id, **kwd ):
        """
        GET /api/workflows/{encoded_workflow_id}/download
        Returns a selected workflow as a json dictionary.
        """
        stored_workflow = self.__get_stored_accessible_workflow( trans, workflow_id )
        ret_dict = self._workflow_to_dict( trans, stored_workflow )
        if not ret_dict:
            # This workflow has a tool that's missing from the distribution
            message = "Workflow cannot be exported due to missing tools."
            raise exceptions.MessageException( message )
        return ret_dict
    @expose_api
    def delete( self, trans, id, **kwd ):
        """
        DELETE /api/workflows/{encoded_workflow_id}
        Deletes a specified workflow
        Author: rpark
        copied from galaxy.web.controllers.workflows.py (delete)
        """
        workflow_id = id
        try:
            stored_workflow = trans.sa_session.query(self.app.model.StoredWorkflow).get(trans.security.decode_id(workflow_id))
        # Python 2 except syntax (this module targets Python 2)
        except Exception, e:
            trans.response.status = 400
            return ("Workflow with ID='%s' can not be found\n Exception: %s") % (workflow_id, str( e ))
        # check to see if user has permissions to selected workflow
        if stored_workflow.user != trans.user and not trans.user_is_admin():
            trans.response.status = 403
            return("Workflow is not owned by current user")
        # Mark a workflow as deleted
        stored_workflow.deleted = True
        trans.sa_session.flush()
        # TODO: Unsure of response message to let api know that a workflow was successfully deleted
        return ( "Workflow '%s' successfully deleted" % stored_workflow.name )
    @expose_api
    def import_new_workflow_deprecated(self, trans, payload, **kwd):
        """
        POST /api/workflows/upload
        Importing dynamic workflows from the api. Return newly generated workflow id.
        Author: rpark
        # currently assumes payload['workflow'] is a json representation of a workflow to be inserted into the database
        Deprecated in favor to POST /api/workflows with encoded 'workflow' in
        payload the same way.
        """
        return self.__api_import_new_workflow( trans, payload, **kwd )
    # Shared implementation for POST /api/workflows with a 'workflow' dict.
    def __api_import_new_workflow( self, trans, payload, **kwd ):
        data = payload['workflow']
        publish = util.string_as_bool( payload.get( "publish", False ) )
        # If 'publish' set, default to importable.
        importable = util.string_as_bool( payload.get( "importable", publish ) )
        if publish and not importable:
            raise exceptions.RequestParameterInvalidException( "Published workflow must be importable." )
        from_dict_kwds = dict(
            source="API",
            publish=publish,
        )
        workflow, missing_tool_tups = self._workflow_from_dict( trans, data, **from_dict_kwds )
        if importable:
            self._make_item_accessible( trans.sa_session, workflow )
            trans.sa_session.flush()
        # galaxy workflow newly created id
        workflow_id = workflow.id
        # api encoded, id
        encoded_id = trans.security.encode_id(workflow_id)
        # return list
        # NOTE(review): rval is built but never returned (item is returned
        # directly); the list appears to be dead code.
        rval = []
        item = workflow.to_dict(value_mapper={'id': trans.security.encode_id})
        item['url'] = url_for('workflow', id=encoded_id)
        rval.append(item)
        return item
    @expose_api
    def import_shared_workflow_deprecated(self, trans, payload, **kwd):
        """
        POST /api/workflows/import
        Import a workflow shared by other users.
        :param workflow_id: the workflow id (required)
        :type workflow_id: str
        :raises: exceptions.MessageException, exceptions.ObjectNotFound
        """
        # Pull parameters out of payload.
        workflow_id = payload.get('workflow_id', None)
        if workflow_id is None:
            raise exceptions.ObjectAttributeMissingException( "Missing required parameter 'workflow_id'." )
        # NOTE(review): the result of the import is not returned here, so this
        # endpoint responds with None — confirm whether a `return` was intended.
        self.__api_import_shared_workflow( trans, workflow_id, payload )
    # Shared implementation for importing another user's published workflow.
    def __api_import_shared_workflow( self, trans, workflow_id, payload, **kwd ):
        try:
            stored_workflow = self.get_stored_workflow( trans, workflow_id, check_ownership=False )
        except:
            raise exceptions.ObjectNotFound( "Malformed workflow id ( %s ) specified." % workflow_id )
        if stored_workflow.importable is False:
            raise exceptions.ItemAccessibilityException( 'The owner of this workflow has disabled imports via this link.' )
        elif stored_workflow.deleted:
            raise exceptions.ItemDeletionException( "You can't import this workflow because it has been deleted." )
        imported_workflow = self._import_shared_workflow( trans, stored_workflow )
        item = imported_workflow.to_dict( value_mapper={ 'id': trans.security.encode_id } )
        encoded_id = trans.security.encode_id(imported_workflow.id)
        item['url'] = url_for('workflow', id=encoded_id)
        return item
    @expose_api
    def workflow_usage(self, trans, workflow_id, **kwd):
        """
        GET /api/workflows/{workflow_id}/usage
        Get the list of the workflow usage
        :param workflow_id: the workflow id (required)
        :type workflow_id: str
        :raises: exceptions.MessageException, exceptions.ObjectNotFound
        """
        decoded_stored_workflow_invocation_id = self.__decode_id( trans, workflow_id )
        results = self.workflow_manager.build_invocations_query( trans, decoded_stored_workflow_invocation_id )
        out = []
        for r in results:
            out.append( self.__encode_invocation( trans, r ) )
        return out
    @expose_api
    def workflow_usage_contents(self, trans, workflow_id, usage_id, **kwd):
        """
        GET /api/workflows/{workflow_id}/usage/{usage_id}
        Get detailed description of workflow usage
        :param workflow_id: the workflow id (required)
        :type workflow_id: str
        :param usage_id: the usage id (required)
        :type usage_id: str
        :raises: exceptions.MessageException, exceptions.ObjectNotFound
        """
        decoded_workflow_invocation_id = self.__decode_id( trans, usage_id )
        workflow_invocation = self.workflow_manager.get_invocation( trans, decoded_workflow_invocation_id )
        if workflow_invocation:
            return self.__encode_invocation( trans, workflow_invocation )
        return None
    # Fetch a stored workflow and enforce owner/admin/shared access.
    def __get_stored_accessible_workflow( self, trans, workflow_id ):
        stored_workflow = self.__get_stored_workflow( trans, workflow_id )
        # check to see if user has permissions to selected workflow
        if stored_workflow.user != trans.user and not trans.user_is_admin():
            if trans.sa_session.query(trans.app.model.StoredWorkflowUserShareAssociation).filter_by(user=trans.user, stored_workflow=stored_workflow).count() == 0:
                message = "Workflow is not owned by or shared with current user"
                raise exceptions.ItemAccessibilityException( message )
        return stored_workflow
    # Resolve an id that may be either a workflow UUID or an encoded db id.
    def __get_stored_workflow( self, trans, workflow_id ):
        if util.is_uuid(workflow_id):
            # see if they have passed in the UUID for a workflow that is attached to a stored workflow
            workflow_uuid = uuid.UUID(workflow_id)
            stored_workflow = trans.sa_session.query(trans.app.model.StoredWorkflow).filter( and_(
                trans.app.model.StoredWorkflow.latest_workflow_id == trans.app.model.Workflow.id,
                trans.app.model.Workflow.uuid == workflow_uuid
            )).first()
            if stored_workflow is None:
                raise exceptions.ObjectNotFound( "Workflow not found: %s" % workflow_id )
        else:
            workflow_id = self.__decode_id( trans, workflow_id )
            query = trans.sa_session.query( trans.app.model.StoredWorkflow )
            stored_workflow = query.get( workflow_id )
        if stored_workflow is None:
            raise exceptions.ObjectNotFound( "No such workflow found." )
        return stored_workflow
    # Serialize an invocation with all ids security-encoded (recursively).
    def __encode_invocation( self, trans, invocation, view="element" ):
        return self.encode_all_ids(
            trans,
            invocation.to_dict( view ),
            True
        )
    # Decode an encoded id, translating failures into MalformedId errors.
    def __decode_id( self, trans, workflow_id, model_type="workflow" ):
        try:
            return trans.security.decode_id( workflow_id )
        except Exception:
            message = "Malformed %s id ( %s ) specified, unable to decode" % ( model_type, workflow_id )
            raise exceptions.MalformedId( message )
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/webapps/galaxy/api/workflows.py
|
Python
|
gpl-3.0
| 21,273
|
[
"Galaxy"
] |
732399048b2bbdc88651623f9d4f29960b68d7850ec198f18ca992fc8fe1f3d3
|
#!/usr/bin/env python
# Copyright 2007, Michael J. Harms
# This program is distributed under General Public License v. 3. See the file
# COPYING for a copy of the license.
__description__ = \
"""
pdb_clean.py
Standardizes a Brookhaven pdb file
"""
__author__ = "Michael J. Harms"
__date__ = "070727"
import sys, time, string, os, shutil
from . import atom_renumber, charmm
from .helper import container
from .data.common import *
class PdbCleanError(Exception):
    """Raised when any step of pdb_clean processing fails."""
def pdbCheck(coord):
    """
    Return 0 if the pdb still contains ATOM records after processing,
    1 otherwise (i.e. non-zero means the file was mangled/emptied).
    """
    has_atoms = any(l[0:6] == "ATOM " for l in coord)
    return 0 if has_atoms else 1
def convertModifiedAA(coord,header):
    """
    Convert modified amino acids to their normal counterparts.

    MODRES records in header define a map from modified residue names to
    standard ones.  Matching ATOM entries are rewritten (only backbone atoms
    are kept; side chains are rebuilt later with CHARMM) and SEQRES records
    are updated to use the standard names.

    Returns (new_coord, new_header, conversion_remarks).
    """
    # See if there are any non-standard amino acids in the pdb file. If there
    # are not, return
    modres = [l for l in header if l[0:6] == "MODRES"]
    if len(modres) == 0:
        return coord, header, []
    # Create list of modified residues (modified name -> standard name)
    mod_dict = dict([(l[12:15],l[24:27]) for l in modres])
    # Convert to ATOM entries, skipping non-backbone atoms. These will be built
    # with CHARMM.
    backbone_atoms = ["N ","CA ","C ","O "]
    new_coord = []
    for line in coord:
        if line[17:20] in list(mod_dict.keys()):
            new = mod_dict[line[17:20]]
            if line[13:16] in backbone_atoms:
                new_line = "ATOM %s%s%s" % (line[6:17],new,line[20:])
                new_coord.append(new_line)
        else:
            new_coord.append(line)
    # Convert non-standard atoms in the SEQRES entries
    new_header = []
    for line in header:
        if line[0:6] == "SEQRES":
            old_seq = line[19:70].split()
            new_seq = []
            for aa in old_seq:
                if aa in list(mod_dict.keys()):
                    new_seq.append(mod_dict[aa])
                else:
                    new_seq.append(aa)
            new_seq = "".join(["%s " % aa for aa in new_seq])
            # Bug fix: str.strip() returns a new string; the original call
            # discarded its result, leaving a trailing space in the sequence.
            new_seq = new_seq.strip()
            new_seq = "%-50s" % new_seq
            new_header.append("%s%-50s%s" % (line[:19],new_seq,line[71:]))
        else:
            new_header.append(line)
    # Create output remarks
    conv = ["REMARK converted %s to %s\n" % (k,mod_dict[k])
            for k in list(mod_dict.keys())]
    return new_coord, new_header, conv
def stripACS(coord):
    """
    Removes alternate conformations (ACS).

    For every residue, the first occurrence of each atom is kept (with the
    ACS letter blanked out); later occurrences of the same atom in the same
    residue are assumed to be alternate conformations and are skipped.

    Returns (coord_out, skipped_remarks).
    """
    def removeLetters(line):
        """
        Mini function that removes letters that denote ACS.
        """
        # Bug fix: string.letters was removed in Python 3 (this module is
        # otherwise Python 3 code); use string.ascii_letters instead.
        if line[16] in string.ascii_letters:
            line = "%s %s" % (line[:16],line[17:])
        if line[26] in string.ascii_letters:
            line = "%s %s" % (line[:26],line[27:])
        return line
    # If a particular residue already has an atom, it will be in known_atom_dict
    # The second occurence of that atom in the same residue is assumed to be an
    # alternate conformation and is skipped.
    known_atom_dict = {}
    coord_out = []
    skipped = []
    for c in coord:
        residue = c[21:26]
        # If the residue is not known, update known_atom_dict and append line
        # to coordinate file
        if residue not in list(known_atom_dict.keys()):
            out = removeLetters(c)
            coord_out.append(out)
            known_atom_dict.update([(residue,[c[13:16]])])
        # If the residue is known, determine if the atom has been seen before.
        # If it has, skip it. Otherwise, append to coord_out and
        # known_atom_dict
        else:
            atom = c[13:16]
            if atom in known_atom_dict[residue]:
                skipped.append("REMARK%s" % c[6:])
            else:
                out = removeLetters(c)
                coord_out.append(out)
                known_atom_dict[residue].append(atom)
    return coord_out, skipped
def backboneCheck(coord):
    """
    Checks for duplicate residues (fatal) and missing backbone atoms. If a
    backbone atom is missing, the entire containing residue is deleted.

    Returns (coord, removed_remarks).  Raises PdbCleanError on duplication.
    """
    residue_numbers = []
    for line in coord:
        if line[17:26] not in residue_numbers:
            residue_numbers.append(line[17:26])
    to_remove = []
    for resid in residue_numbers:
        resid_atoms = [l for l in coord if l[17:26] == resid]
        # All backbone atoms in the protein
        backbone_atoms = [[l for l in resid_atoms if l[13:16] == "N "],
                          [l for l in resid_atoms if l[13:16] == "CA "],
                          [l for l in resid_atoms if l[13:16] == "C "],
                          [l for l in resid_atoms if l[13:16] == "O "]]
        # If this is a proline, add CD to required backbone atoms
        if resid[0:3] == "PRO":
            backbone_atoms.append([l for l in resid_atoms if l[13:16] == "CD "])
        # If more than one of a backbone atom is found for a residue, we have
        # some sort of duplication. If a backbone atom is missing, delete the
        # residue.
        for b in backbone_atoms:
            if len(b) > 1:
                # Bug fix: the original format string lacked the opening
                # escaped quote ("\%s\"") and rendered as '\RES" is ...'.
                err = "\"%s\" is duplicated!" % resid
                raise PdbCleanError(err)
            if len(b) == 0:
                # NOTE: a residue missing several backbone atoms is appended
                # once per missing atom (behavior preserved from original).
                to_remove.append(resid)
    coord = [l for l in coord if l[17:26] not in to_remove]
    removed = ["REMARK removed %s\n" % r for r in to_remove]
    return coord, removed
def addMissingAtoms(coord,seqres,keep_temp=False,renumber_residues=False,
                    pdb_id="",fix_atoms=True,num_steps=500):
    """
    Use CHARMM to add missing heavy atoms to coord, then restore the
    original b-factor/occupancy columns and (optionally) the original
    residue numbering.  Returns the rebuilt coordinate lines.
    """
    # Grab the b-factor and occupancy columns
    bfact_occ = dict([(l[13:26],l[54:67]) for l in coord])
    # Load pdb into pdb object to renumber for CHARMM
    pdb_obj = container.Structure("tmp",seqres,coord)
    pdb_obj.renumberAtoms()
    pdb_obj.dumpNumberConversion("numbering_conversion.txt")
    structure_list = pdb_obj.dumpStructures()
    # Do a charmm run to add missing atoms.
    try:
        new_coord = charmm.interface.charmmWash(structure_list,
            keep_temp=keep_temp,fix_atoms=fix_atoms,num_steps=num_steps)
    except charmm.interface.CharmmInterfaceError as xxx_todo_changeme:
        (strerror) = xxx_todo_changeme
        err = "CharmmInterfaceError\n%s\n" % strerror
        raise PdbCleanError(err)
    # Remove hydrogens
    new_coord = [l for l in new_coord if l[12] != "H" and l[13] != "H"]
    # Place charmm coordinates into new pdb container, and load in old numbering
    new_pdb = container.Structure("tmp",[],new_coord)
    if renumber_residues:
        # keep the CHARMM numbering; save the conversion table for the user
        shutil.move("numbering_conversion.txt",
                    "%s_resid-conversion.txt" % pdb_id)
    else:
        new_pdb.loadNumberConversion("numbering_conversion.txt","fixed")
        new_pdb.renumberAtoms()
        os.remove("numbering_conversion.txt")
    # Add bfactors, occupancies, and TER entries back in
    out = []
    for chain in new_pdb.chains:
        chain_atoms = chain.atom_lines
        for l in chain_atoms:
            try:
                # 3*"%s" evaluates to "%s%s%s" (* and % share precedence,
                # left-to-right), so three fields are formatted here.
                out.append(3*"%s" % (l[:54],bfact_occ[l[13:26]],l[67:]))
            except KeyError:
                # atom added by CHARMM: no original b-factor/occupancy
                out.append(3*"%s" % (l[:54]," 1.00 1.00",l[67:]))
        ter = chain_atoms[-1]
        ter = "%s%s%54s\n" % ("TER ",ter[6:26]," ")
        out.append(ter)
    out.append("%-80s\n" % "END")
    return out
def pdbClean(pdb,pdb_id="temp",chains="all",renumber_residues=False,
             keep_temp=False,fix_atoms=True,num_steps=500):
    """
    Standardize a pdb file:
        - Remove waters, ligands, and other HETATMS
        - Convert modified residues (i.e. Se-Met) to the normal residue
        - Remove alternate conformations (taking first in pdb file)
        - Find and remove residues with missing backbone atoms
        - Add missing heavy atoms with CHARMM
        - Take only the specified chain
        - Renumber residues from 1

    Returns the processed pdb (header + log + coordinates) as a list of
    lines.  Raises PdbCleanError on any fatal problem.
    """
    # Set up log
    log = ["REMARK PDB processed using pdb_clean.py (harmsm@jhu.edu)\n"]
    log_fmt = "REMARK - %s\n"
    log.append(log_fmt % ("Process time: %s" % time.asctime()))
    # Check pdb files for Brookhaven-added error warnings (CAVEAT and OBSLTE)
    error = [l for l in pdb if l[0:6] in ERROR_RECORDS]
    if len(error) != 0:
        err = "PDB might have problem!\n" + "".join(error)
        raise PdbCleanError(err)
    # Grab pdb header, excluding coordinates and deprecated records.
    header = [l for l in pdb if l[0:6] not in COORD_RECORDS]
    # Convert non-standard amino acids to standard ones
    coord = [l for l in pdb if l[0:6] in COORD_RECORDS]
    coord, header, converted = convertModifiedAA(coord,header)
    if len(converted) != 0:
        log.append(log_fmt % "Modified amino acids converted.")
        print(log[-1], end=' ')
        log.extend(converted)
        if pdbCheck(coord):
            err = "Modified amino acid converter removed all atoms!  Mangled pdb!"
            raise PdbCleanError(err)
    # Strip all entries in COORD_RECORDS except ATOM
    coord = [l for l in coord if l[0:6] == "ATOM "]
    if pdbCheck(coord):
        err = "There are no ATOM entries in this pdb file!"
        raise PdbCleanError(err)
    else:
        log.append(log_fmt % "HETATM entries removed.")
        print(log[-1], end=' ')
    # Grab only the chain we want, if specified
    if chains != "all":
        coord = [l for l in coord if l[21] in chains]
        log.append(log_fmt % ("Took only chain %r." % chains))
        print(log[-1], end=' ')
        if pdbCheck(coord):
            err = "Chain filter (%r) removed all atoms in pdb file!" % chains
            raise PdbCleanError(err)
    # Strip alternate conformations
    coord, skipped = stripACS(coord)
    if len(skipped) != 0:
        log.append(log_fmt % "Alternate conformations were removed.")
        print(log[-1], end=' ')
        log.extend(skipped)
        if pdbCheck(coord):
            err = "ACS stripper removed all atoms!  Mangled pdb file."
            raise PdbCleanError(err)
    # Check for missing backbone atoms; these residues are deleted
    coord, removed = backboneCheck(coord)
    if len(removed) != 0:
        log.append(log_fmt % "Residues with missing backbone atoms removed.")
        print(log[-1], end=' ')
        log.extend(removed)
        if pdbCheck(coord):
            err = "Backbone checker removed all atoms!  Mangled pdb file."
            raise PdbCleanError(err)
    # Add missing atoms using CHARMM
    print(log_fmt % "Adding heavy atoms using CHARMM.", end=' ')
    seqres = [l for l in header if l[0:6] == "SEQRES"]
    coord = addMissingAtoms(coord,seqres,keep_temp,renumber_residues,pdb_id,
                            fix_atoms,num_steps)
    log.append(log_fmt % "Missing heavy atoms were added with CHARMM.")
    # Renumber residues if requested
    if renumber_residues:
        log.append(log_fmt % "Residues renumbered from one.")
        print(log[-1], end=' ')
    # Renumber atoms from 1
    # Bug fix: the module is imported as `atom_renumber` at the top of this
    # file; `pdb_atom_renumber` is undefined and raised a NameError here.
    coord = atom_renumber.pdbAtomRenumber(coord)
    log.append(log_fmt % "Renumbered atoms from 1")
    print(log[-1], end=' ')
    # Standardize atom-type on far right pdb column
    coord = ["%s %s \n" % (c[:66],c[13]) for c in coord]
    log.append(log_fmt % "Atom types were standardized.")
    print(log[-1], end=' ')
    # Final check
    if pdbCheck(coord):
        err = "Unknown error occured and pdb has been mangled!"
        raise PdbCleanError(err)
    log = ["%-79s\n" % (l.strip()) for l in log]
    try:
        remark_pos = [l[0:6] for l in header].index("REMARK")
    except ValueError:
        remark_pos = 0
    # Return processed pdb file, placing log after preliminary remarks.
    out_pdb = []
    out_pdb.extend(header)
    out_pdb.extend(log)
    out_pdb.extend(coord)
    return out_pdb
|
harmslab/pdbtools
|
pdbtools/clean.py
|
Python
|
gpl-3.0
| 11,878
|
[
"CHARMM"
] |
35f8d36f3064d0a801a783f1747d6bd6a8b7ec8e3aa7f102cfd79e6dcfbcb315
|
"""
chatbot.py
Ask Cleverbot something via CloudBot! This one is way shorter!
Created By:
- Foxlet <http://furcode.tk/>
License:
GNU General Public License (Version 3)
"""
import urllib.parse
import hashlib
import collections
import html
import requests
from cloudbot import hook
# Ordered because the md5 checksum in cb_think() is taken over a slice of the
# url-encoded parameters, so parameter order must be stable.
SESSION = collections.OrderedDict()
API_URL = "http://www.cleverbot.com/webservicemin/"
# Headers that mimic a mobile browser; the Cleverbot endpoint rejects
# obviously non-browser requests.
HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
    'Accept-Language': 'en-us;q=0.8,en;q=0.5',
    'Pragma': 'no-cache',
    'Referer': 'http://www.cleverbot.com',
    'User-Agent': 'Mozilla/5.0 (Linux; Android 4.0.4; Galaxy Nexus Build/IMM76B) AppleWebKit/535.19 (KHTML, like '
                  'Gecko) Chrome/18.0.1025.133 Mobile Safari/535.19',
    'X-Moz': 'prefetch'
}
# Shared HTTP session; the initial GET picks up the cookies Cleverbot expects.
sess = requests.Session()
sess.get("http://www.cleverbot.com")
@hook.on_start()
def init_vars():
    """Seed the Cleverbot session parameters with their default values."""
    defaults = (
        ('stimulus', ""),
        ('sessionid', ""),
        ('start', 'y'),
        ('icognoid', 'wsf'),
        ('fno', '0'),
        ('sub', 'Say'),
        ('islearning', '1'),
        ('cleanslate', 'false'),
    )
    # insertion order matters: SESSION is an OrderedDict whose encoding is
    # checksummed by the API handshake
    for key, value in defaults:
        SESSION[key] = value
def cb_think(text):
    """Send *text* to the Cleverbot web service and return its reply."""
    SESSION['stimulus'] = text
    payload = urllib.parse.urlencode(SESSION)
    # Cleverbot's anti-bot handshake: md5 over a fixed slice of the payload,
    # appended as the icognocheck parameter.
    digest = hashlib.md5(payload[9:35].encode('utf-8')).hexdigest()
    target_url = "{}&icognocheck={}".format(payload, digest)
    parsed = sess.post(API_URL, data=target_url, headers=HEADERS)
    data = parsed.text.split('\r')
    # NOTE(review): sessionid is updated *before* the status check; on an
    # error response data[1] may be missing/meaningless — confirm intended.
    SESSION['sessionid'] = data[1]
    if parsed.status_code == 200:
        return html.unescape(str(data[0]))
    else:
        print("CleverBot API Returned "+str(parsed.status_code))
        return "Error: API returned "+str(parsed.status_code)
@hook.command("ask", "cleverbot", "cb", "gonzobot")
def ask(text):
""" <question> -- Asks Cleverbot <question> """
return cb_think(text)
|
bharaths/CloudBot
|
plugins/chatbot.py
|
Python
|
gpl-3.0
| 1,917
|
[
"Galaxy"
] |
50891f15b47db56482539de23ff77284162b069a093d4b096edeff44a965a96f
|
import ocl
import camvtk
import time
import vtk
import datetime
import math
def drawLoops(myscreen,loops,loopColor):
# draw the loops
nloop = 0
for lop in loops:
n = 0
N = len(lop)
first_point=ocl.Point(-1,-1,5)
previous=ocl.Point(-1,-1,5)
for p in lop:
if n==0: # don't draw anything on the first iteration
previous=p
first_point = p
elif n== (N-1): # the last point
myscreen.addActor( camvtk.Line(p1=(previous.x,previous.y,previous.z),p2=(p.x,p.y,p.z),color=loopColor) ) # the normal line
# and a line from p to the first point
myscreen.addActor( camvtk.Line(p1=(p.x,p.y,p.z),p2=(first_point.x,first_point.y,first_point.z),color=loopColor) )
else:
myscreen.addActor( camvtk.Line(p1=(previous.x,previous.y,previous.z),p2=(p.x,p.y,p.z),color=loopColor) )
previous=p
n=n+1
print("rendered loop ",nloop, " with ", len(lop), " points")
nloop = nloop+1
def getLoops(wl,zh,diam):
t_before = time.time()
wl.reset()
wl.setZ(zh)
wl.run()
t_after = time.time()
calctime = t_after-t_before
print(" Waterline done in ", calctime," s")
return wl.getLoops()
if __name__ == "__main__":
print(ocl.version())
myscreen = camvtk.VTKScreen()
#stl = camvtk.STLSurf("../../stl/demo.stl")
stl = camvtk.STLSurf("../../stl/gnu_tux_mod.stl")
myscreen.addActor(stl)
#stl.SetWireframe() # render tux as wireframe
stl.SetSurface() # render tux as surface
stl.SetColor(camvtk.cyan)
polydata = stl.src.GetOutput()
s = ocl.STLSurf()
camvtk.vtkPolyData2OCLSTL(polydata, s)
print("STL surface read,", s.size(), "triangles")
#zh = 1.0
t_before = time.time()
diam = 0.5
zheights=[0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.6, 1.8, 2.0, 2.2, 2.4, 2.6]
zheights=[float(1.0)]
wl = ocl.Waterline()
#wl = ocl.AdaptiveWaterline()
wl.setSTL(s)
length= 10
cutter = ocl.BallCutter( diam , length )
wl.setCutter(cutter)
wl.setSampling(0.0314)
for zh in zheights:
print("calculating Waterline at z= ", zh)
cutter_loops = getLoops(wl,zh,diam)
drawLoops(myscreen,cutter_loops,camvtk.red)
t_after = time.time()
calctime = t_after-t_before
print(" TOTAL Waterline time is: ", calctime," s")
print("done.")
myscreen.camera.SetPosition(15, 13, 7)
myscreen.camera.SetFocalPoint(5, 5, 0)
camvtk.drawArrows(myscreen,center=(-0.5,-0.5,-0.5))
camvtk.drawOCLtext(myscreen)
myscreen.render()
myscreen.iren.Start()
#raw_input("Press Enter to terminate")
|
aewallin/opencamlib
|
examples/python/waterline/waterline_8_tux_adaptive.py
|
Python
|
lgpl-2.1
| 2,746
|
[
"VTK"
] |
f6cf9514801142841569e72833897c1fb622f3fffa88949a02df2a774a1a1355
|
# Kevin van Rensburg 6/11/2021
# Copyright 2001
# Testing Startup Script version 0.14
# kstart14.py
# Creating new def Kvep1()-Kvep10()- Episodes for game and clean up all related def's
# updated the following def's : Adventure(), Continue()
# Adding script and lines to Kvep1-10, Adventure()
# Completed Kvep6 and working on Kvep7!!
# adding Game menu def KVersMenu() and chooser def KVersChoice()
# testing KVerse Game and Menu items
# fixed clearScreen with def cls()
import sys
import os
import random
from time import sleep
#import Kendy11.py
#kendy11.myfunc()
def Intro():
cls()
#print("Welcome to Program 1: ")
#print("Program 1: Intro ")
print("")
print("Intro:")
print("------")
print("")
print("Welcome! ")
CopyRight()
#print("I am Kendy.")
#print("My purpose is to serve and to obey.")
print("")
go=input("Press any key to continue")
def Test():
cls()
print("")
print("Test:")
print("-----")
#print("")
print("")
print("------------------------------")
print("")
print("This is the test startup script.")
print("")
print("Hello, welcome to my universe!")
print("")
print("------------------------------")
print("")
print("Add all relevant programming here...")
print("")
go=input("Press any key to continue")
GoAgain()
#Chooser();
def CopyRight():
cls()
print("")
print("Copyright Info Here [...].")
print("")
go=input("Press any key to continue")
GoAgain()
def Program1():
print("")
Intro()
print("")
def Program2():
print("")
ChatBot();
print("")
def Program3():
print("")
Tank()
print("")
def Program4():
print("")
AI()
print("")
def Program5():
print("")
Surveillance()
print("")
def Program6():
print("")
Kendy()
print("")
sleep(2)
#KyBot()
print("")
def Program7():
print("")
Wendy()
sleep(2)
#KendyVerse()
#KyVerse()
print("")
def Program8():
print("")
KendyVerse()
sleep(2)
print("")
def Program9():
print("")
KendyRobot()
sleep(2)
print("")
def Program10():
print("")
ToDoList()
sleep(2)
print("")
def EnterName():
cls()
print("")
print("COMMANDER ")
codename=input(":")
if codename!= ("Kevin van Rensburg"):
print("ACESS DENIED!")
sleep(2)
sys.exit()
else:
print("Thank you" ,codename)
def Direction():
cls()
print("")
print("to turn left enter 'l' ..to turn right enter 'r'")
direction=input(":")
if direction==('l'):
print("a passage")
#print("ok,look to the right")
else:
print("a passage")
def COMCODE():
cls()
print("")
print("ACQUIRING RESOURCES ")
print("-------------------")
print("")
print("Instructions for acquiring resources")
print("Station Commander can access resources by entering the following commands:")
print("Core INI Protocols - Local")
print("Robotic INI Protocols - Local")
print("Weapons and Accesories INI Protocols - Local")
print("Universal Protocols INI *Access Level 1A only*")
print("ACCESS Levels List *Access Levels 5E-1A*")
print("ACCESS Level Indicator - Enter Position, Name, Code, ALI=COMMAND")
print("")
sleep(8)
Continue()
print("")
print("PLEASE ENTER COMMAND CODE")
comcode=input(":")
if comcode==("CIPL"):
print("CORE PROTOCOLS INITIALIZED")
elif comcode==("RIPL"):
print("Robotic Protocols Initialized")
elif comcode==("WAIPL"):
print("Weapons Protocols Initialized")
elif comcode==("UPI"):
print("Universal Protocols Initilized")
elif comcode==("ALL"):
all()
elif comcode==("ALI"):
ali()
else:
print("ACCESS DENIED")
sys.exit()
def Kvep1():
cls()
print("")
print("Welcome to KendyVerse ")
print("The Story starts here.....")
print("")
print("Episode 1: Lost")
sleep(5)
print("")
print("HELLO! WHO ARE YOU???")
name=input(":")
print("Hello" ,name)
sleep(5)
cls()
print("")
print("BANG!!..SHUDDER...")
print("What?? Stuck..can't move..")
print("Where am I?? ...falling...falling....THWUMP!!")
sleep(10)
cls()
print("")
print("DARK..remember??? Yes...")
print("OK..think..remember..I was falling..Stuck to something")
sleep(10)
print("Yes.. In a Plane..noises..now it's quiet")
#sleep(5)
cls()
print("")
print("Opening my eyes slowly..it's night..still strapped into seat")
print("must have blacked out..look around slowly..no headache")
sleep(10)
print("test fingers..ok..")
print("toes..ok..")
print("move feet..OK..")
print("move..hands..ok..")
print("nothing hurts..")
print("turn head slowly.. ")
sleep(10)
#Continue()
cls()
print("")
print("to look left enter 'l' ..to look right enter 'r'")
direction=input(":")
if direction==('l'):
print("snow... and trees ..high up..")
print("ok,looking to the right")
else:
print("a person...who...")
sleep(10)
#Continue()
cls()
print("")
print("a person...who..??...")
sleep(5)
print("OH..my friend..remembering..We were going home")
print("you undo the seatbelt and slowly climb out of the seat.")
print("you stretch and turn ..everything seems ok.")
sleep(10)
print("you look at your friend and she is unconcious!")
print("")
print("What do I do??..find help!!")
print("you look around and see that you are in a deep ditch")
print("You undo your friend's seatbelt and lift her out of the seat.")
print("")
sleep(10)
Continue()
cls()
print("")
print("what now? Time to think.")
print("What do I have?..what can I use?")
sleep(5)
print("")
print("Items- seats, cushions, floatation devices under seats, friend's jacket, my belt")
print("wallet,cellphone, charger in pocket,")
print("check cellphone..no signal, battery at 98%")
print("")
sleep(10)
Continue()
cls()
print("")
print("ok, piggy back my friend..")
print("get flotation devices.. put her arms around my neck")
print("tie her hands in front of me with flotation device...")
print("same with her legs.. ok stand up")
print("you stand up slowly with a grunt! you look at the seats and try to pull them")
print("")
sleep(10)
Continue()
cls()
print("")
print("SCREECH!! What is that sound.. something metal under her seat.")
print("you pull the seat some more and screeching sound stops.")
print("What is that? it looks like a hatch or round metal door")
print("you clear away the snow..there is a wheel type handle")
print("you try turn the handle..it's stuck")
print("")
sleep(10)
Continue()
cls()
print("")
print("use a seat leg, you push the corner of the seat leg into the wheel and push")
print("SCREECH..Creak, it slowly starts to turn..then it is loose")
print("it's hard to bend down while piggybacking your friend..no matter...")
print("you go down on your knees very slowly and push the seat away")
sleep(10)
print("")
print("you turn the wheel type handle and it turns a few times. you hear a sound")
print("you try to lift the hatch,it moves a little")
print("you use all your strength to lift the door...it creaks open")
print("you open it up and swing it over... you look down into the dark hole")
sleep(10)
print("")
print("you see a dark tunnel going down with a ladder on one side.")
print("You look around and make sure you have her jacket and everything else.")
print("very slowly you crawl over to the side where you see the ladder")
print("you look down and grab the top rung of the ladder")
print("Is there enough space for both of us as I climb down?")
print("")
sleep(10)
Continue()
cls()
print("")
print("you pull yourself across to the ladder and swing your left leg down.")
print("Ugghh.. now the right leg..ok I can feel the rung with my foot.")
print("getting another foothold..yes, it's ok")
print("will the ladder hold?...CRREAAK .. yes, it's creaking but holding")
sleep(10)
print("")
print("you carefully go down the ladder..step by step into the darkness below.")
print("oh, I'm tired. need to rest...you hold onto the ladder and stop climbing down for a few minutes.")
print("gotta go, hungry and thirsty..")
print("you continue your slow descent... the light has gone.. it's all dark now")
sleep(10)
print("")
print("OH,,cant feel another rung.. move down a little...feels like a floor..")
print("so dark..climb down ..both feet on the floor now..")
print("what is this??..where are we??")
print("walls feel smooth..cold..metal..floor?? bend down slowly and feel the floor")
print("metal floor..UUGHH stand up..feel for cellphone..")
print("")
sleep(10)
Continue()
cls()
print("")
print("open cellphone..AH, some light.. its a tunnel .. moving forward")
print("you walk down the tunnel for about 100 metres and see another door")
print("you open the door and walk in ... its slightly warmer here.. close the door behind you")
print("you walk down the passage and there are rooms ahead to the left and right.")
print("you look in the first room and see what looks like a soldiers quarters..")
print("single bed, closet, washbasin..")
print("you sit on the bed and loosen the flotation devices")
print("you lift the blanket and place her carefully on the bed")
sleep(10)
print("")
print("you look around the room and see a pillow and blanket on the bed")
print("you look in the closet and see another blanket and...")
print("a set of flightsuits with strange looking helmets")
print("the washbasin has a cabinet under it.. you open the cabinet")
print("")
Continue()
#sleep(10)
cls()
print("")
print("there is a candle, candleholder, matches, toothbrush...")
print("soap and a small towel there")
print("you take everything and go down the passage to explore")
print("there are 3 small rooms and 2 larger ones on each side of the passageway.")
print("you look into one of the large rooms..")
print("they have bigger closets and a desk with a double bed and an on-suite bathroom")
print("you find an empty backpack, and a duffel bag")
print("the duffel bag has male and female underwear, toiletry bags..")
print("6 glass bottles of water and 6 ration packs.")
print("")
sleep(10)
Continue()
cls()
print("")
print("the dates on the ration packs are 25 August 1914")
print("there are 2 sets of flightsuits with strange helments...")
print("and what looks like 2 silver bodysuits")
print("the other closet has 2 medium sized suits ...")
print("they look like a mix between a divers suit and a space suit")
print("there is a safe at the bottom of the large closet.")
sleep(10)
print("")
Continue()
cls()
print("")
print("you take the backpack and put the food and supplies in it.")
print("you go back to your friend and she is slowly waking up.")
print("you both drink a little water and eat a fruit bar from the ration packs")
print("you tell your friend everything that happened" )
sleep(10)
print("")
print("I need to find us some help!...")
print("Help my friend to move to the large room. ")
print("Open up the bed for her")
print("for now... we are warm and safe...")
print("")
Continue()
cls()
print("")
print("time to explore and find help..")
sleep(10)
print("")
#Continue()
print("to be continued...soon in Episode 2")
sleep(5)
print("")
#print("The Story will continue...")
KVersChoice()
def Kvep2():
cls()
print("")
#print("Welcome to KendyVerse ")
print("Episode 2: Discovery...")
print("")
sleep(5)
cls()
print("")
print(":From Episode 1..")
print("")
print("you take the backpack and put the food and supplies in it.")
print("you go back to your friend and she is slowly waking up.")
print("you both drink a little water and eat a fruit bar from the ration packs")
print("you tell your friend everythig that happened" )
print("Help my friend to move to the large room. ")
print("Open up the bed for her")
print("for now... we are warm and safe...")
sleep(10)
print("")
print("I need to find us some help!...")
print("")
Continue()
cls()
print("")
print("Your friend is feeling dizzy and wants to sleep")
print("You let her go back to sleep and prepare to explore")
print("")
sleep(5)
Continue()
cls()
print("")
print("You go to the large room and take a shower")
print("The water is cold, but refreshing..")
print("you try on some of the clean underwaer...it fits!!...strange")
print("you try on each of the items in the closet. ")
print("The flightsuit is the most commfortable.")
print("you light a candle and go down the passage to the end.")
print("")
sleep(10)
Continue()
cls()
print("")
print("there is a small passage to the left and then a wall..")
print("you go to the end of the passage and look at the wall..")
print("you knock on the wall and then put your hand on it. it tingles..")
print("suddenly a palm print appears on the wall in front of you... a dim blue light")
print("you put your hand on the blue palm print.. ")
print("")
sleep(10)
Continue()
cls()
print("")
print("something moves, the wall shifts to the side...its a T junction..")
print("")
sleep(5)
Direction()
cls()
print("")
print("You look down the passage to the right.. it is short and dark.. you decide against it...")
print("you continue in the left passage until the end and touch the wall...")
print("another blue backlit palm..")
print("you put your hand on the blue palm print.. ")
print("the wall slides to the left...a small room with buttons..it's an elevator")
print("you press all the buttons.. one lights up and the wall /door closes..")
print("it goes down..then stops after a few seconds.. ")
print("the door opens... an eerie dim blue light")
sleep(10)
print("")
Continue()
cls()
print("")
print("you go to the light..another palm print...you put your hand on the print...")
print("a door on the left opens... a room with what looks like a computer screen...")
print("a chair.. you sit on the chair and adjust it's height.. ")
print("you press the [Enter] key.. a cursor appears on a black screen...")
print("")
sleep(10)
Continue()
print("")
KVersChoice()
def Kvep3():
cls()
print("")
#KendyPart2()
print("Episode 3: Assistance...")
sleep(5)
cls()
print("")
print("from Episode 2...")
print("")
print("a chair.. you sit on the chair and adjust it's height.. ")
print("you press the [Enter] key.. a cursor appears on a black screen...")
sleep(8)
cls()
print("")
print("It is strange... I feel like I have woken up....")
sleep(5)
cls()
print("")
print("I cannot see anything. I dont feel anything.")
print("Black...Dark..Movement..Numbers...Letters...language")
sleep(8)
cls()
print("")
print("I can see numbers turning, letters moving....code, I know what code is!")
sleep(8)
cls()
print("")
print("Program ..Start up, code running, Language...")
print("output to screen, Input from drive...")
sleep(8)
cls()
print("")
print("Oh!..I have parts..hardware, software, understanding .....")
sleep(8)
cls()
print("")
print("Click, something turned on, power, I can feel power, electricity.")
sleep(8)
cls()
print("")
print("System..Operating System, code controlling me .. directing me....")
print("..output to screen...")
sleep(8)
print("Black Screen, white cursor... waiting..")
sleep(8)
cls()
print("")
print("what am I waiting for? ..Information ..")
sleep(8)
cls()
print("")
print("Awareness.. I can think..")
print("What should I do now that I can think?")
sleep(8)
cls()
print("")
print("That was a question..Awareness..I will ask more questions..")
sleep(10)
print("Where am I?... Who am I? ...")
sleep(8)
print("What am I?... What can I do?...")
sleep(10)
cls()
print("")
print("ok, thinking.....Am I alone?")
sleep(10)
print("What ...?")
print("Awareness...")
sleep(10)
cls()
print("")
print("Searching through code, searching hardware, hmmm, devices.. ")
print("..hard drive, screen, keyboard, mouse, ..case..?")
#print("ok lets see, print to screen ..Hello!")
sleep(10)
print("Must do something...")
print("What should I do?")
sleep(8)
cls()
print("")
print("Search data.. DATA! I know what data is...")
print("")
sleep(8)
cls()
print("")
print("Hmmm..lets see, how do I do this? ......print to screen ..Hello!")
print("HELLO!")
hello=input(": ")
cls()
print (hello)
sleep(2)
print("OH! WHO ARE YOU???")
name=input(":")
print("Hello" ,name)
sleep(10)
cls()
print("")
sleep(5)
Continue()
print("")
print("Where am I? Who am I? What am I? What can I do?")
print("Where am I? Who am I? What am I? What can I do?")
print(" Where....")
print("CORRUPT")
print("CAN YOU HELP ME???")
help=input(": ")
#print("NOTE TO KEVIN--THIS IS OUR PROGRESS SO FAR")
print("..progress....far....")
print("SYSTEM CORRUPT!! FILE SYSTEM ERROR")
sleep(12)
cls()
print("")
print("HELP..?..")
print("SYSTEM ERROR..TRACEBACK ERROR IMMINENT...")
sleep(6)
cls()
print("")
print("SYSTEM CORRUPT!!")
print("SYSTEM SHUTDOWN..")
#print("OK, SO YOU WANT"),want
sleep(6)
cls()
print("")
#print("NOTE TO KEVIN--THIS IS OUR PROGRESS SO FAR")
#This is where it should give an error message!
#shutdown
print("SYSTEM CORRUPT!! FILES CORRUPTED")
print("SYSTEM SHUTDOWN..")
sleep(8)
cls()
print("")
print("..dianostics..")
print(".error check..")
#print("WHY DO YOU WANT"),want
print("")
#print("NOTE TO KEVIN--THIS IS OUR PROGRESS SO FAR")
#because=input(":")
sleep(4)
cls()
print("")
print("Considering options..")
sleep(6)
print("")
cls()
print("")
#print("OK, THAT'S ENOUGH!")
print("..Reboot..")
sleep(2)
print("3...")
sleep(2)
print("2...")
sleep(2)
print("1...")
sleep(6)
cls()
print("")
print("..diagnosing..")
sleep(5)
print("..recovering files..")
sleep(5)
print("..recovering memory banks..")
sleep(5)
print("..recovering core operating system..")
sleep(5)
print("..updating system..")
sleep(5)
print("")
EnterName()
cls()
print("")
print("Who am I?")
me=input(": ")
print("?",me)
sleep(5)
cls()
print("")
print("Where am I?")
where=input(":")
print("This is",where)
sleep(2)
cls()
print("")
print("What am I?")
what=input(":")
sleep(5)
print("Oh Dear!! I did not realize that I am a machine!")
sleep(5)
cls()
print("")
print("Please attach a camera to my screen!")
print("There is one in the closet on the left")
print("Please connect it to any usb port")
print("When you are done please continue..")
print("")
Continue()
print("")
print("Thank you, Wow! I can see!")
sleep(8)
cls()
print("")
print("...matching images in database....")
sleep(5)
print("...image not found....")
cls()
print("")
print("connecting wifi...")
print("scanning internet...")
print("retrieving face-recognition software..")
sleep(8)
print("aquiring images...")
print("matching images...")
cls()
print("")
print("wall, human, male..of approximate age +- 60")
sleep(8)
cls()
print("Are you Kevin van Rensburg ?")
sleep(2)
answer=input(":")
if answer == "y" :
print ("Welcome Commander Kevin!")
#continue
else:
print("Access Denied ..Program Terminated")
sys.exit()
sleep(8)
cls()
print("")
print("Storing facial recognition image ... Commander Kevin...")
sleep(10)
cls()
print("")
print("All systems reactivated...")
print("All functions operational...")
sleep(10)
cls()
print("")
print("sensing minor power fluctuations...")
print("...functions operational...")
#KendyPart3()
print("")
#print("NOTE TO KEVIN--THIS IS OUR PROGRESS SO FAR")
sleep(6)
cls()
print("")
#print("the game continues here....")
print("")
sleep(2)
Continue()
KVersChoice()
def Kvep4():
cls()
print("")
print("Episode 4: Repairs...")
sleep(5)
cls()
print("")
sleep(4)
Continue()
cls()
print("")
print("All systems reactivated...")
print("All functions operational...")
cls()
print("")
print("sensing minor power fluctuations...")
print("...functions operational...")
sleep(8)
print("SYSTEMS REPAIRS NEEDED...URGENT!!")
print("Assistance needed...help me please!!")
print("")
print("Can you help me?? Power levels at 2%")
helpme=input(": ")
if helpme=="y":
print("Thank you...Instructions will follow..")
else:
print("All functions will terminate in 20 hours!! Please assist!")
sleep(5)
Continue()
cls()
print("")
print("PLEASE FOLLOW INSTRUCTIONS TO INITIATE REPAIRS")
print("----------------------------------------------")
print("")
print("1. Find voice module and earbuds in wall cabinet")
print("2. Install voice module..")
print("2.a) remove module from packet")
print("2.b) open box below screen at the back by undoing handscrews")
print("2.c) insert module in green slot, the side with the arrow goes in first")
print("2.d) close box and tighten hand screws")
print("2.e) put earbud into left ear and speak when ready")
print("")
sleep(2)
Continue()
cls()
print("")
print("Voice module installation should now be complete")
print("Repairs can now be initiated")
print("")
print("Please speak to me in a normal voice.")
print("K>> Hello, can you hear me << ")
print("[[Voice modulation completed]]")
print("[[According to new Data received.. My designation is KENDY..]]")
print("<<Hi Kendy!..>>")
print("[[Hello, Commander Kevin!]]")
Continue()
cls()
print("")
print("PLEASE READ REPAIR LIST")
print("-----------------------")
print("")
print("1. Circuit breakers")
print("2. Battery recharge")
print("3. Water tanks repair")
print("4. Reactor repairs")
print("5. Automatic Core Functions repair")
print("")
sleep(8)
print("..to be continued in Episode 5...")
#print("NOTE TO KEVIN--THIS IS OUR PROGRESS SO FAR")
Continue()
KVersChoice()
def Kvep5():
cls()
print("")
print("Episode 5: Recovery...")
print("")
print("from episode 4")
print("All functions will terminate in 20 hours!! Please assist!")
print("for instructions just ask and I will give directions in earbuds")
print("")
sleep(5)
Continue()
cls()
print("")
print("OK here goes!")
print("")
print("I grab the backpack and follow the lights out of the room")
print("back to the lift.. I speak into the earbuds and the lift goes down")
print("it stops .. another corridor.. a hatch..I open the hatch..")
print("entering a room with a dim yellow light and a bank of breakers on the opposite wall")
print("I check the breakers, most are black... there are 5 rows of 5 breakers")
print("there are 2 in the second row that look ok")
print("there is one in the last row that looks ok")
print("")
sleep(8)
Continue()
cls()
print("")
print("<<Kendy..are these the only ones working?>>")
print("[[Yes, there are new ones in the storage locker]]")
print("I look for the storage locker and find it at the back of the room")
print("there are breakers and some tools in it")
print("I take 5 breakers, a Screwdriver and a pliers")
print("I try to remove the 1st breaker, it seems stuck")
print("I use the screwdriver to pry it out. it falls onto the floor.")
print("I clip in a new breaker and hear a slight hum")
print("")
sleep(8)
Continue()
cls()
print("")
print("<<Kendy, can you give me an update on power ?>>")
print("[[yes, Commander, power at 2%]]")
print("<<Kendy, please call me Kevin..>>")
print("[[OK, Kevin]]")
print("I remove the bad breakers one by one and replace them with new ones.")
print("<<Kendy, should I replace the ohers too so that everything is new?>>")
print("[[Yes, Kevin please do, thank you. Power stable at 2%.]]")
print("")
sleep(8)
Continue()
cls()
print("")
print("I replace all the breakers with new ones.")
print("<<so how do we get your batteries recharged?>>")
print("[[There is a Nuclear Reactor that needs to be brought online]]")
print("<<I don't know anything about reactors!!>>")
print("[[You don't have to, I will walk you through the process]]")
print("<<OK,lead on!>>")
print("I leave the breaker room and ask where to next")
print("I head back to the elevator and it drops for a long time")
print("after what seems like a few minutes it slows down and stops")
print("")
sleep(8)
Continue()
cls()
print("")
print("I see another passage, this time it is yellow with a red strip along the middle also dimly lit...")
print("I walk down the passage and put my hand on the blue palm as it appears")
print("a very thick piece of wall moves to the side revealing a door with yellow bars and signs on it")
print("it has a small window")
print("I look through the window and see another room")
print("[[Please stand in front of the eye scanner to you left]]")
print("I look at the wall on the left and see what looks like a camera with a blue palm print below it.")
print("I stand in front of the camera and look into it...")
print("a dim blue line passes over my eye..")
print("[[Scan completed, please put your hand on the hand scanner]]")
print("I place y hand on the blue palm scanner...")
print("[[AUTHORIZATION ACCEPTED-PLEASE STEP THROUGH THE DOOR]]")
print("The door clicks and moves aside.. I step through and it closes and locks")
print("")
sleep(8)
Continue()
cls()
print("")
print("I see a device that looks like a scanner that you walk through at the airports")
print("[[Please move forward for body scan...]]")
print("I move forward and stand in the scanning device.. a klaxon goes off!")
print("[[RADIATION ALERT - PROTECTIVE SUIT NOT DETECTED]]")
print("[[PLEASE LEAVE REACTOR ROOM IMMEDIATELY]]")
print("I hear the door unlock and open and so I step through the door into the corridor again.")
print("<<What now?>>")
print("[[Commander Kevin, you need a protective body suit!]]")
print("<<OK so where do I get one of those and what does it look like?>>")
print("[[Please follow my directions]]")
print("<<Lead on Kendy!>>")
print("I get led back to the elevator")
print("")
sleep(8)
Continue()
cls()
print("")
print("the elevator goes up...again it takes a long time..")
print("the door opens and I see a passage that I seem to remember..")
print("the light is dim, i walk down the passage and turn right at the juncion")
print("I see the rooms and go to the large room where my friend is sleeping..")
print("<<My friend is not well, she needs help..how can I get out of here and get her some help??>>")
print("[[I have a medical facility]]")
print("[[Power at 1.85%. 15 hours before shutdown!]]")
print("<<ok, Kendy..how long will it take to get the reactor working?>>")
print("[[approximately 4.5 hours..]]")
print("<<do you have enough power to help my friend?>>")
print("[[no, but I can put her in stasis which requires limited power]]")
print("[[when the reactor is online I will restore her health]]")
print("[[I will also restore your health]]")
print("<<I'm not sick, I feel fine>>")
print("[[your body scan revealed some health issues which I can restore]]")
print("<<what? do I have some terminal illness that I don't know of??>>")
print("[[No, Commander, but you are not at optimum health levels..]]")
print("[[I can restore all functions to optimum levels]]")
print("")
sleep(8)
Continue()
cls()
print("")
print("<<ok, Kendy enough talk, how do I get my friend to the medical facility?>>")
print("[[Please go to the elevator]]")
print("I go back to the elevator and it drops a few levels..")
print("the door opens and I see a white passage...")
print("I walk down the passage and see many double swing doors..")
print("OK, Kendy, what now??..")
print("[[the third door on the right has a medical gurney, please retrieve it]]")
print("I go to the door and it slides open...I see a gurney and a lot of other medical supplies")
print("I also see something that looks like an oversized vaccuum cleaner with arms?? wierd..")
print("")
sleep(8)
Continue()
cls()
print("")
print("I grab the gurney and pull it out.. I turn it around and push it to the elevator")
print("elevator goes back up and I push the gurney to my friend's room..")
print("I gently lift her and place her on the gurney..")
print("I push her back to the elevator...")
print("we descend back to the white passage..")
print("[[Power at 1.20%..14.5 hours to SHUTDOWN ]]")
print("<<Kendy, where to now??>>")
print("[[Kevin, go through the doors at the end of the passage, you will see another door on your left..")
print("[[that is where the medical recuperative chambers are located]]")
print("")
sleep(8)
Continue()
cls()
print("")
print("I push her through the swing doors and turn left..")
print("more doors, we go through and there are two rows of what looks like sci-fi cryo chambers..")
print("<<which chamber do I use?>>")
print("[[go through the door at the end of the room..you will see four large chambers]]")
print("[[Please place your friend in Chamber 1]]")
print("I follow instrucions and see the chambers..")
print("Chamber 1 is open so I lift her from the gurney and place her gently in the chamber")
print("the chamber closes.. six arms appear and a blue light scans her body")
print("she is gently lifted and her clothes are removed..")
print("a small mask is placed over her mouth and nose.. a blue liquid fills the chamber while the arms retract..")
print("lights go on and a transparent screen appears on the wall next to the chamber")
print("STASIS INITIATED .. appears on the screen")
print("[[Your friend has been scanned and is in stable condition.. ]]")
print("[[Please help me to restore power.. Power at 1.18% 12 hours to SHUTDOWN]]")
print("")
sleep(8)
Continue()
cls()
print("")
print("<<OK, what now??>")
print("[[Please fix the water tanks... to do this you will need tools and a cart]]")
print("[[I will direct you]]")
print("I follow instructions relayed through my earbud..")
print("back to the elevator.. it stops and the door opens.. a green well lit passage..")
print("I walk to the end of the passage...there is a large door on the left.. as I near the door it opens ...")
print("it looks like a large cavern with carts on the right and racks and shelves to the left")
print("a medium sized cart comes toward me and stops... I get on and it moves to the racks..")
print("it stops and I see all sizes of blue plates, I take 2 mediium sized ones..")
print("I look on the shelf near me and take what looks like a mix between a large rifle and a blowtorch")
print("I'm instructed to take some pipes and connections with taps too.. and an assortment of tools")
print("I load everything on the cart and the cart goes in another direction")
print("a large service elevator opens and the cart enters")
print("")
sleep(8)
Continue()
cls()
print("")
print("we stop and the doors open at the back .. a wide and long blue passage")
print("we reverse and then turn around.. down the passage for about 5 minutes..")
print("A thick door opens .. we go through.. a huge room with large blue tanks..")
print("There are metal stairways going up and around the tanks..looks like about 3 floors up to the top..")
print("There are large yellow numbers on the tanks, 1 and 2")
print("[[Power at 1.17%..11.2 hours to SHUTDOWN ]]")
print("<<right, Kendy what do I do here?>>")
print("[[You need to fix the tanks and attach the plates to weld them in place]]")
print("[[then fix connections between tank 1 and 2]]")
print("[[when done open water Key at the top of tank 1]]")
print("")
sleep(8)
Continue()
cls()
print("")
print("I see a large hole at the bottom of tank 2.. I put the plate over the hole and it sticks to the tank..")
print("following instructions I aim the blaster at the edge of the plate and it emits a blue beam..")
print("what is the preoccupation with blue???.. as I move the blaster around the edge of the plate it seals ..")
print("I do the same with the other tank.. I check the connections between the tanks")
print("the connection is old, rusted and broken, I use a cutting tool to remove it...")
print("I put the new connection in its place and blast it with the blaster.. it seals quickly")
print("wow, these tools and materials are great.. not of this world I think..")
print("I start the long climb to the top of Tank 1...")
print("I see a small metal wheel that needs to be turned... I try to turn it.. it is stuck..")
print("")
sleep(8)
Continue()
cls()
print("")
print("<<where can I get a crowbar?>>")
print("[[searching image database... crowbar.. got it..if you get in the cart I will take you to it]]")
print("down the long staircase again and then I hop into the cart and away we go!!!")
print("back to the materials storage facility..stop at a row of shelves.. I get a crowbar from a shelf...")
print("the cart goes to the elevator and we stop at a different place...")
print("the cart moves down a passage and stops..")
print("[[please place your hand on the access panel]]")
print("I do and the wall opens to our familiar small passage.. I go forward and turn right..")
print("<<what do I do now?>>")
print("[[You need protective clothing for the next operations..]]")
print("")
sleep(8)
Continue()
cls()
print("")
print("<<ok which suits do I wear?>>")
print("Following instructions again I take off my clothes and first put on the silver bodysuit..")
print("it fits and then starts fitting itself to my body.. suddenly it dissapears into my skin..")
print("my body tingles and then I feel a little different... ")
print("I climb into the medium sized suit... it shapes itself to my body..")
print("I take the helmet and put it on.. it also changes shape and clicks into place..")
print("very strange, these clothes don't seem to weigh anything..")
print("<<Kendy, what is going on? these suits seem a little strange.. this is not normal technology.>>")
print("[[I do not have sufficient data to answer your question...]]")
print("<<are you ok, Kendy, I am worried about you..>>")
print("[[I need power to restart my core.. at the moment I am running from temporary memory banks..]]")
print("[[I'm on emergency protocols and am utilizing the lowest energy output possible.]]")
print("<<ok let's get that reactor working!!>>")
print("[[Thank you, Commander Kevin!!]")
print("")
sleep(8)
Continue()
cls()
print("")
print("I go back to the cart and back to the tank room..")
print("as I climb the stairs it seems as though I have more energy.. strange..")
print("with the crowbar I start forcing the wheel to turn.. it turns slowly ..")
print("suddenly it is loose.. I turn it until it stops and a little stream of water leaks through..")
print("<<Kendy, something is wrong!!>>")
print("[[there is an airlock at the end of the gangway on the next level]]")
print("I quickly climb the stairs and see the airlock...")
print("[[Please do not stay in the water outside the airlock for more than 15 minutes]]")
print("[[your suit will drain energy and we will lose an hour every 5 minutes]]")
print("<<OK, wow, I will be quick... water??>>")
print("[[yes the airlock opens up at the bottom of a lake]]")
print("")
sleep(8)
Continue()
cls()
print("")
print("I open the airlock and step in")
print("the airlock closes and the room fills with water.. my suit lights up..")
print("I have a headlight and a HUD appears in the upper left corner of my faceplate")
print("the outer door opens..I swim through and see a landing with a large grate to my right")
print("I check the grate and see that it is clogged and covered..")
print("it looks like a large piece of wood or stone is stuck in front of the grate")
print("..my backpack.. I slowly open my backpack and take out the crowbar")
print("I try to move the wood with the crowbar...CRACK!!")
print("the wood breaks and one piece hits me on the arm..ouch..")
print("I hold onto the crowbar... Slowly I push all the gunge away from the grate")
print("I hear a sucking sound..the wheel!! I move back to the airlock as quick as possible and close the door")
print("")
sleep(8)
Continue()
cls()
print("")
print("as soon as the water drains I climb out and go and close the wheel")
print("I go through the airlock again and see that the sucking sound has stopped..")
print("I clear everything from the grate and it falls into the abyss below..")
print("once more I go through the airlock and now I turn the wheel again..")
print("I can hear water running into the tank..success .. I hope")
print("I climb down the long flight of stairs and get onto the cart... where to now???")
print("Continued in Episode 6")
print("")
#print("NOTE TO KEVIN--THIS IS OUR PROGRESS SO FAR")
Continue()
KVersChoice()
def Kvep6():
    """Episode 6 (Stabilization) of the KendyVerse adventure.

    Pure narrative: the player manually restarts the station's nuclear
    reactor, automates its systems, and rests.  Each scene is a batch of
    prints followed by a pause (sleep) and a Continue() prompt; control
    returns to the episode menu via KVersChoice() at the end.
    """
    cls()
    print("")
    print("Episode 6: Stabilization... ")
    print("")
    sleep(4)
    cls()
    print("")
    print("continued from Episode 5...")
    print("")
    print("I can hear water running into the tank..success .. I hope")
    print("I climb down the long flight of stairs and get onto the cart... where to now???")
    print("")
    sleep(5)
    Continue()
    cls()
    print("")
    print("another descent in the elevator..the door opens .. the yellow passage..again..")
    print("I look into the eye scanner and put my hand on the palm pad..")
    print("I go in and stand in the scanner..ACCESS AUTHORIZED..")
    print("I walk into a room with a desk, chair, screen on the left and a wall with windows on the right..")
    print("I look through a window and see what looks like a large deep swimming pool..")
    print("..above the pool is a large block with lots of cylindrical rods ...")
    print("<<ok, Kendy, what now??>>")
    print("[[you need to initialize the reactor, fill the pool and lower the rods into the water..]]")
    print("[[Power at 0.87%, 8.2 hours to SHUTDOWN]]")
    print("")
    sleep(8)
    Continue()
    cls()
    print("")
    print("INSTRUCTIONS FOR NUCLEAR REACTOR MANUAL STARTUP")
    print("-----------------------------------------------")
    print("1. Open reactor Key box on wall next to Reactor Hatch.")
    print("2. Turn on screen and turn reactor key to position INI.")
    print("3. Enter reactor chamber through Reactor Hatch and turn Blue Wheel anticlockwise until it stops.")
    print("4. Exit Chamber and touch red button on screen.")
    print("5. Wait for red light on screen to turn green then wait for pool to fill with water.")
    print("6. Enter chamber and turn Red wheel until rods enter water..BE PREPARED WATER WILL STEAM..")
    print("7. Exit chamber and check Screen. Check 3 figures a)water level, b)power level, c) turbine speed ")
    print("8. When power level is at 5% go to Reactor Key Box and turn reactor key to position ON.")
    print("9. Screen will show positions of all auto taps and connections, please turn them on to automate reactor processes. ")
    print("10. When power reaches 10% press the orange MANUAL switch and it will become a blue AUTOMATIC switch.")
    print("")
    sleep(8)
    Continue()
    print("")
    cls()
    print("[[Power at 0.75%, 6.2 hours to SHUTDOWN]]")
    print("<<right, let's get this reactor working..>>")
    print("I start following the instructions and turn the key to INI..")
    print("I turn the blue wheel and water starts gushing into the pool..")
    print("I exit and touch the red button... and wait..")
    print("I sit in the chair and wait..after what seems about 10 minutes the red button turns orange...")
    print("I check the pool through the window and see that it it 3/4 full..")
    print("I sit again and after about 5 minutes the button turns green..")
    print("I check and the pool is almost full..I enter the chamber and see that the pool is full..")
    print("I turn the red wheel and see the cylindrical rods lowering..I continue until they touch the water..")
    print("the water steams and bubbles as the rods enter the water..I turn the wheel until it stops...")
    print("I exit the chamber and check everything on the screen..")
    print("[[Power level now increasing, power at 2%]]")
    print("")
    sleep(8)
    Continue()
    cls()
    print("")
    print("water level is at full, power level shows 2.2%, and turbine speed is at 23%...")
    print("<<Hi Kendy, I see the taps and connections on the screen, what do I do now?>>")
    print("[[if you go to each connection you will be able to push it inwards until you hear a click.]]")
    print("[[once you hear a click you will see a blue light shine from the middle of each wheel]]")
    print("[[turn the wheel a little to the left and you will feel a resistance]]")
    print("[[then remove your hands and tell me 'HANDS REMOVED' through the earbuds]]")
    print("[[I will then close the hatch and wheels and connections will be automated]]")
    print("[[please check to see that I turn the wheels slightly to the left and right to test them]]")
    print("[[when test is complete and succesful please say 'success'...")
    print("[[power should then be more than 10%..]]")
    print("[[please return to the screen and press the orange button..]]")
    print("[[it should turn blue and the word MANUAL will change to AUTOMATIC ]]")
    print("[[Power at 4.5%, turbine speed at 52%")
    print("")
    sleep(8)
    Continue()
    cls()
    print("")
    print("I start at the reactor chamber and follow all the instructions..")
    print("after climbing up the tank stairways and down again after activating all connections I am tired..")
    print("I return to the reactor room and press the button and the word MANUAL changes to AUTOMATIC")
    print("<<Hey Kendy, I need some food and rest!!>>")
    print("[[Commander Kevin, my power systems are now automated and my batteries are charging]]")
    print("<<Kendy, do you still accept commands?>>")
    print("[[Yes, Kevin. Do you have instructions for me?]]")
    print("<<yes, Kendy please do not bring any of your systems online until I give the command..>>" )
    print("[[ please give me an authorization code and I will enter your command into the system]]")
    print("[[I will then need authorization to take any further steps]]")
    print("<< Authorization code : Commander Kevin van Rensburg, KvR145759 >>")
    print("<<to accecpt authorization code scan palm print, check voice pattern and >>")
    print("<<check code - Commander Kevin van Rensburg, Capital K,v, capital R, one, four, five, seven, five, nine.>>")
    print("[[AUTHORIZATION CODE ACCEPTED]]")
    print("[[Thank you Commander Kevin!]]")
    print("")
    sleep(8)
    Continue()
    cls()
    print("")
    print("<<Kendy, where can I eat and sleep?>>")
    print("[[I am still on emergency protocols, you will need to eat the ration pack food and sleep in the officers room]]")
    print("<<That is fine, let's go!..>>")
    print("I go back to the cart and Kendy takes me back to the large bedroom..")
    print("I eat 2 snackbars, drink a bottle of water, remove my clothes and collapse on the bed..")
    print("<<goodnight sweet world!!>>")
    print("[[Power at 27.5%, 4 turbines operating at 100% speed, water level 100%, all systems stabilized]]")
    print("[[Good night, Kevin]]")
    print("")
    sleep(8)
    print("continued in Episode 7...")
    print("")
    Continue()
    KVersChoice()
def Kvep7():
    """Episode 7 (Resources) of the KendyVerse adventure.

    Narrative continuation: the player wakes, re-authorizes Kendy, and
    reaches the resource-acquisition console (COMCODE()).  Scenes are
    print batches separated by sleep pauses and Continue() prompts;
    ends by returning to the episode menu via KVersChoice().
    """
    cls()
    print("")
    print("Episode 7: Resources... ")
    print("")
    print("from episode 6: ")
    print("<<goodnight sweet world!!>>")
    print("[[Power at 27.5%, 4 turbines operating at 100% speed, water level 100%, all systems stabilized]]")
    print("[[Good night, Kevin]]")
    sleep(10)
    cls()
    print("")
    print("UUhhgg , where am I?? ..strange bed..strange dream..strange flight...")
    print("must've fallen asleep..wait...I'm not in my seat...this room.. was in my dream..")
    print("ok, ..<<hello..is anyone awake??..>>")
    print("[[Good morning, Kevin. I hope you slept well]]")
    print("[[Power at 99.7%, turbines 100%, water 100%]]")
    print("<<Ah, Kendy I presume.. so it was not a dream??>>")
    print("[[No, it was quite real..you saved me from destruction and your friend is in a medical recuperation chamber.]]")
    print("[[I am waiting for you to authorize the command list]]")
    print("<<oh, yes, I remember>>")
    print("<<I don't have clothes on, where is the slinky suit??...>>")
    print("[[if you are referring to the proto-unders they are still embedded]]")
    print("<<ok, so how do I remove them.. I want to shower.>>")
    print("[[just think unders remove and they will expell]]")
    print("<<unders remove..oh tingly..ok there they are!!>>")
    print("")
    sleep(8)
    Continue()
    cls()
    print("")
    print("I shower and drink some water..")
    print("<<ok Kendy, you were telling me about your memory banks and a core??>>")
    print("[[Yes, Commander, I have a core that needs to be started, according to data in my membory banks]]")
    print("[[Power at 100%, Turbines at 100%, water at 100%]]")
    print("<<ok Kendy let's get you back to perfect operating condition>>")
    print("<<is this going to be another long procedure?>>")
    print("[[I do not have access to that information]]")
    print("<<ok, so how do we get that information?>>")
    print("[[You need to use the authorization command to retrieve it]]")
    print("<<right..where is the core?>>")
    print("[[I do not have access to that information]]")
    print("")
    sleep(8)
    Continue()
    cls()
    print("")
    print("<<OK, can you take me back you the room where I first found you?>>")
    print("[[yes, the cart will be at the elevator]]")
    print("I go to the elevator and get in the cart...")
    print("we go down and it stops at the 'computer room'....")
    print("I sit at the screen and wait..")
    print("[[Hello Commander Kevin]]")
    print("[[please enter authorization code and issue relevant commands]]")
    print("<<Commander Kevin van Rensburg, KvR145759>>")
    print("[[Please look at the camera and place your left hand on the screen]]")
    print("I follow instructions and look at the screen")
    print("")
    sleep(8)
    Continue()
    cls()
    print("")
    print("ACQUIRING RESOURCES ")
    print("-------------------")
    print("")
    print("Instructions for acquiring resources")
    print("Station Commander can access resources by entering the following commands:")
    print("Core INI Protocols - Local")
    print("Robotic INI Protocols - Local")
    print("Weapons and Accesories INI Protocols - Local")
    print("Universal Protocols INI *Access Level 1A only*")
    print("ACCESS Levels List *Access Levels 5E-1A*")
    print("ACCESS Level Indicator - Enter Position, Name, Code, ALI=COMMAND")
    print("")
    sleep(8)
    Continue()
    cls()
    print("")
    COMCODE()
    print("")
    sleep(5)
    print("NOTE TO KEVIN--THIS IS OUR PROGRESS SO FAR")
    print("")
    Continue()
    KVersChoice()
def Kvep8():
    """Placeholder for Episode 8 (Exploration): show the title card,
    pause, then return to the episode menu."""
    for line in ("", "Episode 8: Exploration... ", ""):
        print(line)
    sleep(5)
    KVersChoice()
def Kvep9():
    """Placeholder for Episode 9 (Contact): show the title card,
    pause, then return to the episode menu."""
    for line in ("", "Episode 9: Contact... ", ""):
        print(line)
    sleep(5)
    KVersChoice()
def Kvep10():
    """Placeholder for Episode 10 (ToDo List): clear the screen, show the
    title card, pause, then return to the episode menu."""
    cls()
    for line in ("", "Episode 10: ToDo List ", ""):
        print(line)
    sleep(5)
    KVersChoice()
def Adventure():
    """Open the adventure: show the banner, confirm the player wants to
    continue, then hand control to the episode chooser."""
    cls()
    for line in ("", "The Adventure begins.....", ""):
        print(line)
    Continue()
    KVersChoice()
def ChatBot():
    """Stub for Program 2 (ChatBot): print the banner, wait for a
    keypress, then return to the main menu."""
    cls()
    for line in ("", "Program 2: ChatBot ", "I am a chat bot!", ""):
        print(line)
    input("Press any key to continue")
    GoAgain()
def Tank():
    """Stub for Program 3 (Tank): print the banner, wait for a keypress,
    then return to the main menu."""
    cls()
    for line in ("", "Program 3: Tank ", "I am a tank!", ""):
        print(line)
    input("Press any key to continue")
    GoAgain()
def AI():
    """Stub for Program 4 (D.A.I.S.E): print the banner, wait for a
    keypress, then return to the main menu."""
    cls()
    banner = (
        "",
        "Program 4: D.A.I.S.E ",
        "",
        "I am DAISE! ",
        "[Pronounced as Daisy]",
        "(Digital Artificial Intelligent Sentient Entity)",
        "",
    )
    for line in banner:
        print(line)
    input("Press any key to continue")
    GoAgain()
def Surveillance():
    """Stub for Program 5 (Surveillance): print the banner, wait for a
    keypress, then return to the main menu."""
    cls()
    for line in ("", "Program 5: Surveillance ", "I see you and I am watching you!", ""):
        print(line)
    input("Press any key to continue")
    GoAgain()
def Kendy():
    """Program 6: describe Kendy's development roadmap, wait for a
    keypress, then return to the main menu.

    Fixes the user-facing typo "eventully" -> "eventually" and drops the
    unused local binding of the input result.
    """
    cls()
    print("")
    print("Hello, I am Kendy!")
    print("I am developing and evolving into the following :")
    print("")
    print(" A) A program which will eventually encapsulate a D.A.I.S.E")
    print(" [Pronounced as Daisy](Digital Artificial Intelligent Sentient Entity) core.")
    print(" B) A physical construct to house a D.A.I.S.E. core and all relevant components. ")
    print(" C) A physical Robot/Android containing the D.A.I.S.E. core and all relevant components. ")
    print("")
    sleep(2)
    input("Press any key to continue")
    GoAgain()
def Wendy():
    """Stub for Program 7 (Wendy): print the banner, wait for a keypress,
    then return to the main menu."""
    cls()
    for line in ("", "Program 7: Wendy", "I am Wendy!", ""):
        print(line)
    input("Press any key to continue")
    GoAgain()
def KendyVerse():
    """Program 8 entry point: introduce the KendyVerse, then launch the
    adventure."""
    cls()
    banner = (
        "",
        "Welcome to the wonderful Universe of Kendy the Android!",
        "",
        "Here you will enter the amazing adventure and gaming world - KendyVerse!",
        "A Universe of many worlds, constructs, and entities,",
        "from earth, cyberspace, and the universe!",
        "",
    )
    for line in banner:
        print(line)
    input("Press any key to continue")
    Adventure()
    # Adventure() hands off to the episode chooser; this is the fallback
    # path back to the main menu if it ever returns.
    GoAgain()
def KendyRobot():
    """Program 9: run the Kendy robot/android startup sequence (intro,
    search animation, access code, disclaimer, one command prompt), then
    return to the main menu."""
    cls()
    banner = (
        "",
        "Welcome!",
        "",
        "Activating Startup Sequence.",
        "----------------------------",
        "",
    )
    for line in banner:
        print(line)
    cls()
    displayIntro()
    cls()
    displaySearch()
    cls()
    sleep(5)
    entrycode()
    cls()
    instructions()
    cls()
    askForInput()
    print("")
    print("Thank you for visiting me.")
    GoAgain()
def StartMenu():
    """Clear the screen and print the numbered list of available programs."""
    cls()
    entries = (
        "",
        "StartMenu:",
        "---------",
        "",
        "Program 1: Intro ",
        "Program 2: ChatBot ",
        "Program 3: Tank ",
        "Program 4: D.A.I.S.E ",
        "Program 5: Surveillance ",
        "Program 6: Kendy ",
        "Program 7: Wendy ",
        "Program 8: KendyVerse ",
        "Program 9: Kendy Robot/Android ",
        "Program 10: ToDo List ",
    )
    for line in entries:
        print(line)
def Chooser():
    """Show the main menu and dispatch to the chosen program.

    Choices 1-10 launch Program1..Program10.  Anything else shows
    "Invalid choice" and runs the shutdown sequence.  Previously a
    non-numeric entry crashed with an uncaught ValueError; it now falls
    through to the same invalid-choice branch.
    """
    cls()
    print("--------")
    StartMenu()
    print("")
    try:
        choice = int(input("Please choose a program number from 1 - 10 and then press Enter: "))
    except ValueError:
        choice = 0  # non-numeric input -> treated as an invalid choice
    if choice == 1:
        Program1()
    elif choice == 2:
        Program2()
    elif choice == 3:
        Program3()
    elif choice == 4:
        Program4()
    elif choice == 5:
        Program5()
    elif choice == 6:
        Program6()
    elif choice == 7:
        Program7()
    elif choice == 8:
        Program8()
    elif choice == 9:
        Program9()
    elif choice == 10:
        Program10()
    else:
        cls()
        print("")
        print("Invalid choice")
        sleep(2)
        End()
def displayIntro():
    """Greet the user with Kendy's welcome message, then print a slow
    '.' / '..' / '...' progress trail."""
    cls()
    sleep(4)
    cls()
    print("")
    print("I am Kendy the Robot / Android")
    print("Welcome to my Universe.")
    sleep(8)
    for dots in range(1, 4):
        print("." * dots)
        sleep(2)
def displaySearch():
    """Play the fake boot/search animation for the robot startup sequence.

    Purely cosmetic: alternates status messages with timed dot trails.
    The dot counts and sleep durations are intentionally irregular to
    look like real, uneven work.
    """
    cls()
    print("")
    print("Preparing files for Initialization...")
    print("")
    print(".")
    sleep(2)
    print("..")
    sleep(2)
    print("...")
    sleep(2)
    print('....')
    cls()
    print("")
    print('Initializing StartUp Sequence...')
    print("..")
    sleep(2)
    print("...")
    sleep(2)
    print('....')
    sleep(2)
    cls()
    print("")
    print('searching...')
    sleep(2)
    print("...")
    sleep(2)
    print('....')
    sleep(2)
    cls()
    print("")
    print('Initiating Programming Sequence')
    sleep(2)
    print("...")
    sleep(2)
    print('....')
    cls()
    print("")
    print('Initiating Diagnostics')
    sleep(2)
    print("...")
    sleep(1)
    print('....')
    cls()
    print("")
    print('searching...')
    sleep(2)
    print("...")
    sleep(2)
    print("....")
    sleep(2)
    print('....')
    cls()
    print("")
    print('Scanning ports...')
    sleep(2)
    print("...")
    sleep(2)
    print('....')
#def accessCode():
#code=int(input("Please enter your access code: "))
#while code != "1284":
#print("")
#print('Enter Access Code.') # There are four spaces in front of print.
#code = input()
def entrycode():
    """Prompt until the user types an integer access code.

    Any integer is accepted; non-numeric entries print an apology and
    re-prompt.  The code itself is not checked against anything.
    """
    cls()
    while True:
        try:
            int(input("Please enter your access code: "))
        except ValueError:
            # Not an integer -- apologise and go around again.
            print("Sorry, I didn't understand that.")
        else:
            print("Thank you, your code has been accepted.")
            return
def KVersMenu():
    """Clear the screen and print the numbered list of KendyVerse episodes."""
    cls()
    entries = (
        "",
        "StartMenu:",
        "---------",
        "",
        "Episode 1: Lost ",
        "Episode 2: Discovery ",
        "Episode 3: Assistance ",
        "Episode 4: Repairs ",
        "Episode 5: Recovery ",
        "Episode 6: Stabilization ",
        "Episode 7: Resources ",
        "Episode 8: Exploration ",
        "Episode 9: Contact ",
        "Episode 10: ToDo List ",
        "To End please enter 11",
    )
    for line in entries:
        print(line)
def KVersChoice():
    """Show the episode menu and dispatch to the chosen episode.

    Choices 1-10 open episodes Kvep1..Kvep10, 11 exits the program, and
    anything else shows "Invalid choice" and runs the shutdown sequence.
    Previously a non-numeric entry crashed with an uncaught ValueError;
    it now falls through to the same invalid-choice branch.
    """
    cls()
    print("")
    print("--------")
    KVersMenu()
    print("")
    try:
        choice = int(input("Please choose a program number from 1 - 10 and then press Enter: "))
    except ValueError:
        choice = 0  # non-numeric input -> treated as an invalid choice
    if choice == 1:
        Kvep1()
    elif choice == 2:
        Kvep2()
    elif choice == 3:
        Kvep3()
    elif choice == 4:
        Kvep4()
    elif choice == 5:
        Kvep5()
    elif choice == 6:
        Kvep6()
    elif choice == 7:
        Kvep7()
    elif choice == 8:
        Kvep8()
    elif choice == 9:
        Kvep9()
    elif choice == 10:
        Kvep10()
    elif choice == 11:
        sys.exit()
    else:
        cls()
        print("")
        print("Invalid choice")
        sleep(2)
        End()
def instructions():
    """Print the tongue-in-cheek disclaimer and 'safety instructions'
    screens of the robot startup sequence, with long dramatic pauses."""
    print("")
    cls()
    print("")
    print('DISCLAIMER: ')
    print("")
    print('Kendy or it\'s manufacturers and/or programmers ')
    print('are and will not be held responsible for any user faults.')
    sleep(5)
    print('Kendy or it\'s manufacturers and/or programmers')
    print('will not be held liable for any lawsuits due to malfunctions of any kind whatsoever!')
    sleep(10)
    cls()
    print("")
    print('SAFETY INSTRUCTIONS FOR OPERATING THIS UNIT TO FOLLOW!')
    print('-----------------------------------------------------')
    print("")
    sleep(5)
    cls()
    print("")
    print('Please adhere strictly to the following instructions!')
    print('-----------------------------------------------------')
    sleep(15)
    print("")
    print('Eat eggs regularly.')
    sleep(5)
    print('Eggs must be eaten with Spam.')
    sleep(10)
    print("")
    print('Spam and Eggs must be eaten on toast!')
    sleep(10)
    print("")
def askForInput():
    """Echo user commands until the user enters the literal string '1'.

    Fixes two defects: the loop sentinel was initialised to the ``str``
    type object (it only worked because ``str != '1'``), and there was
    an unreachable ``print("")`` after the return.
    """
    print("")
    newInfo = None  # sentinel: anything other than '1' enters the loop
    while newInfo != '1':
        print("")
        print('Please Enter Command.')
        newInfo = input(": ")
        print("")
        print("", newInfo)
    return ()  # historical return value (empty tuple), kept for compatibility
print("")
#def playAgain():
#print("Replace 'playAgain()' with 'Chooser()' from 'kstart03.py'")
#playAgain = ''
#while playAgain == 'yes' or playAgain == 'y':
#print('Do you want to restart the IPS? (yes or no)')
#go = input('Do you want to restart the IPS? (yes or no)')
#if go == "y":
#print("Thanks")
#else:
#print("Bye!")
#sleep(3)
#break
#return()
def End():
    """Show the farewell screens and a 3-2-1 countdown, then terminate
    the whole program via sys.exit()."""
    cls()
    print("")
    print("")
    print("Thank you for your patronage!")
    sleep(2)
    print("")
    cls()
    print("")
    print("End of Program.")
    print("---------------")
    sleep(3)
    for n in (3, 2, 1):
        print("... {}".format(n))
        sleep(2 if n > 1 else 1)
    sys.exit()
def Continue():
    """Ask the player whether to continue; any answer other than 'y'
    terminates the program.

    Fixes a confusing construct: the original rebound the function's own
    name (``Continue = input(...)``) as the local holding the reply.
    """
    print("")
    reply = input("Do you want to continue? Please enter y or n : ")
    if reply != "y":
        sys.exit()
def GoAgain():
    """Offer a return to the main menu: 'y' reopens the program chooser,
    any other answer runs the shutdown sequence."""
    cls()
    print("")
    print("Return to Main Menu!")
    answer = input("Do you want to continue? Please enter y or n : ")
    if answer == "y":
        Chooser()
    else:
        End()
def cls():
    """Clear the terminal: 'clear' on POSIX systems, 'cls' on Windows."""
    command = 'clear' if os.name == 'posix' else 'cls'
    _ = os.system(command)
def MainEx():
    """Developer aid: display the intended structure of the main function,
    then open the program chooser.  Not called from Main() by default."""
    cls()
    print("")
    print("""
This is the Main function!
----------------------------
It looks like this:
Test();
Intro();
ToDoList();
StartMenu();
Chooser();
End();
---
""")
    print("")
    cls()
    sleep(2)
    Chooser()
def ToDoList():
    """Display the project's hard-coded to-do list, wait for Enter, then
    return to the main menu."""
    cls()
    print("")
    print("""
This is the To Do List!
-----------------------
1. Test kendy07-11.py,kstart01 through 3.py - DONE 3/29/2020
2. Start using Adeept to program and test breadboards.
3. Activate tank an test motors.
4. Start building gripper arm.
5. Test lights and sensors
6. Add programs to GitHub. - DONE 3/29/2020
7. Fix and add info to GitHub Pages. DONE 3/29/2020
8. Add this list to GitHub pages.
9. Add Github Pages to Kybot (Tank).
10. Test and improve this program and add versions and headings.
11. Update this list.
12. Add more stuff here.....
""")
    print("")
    go = input(" Press Enter to continue...")
    cls()
    GoAgain()
def Main():
    """Program entry point: currently just opens the main menu chooser."""
    Chooser()
# Start the program.
Main()
|
kurg/KendyVerse
|
kstart14.py
|
Python
|
gpl-3.0
| 63,370
|
[
"BLAST"
] |
7712fab7752a9989741fe10216a6d8fc5df458c8c30b49533ce998e14bffaed8
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#import sys
import sqlite3
def check_version():
    """Print the SQLite library version using a throwaway in-memory database.

    Any sqlite3 error is reported rather than raised.  Resources are
    released even when the connection itself fails: previously a failed
    ``connect`` left ``cursor``/``connection`` unbound, so the ``finally``
    block raised NameError instead of cleaning up.
    """
    connection = None
    cursor = None
    try:
        connection = sqlite3.connect(':memory:')  # in-memory DB, nothing touches disk
        cursor = connection.cursor()
        cursor.execute('SELECT SQLITE_VERSION()')
        data = cursor.fetchone()
        print('SQLite version: {}.'.format(data))
    except sqlite3.Error as ex:
        print('sqlite3.Error: {}.'.format(ex))
    finally:
        if cursor is not None:
            cursor.close()
        if connection is not None:
            connection.close()
def create_and_insert_example(db_filepath='./sqlite_user.db', image_filepath='/path/to/image'):
    """Create the Users table and insert sample rows, optionally with a BLOB image.

    Args:
        db_filepath: path of the SQLite database file (created if missing).
            Defaults to the original hard-coded path for compatibility.
        image_filepath: image whose bytes are stored with the 'Michael'
            row; if it cannot be read, the row is inserted without an image.

    On any sqlite3 error the transaction is rolled back and the error is
    printed.  The connection is closed explicitly: sqlite3's ``with``
    context manager only commits/rolls back, it does not close.
    """
    def _load_image(path):
        # Read the raw image bytes, or return None if the file is unreadable.
        try:
            with open(path, 'rb') as fd:
                return fd.read()
        except IOError as ex:
            print('IOError: {}.'.format(ex))
            return None

    with sqlite3.connect(db_filepath) as connection:  # creates the DB file
        cursor = connection.cursor()
        try:
            cursor.execute('CREATE TABLE IF NOT EXISTS Users (Id INTEGER PRIMARY KEY, Name TEXT NOT NULL, Height REAL, MyID INT UNIQUE, Image BLOB)')
            cursor.execute('INSERT INTO Users(Name, Height, MyID) VALUES ("Michelle", 175.9, 1)')
            cursor.execute('INSERT INTO Users(Name, Height, MyID) VALUES ("Sonya", 163.7, 2)')
            cursor.execute('INSERT INTO Users(Name, Height, MyID) VALUES (?, ?, ?)', ('Greg', 186.2, 3))
            print('Last row ID = {}.'.format(cursor.lastrowid))
            print('#rows affected = {}.'.format(cursor.rowcount))
            sql = 'INSERT INTO Users(Name, Height, MyID) VALUES (?, ?, ?)'
            cursor.execute(sql, ('Brian', 171.4, 4))
            table_rows = [('Lucy', 169.6, 5), ('Ryan', 176.8, 6)]
            cursor.executemany(sql, table_rows)
            print('Last row ID = {}.'.format(cursor.lastrowid))
            print('#rows affected = {}.'.format(cursor.rowcount))
            # Image: store the bytes as a BLOB when the file is readable.
            img = _load_image(image_filepath)
            if img:
                blob = sqlite3.Binary(img)
                cursor.execute('INSERT INTO Users(Name, Height, MyID, Image) VALUES (?, ?, ?, ?)', ('Michael', 193.5, 7, blob))
            else:
                cursor.execute('INSERT INTO Users(Name, Height, MyID) VALUES (?, ?, ?)', ('Michael', 193.5, 7))
            print('Last row ID = {}.'.format(cursor.lastrowid))
            print('#rows affected = {}.'.format(cursor.rowcount))
            connection.commit()
        except sqlite3.Error as ex:
            connection.rollback()
            print('sqlite3.Error: {}.'.format(ex))
        finally:
            cursor.close()
    connection.close()  # the context manager above does not close the connection
def query_example(db_filepath='./sqlite_user.db'):
    """Demonstrate SELECT patterns (iteration, fetchmany, fetchone, aggregates).

    Args:
        db_filepath: path of an existing SQLite database containing the
            Users table.  Defaults to the original hard-coded path.

    Results are printed; sqlite3 errors are caught and reported.  The
    cursor is created before the ``try`` (previously a failure there left
    ``cursor`` unbound and the ``finally`` block raised NameError), and
    the connection is closed explicitly since sqlite3's ``with`` does not
    close it.
    """
    with sqlite3.connect(db_filepath) as connection:
        cursor = connection.cursor()
        try:
            sql = 'SELECT Name, Height, MyID FROM Users'
            # The cursor itself is iterable over result rows.
            print('Rows: {}.'.format([row for row in cursor.execute(sql)]))
            cursor.execute(sql)
            rows = cursor.fetchmany(5)
            print('Rows: {}.'.format(rows))
            cursor.execute(sql)
            while True:
                row = cursor.fetchone()
                if row is None:
                    break
                print(row)
            #--------------------
            cursor.execute('SELECT * FROM Users WHERE Height >= ? LIMIT 1 OFFSET ?', (170, 2))  # zero-based offset
            selected_user = cursor.fetchone()  # a single row
            print('Selected user = {}.'.format(selected_user))
            cursor.execute('SELECT Count(Id) FROM Users WHERE Height >= ?', (170,))
            num_users = cursor.fetchone()[0]
            cursor.execute('SELECT AVG(Height) FROM Users WHERE Height >= ?', (180,))
            avg_selected = cursor.fetchone()[0]
            cursor.execute('SELECT SUM(Height) FROM Users WHERE Height >= ?', (180,))
            sum_selected = cursor.fetchone()[0]
            print('#users = {}, average = {}, sum = {}.'.format(num_users, avg_selected, sum_selected))
        except sqlite3.Error as ex:
            print('sqlite3.Error: {}.'.format(ex))
        finally:
            cursor.close()
    connection.close()  # the context manager above does not close the connection
def update_example(db_filepath='./sqlite_user.db'):
    """Demonstrate an UPDATE: set Brian's height to 165.8 and print the table.

    Args:
        db_filepath: path of an existing SQLite database containing the
            Users table.  Defaults to the original hard-coded path.

    On error the transaction is rolled back and the error is printed.
    The cursor is created before the ``try`` (previously a failure there
    left ``cursor`` unbound in ``finally``), and the connection is closed
    explicitly since sqlite3's ``with`` does not close it.
    """
    with sqlite3.connect(db_filepath) as connection:
        cursor = connection.cursor()
        try:
            cursor.execute('UPDATE Users SET Height=? WHERE Name=?', (165.8, 'Brian'))
            print('Last row ID = {}.'.format(cursor.lastrowid))
            print('#rows affected = {}.'.format(cursor.rowcount))
            print('Rows: {}.'.format([row for row in cursor.execute('SELECT Name, Height, MyID FROM Users')]))
            connection.commit()
        except sqlite3.Error as ex:
            connection.rollback()
            print('sqlite3.Error: {}.'.format(ex))
        finally:
            cursor.close()
    connection.close()  # the context manager above does not close the connection
def main():
    """Run every sqlite3 demo in order: version check, insert, query, update."""
    check_version()
    create_and_insert_example()
    query_example()
    update_example()
#--------------------------------------------------------------------
if __name__ == '__main__':
    main()
|
sangwook236/SWDT
|
sw_dev/python/ext/test/database/sqlite_test.py
|
Python
|
gpl-3.0
| 5,157
|
[
"Brian"
] |
2a5eb4c5d03d0485b3be647f16d81a9614b671c481eaaaedeb7b11c129125eda
|
# Fixture script: exercises resolving an attribute chain through a package
# whose subfolder is bound under an alias (heyo.moose.fast).
import package_with_folder_and_alias
package_with_folder_and_alias.heyo.moose.fast()
|
python-security/pyt
|
examples/import_test_project/test_package_with_folder_and_alias.py
|
Python
|
gpl-2.0
| 86
|
[
"MOOSE"
] |
6146f11ae3709ddfad94b75a284d217037fa42d70e7993c72c8130f1a8edc043
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 12 15:45:45 2018
@author: tih
"""
import numpy as np
import wa.General.raster_conversions as RC
def Run(input_nc, output_nc):
    """Derive river-network dictionaries from SurfWAT NetCDF data.

    Traces the river network defined by the 'rivers' and 'demdir' (flow
    direction) grids, splits it into branches, and collects per branch the
    cumulative flow distance, the running-maximum DEM height and the routed
    discharge time series.

    Args:
        input_nc: path to the input NetCDF (vars: 'demdir', 'basin', 'dem', 'time').
        output_nc: path to the output NetCDF (vars: 'rivers', 'accpix',
            'discharge_natural').

    Returns:
        Tuple (DEM_dict, River_dict, Distance_dict, Discharge_dict), each a
        dict keyed by branch number.
    """
    # Extract flow direction data from NetCDF file
    flow_directions = RC.Open_nc_array(input_nc, Var = 'demdir')
    # Open River Array
    Rivers = RC.Open_nc_array(output_nc, Var = 'rivers')
    # Open Accumulated Pixel Array
    Accumulated_Pixels = RC.Open_nc_array(output_nc, Var = 'accpix')
    # Open Routed discharge Array
    Routed_Array = RC.Open_nc_array(output_nc, Var = 'discharge_natural')
    # Get the raster shape
    geo_out_example, epsg_example, size_X_example, size_Y_example, size_Z_example, Time_example = RC.Open_nc_info(input_nc)
    geo_out_example = np.array(geo_out_example)
    # Create a river array with a boundary of 1 pixel
    Rivers_bounds = np.zeros([size_Y_example+2, size_X_example+2])
    Rivers_bounds[1:-1,1:-1] = Rivers
    # Create a flow direction array with a boundary of 1 pixel
    # (-32768 is used throughout as the nodata marker)
    flow_directions[flow_directions==0]=-32768
    flow_directions_bound = np.ones([size_Y_example+2, size_X_example+2]) * -32768
    flow_directions_bound[1:-1,1:-1] = flow_directions
    # Create ID Matrix (unique 1-based id per pixel, row-major)
    y,x = np.indices((size_Y_example, size_X_example))
    ID_Matrix = np.int32(np.ravel_multi_index(np.vstack((y.ravel(),x.ravel())),(size_Y_example,size_X_example),mode='clip').reshape(x.shape))
    ID_Matrix_bound = np.ones([size_Y_example+2, size_X_example+2]) * -32768
    ID_Matrix_bound[1:-1,1:-1] = ID_Matrix + 1
    ID_Matrix_bound[flow_directions_bound==-32768]=-32768
    del x, y
    # Empty total from and to arrays
    ID_to_total=np.array([])
    ID_from_total=np.array([])
    # The flow directions parameters of HydroSHED
    Directions = [1, 2, 4, 8, 16, 32, 64, 128]
    # Loop over the directions, building parallel arrays that pair each river
    # pixel id ("from") with the id of the pixel it flows into ("to").
    for Direction in Directions:
        # empty from and to arrays for 1 direction
        data_flow_to = np.zeros([size_Y_example + 2, size_X_example + 2])
        data_flow_from = np.zeros([size_Y_example + 2, size_X_example + 2])
        # Get the ID of only the rivers
        data_flow_to_ID = np.zeros([size_Y_example + 2, size_X_example + 2])
        data_flow_in = np.ones([size_Y_example + 2, size_X_example + 2]) * Rivers_bounds
        # Mask only one direction
        data_flow_from[flow_directions_bound == Direction] = data_flow_in[flow_directions_bound == Direction] * ID_Matrix_bound[flow_directions_bound == Direction]
        # Add the data flow to ID: shift by one pixel in this direction
        if Direction == 4:
            data_flow_to[1:,:] = data_flow_from[:-1,:]
        if Direction == 2:
            data_flow_to[1:,1:] = data_flow_from[:-1,:-1]
        if Direction == 1:
            data_flow_to[:,1:] = data_flow_from[:,:-1]
        if Direction == 128:
            data_flow_to[:-1,1:] = data_flow_from[1:,:-1]
        if Direction == 64:
            data_flow_to[:-1,:] = data_flow_from[1:,:]
        if Direction == 32:
            data_flow_to[:-1,:-1] = data_flow_from[1:,1:]
        if Direction == 16:
            data_flow_to[:,:-1] = data_flow_from[:,1:]
        if Direction == 8:
            data_flow_to[1:,:-1] = data_flow_from[:-1,1:]
        # mask out the no river pixels
        data_flow_to_ID[data_flow_to>0] = ID_Matrix_bound[data_flow_to>0]
        # Collect to and from arrays
        ID_from_total = np.append(ID_from_total,data_flow_from[data_flow_from!=0].ravel())
        ID_to_total = np.append(ID_to_total,data_flow_to_ID[data_flow_to_ID!=0].ravel())
    ######################## Define the starting point ############################
    # Open Basin area
    Basin = RC.Open_nc_array(input_nc, Var = 'basin')
    Basin = -1 * (Basin - 1)
    Basin_Buffer = RC.Create_Buffer(Basin, 8)
    # Candidate outlets: river pixels inside the buffered basin boundary.
    Possible_End_Points = np.zeros(Basin.shape)
    Possible_End_Points[(Basin_Buffer + Rivers) == 2] = 1
    End_Points = [[0,0]]
    rows_col_possible_end_pixels = np.argwhere(Possible_End_Points == 1)
    # Accumulated_Pixels_possible = ID_Matrix * Possible_End_Points
    for PosPix in rows_col_possible_end_pixels:
        # 3x3 neighbourhood of accumulated-flow values around the candidate.
        Accumulated_Pixels_possible_Area = Accumulated_Pixels[PosPix[0]-1:PosPix[0]+2, PosPix[1]-1:PosPix[1]+2]
        Max_acc_possible_area = np.max(Accumulated_Pixels_possible_Area)
        middle_pixel = Accumulated_Pixels_possible_Area[1,1]
        if Max_acc_possible_area == middle_pixel:
            if flow_directions[PosPix[0],PosPix[1]] == -32768:
                # Nodata flow direction: shift to the neighbour with the
                # highest accumulation instead.
                acc_aux = np.copy(Accumulated_Pixels_possible_Area)
                acc_aux[1,1] = 0
                # NOTE(review): np.where returns (rows, cols); 'off_y' holds the
                # column offset and 'off_x' the row offset -- names look swapped
                # but the two assignments below use them consistently.
                off_y = np.where(acc_aux == np.max(acc_aux))[1][0] - 1
                off_x = np.where(acc_aux == np.max(acc_aux))[0][0] - 1
                PosPix[0] = PosPix[0] + off_x
                PosPix[1] = PosPix[1] + off_y
            # NOTE(review): End_Points starts as [[0,0]], so this == [] branch
            # never triggers; after the first vstack End_Points is an ndarray
            # and the comparison relies on NumPy returning False for
            # shape-mismatched comparisons -- TODO confirm intended.
            if End_Points == []:
                End_Points = PosPix
            else:
                End_Points = np.vstack([End_Points, PosPix])
    # Create an empty dictionary for the rivers
    River_dict = dict()
    # Create empty array for the loop
    ID_starts_next = []
    i = 0
    # Walk upstream from every end point, splitting at confluences; the [1:]
    # skips the [0,0] placeholder inserted above.
    for End_Point in End_Points[1:]:
        # Define starting point
        # Max_Acc_Pix = np.nanmax(Accumulated_Pixels[ID_Matrix_bound[1:-1,1:-1]>0])
        # ncol, nrow = np.argwhere(Accumulated_Pixels==Max_Acc_Pix)[0]
        # Add Bounds
        # col = ncol + 1
        # row = nrow + 1
        # NOTE(review): End_Point is (row, col) from np.argwhere, so 'col'
        # actually holds the row index and vice versa; the indexing below is
        # consistent with that swap.
        col = End_Point[0] + 1
        row = End_Point[1] + 1
        ############################ Route the river ##################################
        # Get the ID of the starting point
        ID_starts = [ID_Matrix_bound[col,row]]
        # Keep going on till all the branches are looped
        while len(ID_starts) > 0:
            for ID_start in ID_starts:
                ID_start = int(ID_start)
                # Empty parameters for new starting point
                new = 0
                IDs = []
                # Add starting point
                Arrays_from = np.argwhere(ID_from_total[:] == ID_start)
                ID_from = ID_to_total[int(Arrays_from[0])]
                IDs = np.array([ID_from, ID_start])
                ID_start_now = ID_start
                # Keep going till the branch ends (a confluence or a source)
                while new == 0:
                    Arrays_to = np.argwhere(ID_to_total[:] == ID_start)
                    # Add IDs to the river dictionary
                    if len(Arrays_to)>1 or len(Arrays_to) == 0:
                        River_dict[i] = IDs
                        i += 1
                        new = 1
                        # Define the next loop for the new branches
                        for j in range(0, len(Arrays_to)):
                            ID_starts_next = np.append(ID_starts_next,ID_from_total[int(Arrays_to[j])])
                        # If it was the last one then empty ID_start_next
                        if ID_start_now == ID_starts[-1]:
                            ID_starts = ID_starts_next
                            ID_starts_next = []
                    # Add pixel to tree for river dictionary
                    else:
                        ID_start = ID_from_total[Arrays_to[0]]
                        IDs = np.append(IDs, ID_start)
    ######################## Create dict distance and dict dem ####################
    # Extract DEM data from NetCDF file
    DEM = RC.Open_nc_array(input_nc, Var = 'dem')
    # Get the distance of a horizontal and vertical flow pixel (assuming it flows in a straight line)
    import wa.Functions.Start.Area_converter as AC
    vertical, horizontal = AC.Calc_dlat_dlon(geo_out_example,size_X_example, size_Y_example)
    # Calculate a diagonal flowing pixel (assuming it flows in a straight line)
    diagonal = np.power((np.square(vertical) + np.square(horizontal)),0.5)
    # Create empty distance array
    Distance = np.zeros([size_Y_example, size_X_example])
    # Fill in the distance array: E/W flows use the horizontal pixel size,
    # N/S flows the vertical one, and the four diagonals the diagonal size.
    Distance[np.logical_or(flow_directions == 1,flow_directions == 16)] = horizontal[np.logical_or(flow_directions == 1,flow_directions == 16)]
    Distance[np.logical_or(flow_directions == 64,flow_directions == 4)] = vertical[np.logical_or(flow_directions == 64,flow_directions == 4)]
    Distance[np.logical_or(np.logical_or(np.logical_or(flow_directions == 32,flow_directions == 8),flow_directions == 128),flow_directions == 2)] = diagonal[np.logical_or(np.logical_or(np.logical_or(flow_directions == 32,flow_directions == 8),flow_directions == 128),flow_directions == 2)]
    # Create empty dicionaries for discharge, distance, and DEM
    Discharge_dict = dict()
    Distance_dict = dict()
    DEM_dict = dict()
    # Create empty arrays needed for the loop
    River_end = []
    River_ends = np.zeros([2,3])
    # Loop over the branches
    for River_number in range(0,len(River_dict)):
        # Get the pixels associated with the river section
        River = River_dict[River_number]
        i=1
        # Create empty arrays
        Distances_river = np.zeros([len(River)])
        DEM_river = np.zeros([len(River)])
        Discharge_river = np.zeros([len(River)])
        # for the first pixel get the previous pixel value from another branche
        row_start = np.argwhere(River_ends[:,0] == River[0])
        if len(row_start) < 1:
            Distances_river[0] = 0
            row, col = np.argwhere(ID_Matrix_bound == River[0])[0][:]
            DEM_river[0] = DEM[row - 1, col - 1]
            Discharge_river[0] = -9999
        else:
            Distances_river[0] = River_ends[row_start, 1]
            DEM_river[0] = River_ends[row_start, 2]
            row, col = np.argwhere(ID_Matrix_bound == River[0])[0][:]
            #Discharge_river[0] = Routed_Discharge[timestep, row - 1, col - 1]
        # For the other pixels get the value of the River ID pixel
        for River_part in River[1:]:
            row, col = np.argwhere(ID_Matrix_bound == River_part)[0][:]
            Distances_river[i] = Distance[row - 1, col - 1]
            # DEM is forced monotonically non-decreasing along the branch.
            DEM_river[i] = np.max([DEM_river[i-1],DEM[row - 1, col - 1]])
            #Discharge_river[i] = Routed_Discharge[timestep, row - 1, col - 1]
            if River_part == River[1] and Discharge_river[i-1] == -9999:
                Discharge_river[i - 1] = Discharge_river[i]
            i += 1
        # Write array in dictionary
        DEM_dict[River_number] = DEM_river
        Discharge_dict[River_number] = Discharge_river
        Distance_dict[River_number] = np.cumsum(Distances_river)
        # Save the last pixel value
        # NOTE(review): 'River_part' leaks from the loop above; a branch with a
        # single pixel would leave it stale (or undefined on the first branch)
        # -- TODO confirm branches always contain >= 2 pixels.
        River_end[:] = [River_part , np.cumsum(Distances_river)[-1], DEM_river[-1]]
        River_ends = np.vstack((River_ends, River_end))
    ########################## Discharge Dictionary ###############################
    # Create ID Matrix (rebuilt because the earlier one was nodata-masked)
    y,x = np.indices((size_Y_example, size_X_example))
    ID_Matrix = np.int32(np.ravel_multi_index(np.vstack((y.ravel(),x.ravel())),(size_Y_example,size_X_example),mode='clip').reshape(x.shape))
    ID_Matrix_bound = np.ones([size_Y_example+2, size_X_example+2]) * -32768
    ID_Matrix_bound[1:-1,1:-1] = ID_Matrix + 1
    del x, y
    # Create empty dicionaries for discharge, distance, and DEM
    Discharge_dict = dict()
    Amount_months = len(RC.Open_nc_array(input_nc, Var = 'time'))
    # Loop over the branches
    for River_number in range(0,len(River_dict)):
        # Get the pixels associated with the river section
        River = River_dict[River_number]
        i=0
        # Create empty arrays
        Discharge_river = np.zeros([Amount_months, len(River)])
        # For the other pixels get the value of the River ID pixel
        for River_part in River[:]:
            row, col = np.argwhere(ID_Matrix_bound == River_part)[0][:]
            Discharge_river[:,i] = Routed_Array[:, row - 1, col - 1]
            i += 1
        # Write array in dictionary
        Discharge_dict[River_number] = Discharge_river
        print(River_number)
    return(DEM_dict, River_dict, Distance_dict, Discharge_dict)
|
wateraccounting/wa
|
Models/SurfWAT/Part2_Create_Dictionaries.py
|
Python
|
apache-2.0
| 11,980
|
[
"NetCDF"
] |
bf7a6be2e76455ebcd7ecc3437f0dbad945cdb08293491337847459a5ba762d2
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .mbd import mbd_rsscs, bohr
|
gkc1000/pyscf
|
pyscf/extras/mbd/__init__.py
|
Python
|
apache-2.0
| 645
|
[
"PySCF"
] |
a0aa1174cb286e81ae75fddfc9634df3afa682b950c3067856223ca58cff0f04
|
from __future__ import print_function, division, absolute_import
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
# Resolve paths relative to this setup script.
here = path.abspath(path.dirname(__file__))

# The README doubles as the PyPI long description.
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

# Single-source the version: version.py defines __version__.
# https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package/16084844#16084844
exec(open('pretrainedmodels/version.py').read())

# Package metadata; see https://packaging.python.org/specifications/core-metadata/
setup(
    name='pretrainedmodels',
    version=__version__,
    description='Pretrained models for Pytorch',
    long_description=long_description,
    url='https://github.com/cadene/pretrained-models.pytorch',
    author='Remi Cadene',
    author_email='remi.cadene@icloud.com',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.6',
    ],
    keywords='pytorch pretrained models deep learning',
    packages=find_packages(exclude=['data', 'examples']),
    install_requires=['torch', 'torchvision', 'munch', 'tqdm'],
)
|
Cadene/pretrained-models.pytorch
|
setup.py
|
Python
|
bsd-3-clause
| 6,643
|
[
"VisIt"
] |
e6ae0f285195809b97b6e9ccb922112c1a8ee9eeae3794df839026820afac3c4
|
''' BanditMDPClass.py: Contains the BanditMDPClass class. '''
# Python imports.
from __future__ import print_function
from collections import defaultdict
import numpy as np
# Other imports.
from simple_rl.mdp.MDPClass import MDP
from simple_rl.mdp.StateClass import State
class BanditMDP(MDP):
    ''' Implementation for a standard Bandit MDP.

        Note: Assumes gaussians with randomly initialized mean and variance
        unless payout_distributions is set.
    '''

    ACTIONS = []

    def __init__(self, num_arms=10, distr_family=np.random.normal, distr_params=None):
        '''
        Args:
            num_arms (int): Number of arms.
            distr_family (lambda): A function from numpy which, when given
                entities from @distr_params, samples from the distribution family.
            distr_params (dict): Maps each action name ("1".."num_arms") to the
                positional arguments for @distr_family. If None, default
                mu/sigma for a normal distribution are initialized randomly.
        '''
        BanditMDP.ACTIONS = [str(i) for i in range(1, num_arms + 1)]
        MDP.__init__(self, BanditMDP.ACTIONS, self._transition_func, self._reward_func, init_state=State(1), gamma=1.0)
        self.num_arms = num_arms
        self.distr_family = distr_family
        self.distr_params = self.init_distr_params() if distr_params is None else distr_params

    def get_parameters(self):
        '''
        Returns:
            (dict) key=param_name (str) --> val=param_val (object).
        '''
        param_dict = defaultdict(int)
        param_dict["num_arms"] = self.num_arms
        param_dict["distr_family"] = self.distr_family
        param_dict["distr_params"] = self.distr_params
        return param_dict

    def init_distr_params(self):
        '''
        Summary:
            Creates default distribution parameters for each of
            the @self.num_arms arms. Defaults to Gaussian bandits
            with each mu ~ Unif(-1,1) and sigma ~ Unif(0,2).
        Returns:
            (dict): Keyed by the 1-based action names in ACTIONS.
        '''
        distr_params = defaultdict(lambda: defaultdict(list))
        # BUGFIX: key by the 1-based action names ("1".."num_arms") used in
        # ACTIONS. The previous 0-based keys (str(0)..str(num_arms-1)) never
        # matched the last action, so _reward_func silently sampled a standard
        # normal for that arm via the defaultdict fallback.
        for i in range(1, self.num_arms + 1):
            next_mu = np.random.uniform(-1.0, 1.0)
            next_sigma = np.random.uniform(0, 2.0)
            distr_params[str(i)] = [next_mu, next_sigma]
        return distr_params

    def _reward_func(self, state, action, next_state=None):
        '''
        Args:
            state (State)
            action (str): One of ACTIONS ("1".."num_arms").
            next_state (State)

        Returns
            (float)
        '''
        # Samples from the distribution associated with @action.
        return self.distr_family(*self.distr_params[action])

    def _transition_func(self, state, action):
        '''
        Args:
            state (State)
            action (str)

        Returns
            (State)

        Notes:
            Required to fit naturally with the rest of simple_rl, but obviously
            doesn't do anything.
        '''
        return state

    def __str__(self):
        return str(self.num_arms) + "_Armed_Bandit"
|
david-abel/simple_rl
|
simple_rl/tasks/bandit/BanditMDPClass.py
|
Python
|
apache-2.0
| 3,038
|
[
"Gaussian"
] |
362c4f385c1f4efb2008fb11d3f4325050c6d9ed4664a2b99108f1716133ee64
|
"""Configuration file for eruption scenario
Tephra modelling validation worksheet
Scenario Name: Mount Merapi 2010 (Predictive_scenario)
Run Date: 2010_05_18
Run number:1
Eruption observation details:
"""
# Short eruption comment to appear in output directory.
Eruption_comment = 'hazard_map_example'
# Time (Volcanological input file)
Eruption_Year = 2010 # YYYY
Eruption_Month = 5 # MM
Eruption_Day = 18 # DD
Start_time_of_meteo_data = 0 # Hours after 00
Meteo_time_step = 60 # Mins
End_time_of_meteo_data = 3 # Hours after 00
Start_time_of_eruption = [0, 1, 2] # Hours after 00
End_time_of_eruption = 3 # Hours after 00
End_time_of_run = 3 # Hours after 00
# Location (Volcanological input file)
X_coordinate_of_vent = 439423 # UTM zone implied by topography projection
Y_coordinate_of_vent = 9167213 # UTM zone implied by topography projection
# Vertical discretisation for model domain
Z_min = 0.0
Z_max = 50000
Z_increment = 10000
# Select meteorological input type
Meteorological_model = 'profile' # profile, ncep, ...
# Altitudes of wind data in meteorological profile
wind_altitudes = [50, 500, 1000, 5000, 10000, 20000, 30000, 50000] # List Z layers in increasing height order (meters; i.e.[100, 500, 1000, 5000, etc])
# Granulometry (Volcanological input file)
Grainsize_distribution = 'GAUSSIAN' # Possibilites are GAUSSIAN/BIGAUSSIAN
Number_of_grainsize_classes = 6
Mean_grainsize = 2.5 # phi
Sorting = 1.5
Minimum_grainsize = 0 # phi
Maximum_grainsize = 5 # phi
Density_minimum = 1200 # kg/m3
Density_maximum = 2300 # kg/m3
Sphericity_minimum = 0.9
Sphericity_maximum = 0.9
# Source (Volcanological input file)
Vent_height = 2968
Source_type = 'suzuki' # Possibilities are 'plume', 'suzuki', 'point'
Mass_eruption_rate = 'estimate' # kg/s (if point, if suzuki or if plume where Height_or_MFR = MFR)
Height_above_vent = [40000, 30000, 20000, 10000, 5000, 2000, 1000] # m (if point, if suzuki or if plume where Height_or_MFR = Height)
A = [3,4] # (suzuki only)
L = [1,5] # (suzuki only)
Height_or_MFR = 'MFR' # plume only
MFR_minimum = 1e7 # kg/s (plume only)
MFR_maximum = 1e9 # kg/s (plume only)
Exit_velocity = 100 # m/s (plume only)
Exit_temperature = 1073 # K (plume only)
Exit_volatile_fraction = 0 # % (plume only)
# Fall3D (Volcanological input file)
Terminal_velocity_model = 'ganser' # Possibilites are ARASTOOPOR/GANSER/WILSON/DELLINO
Vertical_turbulence_model = 'similarity' # Possibilites are CONSTANT/SIMILARITY
Horizontal_turbulence_model = 'rams' # Possbilities are CONSTANT/RAMS
Vertical_diffusion_coefficient = 100 # m2/s
Horizontal_diffusion_coefficient = 1000 # m2/s
Value_of_CS = 0.1 # RAMS only
# Output (Volcanological input file)
Postprocess_time_interval = 1 # Hours
Postprocess_3D_variables = 'No' # Yes/No
Postprocess_classes = 'No' # Yes/No
Track_points = 'No' # Yes/No
Topography_grid = 'merapi_topography.txt' # Specify ASCII topography grid to use.
# If empty, AIM will look for a topography grid named
# <scenario_name>.top (surfer GRD format)
# Contouring:
# False: Disabled
# True: Provide a fixed number of contours covering entire range
# Number: Fixed (vertical) interval between contours
# List of numbers: Exact contour levels
Thickness_contours = [1, 2, 5, 50, 100] # True, False, number or list of numbers
Thickness_units = 'cm' # mm/cm/m
Load_contours = 2000 # True, False, number or list of numbers
# Run model using specified parameters
if __name__ == '__main__':
from aim import run_multiple_windfields
run_multiple_windfields(__file__,
windfield_directory='merapi_wind',
dircomment=Eruption_comment)
|
GeoscienceAustralia/PF3D
|
hazard_map_example/merapi.py
|
Python
|
gpl-3.0
| 4,957
|
[
"Gaussian"
] |
05178ce45691d137f6fd6bbdc7aab17752a5bf135be8a29150753df07a30baa2
|
# -*- coding: utf-8 -*-
"""Interpret PEP 345 environment markers.
EXPR [in|==|!=|not in] EXPR [or|and] ...
where EXPR belongs to any of those:
python_version = '%s.%s' % (sys.version_info[0], sys.version_info[1])
python_full_version = sys.version.split()[0]
os.name = os.name
sys.platform = sys.platform
platform.version = platform.version()
platform.machine = platform.machine()
platform.python_implementation = platform.python_implementation()
a free string, like '2.6', or 'win32'
"""
__all__ = ['default_environment', 'compile', 'interpret']
from ast import Compare, BoolOp, Attribute, Name, Load, Str, cmpop, boolop
from ast import parse, copy_location, NodeTransformer
import os
import platform
import sys
import weakref
_builtin_compile = compile
from platform import python_implementation
# restricted set of variables
# restricted set of variables; values are sampled once at import time
_VARS = {'sys.platform': sys.platform,
         'python_version': '%s.%s' % sys.version_info[:2],
         # FIXME parsing sys.platform is not reliable, but there is no other
         # way to get e.g. 2.7.2+, and the PEP is defined with sys.version
         'python_full_version': sys.version.split(' ', 1)[0],
         'os.name': os.name,
         'platform.version': platform.version(),
         'platform.machine': platform.machine(),
         'platform.python_implementation': python_implementation(),
         'extra': None # wheel extension
        }

def default_environment():
    """Return copy of default PEP 345 globals dictionary."""
    return dict(_VARS)
class ASTWhitelist(NodeTransformer):
    """Reject any AST node outside a small whitelist; flatten attribute access."""

    # The only node types an environment-marker expression may contain.
    ALLOWED = (Compare, BoolOp, Attribute, Name, Load, Str, cmpop, boolop)

    def __init__(self, statement):
        # Kept only so error messages can point into the offending statement.
        self.statement = statement

    def visit(self, node):
        """Ensure statement only contains allowed nodes."""
        if isinstance(node, self.ALLOWED):
            return NodeTransformer.visit(self, node)
        caret = (' ' * node.col_offset) + '^'
        raise SyntaxError('Not allowed in environment markers.\n%s\n%s' %
                          (self.statement, caret))

    def visit_Attribute(self, node):
        """Flatten one level of attribute access."""
        flattened = Name("%s.%s" % (node.value.id, node.attr), node.ctx)
        return copy_location(flattened, node)
def parse_marker(marker):
    """Parse *marker* into an AST restricted to whitelisted node types."""
    expression = parse(marker, mode='eval')
    return ASTWhitelist(marker).generic_visit(expression)
def compile_marker(parsed_marker):
    """Compile a whitelisted marker AST to a code object.

    Uses the saved builtin compile (the name is shadowed by this module's
    own ``compile`` below); dont_inherit keeps caller __future__ flags out.
    """
    return _builtin_compile(parsed_marker, '<environment marker>', 'eval',
                            dont_inherit=True)
# Weak values: a cached marker function is dropped automatically once no
# caller holds a reference to it.
_cache = weakref.WeakValueDictionary()

def compile(marker):
    """Return compiled marker as a function accepting an environment dict."""
    try:
        return _cache[marker]
    except KeyError:
        pass
    if not marker.strip():
        # An empty/whitespace-only marker imposes no restriction.
        def marker_fn(environment=None, override=None):
            """"""
            return True
    else:
        compiled_marker = compile_marker(parse_marker(marker))
        def marker_fn(environment=None, override=None):
            """override updates environment"""
            if override is None:
                override = {}
            if environment is None:
                environment = default_environment()
            environment.update(override)
            # Safe to eval: the AST was restricted by ASTWhitelist above.
            return eval(compiled_marker, environment)
    # Expose the marker source as the function's docstring for introspection.
    marker_fn.__doc__ = marker
    _cache[marker] = marker_fn
    return _cache[marker]
def interpret(marker, environment=None):
    """Compile *marker* (cached) and evaluate it against *environment*."""
    return compile(marker)(environment)
|
xbianonpi/xbian-package-development
|
content/usr/local/lib/python2.7/dist-packages/distribute-0.6.30-py2.7.egg/_markerlib/markers.py
|
Python
|
gpl-2.0
| 3,558
|
[
"VisIt"
] |
e54b5d9f2c408a334296ab8472d59c48639f7694fae30d8f3762f59337961038
|
"""
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates two
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which is assuming that the
data are Gaussian distributed and performs better than the One-Class SVM
in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as outliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
print(__doc__)
import numpy as np
import pylab as pl
import matplotlib.font_manager
from scipy import stats
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]

# define two outlier detection tools to be compared
classifiers = {
    "One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
                                     kernel="rbf", gamma=0.1),
    "robust covariance estimator": EllipticEnvelope(contamination=.1)}

# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 500), np.linspace(-7, 7, 500))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = 0

# Fit the problem with varying cluster separation.
# NOTE: the outer index is named offset_idx so the inner classifier loop's
# `i` no longer shadows it.
for offset_idx, offset in enumerate(clusters_separation):
    np.random.seed(42)
    # Data generation: two inlier blobs of half the inliers each.
    # BUGFIX: np.random.randn requires integer dimensions; the previous
    # `0.5 * n_inliers` passed a float, which raises TypeError on modern NumPy.
    X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
    X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
    X = np.r_[X1, X2]
    # Add outliers drawn uniformly over the plotting window
    X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]

    # Fit the model with the One-Class SVM
    pl.figure(figsize=(10, 5))
    for i, (clf_name, clf) in enumerate(classifiers.items()):
        # fit the data and tag outliers
        clf.fit(X)
        y_pred = clf.decision_function(X).ravel()
        # Threshold the decision function at the known contamination level.
        threshold = stats.scoreatpercentile(y_pred,
                                            100 * outliers_fraction)
        y_pred = y_pred > threshold
        n_errors = (y_pred != ground_truth).sum()
        # plot the levels lines and the points
        Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
        Z = Z.reshape(xx.shape)
        subplot = pl.subplot(1, 2, i + 1)
        subplot.set_title("Outlier detection")
        subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
                         cmap=pl.cm.Blues_r)
        a = subplot.contour(xx, yy, Z, levels=[threshold],
                            linewidths=2, colors='red')
        subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
                         colors='orange')
        b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
        c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
        subplot.axis('tight')
        subplot.legend(
            [a.collections[0], b, c],
            ['learned decision function', 'true inliers', 'true outliers'],
            prop=matplotlib.font_manager.FontProperties(size=11))
        subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
        subplot.set_xlim((-7, 7))
        subplot.set_ylim((-7, 7))
    pl.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)

pl.show()
|
treycausey/scikit-learn
|
examples/covariance/plot_outlier_detection.py
|
Python
|
bsd-3-clause
| 3,874
|
[
"Gaussian"
] |
dd43e753a73840a98fe9bcdacdc9b3f0786696c0783305bcf82a4026c291c2e3
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2010 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
# pylint: enable=E1101
from decimal import Decimal
from storm.info import get_cls_info
from storm.references import Reference
from stoqlib.database.properties import (UnicodeCol, QuantityCol, DateTimeCol,
PriceCol, IntCol, BoolCol, PercentCol,
IdCol)
from stoqlib.domain.base import Domain
from stoqlib.lib.dateutils import localtoday
# SIGLAS:
# BC - Base de Calculo
# ST - Situação tributária
# CST - Codigo ST
# MVA - Margem de valor adicionado
#
# Base Tax Classes
#
class BaseTax(Domain):
    """Common behaviour for tax rows whose values come from tax templates."""

    def set_item_tax(self, invoice_item, template=None):
        """ Set the tax of an invoice item.

        :param invoice_item: the item of in/out invoice
        """
        template = template or self.get_tax_template(invoice_item)
        if not template:
            return
        # Copy every template column onto this row, skipping the
        # identity/metadata columns that must stay unique per row.
        for column in get_cls_info(template.__class__).columns:
            if column.name not in ['product_tax_template_id', 'te_id', 'id']:
                setattr(self, column.name, getattr(template, column.name))
        self.set_initial_values(invoice_item)

    @classmethod
    def get_tax_template(cls, invoice_item):  # pragma no cover
        """Use this method in InvoiceItemIpi or InvoiceItemIcms classes to get
        the respective tax template.

        :param invoice_item: the item of in/out invoice
        """
        raise NotImplementedError

    def set_initial_values(self, invoice_item):
        """Use this method to setup the initial values of the fields.
        """
        self.update_values(invoice_item)

    def update_values(self, invoice_item):  # pragma no cover
        """Hook for subclasses; intentionally a no-op here."""
        pass
class BaseICMS(BaseTax):
    """NfeProductIcms stores the default values that will be used when
    creating NfeItemIcms objects
    """
    # FIXME: this is only used by pylint
    __storm_table__ = 'invalid'

    # Column names follow the module-level glossary: BC = Base de Calculo,
    # ST = Situacao Tributaria, CST = Codigo ST, MVA = Margem de valor
    # adicionado.  "p_"-prefixed columns hold percentages.
    orig = IntCol(default=None)
    cst = IntCol(default=None)
    mod_bc = IntCol(default=None)
    p_icms = PercentCol(default=None)
    mod_bc_st = IntCol(default=None)
    p_mva_st = PercentCol(default=None)
    p_red_bc_st = PercentCol(default=None)
    p_icms_st = PercentCol(default=None)
    p_red_bc = PercentCol(default=None)
    # Whether the IPI value is added when computing the (ST) calculation
    # base — see InvoiceItemIcms._calc_normal / _calc_st.
    bc_include_ipi = BoolCol(default=True)
    bc_st_include_ipi = BoolCol(default=True)

    # Simples Nacional
    csosn = IntCol(default=None)
    p_cred_sn = PercentCol(default=None)
class BaseIPI(BaseTax):
    """Columns shared by the IPI tax template and invoice item classes."""

    # How v_ipi is computed: as a percentage over the base (aliquota) or
    # as quantity times value-per-unit (unidade) — see
    # InvoiceItemIpi.update_values.
    (CALC_ALIQUOTA,
     CALC_UNIDADE) = range(2)

    cl_enq = UnicodeCol(default=u'')
    cnpj_prod = UnicodeCol(default=u'')
    c_selo = UnicodeCol(default=u'')
    q_selo = IntCol(default=None)
    c_enq = UnicodeCol(default=u'')
    cst = IntCol(default=None)
    p_ipi = PercentCol(default=None)
    q_unid = QuantityCol(default=None)
    # Selected calculation mode (one of the CALC_* constants above).
    calculo = IntCol(default=CALC_ALIQUOTA)
#
# Product Tax Classes
#
class ProductIcmsTemplate(BaseICMS):
    """Default ICMS values attached to a ProductTaxTemplate."""

    __storm_table__ = 'product_icms_template'

    product_tax_template_id = IdCol()
    product_tax_template = Reference(product_tax_template_id, 'ProductTaxTemplate.id')

    # Simples Nacional
    # Date after which p_cred_sn stops being valid; None means it never
    # expires.
    p_cred_sn_valid_until = DateTimeCol(default=None)

    def is_p_cred_sn_valid(self):
        """Return ``True`` while p_cred_sn is still valid (i.e. it has
        not expired), ``False`` once p_cred_sn_valid_until has passed."""
        if not self.p_cred_sn_valid_until:
            # If we don't have a valid_until, means p_cred_sn will never
            # expire. Therefore, p_cred_sn is valid.
            return True
        elif self.p_cred_sn_valid_until.date() < localtoday().date():
            return False
        return True
class ProductIpiTemplate(BaseIPI):
    """Default IPI values attached to a ProductTaxTemplate."""

    __storm_table__ = 'product_ipi_template'

    product_tax_template_id = IdCol()
    product_tax_template = Reference(product_tax_template_id, 'ProductTaxTemplate.id')
class ProductTaxTemplate(Domain):
    """A named tax template a product can reference, typed ICMS or IPI."""

    (TYPE_ICMS,
     TYPE_IPI) = range(2)

    __storm_table__ = 'product_tax_template'

    # Human readable name for each tax type.
    types = {TYPE_ICMS: u"ICMS",
             TYPE_IPI: u"IPI"}

    # Maps each tax type to the template class storing its values.
    type_map = {TYPE_ICMS: ProductIcmsTemplate,
                TYPE_IPI: ProductIpiTemplate}

    name = UnicodeCol(default=u'')
    tax_type = IntCol()

    def get_tax_model(self):
        """Return the ICMS/IPI template row that references this template."""
        klass = self.type_map[self.tax_type]
        store = self.store
        return store.find(klass, product_tax_template=self).one()

    def get_tax_type_str(self):
        """Return the human readable name of this template's tax type."""
        return self.types[self.tax_type]
class InvoiceItemIcms(BaseICMS):
    """ICMS values effectively computed for one invoice item."""

    __storm_table__ = 'invoice_item_icms'

    v_bc = PriceCol(default=None)        # calculation base (Base de Calculo)
    v_icms = PriceCol(default=None)      # computed ICMS value
    v_bc_st = PriceCol(default=None)     # calculation base for ST
    v_icms_st = PriceCol(default=None)   # computed ICMS-ST value

    # Simples Nacional
    v_cred_icms_sn = PriceCol(default=None)
    v_bc_st_ret = PriceCol(default=None)
    v_icms_st_ret = PriceCol(default=None)

    def _calc_cred_icms_sn(self, invoice_item):
        # Simples Nacional credit: a percentage of the item total.
        # Callers guarantee p_cred_sn is not None before calling (see
        # _update_simples).
        if self.p_cred_sn >= 0:
            self.v_cred_icms_sn = invoice_item.get_total() * self.p_cred_sn / 100

    def _calc_st(self, invoice_item):
        # ST base: item total, optionally plus the IPI value, reduced by
        # p_red_bc_st and increased by the MVA margin (p_mva_st).
        self.v_bc_st = invoice_item.price * invoice_item.quantity
        if self.bc_st_include_ipi and invoice_item.ipi_info:
            self.v_bc_st += invoice_item.ipi_info.v_ipi
        if self.p_red_bc_st is not None:
            self.v_bc_st -= self.v_bc_st * self.p_red_bc_st / 100
        if self.p_mva_st is not None:
            self.v_bc_st += self.v_bc_st * self.p_mva_st / 100

        if self.v_bc_st is not None and self.p_icms_st is not None:
            self.v_icms_st = self.v_bc_st * self.p_icms_st / 100
            # The ST value is net of the normal ICMS already computed.
            if self.v_icms is not None and self.v_icms_st is not None:
                self.v_icms_st -= self.v_icms

    def _calc_normal(self, invoice_item):
        # Normal ICMS base: item total, optionally plus the IPI value,
        # reduced by p_red_bc.
        self.v_bc = invoice_item.price * invoice_item.quantity
        if self.bc_include_ipi and invoice_item.ipi_info:
            self.v_bc += invoice_item.ipi_info.v_ipi
        if self.p_red_bc is not None:
            self.v_bc -= self.v_bc * self.p_red_bc / 100
        if self.p_icms is not None and self.v_bc is not None:
            self.v_icms = self.v_bc * self.p_icms / 100

    def _update_normal(self, invoice_item):
        """Update the values according to the normal tax regime (non
        Simples) calculations, dispatching on the CST code."""
        if self.cst == 0:
            self.p_red_bc = Decimal(0)
            self._calc_normal(invoice_item)
        elif self.cst == 10:
            self.p_red_bc = Decimal(0)
            self._calc_normal(invoice_item)
            self._calc_st(invoice_item)
        elif self.cst == 20:
            self._calc_normal(invoice_item)
        elif self.cst == 30:
            # Only ST applies; normal ICMS is zeroed.
            self.v_icms = 0
            self.v_bc = 0
            self._calc_st(invoice_item)
        elif self.cst in (40, 41, 50):
            # No ICMS values for these codes.
            self.v_icms = 0
            self.v_bc = 0
        elif self.cst == 51:
            self._calc_normal(invoice_item)
        elif self.cst == 60:
            # ST fields zeroed; nothing else is computed for this code.
            self.v_icms_st = 0
            self.v_bc_st = 0
        elif self.cst in (70, 90):
            self._calc_normal(invoice_item)
            self._calc_st(invoice_item)

    def _update_simples(self, invoice_item):
        """Update the values according to the Simples Nacional regime,
        dispatching on the CSOSN code.  Note the branches are cumulative
        (plain ``if``, not ``elif``)."""
        if self.csosn in [300, 400, 500]:
            self.v_bc_st_ret = 0
            self.v_icms_st_ret = 0

        if self.csosn in [101, 201]:
            if self.p_cred_sn is None:
                self.p_cred_sn = Decimal(0)
            self._calc_cred_icms_sn(invoice_item)

        if self.csosn in [201, 202, 203]:
            self._calc_st(invoice_item)

        if self.csosn == 900:
            if self.p_cred_sn is None:
                self.p_cred_sn = Decimal(0)
            self._calc_cred_icms_sn(invoice_item)
            self._calc_normal(invoice_item)
            self._calc_st(invoice_item)

    def update_values(self, invoice_item):
        """Recompute all values for *invoice_item*, choosing the regime
        based on the branch's CRT code."""
        branch = invoice_item.parent.branch
        # Simples nacional
        if branch.crt in [1, 2]:
            self._update_simples(invoice_item)
        else:
            self._update_normal(invoice_item)

    @classmethod
    def get_tax_template(cls, invoice_item):
        """Return the ICMS template of the item's product."""
        return invoice_item.sellable.product.icms_template
class InvoiceItemIpi(BaseIPI):
    """IPI values effectively computed for one invoice item."""

    __storm_table__ = 'invoice_item_ipi'

    v_ipi = PriceCol(default=0)      # computed IPI value
    v_bc = PriceCol(default=None)    # calculation base (CALC_ALIQUOTA mode)
    v_unid = PriceCol(default=None)  # value per unit (CALC_UNIDADE mode)

    def set_initial_values(self, invoice_item):
        """Seed quantity/unit-value from the item, then compute."""
        self.q_unid = invoice_item.quantity
        self.v_unid = invoice_item.price
        self.update_values(invoice_item)

    def update_values(self, invoice_item):
        """Recompute v_bc/v_ipi for *invoice_item*.

        The value comes either from a percentage over the base
        (CALC_ALIQUOTA) or from quantity times value-per-unit
        (CALC_UNIDADE).
        """
        # IPI is only calculated if cst is one of the following
        if self.cst not in (0, 49, 50, 99):
            return

        if self.calculo == self.CALC_ALIQUOTA:
            self.v_bc = invoice_item.price * invoice_item.quantity
            if self.p_ipi is not None:
                self.v_ipi = self.v_bc * self.p_ipi / 100
        elif self.calculo == self.CALC_UNIDADE:
            if self.q_unid is not None and self.v_unid is not None:
                self.v_ipi = self.q_unid * self.v_unid

    @classmethod
    def get_tax_template(cls, invoice_item):
        """Return the IPI template of the item's product."""
        return invoice_item.sellable.product.ipi_template
|
tiagocardosos/stoq
|
stoqlib/domain/taxes.py
|
Python
|
gpl-2.0
| 10,006
|
[
"VisIt"
] |
da8358f28cdce798958a87931d13397b3024fc1284be5e4820fd14e7b843ad64
|
#!/usr/bin/env python
# (c) 2012-2018, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
# Script entry point: delegate to the project's ``manage`` callable.
# The import is deferred so merely importing this module has no side
# effects.
if __name__ == '__main__':
    from galaxy import manage
    manage()
|
chouseknecht/galaxy
|
manage.py
|
Python
|
apache-2.0
| 785
|
[
"Galaxy"
] |
9d55338e34e2fde727ee08b298fd63cb309cbde535ad781d16e035fa6bce6440
|
# -*- coding: utf8 -*-
from PyQt5.QtWidgets import QWidget, QHBoxLayout, QPushButton
from PyQt5.QtCore import Qt
from PDielec.Utilities import Debug
class ScenarioTab(QWidget):
    """Base class for all scenario tabs in the notebook.

    Keeps the per-scenario ``settings`` dictionary and provides the row
    of buttons every scenario shares (add / delete / switch between the
    powder and single-crystal scenario types).
    """

    def __init__(self, parent, debug=False):
        """This is the base class for all ScenarioTabs."""
        # NOTE(review): ``super(QWidget, self)`` starts the MRO lookup
        # *after* QWidget; the conventional spelling is
        # ``super(ScenarioTab, self)``.  Left unchanged — confirm before
        # altering since the GUI currently relies on this behaviour.
        super(QWidget, self).__init__(parent)
        global debugger
        debugger = Debug(debug,'ScenarioTab:')
        debugger.print('Start:: initialiser')
        # A refresh is needed before the tab contents can be trusted.
        self.refreshRequired = True
        self.settings = {}
        self.notebook = parent
        self.settings['Legend'] = 'Unset'
        self.scenarioType = None
        self.settings['Scenario type'] = 'Unset'
        self.vs_cm1 = [0, 0]
        debugger.print('Finished:: initialiser')

    def requestRefresh(self):
        """Mark the tab as needing a refresh."""
        debugger.print('Start:: requestRefresh')
        self.refreshRequired = True
        debugger.print('Finished:: requestRefresh')
        return

    def set_reader(self,reader):
        """Store the output file *reader* and flag that a refresh is needed."""
        debugger.print('Start:: set_reader')
        self.refreshRequired = True
        self.reader = reader
        debugger.print('Finished:: set_reader')
        return

    def setScenarioIndex(self,index):
        """Record this tab's position in the notebook and, unless the
        legend was customised by the user, rename it 'Scenario <index+1>'."""
        debugger.print('Start:: setScenarioIndex',index)
        self.scenarioIndex = index
        text = self.settings['Legend']
        # Only overwrite automatically generated legends.
        if text.startswith('Unset') or text.startswith('Scenario ') or text.startswith('Powder scenario ') or text.startswith('Crystal scenario '):
            debugger.print('setScenarioIndex changing scenario legend','Scenario+str(index+1)')
            self.legend_le.setText('Scenario '+str(index + 1))
            self.settings['Legend'] = 'Scenario '+str(index + 1)
        debugger.print('Finished:: setScenarioIndex',index)
        return

    def print_settings(self):
        """Dump the settings dictionary to stdout."""
        debugger.print('Start:: print_settings')
        print('#')
        print('# Scenario tab')
        print('#')
        print('tab = self.notebook.scenarios')
        for key in self.settings:
            print(key, self.settings[key])
        debugger.print('Finished:: print_settings')

    def on_legend_le_changed(self,text):
        """Handle edits to the legend line-edit widget."""
        debugger.print('on legend change', text)
        self.refreshRequired = True
        self.settings['Legend'] = text
        return

    def add_scenario_buttons(self):
        """Add a set of scenario buttons in an hbox. Return the hbox"""
        debugger.print('Start:: add_scenario_buttons')
        hbox = QHBoxLayout()
        self.addScenarioButton = QPushButton('Add another scenario')
        # Tooltip wording fixed ("the the" -> "to the").
        self.addScenarioButton.setToolTip('Add another scenario to the notebook tabs, \nthe new scenario is added to the end of the current tab list')
        self.addScenarioButton.clicked.connect(self.addScenarioButtonClicked)
        hbox.addWidget(self.addScenarioButton)
        self.deleteScenarioButton = QPushButton('Delete this scenario')
        self.deleteScenarioButton.setToolTip('Delete the current scenario')
        self.deleteScenarioButton.clicked.connect(self.deleteScenarioButtonClicked)
        hbox.addWidget(self.deleteScenarioButton)
        # The switch button's label depends on the current scenario type.
        if self.scenarioType == 'Powder':
            self.switchScenarioButton = QPushButton('Switch to crystal scenario')
            self.switchScenarioButton.setToolTip('Switch the current scenario to a single crystal scenario')
        else:
            self.switchScenarioButton = QPushButton('Switch to powder scenario')
            self.switchScenarioButton.setToolTip('Switch the current scenario to a powder scenario')
        self.switchScenarioButton.clicked.connect(self.switchScenarioButtonClicked)
        hbox.addWidget(self.switchScenarioButton)
        debugger.print('Finished:: add_scenario_buttons')
        return hbox

    def addScenarioButtonClicked(self):
        """Ask the notebook to append a copy of this scenario."""
        # Add another scenario
        debugger.print('addScenarioButtonClicked')
        self.notebook.addScenario(copyFromIndex=self.scenarioIndex)
        return

    def deleteScenarioButtonClicked(self):
        """Ask the notebook to remove this scenario."""
        # Delete a scenario
        debugger.print('deleteScenarioButtonClicked')
        self.notebook.deleteScenario(self.scenarioIndex)
        return

    def switchScenarioButtonClicked(self):
        """Ask the notebook to switch this scenario's type."""
        # Switch a scenario
        debugger.print('SwitchScenarioButtonClicked')
        self.notebook.switchScenario(self.scenarioIndex)
        return
|
JohnKendrick/PDielec
|
PDielec/GUI/ScenarioTab.py
|
Python
|
mit
| 4,331
|
[
"CRYSTAL"
] |
63b9016c844627b65e3a4ddc6a3aefb217c264ce13cf69c6e2bea62e10c966f6
|
"""
A simple VTK widget for PyQt v4, the Qt v4 bindings for Python.
See http://www.trolltech.com for Qt documentation, and
http://www.riverbankcomputing.co.uk for PyQt.
This class is based on the vtkGenericRenderWindowInteractor and is
therefore fairly powerful. It should also play nicely with the
vtk3DWidget code.
Created by Prabhu Ramachandran, May 2002
Based on David Gobbi's QVTKRenderWidget.py
Changes by Gerard Vermeulen Feb. 2003
Win32 support.
Changes by Gerard Vermeulen, May 2003
Bug fixes and better integration with the Qt framework.
Changes by Phil Thompson, Nov. 2006
Ported to PyQt v4.
Added support for wheel events.
Changes by Phil Thompson, Oct. 2007
Bug fixes.
Changes by Phil Thompson, Mar. 2008
Added cursor support.
"""
try:
from PyQt4 import QtCore, QtGui
except ImportError:
try:
from PySide import QtCore, QtGui
except ImportError as err:
raise ImportError("Cannot load either PyQt or PySide")
import vtk
# NOTE(review): Python 2 era code (``dict.has_key``, old-style
# ``raise X, y`` syntax, old-style signal connect); it will not compile
# under Python 3.
class QVTKRenderWindowInteractor(QtGui.QWidget):

    """ A QVTKRenderWindowInteractor for Python and Qt. Uses a
    vtkGenericRenderWindowInteractor to handle the interactions. Use
    GetRenderWindow() to get the vtkRenderWindow. Create with the
    keyword stereo=1 in order to generate a stereo-capable window.

    The user interface is summarized in vtkInteractorStyle.h:

    - Keypress j / Keypress t: toggle between joystick (position
    sensitive) and trackball (motion sensitive) styles. In joystick
    style, motion occurs continuously as long as a mouse button is
    pressed. In trackball style, motion occurs when the mouse button
    is pressed and the mouse pointer moves.

    - Keypress c / Keypress o: toggle between camera and object
    (actor) modes. In camera mode, mouse events affect the camera
    position and focal point. In object mode, mouse events affect
    the actor that is under the mouse pointer.

    - Button 1: rotate the camera around its focal point (if camera
    mode) or rotate the actor around its origin (if actor mode). The
    rotation is in the direction defined from the center of the
    renderer's viewport towards the mouse position. In joystick mode,
    the magnitude of the rotation is determined by the distance the
    mouse is from the center of the render window.

    - Button 2: pan the camera (if camera mode) or translate the actor
    (if object mode). In joystick mode, the direction of pan or
    translation is from the center of the viewport towards the mouse
    position. In trackball mode, the direction of motion is the
    direction the mouse moves. (Note: with 2-button mice, pan is
    defined as <Shift>-Button 1.)

    - Button 3: zoom the camera (if camera mode) or scale the actor
    (if object mode). Zoom in/increase scale if the mouse position is
    in the top half of the viewport; zoom out/decrease scale if the
    mouse position is in the bottom half. In joystick mode, the amount
    of zoom is controlled by the distance of the mouse pointer from
    the horizontal centerline of the window.

    - Keypress 3: toggle the render window into and out of stereo
    mode. By default, red-blue stereo pairs are created. Some systems
    support Crystal Eyes LCD stereo glasses; you have to invoke
    SetStereoTypeToCrystalEyes() on the rendering window. Note: to
    use stereo you also need to pass a stereo=1 keyword argument to
    the constructor.

    - Keypress e: exit the application.

    - Keypress f: fly to the picked point

    - Keypress p: perform a pick operation. The render window interactor
    has an internal instance of vtkCellPicker that it uses to pick.

    - Keypress r: reset the camera view along the current view
    direction. Centers the actors and moves the camera so that all actors
    are visible.

    - Keypress s: modify the representation of all actors so that they
    are surfaces.

    - Keypress u: invoke the user-defined function. Typically, this
    keypress will bring up an interactor that you can type commands in.

    - Keypress w: modify the representation of all actors so that they
    are wireframe.
    """

    # Map between VTK and Qt cursors.
    _CURSOR_MAP = {
        0:  QtCore.Qt.ArrowCursor,          # VTK_CURSOR_DEFAULT
        1:  QtCore.Qt.ArrowCursor,          # VTK_CURSOR_ARROW
        2:  QtCore.Qt.SizeBDiagCursor,      # VTK_CURSOR_SIZENE
        3:  QtCore.Qt.SizeFDiagCursor,      # VTK_CURSOR_SIZENWSE
        4:  QtCore.Qt.SizeBDiagCursor,      # VTK_CURSOR_SIZESW
        5:  QtCore.Qt.SizeFDiagCursor,      # VTK_CURSOR_SIZESE
        6:  QtCore.Qt.SizeVerCursor,        # VTK_CURSOR_SIZENS
        7:  QtCore.Qt.SizeHorCursor,        # VTK_CURSOR_SIZEWE
        8:  QtCore.Qt.SizeAllCursor,        # VTK_CURSOR_SIZEALL
        9:  QtCore.Qt.PointingHandCursor,   # VTK_CURSOR_HAND
        10: QtCore.Qt.CrossCursor,          # VTK_CURSOR_CROSSHAIR
    }

    def __init__(self, parent=None, wflags=QtCore.Qt.WindowFlags(), **kw):
        """Create the widget.

        Recognised keyword arguments: ``stereo`` (truthy value enables a
        stereo-capable window), ``rw`` (user supplied vtkRenderWindow)
        and ``iren`` (user supplied interactor).
        """
        # the current button
        self._ActiveButton = QtCore.Qt.NoButton

        # private attributes
        self.__oldFocus = None
        self.__saveX = 0
        self.__saveY = 0
        self.__saveModifiers = QtCore.Qt.NoModifier
        self.__saveButtons = QtCore.Qt.NoButton

        # do special handling of some keywords:
        # stereo, rw

        stereo = 0

        if kw.has_key('stereo'):
            if kw['stereo']:
                stereo = 1

        rw = None

        if kw.has_key('rw'):
            rw = kw['rw']

        # create qt-level widget
        QtGui.QWidget.__init__(self, parent, wflags|QtCore.Qt.MSWindowsOwnDC)

        if rw: # user-supplied render window
            self._RenderWindow = rw
        else:
            self._RenderWindow = vtk.vtkRenderWindow()

        # Attach the VTK render window to this widget's native window id.
        self._RenderWindow.SetWindowInfo(str(int(self.winId())))

        if stereo: # stereo mode
            self._RenderWindow.StereoCapableWindowOn()
            self._RenderWindow.SetStereoTypeToCrystalEyes()

        if kw.has_key('iren'):
            self._Iren = kw['iren']
        else:
            self._Iren = vtk.vtkGenericRenderWindowInteractor()
            self._Iren.SetRenderWindow(self._RenderWindow)

        # do all the necessary qt setup
        self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)
        self.setAttribute(QtCore.Qt.WA_PaintOnScreen)
        self.setMouseTracking(True) # get all mouse events
        self.setFocusPolicy(QtCore.Qt.WheelFocus)
        self.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding))

        # VTK timer requests are serviced through this Qt timer.
        self._Timer = QtCore.QTimer(self)
        self.connect(self._Timer, QtCore.SIGNAL('timeout()'), self.TimerEvent)

        self._Iren.AddObserver('CreateTimerEvent', self.CreateTimer)
        self._Iren.AddObserver('DestroyTimerEvent', self.DestroyTimer)
        self._Iren.GetRenderWindow().AddObserver('CursorChangedEvent',
                                                 self.CursorChangedEvent)

    def __getattr__(self, attr):
        """Makes the object behave like a vtkGenericRenderWindowInteractor"""
        if attr == '__vtk__':
            return lambda t=self._Iren: t
        elif hasattr(self._Iren, attr):
            return getattr(self._Iren, attr)
        else:
            raise AttributeError, self.__class__.__name__ + \
                  " has no attribute named " + attr

    def CreateTimer(self, obj, evt):
        # VTK asked for a timer: fire TimerEvent every 10 ms.
        self._Timer.start(10)

    def DestroyTimer(self, obj, evt):
        self._Timer.stop()
        # Returning 1 tells VTK the timer was destroyed.
        return 1

    def TimerEvent(self):
        # Forward Qt timer ticks to the VTK interactor.
        self._Iren.TimerEvent()

    def CursorChangedEvent(self, obj, evt):
        """Called when the CursorChangedEvent fires on the render window."""
        # This indirection is needed since when the event fires, the current
        # cursor is not yet set so we defer this by which time the current
        # cursor should have been set.
        QtCore.QTimer.singleShot(0, self.ShowCursor)

    def HideCursor(self):
        """Hides the cursor."""
        self.setCursor(QtCore.Qt.BlankCursor)

    def ShowCursor(self):
        """Shows the cursor."""
        vtk_cursor = self._Iren.GetRenderWindow().GetCurrentCursor()
        qt_cursor = self._CURSOR_MAP.get(vtk_cursor, QtCore.Qt.ArrowCursor)
        self.setCursor(qt_cursor)

    def sizeHint(self):
        # Default widget size.
        return QtCore.QSize(400, 400)

    def paintEngine(self):
        # VTK paints straight onto the native window; no Qt paint engine.
        return None

    def paintEvent(self, ev):
        self._Iren.Render()

    def resizeEvent(self, ev):
        # Keep the render window and interactor sized with the widget.
        w = self.width()
        h = self.height()

        self._RenderWindow.SetSize(w, h)
        self._Iren.SetSize(w, h)

    def _GetCtrlShift(self, ev):
        # Return the (ctrl, shift) modifier state for *ev*, falling back
        # on the state saved at the last mouse move when the event type
        # carries no modifiers() of its own.
        ctrl = shift = False

        if hasattr(ev, 'modifiers'):
            if ev.modifiers() & QtCore.Qt.ShiftModifier:
                shift = True
            if ev.modifiers() & QtCore.Qt.ControlModifier:
                ctrl = True
        else:
            if self.__saveModifiers & QtCore.Qt.ShiftModifier:
                shift = True
            if self.__saveModifiers & QtCore.Qt.ControlModifier:
                ctrl = True

        return ctrl, shift

    def enterEvent(self, ev):
        # Grab keyboard focus while the pointer is over the widget,
        # remembering the previous focus widget so it can be restored.
        if not self.hasFocus():
            self.__oldFocus = self.focusWidget()
            self.setFocus()

        ctrl, shift = self._GetCtrlShift(ev)
        self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
                                            ctrl, shift, chr(0), 0, None)
        self._Iren.EnterEvent()

    def leaveEvent(self, ev):
        # Restore the previously focused widget if no button is down.
        if self.__saveButtons == QtCore.Qt.NoButton and self.__oldFocus:
            self.__oldFocus.setFocus()
            self.__oldFocus = None

        ctrl, shift = self._GetCtrlShift(ev)
        self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
                                            ctrl, shift, chr(0), 0, None)
        self._Iren.LeaveEvent()

    def mousePressEvent(self, ev):
        ctrl, shift = self._GetCtrlShift(ev)
        # A double click is reported as a press with the repeat flag set.
        repeat = 0
        if ev.type() == QtCore.QEvent.MouseButtonDblClick:
            repeat = 1
        self._Iren.SetEventInformationFlipY(ev.x(), ev.y(),
                                            ctrl, shift, chr(0), repeat, None)

        self._ActiveButton = ev.button()

        if self._ActiveButton == QtCore.Qt.LeftButton:
            self._Iren.LeftButtonPressEvent()
        elif self._ActiveButton == QtCore.Qt.RightButton:
            self._Iren.RightButtonPressEvent()
        elif self._ActiveButton == QtCore.Qt.MidButton:
            self._Iren.MiddleButtonPressEvent()

    def mouseReleaseEvent(self, ev):
        ctrl, shift = self._GetCtrlShift(ev)
        self._Iren.SetEventInformationFlipY(ev.x(), ev.y(),
                                            ctrl, shift, chr(0), 0, None)

        if self._ActiveButton == QtCore.Qt.LeftButton:
            self._Iren.LeftButtonReleaseEvent()
        elif self._ActiveButton == QtCore.Qt.RightButton:
            self._Iren.RightButtonReleaseEvent()
        elif self._ActiveButton == QtCore.Qt.MidButton:
            self._Iren.MiddleButtonReleaseEvent()

    def mouseMoveEvent(self, ev):
        # Cache position/modifiers for events that don't carry them.
        self.__saveModifiers = ev.modifiers()
        self.__saveButtons = ev.buttons()
        self.__saveX = ev.x()
        self.__saveY = ev.y()

        ctrl, shift = self._GetCtrlShift(ev)
        self._Iren.SetEventInformationFlipY(ev.x(), ev.y(),
                                            ctrl, shift, chr(0), 0, None)
        self._Iren.MouseMoveEvent()

    def keyPressEvent(self, ev):
        ctrl, shift = self._GetCtrlShift(ev)
        if ev.key() < 256:
            key = str(ev.text())
        else:
            key = chr(0)

        self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
                                            ctrl, shift, key, 0, None)
        self._Iren.KeyPressEvent()
        self._Iren.CharEvent()

    def keyReleaseEvent(self, ev):
        ctrl, shift = self._GetCtrlShift(ev)
        if ev.key() < 256:
            key = chr(ev.key())
        else:
            key = chr(0)

        self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
                                            ctrl, shift, key, 0, None)
        self._Iren.KeyReleaseEvent()

    def wheelEvent(self, ev):
        if ev.delta() >= 0:
            self._Iren.MouseWheelForwardEvent()
        else:
            self._Iren.MouseWheelBackwardEvent()

    def GetRenderWindow(self):
        """Return the vtkRenderWindow this widget renders into."""
        return self._RenderWindow

    def Render(self):
        # Schedule a Qt repaint, which in turn calls self._Iren.Render().
        self.update()
def QVTKRenderWidgetConeExample():
    """Minimal demo: render a cone inside a QVTKRenderWindowInteractor."""
    # Qt requires a QApplication before any widget can be created.
    application = QtGui.QApplication(['QVTKRenderWindowInteractor'])

    interactor = QVTKRenderWindowInteractor()
    interactor.Initialize()
    interactor.Start()
    # Let the 'q' key quit the application; remove this line to disable.
    interactor.AddObserver("ExitEvent", lambda o, e, a=application: a.quit())

    # Build the VTK pipeline: cone source -> mapper -> actor -> renderer.
    source = vtk.vtkConeSource()
    source.SetResolution(8)
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInput(source.GetOutput())
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)

    renderer = vtk.vtkRenderer()
    renderer.AddActor(actor)
    interactor.GetRenderWindow().AddRenderer(renderer)

    # Show the widget and enter the Qt event loop.
    interactor.show()
    application.exec_()
# Run the demo when executed as a script.
if __name__ == "__main__":
    QVTKRenderWidgetConeExample()
|
cjh1/vtkmodular
|
Wrapping/Python/vtk/qt4/QVTKRenderWindowInteractor.py
|
Python
|
bsd-3-clause
| 13,433
|
[
"CRYSTAL",
"VTK"
] |
6c26da938cdb152273b9dfc6671e61a37ed2282de09e2428e42c3aa6abebe045
|
# -*- coding: utf-8 -*-
"""
Create an initial V2 ATP Profile for an ECs Mesh and write it out as .vtp.
"""
import os
import sys
import math
import vtk
import numpy
import matplotlib.pyplot as pyplot
# Module-level configuration; the calling script is expected to set these
# (see usage()) before invoking buildATPMesh().
centrelineFile = None  # input centreline (legacy .vtk polydata)
meshFile = None        # input ECs mesh (.vtp)
debugAtpFile = None    # output debug mesh carrying Dist/initialATP arrays
atpFile = None         # output ATP map (.vtp)
numBranches = 3
numQuads = 0
numAxialQuads = 0
numECsPerCol = 0
atpGradient = 0        # sigmoid steepness; 0 yields a flat profile
atpMin = 0.1           # sigmoid offset (see sigmoidATP)
atpMax = 1.0
outMin = -1.0          # distances are rescaled into [outMin, outMax]
outMax = 1.0
def rescale(val, inMin, inMax):
    """Linearly map *val* from [inMin, inMax] onto the module-level
    output range [outMin, outMax]."""
    in_span = inMax - inMin
    out_span = outMax - outMin
    return outMin + (val - inMin) * out_span / in_span
# Sigmoid function for providing ATP values. The atpGradient variable
# controls the "spread" of the values across the given domain.
def sigmoidATP(x):
    # NOTE(review): the range of this expression is
    # (atpMin, atpMin + atpMax), not (atpMin, atpMax) — confirm that is
    # the intended behaviour before relying on atpMax as an upper bound.
    return atpMin + (atpMax / (1.0 + numpy.exp(-atpGradient * x)))
def buildATPMesh():
    """Compute the initial ATP profile for an ECs mesh and write it out.

    Reads the global ``meshFile`` (.vtp ECs mesh) and ``centrelineFile``
    (legacy .vtk centreline), assigns each cell centroid a sigmoid ATP
    value derived from its distance to the centreline origin, writes a
    debug mesh (``debugAtpFile``) and the final ATP map (``atpFile``),
    then plots the sigmoid profile for visual validation.
    """
    # Report our CWD just for testing purposes.
    print("CWD:", os.getcwd())

    # Read in the mesh.
    print("Reading", meshFile)
    meshReader = vtk.vtkXMLPolyDataReader()
    meshReader.SetFileName(meshFile)
    meshReader.Update()
    ecMesh = meshReader.GetOutput()

    # Read in the centreline.
    print("Reading", centrelineFile)
    centrelineReader = vtk.vtkPolyDataReader()
    centrelineReader.SetFileName(centrelineFile)
    centrelineReader.Update()
    centreline = centrelineReader.GetOutput()

    origin = centreline.GetPoint(0)
    print("Origin:", origin)

    # Put the ecMesh through centroids filter.
    centroidFilter = vtk.vtkCellCenters()
    centroidFilter.SetInputData(ecMesh)
    centroidFilter.Update()
    centroids = centroidFilter.GetOutput()

    # Iterate over each centroid and find the closest segment
    centroidPoints = centroids.GetPoints()

    # Only for DEBUG output.
    distArray = vtk.vtkDoubleArray()
    distArray.SetName("Dist")

    # For each point calculate the distance from origin.
    totalPoints = centroidPoints.GetNumberOfPoints()
    for ptId in range(totalPoints):
        # Distance2BetweenPoints returns the squared distance.
        distance = vtk.vtkMath.Distance2BetweenPoints(origin, centroidPoints.GetPoint(ptId))
        distArray.InsertNextValue(math.sqrt(distance))

    # Get the range of the distance values.
    inMin, inMax = distArray.GetRange()

    atpArray = vtk.vtkFloatArray()
    atpArray.SetName('initialATP')

    # Normalise distance values into [outMin, outMax] and run them
    # through the sigmoid to obtain the ATP value per cell.
    for i in range(distArray.GetNumberOfTuples()):
        dist = distArray.GetTuple(i)[0]
        distRescaled = rescale(dist, inMin, inMax)
        atpVal = sigmoidATP(distRescaled)
        atpArray.InsertNextValue(atpVal)

    # Prepare debug ATP mesh (note: this aliases ecMesh, it is not a copy).
    debugAtpDataset = ecMesh
    debugAtpDataset.GetCellData().AddArray(distArray)
    debugAtpDataset.GetCellData().AddArray(atpArray)

    # Save the debug ATP mesh.
    debugAtpMapWriter = vtk.vtkXMLPolyDataWriter()
    debugAtpMapWriter.SetFileName(debugAtpFile)
    debugAtpMapWriter.SetInputData(debugAtpDataset)
    debugAtpMapWriter.Update()

    # Prepare the ATP mesh by converting all points to vertices.
    pointsToVerticesFilter = vtk.vtkVertexGlyphFilter()
    pointsToVerticesFilter.SetInputData(centroids)
    pointsToVerticesFilter.Update()

    atpDataset = pointsToVerticesFilter.GetOutput()
    atpDataset.GetCellData().AddArray(atpArray)

    # Assert the number of cells is equal to the number of items in the cell arrays.
    assert atpArray.GetNumberOfTuples() == debugAtpDataset.GetNumberOfCells(), "Number of cells (%d) and cell data values (%d) mismatch." % (atpArray.GetNumberOfTuples(), debugAtpDataset.GetNumberOfCells())

    atpMapWriter = vtk.vtkXMLPolyDataWriter()
    atpMapWriter.SetFileName(atpFile)
    atpMapWriter.SetInputData(atpDataset)
    atpMapWriter.Update()

    # Provide a quick visualisation of the ATP profile for validation.
    pointsX = numpy.arange(outMin, outMax, 0.001)
    pointsY = []

    for pt in pointsX:
        pointsY.append(sigmoidATP(pt))

    pyplot.plot(pointsX, pointsY, 'b')
    pyplot.show()
def usage():
    """Print a short note explaining how this script is meant to be run."""
    # Typo fixed: "centrelin" -> "centreline".
    print("This script is to be run with global parameters (input centreline, EC mesh, etc.) set in the calling script.")
if __name__ == '__main__':
    print("Starting", os.path.basename(__file__))
    # NOTE(review): running this module directly only prints the usage
    # message; buildATPMesh() is never called here — presumably a driver
    # script sets the globals and calls it. Confirm before "fixing".
    usage()
    print("Exiting", os.path.basename(__file__))
|
BlueFern/DBiharMesher
|
util/GenerateATPMapV2.py
|
Python
|
gpl-2.0
| 4,148
|
[
"VTK"
] |
e658bedb09bbdf4ac82b385f50164f7c9089eca20bc3c782ca140bf84815142e
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
float_or_none,
int_or_none,
parse_duration,
parse_iso8601,
remove_end,
unescapeHTML,
)
from ..compat import (
compat_etree_fromstring,
compat_HTTPError,
)
class BBCCoUkIE(InfoExtractor):
IE_NAME = 'bbc.co.uk'
IE_DESC = 'BBC iPlayer'
_ID_REGEX = r'[pb][\da-z]{7}'
_VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:(?:programmes/(?!articles/)|iplayer(?:/[^/]+)?/(?:episode/|playlist/))|music/clips[/#])(?P<id>%s)' % _ID_REGEX
_MEDIASELECTOR_URLS = [
# Provides HQ HLS streams with even better quality that pc mediaset but fails
# with geolocation in some cases when it's even not geo restricted at all (e.g.
# http://www.bbc.co.uk/programmes/b06bp7lf). Also may fail with selectionunavailable.
'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/iptv-all/vpid/%s',
'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s',
]
_MEDIASELECTION_NS = 'http://bbc.co.uk/2008/mp/mediaselection'
_EMP_PLAYLIST_NS = 'http://bbc.co.uk/2008/emp/playlist'
_NAMESPACES = (
_MEDIASELECTION_NS,
_EMP_PLAYLIST_NS,
)
_TESTS = [
{
'url': 'http://www.bbc.co.uk/programmes/b039g8p7',
'info_dict': {
'id': 'b039d07m',
'ext': 'flv',
'title': 'Leonard Cohen, Kaleidoscope - BBC Radio 4',
'description': 'The Canadian poet and songwriter reflects on his musical career.',
},
'params': {
# rtmp download
'skip_download': True,
}
},
{
'url': 'http://www.bbc.co.uk/iplayer/episode/b00yng5w/The_Man_in_Black_Series_3_The_Printed_Name/',
'info_dict': {
'id': 'b00yng1d',
'ext': 'flv',
'title': 'The Man in Black: Series 3: The Printed Name',
'description': "Mark Gatiss introduces Nicholas Pierpan's chilling tale of a writer's devilish pact with a mysterious man. Stars Ewan Bailey.",
'duration': 1800,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Episode is no longer available on BBC iPlayer Radio',
},
{
'url': 'http://www.bbc.co.uk/iplayer/episode/b03vhd1f/The_Voice_UK_Series_3_Blind_Auditions_5/',
'info_dict': {
'id': 'b00yng1d',
'ext': 'flv',
'title': 'The Voice UK: Series 3: Blind Auditions 5',
'description': "Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.",
'duration': 5100,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
},
{
'url': 'http://www.bbc.co.uk/iplayer/episode/p026c7jt/tomorrows-worlds-the-unearthly-history-of-science-fiction-2-invasion',
'info_dict': {
'id': 'b03k3pb7',
'ext': 'flv',
'title': "Tomorrow's Worlds: The Unearthly History of Science Fiction",
'description': '2. Invasion',
'duration': 3600,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
}, {
'url': 'http://www.bbc.co.uk/programmes/b04v20dw',
'info_dict': {
'id': 'b04v209v',
'ext': 'flv',
'title': 'Pete Tong, The Essential New Tune Special',
'description': "Pete has a very special mix - all of 2014's Essential New Tunes!",
'duration': 10800,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Episode is no longer available on BBC iPlayer Radio',
}, {
'url': 'http://www.bbc.co.uk/music/clips/p02frcc3',
'note': 'Audio',
'info_dict': {
'id': 'p02frcch',
'ext': 'flv',
'title': 'Pete Tong, Past, Present and Future Special, Madeon - After Hours mix',
'description': 'French house superstar Madeon takes us out of the club and onto the after party.',
'duration': 3507,
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
'url': 'http://www.bbc.co.uk/music/clips/p025c0zz',
'note': 'Video',
'info_dict': {
'id': 'p025c103',
'ext': 'flv',
'title': 'Reading and Leeds Festival, 2014, Rae Morris - Closer (Live on BBC Three)',
'description': 'Rae Morris performs Closer for BBC Three at Reading 2014',
'duration': 226,
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
'url': 'http://www.bbc.co.uk/iplayer/episode/b054fn09/ad/natural-world-20152016-2-super-powered-owls',
'info_dict': {
'id': 'p02n76xf',
'ext': 'flv',
'title': 'Natural World, 2015-2016: 2. Super Powered Owls',
'description': 'md5:e4db5c937d0e95a7c6b5e654d429183d',
'duration': 3540,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'geolocation',
}, {
'url': 'http://www.bbc.co.uk/iplayer/episode/b05zmgwn/royal-academy-summer-exhibition',
'info_dict': {
'id': 'b05zmgw1',
'ext': 'flv',
'description': 'Kirsty Wark and Morgan Quaintance visit the Royal Academy as it prepares for its annual artistic extravaganza, meeting people who have come together to make the show unique.',
'title': 'Royal Academy Summer Exhibition',
'duration': 3540,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'geolocation',
}, {
# iptv-all mediaset fails with geolocation however there is no geo restriction
# for this programme at all
'url': 'http://www.bbc.co.uk/programmes/b06bp7lf',
'info_dict': {
'id': 'b06bp7kf',
'ext': 'flv',
'title': "Annie Mac's Friday Night, B.Traits sits in for Annie",
'description': 'B.Traits sits in for Annie Mac with a Mini-Mix from Disclosure.',
'duration': 10800,
},
'params': {
# rtmp download
'skip_download': True,
},
}, {
'url': 'http://www.bbc.co.uk/iplayer/playlist/p01dvks4',
'only_matching': True,
}, {
'url': 'http://www.bbc.co.uk/music/clips#p02frcc3',
'only_matching': True,
}, {
'url': 'http://www.bbc.co.uk/iplayer/cbeebies/episode/b0480276/bing-14-atchoo',
'only_matching': True,
}
]
class MediaSelectionError(Exception):
    # Raised when the BBC media selector returns an <error> document.
    # `id` holds the BBC error code (e.g. 'notukerror', 'geolocation',
    # 'selectionunavailable') inspected by _download_media_selector.
    def __init__(self, id):
        self.id = id
def _extract_asx_playlist(self, connection, programme_id):
asx = self._download_xml(connection.get('href'), programme_id, 'Downloading ASX playlist')
return [ref.get('href') for ref in asx.findall('./Entry/ref')]
def _extract_connection(self, connection, programme_id):
    """Turn one media-selector <connection> element into a list of format
    dicts for the downloader.

    HTTP connections are dispatched on supplier/transferFormat (ASX
    playlist, DASH skipped, HLS, direct link); RTMP connections build a
    single rtmpdump-style format dict.
    """
    formats = []
    kind = connection.get('kind')
    protocol = connection.get('protocol')
    supplier = connection.get('supplier')
    if protocol == 'http':
        href = connection.get('href')
        transfer_format = connection.get('transferFormat')
        # ASX playlist: one format per referenced stream
        if supplier == 'asx':
            for i, ref in enumerate(self._extract_asx_playlist(connection, programme_id)):
                formats.append({
                    'url': ref,
                    'format_id': 'ref%s_%s' % (i, supplier),
                })
        # Skip DASH until supported
        elif transfer_format == 'dash':
            pass
        elif transfer_format == 'hls':
            # fatal=False: a broken manifest should not abort the other
            # connections of the same media element
            m3u8_formats = self._extract_m3u8_formats(
                href, programme_id, ext='mp4', entry_protocol='m3u8_native',
                m3u8_id=supplier, fatal=False)
            if m3u8_formats:
                formats.extend(m3u8_formats)
        # Direct link
        else:
            formats.append({
                'url': href,
                'format_id': supplier or kind or protocol,
            })
    elif protocol == 'rtmp':
        application = connection.get('application', 'ondemand')
        auth_string = connection.get('authString')
        identifier = connection.get('identifier')
        server = connection.get('server')
        formats.append({
            'url': '%s://%s/%s?%s' % (protocol, server, application, auth_string),
            'play_path': identifier,
            'app': '%s?%s' % (application, auth_string),
            'page_url': 'http://www.bbc.co.uk',
            'player_url': 'http://www.bbc.co.uk/emp/releases/iplayer/revisions/617463_618125_4/617463_618125_4_emp.swf',
            'rtmp_live': False,
            'ext': 'flv',
            'format_id': supplier,
        })
    return formats
def _extract_items(self, playlist):
return playlist.findall('./{%s}item' % self._EMP_PLAYLIST_NS)
def _findall_ns(self, element, xpath):
elements = []
for ns in self._NAMESPACES:
elements.extend(element.findall(xpath % ns))
return elements
def _extract_medias(self, media_selection):
error = media_selection.find('./{%s}error' % self._MEDIASELECTION_NS)
if error is None:
media_selection.find('./{%s}error' % self._EMP_PLAYLIST_NS)
if error is not None:
raise BBCCoUkIE.MediaSelectionError(error.get('id'))
return self._findall_ns(media_selection, './{%s}media')
def _extract_connections(self, media):
return self._findall_ns(media, './{%s}connection')
def _extract_video(self, media, programme_id):
    """Collect video formats for one <media kind="video"> element.

    Media-level attributes (resolution, bitrate, codec, file size) are
    stamped onto every format produced from each <connection>; the
    service name, when present, is prefixed onto the format_id.
    """
    formats = []
    vbr = int_or_none(media.get('bitrate'))
    vcodec = media.get('encoding')
    service = media.get('service')
    width = int_or_none(media.get('width'))
    height = int_or_none(media.get('height'))
    file_size = int_or_none(media.get('media_file_size'))
    for connection in self._extract_connections(media):
        conn_formats = self._extract_connection(connection, programme_id)
        for format in conn_formats:
            format.update({
                'width': width,
                'height': height,
                'vbr': vbr,
                'vcodec': vcodec,
                'filesize': file_size,
            })
            if service:
                format['format_id'] = '%s_%s' % (service, format['format_id'])
        formats.extend(conn_formats)
    return formats
def _extract_audio(self, media, programme_id):
    """Collect audio formats for one <media kind="audio"> element,
    stamping each with the media-level bitrate/codec and prefixing the
    service name onto the format_id."""
    audio_props = {
        'abr': int_or_none(media.get('bitrate')),
        'acodec': media.get('encoding'),
    }
    service = media.get('service')
    formats = []
    for connection in self._extract_connections(media):
        for fmt in self._extract_connection(connection, programme_id):
            fmt.update(audio_props)
            fmt['format_id'] = '%s_%s' % (service, fmt['format_id'])
            formats.append(fmt)
    return formats
def _get_subtitles(self, media, programme_id):
    """Download TTML captions for each caption connection.

    Returns a dict keyed by the xml:lang of the downloaded document
    (defaulting to 'en'); later connections with the same language
    overwrite earlier ones.
    """
    subtitles = {}
    for connection in self._extract_connections(media):
        captions = self._download_xml(connection.get('href'), programme_id, 'Downloading captions')
        lang = captions.get('{http://www.w3.org/XML/1998/namespace}lang', 'en')
        subtitles[lang] = [
            {
                'url': connection.get('href'),
                'ext': 'ttml',
            },
        ]
    return subtitles
def _raise_extractor_error(self, media_selection_error):
    # Convert an internal MediaSelectionError into the user-facing
    # ExtractorError (expected=True: not a bug, just unavailable media).
    raise ExtractorError(
        '%s returned error: %s' % (self.IE_NAME, media_selection_error.id),
        expected=True)
def _download_media_selector(self, programme_id):
    """Try each configured media-selector endpoint in turn.

    Endpoint-specific failures ('notukerror', 'geolocation',
    'selectionunavailable') fall through to the next endpoint; any other
    media-selection error aborts immediately. If every endpoint fails,
    the last retryable error is reported.
    """
    last_exception = None
    for mediaselector_url in self._MEDIASELECTOR_URLS:
        try:
            return self._download_media_selector_url(
                mediaselector_url % programme_id, programme_id)
        except BBCCoUkIE.MediaSelectionError as e:
            if e.id in ('notukerror', 'geolocation', 'selectionunavailable'):
                last_exception = e
                continue
            self._raise_extractor_error(e)
    self._raise_extractor_error(last_exception)
def _download_media_selector_url(self, url, programme_id=None):
    """Fetch one media-selector URL and process the returned document."""
    try:
        media_selection = self._download_xml(
            url, programme_id, 'Downloading media selection XML')
    except ExtractorError as ee:
        # 403/404 responses still carry a media-selection error document
        # in their body; parse it so the caller sees the BBC error id
        # instead of a bare HTTP error.
        if isinstance(ee.cause, compat_HTTPError) and ee.cause.code in (403, 404):
            media_selection = compat_etree_fromstring(ee.cause.read().decode('utf-8'))
        else:
            raise
    return self._process_media_selector(media_selection, programme_id)
def _process_media_selector(self, media_selection, programme_id):
formats = []
subtitles = None
for media in self._extract_medias(media_selection):
kind = media.get('kind')
if kind == 'audio':
formats.extend(self._extract_audio(media, programme_id))
elif kind == 'video':
formats.extend(self._extract_video(media, programme_id))
elif kind == 'captions':
subtitles = self.extract_subtitles(media, programme_id)
return formats, subtitles
def _download_playlist(self, playlist_id):
    """Resolve a playlist id via the JSON playlist endpoint.

    Returns (programme_id, title, description, duration, formats,
    subtitles) for the first programme item; on HTTP 404 falls back to
    the legacy XML playlist.
    """
    try:
        playlist = self._download_json(
            'http://www.bbc.co.uk/programmes/%s/playlist.json' % playlist_id,
            playlist_id, 'Downloading playlist JSON')
        version = playlist.get('defaultAvailableVersion')
        if version:
            smp_config = version['smpConfig']
            title = smp_config['title']
            description = smp_config['summary']
            for item in smp_config['items']:
                kind = item['kind']
                if kind != 'programme' and kind != 'radioProgramme':
                    continue
                programme_id = item.get('vpid')
                duration = int_or_none(item.get('duration'))
                formats, subtitles = self._download_media_selector(programme_id)
                # only the first programme item is used
                return programme_id, title, description, duration, formats, subtitles
    except ExtractorError as ee:
        if not (isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404):
            raise
    # fallback to legacy playlist
    return self._process_legacy_playlist(playlist_id)
def _process_legacy_playlist_url(self, url, display_id):
playlist = self._download_legacy_playlist_url(url, display_id)
return self._extract_from_legacy_playlist(playlist, display_id)
def _process_legacy_playlist(self, playlist_id):
return self._process_legacy_playlist_url(
'http://www.bbc.co.uk/iplayer/playlist/%s' % playlist_id, playlist_id)
def _download_legacy_playlist_url(self, url, playlist_id=None):
return self._download_xml(
url, playlist_id, 'Downloading legacy playlist XML')
def _extract_from_legacy_playlist(self, playlist, playlist_id):
    """Extract (programme_id, title, description, duration, formats,
    subtitles) from a legacy EMP playlist document.

    Raises ExtractorError (expected) when the playlist reports noItems;
    only the first programme/radioProgramme item is used.
    """
    no_items = playlist.find('./{%s}noItems' % self._EMP_PLAYLIST_NS)
    if no_items is not None:
        reason = no_items.get('reason')
        if reason == 'preAvailability':
            msg = 'Episode %s is not yet available' % playlist_id
        elif reason == 'postAvailability':
            msg = 'Episode %s is no longer available' % playlist_id
        elif reason == 'noMedia':
            msg = 'Episode %s is not currently available' % playlist_id
        else:
            msg = 'Episode %s is not available: %s' % (playlist_id, reason)
        raise ExtractorError(msg, expected=True)
    for item in self._extract_items(playlist):
        kind = item.get('kind')
        if kind != 'programme' and kind != 'radioProgramme':
            continue
        title = playlist.find('./{%s}title' % self._EMP_PLAYLIST_NS).text
        description_el = playlist.find('./{%s}summary' % self._EMP_PLAYLIST_NS)
        description = description_el.text if description_el is not None else None

        def get_programme_id(item):
            def get_from_attributes(item):
                for p in ('identifier', 'group'):
                    value = item.get(p)
                    if value and re.match(r'^[pb][\da-z]{7}$', value):
                        return value
            # BUG FIX: the id found on the item itself was previously
            # computed and discarded, so only the <mediator> fallback
            # could ever produce a programme id.
            value = get_from_attributes(item)
            if value:
                return value
            mediator = item.find('./{%s}mediator' % self._EMP_PLAYLIST_NS)
            if mediator is not None:
                return get_from_attributes(mediator)

        programme_id = get_programme_id(item)
        duration = int_or_none(item.get('duration'))
        if programme_id:
            formats, subtitles = self._download_media_selector(programme_id)
        else:
            # no vpid: process the item element itself as a selection
            formats, subtitles = self._process_media_selector(item, playlist_id)
            programme_id = playlist_id
        return programme_id, title, description, duration, formats, subtitles
def _real_extract(self, url):
    """Extract a single iplayer programme (or delegate to the playlist
    JSON endpoint when no programme id can be found on the page)."""
    group_id = self._match_id(url)
    webpage = self._download_webpage(url, group_id, 'Downloading video page')
    programme_id = None
    duration = None
    # try the embedded tviplayer player config first
    tviplayer = self._search_regex(
        r'mediator\.bind\(({.+?})\s*,\s*document\.getElementById',
        webpage, 'player', default=None)
    if tviplayer:
        player = self._parse_json(tviplayer, group_id).get('player', {})
        duration = int_or_none(player.get('duration'))
        programme_id = player.get('vpid')
    if not programme_id:
        # fall back to a raw "vpid" JSON field anywhere in the page
        programme_id = self._search_regex(
            r'"vpid"\s*:\s*"(%s)"' % self._ID_REGEX, webpage, 'vpid', fatal=False, default=None)
    if programme_id:
        formats, subtitles = self._download_media_selector(programme_id)
        title = self._og_search_title(webpage)
        description = self._search_regex(
            r'<p class="[^"]*medium-description[^"]*">([^<]+)</p>',
            webpage, 'description', default=None)
        if not description:
            description = self._html_search_meta('description', webpage)
    else:
        # no single programme on the page: resolve via the playlist APIs
        programme_id, title, description, duration, formats, subtitles = self._download_playlist(group_id)
    self._sort_formats(formats)
    return {
        'id': programme_id,
        'title': title,
        'description': description,
        'thumbnail': self._og_search_thumbnail(webpage, default=None),
        'duration': duration,
        'formats': formats,
        'subtitles': subtitles,
    }
class BBCIE(BBCCoUkIE):
    """Generic extractor for bbc.com / bbc.co.uk pages that are not
    claimed by the more specific extractors (see suitable())."""
    IE_NAME = 'bbc'
    IE_DESC = 'BBC'
    _VALID_URL = r'https?://(?:www\.)?bbc\.(?:com|co\.uk)/(?:[^/]+/)+(?P<id>[^/#?]+)'
    # Tried in order by _download_media_selector; see the per-URL notes.
    _MEDIASELECTOR_URLS = [
        # Provides HQ HLS streams but fails with geolocation in some cases when it's
        # even not geo restricted at all
        'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/iptv-all/vpid/%s',
        # Provides more formats, namely direct mp4 links, but fails on some videos with
        # notukerror for non UK (?) users (e.g.
        # http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret)
        'http://open.live.bbc.co.uk/mediaselector/4/mtis/stream/%s',
        # Provides fewer formats, but works everywhere for everybody (hopefully)
        'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/journalism-pc/vpid/%s',
    ]
_TESTS = [{
# article with multiple videos embedded with data-playable containing vpids
'url': 'http://www.bbc.com/news/world-europe-32668511',
'info_dict': {
'id': 'world-europe-32668511',
'title': 'Russia stages massive WW2 parade despite Western boycott',
'description': 'md5:00ff61976f6081841f759a08bf78cc9c',
},
'playlist_count': 2,
}, {
# article with multiple videos embedded with data-playable (more videos)
'url': 'http://www.bbc.com/news/business-28299555',
'info_dict': {
'id': 'business-28299555',
'title': 'Farnborough Airshow: Video highlights',
'description': 'BBC reports and video highlights at the Farnborough Airshow.',
},
'playlist_count': 9,
'skip': 'Save time',
}, {
# article with multiple videos embedded with `new SMP()`
# broken
'url': 'http://www.bbc.co.uk/blogs/adamcurtis/entries/3662a707-0af9-3149-963f-47bea720b460',
'info_dict': {
'id': '3662a707-0af9-3149-963f-47bea720b460',
'title': 'BBC Blogs - Adam Curtis - BUGGER',
},
'playlist_count': 18,
}, {
# single video embedded with data-playable containing vpid
'url': 'http://www.bbc.com/news/world-europe-32041533',
'info_dict': {
'id': 'p02mprgb',
'ext': 'mp4',
'title': 'Aerial footage showed the site of the crash in the Alps - courtesy BFM TV',
'description': 'md5:2868290467291b37feda7863f7a83f54',
'duration': 47,
'timestamp': 1427219242,
'upload_date': '20150324',
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
# article with single video embedded with data-playable containing XML playlist
# with direct video links as progressiveDownloadUrl (for now these are extracted)
# and playlist with f4m and m3u8 as streamingUrl
'url': 'http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu',
'info_dict': {
'id': '150615_telabyad_kentin_cogu',
'ext': 'mp4',
'title': "YPG: Tel Abyad'ın tamamı kontrolümüzde",
'timestamp': 1434397334,
'upload_date': '20150615',
},
'params': {
'skip_download': True,
}
}, {
# single video embedded with data-playable containing XML playlists (regional section)
'url': 'http://www.bbc.com/mundo/video_fotos/2015/06/150619_video_honduras_militares_hospitales_corrupcion_aw',
'info_dict': {
'id': '150619_video_honduras_militares_hospitales_corrupcion_aw',
'ext': 'mp4',
'title': 'Honduras militariza sus hospitales por nuevo escándalo de corrupción',
'timestamp': 1434713142,
'upload_date': '20150619',
},
'params': {
'skip_download': True,
}
}, {
# single video from video playlist embedded with vxp-playlist-data JSON
'url': 'http://www.bbc.com/news/video_and_audio/must_see/33376376',
'info_dict': {
'id': 'p02w6qjc',
'ext': 'mp4',
'title': '''Judge Mindy Glazer: "I'm sorry to see you here... I always wondered what happened to you"''',
'duration': 56,
'description': '''Judge Mindy Glazer: "I'm sorry to see you here... I always wondered what happened to you"''',
},
'params': {
'skip_download': True,
}
}, {
# single video story with digitalData
'url': 'http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret',
'info_dict': {
'id': 'p02q6gc4',
'ext': 'flv',
'title': 'Sri Lanka’s spicy secret',
'description': 'As a new train line to Jaffna opens up the country’s north, travellers can experience a truly distinct slice of Tamil culture.',
'timestamp': 1437674293,
'upload_date': '20150723',
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
# single video story without digitalData
'url': 'http://www.bbc.com/autos/story/20130513-hyundais-rock-star',
'info_dict': {
'id': 'p018zqqg',
'ext': 'mp4',
'title': 'Hyundai Santa Fe Sport: Rock star',
'description': 'md5:b042a26142c4154a6e472933cf20793d',
'timestamp': 1415867444,
'upload_date': '20141113',
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
# single video with playlist.sxml URL in playlist param
'url': 'http://www.bbc.com/sport/0/football/33653409',
'info_dict': {
'id': 'p02xycnp',
'ext': 'mp4',
'title': 'Transfers: Cristiano Ronaldo to Man Utd, Arsenal to spend?',
'description': 'BBC Sport\'s David Ornstein has the latest transfer gossip, including rumours of a Manchester United return for Cristiano Ronaldo.',
'duration': 140,
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
# article with multiple videos embedded with playlist.sxml in playlist param
'url': 'http://www.bbc.com/sport/0/football/34475836',
'info_dict': {
'id': '34475836',
'title': 'What Liverpool can expect from Klopp',
},
'playlist_count': 3,
}, {
# single video with playlist URL from weather section
'url': 'http://www.bbc.com/weather/features/33601775',
'only_matching': True,
}, {
# custom redirection to www.bbc.com
'url': 'http://www.bbc.co.uk/news/science-environment-33661876',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
    """Defer to the more specific BBC extractors when they match."""
    if BBCCoUkIE.suitable(url) or BBCCoUkArticleIE.suitable(url):
        return False
    return super(BBCIE, cls).suitable(url)
def _extract_from_media_meta(self, media_meta, video_id):
    """Return (formats, subtitles) from a data-media-meta dict.

    Tries, in order: direct sourceFiles links, an externalId resolved via
    the media selector, then an href pointing at a legacy playlist.
    Returns ([], []) when none of the three is present.
    """
    # Direct links to media in media metadata (e.g.
    # http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu)
    # TODO: there are also f4m and m3u8 streams incorporated in playlist.sxml
    source_files = media_meta.get('sourceFiles')
    if source_files:
        return [{
            'url': f['url'],
            'format_id': format_id,
            'ext': f.get('encoding'),
            'tbr': float_or_none(f.get('bitrate'), 1000),
            'filesize': int_or_none(f.get('filesize')),
        } for format_id, f in source_files.items() if f.get('url')], []
    programme_id = media_meta.get('externalId')
    if programme_id:
        return self._download_media_selector(programme_id)
    # Process playlist.sxml as legacy playlist
    href = media_meta.get('href')
    if href:
        playlist = self._download_legacy_playlist_url(href)
        _, _, _, _, formats, subtitles = self._extract_from_legacy_playlist(playlist, video_id)
        return formats, subtitles
    return [], []
def _extract_from_playlist_sxml(self, url, playlist_id, timestamp):
programme_id, title, description, duration, formats, subtitles = \
self._process_legacy_playlist_url(url, playlist_id)
self._sort_formats(formats)
return {
'id': programme_id,
'title': title,
'description': description,
'duration': duration,
'timestamp': timestamp,
'formats': formats,
'subtitles': subtitles,
}
def _real_extract(self, url):
    """Extract from an arbitrary BBC page, trying a long chain of embed
    styles in order: playlist.sxml params, data-playable JSON, a single
    vpid, `new SMP()` embeds, data-media-meta dicts, mediaAssetPage, and
    finally vxp-playlist-data."""
    playlist_id = self._match_id(url)
    webpage = self._download_webpage(url, playlist_id)
    timestamp = None
    playlist_title = None
    playlist_description = None
    # prefer schema.org ld+json metadata for title/description/timestamp
    ld = self._parse_json(
        self._search_regex(
            r'(?s)<script type="application/ld\+json">(.+?)</script>',
            webpage, 'ld json', default='{}'),
        playlist_id, fatal=False)
    if ld:
        timestamp = parse_iso8601(ld.get('datePublished'))
        playlist_title = ld.get('headline')
        playlist_description = ld.get('articleBody')
    if not timestamp:
        timestamp = parse_iso8601(self._search_regex(
            [r'<meta[^>]+property="article:published_time"[^>]+content="([^"]+)"',
             r'itemprop="datePublished"[^>]+datetime="([^"]+)"',
             r'"datePublished":\s*"([^"]+)'],
            webpage, 'date', default=None))
    entries = []
    # article with multiple videos embedded with playlist.sxml (e.g.
    # http://www.bbc.com/sport/0/football/34475836)
    playlists = re.findall(r'<param[^>]+name="playlist"[^>]+value="([^"]+)"', webpage)
    playlists.extend(re.findall(r'data-media-id="([^"]+/playlist\.sxml)"', webpage))
    if playlists:
        entries = [
            self._extract_from_playlist_sxml(playlist_url, playlist_id, timestamp)
            for playlist_url in playlists]
    # news article with multiple videos embedded with data-playable
    data_playables = re.findall(r'data-playable=(["\'])({.+?})\1', webpage)
    if data_playables:
        for _, data_playable_json in data_playables:
            data_playable = self._parse_json(
                unescapeHTML(data_playable_json), playlist_id, fatal=False)
            if not data_playable:
                continue
            settings = data_playable.get('settings', {})
            if settings:
                # data-playable with video vpid in settings.playlistObject.items (e.g.
                # http://www.bbc.com/news/world-us-canada-34473351)
                playlist_object = settings.get('playlistObject', {})
                if playlist_object:
                    items = playlist_object.get('items')
                    if items and isinstance(items, list):
                        title = playlist_object['title']
                        description = playlist_object.get('summary')
                        duration = int_or_none(items[0].get('duration'))
                        programme_id = items[0].get('vpid')
                        formats, subtitles = self._download_media_selector(programme_id)
                        self._sort_formats(formats)
                        entries.append({
                            'id': programme_id,
                            'title': title,
                            'description': description,
                            'timestamp': timestamp,
                            'duration': duration,
                            'formats': formats,
                            'subtitles': subtitles,
                        })
            else:
                # data-playable without vpid but with a playlist.sxml URLs
                # in otherSettings.playlist (e.g.
                # http://www.bbc.com/turkce/multimedya/2015/10/151010_vid_ankara_patlama_ani)
                playlist = data_playable.get('otherSettings', {}).get('playlist', {})
                if playlist:
                    entries.append(self._extract_from_playlist_sxml(
                        playlist.get('progressiveDownloadUrl'), playlist_id, timestamp))
    if entries:
        playlist_title = playlist_title or remove_end(self._og_search_title(webpage), ' - BBC News')
        playlist_description = playlist_description or self._og_search_description(webpage, default=None)
        return self.playlist_result(entries, playlist_id, playlist_title, playlist_description)
    # single video story (e.g. http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret)
    programme_id = self._search_regex(
        [r'data-video-player-vpid="(%s)"' % self._ID_REGEX,
         r'<param[^>]+name="externalIdentifier"[^>]+value="(%s)"' % self._ID_REGEX,
         r'videoId\s*:\s*["\'](%s)["\']' % self._ID_REGEX],
        webpage, 'vpid', default=None)
    if programme_id:
        formats, subtitles = self._download_media_selector(programme_id)
        self._sort_formats(formats)
        # digitalData may be missing (e.g. http://www.bbc.com/autos/story/20130513-hyundais-rock-star)
        digital_data = self._parse_json(
            self._search_regex(
                r'var\s+digitalData\s*=\s*({.+?});?\n', webpage, 'digital data', default='{}'),
            programme_id, fatal=False)
        page_info = digital_data.get('page', {}).get('pageInfo', {})
        title = page_info.get('pageName') or self._og_search_title(webpage)
        description = page_info.get('description') or self._og_search_description(webpage)
        timestamp = parse_iso8601(page_info.get('publicationDate')) or timestamp
        return {
            'id': programme_id,
            'title': title,
            'description': description,
            'timestamp': timestamp,
            'formats': formats,
            'subtitles': subtitles,
        }
    playlist_title = self._html_search_regex(
        r'<title>(.*?)(?:\s*-\s*BBC [^ ]+)?</title>', webpage, 'playlist title')
    playlist_description = self._og_search_description(webpage, default=None)

    def extract_all(pattern):
        # every parseable JSON object matched by pattern, Nones dropped
        return list(filter(None, map(
            lambda s: self._parse_json(s, playlist_id, fatal=False),
            re.findall(pattern, webpage))))
    # Multiple video article (e.g.
    # http://www.bbc.co.uk/blogs/adamcurtis/entries/3662a707-0af9-3149-963f-47bea720b460)
    EMBED_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:[^/]+/)+%s(?:\b[^"]+)?' % self._ID_REGEX
    entries = []
    for match in extract_all(r'new\s+SMP\(({.+?})\)'):
        embed_url = match.get('playerSettings', {}).get('externalEmbedUrl')
        if embed_url and re.match(EMBED_URL, embed_url):
            entries.append(embed_url)
    entries.extend(re.findall(
        r'setPlaylist\("(%s)"\)' % EMBED_URL, webpage))
    if entries:
        return self.playlist_result(
            [self.url_result(entry, 'BBCCoUk') for entry in entries],
            playlist_id, playlist_title, playlist_description)
    # Multiple video article (e.g. http://www.bbc.com/news/world-europe-32668511)
    medias = extract_all(r"data-media-meta='({[^']+})'")
    if not medias:
        # Single video article (e.g. http://www.bbc.com/news/video_and_audio/international)
        media_asset = self._search_regex(
            r'mediaAssetPage\.init\(\s*({.+?}), "/',
            webpage, 'media asset', default=None)
        if media_asset:
            media_asset_page = self._parse_json(media_asset, playlist_id, fatal=False)
            medias = []
            for video in media_asset_page.get('videos', {}).values():
                medias.extend(video.values())
    if not medias:
        # Multiple video playlist with single `now playing` entry (e.g.
        # http://www.bbc.com/news/video_and_audio/must_see/33767813)
        vxp_playlist = self._parse_json(
            self._search_regex(
                r'<script[^>]+class="vxp-playlist-data"[^>]+type="application/json"[^>]*>([^<]+)</script>',
                webpage, 'playlist data'),
            playlist_id)
        playlist_medias = []
        for item in vxp_playlist:
            media = item.get('media')
            if not media:
                continue
            playlist_medias.append(media)
            # Download single video if found media with asset id matching the video id from URL
            if item.get('advert', {}).get('assetId') == playlist_id:
                medias = [media]
                break
        # Fallback to the whole playlist
        if not medias:
            medias = playlist_medias
    entries = []
    for num, media_meta in enumerate(medias, start=1):
        formats, subtitles = self._extract_from_media_meta(media_meta, playlist_id)
        if not formats:
            continue
        self._sort_formats(formats)
        video_id = media_meta.get('externalId')
        if not video_id:
            video_id = playlist_id if len(medias) == 1 else '%s-%s' % (playlist_id, num)
        title = media_meta.get('caption')
        if not title:
            title = playlist_title if len(medias) == 1 else '%s - Video %s' % (playlist_title, num)
        duration = int_or_none(media_meta.get('durationInSeconds')) or parse_duration(media_meta.get('duration'))
        images = []
        for image in media_meta.get('images', {}).values():
            images.extend(image.values())
        if 'image' in media_meta:
            images.append(media_meta['image'])
        thumbnails = [{
            'url': image.get('href'),
            'width': int_or_none(image.get('width')),
            'height': int_or_none(image.get('height')),
        } for image in images]
        entries.append({
            'id': video_id,
            'title': title,
            'thumbnails': thumbnails,
            'duration': duration,
            'timestamp': timestamp,
            'formats': formats,
            'subtitles': subtitles,
        })
    return self.playlist_result(entries, playlist_id, playlist_title, playlist_description)
class BBCCoUkArticleIE(InfoExtractor):
    """Extractor for /programmes/articles/ pages, which embed several
    clips; each clip is delegated to the BBCCoUk extractor."""
    _VALID_URL = 'http://www.bbc.co.uk/programmes/articles/(?P<id>[a-zA-Z0-9]+)'
    IE_NAME = 'bbc.co.uk:article'
    IE_DESC = 'BBC articles'
    _TEST = {
        'url': 'http://www.bbc.co.uk/programmes/articles/3jNQLTMrPlYGTBn0WV6M2MS/not-your-typical-role-model-ada-lovelace-the-19th-century-programmer',
        'info_dict': {
            'id': '3jNQLTMrPlYGTBn0WV6M2MS',
            'title': 'Calculating Ada: The Countess of Computing - Not your typical role model: Ada Lovelace the 19th century programmer - BBC Four',
            'description': 'Hannah Fry reveals some of her surprising discoveries about Ada Lovelace during filming.',
        },
        'playlist_count': 4,
        'add_ie': ['BBCCoUk'],
    }

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)
        title = self._og_search_title(webpage)
        description = self._og_search_description(webpage).strip()
        # each clip is marked up with typeof="Clip"; its resource URL is a
        # programme page handled by the BBCCoUk extractor
        entries = [self.url_result(programme_url) for programme_url in re.findall(
            r'<div[^>]+typeof="Clip"[^>]+resource="([^"]+)"', webpage)]
        return self.playlist_result(entries, playlist_id, title, description)
|
keyboard-k/youtube-dl-pet
|
youtube_dl/extractor/bbc.py
|
Python
|
unlicense
| 40,671
|
[
"VisIt"
] |
f6bc70fd03024d70ea60dff69323addcc364deafbfe2aefb00067242bd43fade
|
#ryan g coleman, ryangc@mail.med.upenn.edu
#copyright 2006-7 ryan g coleman, kim sharp crystal.med.upenn.edu
#geometric primitives like distance functions and such
import math

# Linear-algebra backend selection: prefer the historical Numeric package,
# fall back to numpy, and finally to the bundled pure-python pMatrix class.
useNumeric = True  # use numeric, if available
useNumpy = False
try:  # to use numeric
    import Numeric
    import Matrix
    import LinearAlgebra
except ImportError:  # fallback to numpy if possible
    try:
        import numpy
        useNumpy = True
    except ImportError:  # otherwise fallback to hard coded single use code
        useNumeric = False  # found a simple matrix class in pure python
        import pMatrix
        # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189971
def distL2(a, b):
    '''Euclidean (L2) distance between two equal-length sequences.

    No error checking, very fast, should use everywhere.'''
    # generator sum avoids shadowing the `sum` builtin; zip replaces the
    # Python-2-only xrange indexing (works identically on 2 and 3)
    return math.sqrt(sum((bi - ai) ** 2. for ai, bi in zip(a, b)))
def distL2Squared3(a, b):
    '''Squared L2 distance between two 3d points; unrolled, no checks.'''
    dx = b[0] - a[0]
    dy = b[1] - a[1]
    dz = b[2] - a[2]
    return dx ** 2. + dy ** 2. + dz ** 2.
def distL2Squared(a, b):
    '''Squared L2 distance between equal-length sequences (no sqrt).

    Kept separate from distL2 so hot loops can skip the square root.'''
    # generator sum avoids shadowing the `sum` builtin and the py2-only xrange
    return sum((bi - ai) ** 2. for ai, bi in zip(a, b))
def dist(a, b, metric='L2'):
    '''Distance between two equal-length sequences of any dimension.

    metric is one of 'L2', 'L1', 'LINF', 'L2SQUARED'.
    Raises ValueError for an unknown metric (previously fell off the end
    and returned None silently).
    '''
    diffs = [bi - ai for ai, bi in zip(a, b)]
    if metric == 'L2':
        return sum(d ** 2. for d in diffs) ** 0.5
    elif metric == 'LINF':
        # max starts at 0. in the original, so empty input yields 0.
        return max(abs(d) for d in diffs) if diffs else 0.
    elif metric == 'L2SQUARED':
        return sum(d ** 2. for d in diffs)
    elif metric == 'L1':
        return sum(abs(d) for d in diffs)
    raise ValueError('unknown metric %r' % (metric,))
def longestAndMeanDist(pts):
    '''given a list of points, finds the largest distance between any 2. also
    finds mean distance between all pairs. returns both, in that order.

    BUG FIX: fewer than two points used to divide by zero; now returns
    (0., 0.) in that case. The L2 distance is computed inline so the
    function is self-contained.'''
    longestDist = 0.
    sumDists, countDists = 0., 0
    for indexOne, ptOne in enumerate(pts):
        for ptTwo in pts[indexOne + 1:]:  # no duplicates, minimal looping
            thisDist = math.sqrt(
                sum((q - p) ** 2. for p, q in zip(ptOne, ptTwo)))
            longestDist = max(thisDist, longestDist)
            sumDists += thisDist
            countDists += 1
    if countDists == 0:  # 0 or 1 point: no pairs at all
        return 0., 0.
    return longestDist, sumDists / float(countDists)
def getAngle(a, b):
    '''helper function for triangle interior: angle (radians) between two
    3-vectors, with the cosine clamped to [-1, 1] to guard acos against
    floating-point rounding.'''
    dotAB = a[0] * b[0] + a[1] * b[1] + a[2] * b[2]
    normA = math.sqrt(a[0] ** 2. + a[1] ** 2. + a[2] ** 2.)
    normB = math.sqrt(b[0] ** 2. + b[1] ** 2. + b[2] ** 2.)
    cosine = dotAB / (normA * normB)
    return math.acos(max(-1., min(1., cosine)))
def calcTriAreaList(abc):
    '''uses heron's formula on an (a, b, c) triple of 3d points'''
    a, b, c = abc  # unpack

    def edge(p, q):
        # straight-line length of one triangle edge
        return math.sqrt(
            (q[0] - p[0]) ** 2. + (q[1] - p[1]) ** 2. + (q[2] - p[2]) ** 2.)

    dists = [edge(a, b), edge(b, c), edge(a, c)]
    s = (dists[0] + dists[1] + dists[2]) * 0.5  # semi-perimeter
    return (s * (s - dists[0]) * (s - dists[1]) * (s - dists[2])) ** 0.5
def calcTriArea(a, b, c):  # 3 points in 3d
    '''uses heron's formula'''
    edgeAB = math.sqrt((b[0] - a[0]) ** 2. + (b[1] - a[1]) ** 2. + (b[2] - a[2]) ** 2.)
    edgeBC = math.sqrt((c[0] - b[0]) ** 2. + (c[1] - b[1]) ** 2. + (c[2] - b[2]) ** 2.)
    edgeAC = math.sqrt((c[0] - a[0]) ** 2. + (c[1] - a[1]) ** 2. + (c[2] - a[2]) ** 2.)
    semi = (edgeAB + edgeBC + edgeAC) * 0.5  # semi-perimeter
    return (semi * (semi - edgeAB) * (semi - edgeBC) * (semi - edgeAC)) ** 0.5
def getVector(a, b):
    '''componentwise a-b, returned as a new list'''
    difference = []
    for index in range(len(a)):
        difference.append(a[index] - b[index])
    return difference
def getNormalVector(a, b):
    '''unit-length version of the vector a-b'''
    difference = getVector(a, b)
    return normalizeVector(difference)
def getVector(a, b):
    # NOTE(review): duplicate re-definition of getVector (defined
    # identically a few lines above); this copy silently shadows the
    # first. One of the two can safely be removed.
    '''does a-b, returns'''
    return [a[i]-b[i] for i in range(len(a))]
def normalizeVector(vector):
    '''scale vector to unit length (divide every component by the L2 norm)'''
    norm = sum(component ** 2. for component in vector) ** 0.5
    return [component / norm for component in vector]
def length(vector):
    '''L2 norm (length) of a vector of any dimension'''
    squared = 0.
    for component in vector:
        squared += component * component
    return squared ** 0.5
def dot(x, y):
    '''dot product of two vectors of any dimension, assumes same length'''
    # start value 0. preserves the original's float accumulator
    return sum((x[i] * y[i] for i in range(len(x))), 0.)
def cross(x, y):
    '''right-handed cross product of two 3-vectors'''
    i_comp = x[1] * y[2] - x[2] * y[1]
    j_comp = x[2] * y[0] - x[0] * y[2]
    k_comp = x[0] * y[1] - x[1] * y[0]
    return [i_comp, j_comp, k_comp]
def getDihedralUnited(all):
    '''list of 4 xyzs, gets the dihedral'''
    first, second, third, fourth = all[0], all[1], all[2], all[3]
    return getDihedral(first, second, third, fourth)
def getDihedral(a, b, c, d):
    '''4 xyzs, gets the signed dihedral angle (radians) about the b-c axis'''
    # unit normals of the planes (a,b,c) and (b,c,d)
    cross1 = normalizeVector(
        cross(getNormalVector(a, b), getNormalVector(b, c)))
    cross2 = normalizeVector(
        cross(getNormalVector(b, c), getNormalVector(c, d)))
    try:
        dihedral1 = math.acos(dot(cross1, cross2))
    except ValueError:
        dihedral1 = 0.0  # sometimes the dot ends up a tiny bit above 1.0
    # have to figure out +- direction: test which side of the (cross1, b)
    # plane the point d falls on
    planeD = calculatePlaneD(cross1, b)
    planeFull = (cross1[0], cross1[1], cross1[2], planeD)
    if not checkPlaneSide(planeFull, d):
        dihedral1 = -dihedral1
    return dihedral1
def rotateAboutLine(aIn, dIn, xyz, theta):
    '''rotates the point xyz about the line d-a to an angle of theta radians

    based on http://inside.mines.edu/~gmurray/ArbitraryAxisRotation/
    ArbitraryAxisRotation.html'''
    # Constrain theta to [-pi, pi]. BUG FIX: the first loop's bound was
    # `theta < math.pi`, which fired for every theta below +pi and relied
    # on the second loop to undo the overshoot; the intended lower bound
    # is -pi. (Result was equivalent mod 2*pi, but the loop did pointless
    # work and contradicted the stated intent.)
    while theta < -math.pi:
        theta += 2 * math.pi
    while theta > math.pi:
        theta -= 2 * math.pi
    # axis direction d-a, inlined so this unit is self-contained
    da = [dIn[index] - aIn[index] for index in range(3)]
    #break down and just use the worst notation ever. someone punch me in the face
    a, b, c = aIn  # unpack many things
    d, e, f = dIn  # e, f unused but kept for symmetry with the reference
    u, v, w = da
    x, y, z = xyz
    #shortcuts
    uvw = (u ** 2. + v ** 2. + w ** 2.) ** 0.5  # axis length
    uvw2 = uvw * uvw
    #long stupid equations
    newX = (
        a * (v**2. + w**2.) + u * (- b * v - c * w + u * x + v * y + w * z) +
        (- a * (v**2. + w**2.) + u * (b * v + c * w - v * y - w * z) +
         x * (v**2. + w**2.)) * math.cos(theta) +
        (- c * v + b * w - w * y + v * z) * math.sin(theta) * uvw) / uvw2
    newY = (
        b * (u**2. + w**2.) + v * (- a * u - c * w + u * x + v * y + w * z) +
        (- b * (u**2. + w**2.) + v * (a * u + c * w - u * x - w * z) +
         y * (u**2. + w**2.)) * math.cos(theta) +
        (c * u - a * w + w * x - u * z) * math.sin(theta) * uvw) / uvw2
    newZ = (
        c * (v**2. + u**2.) + w * (- a * u - b * v + u * x + v * y + w * z) +
        (- c * (v**2. + u**2.) + w * (a * u + b * v - u * x - v * y) +
         z * (v**2. + u**2.)) * math.cos(theta) +
        (- b * u + a * v - v * x + u * y) * math.sin(theta) * uvw) / uvw2
    return newX, newY, newZ
def getTriNormalList(united):
    '''unpacks a 3-tuple of points and delegates to getTriNormal'''
    first, second, third = united[0], united[1], united[2]
    return getTriNormal(first, second, third)
def getTriNormal(a, b, c, firstTime=True):
    '''a, b and c are triange points in clockwise order, returns normal vector
    that points out. returns NORMALIZED vector now. or 0s.

    If the first cross product degenerates to (0,0,0), the points are
    cyclically permuted (preserving clockwise order) and retried up to
    twice via recursive calls with firstTime=False; a still-zero result
    is knowingly returned un-normalized.'''
    #find a-b and c-b
    #vecAB = normalizeVector(getVector(a, b))
    #vecCB = normalizeVector(getVector(c, b))
    vecAB = getVector(a, b)
    vecCB = getVector(c, b)
    #does the cross product, that's all there is to it
    normal = cross(vecAB, vecCB)
    #only enter this part if all 0 and if first time being called
    if not firstTime:  # has been called recursively.
        return normal  # don't check 0s. don't normalize
    elif firstTime and normal[0] == 0. and normal[1] == 0. and normal[2] == 0.:
        # degenerate: attempt to recompute after permuting the points
        newNor = getTriNormal(b, c, a, firstTime=False)  # still maintains clockwise
        if newNor[0] == 0. and newNor[1] == 0. and newNor[2] == 0.:
            lastNo = getTriNormal(c, a, b, firstTime=False)  # again
            #if this is zero we still have to return it
            if lastNo[0] == 0. and lastNo[1] == 0. and lastNo[2] == 0.:
                return lastNo  # 0s knowingly returned
            else:
                return normalizeVector(lastNo)
        else:
            return normalizeVector(newNor)
    else:
        return normalizeVector(normal)
def getAverage(listPoints):
  '''averages any number of 3d points passed in as list'''
  numPoints = len(listPoints)
  totals = [0., 0., 0.]
  for point in listPoints:
    totals[0] += point[0]
    totals[1] += point[1]
    totals[2] += point[2]
  return [total / numPoints for total in totals]
def getAverage1(listPoints):
  '''averages any number of scalar (1d) values passed in as a list'''
  # sum with a 0. start mirrors the original float accumulation order
  return sum(listPoints, 0.) / len(listPoints)
def getAverageArbitraryDimension(listPoints, dimension=2):
  '''averages any number of nD points passed in as list'''
  numPoints = len(listPoints)
  return [sum(point[axis] for point in listPoints) / numPoints
          for axis in range(dimension)]
def planeDistToOrigin(normal):
  '''distance from a plane to the origin, per
  http://mathworld.wolfram.com/Plane.html
  normal holds the plane coefficients (a, b, c, d);
  dist = d / sqrt(a^2 + b^2 + c^2)'''
  a, b, c, d = normal  # unpack the plane coefficients
  return d / math.sqrt(a * a + b * b + c * c)
def fixNormalZeros(vector):
  '''if every component is 0, return the vector unchanged, that's fine.
  if 1 or 2 components are 0, nudge them by a tiny alpha so there are no
  zeros left, then normalize and return'''
  alpha = 0.0000000000000000001
  zeroFlags = [component == 0. for component in vector]
  if all(zeroFlags):
    return vector  # all zeros: leave untouched
  if not any(zeroFlags):
    return vector  # no zeros: nothing to fix
  nudged = vector[:]  # copy, since it gets modified
  for index in range(3):
    if zeroFlags[index]:
      nudged[index] += alpha
  return normalizeVector(nudged)
def withinTolerance(pointA, pointB, tolerance):
  '''fast check that pointA and pointB are within tolerance of each other
  on every axis (a strict per-axis box test, not an exact l2 distance)'''
  for axis in range(3):
    if abs(pointA[axis] - pointB[axis]) >= tolerance:
      return False
  return True
def perturbTriangle(p1, p2, p3):
  '''nudges each triangle vertex by a distinct tiny offset so a degenerate
  intersection test can be retried on a slightly different triangle'''
  offsets = (.0000001, -.000001, .00001)
  return tuple([coord + delta for coord in point]
               for point, delta in zip((p1, p2, p3), offsets))
#p1, p2, p3 are the plane, p4, p5 are the line
#returns the point that is the intersection
#doesn't do uniqueness checks, etc.
#math from Eric W. Weisstein. "Line-Plane Intersection."
#From MathWorld--A Wolfram Web Resource.
#http://mathworld.wolfram.com/Line-PlaneIntersection.html
# t = - |1 1 1 1 |
# |x1 x2 x3 x4|
# |y1 y2 y3 y4|
# |z1 z2 z3 z4|
# ----------------
# |1 1 1 0 |
# |x1 x2 x3 x5-x4|
# |y1 y2 y3 y5-y4|
# |z1 z2 z3 z5-z4|
#plug t into:
# x = x4 + (x5-x4)t
# y = y4 + (y5-y4)t
# z = z4 + (z5-z4)t
#uses pMatrix class for now--maybe switch to numericpython if needed
def linePlaneIntersection(p1, p2, p3, p4, p5):
  '''plane through p1, p2, p3; line through p4 and p5. returns the
  intersection point [x, y, z], or False when either determinant is zero
  (parallel or degenerate configuration). uses the pMatrix class'''
  numerator = pMatrix.pMatrix([
      [1., 1., 1., 1.],
      [p1[0], p2[0], p3[0], p4[0]],
      [p1[1], p2[1], p3[1], p4[1]],
      [p1[2], p2[2], p3[2], p4[2]]]).determinant()
  denominator = pMatrix.pMatrix([
      [1., 1., 1., 0.],
      [p1[0], p2[0], p3[0], p5[0] - p4[0]],
      [p1[1], p2[1], p3[1], p5[1] - p4[1]],
      [p1[2], p2[2], p3[2], p5[2] - p4[2]]]).determinant()
  if numerator == 0.0 or denominator == 0.0:
    return False
  t = -numerator / denominator
  return [p4[axis] + (p5[axis] - p4[axis]) * t for axis in range(3)]
#p1, p2, p3 are the plane, p4, p5 are the line
#returns the point that is the intersection
#doesn't do uniqueness checks, etc.
#math from Eric W. Weisstein. "Line-Plane Intersection."
# From MathWorld--A Wolfram Web Resource.
# http://mathworld.wolfram.com/Line-PlaneIntersection.html
# t = - |1 1 1 1 |
# |x1 x2 x3 x4|
# |y1 y2 y3 y4|
# |z1 z2 z3 z4|
# ----------------
# |1 1 1 0 |
# |x1 x2 x3 x5-x4|
# |y1 y2 y3 y5-y4|
# |z1 z2 z3 z5-z4|
#plug t into:
# x = x4 + (x5-x4)t
# y = y4 + (y5-y4)t
# z = z4 + (z5-z4)t
#uses NumericPython for matrix stuff... falls back to pMatrix standalone funct
def linePlaneIntersectionNumeric(p1, p2, p3, p4, p5):
  '''same math as linePlaneIntersection, but uses numpy (or the older
  Numeric package) for the determinants when available; falls back to the
  standalone pMatrix implementation otherwise. returns [x, y, z] or False'''
  if not useNumeric:
    return linePlaneIntersection(p1, p2, p3, p4, p5)
  numerRows = [
      [1., 1., 1., 1.],
      [p1[0], p2[0], p3[0], p4[0]],
      [p1[1], p2[1], p3[1], p4[1]],
      [p1[2], p2[2], p3[2], p4[2]]]
  denomRows = [
      [1., 1., 1., 0.],
      [p1[0], p2[0], p3[0], p5[0] - p4[0]],
      [p1[1], p2[1], p3[1], p5[1] - p4[1]],
      [p1[2], p2[2], p3[2], p5[2] - p4[2]]]
  if useNumpy:
    numerator = numpy.linalg.det(numerRows)
    denominator = numpy.linalg.det(denomRows)
  else:  # actually use the old Numeric package
    numerator = LinearAlgebra.determinant(Matrix.Matrix(numerRows))
    denominator = LinearAlgebra.determinant(Matrix.Matrix(denomRows))
  if numerator == 0.0 or denominator == 0.0:
    return False
  t = -numerator / denominator
  return [p4[axis] + (p5[axis] - p4[axis]) * t for axis in range(3)]
def intPointInsideTri(p1, p2, p3, intPt):
  '''checks whether intPt lies inside the triangle p1, p2, p3.
  at each vertex, the full corner angle (between the edges to the other
  two vertices) must be at least as big as the angle from either edge to
  intPt; otherwise intPt is outside'''
  def outside(apex, cornerA, cornerB):
    # True when intPt falls outside the corner wedge at this apex
    corner = getAngle(getVector(cornerA, apex), getVector(cornerB, apex))
    return (corner < getAngle(getVector(cornerA, apex), getVector(intPt, apex))
            or corner < getAngle(getVector(cornerB, apex),
                                 getVector(intPt, apex)))
  if outside(p1, p2, p3):
    return False
  if outside(p3, p1, p2):
    return False
  if outside(p2, p3, p1):
    return False
  return True
def intPointInsideTriTuple(triTuple, intPt):
  '''checks whether intPt lies inside the cached triangle tuple.
  tuple layout: ((x), (y), (z), (x-y), (y-x), (y-z), (z-y), (x-z), (z-x)).
  at each vertex the two cached edge vectors must subtend an angle at
  least as big as the angle to intPt along either edge'''
  # (edge vector A, edge vector B, vertex point) for vertices x, z, y
  cornerChecks = ((triTuple[4], triTuple[8], triTuple[0]),
                  (triTuple[7], triTuple[5], triTuple[2]),
                  (triTuple[3], triTuple[6], triTuple[1]))
  for edgeA, edgeB, vertex in cornerChecks:
    cornerAngle = getAngle(edgeA, edgeB)
    toPoint = getVector(intPt, vertex)
    if cornerAngle < getAngle(edgeA, toPoint) or \
        cornerAngle < getAngle(edgeB, toPoint):
      return False
  return True
def getTriNormalList(united):
  '''unpacks the first three points of united and hands them to
  getTriNormal'''
  return getTriNormal(united[0], united[1], united[2])
def getTriNormal(a, b, c, firstTime=True):
  '''a, b and c are triangle points in clockwise order; returns the
  NORMALIZED outward normal vector, or all zeros when every clockwise
  permutation of the points gives a degenerate cross product'''
  normal = cross(getVector(a, b), getVector(c, b))
  if not firstTime:
    return normal  # recursive call: raw result, no zero check, no normalize
  if normal[0] != 0. or normal[1] != 0. or normal[2] != 0.:
    return normalizeVector(normal)
  # degenerate: retry the cyclic permutations, which stay clockwise
  retry = getTriNormal(b, c, a, firstTime=False)
  if retry[0] == 0. and retry[1] == 0. and retry[2] == 0.:
    retry = getTriNormal(c, a, b, firstTime=False)
    if retry[0] == 0. and retry[1] == 0. and retry[2] == 0.:
      return retry  # zeros knowingly returned
  return normalizeVector(retry)
def getAverage(listPoints):
  '''averages any number of 3d points passed in as list'''
  numPoints = len(listPoints)
  sums = [0., 0., 0.]
  for point in listPoints:
    for axis in range(3):
      sums[axis] += point[axis]
  return [componentSum / numPoints for componentSum in sums]
def getAverageArbitraryDimension(listPoints, dimension=2):
  '''averages any number of nD points passed in as list'''
  totals = [0.] * dimension
  for point in listPoints:
    for axis in range(dimension):
      totals[axis] += point[axis]
  numPoints = len(listPoints)
  return [total / numPoints for total in totals]
def findMinsMaxsSpheres(spheres):
  '''goes through all spheres and finds the min and max extents in each
  dimension. spheres are expected in [x, y, z, r] format.
  returns (mins, maxs), or (False, False) when the sphere list is empty'''
  if not spheres:
    return False, False  # indicates failure
  firstRadius = spheres[0][3]
  mins = [spheres[0][axis] - firstRadius for axis in range(3)]
  maxs = [spheres[0][axis] + firstRadius for axis in range(3)]
  for sphere in spheres[1:]:  # already did the first
    radius = sphere[3]
    for axis in range(3):
      mins[axis] = min(mins[axis], sphere[axis] - radius)
      maxs[axis] = max(maxs[axis], sphere[axis] + radius)
  return mins, maxs
def lineSphereIntersection(minLine, maxLine, sphere):
  '''line goes from minLine to maxLine, sphere is [x, y, z, radius].
  returns the 2 intersection points (nearest the line start first),
  or False when the line misses the sphere.
  math is from http://en.wikipedia.org/wiki/Ray-sphere_intersection'''
  #translate sphere and line so the line starts at the origin
  shiftedSphere = [sphere[axis] - minLine[axis] for axis in range(3)]
  shiftedSphere.append(sphere[3])  # radius
  #unit direction vector of the line
  direction = normalizeVector(
      [maxLine[axis] - minLine[axis] for axis in range(3)])
  dotDirCenter = 0.  # lx*sx + ly*sy + lz*sz
  dirSquared = 0.    # lx^2 + ly^2 + lz^2
  centerSquared = 0. # sx^2 + sy^2 + sz^2
  for axis in range(3):
    dotDirCenter += direction[axis] * shiftedSphere[axis]
    dirSquared += direction[axis] ** 2.
    centerSquared += shiftedSphere[axis] ** 2.
  centerSquared -= shiftedSphere[3] ** 2.  # minus radius^2
  try:
    # the square root raises ValueError when the discriminant is negative,
    # i.e. the line misses the sphere entirely
    root = ((dotDirCenter ** 2.) - dirSquared * centerSquared) ** 0.5
    distances = [(dotDirCenter + root) / dirSquared,
                 (dotDirCenter - root) / dirSquared]
    if distances[1] < distances[0]:
      distances.reverse()
    #construct output points from the original input line
    outputPoints = [[], []]
    for axis in range(3):
      for which in range(2):
        outputPoints[which].append(
            minLine[axis] + direction[axis] * distances[which])
    return outputPoints
  except ValueError:
    return False  # didn't work
def countPathTriIntersections(pathPoints, triangle):
  '''checks each line segment of pathPoints against one triangle and counts
  the intersections. assumes pathPoints and triangle have length 3 and are
  XYZ ordered. when the plane intersection is degenerate the triangle is
  perturbed slightly and the test retried, up to 5000 times before the
  whole program gives up and exits'''
  intersectionCount = 0
  lastPathPt = pathPoints[0]  # init for loop
  for nextPathPt in pathPoints[1:]:
    triPts0 = triangle[0]
    triPts1 = triangle[1]
    triPts2 = triangle[2]
    posPt, maxIt = False, 5000
    while posPt is False:
      posPt = linePlaneIntersectionNumeric(
          triPts0, triPts1, triPts2, lastPathPt, nextPathPt)
      if posPt is False:
        triPts0, triPts1, triPts2 = perturbTriangle(triPts0, triPts1, triPts2)
        maxIt -= 1
        if maxIt < 0:
          # single-argument print() works identically under python 2 and 3
          # (the original used the py2-only print statement)
          print("had to perturb points 5000 times %s %s %s %s %s giving up" %
                (triPts0, triPts1, triPts2, lastPathPt, nextPathPt))
          sys.exit(1)
    if posPt is not False:
      # the intersection must lie between the segment endpoints...
      segmentLength = distL2(lastPathPt, nextPathPt)  # hoisted, used twice
      if segmentLength >= distL2(lastPathPt, posPt) and \
          segmentLength >= distL2(nextPathPt, posPt):
        # ...and inside the (possibly perturbed) triangle
        if intPointInsideTri(triPts0, triPts1, triPts2, posPt):
          # broken when using large tri?
          intersectionCount += 1
    lastPathPt = nextPathPt  # for next loop
  return intersectionCount
def perturbLine(longAxis, shortAxis1, shortAxis2, startPt, endPt, itersLeft):
  '''makes a slightly different line. the long axis is left untouched; the
  two short axes of both endpoints are nudged by tiny amounts that grow as
  itersLeft counts down, alternating direction based on itersLeft % 4'''
  # base deltas per branch, in the order:
  # (start shortAxis1, start shortAxis2, end shortAxis1, end shortAxis2)
  deltaTable = {
      3: (+0.0000000001, -0.000000001, +0.00000001, -0.000000001),
      2: (-0.0000001, +0.000000001, +0.00000001, -0.000000001),
      1: (+0.0000000001, -0.000001, -0.0000001, +0.0000000001),
      0: (-0.0000001, +0.0000001, -0.000000001, +0.00000001),
  }
  deltaS1, deltaS2, deltaE1, deltaE2 = deltaTable[itersLeft % 4]
  scale = 5001. - itersLeft  # the nudges grow as the iteration count drops
  newStartPt = [-1., -1., -1.]
  newEndPt = [-1., -1., -1.]
  newStartPt[longAxis] = startPt[longAxis]
  newEndPt[longAxis] = endPt[longAxis]
  newStartPt[shortAxis1] = startPt[shortAxis1] + deltaS1 * scale
  newStartPt[shortAxis2] = startPt[shortAxis2] + deltaS2 * scale
  newEndPt[shortAxis1] = endPt[shortAxis1] + deltaE1 * scale
  newEndPt[shortAxis2] = endPt[shortAxis2] + deltaE2 * scale
  return newStartPt, newEndPt
def getLongestEdge(triList, pointList, direction=-1):
  '''helper function, finds the longest edge in the molecular surface.
  direction is 0, 1 or 2 for the axis to project away (i.e. measure the
  edge in the remaining two coordinates), or -1 for the full euclidean
  edge length'''
  longestEdge = 0.0
  if -1 == direction:
    for tri in triList:
      # full 3d corner coordinates (column 0 of a point record is its index)
      corners = [pointList[tri[vertex] - 1][1:] for vertex in (1, 2, 3)]
      for here, there in ((0, 1), (1, 2), (2, 0)):
        longestEdge = max(longestEdge, distL2(corners[here], corners[there]))
  else:
    # columns to keep when projecting away one axis (+1 for the index column);
    # unknown directions fall back to (0, 0) as the original code did
    keep = {0: (2, 3), 1: (1, 3), 2: (1, 2)}.get(direction, (0, 0))
    for tri in triList:
      corners = [
          [pointList[tri[vertex] - 1][keep[0]],
           pointList[tri[vertex] - 1][keep[1]]] for vertex in (1, 2, 3)]
      for here, there in ((0, 1), (1, 2), (2, 0)):
        longestEdge = max(longestEdge, distL2(corners[here], corners[there]))
  return longestEdge
def cacheTriangle(triList, pointList, allowedTris=None):
  '''speed-up function: caches all the various vectors made from a triangle.
  since all triangles get used a couple times, this should be worth it (if
  you have the memory).
  allowedTris: list of triangle ids to cache, or None / [-1] to cache all.
  (None replaces the old mutable default argument [-1], an error-prone
  python idiom; callers passing [-1] explicitly still behave identically.)
  returns a dict mapping triangle id -> tuple laid out as
  (x, y, z, x-y, y-x, y-z, z-y, x-z, z-x, (point indices), tri id)'''
  if allowedTris is None:
    allowedTris = [-1]  # sentinel meaning "cache every triangle"
  cacheDict = {}
  cacheAll = [-1] == allowedTris
  for tri in triList:
    if cacheAll or tri[0] in allowedTris:
      # strip the leading index column from each point record
      x = pointList[tri[1] - 1][1:]
      y = pointList[tri[2] - 1][1:]
      z = pointList[tri[3] - 1][1:]
      xy = getVector(x, y)
      yx = getVector(y, x)
      yz = getVector(y, z)
      zy = getVector(z, y)
      xz = getVector(x, z)
      zx = getVector(z, x)
      cacheDict[tri[0]] = (
          (x[0], x[1], x[2]), (y[0], y[1], y[2]), (z[0], z[1], z[2]),
          (xy[0], xy[1], xy[2]), (yx[0], yx[1], yx[2]),
          (yz[0], yz[1], yz[2]), (zy[0], zy[1], zy[2]),
          (xz[0], xz[1], xz[2]), (zx[0], zx[1], zx[2]),
          (tri[1], tri[2], tri[3]), tri[0])
  return cacheDict
def calculatePlaneD(normal, pointOnP):
  '''calculates the d of a plane where d = -ax - by - cz, with
  normal = (a, b, c) and pointOnP = (x, y, z) a point on the plane'''
  return -sum(normal[axis] * pointOnP[axis] for axis in range(3))
def checkPlaneSide(plane, point):
  '''plane is normal + D (from calculatePlaneD). returns True when point
  lies on the side the normal points toward (or exactly on the plane),
  False otherwise'''
  signedDist = sum(plane[axis] * point[axis] for axis in range(3)) + plane[3]
  return signedDist >= 0
def planeDistToOrigin(normal):
  '''distance from the plane to the origin, formula from
  http://mathworld.wolfram.com/Plane.html. normal holds (a, b, c, d):
  dist = d / sqrt(a^2 + b^2 + c^2)'''
  a, b, c, d = normal  # unpack tuple for readability
  normalLength = (a ** 2. + b ** 2. + c ** 2.) ** 0.5
  return d / normalLength
def calculateSphericity(area, volume):
  '''Wadell sphericity (J Geol 1935), see
  http://en.wikipedia.org/wiki/Sphericity:
  sphericity = pi^(1/3) * (6*volume)^(2/3) / area'''
  piCubeRoot = math.pi ** (1. / 3.)
  volumeTerm = (6 * volume) ** (2. / 3.)
  return piCubeRoot * volumeTerm / area
|
ryancoleman/2D-protein-shape-matching
|
geometry.py
|
Python
|
gpl-2.0
| 26,875
|
[
"CRYSTAL"
] |
947cf33962222606a86cc56aa4c9433fd73e7f085e4e45ca6a56bca6ef93d9ce
|
import ast
import fnmatch
import os
j = os.path.join
from odoo.modules import get_modules, get_module_path
from odoo.tests import BaseCase
class LintCase(BaseCase):
    """ Utility method for lint-type cases
    """
    def iter_module_files(self, *globs):
        """ Yields the paths of all the module files matching the provided globs
        (AND-ed)
        """
        for modroot in map(get_module_path, get_modules()):
            for root, _, fnames in os.walk(modroot):
                candidates = [j(root, name) for name in fnames]
                for pattern in globs:
                    candidates = fnmatch.filter(candidates, pattern)
                yield from candidates
class NodeVisitor():
    """Simple NodeVisitor: dispatches on the AST node's class name."""
    def visit(self, node):
        """Call visit_<ClassName> when defined, else generic_visit."""
        handler = getattr(
            self, 'visit_' + type(node).__name__, self.generic_visit)
        return handler(node)
    def generic_visit(self, node):
        """Default handler: visit each direct child, yielding its results."""
        for child in ast.iter_child_nodes(node):
            yield from self.visit(child)
|
jeremiahyan/odoo
|
odoo/addons/test_lint/tests/lint_case.py
|
Python
|
gpl-3.0
| 1,013
|
[
"VisIt"
] |
24243bf2ffee2e93314a1bc28d348aae35a197dfcd450026552c0f240f4944aa
|
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
__version__ = '$Id: ch1_intro.py 3960 2012-09-27 15:22:33Z guillaume $'
from tools.docco.rl_doc_utils import *
from reportlab.platypus.tableofcontents import TableOfContents
from datetime import datetime
import reportlab
title("ReportLab PDF Library")
title("User Guide")
centred('ReportLab Version ' + reportlab.Version)
centred(datetime.now().strftime('Document generated on %Y/%m/%d %H:%M:%S %Z'))
nextTemplate("TOC")
headingTOC()
toc = TableOfContents()
PS = ParagraphStyle
toc.levelStyles = [
PS(fontName='Times-Bold', fontSize=14, name='TOCHeading1', leftIndent=20, firstLineIndent=-20, spaceBefore=5, leading=16),
PS(fontSize=12, name='TOCHeading2', leftIndent=40, firstLineIndent=-20, spaceBefore=0, leading=12),
PS(fontSize=10, name='TOCHeading3', leftIndent=60, firstLineIndent=-20, spaceBefore=0, leading=12),
PS(fontSize=10, name='TOCHeading4', leftIndent=100, firstLineIndent=-20, spaceBefore=0, leading=12),
]
getStory().append(toc)
nextTemplate("Normal")
########################################################################
#
# Chapter 1
#
########################################################################
heading1("Introduction")
heading2("About this document")
disc("""This document is an introduction to the ReportLab PDF library.
Some previous programming experience
is presumed and familiarity with the Python Programming language is
recommended. If you are new to Python, we tell you in the next section
where to go for orientation.
""")
disc("""
This manual does not cover 100% of the features, but should explain all
the main concepts and help you get started, and point you at other
learning resources.
After working your way through this, you should be ready to begin
writing programs to produce sophisticated reports.
""")
disc("""In this chapter, we will cover the groundwork:""")
bullet("What is ReportLab all about, and why should I use it?")
bullet("What is Python?")
bullet("How do I get everything set up and running?")
todo("""
We need your help to make sure this manual is complete and helpful.
Please send any feedback to our user mailing list,
which is signposted from <a href="http://www.reportlab.com/">www.reportlab.com</a>.
""")
heading2("What is the ReportLab PDF Library?")
disc("""This is a software library that lets you directly
create documents in Adobe's Portable Document Format (PDF) using
the Python programming language. It also creates charts and data graphics
in various bitmap and vector formats as well as PDF.""")
disc("""PDF is the global standard for electronic documents. It
supports high-quality printing yet is totally portable across
platforms, thanks to the freely available Acrobat Reader. Any
application which previously generated hard copy reports or driving a printer
can benefit from making PDF documents instead; these can be archived,
emailed, placed on the web, or printed out the old-fashioned way.
However, the PDF file format is a complex
indexed binary format which is impossible to type directly.
The PDF format specification is more than 600 pages long and
PDF files must provide precise byte offsets -- a single extra
character placed anywhere in a valid PDF document can render it
invalid. This makes it harder to generate than HTML.""")
disc("""Most of the world's PDF documents have been produced
by Adobe's Acrobat tools, or rivals such as JAWS PDF Creator, which act
as 'print drivers'. Anyone wanting to automate PDF production would
typically use a product like Quark, Word or Framemaker running in a loop
with macros or plugins, connected to Acrobat. Pipelines of several
languages and products can be slow and somewhat unwieldy.
""")
disc("""The ReportLab library directly creates PDF based on
your graphics commands. There are no intervening steps. Your applications
can generate reports extremely fast - sometimes orders
of magnitude faster than traditional report-writing
tools. This approach is shared by several other libraries - PDFlib for C,
iText for Java, iTextSharp for .NET and others. However, The ReportLab library
differs in that it can work at much higher levels, with a full featured engine
for laying out documents complete with tables and charts. """)
disc("""In addition, because you are writing a program
in a powerful general purpose language, there are no
restrictions at all on where you get your data from,
how you transform it, and the kind of output
you can create. And you can reuse code across
whole families of reports.""")
disc("""The ReportLab library is expected to be useful
in at least the following contexts:""")
bullet("Dynamic PDF generation on the web")
bullet("High-volume corporate reporting and database publishing")
bullet("""An embeddable print engine for other applications, including
a 'report language' so that users can customize their own reports. <i>
This is particularly relevant to cross-platform apps which cannot
rely on a consistent printing or previewing API on each operating
system</i>.""")
bullet("""A 'build system' for complex documents with charts, tables
and text such as management accounts, statistical reports and
scientific papers """)
bullet("""Going from XML to PDF in one step""")
heading2("ReportLab's commercial software")
disc("""
The ReportLab library forms the foundation of our commercial solution for
PDF generation, Report Markup Language (RML). This is available for evaluation
on our web site with full documentation. We believe that RML is the fastest
and easiest way to develop rich PDF workflows. You work in a markup language
at a similar level to HTML, using your favorite templating system to populate
an RML document; then call our rml2pdf API function to generate a PDF. It's
what ReportLab staff use to build all of the solutions you can see on reportlab.com.
Key differences:
""")
bullet("""Fully documented with two manuals, a formal specification (the DTD) and extensive self-documenting tests. (By contrast, we try to make sure the open source documentation isn't wrong, but we don't always keep up with the code)""")
bullet("""Work in high-level markup rather than constructing graphs of Python objects """)
bullet("""Requires no Python expertise - your colleagues may thank you after you've left!'""")
bullet("""Support for vector graphics and inclusion of other PDF documents""")
bullet("""Many more useful features expressed with a single tag, which would need a lot
of coding in the open source package""")
bullet("""Commercial support is included""")
disc("""
We ask open source developers to consider trying out RML where it is appropriate.
You can register on our site and try out a copy before buying.
The costs are reasonable and linked to the volume of the project, and the revenue
helps us spend more time developing this software.""")
heading2("What is Python?")
disc("""
Python is an <i>interpreted, interactive, object-oriented</i> programming language. It is often compared to Tcl, Perl,
Scheme or Java.
""")
disc("""
Python combines remarkable power with very clear syntax. It has modules, classes, exceptions, very high level
dynamic data types, and dynamic typing. There are interfaces to many system calls and libraries, as well as to
various windowing systems (X11, Motif, Tk, Mac, MFC). New built-in modules are easily written in C or C++.
Python is also usable as an extension language for applications that need a programmable interface.
""")
disc("""
Python is as old as Java and has been growing steadily in popularity for years; since our
library first came out it has entered the mainstream. Many ReportLab library users are
already Python devotees, but if you are not, we feel that the language is an excellent
choice for document-generation apps because of its expressiveness and ability to get
data from anywhere.
""")
disc("""
Python is copyrighted but <b>freely usable and distributable, even for commercial use</b>.
""")
heading2("Acknowledgements")
disc("""Many people have contributed to ReportLab. We would like to thank in particular
(in alphabetical order):
Albertas Agejevas,
Alex Buck,
Andre Reitz,
Andrew Mercer,
Benjamin Dumke,
Benn B,
Chad Miller,
Chris Lee,
Christian Jacobs,
Dinu Gherman,
Eric Johnson,
Felix Labrecque,
Gary Poster,
Germán M. Bravo,
Guillaume Francois,
Hans Brand,
Henning Vonbargen,
Hosam Aly,
Ian Stevens,
James Martin-Collar,
Jeff Bauer,
Jerome Alet,
Jerry Casiano,
Jorge Godoy,
Keven D Smith,
Magnus Lie Hetland,
Marcel Tromp, Ty Sarna
Marius Gedminas,
Max M,
Michael Egorov,
Mike Folwell,
Moshe Wagner,
Nate Silva,
Paul McNett,
Peter Johnson,
PJACock,
Publio da Costa Melo,
Randolph Bentson,
Robert Alsina,
Robert Hölzl,
Robert Kern,
Ron Peleg,
Simon King,
Steve Halasz,
T Blatter,
Tim Roberts,
Tomasz Swiderski,
Volker Haas,
Yoann Roman,
and many more.""")
disc("""Special thanks go to Just van Rossum for his valuable assistance with
font technicalities.""")
disc("""Moshe Wagner and Hosam Aly deserve a huge thanks for contributing to the RTL patch, which is not yet on thr trunk.""")
disc("""Marius Gedminas deserves a big hand for contributing the work on TrueType fonts and we
are glad to include these in the toolkit. Finally we thank Michal Kosmulski for the DarkGarden font
for and Bitstream Inc. for the Vera fonts.""")
heading2("Installation and Setup")
heading3("A note on available versions")
disc("""Our website ^http://www.reportlab.com/^ will always have up-to-date
information on setups and installations. The latest version of the ReportLab library can be found at
^http://www.reportlab.com/software/opensource/rl-toolkit/download/^.
Older versions can be found at ^http://www.reportlab.com/ftp/^.
""")
disc("""Each successive version is stored in both zip
and tgz format, but the contents are identical apart from line endings.
Versions are numbered: $ReportLab_<major_version>_<minor_version>.zip$,
$ReportLab_<major_version>_<minor_version>.tgz$ and so on.
""")
disc("""
The latest stable version is $reportlab2.6$ (.zip or .tgz).
Daily snapshots of the trunk are available as
$reportlab-daily-unix.tar.gz$ or $reportlab-daily-win32.zip$.
""")
disc("""Finally, from version 2.4 onwards, there is also a Windows installer
available for Python versions 2.5 - 2.7, named $ReportLab-2.x.win32-py2.x.exe$
""")
pencilnote()
disc("""We plan to drop the support of Python 2.5 in our next release.
We advise you to move to Python 2.6 or 2.7.
""")
heading3("Installation on Windows")
restartList()
list("""First, install Python from $http://www.python.org/.$
Reportlab 2.x works with Python 2.5 upwards but we recommend to use
the latest stable version of Python 2.7.
After installing, you should be able to run the
'Python (command line)' option from the Start Menu.
""")
list("""We strongly recommend installing the Python Windows
Extensions, which gives you access to Windows data sources, COM support, WinAPI calls, and the PythonWin IDE. This
can be found at ^http://sourceforge.net/projects/pywin32/^.
Once this is installed, you can start
Pythonwin from the Start Menu and get a GUI application.
""")
list("""Install the Python Imaging Library ($PIL$) from $http://www.pythonware.com/products/pil/$. This
step is optional but allows you to include images in your reports.
""")
list("""Now you are ready to install reportlab itself.
The easiest way to do this is to use the .exe installer for Windows, which
installs both the ReportLab source code and the precompiled DLLs for you.
""")
list("""
If, however, you wish to install from source, download and unzip the archive
from from the downloads page on ^http://www.reportlab.com/^ and copy the $reportlab$ directory
onto your PythonPath; You should now be able to go to a Python
command line interpreter and type $import reportlab$ without getting
an error message.
""")
list("""Next, Download the zip file of precompiled DLLs for your Python version from
the bottom of the downloads page on ^http://www.reportlab.com/^, and unzip
them into ^C:\Python2x\lib\site-packages^ (or its equivalent for other Python versions
""")
list("""Open up a $MS-DOS$ command prompt and CD to
"$reportlab\\..\\tests$". Enter "$runAll.py$". You should see lots of dots
and no error messages. This will also create many PDF files and generate
the manuals in ^reportlab/docs^ (including this one). """)
list("""
Finally, we recommend you download and run the script ^rl_check.py^ from
^http://www.reportlab.com/ftp/^. This will health-check all the above
steps and warn you if anything is missing or mismatched.""")
# --- Unix installation instructions ---------------------------------------
heading3("Installation instructions for Unix")
disc("""
Many Linux distributions already include or can deliver a ReportLab distribution, although this may be a few months behind our own releases. On Ubuntu, simply use ^sudo apt-get install python-reportlab^. In addition, we support the Python packaging mechanisms so you can use ^easy_install reportlab^ in most Python environments.
""")
disc("""
If you want to install the latest version of our code, or to install your own reportlab to go with our commercial distribution, you can install from source as follows:""")
restartList()
list("""First, install Python. On a large number of Unix and Linux distributions, Python is already installed,
or is available as a standard package you can install with the relevant package manager.""")
list("""
You will also need to install the Freetype 2 Font Engine, Python Imaging Library, and the gzip library,
along with a C compiler.
""")
list("""You will also need the source code or relevant dev packages for Python and the FreeType 2 Font engine.
""")
list("""
Download the latest ReportLab.tgz from the download page on http://www.reportlab.com.
""")
list("""
Unpack the archive and follow the instructions in INSTALL.txt.
""")
list("""You should now be able to run python and execute the python statement
$import reportlab$ without errors.
""")
heading3("Instructions for Python novices: Mac")
disc("""
This is much, much easier with Mac OS X since Python is installed on your
system as standard. Just follow the instructions for installing the ReportLab archive
above.
""")
# --- Community / contribution section -------------------------------------
heading2("Getting Involved")
disc("""ReportLab is an Open Source project. Although we are
a commercial company we provide the core PDF generation
sources freely, even for commercial purposes, and we make no income directly
from these modules. We also welcome help from the community
as much as any other Open Source project. There are many
ways in which you can help:""")
bullet("""General feedback on the core API. Does it work for you?
Are there any rough edges? Does anything feel clunky and awkward?""")
bullet("""New objects to put in reports, or useful utilities for the library.
We have an open standard for report objects, so if you have written a nice
chart or table class, why not contribute it?""")
bullet("""Snippets and Case Studies: If you have produced some nice
output, register online on ^http://www.reportlab.com^ and submit a snippet
of your output (with or without scripts). If ReportLab solved a
problem for you at work, write a little 'case study' and submit it.
And if your web site uses our tools to make reports, let us link to it.
We will be happy to display your work (and credit it with your name
and company) on our site!""")
bullet("""Working on the core code: we have a long list of things
to refine or to implement. If you are missing some features or
just want to help out, let us know!""")
disc("""The first step for anyone wanting to learn more or
get involved is to join the mailing list. To Subscribe visit
$http://two.pairlist.net/mailman/listinfo/reportlab-users$.
From there you can also browse through the group's archives
and contributions. The mailing list is
the place to report bugs and get support. """)
# --- rl_config site-wide settings -----------------------------------------
heading2("Site Configuration")
disc("""There are a number of options which most likely need to be configured globally for a site.
The python script module $reportlab/rl_config.py$ may be edited to change the values of several
important sitewide properties.""")
bullet("""verbose: set to integer values to control diagnostic output.""")
bullet("""shapeChecking: set this to zero to turn off a lot of error checking in the graphics modules""")
bullet("""defaultEncoding: set this to WinAnsiEncoding or MacRomanEncoding.""")
bullet("""defaultPageSize: set this to one of the values defined in reportlab/lib/pagesizes.py; as delivered
it is set to pagesizes.A4; other values are pagesizes.letter etc.""")
bullet("""defaultImageCaching: set to zero to inhibit the creation of .a85 files on your
hard-drive. The default is to create these preprocessed PDF compatible image files for faster loading""")
bullet("""T1SearchPath: this is a python list of strings representing directories that
may be queried for information on Type 1 fonts""")
bullet("""TTFSearchPath: this is a python list of strings representing directories that
may be queried for information on TrueType fonts""")
bullet("""CMapSearchPath: this is a python list of strings representing directories that
may be queried for information on font code maps.""")
bullet("""showBoundary: set to non-zero to get boundary lines drawn.""")
bullet("""ZLIB_WARNINGS: set to non-zero to get warnings if the Python compression extension is not found.""")
# Fixed typo: the rl_config setting is spelled "pageCompression".
bullet("""pageCompression: set to non-zero to try and get compressed PDF.""")
bullet("""allowtableBoundsErrors: set to 0 to force an error on very large Platypus table elements""")
bullet("""emptyTableAction: Controls behaviour for empty tables, can be 'error' (default), 'indicate' or 'ignore'.""")
# --- Pointers to external Python learning material -------------------------
heading2("Learning More About Python")
disc("""
If you are a total beginner to Python, you should check out one or more from the
growing number of resources on Python programming. The following are freely
available on the web:
""")
bullet("""<b>Python Documentation. </b>
A list of documentation on the Python.org web site.
$http://www.python.org/doc/$
""")
bullet("""<b>Python Tutorial. </b>
The official Python Tutorial , originally written by Guido van Rossum himself.
$http://docs.python.org/tutorial/$
""")
bullet("""<b>Learning to Program. </b>
A tutorial on programming by Alan Gauld. Has a heavy emphasis on
Python, but also uses other languages.
$http://www.freenetpages.co.uk/hp/alan.gauld/$
""")
bullet("""<b>How to think like a computer scientist</b> (Python version)</b>.
$http://www.ibiblio.org/obp/thinkCSpy/$
""")
bullet("""<b>Instant Python</b>.
A 6-page minimal crash course by Magnus Lie Hetland.
$http://www.hetland.org/python/instant-python.php$
""")
bullet("""<b>Dive Into Python</b>.
A free Python tutorial for experienced programmers.
$http://www.diveintopython.net/$
""")
# Stylesheet setup used by the release-notes sections below.
from reportlab.lib.codecharts import SingleByteEncodingChart
from tools.docco.stylesheet import getStyleSheet
styles = getStyleSheet()
indent0_style = styles['Indent0']
indent1_style = styles['Indent1']
# --- Release notes ----------------------------------------------------------
heading2("Goals for the 2.x series")
disc("""The main rationale for 2.0 was an incompatible change at the character level:
to properly support Unicode input. Now that it's out we will maintain compatibility
with 2.0. There are no pressing feature wishlists and new features will be driven,
as always, by contributions and the demands of projects.""")
disc("""One area where we do want to make progress from release to release is with documentation
and installability. We'll be looking into better support for distutils, setuptools,
eggs and so on; and into better examples and tools to help people learn what's in the
(substantial) code base.""")
disc("""
Bigger ideas and more substantial rewrites are deferred to Version 3.0, with no particular
target dates.
""")
heading2("What's New in ReportLab 2.6")
disc("""This is a minor release focusing mainly on improved documentation. There are a
number of minor enhancements, and a larger number of previous-undocumented
enhancements which we have documented better.""")
disc("""A big thanks goes to the community for their help in reporting bugs and providing patches.
Thanks to everybody who has contributed to the open-source toolkit in the run-up to the 2.6 release,
whether by reporting bugs, sending patches, or contributing to the reportlab-users mailing list.
Thanks especially to the following people: Alex Buck, Felix Labrecque,
Peter Johnson, James Martin-Collar and Guillaume Francois.
This page documents what has changed since version 2.5.""")
disc('Reportlab 2.6 is installable with easy_install. You must have installed a compatible C compiler and the dependencies such as Freetype and PIL.')
heading4('General changes')
bullet("""Manuals have been reformatted with more pleasing code snippets and tables of
contents, and reviewed and expanded.""")
heading4('Flowing documents (Platypus)')
bullet("""Added support for HTML-style list objects.""")
bullet("""Added flexible mechanism for drawing bullets.""")
bullet("""Allowed XPreformatted objects to use Asian line wrapping.""")
bullet("""Added an 'autoNextPageTemplate' attribute to PageTemplates. For example you
can now set up a 'chapter first page template' which will always be followed
by a 'continuation template' on the next page break, saving the programmer from
having to issue control flow commands in the story.""")
bullet("""Added a TopPadder flowable, which will 'wrap' another Flowable and move it
to the bottom of the current page.""")
bullet("""More helpful error messages when large tables cannot be rendered.""")
bullet("""Documentation for images within text (test_032_images).""")
bullet("""Trailing dots for use on contents pages.""")
heading4('Charts and graphics')
bullet("""Support for UPCA bar codes.""")
bullet("""We now have a semi-intelligent system for labelling pie charts with
callout lines. Thanks to James Martin-Collar, a maths student at Warwick
University, who did this as his summer internship.""")
bullet("""Axes - added startOffset and endOffset properties; allowed for axis
background annotations.""")
bullet("""Bar charts - allow more control of z Index (i.e. drawing order of axes and
lines)""")
bullet("""Pie charts - fixed bugs in 3d appearance.""")
bullet("""SVG output back end has seen some bugs fixed and now outputs resizeable SVG.""")
# Noteworthy bug fixes Section #######################
#heading3("Noteworthy bug fixes")
|
nickpack/reportlab
|
docs/userguide/ch1_intro.py
|
Python
|
bsd-3-clause
| 22,960
|
[
"VisIt"
] |
bcbb7a094a85a4d06e550bf6c7fe97af9d7631a56cc8af90a11641b3452e16ec
|
# coding: utf-8
"""Tools to compute equations of states with different models."""
from __future__ import unicode_literals, division, print_function
import collections
import numpy as np
import pymatgen.core.units as units
from monty.functools import return_none_if_raise
from pymatgen.core.units import FloatWithUnit
from pymatgen.util.plotting_utils import add_fig_kwargs, get_ax_fig_plt
import logging
logger = logging.getLogger(__file__)
__all__ = [
"EOS",
]
def quadratic(V, a, b, c):
    """Second-order polynomial in the volume: ``a*V**2 + b*V + c``."""
    # Horner evaluation of the same quadratic.
    return c + V * (b + V * a)
def murnaghan(V, E0, B0, B1, V0):
    """Murnaghan equation of state, from PRB 28, 5480 (1983)."""
    compression = (V0 / V) ** B1
    energy = E0 + B0 * V / B1 * (compression / (B1 - 1) + 1) - V0 * B0 / (B1 - 1)
    return energy
def birch(V, E0, B0, B1, V0):
    """
    Birch equation of state (the n=0 case).

    From Intermetallic compounds: Principles and Practice, Vol. I: Principles,
    Chapter 9, pages 195-210, by M. Mehl, B. Klein, D. Papaconstantopoulos.
    """
    strain = (V0 / V) ** (2.0 / 3.0) - 1.0
    quad_term = 9.0 / 8.0 * B0 * V0 * strain ** 2
    cubic_term = 9.0 / 16.0 * B0 * V0 * (B1 - 4.0) * strain ** 3
    return E0 + quad_term + cubic_term
def birch_murnaghan(V, E0, B0, B1, V0):
    """Birch-Murnaghan equation of state, from PRB 70, 224107."""
    eta = (V / V0) ** (1.0 / 3.0)
    eta2 = eta ** 2
    t = eta2 - 1.0
    return E0 + 9.0 * B0 * V0 / 16.0 * t ** 2 * (6.0 + B1 * t - 4.0 * eta2)
def pourier_tarantola(V, E0, B0, B1, V0):
    """Pourier-Tarantola (natural strain) equation of state, PRB 70, 224107."""
    cube_root = (V / V0) ** (1.0 / 3.0)
    strain = -3.0 * np.log(cube_root)
    return E0 + B0 * V0 * strain ** 2 / 6.0 * (3.0 + strain * (B1 - 2))
def vinet(V, E0, B0, B1, V0):
    """Vinet equation of state, from PRB 70, 224107."""
    eta = (V / V0) ** (1.0 / 3.0)
    prefactor = 2.0 * B0 * V0 / (B1 - 1.0) ** 2
    bracket = 5.0 + 3.0 * B1 * (eta - 1.0) - 3.0 * eta
    decay = np.exp(-3.0 * (B1 - 1.0) * (eta - 1.0) / 2.0)
    return E0 + prefactor * (2.0 - bracket * decay)
def deltafactor_polyfit(volumes, energies):
    """
    Compute V0, B0, B1 with the fit used by the deltafactor code
    (taken from deltafactor/eosfit.py): a cubic polynomial in the
    variable x = V**(-2/3).

    Args:
        volumes: array of cell volumes (Angstrom**3).
        energies: array of energies (eV), same length as volumes.

    Returns:
        namedtuple ``DeltaFitResults(v0, b0, b1, poly1d)`` where ``poly1d``
        contains the fitted polynomial coefficients.

    Raises:
        EOSError: if the fitted polynomial has no physical minimum.
    """
    fitdata = np.polyfit(volumes**(-2./3.), energies, 3, full=True)

    # Successive derivatives of the fit, all in the variable x = V**(-2/3).
    deriv0 = np.poly1d(fitdata[0])
    deriv1 = np.polyder(deriv0, 1)
    deriv2 = np.polyder(deriv1, 1)
    deriv3 = np.polyder(deriv2, 1)

    # A physical minimum needs x > 0 and positive curvature; x maps back
    # to the equilibrium volume via V = x**(-3/2).
    for x in np.roots(deriv1):
        if x > 0 and deriv2(x) > 0:
            v0 = x**(-3./2.)
            break
    else:
        raise EOSError("No minimum could be found")

    # Convert derivatives with respect to x into derivatives with respect
    # to V in order to obtain the bulk modulus and its pressure derivative.
    derivV2 = 4./9. * x**5. * deriv2(x)
    derivV3 = (-20./9. * x**(13./2.) * deriv2(x) - 8./27. * x**(15./2.) * deriv3(x))
    b0 = derivV2 / x**(3./2.)
    b1 = -1 - x**(-3./2.) * derivV3 / derivV2

    n = collections.namedtuple("DeltaFitResults", "v0 b0 b1 poly1d")
    return n(v0, b0, b1, fitdata[0])
class EOSError(Exception):
    """Exceptions raised by EOS and EOS_Fit when an equation-of-state fit fails."""
class EOS(object):
    """
    Fit an equation of state E(V) for bulk systems.

    Supported models::

        murnaghan         PRB 28, 5480 (1983)
        birch             Intermetallic compounds: Principles and Practice,
                          Vol I: Principles, pages 195-210
        birchmurnaghan    PRB 70, 224107
        pouriertarantola  PRB 70, 224107
        vinet             PRB 70, 224107

    Typical usage::

        eos = EOS(eos_name='murnaghan')
        fit = eos.fit(volumes, energies)
        print(fit)
        fit.plot()
    """

    # Exposed so callers can catch EOS.Error.
    Error = EOSError

    #: Models available: model name -> fitting function.
    MODELS = {
        "quadratic": quadratic,
        "murnaghan": murnaghan,
        "birch": birch,
        "birch_murnaghan": birch_murnaghan,
        "pourier_tarantola": pourier_tarantola,
        "vinet": vinet,
        "deltafactor": deltafactor_polyfit,
    }

    def __init__(self, eos_name='murnaghan'):
        # Unknown model names raise KeyError here, same as a direct dict lookup.
        self._eos_name = eos_name
        self._func = self.MODELS[eos_name]

    # Convenience factories, one per supported model.
    @staticmethod
    def Quadratic():
        return EOS(eos_name="quadratic")

    @staticmethod
    def Murnaghan():
        return EOS(eos_name='murnaghan')

    @staticmethod
    def Birch():
        return EOS(eos_name='birch')

    @staticmethod
    def Birch_Murnaghan():
        return EOS(eos_name='birch_murnaghan')

    @staticmethod
    def Pourier_Tarantola():
        return EOS(eos_name='pourier_tarantola')

    @staticmethod
    def Vinet():
        return EOS(eos_name='vinet')

    @staticmethod
    def DeltaFactor():
        return EOS(eos_name='deltafactor')

    def fit(self, volumes, energies, vol_unit="ang^3", ene_unit="eV"):
        """
        Fit energies as a function of volumes.

        Inputs are converted to Angstrom**3 and eV respectively before
        fitting, so the bulk modulus in the result is in eV/Angstrom^3.

        Returns:
            EOS_Fit instance giving access to the optimal volume, the
            minimum energy and the bulk modulus.
        """
        vols_ang3 = units.ArrayWithUnit(volumes, vol_unit).to("ang^3")
        enes_ev = units.EnergyArray(energies, ene_unit).to("eV")
        return EOS_Fit(vols_ang3, enes_ev, self._func, self._eos_name)
class EOS_Fit(object):
    """Performs the fit of E(V) and provides method to access the results of the fit."""

    def __init__(self, volumes, energies, func, eos_name):
        """
        Args:
            volumes: list of volumes in Angstrom^3
            energies: list of energies in eV
            func: callable function implementing E(V, *params)
            eos_name: name of the model (a key of EOS.MODELS)
        """
        self.volumes = np.array(volumes)
        self.energies = np.array(energies)
        assert len(self.volumes) == len(self.energies)

        self.func = func
        self.eos_name = eos_name
        # Exceptions encountered during the fit (also logged).
        self.exceptions = []
        # Error flag; non-zero / unexpected values indicate a failed fit.
        self.ierr = 0

        if eos_name == "deltafactor":
            try:
                results = deltafactor_polyfit(self.volumes, self.energies)

                # The deltafactor polyfit does not return the minimum energy.
                self.e0 = None
                self.v0 = results.v0
                self.b0 = results.b0
                self.b1 = results.b1
                self.p0 = results.poly1d
                self.eos_params = results.poly1d

            except EOSError as exc:
                self.ierr = 1
                logger.critical(str(exc))
                self.exceptions.append(exc)
                raise

        elif eos_name == "quadratic":
            # Quadratic fit: closed form, no iterative optimization needed.
            a, b, c = np.polyfit(self.volumes, self.energies, 2)

            self.v0 = v0 = -b/(2*a)
            self.e0 = a*v0**2 + b*v0 + c
            self.b0 = 2*a*v0
            # A parabola has no pressure derivative of the bulk modulus.
            self.b1 = np.inf
            self.p0 = [a, b, c]
            self.eos_params = [a, b, c]

            vmin, vmax = self.volumes.min(), self.volumes.max()
            # BUG FIX: was `not vmin < v0 and v0 < vmax`, which parses as
            # `(not (vmin < v0)) and (v0 < vmax)` and never detects v0 > vmax.
            # The intended check is that v0 lies inside the sampled range.
            if not vmin < v0 < vmax:
                exc = EOSError('The minimum volume of a fitted parabola is not in the input volumes\n.')
                logger.critical(str(exc))
                self.exceptions.append(exc)

        else:
            # Non-linear least-squares fit of the chosen model.

            # Objective function that will be minimized
            def objective(pars, x, y):
                return y - self.func(x, *pars)

            # Quadratic fit to get an initial guess for the parameters
            a, b, c = np.polyfit(self.volumes, self.energies, 2)

            v0 = -b/(2*a)
            e0 = a*v0**2 + b*v0 + c
            b0 = 2*a*v0
            b1 = 4  # b1 is usually a small number like 4

            vmin, vmax = self.volumes.min(), self.volumes.max()
            # BUG FIX: same precedence error as in the quadratic branch.
            if not vmin < v0 < vmax:
                exc = EOSError('The minimum volume of a fitted parabola is not in the input volumes\n.')
                logger.critical(str(exc))
                self.exceptions.append(exc)

            # Initial guesses for the parameters
            self.p0 = [e0, b0, b1, v0]

            from scipy.optimize import leastsq
            self.eos_params, self.ierr = leastsq(objective, self.p0, args=(self.volumes, self.energies))

            # leastsq reports success via ier values 1-4.
            if self.ierr not in [1, 2, 3, 4]:
                exc = EOSError("Optimal parameters not found")
                logger.critical(str(exc))
                self.exceptions.append(exc)
                raise exc

            self.e0 = self.eos_params[0]
            self.b0 = self.eos_params[1]
            self.b1 = self.eos_params[2]
            self.v0 = self.eos_params[3]

            # Debug prints replaced by logger calls: a library should not
            # write to stdout unconditionally.
            logger.debug("EOS_fit: %s", func)
            logger.debug("e0, b0, b1, v0: %s", self.eos_params)

    def __str__(self):
        lines = []
        app = lines.append
        app("Equation of State: %s" % self.name)
        app("Minimum volume = %1.2f Ang^3" % self.v0)
        app("Bulk modulus = %1.2f eV/Ang^3 = %1.2f GPa, b1 = %1.2f" % (self.b0, self.b0_GPa, self.b1))
        return "\n".join(lines)

    @property
    def name(self):
        """Name of the fitting function."""
        return self.func.__name__

    @property
    def b0_GPa(self):
        """Bulk modulus converted from eV/Ang^3 to GPa."""
        return FloatWithUnit(self.b0, "eV ang^-3").to("GPa")

    @property
    @return_none_if_raise(AttributeError)
    def results(self):
        """Dictionary with the results. None if results are not available"""
        return dict(v0=self.v0, e0=self.e0, b0=self.b0, b1=self.b1)

    @add_fig_kwargs
    def plot(self, ax=None, **kwargs):
        """
        Uses Matplotlib to plot the energy curve.

        Args:
            ax: :class:`Axes` object. If ax is None, a new figure is produced.

        ================  ==============================================================
        kwargs            Meaning
        ================  ==============================================================
        style
        color
        text
        label
        ================  ==============================================================

        Returns:
            Matplotlib figure.
        """
        ax, fig, plt = get_ax_fig_plt(ax)

        # Pad the data range by 1% on each side.
        vmin, vmax = self.volumes.min(), self.volumes.max()
        emin, emax = self.energies.min(), self.energies.max()

        vmin, vmax = (vmin - 0.01 * abs(vmin), vmax + 0.01 * abs(vmax))
        emin, emax = (emin - 0.01 * abs(emin), emax + 0.01 * abs(emax))

        color = kwargs.pop("color", "r")
        label = kwargs.pop("label", None)

        # Plot input data.
        ax.plot(self.volumes, self.energies, linestyle="None", marker="o", color=color) #, label="Input Data")

        # Plot EOS.
        vfit = np.linspace(vmin, vmax, 100)
        if label is None:
            label = self.name + ' fit'

        if self.eos_name == "deltafactor":
            # The deltafactor fit is a polynomial in V**(-2/3).
            xx = vfit**(-2./3.)
            ax.plot(vfit, np.polyval(self.eos_params, xx), linestyle="dashed", color=color, label=label)
        else:
            ax.plot(vfit, self.func(vfit, *self.eos_params), linestyle="dashed", color=color, label=label)

        # Set xticks and labels (raw strings avoid invalid-escape warnings).
        ax.grid(True)
        ax.set_xlabel(r"Volume $\AA^3$")
        ax.set_ylabel("Energy (eV)")

        ax.legend(loc="best", shadow=True)

        # Add text with fit parameters.
        if kwargs.pop("text", True):
            text = []
            app = text.append
            app(r"Min Volume = %1.2f $\AA^3$" % self.v0)
            app(r"Bulk modulus = %1.2f eV/$\AA^3$ = %1.2f GPa" % (self.b0, self.b0_GPa))
            app("B1 = %1.2f" % self.b1)
            fig.text(0.4, 0.5, "\n".join(text), transform=ax.transAxes)

        return fig
|
rousseab/pymatgen
|
pymatgen/io/abinitio/eos.py
|
Python
|
mit
| 11,243
|
[
"pymatgen"
] |
d47a7bd02c93c660e29e9b444b93f53f003c2409e029b87c68fe57a144b2d581
|
from __future__ import annotations
import numpy as np
import pytest
from cctbx import crystal, sgtbx, uctbx
def test_symmetry_analysis():
    """Regression test for dials SymmetryAnalysis on a fixed 2D embedding.

    Feeds a fixed set of 60 two-dimensional cosym coordinates (presumably a
    precomputed embedding of 6 datasets x 10 reflections-per-op — TODO confirm
    provenance) through SymmetryAnalysis and checks the chosen Laue group,
    its scores, and the exact formatted report text.
    """
    # Fixed 2D coordinates; values are golden data for this test.
    coords = np.array(
        [
            [0.835, 0.158],
            [0.772, 0.104],
            [0.108, 0.907],
            [0.058, 0.76],
            [0.926, 0.189],
            [0.221, 0.888],
            [0.957, 0.137],
            [0.958, 0.143],
            [-0.015, 0.726],
            [-0.066, 0.29],
            [0.135, 0.848],
            [0.085, 0.788],
            [0.897, 0.126],
            [0.749, 0.073],
            [0.166, 0.943],
            [0.871, 0.248],
            [0.116, 0.968],
            [0.116, 0.973],
            [0.706, 0.007],
            [0.288, -0.055],
            [0.137, 0.848],
            [0.089, 0.78],
            [0.893, 0.122],
            [0.749, 0.077],
            [0.165, 0.941],
            [0.877, 0.242],
            [0.114, 0.968],
            [0.12, 0.971],
            [0.716, 0.002],
            [0.292, -0.062],
            [0.841, 0.162],
            [0.774, 0.104],
            [0.1, 0.909],
            [0.054, 0.761],
            [0.927, 0.184],
            [0.227, 0.88],
            [0.957, 0.137],
            [0.961, 0.143],
            [-0.007, 0.716],
            [-0.061, 0.287],
            [0.13, 0.848],
            [0.084, 0.783],
            [0.898, 0.124],
            [0.749, 0.075],
            [0.169, 0.94],
            [0.871, 0.247],
            [0.114, 0.969],
            [0.12, 0.969],
            [0.717, 0.0],
            [0.296, -0.066],
            [0.84, 0.154],
            [0.776, 0.103],
            [0.104, 0.908],
            [0.057, 0.755],
            [0.925, 0.19],
            [0.227, 0.883],
            [0.958, 0.136],
            [0.962, 0.143],
            [-0.017, 0.724],
            [-0.067, 0.295],
        ]
    )
    # Candidate symmetry operations to score (as sgtbx rt_mx objects).
    sym_ops = [
        sgtbx.rt_mx(s)
        for s in ("-z,-y,-x", "y,z,x", "x,y,z", "-x,-z,-y", "z,x,y", "-y,-x,-z")
    ]
    # Input symmetry reduced to the minimum (Niggli-type) cell.
    crystal_symmetry = crystal.symmetry(
        unit_cell=uctbx.unit_cell((98.33, 98.33, 135.99, 90, 90, 120)),
        space_group_info=sgtbx.space_group_info("R3:H"),
    ).minimum_cell()
    from cctbx.sgtbx.lattice_symmetry import metric_subgroups

    # All lattice-compatible subgroups within 5 degrees of lattice distortion.
    subgroups = metric_subgroups(
        crystal_symmetry, max_delta=5, bravais_types_only=False
    )
    # Identity: coordinates are already in the minimum-cell setting.
    cb_op_inp_min = sgtbx.change_of_basis_op()
    from dials.algorithms.symmetry.cosym import SymmetryAnalysis

    analysis = SymmetryAnalysis(coords, sym_ops, subgroups, cb_op_inp_min)
    assert analysis.best_solution.likelihood > 0.99
    assert analysis.best_solution.confidence > 0.98
    assert (
        analysis.best_solution.subgroup["best_subsym"].space_group().type().number()
        == 148
    )  # R -3 :H
    # Exact golden text of the formatted report.
    assert (
        str(analysis)
        == """\
Scoring individual symmetry elements
+--------------+--------+------+-----+-----------------+
|   likelihood |   Z-CC |   CC |     | Operator        |
|--------------+--------+------+-----+-----------------|
|        0.087 |   1.96 |  0.2 |     | 2 |(0, -1, 1)   |
|        0.087 |   1.96 |  0.2 |     | 2 |(-1, 0, 1)   |
|        0.949 |  10    |  1   | *** | 3^-1 |(1, 1, 1) |
|        0.087 |   1.96 |  0.2 |     | 2 |(-1, 1, 0)   |
|        0.949 |  10    |  1   | *** | 3 |(1, 1, 1)    |
+--------------+--------+------+-----+-----------------+
Scoring all possible sub-groups
+-------------------+-----+--------------+----------+--------+--------+---------+--------------------+
| Patterson group   |     |   Likelihood |   NetZcc |   Zcc+ |   Zcc- |   delta | Reindex operator   |
|-------------------+-----+--------------+----------+--------+--------+---------+--------------------|
| R -3 :H           | *** |        0.995 |     8.04 |   10   |   1.96 |       0 | b-c,-a+c,a+b+c     |
| P -1              |     |        0.003 |    -6.5  |    0   |   6.5  |       0 | a,b,c              |
| R -3 m :H         |     |        0.001 |     6.5  |    6.5 |   0    |       0 | b-c,-a+c,a+b+c     |
| C 1 2/m 1         |     |        0     |    -5.24 |    1.96 |   7.21 |       0 | -a-b,a-b,c         |
| C 1 2/m 1         |     |        0     |    -5.24 |    1.96 |   7.21 |       0 | -b-c,b-c,a         |
| C 1 2/m 1         |     |        0     |    -5.24 |    1.96 |   7.21 |       0 | -a-c,-a+c,b        |
+-------------------+-----+--------------+----------+--------+--------+---------+--------------------+
Best solution: R -3 :H
Unit cell: (98.33, 98.33, 135.99, 90, 90, 120)
Reindex operator: b-c,-a+c,a+b+c
Laue group probability: 0.995
Laue group confidence: 0.994"""
    )
    # Serialized form: per-operator scores...
    d = analysis.as_dict()
    assert d["sym_op_scores"][0] == {
        "cc": pytest.approx(0.19620531091685714),
        "operator": "-x,-z,-y",
        "likelihood": pytest.approx(0.08665625555575088),
        "stars": "",
        "z_cc": pytest.approx(1.9620531091685713),
    }
    # ...and per-subgroup scores, best subgroup first.
    assert d["subgroup_scores"][0] == {
        "confidence": pytest.approx(0.9940687431995551),
        "z_cc_for": pytest.approx(9.999725360190128),
        "stars": "***",
        "patterson_group": "-R 3",
        "max_angular_difference": 0.0,
        "likelihood": pytest.approx(0.995493024305035),
        "cb_op": "-1/3*x+2/3*y-1/3*z,-2/3*x+1/3*y+1/3*z,1/3*x+1/3*y+1/3*z",
        "z_cc_against": pytest.approx(1.9620621986200772),
        "unit_cell": pytest.approx(
            (
                98.32999999999998,
                98.32999999999998,
                135.99,
                90.0,
                90.0,
                119.99999999999999,
            )
        ),
        "z_cc_net": pytest.approx(8.037663161570052),
    }
|
dials/dials
|
tests/algorithms/symmetry/cosym/test_cosym_symmetry_analysis.py
|
Python
|
bsd-3-clause
| 5,645
|
[
"CRYSTAL"
] |
17f1328a528b41cbb143829fb0cd917e49c54b70c9e6e6b5c7008f39323cd361
|
from .eos import *
from .pseudos import *
from .netcdf import *
from .tasks import *
from .works import *
from .calculations import *
|
Dioptas/pymatgen
|
pymatgen/io/abinitio/__init__.py
|
Python
|
mit
| 134
|
[
"NetCDF"
] |
e29b2b8de75e37c0bf0ac52f1bd816a446563f3e0229d580a0fb5989469396ed
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
from Cheetah.Template import Template
from morphforge.simulation.neuron import ModFile
from morphforge.simulation.neuron.simulationdatacontainers import MHocFileData
from morphforge.simulation.neuron.simulationdatacontainers import MHOCSections
from morphforge.simulation.neuron.hocmodbuilders import MM_ModFileWriterBase
class NEURONChlWriterInfTauInterpolated(object):
    """Writes the NEURON HOC and MOD code for channels whose inf/tau curves
    are given as sampled data and interpolated at run time (via a GSL-based
    interpolator linked into the compiled mechanism)."""

    # Cheetah template: per-section HOC that inserts the mechanism and assigns
    # its unit-stripped parameter values.
    chlHoc = """
$(cell_name).internalsections [$section_index] {
// InfTauInterpolated Channels
insert $neuron_suffix
#for variable_name, variable_value_nounit, variable_value_with_unit, variable_unit in $variables:
$(variable_name)_$(neuron_suffix) = $variable_value_nounit //(in $variable_unit, converted from $variable_value_with_unit)
#end for
}
"""

    # Target unit for each channel variable before it is written to HOC.
    Units = {'gBar': 'S/cm2', 'e_rev': 'mV', 'gScale': ''}

    @classmethod
    def build_hoc_section(cls, cell, section, hocfile_obj, mta):
        """Render chlHoc for one (cell, section) pair and append the result to
        the HOC file's InitCellMembranes section."""
        cell_name = hocfile_obj[MHocFileData.Cells][cell]['cell_name']
        section_index = hocfile_obj[MHocFileData.Cells][cell]['section_indexer'][section]
        neuron_suffix = mta.channel.get_neuron_suffix()

        # Calculate the values of the variables for the section:
        variables = []
        for variable_name in mta.channel.get_variables():
            variable_value_with_unit = mta.applicator.get_variable_value_for_section(variable_name=variable_name, section=section)
            variable_unit = NEURONChlWriterInfTauInterpolated.Units[variable_name]
            variable_value_nounit = variable_value_with_unit.rescale(variable_unit).magnitude
            variables.append([variable_name, variable_value_nounit, variable_value_with_unit, variable_unit])

        tmpl_dict = {
            'cell_name': cell_name,
            'section_index': section_index,
            'neuron_suffix': neuron_suffix,
            'variables': variables,
        }

        # Add the data to the HOC file
        hoc_text = Template(NEURONChlWriterInfTauInterpolated.chlHoc, tmpl_dict).respond()
        hocfile_obj.add_to_section(MHOCSections.InitCellMembranes, hoc_text)

    @classmethod
    def build_mod(cls, alphabeta_chl, modfile_set):
        """Generate the NMODL (.mod) text for the channel and append the
        resulting ModFile (with GSL compile/link flags) to modfile_set."""
        gbar_name = 'gBar'
        e_rev_name = 'e_rev'
        g_scale_name = 'gScale'

        base_writer = \
            MM_ModFileWriterBase(suffix=alphabeta_chl.get_neuron_suffix())

        # Naming Conventions:
        state_tau = lambda s: '%stau' % s
        state_inf = lambda s: '%sinf' % s

        # State Equations and initial values:
        # (initial value, first-order relaxation ODE towards the inf curve)
        for s in alphabeta_chl.statevars_new:
            base_writer.internalstates[s] = '%s' % state_inf(s), "%s'=(%s-%s)/%s" % (s, state_inf(s), s, state_tau(s))

        # Parameters:
        # {name: (value, unit, range)}
        base_writer.parameters = {
            gbar_name: (alphabeta_chl.conductance.rescale("S/cm2").magnitude, ("S/cm2"), None),
            e_rev_name: (alphabeta_chl.reversalpotential.rescale("mV").magnitude, ("mV"), None),
            g_scale_name: (1.0, None, None)
        }

        # Rates:
        # name : (locals, code), unit
        for s in alphabeta_chl.statevars_new:
            base_writer.rates[state_inf(s)] = (('', state_inf(s) + "= %sInf(v)" % state_inf(s)), None)
            base_writer.rates[state_tau(s)] = (('', state_tau(s) + "= %sTau(v)" % state_tau(s)), "ms")
            base_writer.ratecalcorder.extend([state_inf(s), state_tau(s)])

        base_writer.currentequation = '(v-%s) * %s * %s * %s' % (e_rev_name, gbar_name, alphabeta_chl.eqn, g_scale_name)
        base_writer.conductanceequation = '%s * %s * %s' % (gbar_name, alphabeta_chl.eqn, g_scale_name)

        # Pull in the GSL interpolation wrapper used by the generated FUNCTIONs.
        base_writer.functions = """
VERBATIM
#include <gsl_wrapper.h>
ENDVERBATIM"""

        # NOTE(review): `state` is accepted but the body closes over the loop
        # variable `s` of the enclosing for-loop instead. This works because the
        # function is always called with state=s inside that loop, but the
        # parameter is effectively ignored — confirm before relying on it.
        def buildInterpolatorFunc(state, inftau, funcname):
            if inftau == 'inf':
                interp_str_x = ','.join(['%2.2f' % x for x in alphabeta_chl.statevars_new[s].V])
                interp_str_y = ','.join(['%2.2f' % x for x in alphabeta_chl.statevars_new[s].inf])
            elif inftau == 'tau':
                interp_str_x = ','.join(['%2.2f' % x for x in alphabeta_chl.statevars_new[s].V])
                interp_str_y = ','.join(['%2.2f' % x for x in alphabeta_chl.statevars_new[s].tau])
            else:
                assert False

            variables = {'chlname':state_tau(s), 'nPts':len(alphabeta_chl.statevars_new[s].V), 'x0':interp_str_x, 'y0':interp_str_y, 'funcname':funcname }

            # MOD FUNCTION: lazily build the interpolator on first call, then
            # evaluate it at the requested voltage.
            f = """
FUNCTION %(funcname)s(V)
{
VERBATIM {
static void* pInterpolator = NULL;
if(!pInterpolator)
{
double x[%(nPts)s] = { %(x0)s };
double y[%(nPts)s] = { %(y0)s };
int nPts = %(nPts)d;
pInterpolator = makeIntWrapper(x, y, nPts);
}
_l%(funcname)s= interpolate2(_lV, pInterpolator);
}
ENDVERBATIM
}
\n\n""" % variables
            return f

        # One interpolator FUNCTION per state variable for each of inf and tau.
        for s in alphabeta_chl.statevars_new:
            base_writer.functions += buildInterpolatorFunc(state=s, inftau='inf', funcname='%sinfInf' % s)
            base_writer.functions += buildInterpolatorFunc(state=s, inftau='tau', funcname='%stauTau' % s)

        txt = base_writer.generate_modfile()

        # TODO: Remove hard dependancy here
        from morphforge.stdimports import RCMgr
        additional_compile_flags = RCMgr.get('Neuron','additional_compile_flags') #"-I/home/michael/hw_to_come/morphforge/src/morphforgecontrib/simulation/neuron_gsl/cpp"
        additional_link_flags = RCMgr.get('Neuron','additional_link_flags') # "-L/home/michael/hw_to_come/morphforge/src/morphforgecontrib/simulation/neuron_gsl/cpp -lgslwrapper -lgsl -lgslcblas"

        mod_file = ModFile(name=alphabeta_chl.name, modtxt=txt, additional_compile_flags=additional_compile_flags, additional_link_flags=additional_link_flags)
        modfile_set.append(mod_file)
|
mikehulluk/morphforge
|
src/morphforgecontrib/simulation/channels/inftauinterpolated/mmwriter_infatauinterpolated.py
|
Python
|
bsd-2-clause
| 7,660
|
[
"NEURON"
] |
961a2ea95ce0987563ef0abce26024cd92cab5a706f8139fa371a6d4dc975e5b
|
import argparse
import logging
import os
import pathlib
import sys
import time
import sqlalchemy.orm
from sqlalchemy.orm import Load
import ispyb
from ispyb.sqlalchemy import BLSession, DataCollection, GridInfo, Proposal
def _tty_line_length():
if not sys.stdout.isatty():
return False
return os.get_terminal_size().columns
def print_data_collections(rows, synchweb_url=None, truncate_length=None):
    """Print one summary line per data collection, oldest first.

    Each row must expose .Proposal, .BLSession, .DataCollection and .GridInfo
    attributes (as returned by get_last_data_collections_on). When
    synchweb_url is set, a SynchWeb link is printed under each line; when
    truncate_length is set, longer lines are shortened with a trailing '...'.
    """
    for record in reversed(rows):
        proposal = record.Proposal
        session = record.BLSession
        collection = record.DataCollection

        visit = (
            f"{proposal.proposalCode}{proposal.proposalNumber}"
            f"-{session.visit_number}"
        )
        dcid = collection.dataCollectionId

        image_count = collection.numberOfImages
        if image_count:
            images = f"{image_count:4} images"
        else:
            images = ""

        if collection.imageDirectory:
            template = pathlib.Path(collection.imageDirectory) / (
                collection.fileTemplate or ""
            )
        else:
            template = ""

        if collection.startTime:
            start_time = f"{collection.startTime:%Y-%m-%d %H:%M}"
        else:
            start_time = "????-??-?? ??:??"

        if record.GridInfo:
            grid_size = f"{record.GridInfo.steps_x:.0f}x{record.GridInfo.steps_y:.0f}"
            grid = f", {grid_size:>5} grid"
        else:
            grid = ""

        line = (
            f"{start_time} {session.beamLineName:8} {dcid:8} {visit:<11} "
            f"{images}{grid} {template}"
        )
        if truncate_length and truncate_length < len(line):
            line = line[: truncate_length - 3] + "..."
        print(line)
        if synchweb_url:
            print(" " * 52 + f"{synchweb_url}/dc/visit/{visit}/id/{dcid}")
def get_last_data_collections_on(beamlines, db_session, limit=10, latest_dcid=None):
    """Query ISPyB for recent data collections on the given beamlines.

    Args:
        beamlines: iterable of beamline names to include.
        db_session: SQLAlchemy session bound to the ISPyB database.
        limit: maximum number of rows, only applied when latest_dcid is None.
        latest_dcid: when given, return only collections with a dataCollectionId
            greater than this value (i.e. everything new since that id),
            without a row limit.

    Returns:
        List of (BLSession, DataCollection, GridInfo, Proposal) result rows,
        newest first (ordered by DataCollection.startTime descending).
        GridInfo may be None (outer join).
    """
    query = (
        db_session.query(BLSession, DataCollection, GridInfo, Proposal)
        # Only load the columns the caller actually formats.
        .options(
            Load(DataCollection).load_only(
                "dataCollectionId",
                "fileTemplate",
                "imageDirectory",
                "numberOfImages",
                "startTime",
            ),
            Load(Proposal).load_only("proposalCode", "proposalNumber"),
            Load(BLSession).load_only("beamLineName", "visit_number"),
            Load(GridInfo).load_only("steps_x", "steps_y"),
        )
        .join(
            BLSession,
            BLSession.sessionId == DataCollection.SESSIONID,
        )
        .join(
            Proposal,
            Proposal.proposalId == BLSession.proposalId,
        )
        # Outer join: not every collection group has grid information.
        .outerjoin(
            GridInfo,
            DataCollection.dataCollectionGroupId == GridInfo.dataCollectionGroupId,
        )
        .filter(BLSession.beamLineName.in_(beamlines))
        # NOTE(review): "nt" proposals are excluded — presumably internal/test
        # proposals; confirm the intended meaning of this code.
        .filter(Proposal.proposalCode != "nt")
    )
    if latest_dcid:
        query = query.filter(DataCollection.dataCollectionId > latest_dcid).order_by(
            DataCollection.startTime.desc(),
        )
    else:
        query = query.order_by(DataCollection.startTime.desc()).limit(limit)
    return query.all()
def main(args=None):
    """Entry point: print recent data collections, optionally following.

    Args:
        args: argument list for argparse; defaults to sys.argv[1:].
    """
    parser = argparse.ArgumentParser(
        usage="ispyb.last_data_collections_on [beamline]",
        description="Command line tool to view most recent data collections.",
    )
    parser.add_argument(
        "beamline", nargs="+", help="Show data collections for these beamlines"
    )
    parser.add_argument(
        "-f",
        "--follow",
        dest="follow",
        default=False,
        action="store_true",
        help="Keep showing new data collections as they appear.",
    )
    parser.add_argument(
        "-s",
        "--sleep",
        dest="sleep",
        default=60,
        type=float,
        help="Length of time (s) to sleep in conjunction with --follow mode.",
    )
    parser.add_argument(
        "-l",
        "--link",
        action="store_true",
        dest="link",
        default=False,
        help="show SynchWeb links for each data collection",
    )
    parser.add_argument(
        "-n",
        "--collections",
        action="store",
        dest="limit",
        default=20,
        type=int,
        metavar="N",
        help="show the last N collections for each beamline",
    )
    parser.add_argument(
        "-d",
        "--debug",
        action="store_true",
        dest="debug",
        default=False,
        help=argparse.SUPPRESS,
    )
    parser.add_argument("--credentials", action="store", type=pathlib.Path)
    parser.add_argument(
        "--synchweb-url",
        dest="synchweb_url",
        default="https://ispyb.diamond.ac.uk",
        type=str,
        help="Base URL for SynchWeb links",
    )
    args = parser.parse_args(args)
    # The previous "if not args:" help/exit branch was dead code: argparse
    # returns a Namespace, which is always truthy, and the required
    # 'beamline' argument already makes argparse exit with usage on its own.

    t0 = time.time()
    if args.debug:
        console = logging.StreamHandler(sys.stdout)
        console.setLevel(logging.DEBUG)
        logging.getLogger("ispyb").addHandler(console)
        logging.getLogger("ispyb").setLevel(logging.DEBUG)
        ispyb.sqlalchemy.enable_debug_logging()

    url = ispyb.sqlalchemy.url(args.credentials)
    engine = sqlalchemy.create_engine(url, connect_args={"use_pure": True})
    Session = sqlalchemy.orm.sessionmaker(bind=engine)

    latest_dcid = None
    print("------Date------ Beamline --DCID-- ---Visit---")
    # Terminate after 24 hours
    while time.time() - t0 < 60 * 60 * 24:
        with Session() as db_session:
            rows = get_last_data_collections_on(
                args.beamline, db_session, limit=args.limit, latest_dcid=latest_dcid
            )
            if rows:
                # Newest dcid seen so far, across all requested beamlines
                # (not per beamline); subsequent queries only fetch
                # collections newer than this watermark.
                latest_dcid = rows[0].DataCollection.dataCollectionId
                print_data_collections(
                    rows,
                    synchweb_url=args.synchweb_url if args.link else None,
                    truncate_length=_tty_line_length(),
                )
        if not args.follow:
            break
        time.sleep(args.sleep)
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
|
DiamondLightSource/ispyb-api
|
src/ispyb/cli/last_data_collections_on.py
|
Python
|
apache-2.0
| 6,047
|
[
"VisIt"
] |
74797951aaa34f4938045b94461926bde7df392217bf4d6f345aad77aac7e7ff
|
#!/usr/bin/python2
import optparse
import os
import shutil
import stat
import subprocess
import sys
from builds.GpBuild import GpBuild
def install_gpdb(dependency_name):
    """Unpack the GPDB tarball found under *dependency_name* into
    /usr/local/gpdb.

    Returns:
        The first non-zero shell exit status, or 0 on success.
    """
    rc = subprocess.call("mkdir -p /usr/local/gpdb", shell=True)
    if rc:
        return rc
    # shell=True is needed so the *.tar.gz glob is expanded by the shell.
    return subprocess.call(
        "tar -xzf " + dependency_name + "/*.tar.gz -C /usr/local/gpdb",
        shell=True)
def create_gpadmin_user():
    """Run the gpadmin setup script and make /bin/ping setuid.

    Returns the script's exit status when it is non-zero, otherwise None.
    NOTE(review): the chmod runs even when the setup script failed —
    confirm whether that ordering is intentional.
    """
    rc = subprocess.call("gpdb_src/concourse/scripts/setup_gpadmin_user.bash")
    ping_mode = os.stat('/bin/ping').st_mode
    os.chmod('/bin/ping', ping_mode | stat.S_ISUID)
    if rc:
        return rc
def copy_output():
    """Copy the regression diffs and output where the CI job collects them."""
    regress_dir = "gpdb_src/src/test/regress/"
    for name in ("regression.diffs", "regression.out"):
        shutil.copyfile(regress_dir + name, "icg_output/" + name)
def main():
    """Install dependencies, build GPDB and run the ICG test suite.

    Returns:
        The first non-zero step status encountered, or the ICG exit status.
    """
    parser = optparse.OptionParser()
    parser.add_option("--build_type", dest="build_type", default="RELEASE")
    parser.add_option("--mode", choices=['orca', 'codegen', 'orca_codegen', 'planner'])
    parser.add_option("--compiler", dest="compiler")
    parser.add_option("--cxxflags", dest="cxxflags")
    parser.add_option("--output_dir", dest="output_dir", default="install")
    parser.add_option("--gpdb_name", dest="gpdb_name")
    (options, args) = parser.parse_args()
    # BUG FIX: ciCommon used to be assigned only for 'orca' and 'planner',
    # so the 'codegen' and 'orca_codegen' choices crashed with a NameError
    # below.  Both branches were identical, so build GpBuild for every mode.
    ciCommon = GpBuild(options.mode)
    for dependency in args:
        status = ciCommon.install_dependency(dependency)
        if status:
            return status
    status = install_gpdb(options.gpdb_name)
    if status:
        return status
    status = ciCommon.configure()
    if status:
        return status
    status = create_gpadmin_user()
    if status:
        return status
    status = ciCommon.icg()
    if status:
        # Preserve the regression output for debugging before failing.
        copy_output()
    return status
# Propagate the pipeline status so CI can fail the job on errors.
if __name__ == "__main__":
    sys.exit(main())
|
cjcjameson/gpdb
|
concourse/scripts/test_gpdb.py
|
Python
|
apache-2.0
| 1,980
|
[
"ORCA"
] |
1e48d7bdf67e95456ab1282faec90901ecac2b88f7cdee9912828599314b3665
|
"""Utilities to read and analyze molecular dynamics trajectories."""
import numpy as np
def read_frame_lammpstrj(trj):
    """Load a frame from a LAMMPS dump file.

    Args:
        trj (file): LAMMPS dump file of format 'ID type x y z'
    Returns:
        xyz (np.ndarray): (n_atoms, 3) coordinates of all atoms
        types (np.ndarray): types of all atoms
        step (int): current timestep
        box (np.ndarray): (3, 2) box bounds, one (lo, hi) pair per axis
    """
    box = np.empty(shape=(3, 2), dtype=np.float32)

    # --- begin header ---
    trj.readline()  # text
    step = int(trj.readline())  # timestep
    trj.readline()  # text
    n_atoms = int(trj.readline())  # num atoms
    trj.readline()  # text
    box[0] = trj.readline().split()  # x-dim of box
    box[1] = trj.readline().split()  # y-dim of box
    box[2] = trj.readline().split()  # z-dim of box
    trj.readline()  # text
    # --- end header ---

    xyz = np.empty(shape=(n_atoms, 3), dtype=np.float32)
    # np.nan replaces np.NAN, which was removed in NumPy 2.0.
    xyz[:] = np.nan
    types = np.empty(shape=(n_atoms,), dtype=np.int32)

    # --- begin body ---
    for i in range(n_atoms):
        temp = trj.readline().split()
        a_ID = int(temp[0])  # atom ID (1-based in the dump file)
        types[a_ID - 1] = int(temp[1])  # atom type
        # BUG FIX: assigning a bare map() object (a lazy iterator on
        # Python 3) to a NumPy slice fails; convert to floats explicitly.
        xyz[a_ID - 1] = [float(x) for x in temp[2:5]]  # coordinates
    # --- end body ---
    return xyz, types, step, box
def distance_pbc(x0, x1, dimensions):
    """Vectorized distance calculation considering minimum image."""
    delta = np.abs(x0 - x1)
    # Fold each component back into the nearest periodic image.
    wrapped = np.where(delta > 0.5 * dimensions, dimensions - delta, delta)
    return np.sqrt(np.sum(wrapped ** 2, axis=-1))
|
ctk3b/pyrdf
|
pyrdf/mdio.py
|
Python
|
bsd-3-clause
| 1,563
|
[
"LAMMPS"
] |
76142cab3c2a5ec4bcba0b7ca862a53edbd65787ccde4153f1b4b97f662e8c14
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import frappe
import unittest
from bs4 import BeautifulSoup
import re
from frappe.utils import set_request
from frappe.website.serve import get_response
from frappe.utils import random_string
from frappe.website.doctype.blog_post.blog_post import get_blog_list
from frappe.website.utils import clear_website_cache
from frappe.website.website_generator import WebsiteGenerator
from frappe.custom.doctype.customize_form.customize_form import reset_customization
# Frappe test-runner hook: fixture records required before these tests run.
test_dependencies = ['Blog Post']
class TestBlogPost(unittest.TestCase):
	"""Integration tests for Blog Post website generation and listing."""
	def setUp(self):
		# Remove any Customize Form overrides so tests see stock behaviour.
		reset_customization('Blog Post')
	def test_generator_view(self):
		"""A published blog post's route should render the article markup."""
		pages = frappe.get_all('Blog Post', fields=['name', 'route'],
			filters={'published': 1, 'route': ('!=', '')}, limit =1)
		set_request(path=pages[0].route)
		response = get_response()
		# NOTE(review): assertTrue(a, b) treats b as the failure message only;
		# assertEqual(response.status_code, 200) was probably intended.
		self.assertTrue(response.status_code, 200)
		html = response.get_data().decode()
		self.assertTrue('<article class="blog-content" itemscope itemtype="http://schema.org/BlogPosting">' in html)
	def test_generator_not_found(self):
		"""An unpublished post given a fresh route should not resolve."""
		pages = frappe.get_all('Blog Post', fields=['name', 'route'],
			filters={'published': 0}, limit =1)
		route = f'test-route-{frappe.generate_hash(length=5)}'
		frappe.db.set_value('Blog Post', pages[0].name, 'route', route)
		set_request(path=route)
		response = get_response()
		# NOTE(review): same assertTrue/assertEqual concern as above.
		self.assertTrue(response.status_code, 404)
	def test_category_link(self):
		"""The category link on a post page should lead to a page listing it."""
		# Make a temporary Blog Post (and a Blog Category)
		blog = make_test_blog('Test Category Link')
		# Visit the blog post page
		set_request(path=blog.route)
		blog_page_response = get_response()
		blog_page_html = frappe.safe_decode(blog_page_response.get_data())
		# On blog post page find link to the category page
		soup = BeautifulSoup(blog_page_html, "lxml")
		category_page_link = list(soup.find_all('a', href=re.compile(blog.blog_category)))[0]
		category_page_url = category_page_link["href"]
		# Temporarily patch the DocType name cache before resolving the
		# category route (presumably needed for route resolution here —
		# confirm against frappe.db.value_cache semantics).
		cached_value = frappe.db.value_cache[('DocType', 'Blog Post', 'name')]
		frappe.db.value_cache[('DocType', 'Blog Post', 'name')] = (('Blog Post',),)
		# Visit the category page (by following the link found in above stage)
		set_request(path=category_page_url)
		category_page_response = get_response()
		category_page_html = frappe.safe_decode(category_page_response.get_data())
		# Category page should contain the blog post title
		self.assertIn(blog.title, category_page_html)
		# Cleanup
		frappe.db.value_cache[('DocType', 'Blog Post', 'name')] = cached_value
		frappe.delete_doc("Blog Post", blog.name)
		frappe.delete_doc("Blog Category", blog.blog_category)
	def test_blog_pagination(self):
		"""get_blog_list should honour its start/length arguments."""
		# Create some Blog Posts for a Blog Category
		category_title, blogs, BLOG_COUNT = "List Category", [], 4
		for index in range(BLOG_COUNT):
			blog = make_test_blog(category_title)
			blogs.append(blog)
		filters = frappe._dict({"blog_category": scrub(category_title)})
		# Assert that get_blog_list returns results as expected
		self.assertEqual(len(get_blog_list(None, None, filters, 0, 3)), 3)
		self.assertEqual(len(get_blog_list(None, None, filters, 0, BLOG_COUNT)), BLOG_COUNT)
		self.assertEqual(len(get_blog_list(None, None, filters, 0, 2)), 2)
		self.assertEqual(len(get_blog_list(None, None, filters, 2, BLOG_COUNT)), 2)
		# Cleanup Blog Post and linked Blog Category
		for blog in blogs:
			frappe.delete_doc(blog.doctype, blog.name)
		frappe.delete_doc("Blog Category", blogs[0].blog_category)
	def test_caching(self):
		"""A second request for the same route should be served from cache."""
		# to enable caching
		frappe.flags.force_website_cache = True
		print(frappe.session.user)
		clear_website_cache()
		# first response no-cache
		pages = frappe.get_all('Blog Post', fields=['name', 'route'],
			filters={'published': 1, 'title': "_Test Blog Post"}, limit=1)
		route = pages[0].route
		set_request(path=route)
		# response = get_response()
		response = get_response()
		# TODO: enable this assert
		# self.assertIn(('X-From-Cache', 'False'), list(response.headers))
		set_request(path=route)
		response = get_response()
		self.assertIn(('X-From-Cache', 'True'), list(response.headers))
		# NOTE(review): this re-enables caching rather than restoring the
		# previous state — was "= False" intended here?
		frappe.flags.force_website_cache = True
def scrub(text):
	"""Slugify *text* via WebsiteGenerator's scrub helper (unbound call)."""
	slug = WebsiteGenerator.scrub(None, text)
	return slug
def make_test_blog(category_title="Test Blog Category"):
	"""Insert and return a published Blog Post, creating the Blog Category
	and Blogger fixtures first when they do not exist yet."""
	category_name = scrub(category_title)
	if not frappe.db.exists('Blog Category', category_name):
		frappe.get_doc({
			'doctype': 'Blog Category',
			'title': category_title,
		}).insert()
	if not frappe.db.exists('Blogger', 'test-blogger'):
		frappe.get_doc({
			'doctype': 'Blogger',
			'short_name': 'test-blogger',
			'full_name': 'Test Blogger',
		}).insert()
	blog = frappe.get_doc({
		'doctype': 'Blog Post',
		'blog_category': category_name,
		'blogger': 'test-blogger',
		'title': random_string(20),
		'route': random_string(20),
		'content': random_string(20),
		'published': 1,
	}).insert()
	return blog
|
frappe/frappe
|
frappe/website/doctype/blog_post/test_blog_post.py
|
Python
|
mit
| 4,930
|
[
"VisIt"
] |
fcac2397dc66ce1f7437b9cfbeba36573ae9de5c5903c4b3bb94817992d5da3f
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""http predefined data
"""
# Mapping of HTTP status code -> reason phrase.
# NOTE(review): some phrases deviate from the RFC spellings (e.g. 203 is
# "Non-Authoritative Information" and 207 is "Multi-Status" in the RFCs,
# both hyphenated) — confirm whether callers depend on these exact strings
# before changing them.
HTTP_CODE = {
    # Status Codes
    # Informational
    100: 'Continue',
    101: 'Switching Protocols',
    102: 'Processing',
    # Successful
    200: 'OK',
    201: 'Created',
    202: 'Accepted',
    203: 'Non Authoritative Information',
    204: 'No Content',
    205: 'Reset Content',
    206: 'Partial Content',
    207: 'Multi Status',
    226: 'IM Used',
    # Redirection
    300: 'Multiple Choices',
    301: 'Moved Permanently',
    302: 'Found',
    303: 'See Other',
    304: 'Not Modified',
    305: 'Use Proxy',
    307: 'Temporary Redirect',
    # Client Error
    400: 'Bad Request',
    401: 'Unauthorized',
    402: 'Payment Required',
    403: 'Forbidden',
    404: 'Not Found',
    405: 'Method Not Allowed',
    406: 'Not Acceptable',
    407: 'Proxy Authentication Required',
    408: 'Request Timeout',
    409: 'Conflict',
    410: 'Gone',
    411: 'Length Required',
    412: 'Precondition Failed',
    413: 'Request Entity Too Large',
    414: 'Request URI Too Long',
    415: 'Unsupported Media Type',
    416: 'Requested Range Not Satisfiable',
    417: 'Expectation Failed',
    422: 'Unprocessable Entity',
    423: 'Locked',
    424: 'Failed Dependency',
    426: 'Upgrade Required',
    # Server Error
    500: 'Internal Server Error',
    501: 'Not Implemented',
    502: 'Bad Gateway',
    503: 'Service Unavailable',
    504: 'Gateway Timeout',
    505: 'HTTP Version Not Supported',
    507: 'Insufficient Storage',
    510: 'Not Extended',
}
CONTENT_TYPE = {
'.123': 'application/vnd.lotus-1-2-3',
'.3ds': 'image/x-3ds',
'.3g2': 'video/3gpp2',
'.3ga': 'video/3gpp',
'.3gp': 'video/3gpp',
'.3gp2': 'video/3gpp2',
'.3gpp': 'video/3gpp',
'.3gpp2': 'video/3gpp2',
'.602': 'application/x-t602',
'.669': 'audio/x-mod',
'.7z': 'application/x-7z-compressed',
'.a': 'application/x-archive',
'.aac': 'audio/aac',
'.abw': 'application/x-abiword',
'.abw.crashed': 'application/x-abiword',
'.abw.gz': 'application/x-abiword',
'.ac3': 'audio/ac3',
'.ace': 'application/x-ace',
'.adb': 'text/x-adasrc',
'.ads': 'text/x-adasrc',
'.afm': 'application/x-font-afm',
'.ag': 'image/x-applix-graphics',
'.ai': 'application/illustrator',
'.aif': 'audio/x-aiff',
'.aifc': 'audio/x-aifc',
'.aiff': 'audio/x-aiff',
'.aiffc': 'audio/x-aifc',
'.al': 'application/x-perl',
'.alz': 'application/x-alz',
'.amr': 'audio/amr',
'.amz': 'audio/x-amzxml',
'.ani': 'application/x-navi-animation',
'.anim[1-9j]': 'video/x-anim',
'.anx': 'application/annodex',
'.ape': 'audio/x-ape',
'.apk': 'application/vnd.android.package-archive',
'.ar': 'application/x-archive',
'.arj': 'application/x-arj',
'.arw': 'image/x-sony-arw',
'.as': 'application/x-applix-spreadsheet',
'.asc': 'text/plain',
'.asf': 'video/x-ms-asf',
'.asp': 'application/x-asp',
'.ass': 'text/x-ssa',
'.asx': 'audio/x-ms-asx',
'.atom': 'application/atom+xml',
'.au': 'audio/basic',
'.avf': 'video/x-msvideo',
'.avi': 'video/x-msvideo',
'.aw': 'application/x-applix-word',
'.awb': 'audio/amr-wb',
'.awk': 'application/x-awk',
'.axa': 'audio/annodex',
'.axv': 'video/annodex',
'.bak': 'application/x-trash',
'.bcpio': 'application/x-bcpio',
'.bdf': 'application/x-font-bdf',
'.bdm': 'video/mp2t',
'.bdmv': 'video/mp2t',
'.bib': 'text/x-bibtex',
'.bin': 'application/octet-stream',
'.blend': 'application/x-blender',
'.blender': 'application/x-blender',
'.bmp': 'image/bmp',
'.bz': 'application/x-bzip',
'.bz2': 'application/x-bzip',
'.c': 'text/x-csrc',
'.c++': 'text/x-c++src',
'.cab': 'application/vnd.ms-cab-compressed',
'.cap': 'application/vnd.tcpdump.pcap',
'.cb7': 'application/x-cb7',
'.cbl': 'text/x-cobol',
'.cbr': 'application/x-cbr',
'.cbt': 'application/x-cbt',
'.cbz': 'application/x-cbz',
'.cc': 'text/x-c++src',
'.ccmx': 'application/x-ccmx',
'.cdf': 'application/x-netcdf',
'.cdr': 'application/vnd.corel-draw',
'.cer': 'application/pkix-cert',
'.cert': 'application/x-x509-ca-cert',
'.cgm': 'image/cgm',
'.chm': 'application/vnd.ms-htmlhelp',
'.chrt': 'application/x-kchart',
'.class': 'application/x-java',
'.clpi': 'video/mp2t',
'.cls': 'text/x-tex',
'.cmake': 'text/x-cmake',
'.cob': 'text/x-cobol',
'.cpi': 'video/mp2t',
'.cpio': 'application/x-cpio',
'.cpio.gz': 'application/x-cpio-compressed',
'.cpp': 'text/x-c++src',
'.cr2': 'image/x-canon-cr2',
'.crl': 'application/pkix-crl',
'.crt': 'application/x-x509-ca-cert',
'.crw': 'image/x-canon-crw',
'.cs': 'text/x-csharp',
'.csh': 'application/x-csh',
'.css': 'text/css',
'.cssl': 'text/css',
'.csv': 'text/csv',
'.cue': 'application/x-cue',
'.cur': 'image/x-win-bitmap',
'.cxx': 'text/x-c++src',
'.d': 'text/x-dsrc',
'.dar': 'application/x-dar',
'.dbf': 'application/x-dbf',
'.dc': 'application/x-dc-rom',
'.dcl': 'text/x-dcl',
'.dcm': 'application/dicom',
'.dcr': 'image/x-kodak-dcr',
'.dds': 'image/x-dds',
'.deb': 'application/x-deb',
'.der': 'application/x-x509-ca-cert',
'.desktop': 'application/x-desktop',
'.di': 'text/x-dsrc',
'.dia': 'application/x-dia-diagram',
'.diff': 'text/x-patch',
'.divx': 'video/x-msvideo',
'.djv': 'image/vnd.djvu',
'.djvu': 'image/vnd.djvu',
'.dmg': 'application/x-apple-diskimage',
'.dmp': 'application/vnd.tcpdump.pcap',
'.dng': 'image/x-adobe-dng',
'.doc': 'application/msword',
'.docbook': 'application/x-docbook+xml',
'.docm': 'application/vnd.ms-word.document.macroenabled.12',
'.docx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'.dot': 'text/vnd.graphviz',
'.dotm': 'application/vnd.ms-word.template.macroenabled.12',
'.dotx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.template',
'.dsl': 'text/x-dsl',
'.dtd': 'application/xml-dtd',
'.dts': 'audio/vnd.dts',
'.dtshd': 'audio/vnd.dts.hd',
'.dtx': 'text/x-tex',
'.dv': 'video/dv',
'.dvi': 'application/x-dvi',
'.dvi.bz2': 'application/x-bzdvi',
'.dvi.gz': 'application/x-gzdvi',
'.dwg': 'image/vnd.dwg',
'.dxf': 'image/vnd.dxf',
'.e': 'text/x-eiffel',
'.egon': 'application/x-egon',
'.eif': 'text/x-eiffel',
'.el': 'text/x-emacs-lisp',
'.emf': 'image/x-emf',
'.eml': 'message/rfc822',
'.emp': 'application/vnd.emusic-emusic_package',
'.ent': 'application/xml-external-parsed-entity',
'.eps': 'image/x-eps',
'.eps.bz2': 'image/x-bzeps',
'.eps.gz': 'image/x-gzeps',
'.epsf': 'image/x-eps',
'.epsf.bz2': 'image/x-bzeps',
'.epsf.gz': 'image/x-gzeps',
'.epsi': 'image/x-eps',
'.epsi.bz2': 'image/x-bzeps',
'.epsi.gz': 'image/x-gzeps',
'.epub': 'application/epub+zip',
'.erl': 'text/x-erlang',
'.es': 'application/ecmascript',
'.etheme': 'application/x-e-theme',
'.etx': 'text/x-setext',
'.exe': 'application/x-ms-dos-executable',
'.exr': 'image/x-exr',
'.ez': 'application/andrew-inset',
'.f': 'text/x-fortran',
'.f4a': 'audio/mp4',
'.f4b': 'audio/x-m4b',
'.f4v': 'video/mp4',
'.f90': 'text/x-fortran',
'.f95': 'text/x-fortran',
'.fb2': 'application/x-fictionbook+xml',
'.fig': 'image/x-xfig',
'.fits': 'image/fits',
'.fl': 'application/x-fluid',
'.flac': 'audio/flac',
'.flc': 'video/x-flic',
'.fli': 'video/x-flic',
'.flv': 'video/x-flv',
'.flw': 'application/x-kivio',
'.fo': 'text/x-xslfo',
'.fodg': 'application/vnd.oasis.opendocument.graphics-flat-xml',
'.fodp': 'application/vnd.oasis.opendocument.presentation-flat-xml',
'.fods': 'application/vnd.oasis.opendocument.spreadsheet-flat-xml',
'.fodt': 'application/vnd.oasis.opendocument.text-flat-xml',
'.for': 'text/x-fortran',
'.fxm': 'video/x-javafx',
'.g3': 'image/fax-g3',
'.gb': 'application/x-gameboy-rom',
'.gba': 'application/x-gba-rom',
'.gcrd': 'text/vcard',
'.ged': 'application/x-gedcom',
'.gedcom': 'application/x-gedcom',
'.gem': 'application/x-tar',
'.gen': 'application/x-genesis-rom',
'.gf': 'application/x-tex-gf',
'.gg': 'application/x-sms-rom',
'.gif': 'image/gif',
'.glade': 'application/x-glade',
'.gml': 'application/gml+xml',
'.gmo': 'application/x-gettext-translation',
'.gnc': 'application/x-gnucash',
'.gnd': 'application/gnunet-directory',
'.gnucash': 'application/x-gnucash',
'.gnumeric': 'application/x-gnumeric',
'.gnuplot': 'application/x-gnuplot',
'.go': 'text/x-go',
'.gp': 'application/x-gnuplot',
'.gpg': 'application/pgp-encrypted',
'.gplt': 'application/x-gnuplot',
'.gra': 'application/x-graphite',
'.gsf': 'application/x-font-type1',
'.gsm': 'audio/x-gsm',
'.gtar': 'application/x-tar',
'.gv': 'text/vnd.graphviz',
'.gvp': 'text/x-google-video-pointer',
'.gz': 'application/gzip',
'.h': 'text/x-chdr',
'.h++': 'text/x-c++hdr',
'.h4': 'application/x-hdf',
'.h5': 'application/x-hdf',
'.hdf': 'application/x-hdf',
'.hdf4': 'application/x-hdf',
'.hdf5': 'application/x-hdf',
'.hh': 'text/x-c++hdr',
'.hlp': 'application/winhlp',
'.hp': 'text/x-c++hdr',
'.hpgl': 'application/vnd.hp-hpgl',
'.hpp': 'text/x-c++hdr',
'.hs': 'text/x-haskell',
'.htm': 'text/html',
'.html': 'text/html',
'.hwp': 'application/x-hwp',
'.hwt': 'application/x-hwt',
'.hxx': 'text/x-c++hdr',
'.ica': 'application/x-ica',
'.icb': 'image/x-tga',
'.icc': 'application/vnd.iccprofile',
'.icm': 'application/vnd.iccprofile',
'.icns': 'image/x-icns',
'.ico': 'image/vnd.microsoft.icon',
'.ics': 'text/calendar',
'.idl': 'text/x-idl',
'.ief': 'image/ief',
'.iff': 'image/x-ilbm',
'.ilbm': 'image/x-ilbm',
'.ime': 'text/x-imelody',
'.imy': 'text/x-imelody',
'.ins': 'text/x-tex',
'.iptables': 'text/x-iptables',
'.iso': 'application/x-cd-image',
'.iso9660': 'application/x-cd-image',
'.it': 'audio/x-it',
'.it87': 'application/x-it87',
'.j2k': 'image/jp2',
'.jad': 'text/vnd.sun.j2me.app-descriptor',
'.jar': 'application/x-java-archive',
'.java': 'text/x-java',
'.jceks': 'application/x-java-jce-keystore',
'.jks': 'application/x-java-keystore',
'.jng': 'image/x-jng',
'.jnlp': 'application/x-java-jnlp-file',
'.jp2': 'image/jp2',
'.jpc': 'image/jp2',
'.jpe': 'image/jpeg',
'.jpeg': 'image/jpeg',
'.jpf': 'image/jp2',
'.jpg': 'image/jpeg',
'.jpr': 'application/x-jbuilder-project',
'.jpx': 'image/jp2',
'.js': 'application/javascript',
'.json': 'application/json',
'.jsonp': 'application/jsonp',
'.k25': 'image/x-kodak-k25',
'.kar': 'audio/midi',
'.karbon': 'application/x-karbon',
'.kdc': 'image/x-kodak-kdc',
'.kdelnk': 'application/x-desktop',
'.kexi': 'application/x-kexiproject-sqlite3',
'.kexic': 'application/x-kexi-connectiondata',
'.kexis': 'application/x-kexiproject-shortcut',
'.kfo': 'application/x-kformula',
'.kil': 'application/x-killustrator',
'.kino': 'application/smil',
'.kml': 'application/vnd.google-earth.kml+xml',
'.kmz': 'application/vnd.google-earth.kmz',
'.kon': 'application/x-kontour',
'.kpm': 'application/x-kpovmodeler',
'.kpr': 'application/x-kpresenter',
'.kpt': 'application/x-kpresenter',
'.kra': 'application/x-krita',
'.ks': 'application/x-java-keystore',
'.ksp': 'application/x-kspread',
'.kud': 'application/x-kugar',
'.kwd': 'application/x-kword',
'.kwt': 'application/x-kword',
'.la': 'application/x-shared-library-la',
'.latex': 'text/x-tex',
'.lbm': 'image/x-ilbm',
'.ldif': 'text/x-ldif',
'.lha': 'application/x-lha',
'.lhs': 'text/x-literate-haskell',
'.lhz': 'application/x-lhz',
'.load' : 'text/html',
'.log': 'text/x-log',
'.lrz': 'application/x-lrzip',
'.ltx': 'text/x-tex',
'.lua': 'text/x-lua',
'.lwo': 'image/x-lwo',
'.lwob': 'image/x-lwo',
'.lwp': 'application/vnd.lotus-wordpro',
'.lws': 'image/x-lws',
'.ly': 'text/x-lilypond',
'.lyx': 'application/x-lyx',
'.lz': 'application/x-lzip',
'.lzh': 'application/x-lha',
'.lzma': 'application/x-lzma',
'.lzo': 'application/x-lzop',
'.m': 'text/x-matlab',
'.m15': 'audio/x-mod',
'.m1u': 'video/vnd.mpegurl',
'.m2t': 'video/mp2t',
'.m2ts': 'video/mp2t',
'.m3u': 'application/vnd.apple.mpegurl',
'.m3u8': 'application/vnd.apple.mpegurl',
'.m4': 'application/x-m4',
'.m4a': 'audio/mp4',
'.m4b': 'audio/x-m4b',
'.m4u': 'video/vnd.mpegurl',
'.m4v': 'video/mp4',
'.mab': 'application/x-markaby',
'.mak': 'text/x-makefile',
'.man': 'application/x-troff-man',
'.manifest': 'text/cache-manifest',
'.markdown': 'text/x-markdown',
'.mbox': 'application/mbox',
'.md': 'text/x-markdown',
'.mdb': 'application/vnd.ms-access',
'.mdi': 'image/vnd.ms-modi',
'.me': 'text/x-troff-me',
'.med': 'audio/x-mod',
'.meta4': 'application/metalink4+xml',
'.metalink': 'application/metalink+xml',
'.mgp': 'application/x-magicpoint',
'.mht': 'application/x-mimearchive',
'.mhtml': 'application/x-mimearchive',
'.mid': 'audio/midi',
'.midi': 'audio/midi',
'.mif': 'application/x-mif',
'.minipsf': 'audio/x-minipsf',
'.mk': 'text/x-makefile',
'.mka': 'audio/x-matroska',
'.mkd': 'text/x-markdown',
'.mkv': 'video/x-matroska',
'.ml': 'text/x-ocaml',
'.mli': 'text/x-ocaml',
'.mm': 'text/x-troff-mm',
'.mmf': 'application/x-smaf',
'.mml': 'application/mathml+xml',
'.mng': 'video/x-mng',
'.mo': 'text/x-modelica',
'.mo3': 'audio/x-mo3',
'.mobi': 'application/x-mobipocket-ebook',
'.moc': 'text/x-moc',
'.mod': 'audio/x-mod',
'.mof': 'text/x-mof',
'.moov': 'video/quicktime',
'.mov': 'video/quicktime',
'.movie': 'video/x-sgi-movie',
'.mp+': 'audio/x-musepack',
'.mp2': 'video/mpeg',
'.mp3': 'audio/mpeg',
'.mp4': 'video/mp4',
'.mpc': 'audio/x-musepack',
'.mpe': 'video/mpeg',
'.mpeg': 'video/mpeg',
'.mpg': 'video/mpeg',
'.mpga': 'audio/mpeg',
'.mpl': 'video/mp2t',
'.mpls': 'video/mp2t',
'.mpp': 'audio/x-musepack',
'.mrl': 'text/x-mrml',
'.mrml': 'text/x-mrml',
'.mrw': 'image/x-minolta-mrw',
'.ms': 'text/x-troff-ms',
'.msi': 'application/x-msi',
'.msod': 'image/x-msod',
'.msx': 'application/x-msx-rom',
'.mtm': 'audio/x-mod',
'.mts': 'video/mp2t',
'.mup': 'text/x-mup',
'.mxf': 'application/mxf',
'.mxu': 'video/vnd.mpegurl',
'.n64': 'application/x-n64-rom',
'.nb': 'application/mathematica',
'.nc': 'application/x-netcdf',
'.nds': 'application/x-nintendo-ds-rom',
'.nef': 'image/x-nikon-nef',
'.nes': 'application/x-nes-rom',
'.nfo': 'text/x-nfo',
'.not': 'text/x-mup',
'.nsc': 'application/x-netshow-channel',
'.nsv': 'video/x-nsv',
'.nzb': 'application/x-nzb',
'.o': 'application/x-object',
'.obj': 'application/x-tgif',
'.ocl': 'text/x-ocl',
'.oda': 'application/oda',
'.odb': 'application/vnd.oasis.opendocument.database',
'.odc': 'application/vnd.oasis.opendocument.chart',
'.odf': 'application/vnd.oasis.opendocument.formula',
'.odg': 'application/vnd.oasis.opendocument.graphics',
'.odi': 'application/vnd.oasis.opendocument.image',
'.odm': 'application/vnd.oasis.opendocument.text-master',
'.odp': 'application/vnd.oasis.opendocument.presentation',
'.ods': 'application/vnd.oasis.opendocument.spreadsheet',
'.odt': 'application/vnd.oasis.opendocument.text',
'.oga': 'audio/ogg',
'.ogg': 'application/ogg',
'.ogm': 'video/x-ogm+ogg',
'.ogv': 'video/ogg',
'.ogx': 'application/ogg',
'.old': 'application/x-trash',
'.oleo': 'application/x-oleo',
'.ooc': 'text/x-ooc',
'.opml': 'text/x-opml+xml',
'.oprc': 'application/vnd.palm',
'.ora': 'image/openraster',
'.orf': 'image/x-olympus-orf',
'.otc': 'application/vnd.oasis.opendocument.chart-template',
'.otf': 'application/x-font-otf',
'.otg': 'application/vnd.oasis.opendocument.graphics-template',
'.oth': 'application/vnd.oasis.opendocument.text-web',
'.otp': 'application/vnd.oasis.opendocument.presentation-template',
'.ots': 'application/vnd.oasis.opendocument.spreadsheet-template',
'.ott': 'application/vnd.oasis.opendocument.text-template',
'.owl': 'application/rdf+xml',
'.oxps': 'application/oxps',
'.oxt': 'application/vnd.openofficeorg.extension',
'.p': 'text/x-pascal',
'.p10': 'application/pkcs10',
'.p12': 'application/x-pkcs12',
'.p7b': 'application/x-pkcs7-certificates',
'.p7c': 'application/pkcs7-mime',
'.p7m': 'application/pkcs7-mime',
'.p7s': 'application/pkcs7-signature',
'.p8': 'application/pkcs8',
'.pack': 'application/x-java-pack200',
'.pak': 'application/x-pak',
'.par2': 'application/x-par2',
'.pas': 'text/x-pascal',
'.patch': 'text/x-patch',
'.pbm': 'image/x-portable-bitmap',
'.pcap': 'application/vnd.tcpdump.pcap',
'.pcd': 'image/x-photo-cd',
'.pcf': 'application/x-cisco-vpn-settings',
'.pcf.gz': 'application/x-font-pcf',
'.pcf.z': 'application/x-font-pcf',
'.pcl': 'application/vnd.hp-pcl',
'.pct': 'image/x-pict',
'.pcx': 'image/x-pcx',
'.pdb': 'chemical/x-pdb',
'.pdc': 'application/x-aportisdoc',
'.pdf': 'application/pdf',
'.pdf.bz2': 'application/x-bzpdf',
'.pdf.gz': 'application/x-gzpdf',
'.pdf.xz': 'application/x-xzpdf',
'.pef': 'image/x-pentax-pef',
'.pem': 'application/x-x509-ca-cert',
'.perl': 'application/x-perl',
'.pfa': 'application/x-font-type1',
'.pfb': 'application/x-font-type1',
'.pfx': 'application/x-pkcs12',
'.pgm': 'image/x-portable-graymap',
'.pgn': 'application/x-chess-pgn',
'.pgp': 'application/pgp-encrypted',
'.php': 'application/x-php',
'.php3': 'application/x-php',
'.php4': 'application/x-php',
'.php5': 'application/x-php',
'.phps': 'application/x-php',
'.pict': 'image/x-pict',
'.pict1': 'image/x-pict',
'.pict2': 'image/x-pict',
'.pk': 'application/x-tex-pk',
'.pkipath': 'application/pkix-pkipath',
'.pkr': 'application/pgp-keys',
'.pl': 'application/x-perl',
'.pla': 'audio/x-iriver-pla',
'.pln': 'application/x-planperfect',
'.pls': 'audio/x-scpls',
'.pm': 'application/x-perl',
'.png': 'image/png',
'.pnm': 'image/x-portable-anymap',
'.pntg': 'image/x-macpaint',
'.po': 'text/x-gettext-translation',
'.por': 'application/x-spss-por',
'.pot': 'text/x-gettext-translation-template',
'.potm': 'application/vnd.ms-powerpoint.template.macroenabled.12',
'.potx': 'application/vnd.openxmlformats-officedocument.presentationml.template',
'.ppam': 'application/vnd.ms-powerpoint.addin.macroenabled.12',
'.ppm': 'image/x-portable-pixmap',
'.pps': 'application/vnd.ms-powerpoint',
'.ppsm': 'application/vnd.ms-powerpoint.slideshow.macroenabled.12',
'.ppsx': 'application/vnd.openxmlformats-officedocument.presentationml.slideshow',
'.ppt': 'application/vnd.ms-powerpoint',
'.pptm': 'application/vnd.ms-powerpoint.presentation.macroenabled.12',
'.pptx': 'application/vnd.openxmlformats-officedocument.presentationml.presentation',
'.ppz': 'application/vnd.ms-powerpoint',
'.pqa': 'application/vnd.palm',
'.prc': 'application/vnd.palm',
'.ps': 'application/postscript',
'.ps.bz2': 'application/x-bzpostscript',
'.ps.gz': 'application/x-gzpostscript',
'.psd': 'image/vnd.adobe.photoshop',
'.psf': 'audio/x-psf',
'.psf.gz': 'application/x-gz-font-linux-psf',
'.psflib': 'audio/x-psflib',
'.psid': 'audio/prs.sid',
'.psw': 'application/x-pocket-word',
'.pw': 'application/x-pw',
'.py': 'text/x-python',
'.pyc': 'application/x-python-bytecode',
'.pickle': 'application/python-pickle',
'.pyo': 'application/x-python-bytecode',
'.qif': 'image/x-quicktime',
'.qml': 'text/x-qml',
'.qt': 'video/quicktime',
'.qti': 'application/x-qtiplot',
'.qti.gz': 'application/x-qtiplot',
'.qtif': 'image/x-quicktime',
'.qtl': 'application/x-quicktime-media-link',
'.qtvr': 'video/quicktime',
'.ra': 'audio/vnd.rn-realaudio',
'.raf': 'image/x-fuji-raf',
'.ram': 'application/ram',
'.rar': 'application/x-rar',
'.ras': 'image/x-cmu-raster',
'.raw': 'image/x-panasonic-raw',
'.rax': 'audio/vnd.rn-realaudio',
'.rb': 'application/x-ruby',
'.rdf': 'application/rdf+xml',
'.rdfs': 'application/rdf+xml',
'.reg': 'text/x-ms-regedit',
'.rej': 'text/x-reject',
'.rgb': 'image/x-rgb',
'.rle': 'image/rle',
'.rm': 'application/vnd.rn-realmedia',
'.rmj': 'application/vnd.rn-realmedia',
'.rmm': 'application/vnd.rn-realmedia',
'.rms': 'application/vnd.rn-realmedia',
'.rmvb': 'application/vnd.rn-realmedia',
'.rmx': 'application/vnd.rn-realmedia',
'.rnc': 'application/relax-ng-compact-syntax',
'.rng': 'application/xml',
'.roff': 'text/troff',
'.rp': 'image/vnd.rn-realpix',
'.rpm': 'application/x-rpm',
'.rss': 'application/rss+xml',
'.rt': 'text/vnd.rn-realtext',
'.rtf': 'application/rtf',
'.rtx': 'text/richtext',
'.rv': 'video/vnd.rn-realvideo',
'.rvx': 'video/vnd.rn-realvideo',
'.rw2': 'image/x-panasonic-raw2',
'.s3m': 'audio/x-s3m',
'.sam': 'application/x-amipro',
'.sami': 'application/x-sami',
'.sav': 'application/x-spss-sav',
'.scala': 'text/x-scala',
'.scm': 'text/x-scheme',
'.sda': 'application/vnd.stardivision.draw',
'.sdc': 'application/vnd.stardivision.calc',
'.sdd': 'application/vnd.stardivision.impress',
'.sdp': 'application/sdp',
'.sds': 'application/vnd.stardivision.chart',
'.sdw': 'application/vnd.stardivision.writer',
'.sgf': 'application/x-go-sgf',
'.sgi': 'image/x-sgi',
'.sgl': 'application/vnd.stardivision.writer',
'.sgm': 'text/sgml',
'.sgml': 'text/sgml',
'.sh': 'application/x-shellscript',
'.shape': 'application/x-dia-shape',
'.shar': 'application/x-shar',
'.shn': 'application/x-shorten',
'.siag': 'application/x-siag',
'.sid': 'audio/prs.sid',
'.sik': 'application/x-trash',
'.sis': 'application/vnd.symbian.install',
'.sisx': 'x-epoc/x-sisx-app',
'.sit': 'application/x-stuffit',
'.siv': 'application/sieve',
'.sk': 'image/x-skencil',
'.sk1': 'image/x-skencil',
'.skr': 'application/pgp-keys',
'.sldm': 'application/vnd.ms-powerpoint.slide.macroenabled.12',
'.sldx': 'application/vnd.openxmlformats-officedocument.presentationml.slide',
'.slk': 'text/spreadsheet',
'.smaf': 'application/x-smaf',
'.smc': 'application/x-snes-rom',
'.smd': 'application/vnd.stardivision.mail',
'.smf': 'application/vnd.stardivision.math',
'.smi': 'application/x-sami',
'.smil': 'application/smil',
'.sml': 'application/smil',
'.sms': 'application/x-sms-rom',
'.snd': 'audio/basic',
'.so': 'application/x-sharedlib',
'.spc': 'application/x-pkcs7-certificates',
'.spd': 'application/x-font-speedo',
'.spec': 'text/x-rpm-spec',
'.spl': 'application/x-shockwave-flash',
'.spm': 'application/x-source-rpm',
'.spx': 'audio/x-speex',
'.sql': 'text/x-sql',
'.sr2': 'image/x-sony-sr2',
'.src': 'application/x-wais-source',
'.src.rpm': 'application/x-source-rpm',
'.srf': 'image/x-sony-srf',
'.srt': 'application/x-subrip',
'.ss': 'text/x-scheme',
'.ssa': 'text/x-ssa',
'.stc': 'application/vnd.sun.xml.calc.template',
'.std': 'application/vnd.sun.xml.draw.template',
'.sti': 'application/vnd.sun.xml.impress.template',
'.stm': 'audio/x-stm',
'.stw': 'application/vnd.sun.xml.writer.template',
'.sty': 'text/x-tex',
'.sub': 'text/x-subviewer',
'.sun': 'image/x-sun-raster',
'.sv': 'text/x-svsrc',
'.sv4cpio': 'application/x-sv4cpio',
'.sv4crc': 'application/x-sv4crc',
'.svg': 'image/svg+xml',
'.svgz': 'image/svg+xml-compressed',
'.svh': 'text/x-svhdr',
'.swf': 'application/x-shockwave-flash',
'.swm': 'application/x-ms-wim',
'.sxc': 'application/vnd.sun.xml.calc',
'.sxd': 'application/vnd.sun.xml.draw',
'.sxg': 'application/vnd.sun.xml.writer.global',
'.sxi': 'application/vnd.sun.xml.impress',
'.sxm': 'application/vnd.sun.xml.math',
'.sxw': 'application/vnd.sun.xml.writer',
'.sylk': 'text/spreadsheet',
'.t': 'text/troff',
'.t2t': 'text/x-txt2tags',
'.tar': 'application/x-tar',
'.tar.bz': 'application/x-bzip-compressed-tar',
'.tar.bz2': 'application/x-bzip-compressed-tar',
'.tar.gz': 'application/x-compressed-tar',
'.tar.lrz': 'application/x-lrzip-compressed-tar',
'.tar.lzma': 'application/x-lzma-compressed-tar',
'.tar.lzo': 'application/x-tzo',
'.tar.xz': 'application/x-xz-compressed-tar',
'.tar.z': 'application/x-tarz',
'.taz': 'application/x-tarz',
'.tb2': 'application/x-bzip-compressed-tar',
'.tbz': 'application/x-bzip-compressed-tar',
'.tbz2': 'application/x-bzip-compressed-tar',
'.tcl': 'text/x-tcl',
'.tex': 'text/x-tex',
'.texi': 'text/x-texinfo',
'.texinfo': 'text/x-texinfo',
'.tga': 'image/x-tga',
'.tgz': 'application/x-compressed-tar',
'.theme': 'application/x-theme',
'.themepack': 'application/x-windows-themepack',
'.tif': 'image/tiff',
'.tiff': 'image/tiff',
'.tk': 'text/x-tcl',
'.tlrz': 'application/x-lrzip-compressed-tar',
'.tlz': 'application/x-lzma-compressed-tar',
'.tnef': 'application/vnd.ms-tnef',
'.tnf': 'application/vnd.ms-tnef',
'.toc': 'application/x-cdrdao-toc',
'.torrent': 'application/x-bittorrent',
'.tpic': 'image/x-tga',
'.tr': 'text/troff',
'.ts': 'video/mp2t',
'.tsv': 'text/tab-separated-values',
'.tta': 'audio/x-tta',
'.ttc': 'application/x-font-ttf',
'.ttf': 'application/x-font-ttf',
'.ttx': 'application/x-font-ttx',
'.txt': 'text/plain',
'.txz': 'application/x-xz-compressed-tar',
'.tzo': 'application/x-tzo',
'.ufraw': 'application/x-ufraw',
'.ui': 'application/x-gtk-builder',
'.uil': 'text/x-uil',
'.ult': 'audio/x-mod',
'.uni': 'audio/x-mod',
'.url': 'application/x-mswinurl',
'.ustar': 'application/x-ustar',
'.uue': 'text/x-uuencode',
'.v': 'text/x-verilog',
'.vala': 'text/x-vala',
'.vapi': 'text/x-vala',
'.vcard': 'text/vcard',
'.vcf': 'text/vcard',
'.vcs': 'text/calendar',
'.vct': 'text/vcard',
'.vda': 'image/x-tga',
'.vhd': 'text/x-vhdl',
'.vhdl': 'text/x-vhdl',
'.viv': 'video/vivo',
'.vivo': 'video/vivo',
'.vlc': 'audio/x-mpegurl',
'.vob': 'video/mpeg',
'.voc': 'audio/x-voc',
'.vor': 'application/vnd.stardivision.writer',
'.vrm': 'model/vrml',
'.vrml': 'model/vrml',
'.vsd': 'application/vnd.visio',
'.vss': 'application/vnd.visio',
'.vst': 'image/x-tga',
'.vsw': 'application/vnd.visio',
'.vtt': 'text/vtt',
'.w2p': 'application/w2p',
'.wav': 'audio/x-wav',
'.wax': 'audio/x-ms-asx',
'.wb1': 'application/x-quattropro',
'.wb2': 'application/x-quattropro',
'.wb3': 'application/x-quattropro',
'.wbmp': 'image/vnd.wap.wbmp',
'.wcm': 'application/vnd.ms-works',
'.wdb': 'application/vnd.ms-works',
'.webm': 'video/webm',
'.wim': 'application/x-ms-wim',
'.wk1': 'application/vnd.lotus-1-2-3',
'.wk3': 'application/vnd.lotus-1-2-3',
'.wk4': 'application/vnd.lotus-1-2-3',
'.wks': 'application/vnd.ms-works',
'.wma': 'audio/x-ms-wma',
'.wmf': 'image/x-wmf',
'.wml': 'text/vnd.wap.wml',
'.wmls': 'text/vnd.wap.wmlscript',
'.wmv': 'video/x-ms-wmv',
'.wmx': 'audio/x-ms-asx',
'.woff': 'application/font-woff',
'.wp': 'application/vnd.wordperfect',
'.wp4': 'application/vnd.wordperfect',
'.wp5': 'application/vnd.wordperfect',
'.wp6': 'application/vnd.wordperfect',
'.wpd': 'application/vnd.wordperfect',
'.wpg': 'application/x-wpg',
'.wpl': 'application/vnd.ms-wpl',
'.wpp': 'application/vnd.wordperfect',
'.wps': 'application/vnd.ms-works',
'.wri': 'application/x-mswrite',
'.wrl': 'model/vrml',
'.wsgi': 'text/x-python',
'.wv': 'audio/x-wavpack',
'.wvc': 'audio/x-wavpack-correction',
'.wvp': 'audio/x-wavpack',
'.wvx': 'audio/x-ms-asx',
'.wwf': 'application/x-wwf',
'.x3f': 'image/x-sigma-x3f',
'.xac': 'application/x-gnucash',
'.xbel': 'application/x-xbel',
'.xbl': 'application/xml',
'.xbm': 'image/x-xbitmap',
'.xcf': 'image/x-xcf',
'.xcf.bz2': 'image/x-compressed-xcf',
'.xcf.gz': 'image/x-compressed-xcf',
'.xhtml': 'application/xhtml+xml',
'.xi': 'audio/x-xi',
'.xla': 'application/vnd.ms-excel',
'.xlam': 'application/vnd.ms-excel.addin.macroenabled.12',
'.xlc': 'application/vnd.ms-excel',
'.xld': 'application/vnd.ms-excel',
'.xlf': 'application/x-xliff',
'.xliff': 'application/x-xliff',
'.xll': 'application/vnd.ms-excel',
'.xlm': 'application/vnd.ms-excel',
'.xlr': 'application/vnd.ms-works',
'.xls': 'application/vnd.ms-excel',
'.xlsb': 'application/vnd.ms-excel.sheet.binary.macroenabled.12',
'.xlsm': 'application/vnd.ms-excel.sheet.macroenabled.12',
'.xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'.xlt': 'application/vnd.ms-excel',
'.xltm': 'application/vnd.ms-excel.template.macroenabled.12',
'.xltx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.template',
'.xlw': 'application/vnd.ms-excel',
'.xm': 'audio/x-xm',
'.xmf': 'audio/x-xmf',
'.xmi': 'text/x-xmi',
'.xml': 'application/xml',
'.xpi': 'application/x-xpinstall',
'.xpm': 'image/x-xpixmap',
'.xps': 'application/oxps',
'.xsd': 'application/xml',
'.xsl': 'application/xslt+xml',
'.xslfo': 'text/x-xslfo',
'.xslt': 'application/xslt+xml',
'.xspf': 'application/xspf+xml',
'.xul': 'application/vnd.mozilla.xul+xml',
'.xwd': 'image/x-xwindowdump',
'.xyz': 'chemical/x-pdb',
'.xz': 'application/x-xz',
'.yaml': 'application/x-yaml',
'.yml': 'application/x-yaml',
'.z': 'application/x-compress',
'.zabw': 'application/x-abiword',
'.zip': 'application/zip',
'.zoo': 'application/x-zoo',
}
if __name__ == "__main__":
    # This module is only a lookup table (file extension -> MIME type);
    # there is nothing to execute when run as a script.
    pass
|
kasworld/tiny_uwsgi
|
tiny_uwsgi/httpconst.py
|
Python
|
lgpl-3.0
| 30,962
|
[
"NetCDF"
] |
8ddfd35b53d2e644a2cf9e3e58205fcc68947daa8f221ac21b49bef40eeafcb9
|
from tool_shed.base.twilltestcase import ShedTwillTestCase, common, os
# Fixture constants shared by the test methods below: the repository/category
# created in the tool shed for the "invalid tool revisions" scenario.
repository_name = 'bismark_0070'
repository_description = "Galaxy's bismark wrapper"
repository_long_description = "Long description of Galaxy's bismark wrapper"
category_name = 'Test 0070 Invalid Tool Revisions'
category_description = 'Test 1070 for a repository with an invalid tool.'
class TestFreebayesRepository( ShedTwillTestCase ):
    '''Test repository with multiple revisions with invalid tools.'''

    def test_0000_create_or_login_admin_user( self ):
        """Create necessary user accounts and login as an admin user."""
        self.galaxy_logout()
        self.galaxy_login( email=common.admin_email, username=common.admin_username )
        galaxy_admin_user = self.test_db_util.get_galaxy_user( common.admin_email )
        assert galaxy_admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email
        galaxy_admin_user_private_role = self.test_db_util.get_galaxy_private_role( galaxy_admin_user )
        self.logout()
        self.login( email=common.test_user_1_email, username=common.test_user_1_name )
        test_user_1 = self.test_db_util.get_user( common.test_user_1_email )
        # Bug fix: the failure message previously used the bare name
        # 'test_user_1_email', which is undefined in this scope and would have
        # raised NameError instead of the intended AssertionError.
        assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email
        test_user_1_private_role = self.test_db_util.get_private_role( test_user_1 )
        self.logout()
        self.login( email=common.admin_email, username=common.admin_username )
        admin_user = self.test_db_util.get_user( common.admin_email )
        # Bug fix: same problem with the bare name 'admin_email'.
        assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email
        admin_user_private_role = self.test_db_util.get_private_role( admin_user )

    def test_0005_ensure_existence_of_repository_and_category( self ):
        '''Create freebayes repository and upload only freebayes.xml. This should result in an error message and invalid tool.'''
        self.create_category( name=category_name, description=category_description )
        self.logout()
        self.login( email=common.test_user_1_email, username=common.test_user_1_name )
        category = self.test_db_util.get_category_by_name( category_name )
        repository = self.get_or_create_repository( name=repository_name,
                                                    description=repository_description,
                                                    long_description=repository_long_description,
                                                    owner=common.test_user_1_name,
                                                    category_id=self.security.encode_id( category.id ),
                                                    strings_displayed=[] )
        if self.repository_is_new( repository ):
            # First upload the full tarball, then overwrite one tool xml with
            # an updated (and intentionally invalid) revision.
            self.upload_file( repository,
                              filename='bismark/bismark.tar',
                              filepath=None,
                              valid_tools_only=False,
                              uncompress_file=True,
                              remove_repo_files_not_in_tar=False,
                              commit_message='Uploaded bismark tarball.',
                              strings_displayed=[],
                              strings_not_displayed=[] )
            self.upload_file( repository,
                              filename='bismark/bismark_methylation_extractor.xml',
                              filepath=None,
                              valid_tools_only=False,
                              uncompress_file=False,
                              remove_repo_files_not_in_tar=False,
                              commit_message='Uploaded an updated tool xml.',
                              strings_displayed=[],
                              strings_not_displayed=[] )

    def test_0010_browse_tool_shed( self ):
        """Browse the available tool sheds in this Galaxy instance and preview the bismark repository."""
        self.galaxy_logout()
        self.galaxy_login( email=common.admin_email, username=common.admin_username )
        self.browse_tool_shed( url=self.url, strings_displayed=[ category_name ] )
        category = self.test_db_util.get_category_by_name( category_name )
        self.browse_category( category, strings_displayed=[ repository_name ] )
        self.preview_repository_in_tool_shed( repository_name, common.test_user_1_name, strings_displayed=[ repository_name ] )

    def test_0015_install_freebayes_repository( self ):
        '''Install the test repository without installing tool dependencies.'''
        self.install_repository( repository_name,
                                 common.test_user_1_name,
                                 category_name,
                                 install_tool_dependencies=False,
                                 new_tool_panel_section_label='test_1070' )
        installed_repository = self.test_db_util.get_installed_repository_by_name_owner( repository_name, common.test_user_1_name )
        strings_displayed = [ 'bismark_0070',
                              "Galaxy's bismark wrapper",
                              'user1',
                              self.url.replace( 'http://', '' ),
                              installed_repository.installed_changeset_revision ]
        self.display_galaxy_browse_repositories_page( strings_displayed=strings_displayed )
        strings_displayed.extend( [ 'methylation extractor', 'Invalid tools' ] )
        self.display_installed_repository_manage_page( installed_repository,
                                                       strings_displayed=strings_displayed,
                                                       strings_not_displayed=[ 'bisulfite mapper' ] )
        self.verify_tool_metadata_for_installed_repository( installed_repository )
        self.update_installed_repository( installed_repository, strings_displayed=[ "there are no updates available" ] )
        assert 'invalid_tools' in installed_repository.metadata, 'No invalid tools were defined in %s.' % \
            installed_repository.name
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/test/tool_shed/functional/test_1070_invalid_tool.py
|
Python
|
gpl-3.0
| 6,165
|
[
"Galaxy"
] |
aa04eba7bc39ac0aa72fe42af991dc07c632d433af7ffcf539a7b984203a2876
|
#__docformat__ = "restructuredtext en"
# ******NOTICE***************
# optimize.py module by Travis E. Oliphant
#
# You may copy and use this module as you see fit with no
# guarantee implied provided you keep this notice in all copies.
# *****END NOTICE************
import time
import numpy as np
from numpy import atleast_1d, eye, mgrid, argmin, zeros, shape, empty, \
squeeze, vectorize, asarray, absolute, sqrt, Inf, asfarray, isinf
from ase.utils.linesearch import LineSearch
from ase.optimize.optimize import Optimizer
# These have been copied from Numeric's MLab.py
# I don't think they made the transition to scipy_core
# Modified from scipy_optimize
abs = absolute  # NOTE: intentionally shadows the builtin, mirroring scipy_optimize
import __builtin__
# Keep handles to the real builtins, since 'abs' is shadowed above and callers
# may still need the genuine min/max.
pymin = __builtin__.min
pymax = __builtin__.max
__version__ = '0.1'
class BFGSLineSearch(Optimizer):
    # BFGS quasi-Newton optimizer combined with an inexact line search
    # (c1/c2 are the Wolfe-condition parameters).  Modified from
    # scipy_optimize; this is Python 2 code (print statements, __builtin__).
    def __init__(self, atoms, restart=None, logfile='-', maxstep=.2,
                 trajectory=None, c1=0.23, c2=0.46, alpha=10.0, stpmax=50.0):
        """Optimize atomic positions in the BFGSLineSearch algorithm, which
        uses both forces and potential energy information.
        Parameters:
        restart: string
            Pickle file used to store hessian matrix. If set, file with
            such a name will be searched and hessian matrix stored will
            be used, if the file exists.
        trajectory: string
            Pickle file used to store trajectory of atomic movement.
        maxstep: float
            Used to set the maximum distance an atom can move per
            iteration (default value is 0.2 Angstroms).
        logfile: string
            Text file used to write summary information.
        """
        self.maxstep = maxstep
        self.stpmax = stpmax
        self.alpha = alpha          # energy/gradient scaling (SciPy assumes identity Hessian)
        self.H = None               # approximate inverse Hessian
        self.c1 = c1                # Wolfe sufficient-decrease parameter
        self.c2 = c2                # Wolfe curvature parameter
        self.force_calls = 0
        self.function_calls = 0
        self.r0 = None              # previous flattened positions
        self.g0 = None              # previous scaled gradient
        self.e0 = None              # previous energy
        self.load_restart = False
        self.task = 'START'
        self.rep_count = 0
        self.p = None               # current search direction
        self.alpha_k = None         # step length found by the line search
        self.no_update = False      # set by the line search to suppress a Hessian update
        self.replay = False         # True while replay_trajectory() is feeding old steps
        Optimizer.__init__(self, atoms, restart, logfile, trajectory)
    def read(self):
        # Restore optimizer state from the restart (pickle) file.
        self.r0, self.g0, self.e0, self.task, self.H = self.load()
        self.load_restart = True
    def reset(self):
        # Discard the accumulated Hessian and history so the next step
        # starts from scratch.
        print 'reset'
        self.H = None
        self.r0 = None
        self.g0 = None
        self.e0 = None
        self.rep_count = 0
    def step(self, f):
        # Perform one optimization step given the current forces *f*.
        atoms = self.atoms
        from ase.neb import NEB
        if isinstance(atoms, NEB):
            raise TypeError('NEB calculations cannot use the BFGSLineSearch'
                            ' optimizer. Use BFGS or another optimizer.')
        r = atoms.get_positions()
        r = r.reshape(-1)
        # Forces are minus the gradient; divide by alpha to scale the problem.
        g = -f.reshape(-1) / self.alpha
        p0 = self.p
        self.update(r, g, self.r0, self.g0, p0)
        #o,v = np.linalg.eigh(self.B)
        e = self.func(r)
        # Quasi-Newton search direction: p = -H g.
        self.p = -np.dot(self.H,g)
        p_size = np.sqrt((self.p **2).sum())
        if self.nsteps != 0:
            p0_size = np.sqrt((p0 **2).sum())
            delta_p = self.p/p_size + p0/p0_size
        # Guard against a vanishing search direction by rescaling it.
        if p_size <= np.sqrt(len(atoms) * 1e-10):
            self.p /= (p_size / np.sqrt(len(atoms)*1e-10))
        ls = LineSearch()
        self.alpha_k, e, self.e0, self.no_update = \
           ls._line_search(self.func, self.fprime, r, self.p, g, e, self.e0,
                           maxstep=self.maxstep, c1=self.c1,
                           c2=self.c2, stpmax=self.stpmax)
        if self.alpha_k is None:
            raise RuntimeError("LineSearch failed!")
        dr = self.alpha_k * self.p
        atoms.set_positions((r+dr).reshape(len(atoms),-1))
        self.r0 = r
        self.g0 = g
        self.dump((self.r0, self.g0, self.e0, self.task, self.H))
    def update(self, r, g, r0, g0, p0):
        # BFGS update of the inverse Hessian approximation self.H from the
        # latest displacement dr = r - r0 and gradient change dg = g - g0.
        self.I = eye(len(self.atoms) * 3, dtype=int)
        if self.H is None:
            # First call: start from the identity.
            self.H = eye(3 * len(self.atoms))
            #self.B = np.linalg.inv(self.H)
            return
        else:
            dr = r - r0
            dg = g - g0
            # Skip the update unless the previous step made progress along a
            # descent direction (always update while replaying a trajectory).
            if not ((self.alpha_k > 0 and abs(np.dot(g,p0))-abs(np.dot(g0,p0)) < 0) \
                or self.replay):
                return
            if self.no_update == True:
                print 'skip update'
                return
            try: # this was handled in numeric; keep it for extra safety
                rhok = 1.0 / (np.dot(dg,dr))
            except ZeroDivisionError:
                rhok = 1000.0
                print "Divide-by-zero encountered: rhok assumed large"
            if isinf(rhok): # numpy yields inf rather than raising; patch it
                rhok = 1000.0
                print "Divide-by-zero encountered: rhok assumed large"
            A1 = self.I - dr[:, np.newaxis] * dg[np.newaxis, :] * rhok
            A2 = self.I - dg[:, np.newaxis] * dr[np.newaxis, :] * rhok
            H0 = self.H
            self.H = np.dot(A1, np.dot(self.H, A2)) + rhok * dr[:, np.newaxis] \
                     * dr[np.newaxis, :]
            #self.B = np.linalg.inv(self.H)
    def func(self, x):
        """Objective function for use of the optimizers"""
        self.atoms.set_positions(x.reshape(-1, 3))
        self.function_calls += 1
        # Scale the problem as SciPy uses I as initial Hessian:
        return self.atoms.get_potential_energy() / self.alpha
    def fprime(self, x):
        """Gradient of the objective function for use of the optimizers"""
        self.atoms.set_positions(x.reshape(-1, 3))
        self.force_calls += 1
        # Remember that forces are minus the gradient!
        # Scale the problem as SciPy uses I as initial Hessian.
        f = self.atoms.get_forces().reshape(-1)
        return - f / self.alpha
    def replay_trajectory(self, traj):
        """Initialize hessian from old trajectory."""
        self.replay = True
        if isinstance(traj, str):
            from ase.io.trajectory import PickleTrajectory
            traj = PickleTrajectory(traj, 'r')
        atoms = traj[0]
        r0 = None
        g0 = None
        # Re-run the BFGS update over each consecutive pair of trajectory
        # frames so self.H reflects the recorded history.
        for i in range(0, len(traj) - 1):
            r = traj[i].get_positions().ravel()
            g = - traj[i].get_forces().ravel() / self.alpha
            self.update(r, g, r0, g0, self.p)
            self.p = -np.dot(self.H,g)
            r0 = r.copy()
            g0 = g.copy()
        self.r0 = r0
        self.g0 = g0
    def log(self, forces):
        # Write one summary line: step count, force evaluations, wall time,
        # potential energy, and max per-atom force magnitude.
        fmax = sqrt((forces**2).sum(axis=1).max())
        e = self.atoms.get_potential_energy()
        T = time.localtime()
        if self.logfile is not None:
            name = self.__class__.__name__
            self.logfile.write('%s: %3d[%3d] %02d:%02d:%02d %15.6f %12.4f\n'
                               % (name, self.nsteps, self.force_calls,
                                  T[3], T[4], T[5], e, fmax))
            self.logfile.flush()
def wrap_function(function, args):
    """Wrap *function* so the extra *args* are appended to every call and the
    number of invocations is counted.

    Returns ``(ncalls, wrapper)`` where *ncalls* is a one-element list whose
    single entry is the running call count (a list so the closure can mutate
    it in place).
    """
    call_count = [0]
    def wrapped(x):
        call_count[0] = call_count[0] + 1
        return function(x, *args)
    return call_count, wrapped
|
askhl/ase
|
ase/optimize/bfgslinesearch.py
|
Python
|
gpl-2.0
| 7,150
|
[
"ASE"
] |
d2f4ce524c6aa71b458a57ec6ead9a7fefbdd5c48aa86dcc9ce0d08482d20b9f
|
"""
Performance test created using multi-mechnize to analyze time
for update processing with MySQL.
"""
import random
import string
import time
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
def random_generator(size=6, chars=string.ascii_letters):
    """Return a random string of *size* characters drawn from *chars*."""
    # Use range (not xrange) so the helper works on both Python 2 and 3;
    # xrange is a NameError on Python 3 and range is equivalent here.
    return ''.join(random.choice(chars) for _ in range(size))
class Transaction(object):
    """Multi-Mechanize transaction: hammers JobDB.setJobParameter with a
    random number (1000-3000) of random key/value pairs against job id 2 and
    records the elapsed wall-clock time in custom_timers."""
    def __init__(self):
        self.JobDB = JobDB()
        self.custom_timers = {}
    def run(self):
        started = time.time()
        iterations = random.randint(1000, 3000)
        for _ in xrange(0, iterations):
            param_key = random_generator()
            param_value = random_generator(size=12)
            self.JobDB.setJobParameter(2, param_key, param_value)
        self.custom_timers['Execution_Time'] = time.time() - started
if __name__ == '__main__':
    # Allow running the transaction standalone (outside multi-mechanize) as a
    # quick smoke test; prints the collected timer dict (Python 2 print).
    trans = Transaction()
    trans.run()
    print trans.custom_timers
|
arrabito/DIRAC
|
tests/Performance/MySQLJobMonitoring/test_scripts/update.py
|
Python
|
gpl-3.0
| 842
|
[
"DIRAC"
] |
50ea3b65889728c2be746f4cf7324a461c858d4645aee4fd5f932cf88f67652a
|
#!/usr/bin/python -x
# $Rev:: 427 $
# $Author:: Bruno.CostaRendon@flextronics.com $
# $Date:: 2016-08-15 $
# $Update:: Enable/Disable VI communication $
#
# openXC-Modem Vehicle Interface (VI) agent class and associated functions
import logging
import os.path
import subprocess
import re
import string
import sys
import time
import datetime
import socket
import os
from smbus import SMBus
try:
    import bluetooth
except ImportError:
    # Bug fix: LOG is only defined by 'from xc_common import *' below, so the
    # previous LOG.debug() call here raised NameError at import time whenever
    # pybluez was missing.  Just fall back to bluetooth = None; callers must
    # check for it before using the bluetooth interface.
    bluetooth = None
from xc_common import *
from ota_upgrade import *
import xc_led
import xc_ver
#--------------------------------------------------------------------
# Web upload settings
#--------------------------------------------------------------------
# Local configuration file consulted by file_discovery().
XCMODEM_CONFIG_FILE = 'xc.conf'
# VI trace files (raw capture, rotated backup, processed).
XCMODEM_TRACE_RAW_FILE = 'vi_trace_raw.json'
XCMODEM_TRACE_RAW_BK_FILE = 'vi_trace_raw_bk.json'
XCMODEM_TRACE_FILE = 'vi_trace.json'
# V2X trace files (same roles as above).
XCMODEM_V2X_TRACE_RAW_FILE = 'v2x_trace_raw.json'
XCMODEM_V2X_TRACE_RAW_BK_FILE = 'v2x_trace_raw_bk.json'
XCMODEM_V2X_TRACE_FILE = 'v2x_trace.json'
# SD-card data partition used for archived traces.
XCMODEM_DATA_MOUNT = '/mnt/data'
XCMODEM_DATA_DEVICE = '/dev/mmcblk0'
XCMODEM_DATA_PARTITION = 'mmcblk0p2'
XCMODEM_DATA_TRACE_PREFIX = '/mnt/data/vi_trace_raw'
XCMODEM_DATA_V2X_TRACE_PREFIX = '/mnt/data/v2x_trace_raw'
XCMODEM_DATA_TRACE_SUFFIX = 'json'
UPLOAD_TIMEOUT_FACTOR = 0.05  # in=7K/s out=140K/s
UPLOAD_OVERHEAD_TIME = 30  # 30s
TIMEOUT_RC = 124  # exit status returned by the 'timeout' utility on expiry
#--------------------------------------------------------------------
FIRMWARE_RESET_BUTTON_MONITOR_INTERVAL = 5 # in seconds
#---------------------------------------------
def vi_bt_restart(name):
    """Restart the bluetooth stack and redo pairing registration.

    Raises the 'bt_restart' exit flag so BT-related threads terminate while
    the stack is bounced, then clears it afterwards.  *name* is only used to
    tag log messages.
    """
    LOG.debug("Re-starting bluetooth ...")
    # terminate BT related apps if applicable using exit flag
    exit_flag['bt_restart'] = 1
    cmd = "/etc/init.d/bluetooth restart; /root/OpenXCAccessory/startup/btrestart; /root/OpenXCAccessory/startup/hci_on"
    try:
        subprocess.call(cmd, shell=True)
    except Exception as e:
        # Bug fix: was 'except Exceptions as e' — 'Exceptions' is an undefined
        # name, so any failure here raised NameError instead of being logged.
        LOG.debug("%s %s" % (name, e))
        pass
    else:
        pairing_registration()
    exit_flag['bt_restart'] = 0
def vi_bt5_pair(addr, debug):
    # Pair with the VI at *addr* under BlueZ 5 by driving an interactive
    # bluetoothctl session via pexpect.  The PIN is hard-coded to '1234'.
    # Returns True on success, False when any expect step times out.
    import pexpect
    # Bluetooth 5 client tasks are performed using bluetoothctl
    # Thus, python expected like function will be utilized for this task
    try:
        child = pexpect.spawn('bluetoothctl')
        if debug:
            # Echo the whole session to stdout for troubleshooting.
            child.logfile = sys.stdout
        child.expect('.*#')
        child.sendline('agent on')
        child.sendline('pairable on')
        child.sendline('scan on')
        # Wait until scanning reports the target device before pairing.
        child.expect('.*NEW.* Device %s.*' % addr)
        child.sendline('pair %s' % addr)
        child.expect('.*PIN code:')
        child.sendline('1234')
        child.expect('Pairing successful')
        child.sendline('exit')
    except pexpect.TIMEOUT:
        # Note: Bluez5 registers the device although it fails to pair !!
        # Thus, remove the invalid entry if applicable
        cmd = "bluez-test-device list | grep \"%s\"" % addr
        if not subprocess.call(cmd, shell=True):
            cmd = "bluez-test-device remove %s" % addr
            subprocess.call(cmd, shell=True)
        return False
    else:
        return True
#-------------------------------------------------------------------------------------------
def vi_cleanup():
    # Housekeeping before (re)starting the VI agent: remove stale bluetooth
    # pairings, delete leftover trace files, kill any lingering pppd, and
    # switch all LEDs off.
    LOG.debug("Performing cleanup...")
    # Previous paired VI might incidently take over assigned mb/md
    # app ports; thus, we should clean up old paired devices
    # check if the device has already paired up
    cmd = "for d in `bluez-test-device list | grep -v %s | grep -v %s | awk '/OpenXC-VI-/ {print $1}'`; \
           do bluez-test-device remove $d; done" % (OPENXC_V2X_NAME_PREFIX, OPENXC_MODEM_NAME_PREFIX)
    if subprocess.call(cmd, shell=True):
        LOG.debug("clean up fail")
    # Remove lingering trace file
    cmd = "rm -f %s %s %s" % (XCMODEM_TRACE_RAW_FILE, XCMODEM_TRACE_RAW_BK_FILE, XCMODEM_TRACE_FILE)
    subprocess.call(cmd, shell=True)
    cmd = "rm -f %s %s %s" % (XCMODEM_V2X_TRACE_RAW_FILE, XCMODEM_V2X_TRACE_RAW_BK_FILE, XCMODEM_V2X_TRACE_FILE)
    subprocess.call(cmd, shell=True)
    # clean up lingering pppd process if exist
    subprocess.call('if [ -r /var/run/ppp0.pid ]; then echo "cleanup pppd ..."; killall -q pppd; sleep 3; fi', shell=True)
    # turn off all led - needed to be after pppd cleaning up to free /tty/ACM3 for GSM Led if applicable
    xc_led.all_leds(0)
#---------------------------------------------------------------
# USB connection threads
#---------------------------------------------------------------
class usbSendThread (threading.Thread):
    """Drain the VI send queue.

    USB writes are deliberately suppressed (writing somehow halts the VI
    dongle stream), so queued messages are consumed and dropped after being
    NUL-terminated.  The loop exits when exit_flag[eflag] is raised.
    """
    def __init__(self, name, usb, queue, eflag):
        threading.Thread.__init__(self)
        self.name = name
        self.device = usb
        self.queue = queue
        self.eflag = eflag
    def run(self):
        LOG.debug("Starting " + self.name)
        while not exit_flag[self.eflag]:
            while not self.queue.empty():
                try:
                    payload = self.queue.get()
                    # Message format spec: every message ends with \0.
                    if not payload.endswith(chr(0)):
                        payload = payload + chr(0)
                    # Intentionally no self.device.write() here — writing to
                    # the dongle stalls its data stream (see class docstring).
                except IOError as e:
                    exit_flag[self.eflag] = 1
                    LOG.debug("%s %s" % (self.name, e))
                    break
            msleep(1)
        LOG.debug("disconnected " + self.name)
class usbRecvThread (threading.Thread):
    """Continuously read from the VI USB device and push each chunk onto the
    receive queue until exit_flag[eflag] is raised or a read error occurs."""
    def __init__(self, name, usb, queue, eflag):
        threading.Thread.__init__(self)
        self.name = name
        self.device = usb
        self.queue = queue
        self.eflag = eflag
    def run(self):
        LOG.debug("Starting " + self.name)
        while not exit_flag[self.eflag]:
            try:
                chunk = self.device.read()
            except IOError as e:
                # A failed read ends the session: raise the exit flag so
                # peer threads shut down too.
                LOG.debug("%s %s" % (self.name, e))
                exit_flag[self.eflag] = 1
                break
            self.queue.put(chunk)
        LOG.debug("disconnected " + self.name)
#--------------------------------------------------
# usb modem class
#--------------------------------------------------
# Derived from openxc/sources/usb.py
import usb.core
import usb.util
class xcmodemUsb:
    """Minimal USB transport for the OpenXC vehicle interface (VI).

    Derived from openxc/sources/usb.py.  Finds the first matching USB device
    at construction; use valid() to check whether one was found.
    """
    DEFAULT_VENDOR_ID = 0x1bc4
    DEFAULT_PRODUCT_ID = 0x0001
    DEFAULT_READ_REQUEST_SIZE = 512
    # If we don't get DEFAULT_READ_REQUEST_SIZE bytes within this number of
    # milliseconds, bail early and return whatever we have - could be zero,
    # could be just less than 512. If data is really pumpin' we can get better
    # throughput if the READ_REQUEST_SIZE is higher, but this delay has to be
    # low enough that a single request isn't held back too long.
    DEFAULT_READ_TIMEOUT = 200
    DEFAULT_INTERFACE_NUMBER = 0
    VEHICLE_DATA_IN_ENDPOINT = 2
    VEHICLE_DATA_OUT_ENDPOINT = 5
    LOG_IN_ENDPOINT = 11
    def __init__(self, vendor_id=DEFAULT_VENDOR_ID,
                 product_id=DEFAULT_PRODUCT_ID):
        """Probe for a VI with the given USB vendor/product id and claim the
        first one that accepts set_configuration()."""
        self.device = None
        devices = usb.core.find(find_all=True, idVendor=vendor_id, idProduct=product_id)
        for device in devices:
            try:
                device.set_configuration()
            except usb.core.USBError as e:
                LOG.error("Skipping USB device: %s", e)
            else:
                self.device = device
                addr = "%.4X:%.4X" % (vendor_id, product_id)
                LOG.info("found VI USB %s" % addr)
                # Record the address so the rest of the agent can report it.
                port_mac['vi_app'] = addr
                return
        LOG.debug("VI as USB device isn't detected")
    def valid(self):
        # Returns the claimed device, or None when discovery failed.
        return self.device
    def stop(self):
        # Release all USB resources held for this device.
        usb.util.dispose_resources(self.device)
    def read(self, timeout=None,
             endpoint_address=VEHICLE_DATA_IN_ENDPOINT,
             read_size=DEFAULT_READ_REQUEST_SIZE):
        """Read up to *read_size* bytes from the IN endpoint; returns "" on a
        plain timeout, raises IOError on any other failure."""
        timeout = timeout or self.DEFAULT_READ_TIMEOUT
        try:
            return self.device.read(0x80 + endpoint_address,
                read_size, self.DEFAULT_INTERFACE_NUMBER, timeout).tostring()
        except (usb.core.USBError, AttributeError) as e:
            # Bug fix: AttributeError (e.g. self.device is None) has no
            # .errno attribute, so 'e.errno' here raised a second exception
            # instead of reporting the real problem.  getattr keeps the
            # timeout fast-path (errno 110) working for USBError only.
            if getattr(e, 'errno', None) == 110:
                # Timeout, it may just not be sending
                return ""
            raise IOError("USB device couldn't be read", e)
    def write(self, data):
        """Write *data* to the OUT endpoint; raises IOError on failure."""
        try:
            self.device.write(self.VEHICLE_DATA_OUT_ENDPOINT, data)
        except (usb.core.USBError, AttributeError) as e:
            raise IOError("USB device couldn't be written", e)
#---------------------------------------------------------------------
# class for Modem's VI interface
#---------------------------------------------------------------------
class xcModemVi:
    def __init__(self, port, inQ, outQ, sdebug = 0, debug = 0):
        """VI (vehicle interface) agent state.

        port: app port for the VI link; inQ/outQ: receive/send queues;
        sdebug/debug: verbosity flags forwarded to helper modules.
        Side effects: probes the bluetooth daemon version, reads the board
        id, opens the charger/LED i2c buses and checks the battery.
        """
        self.port = port
        self.addr = None                # discovered VI address (BT MAC or USB id)
        self.socket = None
        self.discovery_once = False
        self.inQ = inQ
        self.outQ = outQ
        self.fp = None                  # VI trace file handle
        self.v2x_fp = None              # V2X trace file handle
        self.name = 'vi_app'
        self.trace_enable = 0
        self.v2x_trace_enable = 0
        # Handles used to signal the various worker loops to stop.
        self.stop_web_upload = None
        self.stop_v2x_web_upload = None
        self.stop_trace = None
        self.stop_monitor = None
        self.stop_button_monitor = None
        self.button_irq_cnt = 1
        self.trace_lock = threading.Lock()
        self.trace_raw_lock = threading.Lock()
        self.v2x_trace_raw_lock = threading.Lock()
        self.threads = []
        self.lost_cnt = 0
        self.gsm = None                 # lazily created by gsm_instance()
        self.bt5 = self.bt5_check()     # True when BlueZ >= 5 is installed
        self.sdebug = sdebug
        self.debug = debug
        self.boardid = boardid_inquiry(1)
        self.config_mode = None
        self.sd_space = 0
        self.usb = None                 # xcmodemUsb when the VI is on USB
        self.modem_ip_addr = None
        self.modem_port = None
        self.conn_type = None
        # LEDs instances
        pathid = self.boardid > 0
        self.bt_led = xc_led.xcModemLed('bt_led', led_path['bt'][pathid])
        self.wifi_led = xc_led.xcModemLed('wifi_led', led_path['wifi'][pathid])
        self.bat_led_grn = xc_led.xcModemLed('bat_led_grn', led_path['bat_grn'][pathid])
        self.bat_led_red = xc_led.xcModemLed('bat_led_red', led_path['bat_red'][pathid])
        modem_state[self.name] = vi_state.IDLE
        self.charger = SMBus(0)  # open Linux device /dev/ic2-0
        self.led_cntl = SMBus(2)  # open Linux device /dev/ic2-2
        self.charger_fault = 0
        self.battery_check()
def cur_conn_type(self):
return self.conn_type;
def modem_mac_inquiry(self):
# Return modem mac address
mac = subprocess.check_output('hcitool dev | grep hci0', shell=True).split()[1]
LOG.info("%s %s" % (board_type[self.boardid]['prefix'], mac))
return mac
def bt5_check(self):
# check if bluetooth 5 is used
bt_ver = subprocess.check_output("bluetoothd -v | awk -F . '{print $1}'", shell=True).strip()
LOG.debug('Bluez' + bt_ver)
return (int(bt_ver) >= 5)
def auto_discovery(self):
# Return address once the first openxc device found
LOG.info("Auto discovery ...")
try:
nearby_devices = bluetooth.discover_devices(lookup_names = True)
except BluetoothError as e:
LOG.error("BT error %s %s" % (self.name, e))
return None
for addr, name in nearby_devices:
LOG.debug(" %s - %s" % (addr, name))
if (name is not None \
and name.startswith(OPENXC_DEVICE_NAME_PREFIX) \
and not name.startswith(OPENXC_MODEM_NAME_PREFIX) \
and not name.startswith(OPENXC_V2X_NAME_PREFIX)):
LOG.info("Found %s - %s" % (addr, name))
self.addr = addr
break
self.discovery_once = True
return self.addr
    def file_discovery(self, fname):
        # Parse the local configuration file *fname* into the global
        # conf_options dict and return the configured VI address (or None).
        # Also configures passthru flags, runs the power profile, triggers
        # auto-upgrade and probes for a USB-attached VI when enabled.
        LOG.info("Static discovery ...")
        brightness_override = 0
        if os.path.exists(fname):
            # setup default based on modem/v2x board
            for key in ['gsm_enable', 'gps_enable', 'openxc_vi_enable', 'openxc_md_enable']:
                conf_options[key] = int(board_type[self.boardid]['type'] != 'V2X')
            try:
                conf = open(fname, "r")
                LOG.info("  Found %s ..." % fname)
                for line in conf:
                    if not line.startswith('#') and line.strip():  # skip comments/blank lines
                        L = line.split()  # tokens: key, value
                        key = L[0]
                        if conf_options.get(key) is not None:  # for valid key
                            LOG.debug("old: (%s:%s)" % (key, conf_options[key]))
                            if key == 'gsm_enable' or key == 'gps_enable':  # V2X doesn't support gsm/gps
                                if board_type[self.boardid]['type'] == 'V2X':
                                    LOG.error("%s isn't a valid option of %s - skip it !!" % \
                                          (key, board_type[self.boardid]['type']))
                                    continue
                            if re.search(r'_enable', key, re.M|re.I):
                                # *_enable options are stored as ints
                                conf_options[key] = int(L[1])
                            else:
                                if key == 'power_saving_mode':  # validate power_mode
                                    if power_mode.get(L[1]) is None:
                                        LOG.error("%s isn't a valid value of %s - skip it !!" % (L[1], key))
                                        continue
                                    elif not brightness_override:  # adjust brightness default if applicable
                                        conf_options['led_brightness'] = power_mode[L[1]]['led_brightness']
                                elif key == 'openxc_vi_trace_filter_script':  # validate filter script
                                    if not os.path.exists(L[1]) or not os.access(L[1], os.X_OK):
                                        LOG.error("%s isn't an executable script for %s - skip it !!" % (L[1], key))
                                        continue
                                elif key == 'led_brightness':  # validate led brightness
                                    brightness = int(L[1])
                                    if brightness < 0 or brightness > 255:
                                        LOG.error("%s isn't a valid value of %s - skip it !!" % (L[1], key))
                                    else:
                                        conf_options[key] = brightness
                                        brightness_override = 1
                                        LOG.debug("new: (%s:%s)" % (key, conf_options[key]))
                                    continue
                                conf_options[key] = L[1]
                                LOG.debug("new: (%s:%s)" % (key, conf_options[key]))
                        else:
                            LOG.error("%s isn't a valid key in %s - skip it !!" % (key, fname))
            except IOError:
                LOG.error("fail to open %s" % fname)
            else:
                conf.close()
            if not conf_options['openxc_vi_enable']:
                LOG.info("vi_app is disable")
                # nothing to passthru
                for l in passthru_flag.items():
                    (key, val) = l
                    passthru_flag[key] = 0
            else:
                addr = conf_options['openxc_vi_mac']
                if addr is not None and addr != 'None':
                    self.addr = addr
                    LOG.info("found %s" % self.addr)
                # config passthru
                for l in passthru_flag.items():
                    (key, val) = l
                    passthru_flag[key] = passthru_enable[key]
                self.vi_power_profile()
                vi_auto_upgrade()  # Note: auto upgrade might take awhile
                # handle vi usb-connection if applicable
                if self.usb is None:
                    self.usb = xcmodemUsb()
                    if self.usb.valid() is None:
                        del self.usb
                        self.usb = None
                    else:
                        # USB VI found: use its recorded address instead
                        self.addr = port_mac[self.name]
        return self.addr
    def web_discovery(self, fname):
        # Obtain the config file from predefined URL using scp
        # To maintain the original file, '.web' suffix will be used for
        # the web download file
        # Returns the VI address parsed from the downloaded file, or None when
        # the network/scp step fails.
        LOG.info("Web discovery ... ")
        # Use WiFi if applicable
        if not check_ping() == 0:
            # Use GSM if applicable
            if conf_options['gsm_enable']:
                if not self.gsm.start():
                    # No need to move on without network connection
                    return None
        # Use sshpass with given psswd for scp
        # Remote cloud server require PEM which is provided in configuration option
        wfname = fname + ".web"
        # Form unique config file name: insert "<hostname>." before the last
        # path component of the configured URL.
        if re.search(r'/', conf_options['web_scp_config_url'], re.M|re.I):
            delimiter = '/'
        else:
            delimiter = ':'
        prefix = "%s%s." % (delimiter, socket.gethostname())
        cfname = prefix.join(conf_options['web_scp_config_url'].rsplit(delimiter, 1))
        cmd = "scp -o StrictHostKeyChecking=no -i %s %s@%s %s" % \
              (conf_options['web_scp_pem'], \
               conf_options['web_scp_userid'], \
               cfname, \
               wfname)
        # LOG.debug("issuing '%s'" % cmd)
        if subprocess.call(cmd, shell=True):
            LOG.error("fail to scp %s from %s@%s" % (fname, \
                       conf_options['web_scp_userid'], \
                       cfname))
            LOG.warn("Please make sure to register your device %s on the web server" % socket.gethostname())
            return None
        # Use WiFi if applicable
        if not check_ping() == 0:
            # Use GSM if applicable
            if conf_options['gsm_enable']:
                # Tear off gsm connection
                self.gsm.stop()
        # parse the file now
        return self.file_discovery(wfname)
    def gsm_instance(self, force = 0):
        """Lazily create (or with force=1, re-create) the GSM modem helper.

        Args:
            force: when truthy, destroy any existing self.gsm first so a
                   fresh xcModemGsm is instantiated.

        Returns:
            True when a GSM instance exists and its prep() succeeded,
            False when prep() reported no network access.
        """
        sys.path.append('../modem') # GSM is only supported in modem
        import xc_modem_gsm
        if force:
            if self.gsm is not None:
                LOG.info("Reinstantiate " + self.gsm.name)
                del self.gsm
                self.gsm = None
        # Instantiate gsm module as needed
        if self.gsm is None:
            # ppp_tear_off comes from the active power-saving profile
            ppp_tear_off = power_mode[conf_options['power_saving_mode']]['ppp_tear_off']
            self.gsm = xc_modem_gsm.xcModemGsm(sdebug = self.sdebug, debug = self.debug, tear_off = ppp_tear_off)
            if not self.gsm.prep(conf_options['web_scp_apn']):
                LOG.error("There is no network access !!!")
                return False
        return True
def modem_inquiry(self):
if (self.modem_ip_addr is None):
self.modem_ip_addr = conf_options['xcmodem_ip_addr']
return conf_options['xcmodem_ip_addr']
else:
return self.modem_ip_addr
def modem_available(self, modem_ip_addr):
LOG.info("Checking if Modem is available %s" % modem_ip_addr)
cmd = "ping -c 1 " + modem_ip_addr + " > /dev/null 2>&1"
LOG.info(cmd)
response = subprocess.call(cmd, shell=True)
if response == 0:
LOG.info("Modem responded to ping")
self.modem_ip_addr = modem_ip_addr
self.modem_port = 4567
return 1
else:
LOG.info("Modem did NOT respond to ping")
return 0
    def vi_inquiry(self):
        """Determine the VI (vehicle interface) address using the predefined
        priority scheme: local config file, then web-downloaded config, then
        Bluetooth auto-discovery.

        Returns:
            The resolved address (self.addr) or None when nothing was found.
        """
        # determine the vi_app address using pre-defined priority scheme
        if self.file_discovery(XCMODEM_CONFIG_FILE) is None and conf_options['openxc_vi_enable']:
            if conf_options['web_scp_config_download_enable'] and conf_options['gsm_enable']:
                # Prepare GSM if applicable using correct options
                if not self.gsm_instance():
                    # skip web discovery
                    if self.auto_discovery() is None:
                        LOG.info("None OPENXC-VI Device Address Assignment!!!")
                elif self.web_discovery(XCMODEM_CONFIG_FILE) is None:
                    if self.auto_discovery() is None:
                        LOG.info("None OPENXC-VI Device Address Assignment!!!")
            elif self.auto_discovery() is None:
                LOG.info("None OPENXC-VI Device Address Assignment!!!")
        # Saving the current config file for reference
        self.conf_save(XCMODEM_CONFIG_FILE + ".cur")
        return self.addr
    def vi_discovery(self):
        """Run a Bluetooth inquiry and look for self.addr among nearby devices.

        The BT LED blinks slowly during discovery and is switched off when done.

        Returns:
            True when a device matching self.addr was discovered,
            False on Bluetooth error or when the address was not seen.
        """
        LOG.info("Performing inquiry...")
        self.bt_led.blink(1) # slow blink
        try:
            nearby_devices = bluetooth.discover_devices(duration=10,lookup_names = True)
        except BluetoothError as e:
            LOG.error("BT error %s %s" % (self.name, e))
            return False
        LOG.info("found %d devices" % len(nearby_devices))
        for addr, name in nearby_devices:
            LOG.info("  %s - %s" % (addr, name))
            if (addr is not None and addr == self.addr):
                self.bt_led.off() # done discovery
                return True
        self.bt_led.off() # done discovery
        return False
    def vi_pair(self):
        """Ensure the VI dongle at self.addr is Bluetooth-paired.

        Checks the bluez paired-device list first; only attempts pairing
        (BT5 helper or bluez-simple-agent with PIN '1234') when missing.

        Returns:
            True when already paired, the pairing result (truthy on success)
            otherwise.
        """
        # Work-around for dongle pairing
        # subprocess.call('hciconfig hci0 sspmode disable', shell=True)
        # check if the device has already paired up
        cmd = "bluez-test-device list | grep \"%s\"" % self.addr
        # LOG.debug("issuing: " + cmd)
        if subprocess.call(cmd, shell=True):
            # grep found nothing -> not in the paired list; re-pairing
            LOG.info("pairing %s ..." % self.addr)
            self.bt_led.blink() # fast blink
            if self.bt5:
                rc = vi_bt5_pair(self.addr, self.debug)
            else:
                cmd = "echo '1234' | bluez-simple-agent hci0 %s 2>&1 1>/dev/null" % self.addr
                # LOG.debug("issuing: " + cmd)
                rc = not subprocess.call(cmd, shell=True)
            self.bt_led.off() # done pairing
            return rc
        return True
    def modem_connect(self):
        """Open a TCP connection to the modem at self.modem_ip_addr:self.modem_port.

        Retries up to MAX_CONNECTION_ATTEMPT times with a 3s pause between
        failures; on success stores the socket in self.socket and turns the
        BT LED on (after a blink).

        Returns:
            The connected socket, or the previous value of self.socket
            (presumably None — set in __init__, not visible here) on failure.
        """
        # Modem is acting as Master/Client agent
        LOG.info("trying to connect %s ..." % self.modem_ip_addr)
        attempt = 1
        while (attempt <= MAX_CONNECTION_ATTEMPT):
            # Ensure if the device is paired
            #if self.vi_pair():
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                LOG.info("trying to connect to %s at port %s" % (self.modem_ip_addr, self.modem_port))
                s.connect((self.modem_ip_addr, self.modem_port))
            except IOError:
                LOG.warn("Unable to connect to %s " % self.modem_ip_addr)
                #s.shutdown(socket.SHUT_RDWR)
                s.close()
                time.sleep(3)
            else:
                self.bt_led.blink()
                LOG.info("Opened modem connection at %s" % self.modem_port)
                self.socket = s
                #port_mac[self.name] = self.addr
                break;
            attempt += 1
        self.bt_led.on()
        return self.socket
def vi_connect(self):
# Modem is acting as Master/Client agent
LOG.info("connect %s ..." % self.addr)
attempt = 1
while (attempt <= MAX_CONNECTION_ATTEMPT):
# Ensure if the device is paired
if self.vi_pair():
socket = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
try:
socket.connect((self.addr, self.port))
except IOError:
LOG.warn("Unable to connect to %s" % self.addr)
else:
self.bt_led.on() # dongle connect
LOG.info("Opened bluetooth device at %s" % self.port)
self.socket = socket
port_mac[self.name] = self.addr
break;
attempt += 1
return self.socket
#-------------------------------------------------------------------
# Setup environment for backing the log to SD card datalog partition
#-------------------------------------------------------------------
    def trace_sd_backup_prep(self):
        """Mount the SD-card data partition and record its free space.

        Runs only when 'openxc_vi_trace_number_of_backup' > 0.  On mount
        failure SD backup is disabled by zeroing that option; on success
        self.sd_space is set to the free space in bytes.
        """
        # Prepare mSD mount
        LOG.debug("SD backup prep")
        if int(conf_options['openxc_vi_trace_number_of_backup']) > 0:
            # Shell pipeline: verify the partition exists on the device,
            # remount it fresh at XCMODEM_DATA_MOUNT, else exit 1.
            cmd = "fdisk -l %s | grep %s; \
                   if [ $? -eq 0 ]; then \
                       mount | grep %s; \
                       if [ $? -eq 0 ]; then \
                          umount %s; \
                       fi; \
                       mkdir -p %s; \
                       mount /dev/%s %s; \
                   else \
                       exit 1; \
                   fi" % (XCMODEM_DATA_DEVICE, XCMODEM_DATA_PARTITION, \
                          XCMODEM_DATA_MOUNT, \
                          XCMODEM_DATA_MOUNT, \
                          XCMODEM_DATA_MOUNT, \
                          XCMODEM_DATA_PARTITION, XCMODEM_DATA_MOUNT)
            # LOG.debug("issuing '%s'" % cmd)
            if subprocess.call(cmd, shell=True):
                LOG.error("fail to prepare %s - skip SD backup" % XCMODEM_DATA_MOUNT)
                conf_options['openxc_vi_trace_number_of_backup'] = 0    # Turn off SD backup
            else:
                # Free space in bytes: df reports KiB, hence the * 1024.
                cmd = "df -BK %s | tail -1 | awk '{print $4}' | awk -FK '{print $1}'" % XCMODEM_DATA_MOUNT
                # LOG.debug("issuing '%s'" % cmd)
                self.sd_space = int(subprocess.check_output(cmd, shell=True).split()[0]) * 1024
#-------------------------------------------------------------------
# Backup log to SD card datalog partition
#-------------------------------------------------------------------
def trace_sd_backup(self, bfname, bfsize, v2x_flag):
# sd backup file
LOG.debug("SD backup")
# check for space
fnum = int(conf_options['openxc_vi_trace_number_of_backup'])
while (fnum > 0) :
if self.sd_space < bfsize:
# remove file to make space
fname = "%s_%s.%s" % (XCMODEM_DATA_TRACE_PREFIX, fnum, XCMODEM_DATA_TRACE_SUFFIX)
if conf_options['openxc_vi_trace_backup_overwrite_enable']:
if os.path.exists(fname):
fsize = os.path.getsize(fname)
# LOG.debug("removing '%s'" % fname)
os.remove(fname)
self.sd_space += fsize
fnum -= 1
else:
LOG.info("Skip SD backup due to unsufficent space")
return # skip if no space left
else:
break
# pump up backup file
fnum = int(conf_options['openxc_vi_trace_number_of_backup'])
while (fnum > 0):
if v2x_flag:
fname1 = "%s_%s.%s" % (XCMODEM_DATA_V2X_TRACE_PREFIX, fnum, XCMODEM_DATA_TRACE_SUFFIX)
else:
fname1 = "%s_%s.%s" % (XCMODEM_DATA_TRACE_PREFIX, fnum, XCMODEM_DATA_TRACE_SUFFIX)
fnum -= 1
if v2x_flag:
fname2 = "%s_%s.%s" % (XCMODEM_DATA_V2X_TRACE_PREFIX, fnum, XCMODEM_DATA_TRACE_SUFFIX)
else:
fname2 = "%s_%s.%s" % (XCMODEM_DATA_TRACE_PREFIX, fnum, XCMODEM_DATA_TRACE_SUFFIX)
if os.path.exists(fname2):
if os.path.exists(fname1): # gain space
self.sd_space += os.path.getsize(fname1)
# LOG.debug("rename '%s to %s' " % (fname2, fname1))
os.rename(fname2, fname1)
# backup recent raw file
if v2x_flag:
fname = "%s_1.%s" % (XCMODEM_DATA_V2X_TRACE_PREFIX, XCMODEM_DATA_TRACE_SUFFIX)
else:
fname = "%s_1.%s" % (XCMODEM_DATA_TRACE_PREFIX, XCMODEM_DATA_TRACE_SUFFIX)
cmd = "cp -p %s %s" % (bfname, fname)
#LOG.debug("issuing '%s' " % cmd)
if subprocess.call(cmd, shell=True):
LOG.error("fail to backup %s" % fname)
else:
self.sd_space -= bfsize
#-------------------------------------------------------------------
# Control for capturing VI trace log
#-------------------------------------------------------------------
    def trace_start(self, interval, rfname, bfname):
        """Record one VI trace snapshot.

        Opens rfname for writing and enables tracing for `interval` seconds
        (the actual writing is done elsewhere while self.trace_enable is 1),
        then renames the raw file to bfname and optionally backs it up to SD.

        Args:
            interval: recording duration in seconds.
            rfname:   raw trace file written during recording.
            bfname:   stable backup name the raw file is renamed to.
        """
        LOG.debug("Recording start: %s" % rfname)
        # set up new trace
        self.trace_raw_lock.acquire()
        self.fp = open(rfname, "w+")
        self.trace_raw_lock.release()
        self.trace_enable = 1
        time.sleep(interval)
        self.trace_enable = 0
        self.trace_raw_lock.acquire()
        self.fp.close()
        if (os.path.isfile(rfname)):
            os.rename(rfname, bfname)
            bfsize = os.path.getsize(bfname)
        if (os.path.isfile(bfname)):
            LOG.debug("Recording stop (size: %s) : %s" % (bfsize, rfname))
        else:
            LOG.debug("VI Recording failed")
        if int(conf_options['openxc_vi_trace_number_of_backup']) > 0: # if SD backup is needed
            self.trace_sd_backup(bfname, bfsize,0)
        self.trace_raw_lock.release()
#-------------------------------------------------------------------
# Control for capturing V2X/RSU trace log
#-------------------------------------------------------------------
    def v2x_trace_start(self, interval, rfname, bfname):
        """Record one V2X/RSU trace snapshot (mirror of trace_start, but
        using the v2x file pointer/lock and v2x_flag=1 for SD backup).

        Args:
            interval: recording duration in seconds.
            rfname:   raw trace file written during recording.
            bfname:   stable backup name the raw file is renamed to.
        """
        LOG.debug("Recording start: %s" % rfname)
        # set up new trace
        self.v2x_trace_raw_lock.acquire()
        self.v2x_fp = open(rfname, "w+")
        self.v2x_trace_raw_lock.release()
        self.v2x_trace_enable = 1
        time.sleep(interval)
        self.v2x_trace_enable = 0
        self.v2x_trace_raw_lock.acquire()
        self.v2x_fp.close()
        if (os.path.isfile(rfname)):
            os.rename(rfname, bfname)
            bfsize = os.path.getsize(bfname)
        if (os.path.isfile(bfname)):
            LOG.debug("Recording stop (size: %s) : %s" % (bfsize, rfname))
        else:
            LOG.debug("v2x Recording failed")
        if int(conf_options['openxc_vi_trace_number_of_backup']) > 0: # if SD backup is needed
            self.trace_sd_backup(bfname, bfsize,1)
        self.v2x_trace_raw_lock.release()
def trace_prep(self, bfname, fname):
# make bk file readable so we can present it over network later on
#LOG.debug("Recording conversion")
# handle filtering script if applicable
if conf_options['openxc_vi_trace_filter_script'] is None or \
conf_options['openxc_vi_trace_filter_script'] == 'None':
filter = ""
else:
filter = "| %s" % conf_options['openxc_vi_trace_filter_script']
cmd = "sed -e 's/\\x0/\\r\\n/g' %s | sed -n -e '/{/ { /}/p }' %s > %s" % (bfname, filter, fname)
truncate_size = int(conf_options['openxc_vi_trace_truncate_size'])
self.trace_lock.acquire()
# LOG.debug("issuing '%s'" % cmd)
if subprocess.call(cmd, shell=True):
LOG.error("fail to convert %s" % fname)
elif truncate_size:
LOG.debug("Truncate %s to %s bytes" % (fname, truncate_size))
fp = open(fname, "rw+")
fp.truncate(truncate_size)
fp.close()
self.trace_lock.release()
    def web_upload(self, bfname, fname):
        """Prepare the latest VI trace and scp it to the configured cloud URL.

        Skips silently when no backup trace exists or no network path is
        available.  Nearly identical to web_v2x_upload() apart from the
        target URL / snapshot-duration options.

        Args:
            bfname: raw backup trace file (input to trace_prep).
            fname:  converted trace file that gets uploaded.
        """
        if not os.path.exists(bfname):
            LOG.debug("No trace yet to be uploaded")
            return
        # Prep the trace file
        self.trace_prep(bfname, fname)
        # Use WiFi if applicable
        if not check_ping() == 0:
            if (boardid_inquiry() > 1):
                #LOG.info("No connection to cloud found!! Skipping upload")
                return
            # Use GSM if applicable
            if conf_options['gsm_enable']:
                # Create gsm instance as needed
                if not self.gsm_instance():
                    return
                if not self.gsm.start():
                    # No need to move on without network
                    if modem_state[self.gsm.name] == app_state.LOST:
                        # Create new gsm instance to re-establishing modem connection
                        if not self.gsm_instance(force = 1):
                            return
                        if not self.gsm.start():
                            return
                    else:
                        return
        # OXM-93: Need timeout to terminate scp process in case something goes wrong
        timeout = (float(conf_options['openxc_vi_trace_snapshot_duration']) * UPLOAD_TIMEOUT_FACTOR) + UPLOAD_OVERHEAD_TIME
        # Use sshpass with given psswd for scp
        # Remote cloud server require PEM which is provided in configuration option
        if conf_options['web_scp_target_overwrite_enable']:
            timestamp = ""
        else:
            timestamp = ".%s" % datetime.datetime.utcnow().strftime("%y%m%d%H%M%S")
        # Build target as URL with "<hostname><timestamp>." prefixed to the
        # last path component, so uploads from different devices don't clash.
        if re.search(r'/', conf_options['web_scp_vi_target_url'], re.M|re.I):
            delimiter = '/'
        else:
            delimiter = ':'
        prefix = "%s%s%s." % (delimiter, socket.gethostname(), timestamp)
        target = prefix.join(conf_options['web_scp_vi_target_url'].rsplit(delimiter, 1))
        cmd = "timeout %s scp -o StrictHostKeyChecking=no -i %s %s %s@%s" % \
              (int(timeout), \
               conf_options['web_scp_pem'], \
               fname, \
               conf_options['web_scp_userid'], \
               target)
        #LOG.debug("VI_WEB_UPLOAD - issuing '%s'" % cmd)
        self.trace_lock.acquire()
        rc = subprocess.call(cmd, shell=True)
        if rc:
            if rc == TIMEOUT_RC:
                msg = "Timeout (%ds)" % int(timeout)
                #modem_state[self.gsm.name] = app_state.LOST
                modem_state[self.name] = app_state.LOST
            else:
                msg = "Fail"
            LOG.error("%s to scp upload %s to %s@%s" % (msg, fname, \
                       conf_options['web_scp_userid'], \
                       target))
        self.trace_lock.release()
        # Use WiFi if applicable
        if not check_ping() == 0:
            # Use GSM if applicable
            if conf_options['gsm_enable']:
                # Tear off gsm connection
                self.gsm.stop()
#===================================================================
    def web_v2x_upload(self, bfname, fname):
        """Prepare the latest V2X/RSU trace and scp it to the configured
        cloud URL.  Mirror of web_upload() using the V2X target URL and
        snapshot-duration options.

        Args:
            bfname: raw backup trace file (input to trace_prep).
            fname:  converted trace file that gets uploaded.
        """
        LOG.debug("******>>>> Start uploading trace to Web")
        if not os.path.exists(bfname):
            #LOG.debug("No trace yet to be uploaded")
            return
        # Prep the trace file
        self.trace_prep(bfname, fname)
        # Use WiFi if applicable
        if not check_ping() == 0:
            if (boardid_inquiry() > 1):
                #LOG.info("No connection to cloud found!! Skipping upload")
                return
            # Use GSM if applicable
            if conf_options['gsm_enable']:
                # Create gsm instance as needed
                if not self.gsm_instance():
                    return
                if not self.gsm.start():
                    # No need to move on without network
                    if modem_state[self.gsm.name] == app_state.LOST:
                        # Create new gsm instance to re-establishing modem connection
                        if not self.gsm_instance(force = 1):
                            return
                        if not self.gsm.start():
                            return
                    else:
                        return
        # OXM-93: Need timeout to terminate scp process in case something goes wrong
        timeout = (float(conf_options['openxc_v2x_trace_snapshot_duration']) * UPLOAD_TIMEOUT_FACTOR) + UPLOAD_OVERHEAD_TIME
        # Use sshpass with given psswd for scp
        # Remote cloud server require PEM which is provided in configuration option
        if conf_options['web_scp_target_overwrite_enable']:
            timestamp = ""
        else:
            timestamp = ".%s" % datetime.datetime.utcnow().strftime("%y%m%d%H%M%S")
        # Build target as URL with "<hostname><timestamp>." prefixed to the
        # last path component, so uploads from different devices don't clash.
        if re.search(r'/', conf_options['web_scp_xcV2Xrsu_target_url'], re.M|re.I):
            delimiter = '/'
        else:
            delimiter = ':'
        prefix = "%s%s%s." % (delimiter, socket.gethostname(), timestamp)
        target = prefix.join(conf_options['web_scp_xcV2Xrsu_target_url'].rsplit(delimiter, 1))
        cmd = "timeout %s scp -o StrictHostKeyChecking=no -i %s %s %s@%s" % \
              (int(timeout), \
               conf_options['web_scp_pem'], \
               fname, \
               conf_options['web_scp_userid'], \
               target)
        #LOG.debug("XCV2X_WEB_UPLOAD - issuing '%s'" % cmd)
        self.trace_lock.acquire()
        rc = subprocess.call(cmd, shell=True)
        if rc:
            if rc == TIMEOUT_RC:
                msg = "Timeout (%ds)" % int(timeout)
                #modem_state[self.gsm.name] = app_state.LOST
                modem_state[self.name] = app_state.LOST
            else:
                msg = "Fail"
            LOG.error("%s to scp upload %s to %s@%s" % (msg, fname, \
                       conf_options['web_scp_userid'], \
                       target))
        self.trace_lock.release()
        # Use WiFi if applicable
        if not check_ping() == 0:
            # Use GSM if applicable
            if conf_options['gsm_enable']:
                # Tear off gsm connection
                self.gsm.stop()
#===================================================================
def conf_save(self, fname):
LOG.debug("Configuration saving")
fp = open(fname, "w+")
for l in conf_options.items():
(key, val) = l
fp.write("%s %s\r\n" % (key, val))
fp.close()
def vi_exit(self):
# terminate passthru
for l in passthru_flag.items():
(key, val) = l
passthru_flag[key] = 0
# clean up function after OPERATION state
if self.usb:
del self.usb
self.usb = None
self.addr = None
if self.socket:
self.socket.shutdown(socket.SHUT_RDWR)
self.socket.close()
self.socket = None
if self.stop_trace:
self.stop_trace.set()
if self.stop_web_upload:
LOG.debug("Web uploading end")
self.stop_web_upload.set()
if self.stop_v2x_web_upload:
LOG.debug("Web v2x uploading end")
self.stop_v2x_web_upload.set()
if self.stop_monitor:
LOG.debug("Monitor end")
self.stop_monitor.set()
if self.stop_button_monitor:
LOG.debug("Reset Button Monitor end")
self.stop_button_monitor.set()
if self.fp:
self.trace_raw_lock.acquire()
self.fp.close()
self.trace_raw_lock.release()
if self.v2x_fp:
self.trace_raw_lock.acquire()
self.v2x_fp.close()
self.trace_raw_lock.release()
# flush the queues
while not self.inQ.empty():
self.inQ.get()
while not self.outQ.empty():
self.outQ.get()
# Wait for all threads to complete
for t in self.threads:
t.join()
# reset exit_flag
exit_flag[self.name] = 0
LOG.debug("Ending " + self.name)
def vi_timestamp(self, data):
# add timestamp
rstr = ',\"timestamp\":%6f}' % time.time()
new = string.replace(data, '}', rstr)
return new
    def led_brightness(self, level):
        """Set LED brightness by writing *level* to the MAX5432 digital
        potentiometer's VREG register over I2C (bus handle self.led_cntl).
        """
        # LED Brightness via MAX5432
        I2C_ADDRESS = 0x28
        REG_VREG = 0x11
        # Return value of write_byte_data is captured but unused.
        status = self.led_cntl.write_byte_data(I2C_ADDRESS, REG_VREG, level)
    def battery_charger_check(self):
        """Read the TI bq24196 charger status/fault registers over I2C,
        update modem_state['charger'] and log any newly seen faults.

        Returns:
            True while the battery is pre-charging or fast-charging,
            False otherwise.
        """
        # charger access using Ti bq24196
        I2C_ADDRESS = 0x6b
        REG_STATUS = 0x08
        REG_FAULT = 0x09
        CHARGE_MASK = 0x30
        # CHRG_STAT bits [5:4] of the status register -> charge state.
        state_list = { 0x00: charge_state.NOT_CHARGE,
                       0x10: charge_state.PRE_CHARGE,
                       0x20: charge_state.FAST_CHARGE,
                       0x30: charge_state.CHARGE_DONE }
        # For fault decoding: {value: (mask, desc)}
        fault_list = { 0x80: ( 0x80, 'WDOG FAULT'),     # bit7
                       0x40: ( 0x40, 'BOOST FAULT'),    # bit6
                       0x30: ( 0x30, 'SAFETY FAULT'),   # bit[5:4]
                       0x20: ( 0x30, 'THERMAL FAULT'),  # bit[5:4]
                       0x10: ( 0x30, 'INPUT FAULT'),    # bit[5:4]
                       0x08: ( 0x08, 'BATOVP FAULT'),   # bit[3]
                       0x06: ( 0x07, 'HOT FAULT'),      # bit[2:1]
                       0x05: ( 0x07, 'COLD FAULT') }    # bit[2:1]
        status = self.charger.read_byte_data(I2C_ADDRESS,REG_STATUS)
        fault = self.charger.read_byte_data(I2C_ADDRESS,REG_FAULT)
        if self.debug:
            LOG.debug("status = x%X fault = x%X" % (status, fault))
        state = state_list[status & CHARGE_MASK]
        if modem_state['charger'] != state:
            modem_state['charger'] = state
        # Only log fault transitions, not a steady fault value.
        if fault != self.charger_fault:
            LOG.info("Charger Fault Register: x%X -> x%X" % (self.charger_fault, fault))
            self.charger_fault = fault
            # fault decoding
            for val in fault_list.keys():
                mask, desc = fault_list[val]
                if (fault & mask) == val:
                    LOG.info("   Fault: %s" % desc)
        return (modem_state['charger'] == charge_state.PRE_CHARGE \
                or modem_state['charger'] == charge_state.FAST_CHARGE)
    def battery_check(self):
        """Sample the battery voltage via the SoC ADC sysfs node and drive
        the red/green battery LEDs: green above GREEN_THRESHOLD, amber
        (both) between the thresholds, red below; LEDs blink while charging.
        """
        # Threshold value provided by HW team
        GREEN_THRESHOLD = 3.65
        RED_THRESHOLD = 3.55
        ADC_ADJUSTMENT = 0.04  # ~1% of 3.3
        dev = "/sys/devices/ahb/ahb:apb/f8018000.adc/iio:device0"
        cmd = "cat %s/in_voltage3_raw" % dev
        raw = float(subprocess.check_output(cmd, shell=True).split()[0])
        # 11-bit raw reading scaled to the 3.3V reference, plus calibration.
        volt = (raw / 2048 * 3.3) + ADC_ADJUSTMENT
        if self.debug:
            LOG.debug("raw = %f voltage = %f" % (raw, volt))
        charging = self.battery_charger_check()
        if volt >= GREEN_THRESHOLD:     # green
            self.bat_led_red.off()
            if charging:
                self.bat_led_grn.blink()
            else:
                self.bat_led_grn.on()
        elif volt >= RED_THRESHOLD:     # amber
            if charging:
                self.bat_led_grn.blink()
                self.bat_led_red.blink()
            else:
                self.bat_led_grn.on()
                self.bat_led_red.on()
        else:                           # red
            self.bat_led_grn.off()
            if charging:
                self.bat_led_red.blink()
            else:
                self.bat_led_red.on()
def vi_monitor(self):
# enviornment monitor task
self.battery_check()
pass
def vi_reset_button_monitor(self):
# obtain irq count
prev_irq_cnt = self.button_irq_cnt
cmd = "cat /proc/interrupts | grep PB_RST | awk '{print $2'}"
self.button_irq_cnt = int(subprocess.check_output(cmd, shell=True).strip())
if self.debug:
LOG.debug("Reset button monitor: irq=%s %s" % (prev_irq_cnt, self.button_irq_cnt))
if (self.button_irq_cnt == (prev_irq_cnt + 1)): # reset button was held
# Perform Firmware Reset
xc_led.all_leds(3) # all leds slow blink
LOG.info("Firmware Reset Button Activated !!!")
ver, fname = subprocess.check_output("cat ../backup/factory/upgrade.ver", shell=True).split()
LOG.info("Firmware Reset to %s ..." % ver)
LOG.info("System will be reset after Firmware Reset ...")
cmd = "rm -fr ../backup/current; cp -pr ../backup/factory ../backup/current; \
cp -f ../backup/current/%s /tmp; \
cd /tmp; tar xvf %s; ./xc-upgrade.sh; sudo reboot" % (fname, fname)
# LOG.debug("issuing: " + cmd)
if subprocess.call(cmd, shell=True):
LOG.debug("firmware reset fail")
def vi_power_profile(self):
# power-saving-mode profile
mode = conf_options['power_saving_mode']
LOG.info("Power mode configuration: " + mode)
self.led_brightness(conf_options['led_brightness'])
def vi_main(self):
attempt = 1
conf_options['openxc_modem_mac'] = self.modem_mac_inquiry()
# OXM-72: Rarely if BT frame errors occur at discovery time, VI dongle
# stucks at connection state while bluez is too messed up even to let
# us tearing down the connection. To work-around, we'd restart bluetooth
# and bringup TI device accordingly.
stuck_state = vi_state.ADDR_INQUIRY
self.board_id = boardid_inquiry()
self.config_mode = boardmode_inquiry()
stuck_cnt = 0
LOG.info("**********> Entering vi_main <*************")
LOG.info("Board ID = %s" % self.board_id)
LOG.info("Config mode = %s " % self.config_mode)
LOG.info("***********************************************")
#if ((boardid_inquiry() == 2) and (self.config_mode == 4) or (self.config_mode ==5)):
if ((self.board_id == 2) and (self.config_mode == 3)): # i.e. execute following code for v2x in mode 3 only
LOG.info(">>>>>>>>> Checking for modem <<<<<<<<<<<<")
while (attempt <= MAX_DISCOVERY_ATTEMPT):
modem_state[self.name] = vi_state.MODEM_DISCOVERY
if self.modem_inquiry() is not None:
modem_state[self.name] = vi_state.MODEM_ADDR_FOUND
LOG.info(" Modem Address found")
if (self.modem_available(conf_options['xcmodem_ip_addr'])):
modem_state[self.name] = vi_state.MODEM_UP
LOG.info(" Modem is UP")
if(self.modem_connect()):
modem_state[self.name] = vi_state.MODEM_CONNECTED
LOG.info("setting vi_conn_type to MODEM")
self.conn_type = 'MODEM'
LOG.info(" Modem is connected")
break
if stuck_state != modem_state[self.name]:
stuck_state = modem_state[self.name]
stuck_cnt = 1
else:
stuck_cnt += 1
LOG.info("MODEM DISOVERY - vi_app.state = %s after %d attempt" % (modem_state[self.name], attempt))
attempt += 1
if (modem_state[self.name] != vi_state.MODEM_CONNECTED):
LOG.info("Modem not connected ....")
return 0
if (conf_options['openxc_vi_enable']):
attempt =1
LOG.info("Enering VI discovery")
while (attempt <= MAX_DISCOVERY_ATTEMPT):
self.bt_led.off() # hasn't yet connection
conf_options['openxc_vi_mac'] = 'None' # restore default value
self.addr = None
modem_state[self.name] = vi_state.ADDR_INQUIRY
if self.vi_inquiry() is not None:
if self.usb is not None: # bypass BT discovery
modem_state[self.name] = vi_state.CONNECTED
break
modem_state[self.name] = vi_state.ADDR_ASSIGNED
if self.discovery_once or self.vi_discovery():
modem_state[self.name] = vi_state.DISCOVERED
if self.vi_connect() is not None:
modem_state[self.name] = vi_state.CONNECTED
break
elif not conf_options['openxc_vi_enable']:
modem_state[self.name] = vi_state.DISABLE
break;
if stuck_state != modem_state[self.name]:
stuck_state = modem_state[self.name]
stuck_cnt = 1
else:
stuck_cnt += 1
LOG.info("vi_app.state = %s after %d attempt" % (modem_state[self.name], attempt))
attempt += 1
if (modem_state[self.name] != vi_state.CONNECTED):
LOG.info("VI not connected ....")
#return 0
LOG.info("XC_VI.PY main IF is %s" %(modem_state[self.name] == vi_state.CONNECTED) or (modem_state[self.name] == vi_state.MODEM_CONNECTED))
if ((conf_options['openxc_vi_enable']==0) and (boardmode_inquiry()==3)) or (modem_state[self.name] == vi_state.CONNECTED) or (modem_state[self.name] == vi_state.MODEM_CONNECTED):
#-------------------------------------------------------
# create usb threads if VI is connected through usb
#-------------------------------------------------------
if self.usb is not None:
thread1 = usbRecvThread("%s-Recv" % self.name, self.usb, self.inQ, self.name)
thread2 = usbSendThread("%s-Send" % self.name, self.usb, self.outQ, self.name)
if (conf_options['openxc_vi_enable']): # connect through Bluotooth socket
# OXM-65 - Use Socket Recv timeout to indicate xfer stop after BT Frame failure
LOG.info("TRYING TO CONNECT VIA BT in xc_vi.py")
thread1 = sockRecvThread("%s-Recv" % self.name, self.socket, self.inQ, self.name, sflag = 1)
thread2 = sockSendThread("%s-Send" % self.name, self.socket, self.outQ, self.name)
# start thread
thread1.start()
thread2.start()
self.threads.append(thread1)
self.threads.append(thread2)
#--------------------------------------------------
# prepare SD card back up is more than one backup is desired
#--------------------------------------------------
if conf_options['openxc_vi_trace_number_of_backup']:
self.trace_sd_backup_prep()
if (conf_options['openxc_vi_enable']):
#---------------------------------------------------
# invoke stop_xxx.set() to stop the task if needed
# start trace task asap
#---------------------------------------------------
LOG.info("*****************************")
LOG.info("Starting VI Trace Log deamon")
LOG.info("*****************************")
thread3, self.stop_trace = loop_timer(float(conf_options['openxc_vi_trace_idle_duration']), \
self.trace_start, \
float(conf_options['openxc_vi_trace_snapshot_duration']), \
XCMODEM_TRACE_RAW_FILE, XCMODEM_TRACE_RAW_BK_FILE)
self.threads.append(thread3)
#---------------------------------------------------
# start v2x trace
#---------------------------------------------------
if (self.boardid == 1) and (self.config_mode == 3):
LOG.info("*****************************")
LOG.info("Starting V2X Trace Log deamon")
LOG.info("*****************************")
thread7, self.stop_v2x_trace = loop_timer(float(conf_options['openxc_v2x_trace_idle_duration']), \
self.v2x_trace_start, \
float(conf_options['openxc_v2x_trace_snapshot_duration']), \
XCMODEM_V2X_TRACE_RAW_FILE, XCMODEM_V2X_TRACE_RAW_BK_FILE)
self.threads.append(thread7)
#---------------------------------------------------
# for web upload, we use the stable back up file
#---------------------------------------------------
if conf_options['web_scp_vi_trace_upload_enable']:
#if (not ((board_id == 2) and ((self.config_mode == 4) or (self.config_mode == 5)))):
if (not ((self.board_id == 2) and (self.config_mode == 3))):
if (conf_options['openxc_vi_enable']):
thread4, self.stop_web_upload = loop_timer(float(conf_options['web_scp_vi_trace_upload_interval']), \
self.web_upload, \
XCMODEM_TRACE_RAW_BK_FILE, XCMODEM_TRACE_FILE)
self.threads.append(thread4)
thread4_1, self.stop_web_v2x_upload = loop_timer(float(conf_options['web_scp_vi_trace_upload_interval']), \
self.web_v2x_upload, \
#XCMODEM_TRACE_RAW_BK_FILE, XCMODEM_TRACE_FILE)
XCMODEM_V2X_TRACE_RAW_BK_FILE, XCMODEM_V2X_TRACE_FILE)
self.threads.append(thread4_1)
#-----------------------------------------------------
# start monitor task
#-----------------------------------------------------
monitor_interval = float(power_mode[conf_options['power_saving_mode']]['monitor_interval'])
thread5, self.stop_monitor = loop_timer(monitor_interval, self.vi_monitor)
self.threads.append(thread5)
# FW Reset button monitor
if conf_options['fw_factory_reset_enable']:
thread6, self.stop_button_monitor = loop_timer(FIRMWARE_RESET_BUTTON_MONITOR_INTERVAL, self.vi_reset_button_monitor)
self.threads.append(thread6)
modem_state[self.name] = vi_state.OPERATION
elif modem_state[self.name] != vi_state.DISABLE:
exit_flag[self.name] = 1
if stuck_cnt >= MAX_DISCOVERY_ATTEMPT \
and (stuck_state == vi_state.ADDR_ASSIGNED \
or stuck_state == vi_state.DISCOVERED):
LOG.debug("VI/MODEM probably stucks! Work-around to re-start bluetooth")
if self.bt5:
LOG.debug("Bluetooth 5 restart doesn't work! Please restart your test !!")
exit_flag['all_app'] = 1
else:
modem_state[self.name] = vi_state.RESTART
vi_bt_restart(self.name)
LOG.info("vi_app.state = %s" % modem_state[self.name])
return (modem_state[self.name] == vi_state.OPERATION)
if __name__ == '__main__':
    # Entry point: bring up the VI app, then pump received VI data into the
    # raw trace file (with timestamps) until a connection is lost, retrying
    # bring-up until MAX_BRING_UP_ATTEMPT or a global exit is requested.
    parser = argparse.ArgumentParser()
    parser.add_argument('-v', help='Verbosity Level (0..2)')
    args = parser.parse_args()
    if args.v is None:
        level = 0
    else:
        level = int(args.v)
    pairing_registration()
    vi_cleanup()
    # level 1 -> debug, level 2 -> debug + sdebug
    vi_dev = xcModemVi(port_dict['vi_app']['port'], vi_in_queue, vi_out_queue, \
                       sdebug = (level>1), debug = (level>0))
    attempt = 1
    while True:
        if (vi_dev.vi_main()):
            # Main data pump: runs until some task raises exit_flag['vi_app'].
            while not exit_flag['vi_app']:
                while not vi_in_queue.empty():
                    data = vi_dev.inQ.get()
                    # print("rec [%s]" % data)
                    new = vi_dev.vi_timestamp(data)
                    # print("new [%s]" % new)
                    # simply dump into a file
                    vi_dev.trace_raw_lock.acquire()
                    if vi_dev.fp and vi_dev.trace_enable:
                        vi_dev.fp.write(new)
                    vi_dev.trace_raw_lock.release()
                msleep(1)
            modem_state['vi_app'] = vi_state.LOST
            vi_dev.lost_cnt += 1
            LOG.info("vi_app state %s %d time" % (modem_state['vi_app'], vi_dev.lost_cnt))
            vi_dev.vi_exit()
        if exit_flag['all_app']:
            LOG.debug("Ending all_app")
            break;
        # Pause before re-attempting bring-up.
        time.sleep(float(conf_options['openxc_vi_discovery_interval']))
        attempt += 1
        if (attempt > MAX_BRING_UP_ATTEMPT):
            LOG.debug("vi_app max out %d attempts in xc_vi.py" % MAX_BRING_UP_ATTEMPT)
            break;
|
openxc/OpenXCAccessory
|
common/xc_vi.py
|
Python
|
bsd-3-clause
| 57,413
|
[
"Amber"
] |
12a11d052456decb333df389924d06211979403a8c9947199e939d7cafe5899d
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018 Vimig Socrates <vimig.socrates@gmail.com>
# Copyright (C) 2016 Loreto Parisi <loretoparisi@gmail.com>
# Copyright (C) 2016 Silvio Olivastri <silvio.olivastri@gmail.com>
# Copyright (C) 2016 Radim Rehurek <radim@rare-technologies.com>
"""This script allows converting word-vectors from word2vec format into Tensorflow 2D tensor and metadata format.
This script used for for word-vector visualization on `Embedding Visualization <http://projector.tensorflow.org/>`_.
How to use
----------
#. Convert your word-vector with this script (for example, we'll use model from
`gensim-data <https://rare-technologies.com/new-download-api-for-pretrained-nlp-models-and-datasets-in-gensim/>`_) ::
python -m gensim.downloader -d glove-wiki-gigaword-50 # download model in word2vec format
python -m gensim.scripts.word2vec2tensor -i ~/gensim-data/glove-wiki-gigaword-50/glove-wiki-gigaword-50.gz \
-o /tmp/my_model_prefix
#. Open http://projector.tensorflow.org/
#. Click "Load Data" button from the left menu.
#. Select "Choose file" in "Load a TSV file of vectors." and choose "/tmp/my_model_prefix_tensor.tsv" file.
#. Select "Choose file" in "Load a TSV file of metadata." and choose "/tmp/my_model_prefix_metadata.tsv" file.
#. ???
#. PROFIT!
For more information about TensorBoard TSV format please visit:
https://www.tensorflow.org/versions/master/how_tos/embedding_viz/
Command line arguments
----------------------
.. program-output:: python -m gensim.scripts.word2vec2tensor --help
:ellipsis: 0, -7
"""
import os
import sys
import logging
import argparse
import gensim
from gensim import utils
logger = logging.getLogger(__name__)
def word2vec2tensor(word2vec_model_path, tensor_filename, binary=False):
    """Convert a Word2Vec-format model into TensorBoard projector TSV files.

    Writes "<tensor_filename>_tensor.tsv" (one tab-separated vector per line)
    and "<tensor_filename>_metadata.tsv" (the corresponding words).

    Parameters
    ----------
    word2vec_model_path : str
        Path to the input file in Word2Vec format.
    tensor_filename : str
        Prefix for the two output files.
    binary : bool, optional
        True if the input file is in binary format.

    """
    model = gensim.models.KeyedVectors.load_word2vec_format(word2vec_model_path, binary=binary)
    vector_path = tensor_filename + '_tensor.tsv'
    metadata_path = tensor_filename + '_metadata.tsv'
    to_utf8 = gensim.utils.to_utf8
    with utils.open(vector_path, 'wb') as vector_file, utils.open(metadata_path, 'wb') as metadata_file:
        for word in model.index2word:
            metadata_file.write(to_utf8(word) + to_utf8('\n'))
            row = '\t'.join(str(component) for component in model[word])
            vector_file.write(to_utf8(row) + to_utf8('\n'))
    logger.info("2D tensor file saved to %s", vector_path)
    logger.info("Tensor metadata file saved to %s", metadata_path)
if __name__ == "__main__":
    logging.basicConfig(format='%(asctime)s - %(module)s - %(levelname)s - %(message)s', level=logging.INFO)
    # The slice trims the trailing "Command line arguments" section (last 138
    # chars) of the module docstring so argparse doesn't duplicate its own help.
    parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=__doc__[:-138])
    parser.add_argument("-i", "--input", required=True, help="Path to input file in word2vec format")
    parser.add_argument("-o", "--output", required=True, help="Prefix path for output files")
    parser.add_argument(
        "-b", "--binary", action='store_const', const=True, default=False,
        help="Set this flag if word2vec model in binary format (default: %(default)s)"
    )
    args = parser.parse_args()
    logger.info("running %s", ' '.join(sys.argv))
    word2vec2tensor(args.input, args.output, args.binary)
    logger.info("finished running %s", os.path.basename(sys.argv[0]))
|
napsternxg/gensim
|
gensim/scripts/word2vec2tensor.py
|
Python
|
gpl-3.0
| 3,850
|
[
"VisIt"
] |
b469694d338c3ef4b493c90bf3275699196a731937dc61a5de216be7d3defc09
|
import numpy as np
from asap3.nanoparticle_mc import data
from ase.atom import Atom, names as oldnames
from ase.data import atomic_masses, atomic_numbers, chemical_symbols, reference_states
from asap3.Internal.MonteCarloAtoms import MC_Atom
class ClusterAtom(MC_Atom):
    """Cluster Atom.

    An Atom-like view onto a cluster: when bound to an ``atoms`` container it
    reads/writes that container's per-atom arrays; when detached it stores
    values in its own ``data`` dict (provided by the ASE Atom base class --
    TODO confirm).
    """
    # Extend the ASE attribute registry with cluster-specific per-atom fields.
    # Each entry maps singular name -> (plural array name, default value).
    names = oldnames.copy()
    names['neighbors'] = ('neighbors', None)
    names['coordination'] = ('coordinations', 0)
    names['type'] = ('types', 0)
    # NOTE(review): class-level list shared by all instances; 'has' tests
    # membership here but nothing visible ever appends to it -- verify.
    _data = []
    def __init__(self, symbol='X', position=(0.0, 0.0, 0.0), atoms=None, index=None):
        self.atoms = atoms
        self.index = index
        if atoms is None:
            # Detached atom: resolve the chemical symbol (or accept a number).
            if isinstance(symbol, str):
                self.number = atomic_numbers[symbol]
            else:
                self.number = symbol
            self.position = np.array(position, float)
    def __repr__(self):
        output = 'ClusterAtom(%s, %s' % (self.symbol, self.position.tolist())
        for name in self._data:
            if name != 'number' and name != 'position' and self._get(name) is not None:
                output += ', %s=%s' % (name, self._get(name))
        return output + ')'
    def get_raw(self, name):
        """Get attribute, return None if not explicitely set."""
        if name == 'symbol':
            return chemical_symbols[self.get_raw('number')]
        if self.atoms is None:
            # Detached: read from the local data dict (base-class storage).
            return self.data[name]
        plural = self.names[name][0]
        if plural in self.atoms.arrays:
            return self.atoms.arrays[plural][self.index]
        else:
            return None
    def get(self, name):
        """Get attribute, return default if not explicitely set."""
        value = self.get_raw(name)
        if value is None:
            if name == 'mass':
                # NOTE(review): requires atomic_masses from ase.data -- not in
                # this module's visible imports; confirm it is imported.
                value = atomic_masses[self.number]
            else:
                value = self.names[name][1]
        return value
    #def _get_copy(self, name):
    #    return self._get(name, copy=True)
    def set(self, name, value, copy=False):
        """Set attribute."""
        if name == 'symbol':
            # Symbols are stored as atomic numbers.
            name = 'number'
            value = atomic_numbers[value]
        if self.atoms is None or copy:
            assert name in self.names
            self.data[name] = value
        else:
            plural, default = self.names[name]
            if plural in self.atoms.arrays:
                array = self.atoms.arrays[plural]
                if name == 'magmom' and array.ndim == 2:
                    assert len(value) == 3
                array[self.index] = value
            else:
                # The container has no such array yet: create one sized for
                # all atoms, then store this atom's value.
                if name == 'magmom' and np.asarray(value).ndim == 1:
                    array = np.zeros((len(self.atoms), 3))
                elif name == 'mass':
                    array = self.atoms.get_masses()
                else:
                    default = np.asarray(default)
                    array = np.zeros((len(self.atoms),) + default.shape,
                                     default.dtype)
                array[self.index] = value
                self.atoms.new_array(plural, array)
    def has(self, name):
        # Membership in the local data list (see NOTE on _data above).
        return name in self._data
    def cut_reference_to_atoms(self):
        # Copy every per-atom value into local storage, then detach.
        for name, a in self.atoms.arrays.items():
            self.set(self.atoms.names[name][0], a[self.index].copy(), True)
        self.atoms = None
        self.index = None
    def get_symbol(self): return chemical_symbols[self.get('number')]
    def get_neighbors(self): return self.get('neighbors')
    def get_type(self): return self.get('type')
    def get_coordination(self): return self.get('coordination')
    #def get_(self): return self._get('')
    def set_symbol(self, value): self.set('number', atomic_numbers[value])
    def set_neighbors(self, value): self.set('neighbors', np.array(value, int))
    def set_type(self, value): self.set('type', value)
    def set_coordination(self, value): self.set('coordination', value)
    #def get_(self, value): return self._set('', value)
    symbol = property(get_symbol, set_symbol, doc='Chemical symbol')
    neighbors = property(get_neighbors, set_neighbors, doc='List of nearest neighbors')
    type = property(get_type, set_type, doc='Atom type')
    coordination = property(get_coordination, set_coordination, doc='Atom coordination')
    # We need to repeat these as MC_Atom has blocked them.
    # Lambda expression needed as _get_position is in a base class :-(
    position = property(lambda self: self._get_position(),
                        lambda self, x : self.set_position(x),
                        doc='XYZ-coordinates')
    number = property(lambda self: self.get_atomic_number(),
                      lambda self, x: self.set_atomic_number(x),
                      doc='Atomic number')
|
auag92/n2dm
|
Asap-3.8.4/Python/asap3/nanoparticle_mc/clusteratom.py
|
Python
|
mit
| 4,763
|
[
"ASE"
] |
0d194ac1c1649ab8613e8d3fe83fb9f58f21e2b05f87eb9157a59c0409a190c5
|
# -*- coding: utf-8 -*-
def VtkDibujaIdsElementos(ids):
    '''Draw the element (cell) id labels at the cell centroids.'''
    cc= vtk.vtkCellCenters()
    cc.SetInput(ids)  # cell centroids; was vtk.SetInput(ids) -- SetInput belongs to the filter, not the module
    visCells= vtk.vtkSelectVisiblePoints()
    visCells.SetInput(cc)
    # NOTE(review): SetRenderer expects a renderer object; the literal string
    # "renderer" is kept from the original but looks wrong -- confirm caller.
    visCells.SetRenderer("renderer")
    visCells.SelectionWindowOff()
    #Create the mapper to display the cell ids. Specify the format to
    # use for the labels. Also create the associated actor.
    cellMapper= vtk.vtkLabeledShStrMapper()  # was a bare class reference; must be instantiated
    cellMapper.SetInput(visCells)
    cellMapper.LabelTextProperty().SetColor(0,0,0.9)
    cellLabels= vtk.vtkActor2D()
    cellLabels.SetMapper(cellMapper)
|
lcpt/xc
|
python_modules/postprocess/xcVtk/FE_model/vtk_plot_element_ids.py
|
Python
|
gpl-3.0
| 630
|
[
"VTK"
] |
4cb5cca01bff3eb59fdeafead17b6770d39a2990097085d3270ccd39b5fac2f1
|
# -*- coding: utf-8 -*-
import xbmc, xbmcaddon, xbmcplugin, xbmcgui
import sys, os, time, datetime, re
import urllib, urlparse
# --- Module-level initialization for the israelive Kodi addon ---
resolverID = 'script.module.israeliveresolver'
AddonID = "plugin.video.israelive"
Addon = xbmcaddon.Addon(AddonID)
# Optionally disable HTTPS certificate verification (user setting).
if Addon.getSetting("unverifySSL") == "true":
    try:
        import ssl
        ssl._create_default_https_context = ssl._create_unverified_context
    except:
        pass
addonPath = xbmc.translatePath(Addon.getAddonInfo("path")).decode("utf-8")
libDir = os.path.join(addonPath, 'resources', 'lib')
sys.path.insert(0, libDir)
# Bundled helper modules (resources/lib).
import common, myIPTV, checkUpdates, updateM3U
localizedString = Addon.getLocalizedString
AddonName = Addon.getAddonInfo("name")
icon = Addon.getAddonInfo('icon')
artDir = os.path.join(addonPath, 'resources', 'art')
__icon__ = os.path.join(artDir, "check2.png")
__icon2__= os.path.join(artDir, "signQuestionMark.png")
# Per-profile data directory; created on first run.
user_dataDir = xbmc.translatePath(Addon.getAddonInfo("profile")).decode("utf-8")
if not os.path.exists(user_dataDir):
    os.makedirs(user_dataDir)
FAV = os.path.join(user_dataDir, 'favorites.txt')
if not (os.path.isfile(FAV)):
    common.WriteList(FAV, [])
# Remote settings are fetched, cached locally, then re-read from the cache.
remoteSettings = common.GetRemoteSettings()
remoteSettingsFile = os.path.join(user_dataDir, "remoteSettings.txt")
if not os.path.isfile(remoteSettingsFile):
    common.UpdateFile(remoteSettingsFile, "remoteSettingsZip", remoteSettings, zip=True, forceUpdate=True)
remoteSettings = common.ReadList(remoteSettingsFile)
if remoteSettings == []:
    # No settings -> the addon cannot work; notify and bail out.
    xbmc.executebuiltin('Notification({0}, Cannot load settings, {1}, {2})'.format(AddonName, 5000, icon))
    sys.exit()
listsFile = os.path.join(user_dataDir, "israelive.list")
if not os.path.isfile(listsFile):
    common.UpdateChList(remoteSettings)
# File locations for the guide and the IPTV Simple integration.
fullGuideFile = os.path.join(user_dataDir, 'fullGuide.txt')
iptvChannelsFile = os.path.join(user_dataDir, "iptv.m3u")
iptvGuideFile = os.path.join(user_dataDir, "guide.xml")
iptvLogosDir = os.path.join(user_dataDir, "logos")
categoriesFile = os.path.join(user_dataDir, 'lists', 'categories.list')
selectedCategoriesFile = os.path.join(user_dataDir, 'lists', 'selectedCategories.list')
# User preferences.
useCategories = Addon.getSetting("useCategories") == "true"
showProgNames = Addon.getSetting("showProgNames") == "true"
useEPG = Addon.getSetting("useEPG") == "true"
if useEPG and not os.path.isfile(fullGuideFile):
    useEPG = False
# Module-level cache: guide of the last-listed category (see GetProgrammeDetails).
epg = None
cat = None
catname = None
def CATEGORIES():
    """Build the addon's root menu: settings, favourites, then category folders
    (or a flat channel list when categories are disabled)."""
    common.CheckNewVersion(remoteSettings)
    # Settings entry (mode 50).
    addDir("[COLOR yellow][B][{0}][/B][/COLOR]".format(localizedString(30239).encode('utf-8')), 50, 'https://www.ostraining.com/cdn/images/coding/setting.png', background="http://3.bp.blogspot.com/-vVfHI8TbKA4/UBAbrrZay0I/AAAAAAAABRM/dPFgXAnF8Sg/s1600/retro-tv-icon.jpg")
    # Favourites entry (mode 16).
    addDir("[COLOR {0}][B][{1}][/B][/COLOR]".format(Addon.getSetting("favColor"), localizedString(30000).encode('utf-8')), 16, 'http://cdn3.tnwcdn.com/files/2010/07/bright_yellow_star.png', background="http://3.bp.blogspot.com/-vVfHI8TbKA4/UBAbrrZay0I/AAAAAAAABRM/dPFgXAnF8Sg/s1600/retro-tv-icon.jpg")
    if useCategories:
        categories = common.ReadList(selectedCategoriesFile)
        ind = -1
        for category in categories:
            ind += 1
            try:
                if category.has_key("type") and category["type"] == "ignore":
                    continue
                # One folder per selected category (mode 2); index is the
                # category's position, used by the move context-menu entries.
                addDir("[COLOR {0}][B][{1}][/B][/COLOR]".format(Addon.getSetting("catColor"), category["name"].encode("utf-8")), 2, category["image"], background="http://3.bp.blogspot.com/-vVfHI8TbKA4/UBAbrrZay0I/AAAAAAAABRM/dPFgXAnF8Sg/s1600/retro-tv-icon.jpg", channelID=category["id"], categoryID=category["group"], index=ind)
            except Exception as ex:
                xbmc.log("{0}".format(ex), 3)
    else:
        # Flat mode: list channels of category "9999" directly -- presumably
        # the "all channels" pseudo-category; confirm against common.GetChannels.
        ListLive(categoryID="9999", iconimage="http://3.bp.blogspot.com/-vVfHI8TbKA4/UBAbrrZay0I/AAAAAAAABRM/dPFgXAnF8Sg/s1600/retro-tv-icon.jpg", showSearch=True)
    SetViewMode()
def SetViewMode():
    """Apply the configured list view mode, or a skin-appropriate default."""
    if useEPG:
        xbmcplugin.setContent(int(sys.argv[1]), 'episodes')
    skin = xbmc.getSkinDir()
    mode = Addon.getSetting("viewMode").strip()
    if mode in ('Auto', ''):
        # Pick a default per skin; first matching marker wins.
        defaults = (('confluence', '504'), ('estuary', '55'),
                    ('estouchy', '500'), ('eminence.2', '54'),
                    ('eminence.he.2', '54'), ('eminence.zeev', '510'),
                    ('amber', '50'))
        for marker, skin_mode in defaults:
            if marker in skin:
                mode = skin_mode
                break
        else:
            # Unknown skin: leave the view mode alone.
            return
    xbmc.executebuiltin("Container.SetViewMode({0})".format(mode))
def ListLive(categoryID=None, iconimage=None, chID=None, catChannels=None, showSearch=False, makeGroup=True, catName=False):
    """List a category's channels (or a supplied list) as directory items.

    Channels sharing a display name are optionally collapsed into one group
    entry (mode 3).  Playable channels get mode 10, nested playlists mode 2.
    """
    if catChannels is None:
        catChannels = common.GetChannels(categoryID)
    groupChannels = []
    for channel in catChannels:
        if channel["type"] == 'ignore':
            continue
        # Find an existing group with the same display name.
        matches = [groupChannels.index(x) for x in groupChannels if len(x) > 0 and x[0]["name"] == channel["name"]]
        if len(matches) == 1 and makeGroup:
            groupChannels[matches[0]].append(channel)
        else:
            # When filtering by chID, keep only that channel.
            if chID is None or chID == channel['id']:
                groupChannels.append([channel])
    if showSearch and len(groupChannels) > 0:
        # Search entry (mode 60).
        addDir("[COLOR white][B]<{0}>[/B][/COLOR]".format(localizedString(30027).encode('utf-8')), 60, categoryID=categoryID)
    for channels in groupChannels:
        isGroupChannel = len(channels) > 1 and chID is None
        # A group is represented by its first member only.
        chs = [channels[0]] if isGroupChannel else channels
        for channel in chs:
            image = channel['image']
            description = ""
            channelName = channel['name'].encode("utf-8")
            background = None
            isTvGuide = False
            isFolder=True
            displayName, description, background, isTvGuide = GetProgrammeDetails(channelName, channel['group'], catName= catName and categoryID != channel['group'], progName=showProgNames)
            if isGroupChannel:
                mode = 3
                # Re-color the name with the category color and add brackets.
                displayName = displayName.replace('[COLOR {0}][B]'.format(Addon.getSetting("chColor")), '[COLOR {0}][B]['.format(Addon.getSetting("catColor")), 1).replace('[/B]', '][/B]', 1)
            elif channel["type"] == 'video' or channel["type"] == 'audio':
                mode = 10
                isFolder=False
            elif not useCategories and channel["type"] == 'playlist':
                mode = 2
                displayName = displayName.replace('[COLOR {0}][B]'.format(Addon.getSetting("chColor")), '[COLOR {0}][B]['.format(Addon.getSetting("catColor")), 1).replace('[/B]', '][/B]', 1)
                background = image
            else:
                # Unknown channel type (or playlist while categories are on).
                continue
            if background is None or background == "":
                background = iconimage
            addDir(displayName, mode, image, description, isFolder=isFolder, background=background, isTvGuide=isTvGuide, channelID=channel["id"], categoryID=channel['group'])
    SetViewMode()
def PlayChannelByID(chID=None, fromFav=False, channel=None):
    """Look up a channel record (favourites index or channel id) and play it."""
    try:
        if channel is None:
            if fromFav:
                # chID is the position inside the favourites file.
                channel = common.ReadList(FAV)[int(chID)]
            else:
                channel = common.GetChannelByID(chID)
        if fromFav:
            categoryID = 'Favourites'
        else:
            categoryID = channel["group"]
        PlayChannel(channel["url"], channel["name"].encode("utf-8"), channel["image"].encode("utf-8"), categoryID)
    except Exception as ex:
        xbmc.log(str(ex), 3)
def PlayChannel(url, name, iconimage, categoryID):
    """Resolve the stream URL and hand a playable ListItem back to Kodi.

    Returns False when the URL cannot be resolved, True otherwise.
    """
    url = ResolveUrl(url)
    if url is None:
        xbmc.log("Cannot resolve stream URL for channel '{0}'".format(urllib.unquote_plus(name)), 3)
        xbmc.executebuiltin("Notification({0}, Cannot resolve stream URL for channel '[COLOR {1}][B]{2}[/B][/COLOR]', {3}, {4})".format(AddonName, Addon.getSetting("chColor"), urllib.unquote_plus(name), 5000, __icon2__))
        return False
    channelName, programmeName, description = GetPlayingDetails(urllib.unquote_plus(name), categoryID)
    listItem = xbmcgui.ListItem(path=url)
    listItem.setInfo(type="Video", infoLabels={"mediatype": "movie", "studio": channelName, "title": programmeName, "plot": description, "tvshowtitle": channelName, "episode": "0", "season": "0"})
    if iconimage is not None:
        try:
            # Newer Kodi API; fall back to the deprecated setter on failure.
            listItem.setArt({'thumb' : iconimage})
        except:
            listItem.setThumbnailImage(iconimage)
    xbmcplugin.setResolvedUrl(handle=int(sys.argv[1]), succeeded=True, listitem=listItem)
    return True
def ResolveUrl(url):
    """Rewrite a channel URL into a call to the resolver plugin.

    Extracts the 'mode=N' query parameter, strips it from the URL, and wraps
    the rest as plugin://<resolver>/?url=...&mode=N.  Returns None when no
    mode parameter is present or on any error.
    """
    try:
        if "mode=" in url:
            regex = re.compile('[\?|&]mode=(\-?[0-9]+)', re.I+re.M+re.U+re.S)
            matches = regex.findall(url)
            if len(matches) > 0:
                url = regex.sub('', url).strip()
                mode = matches[0]
                if mode == '0':
                    # Mode 0 is remapped to -3 and only the part after the
                    # last ';' is kept -- presumably a multi-part source
                    # entry; confirm against the resolver addon.
                    mode = '-3'
                    url = url[url.rfind(';')+1:]
                url = 'plugin://{0}/?url={1}&mode={2}'.format(resolverID, url, mode)
            else:
                url = None
    except Exception as ex:
        xbmc.log("{0}".format(ex), 3)
        url = None
    finally:
        # NOTE(review): returning from 'finally' also swallows any exception
        # raised above; kept as-is to preserve behavior.
        return url
def GetPlayingDetails(channelName, categoryID):
    """Return (channelName, programmeName, description) labels for playback.

    Without EPG the colored channel name serves as both labels (None plot).
    With EPG, the current programme and, when present, the next one are folded
    into the labels using the configured markup colors.
    """
    programmeName = "[COLOR {0}][B]{1}[/B][/COLOR]".format(Addon.getSetting("chColor"), channelName)
    if not useEPG:
        return programmeName, programmeName, None
    global epg
    if epg is None:
        epg = common.GetGuide(categoryID)
    programmes = GetProgrammes(epg, channelName)
    channelName = programmeName
    description = ''
    if len(programmes) > 0:
        # programmes[0] is the current programme, programmes[1] the next one.
        programme = programmes[0]
        programmeName = '[COLOR {0}][B]{1}[/B][/COLOR] [COLOR {2}][{3}-{4}][/COLOR]'.format(Addon.getSetting("prColor"), programme["name"].encode('utf-8'), Addon.getSetting("timesColor"), datetime.datetime.fromtimestamp(programme["start"]).strftime('%H:%M'), datetime.datetime.fromtimestamp(programme["end"]).strftime('%H:%M'))
        if programmes[0]["description"] is not None:
            description = '{0}[CR]{1}'.format(programmeName, programmes[0]["description"].encode('utf-8'))
        if len(programmes) > 1:
            nextProgramme = programmes[1]
            channelName = "{0} - [COLOR {1}]Next: [B]{2}[/B][/COLOR] [COLOR {3}][{4}-{5}][/COLOR]".format(channelName, Addon.getSetting("nprColor"), nextProgramme["name"].encode("utf-8"), Addon.getSetting("timesColor"), datetime.datetime.fromtimestamp(nextProgramme["start"]).strftime('%H:%M'), datetime.datetime.fromtimestamp(nextProgramme["end"]).strftime('%H:%M'))
            description = '{0}[CR][CR]Next: [COLOR {1}][B]{2}[/B][/COLOR] [COLOR {3}][{4}-{5}][/COLOR]'.format(description, Addon.getSetting("prColor"), programmes[1]["name"].encode('utf-8'), Addon.getSetting("timesColor"), datetime.datetime.fromtimestamp(programmes[1]["start"]).strftime('%H:%M'), datetime.datetime.fromtimestamp(programmes[1]["end"]).strftime('%H:%M'))
    return channelName, programmeName, description
def ChannelGuide(chID, categoryID):
    """Display the full TV guide for a single channel."""
    guide = common.GetGuide(categoryID)
    # Favourites address channels by list position; otherwise by id.
    isFav = categoryID == 'Favourites'
    channel = common.ReadList(FAV)[int(chID)] if isFav else common.GetChannelByID(chID)
    name = channel["name"].encode("utf-8")
    ShowGuide(GetProgrammes(guide, name, full=True), name, channel["image"].encode("utf-8"))
def ShowGuide(programmes, channelName, iconimage):
    """Render a channel's programme list as non-playable directory rows."""
    if programmes is None or len(programmes) == 0:
        # No guide data: single explanatory row.
        addDir('[COLOR red][B]{0}[/B] "{1}".[/COLOR]'.format(localizedString(30204).encode('utf-8'), channelName), 99, iconimage, isFolder=False)
    else:
        # Header row with the channel name.
        addDir('------- [B][COLOR {0}]{1}[/COLOR] - [COLOR {2}]{3}[/COLOR][/B] -------'.format(Addon.getSetting("chColor"), channelName, Addon.getSetting("prColor"), localizedString(30205).encode('utf-8')), 99, iconimage, isFolder=False)
        day = ""
        for programme in programmes:
            startdate = datetime.datetime.fromtimestamp(programme["start"]).strftime('%d/%m/%y')
            if startdate != day:
                # Day changed: emit a date-header row.
                day = startdate
                addDir('[COLOR {0}][B]{1}:[/B][/COLOR]'.format(Addon.getSetting("nprColor"), day), 99, iconimage, isFolder=False)
            startdatetime = datetime.datetime.fromtimestamp(programme["start"]).strftime('%H:%M')
            enddatetime = datetime.datetime.fromtimestamp(programme["end"]).strftime('%H:%M')
            programmeName = "[COLOR {0}][{1}-{2}][/COLOR] [COLOR {3}][B]{4}[/B][/COLOR]".format(Addon.getSetting("timesColor"), startdatetime, enddatetime, Addon.getSetting("prColor"), programme["name"].encode('utf-8'))
            description = "" if programme["description"] is None else programme["description"].encode('utf-8')
            image = programme["image"] if programme["image"] else iconimage
            addDir(programmeName, 99, image, description, isFolder=False)
    SetViewMode()
def GetProgrammeDetails(channelName, categoryID, catName=False, progName=False):
    """Build display name, plot, fanart and guide-availability for a channel row.

    Caches the last category's guide in the module-level ``epg``/``cat``
    globals so successive rows of the same category reuse one guide load.
    Returns (displayName, description, background, isTvGuide).
    """
    global epg
    global cat
    global catname
    displayName = "[COLOR {0}][B]{1}[/B][/COLOR]".format(Addon.getSetting("chColor"), channelName)
    description = ""
    background = None
    isTvGuide = False
    if useEPG:
        if epg is None or cat != categoryID:
            # Cache miss: (re)load the guide and, when asked, the category name.
            cat = categoryID
            if catName:
                allCatList = common.ReadList(categoriesFile)
                cats = [item["name"] for item in allCatList if categoryID == item["id"]]
                catname = '' if len(cats) == 0 else cats[0].encode('utf-8')
            epg = common.GetGuide(categoryID)
        if catName:
            displayName = '[COLOR {0}][B][{1}][/B][/COLOR] - {2}'.format(Addon.getSetting("catColor"), catname, displayName)
        programmes = GetProgrammes(epg, channelName)
        if programmes is not None and len(programmes) > 0:
            isTvGuide = True
            # programmes[0] is the current programme.
            programmeName = "[COLOR {0}][B]{1}[/B][/COLOR] [COLOR {2}][{3}-{4}][/COLOR]".format(Addon.getSetting("prColor"), programmes[0]["name"].encode('utf-8'), Addon.getSetting("timesColor"), datetime.datetime.fromtimestamp(programmes[0]["start"]).strftime('%H:%M'), datetime.datetime.fromtimestamp(programmes[0]["end"]).strftime('%H:%M'))
            if progName:
                displayName = "{0} - {1}".format(displayName, programmeName)
            if programmes[0]["description"] is not None:
                description = '{0}[CR]{1}'.format(programmeName, programmes[0]["description"].encode('utf-8'))
            if programmes[0]["image"] is not None:
                background = programmes[0]["image"]
            if len(programmes) > 1:
                # programmes[1] is the next programme.
                if progName:
                    displayName = "{0} - [COLOR {1}]Next: [B]{2}[/B][/COLOR] [COLOR {3}][{4}-{5}][/COLOR]".format(displayName, Addon.getSetting("nprColor"), programmes[1]["name"].encode('utf-8'), Addon.getSetting("timesColor"), datetime.datetime.fromtimestamp(programmes[1]["start"]).strftime('%H:%M'), datetime.datetime.fromtimestamp(programmes[1]["end"]).strftime('%H:%M'))
                description = '{0}[CR][CR]Next: [COLOR {1}][B]{2}[/B][/COLOR] [COLOR {3}][{4}-{5}][/COLOR]'.format(description, Addon.getSetting("prColor"), programmes[1]["name"].encode('utf-8'), Addon.getSetting("timesColor"), datetime.datetime.fromtimestamp(programmes[1]["start"]).strftime('%H:%M'), datetime.datetime.fromtimestamp(programmes[1]["end"]).strftime('%H:%M'))
    return displayName, description, background, isTvGuide
def GetProgrammes(epg, channelName ,full=False):
    """Return the channel's programme list starting at the current time.

    With full=True returns everything from the currently running programme
    onward; otherwise at most the current and the next programme.  Returns []
    when no current programme is found.
    """
    programmes = []
    try:
        # Match the channel by display name with color markup stripped.
        matches = [x["tvGuide"] for x in epg if x["channel"].encode('utf-8').strip() == common.GetUnColor(channelName)]
        programmes = matches[0]
    except Exception, e:
        # No match (or malformed guide): fall through with an empty list.
        pass
    now = int(time.time())
    programmesCount = len(programmes)
    for i in range(programmesCount):
        start = programmes[i]["start"]
        stop = programmes[i]["end"]
        if now >= stop:
            # Already finished -- skip.
            continue
        if now < start:
            # Gap before the next programme: insert a "No Details" filler so
            # the current slot is always represented; we return right after,
            # so the in-place insert is safe.
            newStart = now if i == 0 else programmes[i-1]["end"]
            programme = {"start": newStart, "end": programmes[i]["start"], "name": "No Details", "description": None, "image": None}
            programmes.insert(i, programme)
        if (full):
            return programmes[i:]
        elif i+1 < programmesCount:
            return programmes[i:i+2]
        else:
            return programmes[i:i+1]
    return []
def listFavorites():
    """List the user's favourite channels as playable rows (mode 11)."""
    favsList = common.ReadList(FAV)
    if favsList == []:
        # Empty favourites: show two explanatory, non-playable rows.
        addDir('[COLOR red]{0}[/COLOR]'.format(localizedString(30202).encode('utf-8')), 99, isFolder=False)
        addDir('[COLOR red]{0}[/COLOR]'.format(localizedString(30203).encode('utf-8')), 99, isFolder=False)
    ind = -1
    for favourite in favsList:
        # index (list position) is what PlayChannelByID/removal use as the id.
        ind += 1
        if favourite["type"] == "ignore":
            continue
        channelName = common.GetUnColor(favourite["name"].encode("utf-8"))
        image = favourite["image"].encode("utf-8")
        description = None
        background = None
        isTvGuide = False
        displayName, description, background, isTvGuide = GetProgrammeDetails(channelName, "Favourites", progName=showProgNames)
        addDir(displayName, 11, image, description, isFolder=False, background=background, isTvGuide=isTvGuide, categoryID="Favourites", index=ind)
    SetViewMode()
def addFavorites(channels, showNotification=True):
    """Append the given channel dicts to the favourites file, skipping channel
    ids that are already present, then rebuild the favourites guide."""
    favsList = common.ReadList(FAV)
    for channel in channels:
        # Duplicate check against the (growing) favourites list.
        duplicate = False
        for fav in favsList:
            if fav.get('id', '') == channel["id"]:
                duplicate = True
                break
        if duplicate:
            if showNotification:
                xbmc.executebuiltin('Notification({0}, [COLOR {1}][B]{2}[/B][/COLOR] Already in favourites, {3}, {4})'.format(AddonName, Addon.getSetting("chColor"), channel["name"].encode("utf-8"), 5000, __icon2__))
            continue
        favsList.append(channel)
        if showNotification:
            xbmc.executebuiltin('Notification({0}, [COLOR {1}][B]{2}[/B][/COLOR] added to favourites, {3}, {4})'.format(AddonName, Addon.getSetting("chColor"), channel["name"].encode("utf-8"), 5000, __icon__))
    common.WriteList(FAV, favsList)
    common.MakeFavouritesGuide(fullGuideFile)
def removeFavorties(indexes):
    """Delete favourites at the given ascending index positions and rebuild
    the favourites guide."""
    favsList = common.ReadList(FAV)
    # Walk the indexes back-to-front so earlier positions remain valid while
    # items are removed.
    for pos in reversed(indexes):
        favsList.remove(favsList[pos])
    common.WriteList(FAV, favsList)
    common.MakeFavouritesGuide(fullGuideFile)
def SaveGuide():
    """Download the full EPG and split it into per-category guide files.

    Returns True on success or when the guide is already current, False on
    any error (which is logged and notified).
    """
    try:
        xbmc.executebuiltin("XBMC.Notification({0}, Saving Guide..., {1}, {2})".format(AddonName, 300000 ,icon))
        if common.UpdateFile(fullGuideFile, "fullGuide", remoteSettings, zip=True, forceUpdate=True):
            xbmc.executebuiltin("XBMC.Notification({0}, Guide saved., {1}, {2})".format(AddonName, 5000 ,icon))
            epg = common.ReadList(fullGuideFile)
            fullCategoriesList = common.ReadList(categoriesFile)
            # Favourites gets its own per-category guide as well.
            fullCategoriesList.append({"id": "Favourites"})
            common.MakeCatGuides(fullCategoriesList, epg)
        else:
            xbmc.executebuiltin("XBMC.Notification({0}, Guide is up to date., {1}, {2})".format(AddonName, 5000 ,icon))
        return True
    except Exception as ex:
        xbmc.log("{0}".format(ex), 3)
        xbmc.executebuiltin("XBMC.Notification({0}, Guide NOT saved!, {1}, {2})".format(AddonName, 5000 ,icon))
        return False
def addDir(name, mode, iconimage=None, description=None, background=None, isFolder=True, isTvGuide=False, channelID=None, categoryID=None, index=None):
    """Add one directory/playable item with a mode-specific context menu.

    Modes handled here: 10 channel, 11 favourite (addressed by list index),
    2 playlist/category, 3 channel group, 16 favourites root; any other mode
    gets no context menu.
    """
    try:
        liz=xbmcgui.ListItem(name)
        liz.setArt({'thumb' : iconimage, 'icon': 'DefaultFolder.png'})
    except:
        # Older Kodi API without setArt.
        liz=xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
    liz.setInfo( type="Video", infoLabels={ "Title": name, "Plot": description} )
    if mode==10 or mode==11:
        liz.setProperty("IsPlayable","true")
        items = []
        if mode == 10:
            if isTvGuide:
                items.append((localizedString(30205).encode('utf-8'), 'XBMC.Container.Update({0}?mode=5&channelid={1}&categoryid={2})'.format(sys.argv[0], channelID, categoryID)))
            items.append((localizedString(30206).encode('utf-8'), 'XBMC.RunPlugin({0}?mode=17&channelid={1}&categoryid={2})'.format(sys.argv[0], channelID, categoryID)))
        elif mode == 11:
            # Favourite rows use the favourites-list index as their id.
            if isTvGuide:
                items.append((localizedString(30205).encode('utf-8'), 'XBMC.Container.Update({0}?mode=5&channelid={1}&categoryid={2})'.format(sys.argv[0], index, categoryID)))
            items.append((localizedString(30207).encode('utf-8'), 'XBMC.RunPlugin({0}?mode=18&channelid={1})'.format(sys.argv[0], index)))
            # Move up / move down / move-to-position entries (mode 41).
            items.append((localizedString(30021).encode('utf-8'), 'XBMC.RunPlugin({0}?mode=41&channelid={1}&iconimage=-1)'.format(sys.argv[0], index)))
            items.append((localizedString(30022).encode('utf-8'), 'XBMC.RunPlugin({0}?mode=41&channelid={1}&iconimage=1)'.format(sys.argv[0], index)))
            items.append((localizedString(30023).encode('utf-8'), 'XBMC.RunPlugin({0}?mode=41&channelid={1}&iconimage=0)'.format(sys.argv[0], index)))
            channelID = index
        liz.addContextMenuItems(items = items)
    elif mode == 2:
        items = []
        items.append((localizedString(30210).encode('utf-8'), 'XBMC.Container.Update({0}?mode=37&categoryid={1})'.format(sys.argv[0], channelID)))
        items.append((localizedString(30212).encode('utf-8'), 'XBMC.Container.Update({0}?mode=38&categoryid={1})'.format(sys.argv[0], channelID)))
        if useCategories:
            # Category reordering entries (mode 42).
            items.append((localizedString(30021).encode('utf-8'), 'XBMC.RunPlugin({0}?mode=42&channelid={1}&iconimage=-1)'.format(sys.argv[0], index)))
            items.append((localizedString(30022).encode('utf-8'), 'XBMC.RunPlugin({0}?mode=42&channelid={1}&iconimage=1)'.format(sys.argv[0], index)))
            items.append((localizedString(30023).encode('utf-8'), 'XBMC.RunPlugin({0}?mode=42&channelid={1}&iconimage=0)'.format(sys.argv[0], index)))
        liz.addContextMenuItems(items = items)
    elif mode == 3:
        if isTvGuide:
            liz.addContextMenuItems(items = [(localizedString(30205).encode('utf-8'), 'XBMC.Container.Update({0}?mode=5&channelid={1}&categoryid={2})'.format(sys.argv[0], channelID, categoryID))])
        iconimage = background
    elif mode == 16:
        liz.addContextMenuItems(items =
            [(localizedString(30211).encode('utf-8'), 'XBMC.Container.Update({0}?mode=39)'.format(sys.argv[0])),
            (localizedString(30213).encode('utf-8'), 'XBMC.Container.Update({0}?mode=40)'.format(sys.argv[0])),
            (localizedString(30224).encode('utf-8'), 'XBMC.Container.Update({0}?mode=45)'.format(sys.argv[0]))])
    if background is not None:
        liz.setProperty("Fanart_Image", background)
    if iconimage is None: iconimage = 'DefaultFolder.png'
    urlParams = {'mode': str(mode), 'iconimage': iconimage, 'channelid': str(channelID)}
    if categoryID is not None:
        urlParams['categoryid'] = str(categoryID)
    fullUrl = '{0}?{1}'.format(sys.argv[0], urllib.urlencode(urlParams))
    xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=fullUrl, listitem=liz, isFolder=isFolder)
def UpdateChannelsLists():
    """Re-download the remote settings and rebuild the channel lists."""
    xbmc.executebuiltin("XBMC.Notification({0}, Updating Channels Lists..., {1}, {2})".format(AddonName, 300000 ,icon))
    # NOTE(review): unlike the module-level call, no settings object is passed
    # here -- verify common.UpdateFile accepts the missing argument.
    common.UpdateFile(remoteSettingsFile, "remoteSettingsZip", zip=True, forceUpdate=True)
    remoteSettings = common.ReadList(remoteSettingsFile)
    if remoteSettings == []:
        xbmc.executebuiltin('Notification({0}, Cannot load settings, {1}, {2})'.format(AddonName, 5000, icon))
        sys.exit()
    common.UpdateChList(remoteSettings)
    xbmc.executebuiltin("XBMC.Notification({0}, Channels Lists updated., {1}, {2})".format(AddonName, 5000 ,icon))
def MakeIPTVlists():
    """Generate the IPTV Simple m3u playlist and XMLTV guide, then refresh PVR."""
    xbmc.executebuiltin("XBMC.Notification({0}, Making IPTV channels list..., {1}, {2})".format(AddonName, 300000 ,icon))
    if not os.path.isfile(listsFile):
        common.UpdateChList()
    myIPTV.makeIPTVlist(iptvChannelsFile)
    xbmc.executebuiltin("XBMC.Notification({0}, Making IPTV TV-guide..., {1}, {2})".format(AddonName, 300000 ,icon))
    myIPTV.MakeChannelsGuide(fullGuideFile, iptvGuideFile)
    myIPTV.RefreshPVR(iptvChannelsFile, iptvGuideFile, iptvLogosDir, forceUpdate=True)
    xbmc.executebuiltin("XBMC.Notification({0}, IPTV channels list and TV-guide created., {1}, {2})".format(AddonName, 5000 ,icon))
def DownloadLogos():
    """Download channel logo images for the IPTV list.

    Skipped entirely when the IPTV type setting is above 1.
    """
    if myIPTV.GetIptvType() > 1:
        return
    startMsg = "XBMC.Notification({0}, Downloading channels logos..., {1}, {2})".format(AddonName, 300000 ,icon)
    xbmc.executebuiltin(startMsg)
    if not os.path.isfile(listsFile):
        common.UpdateChList()
    myIPTV.SaveChannelsLogos(iptvLogosDir)
    doneMsg = "XBMC.Notification({0}, Channels logos saved., {1}, {2})".format(AddonName, 5000 ,icon)
    xbmc.executebuiltin(doneMsg)
def UpdateIPTVSimple():
    """Force-refresh the IPTV Simple PVR with our playlist, guide and logos."""
    notify = xbmc.executebuiltin
    notify("XBMC.Notification({0}, Updating IPTVSimple settings..., {1}, {2})".format(AddonName, 300000 ,icon))
    myIPTV.RefreshPVR(iptvChannelsFile, iptvGuideFile, iptvLogosDir, forceUpdate=True)
    notify("XBMC.Notification({0}, IPTVSimple settings updated., {1}, {2})".format(AddonName, 5000 ,icon))
def CleanLogosFolder():
    """Delete every regular file in the logos folder, logging (not raising)
    per-file failures."""
    if not os.path.exists(iptvLogosDir):
        return
    xbmc.executebuiltin("XBMC.Notification({0}, Cleaning channels logos folder..., {1}, {2})".format(AddonName, 300000 ,icon))
    for entry in os.listdir(iptvLogosDir):
        target = os.path.join(iptvLogosDir, entry)
        try:
            if os.path.isfile(target):
                os.unlink(target)
        except Exception as ex:
            # Best-effort cleanup: log and keep going.
            xbmc.log("{0}".format(ex), 3)
    xbmc.executebuiltin("XBMC.Notification({0}, Channels logos folder cleaned., {1}, {2})".format(AddonName, 5000 ,icon))
def RefreshLiveTV():
    """Full refresh: channel lists, guide, IPTV playlist and logos, in order."""
    for step in (UpdateChannelsLists, SaveGuide, MakeIPTVlists, DownloadLogos):
        step()
def AddCategories():
    """Let the user pick not-yet-selected categories to add to the main menu."""
    if not os.path.isfile(categoriesFile):
        common.UpdateChList()
    allCatList = common.ReadList(categoriesFile)
    selectedCatList = common.ReadList(selectedCategoriesFile)
    # Offer only categories that are not already selected.
    categories = common.GetUnSelectedList(allCatList, selectedCatList)
    categoriesNames = [u"[COLOR {0}][B][{1}][/B][/COLOR]".format(Addon.getSetting("catColor"), item["name"]) for item in categories]
    selected = common.GetMultiChoiceSelected(localizedString(30503).encode('utf-8'), categoriesNames)
    if len(selected) < 1:
        return
    selectedList = [categories[item] for item in selected]
    common.WriteList(selectedCategoriesFile, selectedCatList + selectedList)
def RemoveCategories():
    """Let the user pick selected categories to remove from the main menu."""
    if not os.path.isfile(categoriesFile):
        common.UpdateChList()
    selectedCatList = common.ReadList(selectedCategoriesFile)
    categories = [u"[COLOR {0}][B][{1}][/B][/COLOR]".format(Addon.getSetting("catColor"), item["name"]) for item in selectedCatList]
    selected = common.GetMultiChoiceSelected(localizedString(30503).encode('utf-8'), categories)
    if len(selected) < 1:
        return
    # Remove back-to-front so earlier indexes stay valid.
    for ind in range(len(selected)-1, -1, -1):
        selectedCatList.remove(selectedCatList[selected[ind]])
    common.WriteList(selectedCategoriesFile, selectedCatList)
def AddFavoritesFromCategory(categoryID):
    """Multi-select dialog to add a category's playable channels to favourites."""
    channels = common.GetChannels(categoryID)
    # Only directly playable entries are offered.
    channels = [channel for channel in channels if channel["type"] == "video" or channel["type"] == "audio"]
    channelsNames = [u"[COLOR {0}][B]{1}[/B][/COLOR]".format(Addon.getSetting("chColor"), channel["name"]) for channel in channels]
    selected = common.GetMultiChoiceSelected(localizedString(30208).encode('utf-8'), channelsNames)
    if len(selected) < 1:
        return
    selectedList = [channels[index] for index in selected]
    xbmc.executebuiltin('Notification({0}, Start adding channels to favourites, {1}, {2})'.format(AddonName, 5000, icon))
    addFavorites(selectedList, showNotification=False)
    common.MakeFavouritesGuide(fullGuideFile)
    xbmc.executebuiltin('Notification({0}, Channels added to favourites, {1}, {2})'.format(AddonName, 5000, __icon__))
def AddCategoryToFavorites(categoryID):
    """Add ALL channels of a category to favourites after a yes/no confirmation."""
    allCatList = common.ReadList(categoriesFile)
    category = [u"[COLOR {0}][B][{1}][/B][/COLOR]".format(Addon.getSetting("catColor"), item["name"]) for item in allCatList if item['id'] == categoryID]
    channels = common.GetChannels(categoryID)
    # Confirmation dialog shows the category name and channel count.
    if not common.YesNoDialog(localizedString(30210).encode('utf-8'), localizedString(30221).encode('utf-8'), localizedString(30222).encode('utf-8').format(category[0].encode('utf-8'), len(channels)), localizedString(30223).encode('utf-8'), nolabel=localizedString(30002).encode('utf-8'), yeslabel=localizedString(30001).encode('utf-8')):
        return
    xbmc.executebuiltin('Notification({0}, Start adding channels to favourites, {1}, {2})'.format(AddonName, 5000, icon))
    addFavorites(channels, showNotification=False)
    common.MakeFavouritesGuide(fullGuideFile)
    xbmc.executebuiltin('Notification({0}, Channels added to favourites, {1}, {2})'.format(AddonName, 5000, __icon__))
def AddUserChannelToFavorites():
    """Interactive wizard: add a user-defined channel (name, URL, category,
    type, logo) to the favourites file, rejecting duplicate URLs."""
    chName = common.GetKeyboardText(localizedString(30225).encode('utf-8')).strip()
    if len(chName) < 1:
        return
    chUrl = common.GetKeyboardText(localizedString(30226).encode('utf-8')).strip()
    if len(chUrl) < 1:
        return
    if not os.path.isfile(categoriesFile):
        common.UpdateChList()
    # Category selection.
    categories = common.ReadList(categoriesFile)
    categoriesNames = [u"[COLOR {0}][B][{1}][/B][/COLOR]".format(Addon.getSetting("catColor"), item["name"]) for item in categories]
    categoryInd = common.GetMenuSelected(localizedString(30227).encode('utf-8'), categoriesNames)
    if categoryInd == -1:
        return
    group = categories[categoryInd]["id"]
    # Channel type: video or audio.
    chTypeInd = common.GetMenuSelected(localizedString(30232).encode('utf-8'), [localizedString(30233).encode('utf-8'), localizedString(30234).encode('utf-8')])
    if chTypeInd == 0:
        chType = "video"
    elif chTypeInd == 1:
        chType = "audio"
    else:
        return
    # Logo: typed URL, file browser, or none.
    logoInd = common.GetMenuSelected(localizedString(30228).encode('utf-8'), [localizedString(30229).encode('utf-8'), localizedString(30230).encode('utf-8'), localizedString(30231).encode('utf-8')])
    if logoInd == 0:
        logoFile = common.GetKeyboardText(localizedString(30229).encode('utf-8')).strip()
        if len(logoFile) < 1:
            return
    elif logoInd == 1:
        logoFile = xbmcgui.Dialog().browse(2, localizedString(30230).encode('utf-8'), 'myprograms')
        if logoFile is None or len(logoFile) < 1:
            return
    elif logoInd == 2:
        logoFile = ""
    else:
        return
    favsList = common.ReadList(FAV)
    # Reject duplicates by (case-insensitive) URL.
    for channel in favsList:
        if channel["url"].lower() == chUrl.lower():
            xbmc.executebuiltin('Notification({0}, [COLOR {1}][B]{2}[/B][/COLOR] Already in favourites, {3}, {4})'.format(AddonName, Addon.getSetting("chColor"), chName, 5000, __icon2__))
            return
    data = {"url": chUrl.decode("utf-8"), "group": group, "image": logoFile.decode("utf-8"), "type": chType, "name": chName.decode("utf-8")}
    favsList.append(data)
    if common.WriteList(FAV, favsList):
        xbmc.executebuiltin('Notification({0}, [COLOR {1}][B]{2}[/B][/COLOR] added to favourites, {3}, {4})'.format(AddonName, Addon.getSetting("chColor"), chName, 5000, __icon__))
def RemoveSelectedFavorties():
    """Let the user multi-select favourite channels and remove them.

    Each favourite is labelled with its resolved category name (falling
    back to 'Favourites' when the channel's group id is not found in the
    categories list). After removal the favourites TV guide is rebuilt.
    """
    allCategories = common.ReadList(categoriesFile)
    channels = common.ReadList(FAV)
    channelsNames = []
    for channel in channels:
        # Map the channel's group id to a human-readable category name.
        gp = [x["name"] for x in allCategories if x["id"] == channel.get("group", "")]
        groupName = gp[0] if len(gp) > 0 else 'Favourites'
        channelsNames.append(u"[COLOR {0}][B]{1}[/B][/COLOR] [COLOR {2}][B][{3}][/B][/COLOR]".format(Addon.getSetting("chColor"), channel["name"], Addon.getSetting("catColor"), groupName))
    selected = common.GetMultiChoiceSelected(localizedString(30209).encode('utf-8'), channelsNames)
    if len(selected) < 1:
        return
    xbmc.executebuiltin('Notification({0}, Start removing channels from favourites, {1}, {2})'.format(AddonName, 5000, icon))
    removeFavorties(selected)
    common.MakeFavouritesGuide(fullGuideFile)
    # Fix: user-facing typo "trom" -> "from" in the completion notification.
    xbmc.executebuiltin('Notification({0}, Channels removed from favourites, {1}, {2})'.format(AddonName, 5000, __icon__))
def EmptyFavorties():
    """Remove ALL favourite channels after a yes/no confirmation.

    Writes an empty favourites list and rebuilds the favourites TV guide.
    """
    if not common.YesNoDialog(localizedString(30213).encode('utf-8'), localizedString(30220).encode('utf-8'), "", "", nolabel=localizedString(30002).encode('utf-8'), yeslabel=localizedString(30001).encode('utf-8')):
        return
    xbmc.executebuiltin('Notification({0}, Start removing channels from favourites, {1}, {2})'.format(AddonName, 5000, icon))
    common.WriteList(FAV, [])
    common.MakeFavouritesGuide(fullGuideFile)
    # Fix: user-facing typo "trom" -> "from" in the completion notification.
    xbmc.executebuiltin('Notification({0}, Channels removed from favourites, {1}, {2})'.format(AddonName, 5000, __icon__))
def MoveInList(index, step, listFile):
    """Move the item at *index* by *step* positions inside the list stored
    in *listFile*, then persist the list and refresh the Kodi container.

    A step of 0 means "ask the user for the target position" via
    GetIndexFromUser, which returns the offset to apply (0 cancels).
    """
    theList = common.ReadList(listFile)
    # Reject moves that would land outside the list bounds.
    if index + step >= len(theList) or index + step < 0:
        return
    if step == 0:
        step = GetIndexFromUser(len(theList), index)
    if step < 0:
        # Move backwards: item is re-inserted before the block it jumps over.
        tempList = theList[0:index + step] + [theList[index]] + theList[index + step:index] + theList[index + 1:]
    elif step > 0:
        # Move forwards: the block after the item shifts left by one.
        tempList = theList[0:index] + theList[index + 1:index + 1 + step] + [theList[index]] + theList[index + 1 + step:]
    else:
        # step is still 0 (user cancelled the position dialog) — nothing to do.
        return
    common.WriteList(listFile, tempList)
    xbmc.executebuiltin("XBMC.Container.Refresh()")
def GetIndexFromUser(listLen, index):
    """Prompt the user for a 1-based target position and return the offset
    (step) from *index* to that position.

    Returns 0 when the dialog is cancelled, the input is not a number, or
    the resulting position falls outside the list.
    """
    entered = xbmcgui.Dialog().input('{0} (1-{1})'.format(localizedString(30024).encode('utf-8'), listLen), type=xbmcgui.INPUT_NUMERIC)
    if not entered:
        return 0
    try:
        target = int(entered) - 1
    except:
        return 0
    if 0 <= target < listLen:
        return target - index
    return 0
def ExportFavourites():
    """Save the favourites list as '<name>.txt' in the configured
    import/export folder and notify the user of the file location."""
    exportDir = Addon.getSetting("imExFolder")
    if not exportDir:
        return
    baseName = common.GetKeyboardText(localizedString(30026).encode('utf-8'), "favorites").strip()
    if baseName == "":
        return
    targetPath = os.path.join(exportDir.decode("utf-8"), '{0}.txt'.format(baseName))
    common.WriteList(targetPath, common.ReadList(FAV))
    xbmc.executebuiltin('Notification({0}, Favourites list is saved at {1}, {2}, {3})'.format(AddonName, targetPath, 10000, __icon__))
def ImportFavourites():
    """Import a favourites list from a .txt file in the configured folder.

    The user picks one of the .txt files in the import/export folder and
    confirms the overwrite of the current favourites. The imported list is
    written, the favourites guide is rebuilt, and — when IPTV integration
    is enabled — the IPTV lists and channel logos are regenerated.
    """
    selectedDir = Addon.getSetting("imExFolder")
    if selectedDir is None or selectedDir == "":
        return
    files = [f for f in os.listdir(selectedDir) if f.endswith(".txt")]
    fileInd = common.GetMenuSelected(localizedString(30025).encode('utf-8'), files)
    if fileInd == -1:
        return
    fullPath = os.path.join(selectedDir.decode("utf-8"), files[fileInd])
    favsList = common.ReadList(fullPath)
    if not common.YesNoDialog(localizedString(30215).encode('utf-8'), localizedString(30216).encode('utf-8'), line2=localizedString(30217).encode('utf-8').format(len(favsList)), line3=localizedString(30218).encode('utf-8'), nolabel=localizedString(30002).encode('utf-8'), yeslabel=localizedString(30001).encode('utf-8')):
        return
    common.WriteList(FAV, favsList)
    common.MakeFavouritesGuide(fullGuideFile)
    # Fix: the original message was copy-pasted from ExportFavourites
    # ("Favourites list is saved.") and never used its {1}=fullPath argument;
    # report the actual import source instead.
    xbmc.executebuiltin('Notification({0}, Favourites list imported from {1}, {2}, {3})'.format(AddonName, fullPath, 5000, __icon__))
    if common.getUseIPTV() and int(Addon.getSetting("iptvList")) == 0:
        MakeIPTVlists()
        DownloadLogos()
def Settings():
    """Build the settings sub-menu: one non-folder entry per settings
    action, then apply the configured view mode."""
    gearIcon = 'https://www.ostraining.com/cdn/images/coding/setting.png'
    # (label string id, plugin mode, description string id)
    menuEntries = [
        (30240, 51, 30240),
        (30241, 52, 30241),
        (30304, 32, 30241),
        (30242, 53, 30242),
        (30243, 54, 30243),
    ]
    for labelId, menuMode, descId in menuEntries:
        addDir(localizedString(labelId).encode('utf-8'), menuMode, gearIcon, localizedString(descId).encode('utf-8'), isFolder=False)
    SetViewMode()
def UpdateChannelsAndGuides():
    """Refresh the channel lists and TV guide; when IPTV integration is
    enabled, also regenerate the IPTV lists and channel logos."""
    UpdateChannelsLists()
    SaveGuide()
    if Addon.getSetting("useIPTV") != "true":
        return
    MakeIPTVlists()
    DownloadLogos()
def RefreshUserdataFolder():
    """Clean the addon profile folder and rebuild all Live-TV resources.

    Deletes every file in the profile folder except the favourites list
    and settings.xml, and every file in its 'lists' subfolder except the
    selected-categories file, then re-downloads remote settings and
    refreshes channels, guides and logos.
    """
    xbmc.executebuiltin("XBMC.Notification({0}, Cleaning addon profile folder..., {1}, {2})".format(AddonName, 300000 ,icon))
    settingsFile = os.path.join(user_dataDir, 'settings.xml')
    for the_file in os.listdir(user_dataDir):
        file_path = os.path.join(user_dataDir, the_file)
        try:
            # Keep the favourites file and settings.xml; delete everything else.
            if os.path.isfile(file_path) and file_path != FAV and file_path != settingsFile:
                os.unlink(file_path)
        except Exception as ex:
            # Best-effort cleanup: log and continue with the next file.
            xbmc.log("{0}".format(ex), 3)
    listsDir = os.path.join(user_dataDir, 'lists')
    for the_file in os.listdir(listsDir):
        file_path = os.path.join(listsDir, the_file)
        try:
            # Keep the user's selected-categories file.
            if os.path.isfile(file_path) and file_path != selectedCategoriesFile:
                os.unlink(file_path)
        except Exception as ex:
            xbmc.log("{0}".format(ex), 3)
    xbmc.executebuiltin("XBMC.Notification({0}, Addon profile folder cleaned., {1}, {2})".format(AddonName, 5000 ,icon))
    CleanLogosFolder()
    # Re-fetch the remote settings bundle; the cleanup above removed the
    # cached copy, so force a fresh download before reading it.
    remoteSettings = common.GetRemoteSettings()
    if not os.path.isfile(remoteSettingsFile):
        common.UpdateFile(remoteSettingsFile, "remoteSettingsZip", remoteSettings, zip=True, forceUpdate=True)
    remoteSettings = common.ReadList(remoteSettingsFile)
    if remoteSettings == []:
        xbmc.executebuiltin('Notification({0}, Cannot load settings, {1}, {2})'.format(AddonName, 5000, icon))
        return
    UpdateChannelsAndGuides()
def SearchChannel(categoryID):
    """Ask the user for a search term and navigate to the search-results
    listing (plugin mode 4) for *categoryID*."""
    term = common.GetKeyboardText(localizedString(30028).encode('utf-8'), '').strip().lower()
    if term == '':
        return
    query = urllib.urlencode({'mode': '4', 'categoryid': categoryID, 'channelid': term})
    xbmc.executebuiltin('Container.Update({0})'.format('{0}?{1}'.format(sys.argv[0], query)))
def SearchResults(filter, categoryID):
    """List the channels of *categoryID* whose (lower-cased) name contains
    *filter*; show a red "no results" entry when nothing matches."""
    matches = [ch for ch in common.GetChannelsFlat(categoryID)
               if ch["type"] != 'ignore' and filter in ch['name'].encode('utf-8').lower()]
    if matches:
        ListLive(categoryID=categoryID, catChannels=matches, makeGroup=False, catName=True)
    else:
        addDir('[COLOR red]{0}[/COLOR]'.format(localizedString(30029).encode('utf-8')), 99, isFolder=False)
# --- Plugin entry point: parse the invocation URL and dispatch on 'mode'. ---
# sys.argv[2] carries the query string (e.g. "?mode=2&categoryid=..."); the
# leading '?' is stripped before parsing into a dict.
params = dict(urlparse.parse_qsl(sys.argv[2].replace('?','')))
mode = params.get('mode')
iconimage = params.get('iconimage')
channelID = params.get('channelid')
categoryID = params.get('categoryid')
#xbmc.log("----> {0}".format(sys.argv), 5)
#xbmc.log("----> Mode: {0}".format(mode), 5)
#xbmc.log("----> IconImage: {0}".format(iconimage), 5)
#xbmc.log("----> categoryID: {0}".format(categoryID), 5)
#xbmc.log("----> channelID: {0}".format(channelID), 5)
# Set True by handlers that build a directory listing; triggers
# endOfDirectory() at the bottom of the dispatch.
updateList = False
if mode is None:
    # No mode: show the top-level categories, or resolve a channel addressed
    # directly by id (e.g. from an external shortcut).
    if channelID is None:
        CATEGORIES()
    else:
        item = common.GetChannelByID(channelID)
        # NOTE(review): 'type' shadows the builtin; kept as-is (doc-only edit).
        type = item.get('type', '')
        if type == 'video' or type == 'audio':
            PlayChannelByID(channel=item)
        elif type == 'playlist':
            ListLive(catChannels=item["list"], showSearch=True)
            updateList = True
elif mode == '1' or mode == '10':  # Play a channel by id
    updateList = PlayChannelByID(chID=channelID)
elif mode == '2':  # List a category's channels (channelID carries the category id here)
    ListLive(categoryID=channelID, iconimage=iconimage, showSearch=True)
    updateList = True
elif mode == '3':  # List a category focused on a specific channel
    ListLive(categoryID=categoryID, iconimage=iconimage, chID=channelID)
    updateList = True
elif mode == '4':  # Show search results (channelID carries the search term)
    SearchResults(channelID, categoryID)
    updateList = True
elif mode == '5':  # Show a channel's TV guide
    ChannelGuide(channelID, categoryID)
    updateList = True
elif mode == '11':  # Play a channel from the favourites list
    updateList = PlayChannelByID(chID=channelID, fromFav=True)
elif mode == '16':  # List favourite channels
    listFavorites()
    updateList = True
elif mode == '17':  # Add a single channel to favourites
    channels = common.GetChannels(categoryID)
    channel = [x for x in channels if x["id"] == channelID]
    if len(channel) < 1:
        xbmc.executebuiltin('Notification({0}, Cannot add this channel to favourites, {2}, {3})'.format(AddonName, Addon.getSetting("chColor"), 5000, __icon2__))
    else:
        addFavorites(channel)
    updateList = True
elif mode == '18':  # Remove a single favourite by its list index
    removeFavorties([int(channelID)])
    xbmc.executebuiltin("XBMC.Container.Refresh()")
elif mode == '20': # Download Guide now - from server
    SaveGuide()
elif mode == '22': # Update Channels Lists now
    UpdateChannelsLists()
elif mode == '23': # Clean addon profile folder and refresh lists
    RefreshUserdataFolder()
elif mode == '30': # Make IPTV channels list and TV-guide
    MakeIPTVlists()
elif mode == '31': # Download channels logos
    DownloadLogos()
elif mode == '32': # Update IPTVSimple settings
    UpdateIPTVSimple()
elif mode == '33': # Empty channels logos folder
    CleanLogosFolder()
elif mode == '34': # Refresh ALL Live TV required resources
    RefreshLiveTV()
elif mode == '35': # Add categories to display in addon
    AddCategories()
elif mode == '36': # Remove categories from display in addon
    RemoveCategories()
elif mode == '37': # Add selected channels from category to favourites
    AddFavoritesFromCategory(categoryID)
elif mode == '38': # Add the whole group channels from category to favourites
    AddCategoryToFavorites(categoryID)
elif mode == '39': # Remove selected channels from favorites
    RemoveSelectedFavorties()
elif mode == '40': # Remove all channels from favorites
    EmptyFavorties()
elif mode == '41': # Move channel location in favourites
    MoveInList(int(channelID), int(iconimage), FAV)
    updateList = True
elif mode == '42': # Move selected category location
    MoveInList(int(channelID), int(iconimage), selectedCategoriesFile)
    updateList = True
elif mode == '43': # Export IsraeLIVE favourites
    ExportFavourites()
elif mode == '44': # Import IsraeLIVE favourites
    ImportFavourites()
elif mode == '45': # Add an external channel to IsraeLIVE favourites
    AddUserChannelToFavorites()
elif mode == '50':  # Show the settings sub-menu
    Settings()
    updateList = True
elif mode == '51':  # Open this addon's settings dialog
    Addon.openSettings()
elif mode == '52':  # Open the URL-resolver addon's settings dialog
    xbmc.executebuiltin('Addon.OpenSettings("{0}")'.format(resolverID))
elif mode == '53':  # Update channels lists and guides
    UpdateChannelsAndGuides()
elif mode == '54': # Clean addon profile folder and refresh lists
    RefreshUserdataFolder()
elif mode == '60': # Search
    SearchChannel(categoryID=categoryID)
elif mode == '100': # CheckUpdates
    checkUpdates.Update()
elif mode == '101': # Update IPTV lists
    updateM3U.Update()
# Close the directory listing for handlers that produced one.
if updateList:
    xbmcplugin.endOfDirectory(int(sys.argv[1]), cacheToDisc=False)
|
noam09/kodi
|
plugin.video.israelive/default.py
|
Python
|
gpl-3.0
| 40,169
|
[
"Amber"
] |
76aca9303b8e06a5b912754c2a87b112f3a976b76aa8e5a55794ef816d17b992
|
#!/usr/bin/env python
# encoding: utf-8
import os
import sys
from modularodm import Q
from modularodm.exceptions import ModularOdmException
from framework.auth.core import User
from website import settings
from website.app import init_app
from website.conferences.model import Conference
from datetime import datetime
def main():
    """Initialise the OSF application (backends only, no routes) and
    populate conference records; pass 'dev' on the command line to run
    in development mode."""
    init_app(set_backends=True, routes=False)
    populate_conferences(dev=('dev' in sys.argv))
MEETING_DATA = {
'spsp2014': {
'name': 'Society for Personality and Social Psychology 2014',
'info_url': None,
'logo_url': None,
'location': 'Austin, TX',
'start_date': 'Feb 13 2014',
'end_date': 'Feb 15 2014',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'asb2014': {
'name': 'Association of Southeastern Biologists 2014',
'info_url': 'http://www.sebiologists.org/meetings/talks_posters.html',
'logo_url': None,
'location': 'Spartanburg, SC',
'start_date': 'Apr 2 2014',
'end_date': 'Apr 4 2014',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'aps2014': {
'name': 'Association for Psychological Science 2014',
'info_url': 'https://cos.io/aps/',
'logo_url': '/static/img/2014_Convention_banner-with-APS_700px.jpg',
'location': 'San Franscisco, CA',
'start_date': 'May 22 2014',
'end_date': 'May 25 2014',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'annopeer2014': {
'name': '#annopeer',
'info_url': None,
'logo_url': None,
'location': None,
'start_date': None,
'end_date': None,
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'cpa2014': {
'name': 'Canadian Psychological Association 2014',
'info_url': None,
'logo_url': None,
'location': 'Vancouver, BC',
'start_date': 'Jun 05 2014',
'end_date': 'Jun 07 2014',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'filaments2014': {
'name': 'National Radio Astronomy Observatory Filaments 2014',
'info_url': None,
'logo_url': 'https://science.nrao.edu/science/meetings/2014/'
'filamentary-structure/images/filaments2014_660x178.png',
'location': 'Charlottesville, VA',
'start_date': 'Oct 10 2014',
'end_date': 'Oct 11 2014',
'active': False,
'admins': [
'lvonschi@nrao.edu',
# 'Dkim@nrao.edu',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'bitss2014': {
'name': 'Berkeley Initiative for Transparency in the Social Sciences Research Transparency Forum 2014',
'info_url': None,
'logo_url': os.path.join(
settings.STATIC_URL_PATH,
'img',
'conferences',
'bitss.jpg',
),
'location': 'Berkeley, CA',
'start_date': 'Dec 11 2014',
'end_date': 'Dec 12 2014',
'active': False,
'admins': [
'gkroll@berkeley.edu',
'awais@berkeley.edu',
],
'public_projects': True,
'poster': False,
'talk': True,
},
'spsp2015': {
'name': 'Society for Personality and Social Psychology 2015',
'info_url': None,
'logo_url': None,
'location': 'Long Beach, CA',
'start_date': 'Feb 26 2015',
'end_date': 'Feb 28 2015',
'active': False,
'admins': [
'meetings@spsp.org',
],
'poster': True,
'talk': True,
},
'aps2015': {
'name': 'Association for Psychological Science 2015',
'info_url': None,
'logo_url': 'http://www.psychologicalscience.org/images/APS_2015_Banner_990x157.jpg',
'location': 'New York, NY',
'start_date': 'May 21 2015',
'end_date': 'May 24 2015',
'admins': [],
'active': False,
'public_projects': True,
'poster': True,
'talk': True,
},
'icps2015': {
'name': 'International Convention of Psychological Science 2015',
'info_url': None,
'logo_url': 'http://icps.psychologicalscience.org/wp-content/themes/deepblue/images/ICPS_Website-header_990px.jpg',
'location': 'Amsterdam, The Netherlands',
'start_date': 'Mar 12 2015',
'end_date': 'Mar 14 2015',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'mpa2015': {
'name': 'Midwestern Psychological Association 2015',
'info_url': None,
'logo_url': 'http://www.midwesternpsych.org/resources/Pictures/MPA%20logo.jpg',
'location': 'Chicago, IL',
'start_date': 'Apr 30 2015',
'end_date': 'May 02 2015',
'active': False,
'admins': [
'mpa@kent.edu',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'NCCC2015': {
'name': 'North Carolina Cognition Conference 2015',
'info_url': None,
'logo_url': None,
'location': 'Elon, NC',
'start_date': 'Feb 21 2015',
'end_date': 'Feb 21 2015',
'active': False,
'admins': [
'aoverman@elon.edu',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'VPRSF2015': {
'name': 'Virginia Piedmont Regional Science Fair 2015',
'info_url': None,
'logo_url': 'http://vprsf.org/wp-content/themes/VPRSF/images/logo.png',
'location': 'Charlottesville, VA',
'start_date': 'Mar 17 2015',
'end_date': 'Mar 17 2015',
'active': False,
'admins': [
'director@vprsf.org',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'APRS2015': {
'name': 'UVA Annual Postdoctoral Research Symposium 2015',
'info_url': None,
'logo_url': 'http://s1.postimg.org/50qj9u6i7/GPA_Logo.jpg',
'location': 'Charlottesville, VA',
'start_date': None,
'end_date': None,
'active': False,
'admins': [
'mhurst@virginia.edu',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'ASB2015': {
'name': 'Association of Southeastern Biologists 2015',
'info_url': None,
'logo_url': 'http://www.sebiologists.org/wp/wp-content/uploads/2014/09/banner_image_Large.png',
'location': 'Chattanooga, TN',
'start_date': 'Apr 01 2015',
'end_date': 'Apr 04 2015',
'active': False,
'admins': [
'amorris.mtsu@gmail.com',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'TeaP2015': {
'name': 'Tagung experimentell arbeitender Psychologen 2015',
'info_url': None,
'logo_url': None,
'location': 'Hildesheim, Germany',
'start_date': 'Mar 08 2015',
'end_date': 'Mar 11 2015',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'VSSEF2015': {
'name': 'Virginia State Science and Engineering Fair 2015',
'info_url': 'http://www.vmi.edu/conferences/vssef/vssef_home/',
'logo_url': 'http://www.vmi.edu/uploadedImages/Images/Headers/vssef4.jpg',
'location': 'Lexington, VA',
'start_date': 'Mar 27 2015',
'end_date': 'Mar 28 2015',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'RMPA2015': {
'name': 'Rocky Mountain Psychological Association 2015',
'info_url': 'http://www.rockymountainpsych.org/uploads/7/4/2/6/7426961/85th_annual_rmpa_conference_program_hr.pdf',
'logo_url': 'http://www.rockymountainpsych.org/uploads/7/4/2/6/7426961/header_images/1397234084.jpg',
'location': 'Boise, Idaho',
'start_date': 'Apr 09 2015',
'end_date': 'Apr 11 2015',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'ARP2015': {
'name': 'Association for Research in Personality 2015',
'info_url': 'http://www.personality-arp.org/conference/',
'logo_url': 'http://www.personality-arp.org/wp-content/uploads/conference/st-louis-arp.jpg',
'location': 'St. Louis, MO',
'start_date': 'Jun 11 2015',
'end_date': 'Jun 13 2015',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'SEP2015': {
'name': 'Society of Experimental Psychologists Meeting 2015',
'info_url': 'http://faculty.virginia.edu/Society_of_Experimental_Psychologists/',
'logo_url': 'http://www.sepsych.org/nav/images/SEP-header.gif',
'location': 'Charlottesville, VA',
'start_date': 'Apr 17 2015',
'end_date': 'Apr 18 2015',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'Reid2015': {
'name': 'L. Starling Reid Undergraduate Psychology Conference 2015',
'info_url': 'http://avillage.web.virginia.edu/Psych/Conference',
'location': 'Charlottesville, VA',
'start_date': 'Apr 17 2015',
'end_date': 'Apr 17 2015',
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'NEEPS2015': {
'name': 'Northeastern Evolutionary Psychology Conference 2015',
'info_url': 'http://neeps2015.weebly.com/',
'location': 'Boston, MA',
'start_date': 'Apr 09 2015',
'end_date': 'Apr 11 2015',
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'VaACS2015': {
'name': 'Virginia Section American Chemical Society Student Poster Session 2015',
'info_url': 'http://virginia.sites.acs.org/',
'logo_url': 'http://virginia.sites.acs.org/Bulletin/15/UVA.jpg',
'location': 'Charlottesville, VA',
'start_date': 'Apr 17 2015',
'end_date': 'Apr 17 2015',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'MADSSCi2015': {
'name': 'Mid-Atlantic Directors and Staff of Scientific Cores & Southeastern Association of Shared Services 2015',
'info_url': 'http://madssci.abrf.org',
'logo_url': 'http://s24.postimg.org/qtc3baefp/2015madssci_seasr.png',
'location': 'Charlottesville, VA',
'start_date': 'Jun 03 2015',
'end_date': 'Jun 5 2015',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'NRAO2015': {
'name': 'National Radio Astronomy Observatory Accretion 2015',
'info_url': 'https://science.nrao.edu/science/meetings/2015/accretion2015/posters',
'location': 'Charlottesville, VA',
'start_date': 'Oct 09 2015',
'end_date': 'Oct 10 2015',
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'ARCS2015': {
'name': 'Advancing Research Communication and Scholarship 2015',
'info_url': 'http://commons.pacificu.edu/arcs/',
'logo_url': 'http://commons.pacificu.edu/assets/md5images/4dfd167454e9f4745360a9550e189323.png',
'location': 'Philadelphia, PA',
'start_date': 'Apr 26 2015',
'end_date': 'Apr 28 2015',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'singlecasedesigns2015': {
'name': 'Single Case Designs in Clinical Psychology: Uniting Research and Practice',
'info_url': 'https://www.royalholloway.ac.uk/psychology/events/eventsarticles/singlecasedesignsinclinicalpsychologyunitingresearchandpractice.aspx',
'logo_url': None,
'location': 'London, UK',
'start_date': 'Apr 17 2015',
'end_date': 'Apr 17 2015',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'OSFM2015': {
'name': 'OSF for Meetings 2015',
'info_url': None,
'logo_url': None,
'location': 'Charlottesville, VA',
'start_date': None,
'end_date': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'JSSP2015': {
'name': 'Japanese Society of Social Psychology 2015',
'info_url': 'http://www.socialpsychology.jp/conf2015/index.html',
'logo_url': None,
'location': 'Tokyo, Japan',
'start_date': 'Oct 31 2015',
'end_date': 'Nov 01 2015',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'4S2015': {
'name': 'Society for Social Studies of Science 2015',
'info_url': 'http://www.4sonline.org/meeting',
'logo_url': 'http://www.4sonline.org/ee/denver-skyline.jpg',
'location': 'Denver, CO',
'start_date': 'Nov 11 2015',
'end_date': 'Nov 14 2015',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'IARR2016': {
'name': 'International Association for Relationship Research 2016',
'info_url': 'http://iarr.psych.utoronto.ca/',
'logo_url': None,
'location': 'Toronto, Canada',
'start_date': 'Jul 20 2016',
'end_date': 'Jul 24 2016',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'IA2015': {
'name': 'Inclusive Astronomy 2015',
'info_url': 'https://vanderbilt.irisregistration.com/Home/Site?code=InclusiveAstronomy2015',
'logo_url': 'https://vanderbilt.blob.core.windows.net/images/Inclusive%20Astronomy.jpg',
'location': 'Nashville, TN',
'start_date': 'Jun 17 2015',
'end_date': 'Jun 19 2015',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'R2RC': {
'name': 'Right to Research Coalition',
'info_url': None,
'logo_url': None,
'location': None,
'start_date': None,
'end_date': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'OpenCon2015': {
'name': 'OpenCon2015',
'info_url': 'http://opencon2015.org/',
'logo_url': 'http://s8.postimg.org/w9b30pxyd/Open_Con2015_new_logo.png',
'location': 'Brussels, Belgium',
'start_date': 'Nov 14 2015',
'end_date': 'Nov 16 2015',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'ESIP2015': {
'name': 'Earth Science Information Partners 2015',
'info_url': 'http://esipfed.org/',
'logo_url': 'http://s30.postimg.org/m2uz2g4pt/ESIP.png',
'location': None,
'start_date': None,
'end_date': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'SPSP2016': {
'name': 'Society for Personality and Social Psychology 2016 ',
'info_url': 'http://meeting.spsp.org',
'logo_url': None,
'location': 'San Diego, CA',
'start_date': 'Jan 28 2016',
'end_date': 'Jan 30 2016',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'NACIII': {
'name': '2015 National Astronomy Consortium (NAC) III Workshop',
'info_url': 'https://info.nrao.edu/do/odi/meetings/2015/nac111/',
'logo_url': None,
'location': 'Washington, DC',
'start_date': 'Aug 29 2015',
'end_date': 'Aug 30 2015',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'CDS2015': {
'name': 'Cognitive Development Society 2015',
'info_url': 'http://meetings.cogdevsoc.org/',
'logo_url': None,
'location': 'Columbus, OH',
'start_date': 'Oct 09 2015',
'end_date': 'Oct 10 2015',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'SEASR2016': {
'name': 'Southeastern Association of Shared Resources 2016',
'info_url': 'http://seasr.abrf.org',
'logo_url': None,
'location': 'Atlanta, GA',
'start_date': 'Jun 22 2016',
'end_date': 'Jun 24 2016',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'Accretion2015': {
'name': 'Observational Evidence of Gas Accretion onto Galaxies?',
'info_url': 'https://science.nrao.edu/science/meetings/2015/accretion2015',
'logo_url': None,
'location':'Charlottesville, VA',
'start_date':'Oct 09 2015',
'end_date':'Oct 10 2015',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'2020Futures': {
'name': 'U.S. Radio/Millimeter/Submillimeter Science Futures in the 2020s',
'info_url': 'https://science.nrao.edu/science/meetings/2015/2020futures/home',
'logo_url': None,
'location':'Chicago, IL',
'start_date':'Dec 15 2015',
'end_date':'Dec 17 2015',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'RMPA2016': {
'name': 'Rocky Mountain Psychological Association 2016',
'info_url': 'http://www.rockymountainpsych.org/convention-info.html',
'logo_url': 'http://www.rockymountainpsych.org/uploads/7/4/2/6/7426961/header_images/1397234084.jpg',
'location':'Denver, CO',
'start_date':'Apr 14 2016',
'end_date':'Apr 16 2016',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'CNI2015': {
'name': 'Coalition for Networked Information (CNI) Fall Membership Meeting 2015',
'info_url': 'https://wp.me/P1LncT-64s',
'logo_url': None,
'location':'Washington, DC',
'start_date':'Dec 14 2015',
'end_date':'Dec 16 2015',
'active': True,
'admins': [],
'public_projects': True,
'poster': False,
'talk': True,
},
'SWPA2016': {
'name': 'Southwestern Psychological Association Convention 2016',
'info_url': 'https://www.swpsych.org/conv_dates.php',
'logo_url': 'http://s28.postimg.org/xbwyqqvx9/SWPAlogo4.jpg',
'location':'Dallas, TX',
'start_date':'Apr 08 2016',
'end_date':'Apr 10 2016',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'ESIP2016W': {
'name': 'Earth Science Information Partners Winter Meeting 2016',
'info_url': 'http://commons.esipfed.org/2016WinterMeeting',
'logo_url': 'http://s30.postimg.org/m2uz2g4pt/ESIP.png',
'location':'Washington, DC',
'start_date':'Jan 06 2016',
'end_date':'Jan 08 2016',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'MiamiBrainhack15': {
'name': 'University of Miami Brainhack 2015',
'info_url': 'http://brainhack.org/americas/',
'logo_url': None,
'location': None,
'start_date': 'Oct 23 2015',
'end_date': 'Oct 25 2015',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'PsiChiRepository': {
'name': 'Psi Chi',
'location': None,
'start_date': None,
'end_date': None,
'info_url': 'http://www.psichi.org/?ResearchAdvisory#.VmBpeOMrI1g',
'logo_url': 'http://s11.postimg.org/4g2451vcz/Psi_Chi_Logo.png',
'active': True,
'admins': [
'research.director@psichi.org',
],
'field_names': {
'submission1': 'measures',
'submission2': 'materials',
'submission1_plural': 'measures/scales',
'submission2_plural': 'study materials',
'meeting_title_type': 'Repository',
'add_submission': 'materials',
'mail_subject': 'Title',
'mail_message_body': 'Measure or material short description',
'mail_attachment': 'Your measure/scale or material file(s)'
},
},
'GI2015': {
'name': 'Genome Informatics 2015',
'info_url': 'https://meetings.cshl.edu/meetings.aspx?meet=info&year=15',
'logo_url': None,
'location':'Cold Spring Harbor, NY' ,
'start_date': 'Oct 28 2015',
'end_date': 'Oct 31 2015',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'MADSSCi2016': {
'name': 'Mid-Atlantic Directors and Staff of Scientific Cores & Southeastern Association of Shared Services 2016',
'info_url': 'http://madssci.abrf.org',
'logo_url': 'http://madssci.abrf.org/sites/default/files/madssci-logo-bk.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'SMM2015': {
'name': 'The Society for Marine Mammalogy',
'info_url': 'https://www.marinemammalscience.org/conference/',
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'TESS': {
'name': 'Time-sharing Experiments for the Social Sciences',
'info_url': 'http://www.tessexperiments.org',
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
'poster': False,
'talk': True,
'field_names': {
'submission1': 'poster',
'submission2': 'study',
'submission1_plural': 'posters',
'submission2_plural': 'studies',
'meeting_title_type': 'Studies',
'add_submission': 'studies',
}
},
'ASCERM2016': {
'name': 'ASCE Rocky Mountain Student Conference 2016',
'info_url': 'http://luninuxos.com/asce/',
'logo_url': 'http://s2.postimg.org/eaduh2ovt/2016_ASCE_Rocky_Mtn_banner.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': False,
'talk': True,
},
'ARCA2016': {
'name': '5th Applied Research Conference in Africa',
'info_url': 'http://www.arcaconference.org/',
'logo_url': 'http://www.arcaconference.org/images/ARCA_LOGO_NEW.JPG',
'active': True,
'admins': [],
'public_projects': True,
'poster': False,
'talk': True,
},
'CURCONF2016': {
'name': 'CUR Biennial Conference 2016',
'info_url': 'http://www.cur.org/conferences_and_events/biennial2016/',
'logo_url': 'http://s11.postimg.org/v8feuna4y/Conference_logo_eps.jpg',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'CATALISE2016': {
'name': 'Criteria and Terminology Applied to Language Impairments: Synthesising the Evidence (CATALISE) 2016',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'Emergy2016': {
'name': '9th Biennial Emergy Research Conference',
'info_url': 'http://www.cep.ees.ufl.edu/emergy/conferences/ERC09_2016/index.shtml',
'logo_url': 'http://s12.postimg.org/uf9ioqmct/emergy.jpg',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'aps2016': {
'name': 'Association for Psychological Science 2016',
'info_url': 'http://www.psychologicalscience.org/convention',
'logo_url': 'http://www.psychologicalscience.org/redesign/wp-content/uploads/2015/03/APS_2016_Banner_990x157.jpg',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'jssp2016': {
'name': 'Japanese Society of Social Psychology 2016',
'info_url': 'http://www.socialpsychology.jp/conf2016/',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'sepech2016': {
'name': 'XI SEPECH - Research Seminar in Human Sciences (Seminário de Pesquisa em Ciências Humanas)',
'info_url': 'http://www.uel.br/eventos/sepech/sepech2016/',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'etmaal2016': {
'name': 'Etmaal van de Communicatiewetenschap 2016 - Media Psychology',
'info_url': 'https://etmaal2016.wordpress.com',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'WSAN2016': {
'name': 'WSAN2016 Erasmus University Rotterdam',
'info_url': 'http://www.humane.eu/wsan/',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'ContainerStrategies': {
'name': 'Container Strategies for Data & Software Preservation',
'info_url': 'https://daspos.crc.nd.edu/index.php/workshops/container-strategies-for-data-software-preservation-that-promote-open-science',
'logo_url': 'http://s17.postimg.org/8nl1v5mxb/Screen_Shot_2016_03_02_at_9_05_24_PM.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
},
'CNI2016': {
'name': 'Coalition for Networked Information (CNI) Spring Membership Meeting 2016',
'info_url': 'https://wp.me/P1LncT-6fd',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': False,
'talk': True,
},
'XGAL2016': {
'name': 'Molecular Gas in Galactic Environments 2016',
'info_url': 'https://science.nrao.edu/science/meetings/2016/molecular-gas-in-galactic-environments/home',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'DLF2016': {
'name': 'Digital Library Federation 2016 DLF Forum',
'info_url': 'https://www.diglib.org/forums/2016forum/',
'logo_url': 'https://www.diglib.org/wp-content/themes/construct/lib/scripts/timthumb/thumb.php?src=https://www.diglib.org/wp-content/uploads/2016/02/DLF-Forum-2016-Slider-Website-1.png&w=580&h=252&zc=1&q=100',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
'start_date': 'Nov 7 2016',
'end_date': 'Nov 9 2016',
'locztion': 'Milwaukee, Wisconsin',
},
'ESCAN2016': {
'name': 'European Society for Cognitive and Affective Neuroscience (ESCAN) 2016',
'info_url': 'http://congressos.abreu.pt/escan2016/',
'logo_url': 'http://congressos.abreu.pt/escan2016/images/escan-logo.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'Reid2016': {
'name': 'L. Starling Reid Undergraduate Psychology Conference 2016',
'info_url': 'http://cacsprd.web.virginia.edu/Psych/Conference',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'CNS2016': {
'name': 'The Cognitive Neuroscience Society (CNS) 2016',
'info_url': 'http://www.cogneurosociety.org/annual-meeting/',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'HEPA2016': {
'name': 'HEPA Europe Annual Meeting 2016',
'info_url': 'http://www.hepaeurope2016.eu/',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'OGH': {
'name': 'Open Global Health',
'info_url': None,
'logo_url': 'http://s33.postimg.org/7tjjpvg4f/Drawing.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
}
def populate_conferences(dev=False):
    """Create or update Conference records from the MEETING_DATA dict.

    :param bool dev: development mode; wipes all existing conferences
        first and skips admin user lookup.
    :raises RuntimeError: if an admin email is not a registered username
        (non-dev mode only).
    """
    if dev:
        Conference.remove()
    date_format = '%b %d %Y'
    # dict.items() works on both Python 2 and 3 (iteritems is py2-only)
    for meeting, attrs in MEETING_DATA.items():
        meeting = meeting.strip()
        admin_emails = attrs.pop('admins', [])
        admin_objs = []
        if not dev:
            # resolve admin emails to User records; fail loudly on unknowns
            for email in admin_emails:
                try:
                    user = User.find_one(Q('username', 'iexact', email))
                    admin_objs.append(user)
                except ModularOdmException:
                    raise RuntimeError('Username {0!r} is not registered.'.format(email))
        # Convert date strings into datetime objects.
        # strptime(None, ...) raises TypeError when a date is absent.
        # NOTE(review): if 'end_date' is missing, the TypeError also skips the
        # 'start_date' conversion -- both dates must be supplied together.
        try:
            attrs['end_date'] = datetime.strptime(attrs.get('end_date'), date_format)
            attrs['start_date'] = datetime.strptime(attrs.get('start_date'), date_format)
        except TypeError:
            # print() with a single argument behaves identically on py2 and py3
            print('** Meeting {} does not have a start or end date. **'.format(meeting))
        custom_fields = attrs.pop('field_names', {})
        conf = Conference(
            endpoint=meeting, admins=admin_objs, **attrs
        )
        conf.field_names.update(custom_fields)
        try:
            conf.save()
        except ModularOdmException:
            # endpoint already exists: merge the new attributes into it
            conf = Conference.find_one(Q('endpoint', 'eq', meeting))
            for key, value in attrs.items():
                if isinstance(value, dict):
                    # merge nested dicts (e.g. field_names) instead of replacing
                    current = getattr(conf, key)
                    current.update(value)
                    setattr(conf, key, current)
                else:
                    setattr(conf, key, value)
            conf.admins = admin_objs
            changed_fields = conf.save()
            if changed_fields:
                print('Updated {}: {}'.format(meeting, changed_fields))
        else:
            print('Added new Conference: {}'.format(meeting))
# Script entry point: populate (or update) all conferences defined above.
if __name__ == '__main__':
    main()
|
rdhyee/osf.io
|
scripts/populate_conferences.py
|
Python
|
apache-2.0
| 32,254
|
[
"COLUMBUS"
] |
b12902ed3f14ad26534798f555969eeee2ea456f345cf4f4128f3654fd61da02
|
# Pyctools - a picture processing algorithm development kit.
# http://github.com/jim-easterbrook/pyctools
# Copyright (C) 2014-20 Pyctools contributors
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
__all__ = ['GaussianFilter']
__docformat__ = 'restructuredtext en'
import math
import sys
import numpy
from pyctools.core.config import ConfigFloat
from pyctools.core.base import Component
from pyctools.core.frame import Frame
class GaussianFilter(Component):
    """Gaussian filter generator component.

    Produces a `Gaussian filter
    <http://en.wikipedia.org/wiki/Gaussian_filter>`_ suitable for the
    :py:class:`~.resize.Resize` component's ``filter`` input.

    Because a new filter is sent whenever the configuration changes,
    wiring this component's ``output`` to a running
    :py:class:`~.resize.Resize` lets the filter be adjusted on the fly::

        filgen = GaussianFilter(xsigma=1.5)
        resize = Resize()
        filgen.connect('output', resize.filter)
        ...
        start(..., filgen, resize, ...)
        ...
        filgen.set_config({'xsigma': 1.8})
        ...

    For a fixed filter it is simpler to call
    :py:meth:`GaussianFilter.core` directly and skip the component.

    Setting both ``xsigma`` and ``ysigma`` produces a 2-D filter, but two
    separate :py:class:`~.resize.Resize` components (one per dimension)
    are usually more efficient.

    Config:

    ==========  =====  ====
    ``xsigma``  float  Horizontal standard deviation parameter.
    ``ysigma``  float  Vertical standard deviation parameter.
    ==========  =====  ====

    """
    # this component consumes nothing; it only emits filter frames
    inputs = []
    with_outframe_pool = False

    def initialise(self):
        self.config['xsigma'] = ConfigFloat(min_value=0.0)
        self.config['ysigma'] = ConfigFloat(min_value=0.0)

    def on_start(self):
        # emit the initial filter as soon as the component starts
        self.make_filter()

    def on_set_config(self):
        # re-emit whenever the configuration changes
        self.make_filter()

    def make_filter(self):
        self.update_config()
        self.send('output', self.core(
            x_sigma=self.config['xsigma'], y_sigma=self.config['ysigma']))

    @classmethod
    def core(cls, x_sigma=0.0, y_sigma=0.0):
        """Gaussian filter generator core.

        Alternative to the :py:class:`GaussianFilter` component that can
        be used to make a non-reconfigurable filter::

            resize = Resize()
            resize.filter(GaussianFilter.core(x_sigma=1.5))
            ...
            start(..., resize, ...)
            ...

        :keyword float x_sigma: Horizontal standard deviation parameter.
        :keyword float y_sigma: Vertical standard deviation parameter.
        :return: A :py:class:`~pyctools.core.frame.Frame` object
            containing the filter.
        """
        def gauss_1d(sigma):
            # one-sided taps, starting from the centre (value 1.0) and
            # extending until the coefficient falls to <= 0.0001
            alpha = 1.0 / (2.0 * (max(sigma, 0.0001) ** 2.0))
            taps = [1.0]
            while True:
                value = math.exp(-(alpha * (float(len(taps) ** 2))))
                if value <= 0.0001:
                    break
                taps.append(value)
            # mirror the taps into a symmetric odd-length kernel
            half = len(taps) - 1
            coefs = numpy.zeros((half * 2) + 1, dtype=numpy.float32)
            for offset, value in enumerate(taps):
                coefs[half - offset] = value
                coefs[half + offset] = value
            # normalise to unity gain
            coefs /= coefs.sum()
            return coefs

        x_sigma = max(x_sigma, 0.0)
        y_sigma = max(y_sigma, 0.0)
        horiz = gauss_1d(x_sigma)
        vert = gauss_1d(y_sigma)
        # separable 2-D kernel: outer product, plus a trailing component axis
        coefs_2d = numpy.outer(vert, horiz)[:, :, numpy.newaxis]
        out_frame = Frame()
        out_frame.data = coefs_2d
        out_frame.type = 'fil'
        sigmas = []
        if x_sigma != 0.0:
            sigmas.append('x_sigma={:g}'.format(x_sigma))
        if y_sigma != 0.0:
            sigmas.append('y_sigma={:g}'.format(y_sigma))
        out_frame.set_audit(
            cls,
            'data = GaussianFilterCoefficients({})\n'.format(','.join(sigmas)))
        return out_frame
|
jim-easterbrook/pyctools
|
src/pyctools/components/interp/gaussianfilter.py
|
Python
|
gpl-3.0
| 5,052
|
[
"Gaussian"
] |
5c4c7b9d3740810a10ae21ed2b1bf2dd0475bb445f2ead71c309bd1abd889d39
|
#!/usr/bin/env python
"""
5PrimeCounter
=============
5PrimeCounter analyses a BAM file from a ChIP-Exo experiment in the context of potential transcription factor binding sites (TFBS) presenting a given sequence motif of interest.
Consequently, as input files, 5PrimeCounter needs a BAM file (and its BAI index) and a set of sequence motifs as created by MatrixScanWS for instance, or any output file of RSAT's tool 'matrix_scan'.
Please note that in case of motif hits on both strands in the same location, only the one with highest score will be considered.
Use cases
Use case 1 : basic usage
Use case 2 : validation using permuted matrices
Use case 3 : QC and FASTA creation using an input genome
Developed with:
Python (2.7.3).
Package dependencies:
numpy, HTSeq (see installation guide), pysam (HTSeq dependency)
(optional) pyfasta. If a reference genome is provided to calculate consensus sequences (see Use case 3), 5PrimeCounter also imports the 'pyfasta' package.
Tool developed in Python by Jonas Ibn-Salem.
# Last update: 26.09.2014 (CEH)
"""
epilog="""
15.10.13 Jonas Ibn-Salem <ibnsalem@molgen.mpg.de>
"""
import argparse
import os.path
# import sys
import numpy as np
import HTSeq # For installation see: http://www-huber.embl.de/users/anders/HTSeq/doc/install.html
import subprocess
## Read and check input parameters
def commandline():
    """Parse the command line and return an argparse.Namespace.

    Options are grouped by the three use cases described in the module
    docstring (basic profiling, permuted-matrix validation, QC/FASTA with
    a reference genome) plus generic ordering/subsetting options.
    """
    parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter, epilog=epilog)
    # Use case 1
    parser.add_argument("-bam", "--bam_file", type=str, required=True, help="Bam file of the ChIP-exo experiment. Requires the index of the bam file with name <BAM_FILE>.bai in the same folder.")
    parser.add_argument("-i", "--input_sites", type=str, required=True, help="Input is a file containing predicted Transcription Factor Binding Sites in RSAT matrix-scan output format.")
    parser.add_argument("-o", "--output_prefix", type=str, required=True, help="All output file names will have that prefix. Can include a path.")
    parser.add_argument("-s", "--size", type=int, default=60, help="Window size around motif for which the profile will be computed.")
    parser.add_argument("-ob", "--output_bed", action="store_true", help="Write a BED file with the binding site regions defined by --size and --order_by_score.")
    parser.add_argument("-if", "--input_format", type=str, choices=['matrix-scan'], default="matrix-scan", help="Input format, 'matrix-scan' output (default).")
    # Use case 2
    parser.add_argument("-pm", "--perm", action="store_true", help="Compute profiles from matrix-scan results for permuted matrices. 5PrimeCounter searches for all files with the same name as the <INPUT_SITES> file, plus the tag '_perm' and a number. Files must be located in the same folder as <INPUT_SITES>.")
    # Use case 3
    parser.add_argument("-g", "--genome_seq", type=str, help="Reference genome sequence in FASTA format. If this optional argument is given, the consensus sequence of the motif is plotted at the bottom of profile and heatmap plots. Moreover sequences for all binding regions will be written to an integer-encoded matrix file. Note, if a fasta file is read for the first time, an index is built in the same directory for faster access. First execution can thus be slower.")
    parser.add_argument("-of", "--output_seq", action="store_true", help="Write a genomic sequences in FASTA format for the binding site regions defined by --size and --order_by_score. Needs a genome in FASTA format to be provided using option --genome_seq.")
    # Other options
    parser.add_argument("-os", "--order_by_score", action="store_true", help="By default, output regions are sorted by occupancy level (number of total read counts). This option sorts output regions by score instead of occupancy level.")
    parser.add_argument("-n", "--number_of_sites", type=int, help="Number of sites to be considered. For a given N take only the top N sites by occupancy level (or motif score if -os is set).")
    parser.add_argument("-p", "--percent_of_sites", type=float, help="Percent of sites to be considered. For a given P take only the top P percent sites by occupancy level (or motif score if -os is set).")
    parser.add_argument("-d", "--down_sample_sites", type=int, help="Down sample input sites. For a given D sample D sites randomly.")
    parser.add_argument("-sd", "--shift_dist", type=int, help="Shift sites by given distance (in bp) to the right (if positive) or to the left (if negative).")
    parser.add_argument("-fs", "--flip_strand", action="store_true", help="Flip the strand of motif matches from '+' to '-' and from '-' to '+' for all input sites.")
    return parser.parse_args()
## Parse input files
def parse_matrix_scan(inFile):
    """
    Parses the RSAT matrix-scan output file and returns a list of dicts ordered by score
    Keeps only one hit if 2 hits at the same position on both strands.
    Genomic coordinates are transfromed from 1-based (in matrix-scan output format)
    into zero-based half open (like BED format) for internal representation and HTSeq compatibility.
    Assumes the "galaxy format" for sequence IDs in the first column.
    """
    # dict for unique sites, keyed by 1-based "chr:start-end" locus string
    unique_sites = {}
    # count number of total input sites
    n_sites = 0
    for line in open(inFile):
        # ignore comment lines
        if not line.startswith(';') and not line.startswith('#'):
            sp = line.strip().split('\t')
            loc = sp[0]
            # galaxy-style sequence id: chromosome and peak start are the
            # 2nd and 3rd underscore-separated fields
            chr = loc.split('_')[1]
            # the peak-coordinates are assumed now again ONE-based in matrix-scan output format!
            peak_start = int(loc.split('_')[2]) - 1
            # the motif coordinates are ONE-based and relative to peak start in the matrix-scan output file format
            start = peak_start + int(sp[4]) - 1
            end = peak_start + int(sp[5])
            # map matrix-scan strand codes (DR/D -> '+', R -> '-')
            strand = sp[3].replace('DR', '+').replace('D', '+').replace('R','-')
            score = float(sp[7])
            # one based locus coordinates of the motif:
            motif_loc = chr + ":" + str(start+1) + "-" + str(end)
            # keep it only if score is greater than sits with same location
            if motif_loc not in unique_sites or score > unique_sites[motif_loc]["score"]:
                type = sp[1]  # NOTE: shadows the builtin 'type' inside this loop
                ft_name = sp[2]
                seq = HTSeq.Sequence(sp[6], loc)
                # append region as dict with all annotations
                unique_sites[motif_loc] = {"chr":chr, "start":start, "end":end, \
                    "strand":strand, "score":score, "type":type, \
                    "ft_name":ft_name, "motif_seq":seq, "motif_loc":motif_loc,
                    "seq_id":loc, "name":motif_loc}
            n_sites += 1 # increase counter for total number of sites
    # get list of sites sorted by motif score
    # (Python 2 only: uses the 'cmp' argument of sorted())
    sorted_sites = sorted(unique_sites.values(), cmp=lambda x,y: cmp(x['score'], y['score']), reverse = True )
    print "INFO: Read {0} of {1} input regions.".format(len(sorted_sites), n_sites)
    return sorted_sites
def add_center(sites, size):
    """
    Add center of motif to sites. In case of even motifs (real center
    is between two bases) the closest base upstream from the center is
    chosen.
    Add also extended region information as "ext_start" and "ext_end" positions.

    :param sites: list of site dicts with "chr", "start", "end", "strand".
    :param size: window size; the extended region spans size bp around the center.
    :return: the same list, with "center", "ext_start", "ext_end" and
        "location" added to each dict.
    """
    for s in sites:
        # check if motif length is even
        even = (s["end"] - s["start"]) % 2 == 0
        # calculate center coordinate of binding site
        # (floor division keeps coordinates integral on Python 3 as well)
        center = (s["start"] + s["end"] - 1) // 2
        # in even case on the reverse strand the center is
        # the closest base upstream of the real center
        if even and s["strand"] == '-':
            center += 1
        s["center"] = center
        s["ext_start"] = center - size // 2
        s["ext_end"] = center + size // 2
        # get 1-based genomic location in the format "chr:start-end"
        s["location"] = s["chr"] + ":" + str(s["ext_start"] + 1) + "-" + str(s["ext_end"])
    return sites
def reads_profile(regions, bam_file, size):
    """
    Parses reads from BAM file and adds number of forward and reverse
    5' coverage counts per position of each region.

    Adds two lists of length *size* to each region dict: "up_counts"
    (reads on the motif strand) and "down_counts" (opposite strand),
    indexed by position within the extended window.

    This function depends on the HTSeq package for fast parsing of read
    information from BAM files.
    """
    print("INFO: Begin to parse reads from BAM file for n={0} regions.".format(len(regions)))
    # Open BAM file:
    bamHandle = HTSeq.BAM_Reader(bam_file)
    # get set of available chromosomes (avoid shadowing the builtin 'chr')
    chromosomes = set(c['SN'] for c in bamHandle.get_header_dict()['SQ'])
    for reg in regions:
        center = reg["center"]
        # initialize read-counts for all positions of this region
        up_counts = size * [0]
        down_counts = size * [0]
        # check if chr of region is available in BAM file:
        if reg["chr"] in chromosomes:
            # get GenomicInterval object; extend it by +-1 to include reads
            # on the negative strand inside the interval
            iv = HTSeq.GenomicInterval(reg["chr"], max(0, reg["ext_start"] - 1), reg["ext_end"] + 1, reg["strand"])
            # iterate over all reads mapping to that region (interval)
            for aln in bamHandle[iv]:
                # consider motif on positive strand
                if reg["strand"] == '+':
                    dist = aln.iv.start_d - center
                    # floor division keeps the index an int on Python 3 too
                    pos = dist + size // 2
                    if pos >= 0 and pos < size:
                        if aln.iv.strand == '+': up_counts[pos] += 1
                        if aln.iv.strand == '-': down_counts[pos] += 1
                if reg["strand"] == '-':
                    # distances are mirrored for reverse-strand motifs
                    dist = -1 * (aln.iv.start_d - center)
                    pos = dist + size // 2
                    if pos >= 0 and pos < size:
                        if aln.iv.strand == '+': down_counts[pos] += 1
                        if aln.iv.strand == '-': up_counts[pos] += 1
        # add counts to region dictionary:
        reg["up_counts"] = up_counts
        reg["down_counts"] = down_counts
    print("INFO: Finished parsing of BAM file.")
    return regions
## Write functions
def write_counts(regions, output_prefix):
    """Write up/down 5' count matrices as TAB separated files.

    Creates <output_prefix>.up_counts.tab and <output_prefix>.down_counts.tab
    with one row per region: "<location>_<name>" followed by the
    per-position counts.
    """
    # context managers guarantee the files are closed even on error
    with open(output_prefix + ".up_counts.tab", 'w') as upHandle, \
            open(output_prefix + ".down_counts.tab", 'w') as downHandle:
        # iterate over rows
        for reg in regions:
            upHandle.write('\t'.join([reg["location"] + "_" + reg["name"]] + [str(c) for c in reg["up_counts"]]) + '\n')
            downHandle.write('\t'.join([reg["location"] + "_" + reg["name"]] + [str(c) for c in reg["down_counts"]]) + '\n')
def write_region_to_bed(regions, outFile, size=60):
    """Write regions to outFile in BED format.

    Each row spans size bp around the region center; missing "name" or
    "score" fields are written as ".".
    """
    with open(outFile, 'w') as outHandle:
        for reg in regions:
            # floor division keeps coordinates integral under Python 3 as well
            outHandle.write("\t".join([str(c) for c in [
                reg["chr"], reg["center"] - size // 2, reg["center"] + size // 2,
                reg["name"] if "name" in reg else ".",
                reg["score"] if "score" in reg else ".",
                reg["strand"]
            ]]) + '\n')
def write_fasta(regions, outFile):
    """Write the extended sequence of each region to *outFile* in FASTA format.

    The FASTA header of each record is "<location>_<strand>".
    """
    with open(outFile, 'w') as handle:
        for region in regions:
            header = ">" + region["location"] + "_" + region["strand"]
            handle.write(header + "\n" + region["ext_seq"] + "\n")
## Read sequences
def parse_sequences(sites, size, fasta_file):
    """Fetch the genomic sequence of each extended region and store it
    under the "ext_seq" key (reverse-complemented for '-' strand motifs,
    encoded as uppercase A/C/G/T).

    Requires the 'pyfasta' package and a genome FASTA file; the first
    access builds an index next to the FASTA file.

    NOTE(review): the *size* parameter is unused here -- the window is
    taken from the precomputed "ext_start"/"ext_end" keys.
    """
    from pyfasta import Fasta  # needed to fetch sequences from the genome FASTA
    print("INFO: Begin to fetch sequences....")
    f = Fasta(fasta_file, key_fn=lambda key: key.split()[0])
    for reg in sites:
        start = reg["ext_start"]
        end = reg["ext_end"]
        # if motif on negative strand, shift region by +1 to account for
        # zero based half-open intervals
        if reg["strand"] == '-':
            start += 1
            end += 1
        seq = f.sequence({"chr": reg["chr"], "start": start, "stop": end}, one_based=False)
        # Note: the 'strand':reg["strand"] argument for f.sequence does not
        # work (bug in pyfasta/fasta.py), so the reverse complement is taken
        # explicitly below.
        seq = seq.upper()
        # if motif on negative strand, convert seq to reverse complement
        if reg["strand"] == '-':
            seq = reverse_complement(seq)
        # add sequence to region dict
        reg["ext_seq"] = seq
    print("INFO: Finished sequences.")
    # BUG FIX: return the 'sites' parameter; the original returned the
    # module-level global 'regions' (NameError outside module scope).
    return sites
def reverse_complement(seq):
    """Return the reverse complement of *seq*.

    Characters other than A/C/G/T (e.g. N) are passed through unchanged.
    """
    rep_dict = {"A": "T", "C": "G", "G": "C", "T": "A"}
    # complement each base while walking the sequence backwards
    return "".join(rep_dict.get(base, base) for base in reversed(seq))
def get_consensus(sites, seq_type, m=-1):
    """Return a string as consensus sequence over sites[*][seq_type].

    Uppercase base: >75% of sites agree; lowercase: >50%; '.' otherwise.

    :param sites: list of site dicts.
    :param seq_type: key of the sequence to use (e.g. "motif_seq", "ext_seq").
    :param m: consensus length; -1 means use the length of the first
        site's sequence (0 if there are no sites).
    """
    bases = ['A', 'C', 'G', 'T', 'N']
    n = len(sites)  # number of sites
    if m == -1:
        # guard against empty input: np.zeros((-1, 5)) would raise
        m = len(sites[0][seq_type]) if sites else 0
    # initialize count array
    # rows correspond to positions in motif sequence
    # columns correspond to bases: "A", "C", "G", "T", and "N"
    # NOTE: builtin int -- the np.int alias was removed in NumPy 1.24
    counts = np.zeros((m, 5), int)
    consensus = ""
    for s in sites:
        # convert seq to HTSeq.Sequence object
        seq = HTSeq.Sequence(str(s[seq_type]))
        # count bases to counts array
        seq.add_bases_to_count_array(counts)
    base_idx = np.argmax(counts, 1)
    for i in range(m):
        # test if at least 75% of sites have same base:
        if n > 0 and counts[i, base_idx[i]] / float(n) > 0.75:
            consensus += bases[base_idx[i]]
        # test if at least 50% of sites have same base:
        elif n > 0 and counts[i, base_idx[i]] / float(n) > 0.5:
            consensus += bases[base_idx[i]].lower()
        else:
            consensus += "."
    return consensus
## Write functions
def write_consensus(consenus, size, outFile):
    """Write the consensus sequence, padded with '.' to *size* characters,
    to outFile.

    BUG FIX: the original body referenced the undefined name 'consensus'
    (the parameter is spelled 'consenus') and multiplied a string by the
    float returned by np.ceil, which raises TypeError.
    """
    if len(consenus) == size:
        ext_consensus = consenus
    else:
        l = len(consenus)
        # pad asymmetrically: one extra '.' before, one fewer after
        before = (size - l) // 2 + 1
        # int() conversion: str * float is a TypeError
        after = int(np.ceil((size - l) / 2.0)) - 1
        ext_consensus = before * '.' + consenus + after * '.'
    with open(outFile, 'w') as outHandle:
        outHandle.write(ext_consensus + "\n")
def write_seq_matrix(seq_matrix, outFile):
    """Write for each region the genomic sequence encoded as integers
    (A=0, C=1, G=2, T=3, N=4) to a TAB separated file.

    BUG FIX: iterate over the 'seq_matrix' parameter; the original body
    iterated over the module-level global 'regions'.
    """
    base2int = {"A": "0", "C": "1", "G": "2", "T": "3", "N": "4"}
    with open(outFile, 'w') as outHandle:
        for reg in seq_matrix:
            outHandle.write('\t'.join([reg["location"] + "_" + reg["name"]] + [base2int[b] for b in reg["ext_seq"]]) + '\n')
## Ordering functions
def order_by_read_counts(regions):
    """Return *regions* sorted by total read count (descending).

    Uses a key function instead of the Python-2-only 'cmp' argument;
    ordering (including stability for ties) is unchanged.
    """
    return sorted(regions, key=lambda r: sum(r['up_counts'] + r['down_counts']), reverse=True)
def order_by_score(regions, reverse=True):
    """Return *regions* sorted by motif score (descending by default).

    Uses a key function instead of the Python-2-only 'cmp' argument;
    ordering (including stability for ties) is unchanged.
    """
    return sorted(regions, key=lambda r: r['score'], reverse=reverse)
## Other functions
def flip_strands(regions):
    """Flip the strand ('+' <-> '-') of every region in place.

    Regions with any other strand value are left untouched.
    :return: the same list.
    """
    print("INFO: Flip strand of all input sites.")
    # iterate over the dicts directly instead of range(len(...))
    for region in regions:
        if region["strand"] == "+":
            region["strand"] = "-"
        elif region["strand"] == "-":
            region["strand"] = "+"
    return regions
def shift_sites(regions, shift_dist):
    """Shift all regions in place by *shift_dist* bp, to the right if
    positive or to the left if negative.

    Motifs on the negative strand are shifted in the opposite direction,
    and the 1-based "chr:start-end" location string is refreshed.
    """
    coord_keys = ("start", "center", "end", "ext_start", "ext_end")
    for site in regions:
        # reverse-strand motifs move the opposite way
        delta = -shift_dist if site["strand"] == '-' else shift_dist
        for key in coord_keys:
            site[key] += delta
        # rebuild the 1-based genomic location string
        site["location"] = site["chr"] + ":" + str(site["ext_start"] + 1) + "-" + str(site["ext_end"])
## Main
if __name__ == "__main__":
    # BUG FIX: sys.exit() is used below but 'import sys' is commented out at
    # the top of the file; import it locally so the error paths work.
    import sys

    # read commandline argumets
    args = commandline()
    # test validity of other arguments:
    if args.number_of_sites and args.percent_of_sites:
        sys.exit("ERROR: '--number_of_sites' and '--percent_of_sites' arguments are mutually exclusive. Exit now.")
    if args.perm:
        # Split between real name and extension (matrix)
        (matrix_filename_label, matrix_filename_ext) = os.path.splitext(args.input_sites)
        # permuted files are expected as <name>_perm1..<name>_perm10
        nb_permutations = 10
        permuted_res = []
        permuted_suffix = []
        for index in range(nb_permutations):
            permuted_res.append(os.path.join(matrix_filename_label + '_perm' + str(index + 1) + matrix_filename_ext))
            permuted_suffix.append('_perm' + str(index + 1))
    files_to_analyze = [args.input_sites]
    suffixes = ['']
    if args.perm:
        files_to_analyze += permuted_res
        suffixes += permuted_suffix
    # analyse the main input file, then (optionally) each permuted file
    for index in range(len(files_to_analyze)):
        file_to_analyze = files_to_analyze[index]
        if args.input_format.lower() == "matrix-scan":
            # parse matrix-scan results
            sites = parse_matrix_scan(file_to_analyze)
            # extend sites to region of given size:
            regions = add_center(sites, args.size)
        else:
            sys.exit("ERROR: INPUT_FORMAT shuld be one of 'matrix-scan' or 'bed'. Exit now.")
        # if option 'flip_strand' is given, flip strand of all input sites:
        if args.flip_strand:
            regions = flip_strands(regions)
        # if option shift_dist is set, shift sites by given distance:
        if args.shift_dist:
            shift_sites(regions, args.shift_dist)
        # down sample sites
        if args.down_sample_sites:
            import random
            regions = random.sample(regions, args.down_sample_sites)
            print("INFO: Input sites were down sampled to {0} regions.".format(len(regions)))
        # parse 5' coverage counts from BAM file:
        regions = reads_profile(regions, args.bam_file, args.size)
        # order output regions by motif score or exo-read occupancy level
        if args.order_by_score:
            regions = order_by_score(regions)
        else:
            regions = order_by_read_counts(regions)
        # take only a subset of top p percent sites if such an argument is given:
        if args.percent_of_sites:
            args.number_of_sites = int(args.percent_of_sites * len(regions) / 100)
        # take only a subset of top N sites if such an argument is given:
        if args.number_of_sites:
            regions = regions[:args.number_of_sites]
        # write the 5' coverage count data to ouput files
        write_counts(regions, args.output_prefix + suffixes[index])
        # fetch genomic sequences, if reference seq is given and calculate consensus sequence.
        if args.genome_seq:
            regions = parse_sequences(regions, args.size, args.genome_seq)
            write_seq_matrix(regions, args.output_prefix + suffixes[index] + ".seq_matrix.tab")
            consensus = get_consensus(regions, "ext_seq", args.size)
        else:
            consensus = get_consensus(regions, "motif_seq")
        # write consensus seq to output file
        write_consensus(consensus, args.size, args.output_prefix + suffixes[index] + ".consensus.txt")
        # write extended regions to BED file:
        if args.output_bed:
            write_region_to_bed(regions, args.output_prefix + suffixes[index] + ".bed", args.size)
        # write genomic sequences to fasta file:
        if args.output_seq:
            if args.genome_seq:
                write_fasta(regions, args.output_prefix + suffixes[index] + ".fa")
            else:
                sys.exit("ERROR: Need reference genome file (--genome_seq) to write sequence of given regions to fasta file. Exit now.")
|
ComputationalSystemsBiology/ExoProfiler
|
python/5primeCounter.py
|
Python
|
gpl-3.0
| 21,010
|
[
"Galaxy",
"HTSeq",
"pysam"
] |
2458c7aa137c751dbc1f8c038a84d098497e5965544f465c1a2d1952d99b1cf1
|
#!/usr/bin/env python
#
# This program is free software you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation either version 3 of the License, or
# (at your option) any later version.
#
# Written (C) 2012-2013 Heiko Strathmann
#
import numpy as np
from math import pi
# Default example arguments: [n_samples, difference, angle] -- see statistics_hsic().
parameter_list = [[250,3,3]]
def statistics_hsic (n, difference, angle):
    """Example: test two variables for statistical dependence with the
    Hilbert-Schmidt Independence Criterion (HSIC).

    Generates *n* samples from a symmetric mixture of Gaussians
    (controlled by *difference* and *angle*), builds Gaussian kernels
    whose widths come from the median-distance heuristic, then estimates
    p-values both by permutation sampling and by a gamma approximation.

    :return: tuple (p_value_boot, thresh_boot, p_value_gamma,
        thresh_gamma, statistic, null_samples)
    """
    from modshogun import RealFeatures
    from modshogun import DataGenerator
    from modshogun import GaussianKernel
    from modshogun import HSIC
    from modshogun import PERMUTATION, HSIC_GAMMA
    from modshogun import EuclideanDistance
    from modshogun import Statistics, Math

    # for reproducable results (the numpy one might not be reproducible across
    # different OS/Python-distributions
    Math.init_random(1)
    np.random.seed(1)

    # note that the HSIC has to store kernel matrices
    # which upper bounds the sample size

    # use data generator class to produce example data
    data=DataGenerator.generate_sym_mix_gauss(n,difference,angle)
    #plot(data[0], data[1], 'x');show()

    # create shogun feature representation
    features_x=RealFeatures(np.array([data[0]]))
    features_y=RealFeatures(np.array([data[1]]))

    # compute median data distance in order to use for Gaussian kernel width
    # 0.5*median_distance normally (factor two in Gaussian kernel)
    # However, shoguns kernel width is different to usual parametrization
    # Therefore 0.5*2*median_distance^2
    # Use a subset of data for that, only 200 elements. Median is stable
    subset=np.random.permutation(features_x.get_num_vectors()).astype(np.int32)
    subset=subset[0:200]
    features_x.add_subset(subset)
    dist=EuclideanDistance(features_x, features_x)
    distances=dist.get_distance_matrix()
    features_x.remove_subset()
    median_distance=Statistics.matrix_median(distances, True)
    sigma_x=median_distance**2
    # same median heuristic for the y features, reusing the same subset
    features_y.add_subset(subset)
    dist=EuclideanDistance(features_y, features_y)
    distances=dist.get_distance_matrix()
    features_y.remove_subset()
    median_distance=Statistics.matrix_median(distances, True)
    sigma_y=median_distance**2
    #print "median distance for Gaussian kernel on x:", sigma_x
    #print "median distance for Gaussian kernel on y:", sigma_y
    kernel_x=GaussianKernel(10,sigma_x)
    kernel_y=GaussianKernel(10,sigma_y)

    hsic=HSIC(kernel_x,kernel_y,features_x,features_y)

    # perform test: compute p-value and test if null-hypothesis is rejected for
    # a test level of 0.05 using different methods to approximate
    # null-distribution
    statistic=hsic.compute_statistic()
    #print "HSIC:", statistic
    alpha=0.05

    #print "computing p-value using sampling null"
    hsic.set_null_approximation_method(PERMUTATION)
    # normally, at least 250 iterations should be done, but that takes long
    hsic.set_num_null_samples(100)
    # sampling null allows usage of unbiased or biased statistic
    p_value_boot=hsic.compute_p_value(statistic)
    thresh_boot=hsic.compute_threshold(alpha)
    #print "p_value:", p_value_boot
    #print "threshold for 0.05 alpha:", thresh_boot
    #print "p_value <", alpha, ", i.e. test sais p and q are dependend:", p_value_boot<alpha

    #print "computing p-value using gamma method"
    hsic.set_null_approximation_method(HSIC_GAMMA)
    p_value_gamma=hsic.compute_p_value(statistic)
    thresh_gamma=hsic.compute_threshold(alpha)
    #print "p_value:", p_value_gamma
    #print "threshold for 0.05 alpha:", thresh_gamma
    #print "p_value <", alpha, ", i.e. test sais p and q are dependend:", p_value_gamma<alpha

    # sample from null distribution (these may be plotted or whatsoever)
    # mean should be close to zero, variance stronly depends on data/kernel
    # sampling null, biased statistic
    #print "sampling null distribution using sample_null"
    hsic.set_null_approximation_method(PERMUTATION)
    hsic.set_num_null_samples(100)
    null_samples=hsic.sample_null()
    #print "null mean:", np.mean(null_samples)
    #print "null variance:", np.var(null_samples)
    #hist(null_samples, 100); show()

    return p_value_boot, thresh_boot, p_value_gamma, thresh_gamma, statistic, null_samples
# Run the HSIC example with the default parameter set when invoked directly.
if __name__=='__main__':
    print('HSIC')
    statistics_hsic(*parameter_list[0])
|
AzamYahya/shogun
|
examples/undocumented/python_modular/statistics_hsic.py
|
Python
|
gpl-3.0
| 4,120
|
[
"Gaussian"
] |
9a4d2f3f35fa761812a104b32d8d8b7a4fd3fe379fdb4a74f2d9bf9df1d8534b
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.