text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
#! /usr/bin/env python
import os
import numpy as np
import xarray as xr
from pytest import approx
from pymt.grids import RasterField
from pymt.printers.nc.database import Database
def open_nc_file(filename):
    """Return a read-only NETCDF4 handle to *filename*.

    The import is local so the module can be used without netCDF4 installed.
    """
    import netCDF4

    return netCDF4.Dataset(filename, "r", format="NETCDF4")
def test_2d_constant_shape(tmpdir):
    """Write a fixed-shape raster field twice and verify the NetCDF output."""
    nc_file = "2d_elevation_time_series.nc"
    values = np.arange(6.0)
    field = RasterField((2, 3), (1.0, 1.0), (0.0, 0.0), indexing="ij")
    field.add_field("Elevation", values, centering="point")

    with tmpdir.as_cwd():
        # First write creates the file; the second appends a time slice with
        # the (in-place) doubled data.
        db = Database()
        db.open(nc_file, "Elevation")
        db.write(field)
        assert os.path.isfile(nc_file)

        values *= 2.0
        db.write(field)
        db.close()

        root = xr.open_dataset(nc_file, engine="h5netcdf")
        assert set(root.dims) == {"y", "x", "time"}
        assert set(root.data_vars) == {"mesh", "Elevation"}
        for dim, size in (("x", 3), ("y", 2), ("time", 2)):
            assert root.dims[dim] == size

        elevation = root.variables["Elevation"]
        assert elevation.shape == (2, 2, 3)
        expected = np.arange(6.0).reshape((2, 3))
        assert elevation[0].data == approx(expected)
        assert elevation[1].data == approx(expected * 2.0)

        assert root.variables["y"].data == approx([0.0, 1.0])
        assert root.variables["x"].data == approx([0.0, 1.0, 2.0])
        attrs = root.data_vars["Elevation"].attrs
        assert attrs["long_name"] == "Elevation"
        assert attrs["units"] == "-"
        root.close()
def test_2d_changing_shape(tmpdir):
    """Writing a field with a new shape rolls the data over to a new file."""
    nc_file = "Temperature_time_series.nc"
    field = RasterField((3, 2), (1.0, 1.0), (0.0, 0.0))
    field.add_field("Temperature", np.arange(6.0), centering="point")

    with tmpdir.as_cwd():
        db = Database()
        db.open(nc_file, "Temperature")
        db.write(field)
        assert os.path.isfile(nc_file)

        # A field with a different size cannot be appended, so the database
        # writes it to a fresh file, Temperature_time_series_0000.nc.
        field = RasterField((3, 3), (1.0, 1.0), (0.0, 0.0))
        field.add_field("Temperature", np.arange(9.0), centering="point")
        db.write(field)
        assert os.path.isfile("Temperature_time_series_0000.nc")
        db.close()

        root = xr.open_dataset("Temperature_time_series_0000.nc",
                               engine="h5netcdf")
        assert set(root.dims) == {"y", "x", "time"}
        assert set(root.data_vars) == {"mesh", "Temperature"}
        for dim, size in (("x", 3), ("y", 3), ("time", 1)):
            assert root.dims[dim] == size

        temperature = root.data_vars["Temperature"]
        assert temperature.shape == (1, 3, 3)
        assert temperature[0].data == approx(np.arange(9.0).reshape((3, 3)))
        assert root.variables["x"].data == approx([0.0, 1.0, 2.0])
        assert root.variables["y"].data == approx([0.0, 1.0, 2.0])
        assert temperature.attrs["long_name"] == "Temperature"
        assert temperature.attrs["units"] == "-"
        root.close()
| csdms/pymt | tests/printers/nc/test_database.py | Python | mit | 3,687 | [
"NetCDF"
] | a86dcf06e3583757dbefc23488cf1a46d4a64f540e2275dd4f68e23e814aade8 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converter construction support.
This module contains a base class for all converters, as well as supporting
structures. These structures are referred to as contexts.
The class hierarchy is as follows:
<your converter>
[extends] converter.Base
[extends] transformer.Base
      [extends] gast.NodeTransformer
      [uses] transformer.SourceInfo
[uses] converter.EntityContext
[uses] converter.ProgramContext
    [uses] transformer.SourceInfo
converter.Base is a specialization of transformer.Base for AutoGraph. It's a
very lightweight subclass that adds a `ctx` attribute holding the corresponding
EntityContext object (see below). Note that converters are not reusable, and
`visit` will raise an error if called more than once.
converter.EntityContext contains mutable state associated with an entity that
the converter processes.
converter.ProgramContext contains mutable state across related entities. For
example, when converting several functions that call one another, the
ProgramContext should be shared across these entities.
Below is the overall flow at conversion:
program_ctx = ProgramContext(<entities to convert>, <global settings>, ...)
while <program_ctx has more entities to convert>:
entity, source_info = <get next entity from program_ctx>
entity_ctx = EntityContext(program_ctx, source_info)
for <each ConverterClass>:
converter = ConverterClass(entity_ctx)
# May update entity_ctx and program_ctx
entity = converter.visit(entity)
<add entity's dependencies to program_ctx>
Note that pyct contains a small number of transformers used for static analysis.
These implement transformer.Base, rather than converter.Base, to avoid a
dependency on AutoGraph.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from enum import Enum
from tensorflow.python.autograph.core import config
from tensorflow.python.autograph.core import naming
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.autograph.pyct.static_analysis import live_values
from tensorflow.python.autograph.pyct.static_analysis import liveness
from tensorflow.python.autograph.pyct.static_analysis import reaching_definitions
from tensorflow.python.autograph.pyct.static_analysis import type_info
# TODO(mdan): These contexts can be refactored into first class objects.
# For example, we could define Program and Entity abstractions that hold on
# to the actual entity and have conversion methods.
# TODO(mdan): Add a test specific to this converter.
class ProgramContext(object):
  """Keeps track of converting function hierarchies.

  This object is mutable, and is updated during conversion. Not thread safe.

  Attributes:
    recursive: bool, whether to recursively convert any functions that the
      decorator function may call.
    autograph_decorators: Tuple[Callable, ...], decorator functions that belong
      to AutoGraph. These require special treatment.
    partial_types: Tuple, types that are only partially converted; see
      naming.Namer.
    conversion_order: List[Any], entities in the order they were converted.
    dependency_cache: Dict[Any, ast.AST], the original entities mapped to their
      converted AST.
    additional_imports: Set[Any], additional entities which for any reason
      cannot be attached after loading and need to be explicitly imported
      in the generated code.
    name_map: Dict[str, str], map of original entity name to the name of
      their converted counterparts.
    autograph_module: Module, a reference to the autograph module. This
      needs to be specified by the caller to avoid circular dependencies.
    uncompiled_modules: Set[Tuple[str, ...]], with each tuple representing the
      fully qualified name of a package containing functions that will not be
      compiled.
    required_imports: str, containing an import statement on each line. These
      are all the imports necessary for the compiled code to run, in addition
      to the closures of each entity, which are attached dynamically.
  """

  def __init__(self, recursive, autograph_decorators, partial_types,
               autograph_module, uncompiled_modules):
    self.recursive = recursive
    self.autograph_decorators = autograph_decorators
    self.partial_types = partial_types or ()
    self.autograph_module = autograph_module
    self.uncompiled_modules = uncompiled_modules
    self.conversion_order = []
    self.dependency_cache = {}
    self.additional_imports = set()
    self.name_map = {}

  @property
  def required_imports(self):
    """Returns a block containing all imports required by the converted code."""
    # TODO(mdan): Check that these don't clobber one another.
    all_imports = config.COMPILED_IMPORT_STATEMENTS + tuple(
        self.additional_imports)
    return '\n'.join(all_imports)

  def new_namer(self, namespace):
    """Creates a Namer configured with this program's conversion settings."""
    return naming.Namer(namespace, self.recursive, self.name_map,
                        self.partial_types)

  def update_name_map(self, namer):
    """Updates renamed_calls based on the recent activity from the namer.

    Whenever we convert a new entity, any references to other entities are
    being renamed to match their soon-to-be-converted counterparts. The namer
    keeps track of these renames. When conversion is complete, we copy those
    renames so that when those referenced entities are being converted, their
    new name matches.

    Args:
      namer: naming.Namer

    Raises:
      ValueError: when an entity was renamed twice and to different names.
    """
    # TODO(mdan): Have call_trees do this directly.
    # This is done so indirectly, via the namer, for historic reasons. But
    # now we can have the converter that does the rename record the new name
    # as well and skip this step altogether.
    for original_name, new_name in namer.renamed_calls.items():
      if original_name not in self.name_map:
        self.name_map[original_name] = new_name
      elif self.name_map[original_name] != new_name:
        raise ValueError(
            'Calls to %s were converted using multiple names (%s). This is '
            'possible when an entity with one of these names already '
            'existed. To fix, avoid using any of these names.' %
            (original_name, (new_name, self.name_map[original_name])))

  def add_to_cache(self, original_entity, converted_ast):
    """Records a converted entity along with its conversion order."""
    self.conversion_order.append(original_entity)
    self.dependency_cache[original_entity] = converted_ast
class EntityContext(object):
  """Tracks the conversion of a single entity.

  This object is mutable, and is updated during conversion. Not thread safe.

  Attributes:
    namer: Namer
    info: transformer.EntityInfo
    program: ProgramContext
  """

  def __init__(self, namer, entity_info, program_ctx):
    # Plain data holder; per the class docstring the attributes are mutated
    # in place during conversion.
    self.namer = namer
    self.info = entity_info
    self.program = program_ctx
class Base(transformer.Base):
  """All converters should inherit from this class.

  Attributes:
    ctx: EntityContext
  """

  def __init__(self, ctx):
    super(Base, self).__init__(ctx.info)
    # Deliberately short name: accessed very frequently by subclasses.
    self.ctx = ctx
    self._used = False
    self._ast_depth = 0

  def get_definition_directive(self, node, directive, arg, default):
    """Returns the unique directive for a symbol, or a default if none exist.

    See lang/directives.py for details on directives.

    Args:
      node: ast.AST
      directive: Callable[..., Any]
      arg: str
      default: Any

    Returns:
      The value of the requested directive argument, or default when the
      symbol carries no such directive.

    Raises:
      ValueError: if conflicting annotations have been found
    """
    definitions = anno.getanno(node, anno.Static.ORIG_DEFINITIONS, ())
    if not definitions:
      return default

    # TODO(mdan): Simplify this.
    matching_values = []
    for definition in definitions:
      if directive not in definition.directives:
        continue
      directive_args = definition.directives[directive]
      if arg not in directive_args:
        continue
      candidate = directive_args[arg]
      # All definitions must agree on the directive value.
      for seen in matching_values:
        if not ast_util.matches(candidate, seen):
          qn = anno.getanno(node, anno.Basic.QN)
          raise ValueError('%s has ambiguous annotations for %s(%s): %s, %s' %
                           (qn, directive.__name__, arg,
                            compiler.ast_to_source(candidate).strip(),
                            compiler.ast_to_source(seen).strip()))
      matching_values.append(candidate)

    if not matching_values:
      return default
    # Exactly one distinct value remains at this point.
    value, = matching_values
    return value

  def visit(self, node):
    if not self._ast_depth:
      # Only the outermost visit enforces single use.
      if self._used:
        raise ValueError('converter objects cannot be reused')
      self._used = True

    self._ast_depth += 1
    try:
      return super(Base, self).visit(node)
    finally:
      self._ast_depth -= 1
class AnnotatedDef(reaching_definitions.Definition):
  """A reaching definition that additionally carries user directives."""

  def __init__(self):
    super(AnnotatedDef, self).__init__()
    # Maps directive callables to their {arg_name: value} dicts; read by
    # Base.get_definition_directive.
    self.directives = {}
class AgAnno(Enum):
  """Annotation labels specific to AutoGraph. See anno.py."""

  # The enum value doubles as a human-readable description of the label.
  DIRECTIVES = 'User directives associated with the annotated statement.'

  def __repr__(self):
    # Display only the label name, e.g. `DIRECTIVES`.
    return self.name
def standard_analysis(node, context, is_initial=False):
  """Performs a complete static analysis of the given code.

  Args:
    node: ast.AST
    context: converter.EntityContext
    is_initial: bool, whether this is the initial analysis done on the input
      source code

  Returns:
    ast.AST, same as node, with the static analysis annotations added
  """
  # TODO(mdan): Clear static analysis here.
  # TODO(mdan): Consider not running all analyses every time.
  # TODO(mdan): Don't return a node because it's modified by reference.
  graphs = cfg.build(node)
  node = qual_names.resolve(node)
  # The passes run in order; later ones consume annotations produced by
  # earlier ones. live_values runs a second time because the second pass can
  # resolve first-order class attributes found by the first.
  passes = (
      lambda n: activity.resolve(n, context.info, None),
      lambda n: reaching_definitions.resolve(n, context.info, graphs,
                                             AnnotatedDef),
      lambda n: liveness.resolve(n, context.info, graphs),
      lambda n: live_values.resolve(n, context.info, config.PYTHON_LITERALS),
      lambda n: type_info.resolve(n, context.info),
      lambda n: live_values.resolve(n, context.info, config.PYTHON_LITERALS),
  )
  for analysis_pass in passes:
    node = analysis_pass(node)
  if is_initial:
    anno.dup(node, {anno.Static.DEFINITIONS: anno.Static.ORIG_DEFINITIONS})
  return node
def apply_(node, context, converter_module):
  """Applies a converter to an AST.

  Args:
    node: ast.AST
    context: converter.EntityContext
    converter_module: converter.Base

  Returns:
    ast.AST, the result of applying converter to node
  """
  # Refresh the static analysis annotations before every transform.
  analyzed_node = standard_analysis(node, context)
  return converter_module.transform(analyzed_node, context)
| kobejean/tensorflow | tensorflow/python/autograph/core/converter.py | Python | apache-2.0 | 11,637 | [
"VisIt"
] | a39adcd3d191e40bad0782b7b2773ab11e2d7258479a15e4b9755fa13a01c86e |
'''
Created on 16/02/2010
@author: jose
'''
# Copyright 2009 Jose Blanca, Peio Ziarsolo, COMAV-Univ. Politecnica Valencia
# This file is part of franklin.
# franklin is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# franklin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with franklin. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
from operator import attrgetter
from collections import defaultdict
from copy import copy
import math, itertools
import logging
try:
import pysam
except ImportError:
pass
from Bio.SeqFeature import FeatureLocation
from Bio.Restriction import Analysis, CommOnly, RestrictionBatch
from franklin.seq.seqs import SeqFeature, get_seq_name
from franklin.utils.misc_utils import get_fhand
from franklin.sam import create_bam_index, get_read_group_info
# Defaults used by the SNV calling code.
DEFAUL_MIN_NUM_READS_PER_ALLELE = 2  # NOTE(review): name keeps a historic typo
DEFAULT_PLOIDY = 2
# Symbols used when building allele and alignment strings.
DELETION_ALLELE = '-'
UNKNOWN_NUCLEOTYDE = 'N'
NO_NUCLEOTYDE = ' '
N_ALLLELES = ('n', '?')
# Kinds of single nucleotide variants and related events.
SNP = 0
INSERTION = 1
DELETION = 2
INVARIANT = 3
INDEL = 4
COMPLEX = 5
TRANSITION = 6
TRANSVERSION = 7
UNKNOWN = 8
VARIANT = 9 # non-invariant allele
SNV_TYPES = {SNP:'SNP', INSERTION:'insertion', DELETION:'deletion',
             INVARIANT:'invariant', INDEL:'indel', COMPLEX:'complex',
             TRANSITION:'transition', TRANSVERSION:'transversion',
             UNKNOWN:'unknown'}
# Restriction enzymes considered common (used for the Analysis/CAPS code).
COMMON_ENZYMES = ['EcoRI', 'SmaI', 'BamHI', 'AluI', 'BglII',
                  'SalI', 'BglI', 'ClaI', 'TaqI',
                  'PstI', 'PvuII', 'HindIII', 'EcoRV',
                  'HaeIII', 'KpnI', 'ScaI',
                  'HinfI', 'DraI', 'ApaI', 'BstEII', 'ZraI', 'BanI', 'Asp718I']
# Fallback read group name for reads without one.
UNKNOWN_RG = 'unknown'
# CIGAR operation codes (letters listed in CIGAR_DECODE below).
# NOTE(review): INSERTION and DELETION are re-bound here. The values happen to
# coincide with the SNV kinds above (1 and 2), so behavior is unchanged, but
# the duplication is fragile — changing either group would break the other.
MATCH = 0
INSERTION = 1
DELETION = 2
SKIP = 3
SOFT_CLIP = 4
HARD_CLIP = 5
PADDING = 6
# Relative location of a reference position inside a cigar segment.
IN_FIRST_POS = 1
IN_MIDDLE_POS = 2
IN_LAST_POS = 3
IN_FIRST_AND_LAST = 4
# One letter per CIGAR operation code above.
CIGAR_DECODE = 'MIDNSHP'
def _get_raw_allele_from_read(aligned_read, index):
    '''It returns (allele, phred quality) for the given read position.

    index can be an int (a single base) or a slice (e.g. an insertion).
    When the read carries no qualities the quality is None.
    '''
    allele = aligned_read.seq[index].upper()
    if aligned_read.qual:
        # The previous version wrapped this call in a try/except that turned
        # ZeroDivisionError into RuntimeError('mi error') — leftover debug
        # code that only obscured the real error; any failure now propagates.
        qual = _quality_to_phred(aligned_read.qual[index])
    else:
        qual = None
    return allele, qual
# Cache for _get_segments_from_cigar results, keyed by
# (read_len, start_pos, cigar). It is wiped completely once it grows past
# MAX_CACHED_SEGMENTS entries.
SEGMENTS_CACHE = {}
MAX_CACHED_SEGMENTS = 10000
def _get_cigar_segments_from_aligned_read(aligned_read):
    'It returns the cigar segments for the given pysam aligned read.'
    # Delegate to the pure function, which also takes care of the caching.
    return _get_segments_from_cigar(aligned_read.pos, aligned_read.cigar,
                                    len(aligned_read.seq))
def _get_segments_from_cigar(begin_pos_read_in_ref, cigar, read_len):
    '''It returns two lists (reference and read) in which the firsts
    nucleotides of the different cigar categories are given.

    67890 12345
    ATCGA--GATCG
    atcGATCG--CG
    01234 56
    CIGAR = 3H2M2I1M2D2M
    ref_segments = [9, None, 11, 12, 14]
    read_segments = [0, 2, 4, None, 5]

    It also returns the limits of the aligned reference, the limits of the
    read, the cigar category for each segment and the segment lengths.
    The reference position is only used as part of the cache key.
    '''
    cigar = tuple(cigar)
    global SEGMENTS_CACHE
    cache_key = read_len, begin_pos_read_in_ref, cigar
    if cache_key in SEGMENTS_CACHE:
        return SEGMENTS_CACHE[cache_key]['result']
    # Hard-clipped nucleotides ('H') are not part of the read sequence, so
    # they are ignored altogether.
    cigar_elements = [element for element in cigar
                      if element[0] != HARD_CLIP]
    ref_segments = []
    read_segments = []
    ref_pos = begin_pos_read_in_ref
    read_start = 0
    read_end = 0
    segment_type = []
    segment_lens = []
    read_pos = read_start
    for index, element in enumerate(cigar_elements):
        kind, length = element
        if kind == SOFT_CLIP:
            if index == 0:
                # A leading soft clip just shifts where the read starts.
                read_pos += length
                read_start += length
                read_end = read_start
            elif index == len(cigar_elements) - 1:
                # A trailing soft clip contributes no segment.
                continue
            else:
                msg = 'Soft clips in the middle of the read are not supported'
                raise RuntimeError(msg)
        elif kind == MATCH:
            segment_type.append(MATCH)
            ref_segments.append(ref_pos)
            read_segments.append(read_pos)
            segment_lens.append(length)
            ref_pos += length
            read_pos += length
            read_end += length
        elif kind == INSERTION:
            # Insertions consume read but no reference.
            segment_type.append(INSERTION)
            ref_segments.append(None)
            read_segments.append(read_pos)
            segment_lens.append(length)
            read_pos += length
            read_end += length
        elif kind == DELETION or kind == SKIP:
            # Deletions and skips consume reference but no read.
            read_segments.append(None)
            ref_segments.append(ref_pos)
            segment_lens.append(length)
            if kind == DELETION:
                segment_type.append(DELETION)
            if kind == SKIP:
                segment_type.append(SKIP)
            ref_pos += length
    ref_end = ref_pos - 1
    # Sometimes the first segment is an insertion (no reference position), in
    # which case the reference start comes from the next segment.
    # (A redundant pre-assignment of ref_start that this if/else immediately
    # overwrote has been removed.)
    if ref_segments[0] is not None:
        ref_start = ref_segments[0]
    else:
        ref_start = ref_segments[1]
    ref_limits = [ref_start, ref_end]
    read_end = read_end - 1
    read_limits = [read_start, read_end]
    result = (ref_segments, read_segments, sorted(ref_limits),
              sorted(read_limits), segment_type, segment_lens)
    if ref_pos is not None:
        # store cache
        SEGMENTS_CACHE[cache_key] = {'result': result, 'ref_pos': ref_pos}
    # clean cache: wipe it completely once it grows too large
    if len(SEGMENTS_CACHE) > MAX_CACHED_SEGMENTS:
        SEGMENTS_CACHE = {}
    return result
def _locate_segment(ref_pos, ref_segments, segment_lens, ref_limits):
    '''It returns (segment index, relative position) for a reference position.

    It returns None when ref_pos does not fall inside any segment.
    ref_limits is accepted for interface compatibility but not used.
    '''
    for index, seg_start in enumerate(ref_segments):
        if seg_start is None:
            # Insertion segments have no reference coordinate.
            continue
        seg_end = seg_start + segment_lens[index] - 1
        if ref_pos < seg_start:
            # Segments come in order, so the position precedes them all.
            return None
        if ref_pos > seg_end:
            continue
        # seg_start <= ref_pos <= seg_end from here on.
        if seg_start == seg_end:
            return index, IN_FIRST_AND_LAST
        if ref_pos == seg_start:
            return index, IN_FIRST_POS
        if ref_pos == seg_end:
            return index, IN_LAST_POS
        return index, IN_MIDDLE_POS
    # The position lies beyond the last segment.
    return None
def _get_insertion(segment_index, segment_type, read_pos, aligned_read,
                   segment_lens):
    '''It returns the insertion allele that follows the given segment.

    It returns (allele, kind, qual); all three are None when the following
    segment is not an insertion (or there is no following segment).
    '''
    last_index = len(segment_type) - 1
    if segment_index == last_index:
        # We are in the last segment: nothing can follow it.
        return None, None, None
    next_index = segment_index + 1
    if segment_type[next_index] != INSERTION:
        return None, None, None
    indel_length = segment_lens[next_index]
    if not indel_length:
        msg = "An insertion can't be of 0 length\n"
        msg += 'next_segment_pos ' + str(next_index)
        msg += '\nsegment_index ' + str(segment_index)
        msg += '\nsegment_type ' + str(segment_type)
        msg += '\nread_pos ' + str(read_pos)
        msg += '\naligned_read ' + str(aligned_read)
        raise ValueError(msg)
    insertion_slice = slice(read_pos, read_pos + indel_length)
    allele, qual = _get_raw_allele_from_read(aligned_read, insertion_slice)
    return allele, INSERTION, qual
def _from_ref_to_read_pos(segment_type, ref_segment_pos, read_segment_pos,
                          ref_pos):
    'It translates a reference position into the read coordinate system.'
    # Only match segments map reference coordinates onto the read.
    if segment_type != MATCH:
        return None
    offset = ref_pos - ref_segment_pos
    return read_segment_pos + offset
def _read_pos_around_del(ref_segments, read_segments, segment_types,
                         segment_index, ref_pos):
    'It returns the two read positions that embrace the given deletion.'
    prev_index = segment_index - 1
    next_index = segment_index + 1
    if segment_types[prev_index] == MATCH:
        # Project the last reference position before the deletion onto the
        # read; the base after it embraces the deletion from the right.
        read_pos1 = _from_ref_to_read_pos(segment_types[prev_index],
                                          ref_segments[prev_index],
                                          read_segments[prev_index],
                                          ref_segments[segment_index] - 1)
        read_pos2 = read_pos1 + 1
    elif segment_types[next_index] == MATCH:
        # Project the first reference position after the deletion instead.
        read_pos2 = _from_ref_to_read_pos(segment_types[next_index],
                                          ref_segments[next_index],
                                          read_segments[next_index],
                                          ref_segments[next_index])
        read_pos1 = read_pos2 - 1
    else:
        msg = 'A deletion is surrounded by two segments that are not matches'
        raise RuntimeError(msg)
    return read_pos1, read_pos2
def _chop_alignment(alignment):
    '''It splits a (reference, read) alignment into per-column lists.

    Consecutive reference gap columns (insertions in the read) are grouped
    into a single list element holding the whole insertion.
    '''
    ref_seq, read_seq = alignment
    ref_columns = []
    read_columns = []
    gap_run, read_run = '', ''
    for index in range(len(ref_seq)):
        ref_char = ref_seq[index]
        read_char = read_seq[index]
        if ref_char == '-':
            # Extend the current insertion run.
            gap_run += '-'
            read_run += read_char
            continue
        if gap_run:
            # A non-gap column closes the pending insertion run.
            ref_columns.append(gap_run)
            read_columns.append(read_run)
            gap_run, read_run = '', ''
        ref_columns.append(ref_char)
        read_columns.append(read_char)
    if gap_run:
        # Flush a trailing insertion run.
        ref_columns.append(gap_run)
        read_columns.append(read_run)
    return ref_columns, read_columns
def _prepare_alignments(alignments):
    'It converts ref-to-read alignments into the structure used for joining.'
    prepared = []
    for read_name, alignment in alignments.items():
        listed_ref, listed_read = _chop_alignment(alignment)
        assert len(listed_ref) == len(listed_read)
        prepared.append({'reference': listed_ref,
                         'reads': {read_name: listed_read}})
    return prepared
def add_collumn(alignment, nalig, index, diff_length=0):
    '''It appends the column at index of every read in alignment to nalig.

    A read shorter than index contributes a gap of diff_length instead.
    (The name keeps its historical spelling: it is part of the public API.)
    '''
    for read_name, read in alignment['reads'].items():
        target = nalig['reads'].setdefault(read_name, [])
        try:
            column = read[index]
        except IndexError:
            column = diff_length * '-'
        target.append(column)
def _join_alignments(alignment1, alignment2, snv_types_per_read):
    '''It joins two alignments and makes a new alignment.

    Each alignment is a dict with a 'reference' key (a per-column list, as
    built by _chop_alignment) and a 'reads' key (a dict mapping read names to
    per-column lists). The result has the same structure and contains the
    reads from both inputs.
    snv_types_per_read is not used here; it is accepted for interface
    compatibility with the caller.
    '''
    def _insert_in_seq(list_seq):
        # True when any column of this listed sequence holds a gap.
        for element in list_seq:
            if '-' in element:
                return True
        return False
    # Use the alignment with the longest reference as the base one.
    if len(alignment1['reference']) > len(alignment2['reference']):
        align1 = alignment1
        align2 = alignment2
    else:
        align1 = alignment2
        align2 = alignment1
    ref1 = align1['reference']
    ref2 = align2['reference']
    # Fast path: no insertions on either side and equal-length references, so
    # the read dicts can simply be merged under one reference.
    if (not _insert_in_seq(ref1) and not _insert_in_seq(ref2) and
        len(ref1) == len(ref2)):
        reads = {}
        for name, read in align1['reads'].items():
            reads[name] = read
        for name, read in align2['reads'].items():
            reads[name] = read
        return {'reference':ref1,
                'reads':reads}
    nalig = {'reference':[], 'reads':{}}
    # The deltas keep each input alignment in step with the output columns
    # whenever one side needs an extra column for an insertion.
    index1_delta = 0
    index2_delta = 0
    for index in range(len(ref1)):
        index1 = index - index1_delta
        index2 = index - index2_delta
        ref1_item = ref1[index1]
        try:
            ref2_item = ref2[index2]
        except IndexError:
            # ref2 is exhausted; reuse ref1's column beyond its end.
            ref2_item = ref1_item
        # Number of insertion positions each reference has in this column.
        inser_1 = ref1_item.count('-')
        inser_2 = ref2_item.count('-')
        if inser_1 == inser_2:
            # Both sides agree on the insertion length at this column.
            nalig['reference'].append(ref1_item)
            add_collumn(align1, nalig, index1, inser_1)
            add_collumn(align2, nalig, index2, inser_2)
        elif inser_1 > inser_2:
            # align1 has the longer insertion: pad align2's reads with gaps
            # and delay align2 by one column (index2_delta).
            inser_diff = inser_1 - inser_2
            nalig['reference'].append(ref1_item)
            add_collumn(align1, nalig, index1)
            just_once = True
            for name, read in align2['reads'].items():
                if name not in nalig['reads']:
                    nalig['reads'][name] = []
                if inser_2 > 0:
                    nalig['reads'][name].append(read[index2] + inser_diff * '-')
                else:
                    nalig['reads'][name].append(inser_diff * '-')
                if just_once:
                    # Only bump the delta once per column, not once per read.
                    index2_delta += 1
                    just_once = False
        else:
            # align2 has the longer insertion: pad align1's reads with gaps
            # and delay align1 by one column (index1_delta).
            inser_diff = inser_2 - inser_1
            nalig['reference'].append(ref2_item)
            just_once = True
            for name, read in align1['reads'].items():
                if name not in nalig['reads']:
                    nalig['reads'][name] = []
                if inser_1 > 0:
                    nalig['reads'][name].append(read[index1] + inser_diff * '-')
                else:
                    nalig['reads'][name].append(inser_diff * '-')
                if just_once:
                    index1_delta += 1
                    just_once = False
            # NOTE(review): this uses the raw output index, unlike the other
            # branches which use the delta-corrected index2 — confirm whether
            # index2 was intended here.
            add_collumn(align2, nalig, index)
    return nalig
def _make_multiple_alignment(alignments, reads=None):
    '''It makes a multiple alignment from reference-to-read pairwise ones.

    alignments maps read names to (reference, read) aligned string pairs.
    reads is not used here; it is kept for interface compatibility.
    '''
    snv_types_per_read = {}
    alignments = _prepare_alignments(alignments)
    if not alignments:
        raise RuntimeError('No alignments to create the multiple one')

    def _first_read(alignment):
        # next(iter(...)) instead of .values()[0]: dict.values() is a list in
        # python 2 but a non-indexable view in python 3; this form works with
        # both.
        return next(iter(alignment['reads'].values()))

    alignment = alignments[0]
    assert len(_first_read(alignment)) == len(alignment['reference'])
    for index in range(1, len(alignments)):
        next_alignment = alignments[index]
        assert (len(_first_read(next_alignment)) ==
                len(next_alignment['reference']))
        alignment = _join_alignments(alignment, next_alignment,
                                     snv_types_per_read)
        assert len(_first_read(alignment)) == len(alignment['reference'])
    return alignment
def _get_alignment_section(pileup_read, start, end, reference_seq=None):
    '''It gets a section of the alignment of the given read.

    It returns (reference string, read string) for the reference interval
    [start, end[. Positions outside the read's aligned span are padded with
    UNKNOWN_NUCLEOTYDE ('N') on the reference side (or taken from
    reference_seq when given) and with NO_NUCLEOTYDE (' ') on the read side.
    '''
    # We work with an inclusive stop internally because the public interval
    # is half open: [start, end[ becomes [start, stop].
    if start > end:
        raise ValueError('Start (%i) is bigger than end (%i)' % (start, end))
    stop = end - 1
    aligned_read = pileup_read.alignment
    read_seq = aligned_read.seq
    # ali_read_limits is part of the segment tuple but is not used here.
    (ref_segments, read_segments, ali_ref_limits, ali_read_limits, segment_types,
     segment_lens) = _get_cigar_segments_from_aligned_read(aligned_read)
    if (start < 0 or (reference_seq and len(reference_seq) < end)):
        # NOTE(review): when start < 0 and reference_seq is None, ref_len is
        # None and '%d' % None raises TypeError instead of this ValueError —
        # confirm whether that path can happen.
        ref_len = len(reference_seq) if reference_seq else None
        msg = 'Section outside the alignment: start (%d),'
        msg += 'stop (%d), limits (1, %d)'
        msg %= start, stop, ref_len
        raise ValueError(msg)
    # Locate which cigar segments hold the start, end and stop positions;
    # None means the position falls outside every segment.
    start_segment = _locate_segment(start, ref_segments, segment_lens, ali_ref_limits)
    start_segment = start_segment[0] if start_segment is not None else None
    end_segment = _locate_segment(end, ref_segments, segment_lens, ali_ref_limits)
    end_segment = end_segment[0] if end_segment is not None else None
    stop_segment = _locate_segment(stop, ref_segments, segment_lens, ali_ref_limits)
    if stop_segment is not None:
        stop_segment, stop_segment_pos = stop_segment
    else:
        stop_segment, stop_segment_pos = None, None
    # Clamp the requested interval to where the read actually aligns, in
    # reference coordinates.
    ref_start_limit, ref_end_limit = ali_ref_limits
    read_start_in_ref = start if start >= ref_start_limit else ref_start_limit
    read_stop_in_ref = stop if stop <= ref_end_limit else ref_end_limit
    # If the stop lands on the last base of a match segment followed by an
    # insertion, the insertion belongs to the section, so include that
    # segment too.
    if (stop_segment is not None and stop_segment_pos == IN_LAST_POS and
        len(ref_segments) > stop_segment + 1 and
        segment_types[stop_segment + 1] == INSERTION):
        stop_segment += 1
    cum_ref_seq, cum_read_seq = '', ''
    # Part of the section before the aligned read begins: reference (or 'N')
    # on one side, blanks on the other.
    len_before_segment = ref_start_limit - start
    if reference_seq is None:
        ref_seq_before = UNKNOWN_NUCLEOTYDE * len_before_segment
    else:
        ref_seq_before = str(reference_seq.seq[ref_start_limit - len_before_segment:ref_start_limit])
    cum_ref_seq += ref_seq_before
    cum_read_seq += NO_NUCLEOTYDE * len_before_segment
    # Part of the section covered by the alignment, segment by segment.
    ssegment = 0 if start_segment is None else start_segment
    esegment = len(ref_segments) - 1 if stop_segment is None else stop_segment
    for isegment in range(ssegment, esegment + 1):
        seg_type = segment_types[isegment]
        seg_len = segment_lens[isegment]
        ref_seg_start = ref_segments[isegment]
        # Insertions carry no reference start, so derive one from the
        # neighbouring segments.
        if ref_seg_start is None:
            try:
                ref_seg_start = ref_segments[isegment + 1] - 1
            except IndexError:
                prev_seg_start = ref_segments[isegment -1]
                prev_seg_len = segment_lens[isegment -1]
                ref_seg_start = prev_seg_start + prev_seg_len
        read_seg_start = read_segments[isegment]
        # Trim the first and last segments to the requested interval.
        if isegment == ssegment:
            start_delta = read_start_in_ref - ref_seg_start
        else:
            start_delta = 0
        if isegment == esegment:
            end_delta = seg_len - read_stop_in_ref + ref_seg_start - 1
        else:
            end_delta = 0
        if seg_type == INSERTION:
            # Insertion: gaps on the reference side, read bases on the other.
            seg_ref_seq = DELETION_ALLELE * seg_len
            read_start = read_seg_start + start_delta
            read_end = read_start + seg_len #- end_delta
            seg_read_seq = read_seq[read_start: read_end]
        elif seg_type == DELETION or seg_type == SKIP:
            # Deletion/skip: reference bases (or 'N') against read gaps.
            ref_start = ref_seg_start + start_delta
            ref_end = ref_start + seg_len - start_delta - end_delta
            if reference_seq is not None:
                seg_ref_seq = str(reference_seq.seq[ref_start: ref_end])
            else:
                seg_ref_seq = UNKNOWN_NUCLEOTYDE * (seg_len - start_delta - end_delta)
            read_start = None
            read_end = None
            seg_read_seq = DELETION_ALLELE * (seg_len - start_delta - end_delta)
        else:
            # Match: bases on both sides.
            ref_start = ref_seg_start + start_delta
            ref_end = ref_start + seg_len - end_delta - start_delta
            if reference_seq is not None:
                seg_ref_seq = str(reference_seq.seq[ref_start: ref_end])
            else:
                seg_ref_seq = UNKNOWN_NUCLEOTYDE * (seg_len - end_delta - start_delta)
            read_start = read_seg_start + start_delta
            read_end = read_start + seg_len - end_delta - start_delta
            seg_read_seq = read_seq[read_start: read_end]
        cum_ref_seq += seg_ref_seq
        cum_read_seq += seg_read_seq
    # Part of the section after the aligned read ends (negative lengths make
    # both additions empty).
    len_after_alignment = stop - ali_ref_limits[1]
    if reference_seq is None:
        ref_seq_after = UNKNOWN_NUCLEOTYDE * len_after_alignment
    else:
        ref_seq_after = str(reference_seq.seq[ali_ref_limits[1] + 1:ali_ref_limits[1] + len_after_alignment + 1])
    cum_ref_seq += ref_seq_after
    cum_read_seq += NO_NUCLEOTYDE * len_after_alignment
    assert len(cum_ref_seq) == len(cum_read_seq)
    return cum_ref_seq, cum_read_seq
def _get_alleles_from_read(ref_allele, ref_pos, pileup_read):
    '''It returns the alleles found in the read at the given position.

    It returns a list of (allele, kind, qual, is_reverse) tuples together
    with the read limits. The list can be empty if we're in the middle of a
    deletion. If the position is followed by an insertion two alleles are
    returned: the one at the position and the insertion.

    NOTE(review): when the position falls outside every segment this returns
    a bare [] instead of the (alleles, read_limits) tuple returned at the
    end — confirm the callers handle both shapes.
    '''
    alleles = []
    aligned_read = pileup_read.alignment
    (ref_segments, read_segments, ref_limits, read_limits, segment_types,
     segment_lens) = _get_cigar_segments_from_aligned_read(aligned_read)
    located_segment = _locate_segment(ref_pos, ref_segments, segment_lens,
                                      ref_limits)
    if located_segment is None:
        return []
    else:
        segment_index, segment_pos = located_segment
    is_reverse = bool(aligned_read.is_reverse)
    if segment_types[segment_index] == MATCH:
        # A base aligned to the reference: SNP if it differs, invariant
        # otherwise.
        read_pos = _from_ref_to_read_pos(MATCH, ref_segments[segment_index],
                                         read_segments[segment_index], ref_pos)
        allele, qual = _get_raw_allele_from_read(aligned_read, read_pos)
        if allele != ref_allele:
            kind = SNP
        else:
            kind = INVARIANT
        alleles.append((allele, kind, qual, is_reverse))
        if segment_pos == IN_LAST_POS or segment_pos == IN_FIRST_AND_LAST:
            # Is there an insertion in the next position?
            next_read_pos = read_pos + 1
            allele, kind, qual = _get_insertion(segment_index, segment_types,
                                                next_read_pos, aligned_read,
                                                segment_lens)
            if kind is not None:
                alleles.append((allele, kind, qual, is_reverse))
    elif segment_types[segment_index] == DELETION:
        # Deletions are only reported at their first position; positions in
        # the middle contribute no allele.
        if (segment_pos == IN_FIRST_POS or segment_pos == IN_FIRST_AND_LAST or
            segment_pos == IN_LAST_POS):
            read_pos1, read_pos2 = _read_pos_around_del(ref_segments,
                                                        read_segments,
                                                        segment_types,
                                                        segment_index,
                                                        ref_pos)
            if segment_pos == IN_FIRST_POS or segment_pos == IN_FIRST_AND_LAST:
                indel_length = segment_lens[segment_index]
                allele = DELETION_ALLELE * (indel_length)
                # In the deletion case the quality is the lowest of the
                # bases that embrace the deletion.
                if aligned_read.qual:
                    qual0 = aligned_read.qual[read_pos1]
                    qual0 = _quality_to_phred(qual0)
                    qual1 = aligned_read.qual[read_pos2]
                    qual1 = _quality_to_phred(qual1)
                    qual = min((qual0, qual1))
                else:
                    qual = None
                kind = DELETION
                alleles.append((allele, kind, qual, is_reverse))
            if segment_pos == IN_FIRST_AND_LAST or segment_pos == IN_LAST_POS:
                # Is there an insertion in the next position?
                allele, kind, qual = _get_insertion(segment_index, segment_types,
                                                    read_pos1, aligned_read,
                                                    segment_lens)
                if kind is not None:
                    alleles.append((allele, kind, qual, is_reverse))
    elif segment_types[segment_index] == SKIP:
        # Skipped reference (e.g. intron): only a following insertion can
        # contribute an allele.
        read_pos1, read_pos2 = _read_pos_around_del(ref_segments,
                                                    read_segments,
                                                    segment_types,
                                                    segment_index,
                                                    ref_pos)
        allele, kind, qual = _get_insertion(segment_index, segment_types,
                                            read_pos1, aligned_read,
                                            segment_lens)
        if kind is not None:
            alleles.append((allele, kind, qual, is_reverse))
    elif segment_types[segment_index] == INSERTION:
        pass #if we're in an insertion, it is returned in the last position
             #of the previous match segment
    return alleles, read_limits
def _quality_to_phred(quality):
'It transforms a qual chrs into a phred quality'
if quality is None:
return None
elif len(quality) == 1:
phred_qual = ord(quality) - 33
else:
phred_quals = [ord(qual) - 33 for qual in quality]
phred_qual = sum(phred_quals) / len(phred_quals)
if phred_qual == 93: #the character used for unknown qualities
phred_qual = None
return phred_qual
def _add_allele(alleles, allele, kind, read_name, read_group, is_reverse, qual,
mapping_quality, readgroup_info, pileup_read):
'It adds one allele to the alleles dict'
key = (allele, kind)
if key not in alleles:
alleles[key] = {'read_groups':[], 'orientations':[],
'qualities':[], 'mapping_qualities':[], 'reads':[]}
allele_info = alleles[key]
allele_info['read_groups'].append(read_group)
allele_info['orientations'].append(not(is_reverse))
allele_info['qualities'].append(qual)
allele_info['mapping_qualities'].append(mapping_quality)
allele_info['reads'].append(pileup_read)
def _normalize_read_edge_conf(read_edge_conf):
'It returns a dict with all valid keys'
platforms = ('454', 'sanger', 'illumina')
if read_edge_conf is None:
read_edge_conf = {}
for platform in platforms:
if platform not in read_edge_conf:
read_edge_conf[platform] = (None, None)
return read_edge_conf
def _add_pileup_reads(snv, reads):
'It adds the pileup reads for each snv_allele to the reads structure'
snv_name = snv['ref_name'] + '_' + str(snv['ref_position'])
if snv_name not in reads:
reads[snv_name] = {}
for allele, allele_info in snv['alleles'].items():
reads[snv_name][allele] = allele_info['reads']
def _check_read_length(read, position, read_edge_conf, read_groups_info,
                       default_bam_platform):
    'It checks if the given pileup read covers all the snv extension'
    aligned_read = read.alignment
    # the third element returned by the segments helper is the (start, end)
    # span of the alignment on the reference
    ref_start, ref_end = _get_cigar_segments_from_aligned_read(aligned_read)[2]
    if read_edge_conf:
        platform = _get_platform_from_aligned_read(aligned_read,
                                                   read_groups_info,
                                                   default_bam_platform)[0]
        if platform in read_edge_conf:
            left_edge, right_edge = read_edge_conf[platform]
            # the edges are regions at both read ends that should be ignored
            if left_edge:
                ref_start += left_edge
            if right_edge:
                ref_end -= right_edge
    return ref_start <= position[0] and ref_end >= position[1] - 1
def _get_fixed_snv_start_vcf4(snv):
    '''It corrects the start of the snv to the new vcf4 system.

    In vcf4 a deletion is reported one base to the left:
               01234567   snp_caller     vcf_format
        ref    atctgtag
        read1  atccgtag   snp in pos 3   snp in pos 3
        read2  atctgcctag inser in pos 5 inser in pos 5
        read3  atgcctag   del in pos 2   del in pos 1
    '''
    kinds = [kind for _, kind in snv['alleles'].keys()]
    snv_kind = _calculate_snv_kinds(kinds)
    has_deletion = (snv_kind in (DELETION, INDEL) or
                    (snv_kind == COMPLEX and DELETION in kinds))
    if has_deletion:
        return snv['ref_position'] - 1
    return snv['ref_position']
def _get_snv_end_position(snv):
    '''It returns the snv position plus the length of the allele.

    Insertions and snps span one reference base; deletions span as many
    bases as the longest deleted allele.
    '''
    kinds = [kind for _, kind in snv['alleles'].keys()]
    snv_kind = _calculate_snv_kinds(kinds)
    if (snv_kind in (INSERTION, SNP) or
        (snv_kind == COMPLEX and INSERTION in kinds)):
        return snv['ref_position'] + 1
    deletion_lengths = [len(seq) for seq, kind in snv['alleles'].keys()
                        if kind == DELETION]
    max_length = max(deletion_lengths) if deletion_lengths else 0
    return snv['ref_position'] + max_length
def _init_snv_block(snv, read_groups_info, default_bam_platform, read_edge_conf):
    'It inits the data structure for the first snv of a block'
    start = _get_fixed_snv_start_vcf4(snv)
    end = _get_snv_end_position(snv)
    reads = {}
    _add_pileup_reads(snv, reads)
    snv_block = {'start': start, 'end': end, 'snvs': [snv]}
    # a deletion moves the block start one base to the left; in that case
    # the reads must also cover that extra base
    if start != snv['ref_position']:
        _remove_not_covering_reads(reads, (start, end), read_groups_info,
                                   default_bam_platform, read_edge_conf)
    return snv_block, reads
def _remove_not_covering_reads(reads, position, read_groups_info,
                               default_bam_platform, read_edge_conf):
    '''It checks that the reads in the snv cover the length of the snv.

    reads maps snv_name -> allele -> pileup reads.  Reads that do not span
    the complete (start, end) position are dropped; alleles left without
    reads are removed, and snvs left without alleles (or with only an
    invariant allele) are removed as well.  Everything is done in place.
    '''
    # iterate over snapshots: deleting entries while iterating the dicts
    # directly raises a RuntimeError under Python 3
    for snv_name, alleles in list(reads.items()):
        for allele, pileup_reads in list(alleles.items()):
            valid_reads = [r for r in pileup_reads
                           if _check_read_length(r, position, read_edge_conf,
                                                 read_groups_info,
                                                 default_bam_platform)]
            if not valid_reads:
                del reads[snv_name][allele]
            else:
                reads[snv_name][allele] = valid_reads
        remaining = list(reads[snv_name].keys())
        if not remaining or (len(remaining) == 1 and
                             remaining[0][1] == INVARIANT):
            del reads[snv_name]
def _make_snv_blocks(snvs, read_edge_conf=None, read_groups_info=None,
                     default_bam_platform=None):
    '''It joins snvs that should be just one snv. e.g. a deletion that match
    with another deletion in the same position.

    It is a generator: it consumes per-position snvs (assumed to be ordered
    by position) and yields snv_block dicts with the keys 'start', 'end'
    and 'snvs'.
    '''
    def add_one_to_left_if_snp(snv_block, reads):
        '''It yields an snv_block,
        but first it checks if we have to add
        one base to the left because we have an SNP as the first snv of the
        block. In that case it would check if there are enough reads covering'''
        # if there are several SNPs together and the first one is an
        # SNP we should add one base to the block in order to have
        # a non-variant position as the first base of the new COMPLEX
        # block
        first_snv = snv_block['snvs'][0]
        two_snps_together = (len(snv_block['snvs']) > 1 and
                             _is_snv_of_kind(first_snv, SNP))
        # a complex allele would have an insertion and a deletion or an
        # insertion and a snp in the same allele
        # ref  A-  A-
        # read GT  -T
        complex_kinds = [k for a, k in first_snv['alleles'].keys() if k == COMPLEX]
        complex_allele_in_first_snv = True if complex_kinds else False
        if (first_snv['ref_position'] == snv_block['start'] and
            (two_snps_together or complex_allele_in_first_snv)):
            # widen the block one base to the left and drop the reads that
            # do not cover the widened span
            snv_block['start'] -= 1
            snv_span = (snv_block['start'], snv_block['end'])
            _remove_not_covering_reads(reads, snv_span, read_groups_info,
                                       default_bam_platform, read_edge_conf)
            if not reads:
                # no read covers the widened block, so it is discarded
                snv_block, reads = None, None
        return snv_block, reads
    snv_block, reads = None, None
    for snv in snvs:
        if snv_block is None:
            # first snv seen: start a fresh block
            snv_block, reads = _init_snv_block(snv, read_groups_info,
                                               default_bam_platform,
                                               read_edge_conf)
            continue
        snv_start = _get_fixed_snv_start_vcf4(snv)
        snv_end = _get_snv_end_position(snv)
        if snv_block['end'] + 1 <= snv_start:
            # we do not add the snv to the block because the snv starts to the
            # right of the current snv_block
            snv_block, reads = add_one_to_left_if_snp(snv_block, reads)
            if snv_block is not None:
                yield snv_block
            snv_block, reads = _init_snv_block(snv, read_groups_info,
                                               default_bam_platform,
                                               read_edge_conf)
        else:
            # the snv overlaps (or touches) the current block; keep only the
            # reads that cover the extended span
            _remove_not_covering_reads(reads, (snv_block['start'], snv_end),
                                       read_groups_info, default_bam_platform,
                                       read_edge_conf)
            if not reads:
                if not _is_snv_of_kind(snv, SNP):
                    # we do not add the snv because there would be not enough reads
                    # covering the snv_block
                    snv_block, reads = add_one_to_left_if_snp(snv_block, reads)
                    if snv_block is not None:
                        yield snv_block
                    snv_block, reads = _init_snv_block(snv, read_groups_info,
                                                       default_bam_platform,
                                                       read_edge_conf)
                # NOTE(review): when reads is empty and the snv IS an SNP
                # nothing happens here, so the snv is silently dropped while
                # the block is kept -- confirm this is intended
            else:
                #we have decided to add the new snv to the block
                if snv_block['end'] < snv_end:
                    snv_block['end'] = snv_end
                snv_block['snvs'].append(snv)
    else:
        # for/else: the loop has no break, so this always runs once the
        # input is exhausted; it flushes the last pending block
        if snv_block:
            snv_block, reads = add_one_to_left_if_snp(snv_block, reads)
            if snv_block is not None:
                yield snv_block
def get_insertions_in_position(reads, position):
    '''It returns the insertions that are found inside the given range.

    reads maps read name to a dict with a 'segments' tuple; the result is a
    list of (start, end) reference spans.
    '''
    start, end = position
    insertions = set()
    for read_info in reads.values():
        ref_segments, _, _, _, segment_types, segment_lens = read_info['segments']
        for index, segment_type in enumerate(segment_types):
            if segment_type != 1:  # 1 is the segment code for an insertion
                continue
            # an insertion is anchored on the previous segment start
            ins_start = ref_segments[index - 1]
            ins_end = ins_start + segment_lens[index]
            if start < ins_start < end or start < ins_end < end:
                insertions.add((ins_start, ins_end))
    return list(insertions)
def _is_snv_of_kind(snv, kind):
    '''It returns True if the snv is of the given kind.

    It works with the kinds used in the one column old codification
    (DELETION, INSERTION, etc.), not with the new multicolumn kind
    specification (VARIANT, INVARIANT).
    '''
    kinds = [allele_kind for _, allele_kind in snv['alleles'].keys()]
    return _calculate_snv_kinds(kinds) == kind
def _sum_snv_kinds(kind1, kind2):
    'It calculates the result of the union of two kinds'
    if kind1 == kind2:
        return kind1
    # an invariant does not change the other kind
    if kind1 == INVARIANT:
        return kind2
    if kind2 == INVARIANT:
        return kind1
    # mixing an snp or a complex with anything different yields a complex
    if SNP in (kind1, kind2) or COMPLEX in (kind1, kind2):
        return COMPLEX
    # the remaining mix is insertion with deletion
    return INDEL
def _calculate_snv_kinds(kinds):
    'It returns the snv kind that results from combining all the given kinds'
    combined = kinds[0]
    for kind in kinds[1:]:
        combined = _sum_snv_kinds(combined, kind)
    return combined
def _remove_allele_from_alignments(alignments, min_num_reads_for_allele):
''' It removes alignments/alleles taking into account the times it appears
min_num_reads_for_allele'''
allele_count = {}
for ref, allele in alignments.values():
if allele not in allele_count:
allele_count[allele] = 0
allele_count[allele] +=1
for read_name, alignment in alignments.items():
read_allele = alignment[1]
if allele_count[read_allele] < min_num_reads_for_allele:
del alignments[read_name]
def _join_snvs(snv_block, min_num_alleles, min_num_reads_for_allele,
               min_quality, reference_seq=None):
    '''It joins the snvs of a block into one equivalent snv.

    A block with a single SNP is returned as is.  Otherwise the reads are
    realigned over the complete block span and new alleles covering the
    whole block are built.  It returns None when no valid allele remains.
    '''
    snvs = snv_block['snvs']
    if len(snvs) == 1 and _is_snv_of_kind(snvs[0], SNP):
        return snvs[0]
    else:
        block_start = snv_block['start']
        block_end = snv_block['end']
        # collect all the reads and its alignment
        reads = {}
        alignments = {}
        allele_kinds = {}
        for snv in snvs:
            for allele, allele_info in snv['alleles'].items():
                allele_kind = allele[1]
                for index in range(len(allele_info['reads'])):
                    name = allele_info['reads'][index].alignment.qname
                    if name in reads:
                        # a read can support several snvs; keep its first info
                        continue
                    read = allele_info['reads'][index]
                    read_group = allele_info['read_groups'][index]
                    orientation = allele_info['orientations'][index]
                    quality = allele_info['qualities'][index]
                    mapping_quality = allele_info['mapping_qualities'][index]
                    reads[name] = {'read': read,
                                   'read_group': read_group,
                                   'orientation': orientation,
                                   'quality': quality,
                                   'mapping_quality': mapping_quality}
                # do the multiple alignments
                # NOTE(review): allele_kinds is filled for every read seen so
                # far (not only the reads of this allele) and is never read
                # afterwards -- it looks like dead code; confirm before
                # removing it
                for read_name in reads:
                    if read_name not in allele_kinds:
                        allele_kinds[read_name] = []
                    allele_kinds[read_name].append(allele_kind)
                    if read_name not in alignments:
                        # extract the read/reference section that spans the
                        # whole block
                        alignment = _get_alignment_section(reads[read_name]['read'],
                                                           block_start, block_end,
                                                           reference_seq=reference_seq)
                        alignments[read_name] = alignment
        # we need to remove the reads that do not cover the alignment in its
        # complete span
        for read_name in list(reads.keys()):
            aligned_read = alignments[read_name][1]
            if NO_NUCLEOTYDE in aligned_read:
                del alignments[read_name]
                del reads[read_name]
        _remove_allele_from_alignments(alignments, min_num_reads_for_allele)
        if alignments:
            malignment = _make_multiple_alignment(alignments, reads)
            # We create the new alleles that span the complete snv_block
            alleles = {}
            ref_allele = ''.join(malignment['reference'])
            ref_allele = ref_allele.replace('-', '')
            for read, allele in malignment['reads'].items():
                allele = ''.join(allele)
                allele = allele.replace(DELETION_ALLELE, '')
                kind = INVARIANT if allele == ref_allele else VARIANT
                allele = allele, kind
                if allele not in alleles:
                    alleles[allele] = {'read_groups':[],
                                       'reads':[],
                                       'orientations':[],
                                       'qualities':[],
                                       'mapping_qualities':[]}
                allele_info = alleles[allele]
                allele_info['read_groups'].append(reads[read]['read_group'])
                allele_info['reads'].append(reads[read]['read'])
                allele_info['orientations'].append(reads[read]['orientation'])
                allele_info['qualities'].append(reads[read]['quality'])
                allele_info['mapping_qualities'].append(reads[read]['mapping_quality'])
                # the aggregated quality is refreshed as reads are added
                allele_info['quality'] = _calculate_allele_quality(allele_info)
            #remove bad quality alleles
            _remove_bad_quality_alleles(alleles, min_quality)
            # min_num_reads_for_allele
            _remove_alleles_by_read_number(alleles, min_num_reads_for_allele)
            #if there are a min_num number of alleles requested and there are more
            #alleles than that
            #OR
            #there is some allele different than invariant
            #a variation is yield
            # NOTE(review): alleles.keys()[0] relies on Python 2 returning a
            # list from keys(); under Python 3 this expression raises
            if alleles and ((len(alleles) > min_num_alleles) or
                (min_num_alleles == 1 and alleles.keys()[0][1] != INVARIANT) or
                (min_num_alleles > 1 and len(alleles) >= min_num_alleles)):
                snv = {'ref_name':snvs[0]['ref_name'],
                       'ref_position':block_start,
                       'read_groups':snvs[0]['read_groups'],
                       'alleles':alleles,
                       'reference_allele': ref_allele}
                #some alleles might end up here because the functions that do
                #the SNP calling might have some problems.For instance
                # ref TACTG  qualities
                # al1 TA--G  45 45 45
                # al2 T-CTG  10 60 45 45
                alleles_different_in_first_base = [(a, k) for a, k in alleles.keys() if ref_allele[0] != a[0]]
                alleles_not_invariant = [(a, k) for a, k in alleles.keys() if k != INVARIANT]
                if len(alleles_not_invariant) - len(alleles_different_in_first_base) > 0:
                    #we remove the offending alleles
                    for allele in alleles_different_in_first_base:
                        del snv['alleles'][allele]
                else:
                    msg = 'One allele in molecule %s position %i will be malformed'
                    msg %= snv['ref_name'], snv['ref_position']
                    logger = logging.getLogger('franklin')
                    logger.warn(msg)
                return snv
def _snvs_in_bam(bam, reference, min_quality,
                 default_sanger_quality, min_mapq, min_num_alleles,
                 max_maf, min_num_reads_for_allele,
                 read_edge_conf=None, default_bam_platform=None):
    'It returns the snvs in a bam for the given reference'
    read_groups_info = get_read_group_info(bam)
    if not read_groups_info:
        # without read groups in the header a default platform is required
        if default_bam_platform is None:
            raise ValueError('Platform is not present either in header or in '
                             'configuration')
        read_groups_info = {UNKNOWN_RG: {'PL': default_bam_platform}}
    per_position_snvs = _snvs_in_bam_by_position(bam, reference, min_quality,
                                                 default_sanger_quality,
                                                 min_mapq, min_num_alleles,
                                                 max_maf,
                                                 min_num_reads_for_allele,
                                                 read_edge_conf,
                                                 default_bam_platform,
                                                 read_groups_info)
    # contiguous snvs are grouped into blocks and every block is joined
    # into one snv
    for snv_block in _make_snv_blocks(per_position_snvs, read_edge_conf,
                                      read_groups_info, default_bam_platform):
        snv = _join_snvs(snv_block, min_num_alleles,
                         min_num_reads_for_allele, min_quality,
                         reference_seq=reference)
        if snv is not None:
            yield snv
def _snvs_in_bam_by_position(bam, reference, min_quality,
                             default_sanger_quality, min_mapq, min_num_alleles,
                             max_maf, min_num_reads_for_allele,
                             read_edge_conf, default_bam_platform,
                             read_groups_info):
    '''It yields the snv information for every snv in the given reference,
    for each position.

    It walks the bam pileup for the reference and yields one dict per
    variable position with the keys: ref_name, ref_position,
    reference_allele, alleles and read_groups.
    '''
    min_num_alleles = int(min_num_alleles)
    reference_id = get_seq_name(reference)
    reference_seq = reference.seq
    reference_len = len(reference_seq)
    #we can clean the cache of segments because we're in a new molecule
    global SEGMENTS_CACHE
    SEGMENTS_CACHE = {}
    for column in bam.pileup(reference=reference_id):
        alleles = {}
        ref_pos = column.pos
        if ref_pos >= reference_len:
            # reads may overhang the reference end; skip those columns
            continue
        ref_id = bam.getrname(column.tid)
        ref_allele = reference_seq[ref_pos].upper()
        for pileup_read in column.pileups:
            #for each read in the column we add its allele to the alleles dict
            aligned_read = pileup_read.alignment
            read_name = aligned_read.qname
            read_mapping_qual = aligned_read.mapq
            #We ignore the reads that are likely to be missaligned
            if read_mapping_qual < min_mapq:
                continue
            platform, read_group = _get_platform_from_aligned_read(aligned_read,
                                                          read_groups_info,
                                                          default_bam_platform)
            read_pos = pileup_read.qpos
            alleles_here, read_limits = _get_alleles_from_read(ref_allele,
                                                               ref_pos,
                                                               pileup_read)
            #there is an special case that we had not considered before and that
            #can't be coded with the schema returned by this function, an SNP
            #followed by an insertion
            # ref  CA-C
            # read CGCC
            # we will code it as a complex, C and we will remove the SNP
            if len(alleles_here) > 1:
                kinds = [a[1] for a in alleles_here if a[1] != INVARIANT]
                if len(kinds) == 1:
                    # one variant plus an invariant: nothing to merge
                    pass
                elif len(kinds) == 2:
                    if kinds[0] == INSERTION:
                        ins_allele = list(alleles_here[0])
                    elif kinds[1] == INSERTION:
                        ins_allele = list(alleles_here[1])
                    else:
                        msg = 'At least one allele should be an insertion'
                        raise RuntimeError(msg)
                    # the insertion is re-labelled as complex and replaces
                    # both alleles
                    ins_allele[1] = COMPLEX
                    alleles_here = [tuple(ins_allele)]
                else:
                    msg = '3 alleles in one read, I should not be here'
                    raise RuntimeError(msg)
            if read_edge_conf and platform in read_edge_conf:
                edge_left, edge_right = read_edge_conf[platform]
                #if we're in the edge region to be ignored we continue to
                #the next read, because there's no allele to add for this one.
                if (edge_left is not None and
                    read_limits[0] + edge_left > read_pos):
                    continue
                if (edge_right is not None and
                    read_pos > read_limits[1] - edge_right):
                    continue
            for allele in alleles_here:
                allele, kind, qual, is_reverse = allele
                _add_allele(alleles, allele, kind, read_name, read_group,
                            is_reverse, qual, read_mapping_qual,
                            read_groups_info, pileup_read)
        #remove N
        _remove_alleles_n(alleles)
        #add default sanger qualities to the sanger reads with no quality
        _add_default_sanger_quality(alleles, default_sanger_quality,
                                    read_groups_info)
        #remove bad quality alleles
        _remove_bad_quality_alleles(alleles, min_quality)
        #check maf
        if not check_maf_ok(alleles, max_maf):
            continue
        # min_num_reads_for_allele
        _remove_alleles_by_read_number(alleles, min_num_reads_for_allele)
        #if there are a min_num number of alleles requested and there are more
        #alleles than that
        #OR
        #there is some allele different than invariant
        #a variation is yield
        if not alleles:
            continue
        # NOTE(review): alleles.keys()[0] is a Python 2 idiom (keys() as a
        # list); under Python 3 it raises TypeError
        if (len(alleles) > min_num_alleles or
            (min_num_alleles == 1 and alleles.keys()[0][1] != INVARIANT) or
            (min_num_alleles > 1 and len(alleles) >= min_num_alleles)):
            yield {'ref_name':ref_id,
                   'ref_position':ref_pos,
                   'reference_allele':ref_allele,
                   'alleles':alleles,
                   'read_groups':read_groups_info}
def _get_platform_from_aligned_read(aligned_read, read_groups_info,
default_bam_platform):
'It returns the platform'
try:
read_group = aligned_read.opt('RG')
except KeyError:
read_group = UNKNOWN_RG
if read_groups_info and read_group in read_groups_info:
platform = read_groups_info[read_group]['PL']
else:
platform = default_bam_platform
return platform, read_group
def _remove_alleles_by_read_number(alleles, min_num_reads_for_allele):
'It remove alleles with less reads than the given value'
alleles_to_remove = []
for allele_name, allele_info in alleles.items():
if len(allele_info['read_groups']) < min_num_reads_for_allele:
alleles_to_remove.append(allele_name)
if alleles_to_remove:
for allele_to_remove in alleles_to_remove:
del(alleles[allele_to_remove])
def _add_default_sanger_quality(alleles, default_sanger_quality,
read_groups_info):
'It adds default sanger qualities to the sanger reads with no quality'
for allele_info in alleles.values():
for index, (qual, rg) in enumerate(zip(allele_info['qualities'],
allele_info['read_groups'])):
try:
if qual is None and read_groups_info[rg]['PL'] == 'sanger':
allele_info['qualities'][index] = default_sanger_quality
except KeyError:
if 'PL' not in read_groups_info[rg]:
msg = 'The bam file has no platforms for the read groups'
raise KeyError(msg)
else:
raise
def _remove_alleles_n(alleles):
    '''It deletes (in place) the alleles whose sequence is an N.

    The keys of the alleles dict are (sequence, kind) tuples; any key whose
    sequence is in N_ALLLELES is removed.
    '''
    # iterate over a snapshot of the keys: deleting from a dict while
    # iterating it directly raises a RuntimeError
    for allele in list(alleles):
        if allele[0] in N_ALLLELES:
            del alleles[allele]
def _remove_bad_quality_alleles(alleles, min_quality):
    '''It adds the quality to the alleles dict and removes the bad alleles.

    Every allele gets a 'quality' entry; the alleles whose quality is below
    min_quality are removed in place.
    '''
    # the oriented calculation assumes that reads with different
    # orientations are independent; it is disabled for now
    orientations_independent = False
    if orientations_independent:
        qual_calculator = _calculate_allele_quality_oriented
    else:
        qual_calculator = _calculate_allele_quality
    # iterate over a snapshot: deleting from the dict while iterating
    # items() directly raises a RuntimeError under Python 3
    for allele, allele_info in list(alleles.items()):
        qual = qual_calculator(allele_info)
        allele_info['quality'] = qual
        if qual < min_quality:
            del alleles[allele]
def _calculate_allele_quality(allele_info):
'It returns the quality for the given allele'
#we sort all qualities
quals = allele_info['qualities'][:]
#slow alternative
#quals.sort(lambda x, y: int(y - x))
#fast alternative
qual_set = set(quals)
for index in range(3):
if not qual_set:
break
quals[index] = max(qual_set)
qual_set.remove(quals[index])
total_qual = 0
if quals:
total_qual += quals[0]
if len(quals) > 1:
total_qual += quals[1] / 4.0
if len(quals) > 2:
total_qual += quals[2] / 4.0
return total_qual
def _calculate_allele_quality_oriented(allele_info):
'''It returns the quality for the given allele
It assumes that reads with different orientations are independent'''
#we gather all qualities for independent groups
quals = defaultdict(list)
for qual, orientation in zip(allele_info['qualities'],
allele_info['orientations']):
quals[orientation].append(qual)
#we sort all qualities
for independent_quals in quals.values():
independent_quals.sort(lambda x, y: int(y - x))
total_qual = 0
for independent_quals in quals.values():
if independent_quals:
total_qual += independent_quals[0]
if len(independent_quals) > 1:
total_qual += independent_quals[1] / 4.0
if len(independent_quals) > 2:
total_qual += independent_quals[2] / 4.0
return total_qual
def _root_mean_square(numbers):
'It returns the root mean square for the given numbers'
power2 = lambda x: math.pow(x, 2)
return math.sqrt(sum(map(power2, numbers)) / len(numbers))
def _summarize_snv(snv):
    'It returns an snv with an smaller memory footprint'
    used_read_groups = set()
    # the read_groups list in every allele becomes a {read_group: count} dict
    for allele_info in snv['alleles'].values():
        rg_count = {}
        for read_group in allele_info['read_groups']:
            rg_count[read_group] = rg_count.get(read_group, 0) + 1
            used_read_groups.add(read_group)
        allele_info['read_groups'] = rg_count
    # the per-read qualities are summarized as a root mean square
    for kind, summary in (('mapping_qualities', 'mapping_quality'),
                          ('qualities', 'quality')):
        quals = []
        for allele_info in snv['alleles'].values():
            quals.extend(allele_info[kind])
        snv[summary] = _root_mean_square(quals) if quals else None
    # the per-read information is no longer needed
    for allele_info in snv['alleles'].values():
        del allele_info['mapping_qualities']
        del allele_info['qualities']
        del allele_info['orientations']
        del allele_info['reads']
    # keep only the read groups that appear in this snv
    snv['read_groups'] = dict(
        (read_group, info)
        for read_group, info in snv['read_groups'].items()
        if read_group in used_read_groups)
    return snv
def create_snv_annotator(bam_fhand, min_quality=45, default_sanger_quality=25,
                         min_mapq=15, min_num_alleles=1, max_maf=None,
                         read_edge_conf=None, default_bam_platform=None,
                         min_num_reads_for_allele=None, ploidy=2):
    '''It creates an annotator capable of annotating the snvs in a SeqRecord.

    It returns a closure that takes a sequence, finds its snvs in the given
    bam and appends one SeqFeature of type 'snv' per snv found.
    '''
    #the bam should have an index, does the index exists?
    bam_fhand = get_fhand(bam_fhand)
    create_bam_index(bam_fpath=bam_fhand.name)
    read_edge_conf = _normalize_read_edge_conf(read_edge_conf)
    bam = pysam.Samfile(bam_fhand.name, 'rb')
    # default min num_reads per allele and ploidy
    if min_num_reads_for_allele is None:
        min_num_reads_for_allele = DEFAUL_MIN_NUM_READS_PER_ALLELE
    if ploidy is None:
        ploidy = DEFAULT_PLOIDY
    def annotate_snps(sequence):
        'It annotates the snvs found in the sequence'
        for snv in _snvs_in_bam(bam, reference=sequence,
                                min_quality=min_quality,
                                default_sanger_quality=default_sanger_quality,
                                min_mapq=min_mapq,
                                min_num_alleles=min_num_alleles,
                                max_maf=max_maf,
                                read_edge_conf=read_edge_conf,
                                default_bam_platform=default_bam_platform,
                                min_num_reads_for_allele=min_num_reads_for_allele):
            # the summary drops the per-read data to save memory
            snv = _summarize_snv(snv)
            start = snv['ref_position']
            end = snv['ref_position'] + len(snv['reference_allele'])
            type_ = 'snv'
            qualifiers = {'alleles':snv['alleles'],
                          'reference_allele':snv['reference_allele'],
                          'read_groups':snv['read_groups'],
                          'mapping_quality': snv['mapping_quality'],
                          'quality': snv['quality']}
            snv_feat = SeqFeature(location=FeatureLocation(start, end),
                                  type=type_,
                                  qualifiers=qualifiers)
            annotate_pic(snv_feat)
            annotate_heterozygosity(snv_feat, ploidy=ploidy)
            sequence.features.append(snv_feat)
        return sequence
    return annotate_snps
def calculate_snv_kind(feature, detailed=False):
    '''It returns the snv kind for the given feature.

    With detailed=True an SNP is further classified as transition,
    transversion or unknown.
    '''
    ref_allele = feature.qualifiers['reference_allele']
    alleles = feature.qualifiers['alleles'].keys()
    # the reference length plus the length of every variant allele
    lengths = [len(ref_allele)]
    lengths.extend(len(seq) for seq, kind in alleles if kind != INVARIANT)
    if len(lengths) == 1:
        return INVARIANT
    if all(length == 1 for length in lengths):
        if detailed:
            return _guess_snp_kind(feature.qualifiers['alleles'], ref_allele)
        return SNP
    if len(lengths) > 2:
        return COMPLEX
    if lengths[0] < lengths[1]:
        return INSERTION
    if lengths[0] > lengths[1]:
        return DELETION
    return COMPLEX
def _al_type(allele):
'I guesses the type of the allele'
allele = allele.upper()
if allele in ('A', 'G'):
return 'purine'
elif allele in ('T', 'C'):
return 'pirimidine'
return UNKNOWN
def _guess_snp_kind(alleles, ref_allele):
    'It guesses the type (transition/transversion/unknown) of the snp'
    # the reference is taken into account to decide if there is a variation
    variant_alleles = [seq for seq, kind in alleles.keys()
                       if kind != INVARIANT]
    if len(variant_alleles) > 1:
        # with several variant alleles the kind cannot be decided
        return UNKNOWN
    allele_type = _al_type(variant_alleles[0])
    ref_type = _al_type(ref_allele)
    if allele_type == UNKNOWN or ref_type == UNKNOWN:
        return UNKNOWN
    if allele_type == ref_type:
        return TRANSITION
    return TRANSVERSION
def _cmp_by_read_num(allele1, allele2):
'cmp by the number of reads for each allele'
return len(allele2['read_names']) - len(allele1['read_names'])
def sorted_alleles(feature):
    '''It returns the alleles sorted by number of reads (most reads first).

    Every allele is returned as a copy of its info dict plus 'seq' and
    'kind' keys.
    '''
    #from dict to list
    alleles = feature.qualifiers['alleles']
    alleles_list = []
    for allele, allele_info in alleles.items():
        allele_info = copy(allele_info)
        allele_info['seq'] = allele[0]
        allele_info['kind'] = allele[1]
        alleles_list.append(allele_info)
    # sorted() lost its positional cmp parameter in Python 3; a key with
    # reverse=True yields the same descending-by-read-number, stable order
    return sorted(alleles_list,
                  key=lambda info: len(info['read_names']), reverse=True)
def snvs_in_window(snv, snvs, window, snv_type=None, maf=None):
    '''It counts the snvs found inside a window centered on the given snv.

    The snv itself is not counted.  When snv_type and/or maf are given only
    the snvs of that kind and/or with a maf below the given one are counted.
    '''
    location = int(str(snv.location.start))
    left_margin = location - (window / 2)
    rigth_margin = location + (window / 2)
    num_of_snvs = 0
    for neighbour in snvs:
        current_location = int(str(neighbour.location.start))
        if current_location == location:
            # the reference snv itself does not count
            continue
        if current_location < left_margin or current_location > rigth_margin:
            continue
        if snv_type is not None:
            type_ = calculate_snv_kind(neighbour)
            type_matches = (snv_type == type_ or
                            (snv_type == INDEL and
                             type_ in (INSERTION, DELETION)))
            if not type_matches:
                continue
        if maf is not None and not calculate_maf_frequency(neighbour) < maf:
            continue
        num_of_snvs += 1
    return num_of_snvs
def _get_group(read_group, group_kind, read_groups):
'It returns the group (lb, rg, sm) for the given rg and group_kind'
if group_kind:
if group_kind == 'read_groups':
return read_group
else:
group_kind = group_kind.lower()
if group_kind in ('lb', 'library', 'libraries'):
group_kind = 'LB'
elif group_kind in ('sm', 'sample', 'samples'):
group_kind = 'SM'
elif group_kind in ('pl', 'platform', 'platforms'):
group_kind = 'PL'
return read_groups[read_group][group_kind]
def check_maf_ok(alleles, max_maf):
    'It checks that the major allele freq is less than maximun limit'
    if max_maf is None:
        # no limit configured, everything is acceptable
        return True
    maf = _calculate_maf_frequency_for_alleles(alleles, alleles_is_dict=False)
    return not maf > max_maf
def _allele_count(allele, alleles, read_groups=None,
groups=None, group_kind=None, alleles_is_dict=True):
'It returns the number of reads for the given allele'
counts = []
if not alleles_is_dict:
return len(alleles[allele]['read_groups'])
for read_group, count in alleles[allele]['read_groups'].items():
#do we have to count this read_group?
group = _get_group(read_group, group_kind, read_groups)
if not groups or groups and group in groups:
counts.append(count)
return sum(counts)
def calculate_maf_frequency(feature, groups=None, group_kind=None):
    'It returns the most frequent allele frequency'
    qualifiers = feature.qualifiers
    return _calculate_maf_frequency_for_alleles(qualifiers['alleles'],
                                                groups=groups,
                                                group_kind=group_kind,
                                                read_groups=qualifiers['read_groups'])
def _calculate_maf_frequency_for_alleles(alleles, groups=None, group_kind=None,
                                         read_groups=None, alleles_is_dict=True):
    '''It returns the frequency of the most abundant allele.

    It returns None when there are no reads at all.
    '''
    major_number_reads = None
    total_number_reads = 0
    for allele in alleles:
        number_reads = _allele_count(allele, alleles, read_groups, groups,
                                     group_kind, alleles_is_dict)
        if major_number_reads is None or major_number_reads < number_reads:
            major_number_reads = number_reads
        total_number_reads += number_reads
    if not total_number_reads:
        return None
    # float() guards against Python 2 integer floor division, which would
    # truncate every frequency below 1.0 down to 0
    return major_number_reads / float(total_number_reads)
def calculate_snv_variability(sequence):
    '''It returns the snv density for the given sequence.

    It counts the snv features and divides by the sequence length.
    NOTE(review): the old docstring said "snvs for every 100 pb", but no
    *100 factor is applied -- confirm which is intended.
    '''
    n_snvs = sum(1 for snv in sequence.get_features(kind='snv'))
    # float() guards against Python 2 integer floor division (which would
    # truncate the density to 0 for any sequence with len > n_snvs)
    return n_snvs / float(len(sequence))
def calculate_cap_enzymes(feature, sequence, all_enzymes=False):
    '''Given an snv feature and a sequence it returns the list of restriction
    enzymes that distinguish between their alleles.

    The result is cached in the feature's 'cap_enzymes' qualifier.
    '''
    if 'cap_enzymes' in feature.qualifiers:
        return feature.qualifiers['cap_enzymes']
    #which alleles do we have?
    # the allele keys are already hashable (sequence, kind) tuples, so the
    # old repr()/eval() round trip was unnecessary and has been removed
    alleles = set()
    for allele in feature.qualifiers['alleles'].keys():
        alleles.add((allele[0], allele[1]))
    #for every pair of different alleles we have to look for differences in
    #their restriction maps
    enzymes = set()
    alleles = list(alleles)
    reference = sequence
    location = feature.location
    for i_index in range(len(alleles)):
        for j_index in range(i_index + 1, len(alleles)):
            allelei = {'allele': alleles[i_index][0],
                       'kind': alleles[i_index][1]}
            allelej = {'allele': alleles[j_index][0],
                       'kind': alleles[j_index][1]}
            i_j_enzymes = _cap_enzymes_between_alleles(allelei, allelej,
                                                       reference, location,
                                                       all_enzymes)
            enzymes = enzymes.union(i_j_enzymes)
    enzymes = [str(enzyme) for enzyme in enzymes]
    feature.qualifiers['cap_enzymes'] = enzymes
    return enzymes
def _cap_enzymes_between_alleles(allele1, allele2, reference, location,
                                 all_enzymes=False):
    '''It looks in the enzymes that differenciate the given alleles.

    It returns a set.
    '''
    # Choose the batch of restriction enzymes to analyse with.
    if all_enzymes:
        batch = CommOnly
    else:
        batch = RestrictionBatch(COMMON_ENZYMES)
    # Build one sequence per allele by replacing the snv stretch.
    ref_seq = reference.seq
    prefix = ref_seq[0:location.start]
    suffix = ref_seq[location.end + 1:]
    sites_per_allele = []
    for allele_info in (allele1, allele2):
        analysis = Analysis(batch, prefix + allele_info['allele'] + suffix,
                            linear=True)
        sites_per_allele.append(set(analysis.with_sites().keys()))
    # Enzymes that cut one allele but not the other.
    return sites_per_allele[0].symmetric_difference(sites_per_allele[1])
def variable_in_groupping(snv, group_kind, groups, in_union=True,
                          in_all_groups=True, reference_free=True, maf=None,
                          min_num_reads=None, min_reads_per_allele=None):
    'It looks if the given snv is variable for the given groups'
    alleles = _get_alleles_for_group(snv.qualifiers['alleles'],
                                     groups, group_kind,
                                     snv.qualifiers['read_groups'],
                                     min_reads_per_allele=min_reads_per_allele)
    if not alleles:
        # No allele passed the grouping/filters: not variable.
        return False
    if in_union:
        # Collapse all groups into a single pseudo read group.
        alleles = _aggregate_alleles(alleles)
    variable_per_read_group = [
        _is_rg_variable(rg_alleles, reference_free=reference_free,
                        maf=maf, min_num_reads=min_num_reads)
        for rg_alleles in alleles.values()]
    return (all(variable_per_read_group) if in_all_groups
            else any(variable_per_read_group))
def invariant_in_groupping(snv, group_kind, groups, in_union=True,
                           in_all_groups=True, reference_free=True, maf=None,
                           min_num_reads=None):
    'it check if the given snv is invariant form the given groups'
    alleles = _get_alleles_for_group(snv.qualifiers['alleles'],
                                     groups, group_kind,
                                     snv.qualifiers['read_groups'])
    if not alleles:
        # Without observed alleles the position is invariant only when the
        # reference allele counts as evidence.
        return not reference_free
    if in_union:
        # Collapse all groups into a single pseudo read group.
        alleles = _aggregate_alleles(alleles)
    invariable_per_read_group = [
        not _is_rg_variable(rg_alleles, reference_free=reference_free,
                            maf=maf, min_num_reads=min_num_reads)
        for rg_alleles in alleles.values()]
    return (all(invariable_per_read_group) if in_all_groups
            else any(invariable_per_read_group))
def _is_rg_variable(alleles, reference_free=True, maf=None, min_num_reads=None):
    'It checks if the allele is variable'
    if len(alleles) == 1:
        # A single allele is variable only when the reference counts and the
        # observed allele differs from it (i.e. it is not the INVARIANT one).
        ref_in_alleles = any(allele[1] == INVARIANT for allele in alleles)
        if reference_free or ref_in_alleles:
            return False
    maf_allele, num_reads = calc_maf_and_num_reads(alleles)
    # Apply the optional frequency and coverage thresholds.
    if maf and maf < maf_allele:
        return False
    if min_num_reads and min_num_reads > num_reads:
        return False
    return True
def calc_maf_and_num_reads(alleles):
    'It calculates the maf and the number of reads in that position'
    # alleles maps allele -> read count; MAF is the major count over the total.
    read_counts = list(alleles.values())
    num_reads = sum(read_counts)
    maf_allele = max(read_counts) / float(num_reads)
    return maf_allele, num_reads
def _aggregate_alleles(alleles):
'It joins all alleles for the read groups into one'
aggregate = {}
for alleles in alleles.values():
for allele, allele_count in alleles.items():
if allele not in aggregate:
aggregate[allele] = 0
aggregate[allele] += allele_count
return {None: aggregate}
def _get_alleles_for_group(alleles, groups, group_kind='read_groups',
                           read_groups=None, min_reads_per_allele=None):
    '''It gets the alleles from the given items of type:key, separated by items.

    For example, if you give key rg and items rg1, rg2, it will return
    alleles separated in rg1 and rg2 '''
    alleles_for_groups = {}
    for allele, alleles_info in alleles.items():
        rg_counts = alleles_info['read_groups']
        for read_group, count in rg_counts.items():
            # Optional coverage filter applied per allele and read group.
            if min_reads_per_allele and count < min_reads_per_allele:
                continue
            group = _get_group(read_group, group_kind, read_groups)
            if group not in groups:
                continue
            # Last read group for a given (group, allele) wins, matching the
            # original accumulation behaviour.
            alleles_for_groups.setdefault(group, {})[allele] = count
    return alleles_for_groups
def calculate_pic(snv, read_groups=None, group_kind=None, groups=None):
    '''It calculates the uniformly minimum variance unbiased (UMVU) estimator
    of PIC of a locus, given a list with the number of times that each allele
    has been read.

    PIC(UMVU) = 1 - summatory((Xi(Xi-1))/(n(n-1))) -
                summatory((Xi(Xi-1)Xj(Xj-1))/(n(n-1)(n-2)(n-3))
    Xi = number of times that allele i-th has been read
    Xj = number of times that allele j-th has been read
    n = total number of reads
    Formula taken from "On Estimating the Heterozygosity and Polymorphism
    Information Content Value" by Shete S., Tiwari H. and Elston R.C.
    Theoretical Population Biology. Volume 57, Issue 3, May 2000, Pages 265-271

    Returns 0 for a monomorphic position and None when fewer than 4 reads
    are available (the estimator is undefined there).
    '''
    alleles = snv.qualifiers['alleles']
    # NOTE(review): the read_groups parameter is immediately overwritten by
    # the snv's own read_groups qualifier, so the argument is effectively
    # ignored -- confirm whether it should be honoured when provided.
    read_groups = snv.qualifiers['read_groups']
    alleles_reads = []
    for allele in alleles:
        alleles_reads.append(_allele_count(allele, alleles,
                                           read_groups=read_groups,
                                           group_kind=group_kind,
                                           groups=groups ))
    if len(alleles_reads) == 1:
        # Only one allele observed: the locus is monomorphic.
        pic = 0
    else:
        total_reads = sum(alleles_reads)
        # we need at least 4 reads to calculate pic
        if total_reads < 4:
            pic = None
        else:
            first_element = 0
            second_element = 0
            for index, num_allele in enumerate(alleles_reads):
                # Xi(Xi-1)/(n(n-1)) term for this allele.
                # NOTE(review): relies on true division ('from __future__
                # import division' in the module header, not visible here)
                # -- otherwise integer division truncates to 0. Confirm.
                first_element_part = ((num_allele*(num_allele - 1))/
                                      (total_reads*(total_reads - 1)))
                first_element += first_element_part
                # Inner loop reuses the name num_allele; harmless because
                # enumerate resupplies the outer value each iteration.
                for num_allele in alleles_reads[index+1:]:
                    # Cross term Xi(Xi-1)Xj(Xj-1)/(n(n-1)(n-2)(n-3)), built by
                    # reusing the already-divided first part.
                    second_element_part = (first_element_part*
                                           ((num_allele*(num_allele - 1))/
                                            ((total_reads - 2)*
                                             (total_reads - 3))))
                    second_element += second_element_part
            pic = 1 - first_element - second_element
    return pic
def annotate_pic(snv):
    'It annotates the pic'
    # Store the computed PIC value in the snv qualifiers.
    snv.qualifiers['pic'] = calculate_pic(snv)
def calculate_heterozygosity(snv, ploidy, group_kind=None,
                             groups=None):
    '''It calculates the estimator of heterozygosity, given a list with the
    number of times that each allele has been read.

    heterozygosity(estimator) = (n/(n-1))(1-summatory(xi**2))
    xi = gene frequency of the i-th allele
    n = total number of reads
    Formula taken from "SAMPLING VARIANCES OF HETEROZYGOSITY AND GENETIC
    DISTANCE" by MASATOSHI NEI and A. K. ROYCHOUDHURY.
    Genetics 76: 379-390 February, 1974.
    If the number of individuals is less than 50, formula has to be corrected:
    heterozygosity(estimator) = (2n/(2n-1))(1-summatory(xi**2))
    Taken from "ESTIMATION OF AVERAGE HETEROZYGOSITY AND GENETIC DISTANCE FROM
    A SMALL NUMBER OF INDIVIDUALS" by MASATOSHI NEI.
    Genetics 89 : 583-590 July, 1978.

    Returns 0 for a monomorphic position and None when there are no reads.
    '''
    alleles = snv.qualifiers['alleles']
    read_groups = snv.qualifiers['read_groups']
    alleles_reads = []
    for allele in alleles:
        alleles_reads.append(_allele_count(allele, alleles,
                                           read_groups=read_groups,
                                           group_kind=group_kind,
                                           groups=groups))
    if len(alleles_reads) == 1:
        # Only one allele observed: heterozygosity is zero by definition.
        heterozygosity = 0
    else:
        total_reads = sum(alleles_reads)
        if total_reads == 0:
            heterozygosity = None
        else:
            # summatory(xi**2) over the allele frequencies.
            # NOTE(review): these divisions rely on true division ('from
            # __future__ import division' in the module header, not visible
            # here) -- otherwise they truncate to integers. Confirm.
            sum_ = 0
            for num_allele in alleles_reads:
                allele_freq = num_allele/total_reads
                sum_ += allele_freq**2
            # The number of individuals is approximated as reads/ploidy to
            # pick Nei's small-sample (1978) vs large-sample (1974) formula.
            if total_reads/ploidy < 50:
                heterozygosity = ((2*total_reads)/((2*total_reads) - 1))*(1 - sum_)
            else:
                heterozygosity = ((total_reads)/((total_reads) - 1))*(1 - sum_)
    return heterozygosity
def annotate_heterozygosity(snv, ploidy):
    'It annotates the heterozigosity'
    # Store the computed heterozygosity estimator in the snv qualifiers.
    snv.qualifiers['heterozygosity'] = calculate_heterozygosity(snv, ploidy)
| JoseBlanca/franklin | franklin/snv/snv_annotation.py | Python | agpl-3.0 | 74,722 | [
"pysam"
] | ab526bf358ca1c1fec5ccf82b1f834242fd29de011d149a418f61620e292d348 |
#!/usr/bin/env python
"""
Script to setup some test data.
This is useful during development when we are continuously wiping the db
and want to get some new data in quickly.
NOTE: Several tests use this module, so avoid breaking tests when changing
this.
"""
import os
import random
import shutil
import subprocess
import sys
# Setup the Django environment. Our setup_django_env() method is outdated.
sys.path.append(
os.path.join(os.path.dirname(os.path.realpath(__file__)), '../'))
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
from django.db import transaction
from django.contrib.auth.models import User
from django.core.management import call_command
from django.conf import settings
from main.models import AlignmentGroup
from main.models import Chromosome
from main.models import Dataset
from main.models import ExperimentSample
from main.models import ExperimentSampleToAlignment
from main.models import Project
from main.models import ReferenceGenome
from main.models import Region
from main.models import RegionInterval
from main.models import SavedVariantFilterQuery
from main.models import Variant
from main.models import VariantAlternate
from main.models import VariantSet
from main.models import VariantToVariantSet
from main.testing_util import FullVCFTestSet
from pipeline.pipeline_runner import run_pipeline
from utils.import_util import add_dataset_to_entity
from utils.import_util import copy_and_add_dataset_source
from utils.import_util import copy_dataset_to_entity_data_dir
from utils.import_util import import_reference_genome_from_local_file
from utils.import_util import import_variant_set_from_vcf
from utils.import_util import run_fastqc_on_sample_fastq
from settings import PWD as GD_ROOT
# Test data.
# Credentials of the development/test login created by get_or_create_user().
TEST_USERNAME = 'gmcdev'
TEST_PASSWORD = 'g3n3d3z'
TEST_EMAIL = 'gmcdev@genomedesigner.freelogy.org'
# Small fake-genome fixtures shipped with the repository under test_data/.
TEST_FASTA = os.path.join(GD_ROOT, 'test_data', 'fake_genome_and_reads',
        'test_genome.fa')
TEST_FASTQ1 = os.path.join(GD_ROOT, 'test_data', 'fake_genome_and_reads',
        '38d786f2', 'test_genome_1.snps.simLibrary.1.fq')
TEST_FASTQ2 = os.path.join(GD_ROOT, 'test_data', 'fake_genome_and_reads',
        '38d786f2', 'test_genome_1.snps.simLibrary.2.fq')
# Gzipped read fixtures, used to exercise the .fq.gz import path.
TEST_FASTQ_GZ_1 = os.path.join(GD_ROOT, 'test_data', 'fake_genome_and_reads',
        '6057f443', 'test_genome_8.snps.simLibrary.1.fq.gz')
TEST_FASTQ_GZ_2 = os.path.join(GD_ROOT, 'test_data', 'fake_genome_and_reads',
        '6057f443', 'test_genome_8.snps.simLibrary.2.fq.gz')
# Pre-computed BWA alignment fixture and its index.
TEST_BAM = os.path.join(GD_ROOT, 'test_data', 'fake_genome_and_reads',
        '38d786f2', 'bwa_align.sorted.grouped.realigned.bam')
TEST_BAM_INDEX = os.path.join(GD_ROOT, 'test_data', 'fake_genome_and_reads',
        '38d786f2', 'bwa_align.sorted.grouped.realigned.bam.bai')
# Labels used for the bootstrapped projects, genomes, samples and sets.
TEST_PROJECT_NAME = 'recoli'
SV_PROJECT_NAME = 'sv_testing'
REF_GENOME_1_LABEL = 'mg1655'
REF_GENOME_2_LABEL = 'c321D'
SAMPLE_1_LABEL = 'sample1'
VARIANTSET_1_LABEL = 'Set A'
VARIANTSET_2_LABEL = 'Set B'
# Saved variant-filter queries created for the test user.
CUSTOM_SAVED_QUERY_LIST = [
    'GT_TYPE = 2 & DP > 10',
    'INFO_EFF_IMPACT = HIGH',
]
@transaction.commit_on_success
def add_fake_variants_to_genome(ref_genome, chromosome, num_variants):
    """
    Add many fake variants to a fake genome in order to test
    features like pagination.
    """
    # Create num_variants transition variants at random positions, each with
    # a single A -> G alternate.
    for _ in range(num_variants):
        new_variant = Variant.objects.create(
                type=Variant.TYPE.TRANSITION,
                reference_genome=ref_genome,
                chromosome=chromosome,
                position=random.randint(1, chromosome.num_bases),
                ref_value='A')
        alternate = VariantAlternate.objects.create(
                variant=new_variant,
                alt_value='G')
        new_variant.variantalternate_set.add(alternate)
def create_fake_variants_and_variant_sets(ref_genome, num_variants=100):
    """Populate ref_genome with fake variants and two overlapping variant sets."""
    chrom_0 = Chromosome.objects.get(reference_genome=ref_genome)
    add_fake_variants_to_genome(
            ref_genome=ref_genome,
            chromosome=chrom_0,
            num_variants=num_variants)

    ### Add fake variants to a set
    @transaction.commit_on_success
    def _add_fake_variants_to_fake_set():
        # NOTE(review): ref_genome_1 is looked up but never used below; the
        # closure uses the outer ref_genome instead.  The lookup still raises
        # DoesNotExist when the mg1655 genome is missing -- confirm whether
        # that side effect is intended before removing it.
        ref_genome_1 = ReferenceGenome.objects.get(
                label=REF_GENOME_1_LABEL)
        (sample_1, created) = ExperimentSample.objects.get_or_create(
                project=ref_genome.project,
                label=SAMPLE_1_LABEL)
        var_set1 = VariantSet.objects.create(
                reference_genome=ref_genome,
                label=VARIANTSET_1_LABEL)
        var_set2 = VariantSet.objects.create(
                reference_genome=ref_genome,
                label=VARIANTSET_2_LABEL)
        variant_list = Variant.objects.filter(reference_genome=ref_genome)
        for var in variant_list:
            #add variant to one of two sets, depending on var position
            if var.position < 25:
                vvs1 = VariantToVariantSet.objects.create(
                        variant=var,
                        variant_set=var_set1)
                #add a sample to the association if the variant is odd
                if var.position % 2:
                    vvs1.sample_variant_set_association.add(sample_1)
            # Positions 21-24 fall in both sets on purpose (overlap).
            if var.position > 20:
                vvs2 = VariantToVariantSet.objects.create(
                        variant=var,
                        variant_set=var_set2)
                #add a sample to the association if the variant is even
                if not var.position % 2:
                    vvs2.sample_variant_set_association.add(sample_1)
        # Make sure that both sets have at least one variant.
        guaranteed_var = Variant.objects.create(
                type=Variant.TYPE.TRANSITION,
                reference_genome=ref_genome,
                chromosome=Chromosome.objects.get(
                        reference_genome=ref_genome),
                position=22,
                ref_value='A',
                alt_value='G')
        vvs1 = VariantToVariantSet.objects.create(
                variant=guaranteed_var,
                variant_set=var_set1)
        vvs2 = VariantToVariantSet.objects.create(
                variant=guaranteed_var,
                variant_set=var_set2)
        vvs2.sample_variant_set_association.add(sample_1)
    _add_fake_variants_to_fake_set()
def get_or_create_user():
try:
print "trying to create user"
return User.objects.get(username=TEST_USERNAME)
except User.DoesNotExist:
print "user did not exist"
return User.objects.create_user(
TEST_USERNAME, password=TEST_PASSWORD, email=TEST_EMAIL)
def bootstrap_fake_data():
    """Fill the database with fake data.

    Creates the test user, two projects, several reference genomes, samples
    (with and without backing read data), an alignment, fake variants and
    variant sets, a fully annotated VCF test set, fake regions, and finally
    bootstraps the SV test project.
    """
    user = get_or_create_user()
    ### Create some projects
    (test_project, project_created) = Project.objects.get_or_create(
            title=TEST_PROJECT_NAME, owner=user.get_profile())
    (test_project_2, project_created) = Project.objects.get_or_create(
            title=SV_PROJECT_NAME, owner=user.get_profile())
    ### Create some reference genomes
    ref_genome_1 = import_reference_genome_from_local_file(
            test_project, REF_GENOME_1_LABEL, TEST_FASTA, 'fasta')
    ref_genome_2 = import_reference_genome_from_local_file(
            test_project, REF_GENOME_2_LABEL, TEST_FASTA, 'fasta')
    ref_genome_3 = import_reference_genome_from_local_file(
            test_project, 'test_genome', TEST_FASTA, 'fasta')
    ### Create some saved queries.
    for saved_query_text in CUSTOM_SAVED_QUERY_LIST:
        SavedVariantFilterQuery.objects.get_or_create(
                owner=user.get_profile(),
                text=saved_query_text)
    ### Create some ExperimentSamples.
    # Create some samples without backing data just to explore the UI.
    ExperimentSample.objects.create(
            project=test_project,
            label='C321D_MiSeq',
            data = {'SAMPLE_WELL': 'A01'}
    )
    ExperimentSample.objects.create(
            project=test_project,
            label='C321D Fixed 01',
            data = {'SAMPLE_WELL': 'A02'}
    )
    ExperimentSample.objects.create(
            project=test_project,
            label='C321D Fixed 02',
            data = {'SAMPLE_WELL': 'A03'}
    )
    # Create some samples with backing data.
    (sample_1, created) = ExperimentSample.objects.get_or_create(
            project=test_project,
            label=SAMPLE_1_LABEL)
    # Add datasets to the samples.
    if not sample_1.dataset_set.filter(type=Dataset.TYPE.FASTQ1):
        copy_and_add_dataset_source(sample_1, Dataset.TYPE.FASTQ1,
                Dataset.TYPE.FASTQ1, TEST_FASTQ1)
    if not sample_1.dataset_set.filter(type=Dataset.TYPE.FASTQ2):
        copy_and_add_dataset_source(sample_1, Dataset.TYPE.FASTQ2,
                Dataset.TYPE.FASTQ2, TEST_FASTQ2)
    # Create sample backed by g-zipped data.
    gz_backed_sample = ExperimentSample.objects.create(
            project=test_project,
            label='sample backed by gz data')
    gz_fastq1_dataset = copy_and_add_dataset_source(
            gz_backed_sample, Dataset.TYPE.FASTQ1, Dataset.TYPE.FASTQ1,
            TEST_FASTQ_GZ_1)
    # NOTE(review): the first type argument here is FASTQ1 while the second
    # is FASTQ2, unlike the sample_1 calls above which pass the same type
    # twice -- looks like a copy-paste slip, confirm against
    # copy_and_add_dataset_source's signature.
    gz_fastq2_dataset = copy_and_add_dataset_source(
            gz_backed_sample, Dataset.TYPE.FASTQ1, Dataset.TYPE.FASTQ2,
            TEST_FASTQ_GZ_2)
    run_fastqc_on_sample_fastq(gz_backed_sample, gz_fastq1_dataset)
    run_fastqc_on_sample_fastq(gz_backed_sample, gz_fastq2_dataset, rev=True)
    ### Create an alignment.
    alignment_group_1 = AlignmentGroup.objects.create(
            label='Alignment 1',
            reference_genome=ref_genome_3,
            aligner=AlignmentGroup.ALIGNER.BWA)
    # Link it to a sample.
    sample_alignment = ExperimentSampleToAlignment.objects.create(
            alignment_group=alignment_group_1,
            experiment_sample=sample_1)
    ### Add alignment data. NOTE: Stored in sample model dir.
    # NOTE: This is a bit convoluted. Perhaps it would be better to store alignments
    # in the ExperimentSampleToAlignment directory.
    copy_dest = copy_dataset_to_entity_data_dir(sample_1, TEST_BAM)
    copy_dataset_to_entity_data_dir(sample_1, TEST_BAM_INDEX)
    add_dataset_to_entity(sample_alignment, Dataset.TYPE.BWA_ALIGN,
            Dataset.TYPE.BWA_ALIGN, copy_dest)
    # Create fake variants.
    create_fake_variants_and_variant_sets(ref_genome_1)
    #############################
    # Full VCF Testing (annotated for snpeff, variant filtering, etc)
    #############################
    # Create a new reference genome and samples using full_vcf_test_set
    full_vcf_reference_genome = import_reference_genome_from_local_file(
            test_project, 'mg1655_tolC_through_zupT',
            FullVCFTestSet.TEST_GENBANK, 'genbank')
    # Create all samples.
    parent_obj = None
    full_vcf_samples = []
    for i in range(FullVCFTestSet.NUM_SAMPLES):
        sample_obj = ExperimentSample.objects.create(
                project=test_project,
                label='Sample %d' % i)
        sample_obj.data['SAMPLE_WELL'] = 'A0%d' % (i+1)
        # Sample 0 is the parent of every later sample.
        if i == 0:
            parent_obj = sample_obj
        if i > 0:
            sample_obj.data['SAMPLE_PARENTS'] = parent_obj.label
            parent_obj.add_child(sample_obj)
            parent_obj.save()
        sample_obj.save()
        # Add raw reads to each sample.
        fastq1_dataset = copy_and_add_dataset_source(sample_obj,
                Dataset.TYPE.FASTQ1,
                Dataset.TYPE.FASTQ1,
                FullVCFTestSet.FASTQ1[i])
        fastq2_dataset = copy_and_add_dataset_source(sample_obj,
                Dataset.TYPE.FASTQ2,
                Dataset.TYPE.FASTQ2,
                FullVCFTestSet.FASTQ2[i])
        # Run FASTQC on sample reads.
        run_fastqc_on_sample_fastq(sample_obj, fastq1_dataset)
        run_fastqc_on_sample_fastq(sample_obj, fastq2_dataset, rev=True)
        full_vcf_samples.append(sample_obj)
    # Run the alignment. Return the alignment group created, indexed by the
    # reference genome's uid.
    run_pipeline(
            'test_align', full_vcf_reference_genome, full_vcf_samples)
    import_variant_set_from_vcf(full_vcf_reference_genome, 'Designed',
            FullVCFTestSet.TEST_DESIGNED_SNPS)
    def _create_region_intervals(region, interval_tuple_list):
        """Helper method to create RegionIntervals for a Region.

        Args:
            region: Region Model object.
            interval_tuple_list: List of tuples of intervals to create.
        """
        for interval in interval_tuple_list:
            RegionInterval.objects.create(
                    region=region,
                    start=interval[0],
                    end=interval[1])
    # Create some fake regions.
    # TODO: Should not be much harder to replace this with real regions.
    region_1 = Region.objects.create(
            reference_genome=full_vcf_reference_genome,
            label='region_1',
            type=Region.TYPE.POOR_MAPPING_QUALITY)
    _create_region_intervals(region_1, [(1,150), (300, 400), (500, 900)])
    region_2 = Region.objects.create(
            reference_genome=full_vcf_reference_genome,
            label='region_2',
            type=Region.TYPE.POOR_MAPPING_QUALITY)
    _create_region_intervals(region_2, [(1000, 1500)])
    region_3 = Region.objects.create(
            reference_genome=full_vcf_reference_genome,
            label='region_3',
            type=Region.TYPE.POOR_MAPPING_QUALITY)
    _create_region_intervals(region_3, [(1800, 1900), (2150, 2300)])
    # And some GENE regions.
    gene_A = Region.objects.create(
            reference_genome=full_vcf_reference_genome,
            label='geneA',
            type=Region.TYPE.GENE)
    _create_region_intervals(gene_A, [(2000, 2400)])
    gene_B = Region.objects.create(
            reference_genome=full_vcf_reference_genome,
            label='geneB',
            type=Region.TYPE.GENE)
    _create_region_intervals(gene_B, [(4800, 5200)])
    gene_C = Region.objects.create(
            reference_genome=full_vcf_reference_genome,
            label='geneC',
            type=Region.TYPE.GENE)
    _create_region_intervals(gene_C, [(1, 500)])
    # Bootstrap test_project_2 with SV stuff
    sv_testing_bootstrap(test_project_2)
def sv_testing_bootstrap(project):
    """Set up the SV test project: reference, sample, reads, optional pipeline."""
    sv_testing_dir = os.path.join(GD_ROOT, 'test_data', 'sv_testing', 'all_svs')
    ref_genome = import_reference_genome_from_local_file(
            project, 'ref',
            os.path.join(sv_testing_dir, 'ref.fa'), 'fasta')
    sample = ExperimentSample.objects.create(
            project=project,
            label='simLibrary',
    )
    # Attach the paired-end read files to the sample.
    for dataset_type, fq_name in (
            (Dataset.TYPE.FASTQ1, 'simLibrary.1.fq'),
            (Dataset.TYPE.FASTQ2, 'simLibrary.2.fq')):
        copy_and_add_dataset_source(sample, dataset_type, dataset_type,
                os.path.join(sv_testing_dir, fq_name))
    if '--sv' in sys.argv:  # using --sv argument runs pipeline for SV project
        run_pipeline('sample_alignment_ref', ref_genome, [sample])
def reset_database():
"""Deletes the old database and sets up a new one.
For now, only works with the temp.db database to prevent
accidentally deleting data down the line.
"""
### Delete the old database if it exists.
print 'Deleting old database ...'
script_string = """
sudo -u %(os_user)s psql -c "
DO
\$body\$
BEGIN
IF NOT EXISTS (
SELECT *
FROM pg_catalog.pg_user
WHERE usename = '%(user)s') THEN
CREATE USER %(user)s WITH PASSWORD '%(password)s';
END IF;
END;
\$body\$"
sudo -u %(os_user)s psql -c "DROP DATABASE IF EXISTS %(db)s;"
sudo -u %(os_user)s psql -c "DROP USER IF EXISTS %(user)s;"
sudo -u %(os_user)s psql -c "CREATE USER %(user)s WITH PASSWORD \
'%(password)s';"
sudo -u %(os_user)s psql -c "CREATE DATABASE %(db)s;"
sudo -u %(os_user)s psql -c 'GRANT ALL PRIVILEGES ON DATABASE \
%(db)s to %(user)s;'
sudo -u %(os_user)s psql -c "ALTER USER %(user)s CREATEDB;"
""" % {
'db': settings.DATABASES['default']['NAME'],
'user': settings.DATABASES['default']['USER'],
'password': settings.DATABASES['default']['PASSWORD'],
'os_user': settings.DATABASES['default']['OS_USER']
}
proc = subprocess.Popen(script_string, shell=True, stderr=subprocess.PIPE)
# parse script stderr for errors.
error_lines = []
for output_line in proc.stderr.readline():
# Skip 'NOTICE' stderr lines, they are not errors.
if not output_line or 'NOTICE' in output_line:
continue
error_lines.append(output_line)
if len(error_lines):
raise Exception(
'Error while reseting database. Possible reasons:\n '
'\t* Celery is running\n'
'\t* Postgres session is open\n'
'\t* No postgres user; change OS_USER in default database'
' in local_settings.py\n'
'\nOffending postgres errors:\n' + ''.join(error_lines))
"""
flush: removes all rows in the database.
syncdb --all: performs syncdb on non-South apps and migrate on South apps
migrate --fake: note that migrate has been performed in syncdb
"""
call_command('syncdb', migrate_all=True, interactive=False)
print "\n\n\n\n---------------ABOUT TO MIGRATE"
call_command('migrate', fake=True, interactive=False)
print "\n\n\n\n----------------MIGRATION FINISHED"
### Recreate the media root.
if os.path.exists(settings.MEDIA_ROOT):
shutil.rmtree(settings.MEDIA_ROOT)
os.mkdir(settings.MEDIA_ROOT)
def confirm_bootstrap():
    """Ask for confirmation before wiping; -q on the command line skips it."""
    if len(sys.argv) > 1 and sys.argv[1] in ["-q",]:
        # Consume the flag so flavor parsing below still works.
        sys.argv.pop(1)
        return True
    answer = raw_input(
            "This will wipe any current database. Are you sure? y/n\n")
    return answer.lower() in ['y', 'yes']
if __name__ == '__main__':
    # First entry is the default.
    # We can add more here later.
    BOOTSTRAP_FLAVORS = [
        ('blank', lambda: 'no function'),
        ('full', bootstrap_fake_data)]
    if confirm_bootstrap():
        if len(sys.argv) == 1:
            # No flavor argument: wipe and run the default ('blank') flavor.
            reset_database()
            BOOTSTRAP_FLAVORS[0][1]()
        elif len(sys.argv) >= 2:
            # Make sure first arg is an available flavor.
            assert sys.argv[1] in dict(BOOTSTRAP_FLAVORS), (
                'Available flavors:\n\t%s') % ('\n\t'.join(
                    dict(BOOTSTRAP_FLAVORS).keys()))
            # Reset the database and bootstrap with the flavor.
            reset_database()
            dict(BOOTSTRAP_FLAVORS)[sys.argv[1]]()
    else:
        print 'Aborting.'
| woodymit/millstone | genome_designer/scripts/bootstrap_data.py | Python | mit | 18,750 | [
"BWA"
] | a234d3bd9fa4956158820440e726c12c66e2241933e9db95be5eac43ce2c25e8 |
# __main__
class ParameterError(Exception):
    """Error raised for an invalid parameter."""
    pass
# T-Coffee
class TcoffeeError(Exception):
    """A t_coffee run failed; keeps the run result for inspection.

    The exception message includes the input alignment file, the error text
    and the command that was executed.
    """

    def __init__(self, result, error, alignInFile, system_command):
        message = ('tcoffee blast error for file: {0}, with error message: {1}, '
                   'when trying to run command: {2}').format(
                       alignInFile, error, system_command)
        super(TcoffeeError, self).__init__(message)
        self.result = result
# Provean
class ProveanError(Exception):
    """Error raised while running Provean."""
    pass
class ProveanResourceError(Exception):
    """Provean resource failure; records the child process group to clean up."""

    def __init__(self, message, child_process_group_id):
        super(ProveanResourceError, self).__init__(message)
        self.child_process_group_id = child_process_group_id
class MutationMismatchError(Exception):
    """Error raised when a mutation mismatch is detected."""
    pass
# Finding templates (PDB in uppercase to be consistent with Biopython)
class PDBError(Exception):
    """Error related to a PDB structure."""
    pass
class PDBNotFoundError(Exception):
    """The requested PDB structure could not be found."""
    pass
class PDBEmptySequenceError(Exception):
    """One of the sequences is missing from the alignment.

    The most likely cause is that the alignment domain definitions were incorrect.
    """
    pass
class PDBDomainDefsError(Exception):
    """PDB domain definitions not found in the pdb file."""
    pass
class PDBChainError(Exception):
    """Error related to a chain of a PDB structure."""
    pass
# Making models
class MSMSError(Exception):
    """Error raised while running MSMS."""
    pass
class ModellerError(Exception):
    """Error raised while running Modeller."""
    pass
class FoldxError(Exception):
    """Error raised while running FoldX."""
    pass
class FoldXAAMismatchError(Exception):
    """Amino acid mismatch detected in a FoldX run."""
    pass
class ResourceError(Exception):
    """A resource limit was exceeded."""
    pass
class InterfaceMismatchError(Exception):
    """Mismatch detected in an interface definition."""
    pass
# Computing mutations
class NoSequenceFound(Exception):
    """No sequence could be found."""
    pass
class ChainsNotInteractingError(Exception):
    """The chains do not interact."""
    pass
class MutationOutsideDomainError(Exception):
    """The mutation falls outside the domain."""
    pass
class MutationOutsideInterfaceError(Exception):
    """The mutation falls outside the interface."""
    pass
# Database
class Archive7zipError(Exception):
    """A 7zip archive operation failed; keeps the error text and exit code."""

    def __init__(self, result, error_message, return_code):
        super(Archive7zipError, self).__init__(result)
        self.error_message = error_message
        self.return_code = return_code
class Archive7zipFileNotFoundError(Archive7zipError):
    """7zip error raised when a file is not found."""
    pass
| ostrokach/elaspic | elaspic/errors.py | Python | mit | 2,077 | [
"BLAST",
"Biopython"
] | 151d2efde43dbffaa433fdb1c0414988ba8a3623bec4e8f7480726f86953b911 |
# -*- coding: utf-8 -*-
# This work is licensed under the GNU Public License (GPL).
# To view a copy of this license, visit http://www.gnu.org/copyleft/gpl.html
# Written by Abd Allah Diab (mpcabd)
# Email: mpcabd ^at^ gmail ^dot^ com
# Website: http://mpcabd.igeex.biz
# Ported and tweaked from Java to Python, from Better Arabic Reshaper [https://github.com/agawish/Better-Arabic-Reshaper/]
# Usage:
### Install python-bidi [https://github.com/MeirKriheli/python-bidi], can be installed from pip `pip install python-bidi`.
# import arabic_reshaper
# from bidi.algorithm import get_display
# reshaped_text = arabic_reshaper.reshape(u'اللغة العربية رائعة')
# bidi_text = get_display(reshaped_text)
### Now you can pass `bidi_text` to any function that handles displaying/printing of the text, like writing it to PIL Image or passing it to a PDF generating method.
import re
# Isolated (plain) code points of the letters involved in the LAM-ALEF
# ligature handling below.
DEFINED_CHARACTERS_ORGINAL_ALF_UPPER_MDD = '\u0622'
DEFINED_CHARACTERS_ORGINAL_ALF_UPPER_HAMAZA = '\u0623'
DEFINED_CHARACTERS_ORGINAL_ALF_LOWER_HAMAZA = '\u0625'
DEFINED_CHARACTERS_ORGINAL_ALF = '\u0627'
DEFINED_CHARACTERS_ORGINAL_LAM = '\u0644'
# Each row: [plain alef, ligature used after a connecting letter,
# stand-alone ligature] -- get_lam_alef() indexes columns 1 or 2.
LAM_ALEF_GLYPHS = [
    ['\u0622', '\uFEF6', '\uFEF5'],
    ['\u0623', '\uFEF8', '\uFEF7'],
    ['\u0627', '\uFEFC', '\uFEFB'],
    ['\u0625', '\uFEFA', '\uFEF9']
]
HARAKAT = [
'\u0600', '\u0601', '\u0602', '\u0603', '\u0606', '\u0607', '\u0608', '\u0609',
'\u060A', '\u060B', '\u060D', '\u060E', '\u0610', '\u0611', '\u0612', '\u0613',
'\u0614', '\u0615', '\u0616', '\u0617', '\u0618', '\u0619', '\u061A', '\u061B',
'\u061E', '\u061F', '\u0621', '\u063B', '\u063C', '\u063D', '\u063E', '\u063F',
'\u0640', '\u064B', '\u064C', '\u064D', '\u064E', '\u064F', '\u0650', '\u0651',
'\u0652', '\u0653', '\u0654', '\u0655', '\u0656', '\u0657', '\u0658', '\u0659',
'\u065A', '\u065B', '\u065C', '\u065D', '\u065E', '\u0660', '\u066A', '\u066B',
'\u066C', '\u066F', '\u0670', '\u0672', '\u06D4', '\u06D5', '\u06D6', '\u06D7',
'\u06D8', '\u06D9', '\u06DA', '\u06DB', '\u06DC', '\u06DF', '\u06E0', '\u06E1',
'\u06E2', '\u06E3', '\u06E4', '\u06E5', '\u06E6', '\u06E7', '\u06E8', '\u06E9',
'\u06EA', '\u06EB', '\u06EC', '\u06ED', '\u06EE', '\u06EF', '\u06D6', '\u06D7',
'\u06D8', '\u06D9', '\u06DA', '\u06DB', '\u06DC', '\u06DD', '\u06DE', '\u06DF',
'\u06F0', '\u06FD', '\uFE70', '\uFE71', '\uFE72', '\uFE73', '\uFE74', '\uFE75',
'\uFE76', '\uFE77', '\uFE78', '\uFE79', '\uFE7A', '\uFE7B', '\uFE7C', '\uFE7D',
'\uFE7E', '\uFE7F', '\uFC5E', '\uFC5F', '\uFC60', '\uFC61', '\uFC62', '\uFC63'
]
ARABIC_GLYPHS = {
'\u0622' : ['\u0622', '\uFE81', '\uFE81', '\uFE82', '\uFE82', 2],
'\u0623' : ['\u0623', '\uFE83', '\uFE83', '\uFE84', '\uFE84', 2],
'\u0624' : ['\u0624', '\uFE85', '\uFE85', '\uFE86', '\uFE86', 2],
'\u0625' : ['\u0625', '\uFE87', '\uFE87', '\uFE88', '\uFE88', 2],
'\u0626' : ['\u0626', '\uFE89', '\uFE8B', '\uFE8C', '\uFE8A', 4],
'\u0627' : ['\u0627', '\u0627', '\u0627', '\uFE8E', '\uFE8E', 2],
'\u0628' : ['\u0628', '\uFE8F', '\uFE91', '\uFE92', '\uFE90', 4],
'\u0629' : ['\u0629', '\uFE93', '\uFE93', '\uFE94', '\uFE94', 2],
'\u062A' : ['\u062A', '\uFE95', '\uFE97', '\uFE98', '\uFE96', 4],
'\u062B' : ['\u062B', '\uFE99', '\uFE9B', '\uFE9C', '\uFE9A', 4],
'\u062C' : ['\u062C', '\uFE9D', '\uFE9F', '\uFEA0', '\uFE9E', 4],
'\u062D' : ['\u062D', '\uFEA1', '\uFEA3', '\uFEA4', '\uFEA2', 4],
'\u062E' : ['\u062E', '\uFEA5', '\uFEA7', '\uFEA8', '\uFEA6', 4],
'\u062F' : ['\u062F', '\uFEA9', '\uFEA9', '\uFEAA', '\uFEAA', 2],
'\u0630' : ['\u0630', '\uFEAB', '\uFEAB', '\uFEAC', '\uFEAC', 2],
'\u0631' : ['\u0631', '\uFEAD', '\uFEAD', '\uFEAE', '\uFEAE', 2],
'\u0632' : ['\u0632', '\uFEAF', '\uFEAF', '\uFEB0', '\uFEB0', 2],
'\u0633' : ['\u0633', '\uFEB1', '\uFEB3', '\uFEB4', '\uFEB2', 4],
'\u0634' : ['\u0634', '\uFEB5', '\uFEB7', '\uFEB8', '\uFEB6', 4],
'\u0635' : ['\u0635', '\uFEB9', '\uFEBB', '\uFEBC', '\uFEBA', 4],
'\u0636' : ['\u0636', '\uFEBD', '\uFEBF', '\uFEC0', '\uFEBE', 4],
'\u0637' : ['\u0637', '\uFEC1', '\uFEC3', '\uFEC4', '\uFEC2', 4],
'\u0638' : ['\u0638', '\uFEC5', '\uFEC7', '\uFEC8', '\uFEC6', 4],
'\u0639' : ['\u0639', '\uFEC9', '\uFECB', '\uFECC', '\uFECA', 4],
'\u063A' : ['\u063A', '\uFECD', '\uFECF', '\uFED0', '\uFECE', 4],
'\u0641' : ['\u0641', '\uFED1', '\uFED3', '\uFED4', '\uFED2', 4],
'\u0642' : ['\u0642', '\uFED5', '\uFED7', '\uFED8', '\uFED6', 4],
'\u0643' : ['\u0643', '\uFED9', '\uFEDB', '\uFEDC', '\uFEDA', 4],
'\u0644' : ['\u0644', '\uFEDD', '\uFEDF', '\uFEE0', '\uFEDE', 4],
'\u0645' : ['\u0645', '\uFEE1', '\uFEE3', '\uFEE4', '\uFEE2', 4],
'\u0646' : ['\u0646', '\uFEE5', '\uFEE7', '\uFEE8', '\uFEE6', 4],
'\u0647' : ['\u0647', '\uFEE9', '\uFEEB', '\uFEEC', '\uFEEA', 4],
'\u0648' : ['\u0648', '\uFEED', '\uFEED', '\uFEEE', '\uFEEE', 2],
'\u0649' : ['\u0649', '\uFEEF', '\uFEEF', '\uFEF0', '\uFEF0', 2],
'\u0671' : ['\u0671', '\u0671', '\u0671', '\uFB51', '\uFB51', 2],
'\u064A' : ['\u064A', '\uFEF1', '\uFEF3', '\uFEF4', '\uFEF2', 4],
'\u066E' : ['\u066E', '\uFBE4', '\uFBE8', '\uFBE9', '\uFBE5', 4],
'\u06AA' : ['\u06AA', '\uFB8E', '\uFB90', '\uFB91', '\uFB8F', 4],
'\u06C1' : ['\u06C1', '\uFBA6', '\uFBA8', '\uFBA9', '\uFBA7', 4],
'\u06E4' : ['\u06E4', '\u06E4', '\u06E4', '\u06E4', '\uFEEE', 2],
'\u067E' : ['\u067E', '\uFB56', '\uFB58', '\uFB59', '\uFB57', 4],
'\u0698' : ['\u0698', '\uFB8A', '\uFB8A', '\uFB8A', '\uFB8B', 2],
'\u06AF' : ['\u06AF', '\uFB92', '\uFB94', '\uFB95', '\uFB93', 4],
'\u0686' : ['\u0686', '\uFB7A', '\uFB7C', '\uFB7D', '\uFB7B', 4],
'\u06A9' : ['\u06A9', '\uFB8E', '\uFB90', '\uFB91', '\uFB8F', 4],
'\u06CC' : ['\u06CC', '\uFEEF', '\uFEF3', '\uFEF4', '\uFEF0', 4]
}
ARABIC_GLYPHS_LIST = [
['\u0622', '\uFE81', '\uFE81', '\uFE82', '\uFE82', 2],
['\u0623', '\uFE83', '\uFE83', '\uFE84', '\uFE84', 2],
['\u0624', '\uFE85', '\uFE85', '\uFE86', '\uFE86', 2],
['\u0625', '\uFE87', '\uFE87', '\uFE88', '\uFE88', 2],
['\u0626', '\uFE89', '\uFE8B', '\uFE8C', '\uFE8A', 4],
['\u0627', '\u0627', '\u0627', '\uFE8E', '\uFE8E', 2],
['\u0628', '\uFE8F', '\uFE91', '\uFE92', '\uFE90', 4],
['\u0629', '\uFE93', '\uFE93', '\uFE94', '\uFE94', 2],
['\u062A', '\uFE95', '\uFE97', '\uFE98', '\uFE96', 4],
['\u062B', '\uFE99', '\uFE9B', '\uFE9C', '\uFE9A', 4],
['\u062C', '\uFE9D', '\uFE9F', '\uFEA0', '\uFE9E', 4],
['\u062D', '\uFEA1', '\uFEA3', '\uFEA4', '\uFEA2', 4],
['\u062E', '\uFEA5', '\uFEA7', '\uFEA8', '\uFEA6', 4],
['\u062F', '\uFEA9', '\uFEA9', '\uFEAA', '\uFEAA', 2],
['\u0630', '\uFEAB', '\uFEAB', '\uFEAC', '\uFEAC', 2],
['\u0631', '\uFEAD', '\uFEAD', '\uFEAE', '\uFEAE', 2],
['\u0632', '\uFEAF', '\uFEAF', '\uFEB0', '\uFEB0', 2],
['\u0633', '\uFEB1', '\uFEB3', '\uFEB4', '\uFEB2', 4],
['\u0634', '\uFEB5', '\uFEB7', '\uFEB8', '\uFEB6', 4],
['\u0635', '\uFEB9', '\uFEBB', '\uFEBC', '\uFEBA', 4],
['\u0636', '\uFEBD', '\uFEBF', '\uFEC0', '\uFEBE', 4],
['\u0637', '\uFEC1', '\uFEC3', '\uFEC4', '\uFEC2', 4],
['\u0638', '\uFEC5', '\uFEC7', '\uFEC8', '\uFEC6', 4],
['\u0639', '\uFEC9', '\uFECB', '\uFECC', '\uFECA', 4],
['\u063A', '\uFECD', '\uFECF', '\uFED0', '\uFECE', 4],
['\u0641', '\uFED1', '\uFED3', '\uFED4', '\uFED2', 4],
['\u0642', '\uFED5', '\uFED7', '\uFED8', '\uFED6', 4],
['\u0643', '\uFED9', '\uFEDB', '\uFEDC', '\uFEDA', 4],
['\u0644', '\uFEDD', '\uFEDF', '\uFEE0', '\uFEDE', 4],
['\u0645', '\uFEE1', '\uFEE3', '\uFEE4', '\uFEE2', 4],
['\u0646', '\uFEE5', '\uFEE7', '\uFEE8', '\uFEE6', 4],
['\u0647', '\uFEE9', '\uFEEB', '\uFEEC', '\uFEEA', 4],
['\u0648', '\uFEED', '\uFEED', '\uFEEE', '\uFEEE', 2],
['\u0649', '\uFEEF', '\uFEEF', '\uFEF0', '\uFEF0', 2],
['\u0671', '\u0671', '\u0671', '\uFB51', '\uFB51', 2],
['\u064A', '\uFEF1', '\uFEF3', '\uFEF4', '\uFEF2', 4],
['\u066E', '\uFBE4', '\uFBE8', '\uFBE9', '\uFBE5', 4],
['\u06AA', '\uFB8E', '\uFB90', '\uFB91', '\uFB8F', 4],
['\u06C1', '\uFBA6', '\uFBA8', '\uFBA9', '\uFBA7', 4],
['\u067E', '\uFB56', '\uFB58', '\uFB59', '\uFB57', 4],
['\u0698', '\uFB8A', '\uFB8A', '\uFB8A', '\uFB8B', 2],
['\u06AF', '\uFB92', '\uFB94', '\uFB95', '\uFB93', 4],
['\u0686', '\uFB7A', '\uFB7C', '\uFB7D', '\uFB7B', 4],
['\u06A9', '\uFB8E', '\uFB90', '\uFB91', '\uFB8F', 4],
['\u06CC', '\uFEEF', '\uFEF3', '\uFEF4', '\uFEF0', 4]
]
def get_reshaped_glyph(target, location):
if target in ARABIC_GLYPHS:
return ARABIC_GLYPHS[target][location]
else:
return target
def get_glyph_type(target):
if target in ARABIC_GLYPHS:
return ARABIC_GLYPHS[target][5]
else:
return 2
def is_haraka(target):
return target in HARAKAT
def replace_jalalah(unshaped_word):
return re.sub('^\u0627\u0644\u0644\u0647$', '\uFDF2', unshaped_word)
def replace_lam_alef(unshaped_word):
list_word = list(unshaped_word)
letter_before = ''
for i in range(len(unshaped_word)):
if not is_haraka(unshaped_word[i]) and unshaped_word[i] != DEFINED_CHARACTERS_ORGINAL_LAM:
letter_before = unshaped_word[i]
if unshaped_word[i] == DEFINED_CHARACTERS_ORGINAL_LAM:
candidate_lam = unshaped_word[i]
lam_position = i
haraka_position = i + 1
while haraka_position < len(unshaped_word) and is_haraka(unshaped_word[haraka_position]):
haraka_position += 1
if haraka_position < len(unshaped_word):
if lam_position > 0 and get_glyph_type(letter_before) > 2:
lam_alef = get_lam_alef(list_word[haraka_position], candidate_lam, False)
else:
lam_alef = get_lam_alef(list_word[haraka_position], candidate_lam, True)
if lam_alef != '':
list_word[lam_position] = lam_alef
list_word[haraka_position] = ' '
return ''.join(list_word).replace(' ', '')
def get_lam_alef(candidate_alef, candidate_lam, is_end_of_word):
shift_rate = 1
reshaped_lam_alef = ''
if is_end_of_word:
shift_rate += 1
if DEFINED_CHARACTERS_ORGINAL_LAM == candidate_lam:
if DEFINED_CHARACTERS_ORGINAL_ALF_UPPER_MDD == candidate_alef:
reshaped_lam_alef = LAM_ALEF_GLYPHS[0][shift_rate]
if DEFINED_CHARACTERS_ORGINAL_ALF_UPPER_HAMAZA == candidate_alef:
reshaped_lam_alef = LAM_ALEF_GLYPHS[1][shift_rate]
if DEFINED_CHARACTERS_ORGINAL_ALF == candidate_alef:
reshaped_lam_alef = LAM_ALEF_GLYPHS[2][shift_rate]
if DEFINED_CHARACTERS_ORGINAL_ALF_LOWER_HAMAZA == candidate_alef:
reshaped_lam_alef = LAM_ALEF_GLYPHS[3][shift_rate]
return reshaped_lam_alef
class DecomposedWord(object):
def __init__(self, word):
self.stripped_harakat = []
self.harakat_positions = []
self.stripped_regular_letters = []
self.letters_position = []
for i in range(len(word)):
c = word[i]
if is_haraka(c):
self.harakat_positions.append(i)
self.stripped_harakat.append(c)
else:
self.letters_position.append(i)
self.stripped_regular_letters.append(c)
def reconstruct_word(self, reshaped_word):
l = list('\0' * (len(self.stripped_harakat) + len(reshaped_word)))
for i in range(len(self.letters_position)):
l[self.letters_position[i]] = reshaped_word[i]
for i in range(len(self.harakat_positions)):
l[self.harakat_positions[i]] = self.stripped_harakat[i]
return ''.join(l)
def get_reshaped_word(unshaped_word):
unshaped_word = replace_jalalah(unshaped_word)
unshaped_word = replace_lam_alef(unshaped_word)
decomposed_word = DecomposedWord(unshaped_word)
result = ''
if decomposed_word.stripped_regular_letters:
result = reshape_it(''.join(decomposed_word.stripped_regular_letters))
return decomposed_word.reconstruct_word(result)
def reshape_it(unshaped_word):
if not unshaped_word:
return ''
if len(unshaped_word) == 1:
return get_reshaped_glyph(unshaped_word[0], 1)
reshaped_word = []
for i in range(len(unshaped_word)):
before = False
after = False
if i == 0:
after = get_glyph_type(unshaped_word[i]) == 4
elif i == len(unshaped_word) - 1:
before = get_glyph_type(unshaped_word[i - 1]) == 4
else:
after = get_glyph_type(unshaped_word[i]) == 4
before = get_glyph_type(unshaped_word[i - 1]) == 4
if after and before:
reshaped_word.append(get_reshaped_glyph(unshaped_word[i], 3))
elif after and not before:
reshaped_word.append(get_reshaped_glyph(unshaped_word[i], 2))
elif not after and before:
reshaped_word.append(get_reshaped_glyph(unshaped_word[i], 4))
elif not after and not before:
reshaped_word.append(get_reshaped_glyph(unshaped_word[i], 1))
return ''.join(reshaped_word)
def is_arabic_character(target):
return target in ARABIC_GLYPHS or target in HARAKAT
def get_words(sentence):
if sentence:
return re.split('\\s', sentence)
return []
def has_arabic_letters(word):
for c in word:
if is_arabic_character(c):
return True
return False
def is_arabic_word(word):
for c in word:
if not is_arabic_character(c):
return False
return True
def get_words_from_mixed_word(word):
temp_word = ''
words = []
for c in word:
if is_arabic_character(c):
if temp_word and not is_arabic_word(temp_word):
words.append(temp_word)
temp_word = c
else:
temp_word += c
else:
if temp_word and is_arabic_word(temp_word):
words.append(temp_word)
temp_word = c
else:
temp_word += c
if temp_word:
words.append(temp_word)
return words
def reshape(text):
if text:
lines = re.split('\\r?\\n', text)
for i in range(len(lines)):
lines[i] = reshape_sentence(lines[i])
return '\n'.join(lines)
return ''
def reshape_sentence(sentence):
words = get_words(sentence)
for i in range(len(words)):
word = words[i]
if has_arabic_letters(word):
if is_arabic_word(word):
words[i] = get_reshaped_word(word)
else:
mixed_words = get_words_from_mixed_word(word)
for j in range(len(mixed_words)):
mixed_words[j] = get_reshaped_word(mixed_words[j])
words[i] = ''.join(mixed_words)
return ' '.join(words)
| SmartElect/SmartElect | rollgen/arabic_reshaper.py | Python | apache-2.0 | 13,671 | [
"VisIt"
] | d5fab593429837c825738e3c41d52492a83e87044fe3edfab03bad56bf704e3f |
"""Generators to create different output formats."""
class Augtool:
"""Visits each node in turn and generates augtool set commands"""
def visit(self, pathnode, augpath):
"""Generates augtool set commands for everything below augpath in
pathnode"""
topnode = pathnode.topnode(augpath)
return self.visitnode(topnode)
def visitnode(self, node):
"""Generates augtool set commands for everything below node"""
setpath = node.setpath()
value = node.value()
if value:
# FIXME: quoting
command = "set %s '%s'" % (setpath, value)
else:
command = "clear %s" % setpath
yield command
# Recurse depth-first and yield all commands
for child in node.children():
for childcmd in self.visitnode(child):
yield childcmd
| domcleal/aug2cmds | aug2cmds/outputs.py | Python | bsd-3-clause | 877 | [
"VisIt"
] | 080574770979d9ff72ca5c4019573e55042aac7f58bf7ac89b5d28e0323eb0c0 |
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
import gtk, pango
from zeroinstall import _, translation
from zeroinstall.cmd import slave
from zeroinstall.support import tasks, pretty_size
from zeroinstall.injector import model, download
from zeroinstall.gui import properties
from zeroinstall.gtkui.icon import load_icon
from logging import warning, info
from zeroinstall.gui.gui import gobject
ngettext = translation.ngettext
ICON_SIZE = 20.0
CELL_TEXT_INDENT = int(ICON_SIZE) + 4
def get_tooltip_text(mainwindow, details, model_column):
interface = details['interface']
if model_column == InterfaceBrowser.INTERFACE_NAME:
return _("Full name: %s") % interface
elif model_column == InterfaceBrowser.SUMMARY:
return details['summary-tip']
elif model_column is None:
return _("Click here for more options...")
version = details.get('version', None)
if version is None:
return _("No suitable version was found. Double-click "
"here to find out why.")
if model_column == InterfaceBrowser.VERSION:
return details['version-tip']
assert model_column == InterfaceBrowser.DOWNLOAD_SIZE
return details["fetch-tip"]
import math
angle_right = math.pi / 2
class MenuIconRenderer(gtk.GenericCellRenderer):
def __init__(self):
gtk.GenericCellRenderer.__init__(self)
self.set_property('mode', gtk.CELL_RENDERER_MODE_ACTIVATABLE)
def do_set_property(self, prop, value):
setattr(self, prop.name, value)
def do_get_size(self, widget, cell_area, layout = None):
return (0, 0, 20, 20)
on_get_size = do_get_size # GTK 2
if gtk.pygtk_version >= (2, 90):
# note: if you get "TypeError: Couldn't find conversion for foreign struct 'cairo.Context'", you need "python3-gi-cairo"
def do_render(self, cr, widget, background_area, cell_area, flags): # GTK 3
context = widget.get_style_context()
gtk.render_arrow(context, cr, angle_right,
cell_area.x + 5, cell_area.y + 5, max(cell_area.width, cell_area.height) - 10)
else:
def on_render(self, window, widget, background_area, cell_area, expose_area, flags): # GTK 2
if flags & gtk.CELL_RENDERER_PRELIT:
state = gtk.STATE_PRELIGHT
else:
state = gtk.STATE_NORMAL
widget.style.paint_box(window, state, gtk.SHADOW_OUT, expose_area, widget, None,
cell_area.x, cell_area.y, cell_area.width, cell_area.height)
widget.style.paint_arrow(window, state, gtk.SHADOW_NONE, expose_area, widget, None,
gtk.ARROW_RIGHT, True,
cell_area.x + 5, cell_area.y + 5, cell_area.width - 10, cell_area.height - 10)
class IconAndTextRenderer(gtk.GenericCellRenderer):
__gproperties__ = {
"image": (gobject.TYPE_PYOBJECT, "Image", "Image", gobject.PARAM_READWRITE),
"text": (gobject.TYPE_STRING, "Text", "Text", "-", gobject.PARAM_READWRITE),
}
def do_set_property(self, prop, value):
setattr(self, prop.name, value)
def do_get_size(self, widget, cell_area, layout = None):
if not layout:
layout = widget.create_pango_layout(self.text)
a, rect = layout.get_pixel_extents()
if self.image:
pixmap_height = self.image.get_height()
else:
pixmap_height = 32
if not isinstance(rect, tuple):
rect = (rect.x, rect.y, rect.width, rect.height) # GTK 3
both_height = max(rect[1] + rect[3], pixmap_height)
return (0, 0,
rect[0] + rect[2] + CELL_TEXT_INDENT,
both_height)
on_get_size = do_get_size # GTK 2
if gtk.pygtk_version >= (2, 90):
def do_render(self, cr, widget, background_area, cell_area, flags): # GTK 3
if self.image is None: return
layout = widget.create_pango_layout(self.text)
a, rect = layout.get_pixel_extents()
context = widget.get_style_context()
image_y = int(0.5 * (cell_area.height - self.image.get_height()))
gtk.render_icon(context, cr, self.image, cell_area.x, cell_area.y + image_y)
text_y = int(0.5 * (cell_area.height - (rect.y + rect.height)))
gtk.render_layout(context, cr,
cell_area.x + CELL_TEXT_INDENT,
cell_area.y + text_y,
layout)
else:
def on_render(self, window, widget, background_area, cell_area, expose_area, flags): # GTK 2
if self.image is None: return
layout = widget.create_pango_layout(self.text)
a, rect = layout.get_pixel_extents()
if flags & gtk.CELL_RENDERER_SELECTED:
state = gtk.STATE_SELECTED
elif flags & gtk.CELL_RENDERER_PRELIT:
state = gtk.STATE_PRELIGHT
else:
state = gtk.STATE_NORMAL
image_y = int(0.5 * (cell_area.height - self.image.get_height()))
window.draw_pixbuf(widget.style.white_gc, self.image, 0, 0,
cell_area.x,
cell_area.y + image_y)
text_y = int(0.5 * (cell_area.height - (rect[1] + rect[3])))
widget.style.paint_layout(window, state, True,
expose_area, widget, "cellrenderertext",
cell_area.x + CELL_TEXT_INDENT,
cell_area.y + text_y,
layout)
if gtk.pygtk_version < (2, 8, 0):
# Note sure exactly which versions need this.
# 2.8.0 gives a warning if you include it, though.
gobject.type_register(IconAndTextRenderer)
gobject.type_register(MenuIconRenderer)
def walk(model, it):
while it:
yield it
for x in walk(model, model.iter_children(it)): yield x
it = model.iter_next(it)
class InterfaceBrowser(object):
model = None
cached_icon = None
driver = None
config = None
update_icons = False
DETAILS = 0
INTERFACE_NAME = 1
VERSION = 2
SUMMARY = 3
DOWNLOAD_SIZE = 4
ICON = 5
BACKGROUND = 6
PROBLEM = 7
columns = [(_('Component'), INTERFACE_NAME),
(_('Version'), VERSION),
(_('Fetch'), DOWNLOAD_SIZE),
(_('Description'), SUMMARY),
('', None)]
def __init__(self, driver, widgets):
self.driver = driver
self.config = driver.config
tree_view = widgets.get_widget('components')
tree_view.set_property('has-tooltip', True)
def callback(widget, x, y, keyboard_mode, tooltip):
x, y = tree_view.convert_widget_to_bin_window_coords(x, y)
pos = tree_view.get_path_at_pos(x, y)
if pos:
tree_view.set_tooltip_cell(tooltip, pos[0], pos[1], None)
path = pos[0]
try:
col_index = column_objects.index(pos[1])
except ValueError:
return False
else:
col = self.columns[col_index][1]
row = self.model[path]
details = row[InterfaceBrowser.DETAILS]
tooltip.set_text(get_tooltip_text(self, details, col))
return True
else:
return False
tree_view.connect('query-tooltip', callback)
self.cached_icon = {} # URI -> GdkPixbuf
self.default_icon = tree_view.get_style().lookup_icon_set(gtk.STOCK_EXECUTE).render_icon(tree_view.get_style(),
gtk.TEXT_DIR_NONE, gtk.STATE_NORMAL, gtk.ICON_SIZE_SMALL_TOOLBAR, tree_view, None)
self.model = gtk.TreeStore(object, str, str, str, str, gobject.TYPE_PYOBJECT, str, bool)
self.tree_view = tree_view
tree_view.set_model(self.model)
column_objects = []
text = gtk.CellRendererText()
coloured_text = gtk.CellRendererText()
for name, model_column in self.columns:
if model_column == InterfaceBrowser.INTERFACE_NAME:
column = gtk.TreeViewColumn(name, IconAndTextRenderer(),
text = model_column,
image = InterfaceBrowser.ICON)
elif model_column == None:
menu_column = column = gtk.TreeViewColumn('', MenuIconRenderer())
else:
if model_column == InterfaceBrowser.SUMMARY:
text_ellip = gtk.CellRendererText()
try:
text_ellip.set_property('ellipsize', pango.ELLIPSIZE_END)
except:
pass
column = gtk.TreeViewColumn(name, text_ellip, text = model_column)
column.set_expand(True)
elif model_column == InterfaceBrowser.VERSION:
column = gtk.TreeViewColumn(name, coloured_text, text = model_column,
background = InterfaceBrowser.BACKGROUND)
else:
column = gtk.TreeViewColumn(name, text, text = model_column)
tree_view.append_column(column)
column_objects.append(column)
tree_view.set_enable_search(True)
selection = tree_view.get_selection()
def button_press(tree_view, bev):
pos = tree_view.get_path_at_pos(int(bev.x), int(bev.y))
if not pos:
return False
path, col, x, y = pos
if (bev.button == 3 or (bev.button < 4 and col is menu_column)) \
and bev.type == gtk.gdk.BUTTON_PRESS:
selection.select_path(path)
details = self.model[path][InterfaceBrowser.DETAILS]
self.show_popup_menu(details, bev)
return True
if bev.button != 1 or bev.type != gtk.gdk._2BUTTON_PRESS:
return False
details = self.model[path][InterfaceBrowser.DETAILS]
iface_uri = details['interface']
iface = self.config.iface_cache.get_interface(iface_uri)
properties.edit(driver, iface, details['name'], self.compile, show_versions = True)
tree_view.connect('button-press-event', button_press)
tree_view.connect('destroy', lambda s: driver.watchers.remove(self.build_tree))
driver.watchers.append(self.build_tree)
def set_update_icons(self, update_icons):
if update_icons:
# Clear icons cache to make sure they're really updated
self.cached_icon = {}
self.update_icons = update_icons
def get_icon(self, iface_uri):
"""Get an icon for this interface. If the icon is in the cache, use that.
If not, start a download. If we already started a download (successful or
not) do nothing. Returns None if no icon is currently available."""
try:
# Try the in-memory cache
return self.cached_icon[iface_uri]
except KeyError:
# Try the on-disk cache
iface = self.config.iface_cache.get_interface(iface_uri)
iconpath = self.config.iface_cache.get_icon_path(iface)
if iconpath:
icon = load_icon(iconpath, ICON_SIZE, ICON_SIZE)
# (if icon is None, cache the fact that we can't load it)
self.cached_icon[iface.uri] = icon
else:
icon = None
# Download a new icon if we don't have one, or if the
# user did a 'Refresh'
if iconpath is None or self.update_icons:
if self.config.network_use == model.network_offline:
fetcher = None
else:
fetcher = slave.invoke_master(["download-icon", iface.uri])
if fetcher:
if iface.uri not in self.cached_icon:
self.cached_icon[iface.uri] = None # Only try once
@tasks.async
def update_display():
yield fetcher
try:
tasks.check(fetcher)
# Try to insert new icon into the cache
# If it fails, we'll be left with None in the cached_icon so
# we don't try again.
iconpath = self.config.iface_cache.get_icon_path(iface)
if iconpath:
self.cached_icon[iface.uri] = load_icon(iconpath, ICON_SIZE, ICON_SIZE)
self.build_tree()
else:
pass #warning("Failed to download icon for '%s'", iface)
except download.DownloadAborted as ex:
info("Icon download aborted: %s", ex)
# Don't report further; the user knows they cancelled
except download.DownloadError as ex:
warning("Icon download failed: %s", ex)
# Not worth showing a dialog box for this
except Exception as ex:
import traceback
traceback.print_exc()
self.config.handler.report_error(ex)
update_display()
# elif fetcher is None: don't store anything in cached_icon
# Note: if no icon is available for downloading,
# more attempts are made later.
# It can happen that no icon is yet available because
# the interface was not downloaded yet, in which case
# it's desireable to try again once the interface is available
return icon
return None
def build_tree(self):
self.model.clear()
def add_node(parent, details):
iter = self.model.append(parent)
iface = details['interface']
self.model[iter][InterfaceBrowser.DETAILS] = details
self.model[iter][InterfaceBrowser.INTERFACE_NAME] = details["name"]
self.model[iter][InterfaceBrowser.SUMMARY] = details["summary"]
self.model[iter][InterfaceBrowser.ICON] = self.get_icon(iface) or self.default_icon
problem = details["type"] != "selected"
self.model[iter][InterfaceBrowser.PROBLEM] = problem
if problem:
self.model[iter][InterfaceBrowser.VERSION] = '(problem)'
self.model[iter][InterfaceBrowser.DOWNLOAD_SIZE] = ''
else:
if details["type"] == "selected":
self.model[iter][InterfaceBrowser.VERSION] = details["version"]
self.model[iter][InterfaceBrowser.DOWNLOAD_SIZE] = details["fetch"]
for child in details["children"]:
add_node(iter, child)
else:
self.model[iter][InterfaceBrowser.VERSION] = _('(problem)') if details["type"] == "problem" else _('(none)')
try:
add_node(None, self.driver.tree)
self.tree_view.expand_all()
except Exception as ex:
warning("Failed to build tree: %s", ex, exc_info = ex)
raise
def show_popup_menu(self, details, bev):
iface_uri = details['interface']
iface_name = details['name']
have_source = details['may-compile']
from zeroinstall.gui import bugs
iface = self.config.iface_cache.get_interface(iface_uri)
global menu # Fix GC problem in PyGObject
menu = gtk.Menu()
for label, cb in [(_('Show Feeds'), lambda: properties.edit(self.driver, iface, iface_name, self.compile)),
(_('Show Versions'), lambda: properties.edit(self.driver, iface, iface_name, self.compile, show_versions = True)),
(_('Report a Bug...'), lambda: bugs.report_bug(self.driver, iface))]:
item = gtk.MenuItem()
item.set_label(label)
if cb:
item.connect('activate', lambda item, cb=cb: cb())
else:
item.set_sensitive(False)
item.show()
menu.append(item)
item = gtk.MenuItem()
item.set_label(_('Compile'))
item.show()
menu.append(item)
if have_source:
compile_menu = gtk.Menu()
item.set_submenu(compile_menu)
item = gtk.MenuItem()
item.set_label(_('Automatic'))
item.connect('activate', lambda item: self.compile(iface, autocompile = True))
item.show()
compile_menu.append(item)
item = gtk.MenuItem()
item.set_label(_('Manual...'))
item.connect('activate', lambda item: self.compile(iface, autocompile = False))
item.show()
compile_menu.append(item)
else:
item.set_sensitive(False)
if gtk.pygtk_version >= (2, 90):
menu.popup(None, None, None, None, bev.button, bev.time)
else:
menu.popup(None, None, None, bev.button, bev.time)
@tasks.async
def compile(self, interface, autocompile = True):
blocker = slave.invoke_master(["gui-compile", interface.uri, autocompile])
yield blocker
tasks.check(blocker)
from zeroinstall.gui import main
main.recalculate()
def update_download_status(self, only_update_visible = False):
"""Called at regular intervals while there are downloads in progress,
and once at the end. Also called when things are added to the store.
Update the TreeView with the interfaces."""
# A download may be for a feed, an interface or an implementation.
# Create the reverse mapping (item -> download)
hints = {}
for dl in self.config.handler.monitored_downloads:
if dl.hint:
if dl.hint not in hints:
hints[dl.hint] = []
hints[dl.hint].append(dl)
# Only update currently visible rows
if only_update_visible and self.tree_view.get_visible_range() != None:
firstVisiblePath, lastVisiblePath = self.tree_view.get_visible_range()
firstVisibleIter = self.model.get_iter(firstVisiblePath)
else:
# (or should we just wait until the TreeView has settled enough to tell
# us what is visible?)
firstVisibleIter = self.model.get_iter_root()
lastVisiblePath = None
for it in walk(self.model, firstVisibleIter):
row = self.model[it]
details = row[InterfaceBrowser.DETAILS]
# Is this interface the download's hint?
downloads = []
for feed_url in details['all-feeds']:
downloads += hints.get(feed_url, [])
if downloads:
so_far = 0
expected = None
for dl in downloads:
if dl.expected_size:
expected = (expected or 0) + dl.expected_size
so_far += dl.get_bytes_downloaded_so_far()
if expected:
summary = ngettext("(downloading %(downloaded)s/%(expected)s [%(percentage).2f%%])",
"(downloading %(downloaded)s/%(expected)s [%(percentage).2f%%] in %(number)d downloads)",
downloads)
values_dict = {'downloaded': pretty_size(so_far), 'expected': pretty_size(expected), 'percentage': 100 * so_far / float(expected), 'number': len(downloads)}
else:
summary = ngettext("(downloading %(downloaded)s/unknown)",
"(downloading %(downloaded)s/unknown in %(number)d downloads)",
downloads)
values_dict = {'downloaded': pretty_size(so_far), 'number': len(downloads)}
row[InterfaceBrowser.SUMMARY] = summary % values_dict
else:
row[InterfaceBrowser.DOWNLOAD_SIZE] = details.get("fetch", "")
row[InterfaceBrowser.SUMMARY] = details['summary']
if self.model.get_path(it) == lastVisiblePath:
break
def highlight_problems(self):
"""Called when the solve finishes. Highlight any missing implementations."""
for it in walk(self.model, self.model.get_iter_root()):
row = self.model[it]
if row[InterfaceBrowser.PROBLEM]:
row[InterfaceBrowser.BACKGROUND] = '#f88'
| afb/0install | zeroinstall/gui/iface_browser.py | Python | lgpl-2.1 | 16,982 | [
"VisIt"
] | 75c92d10904794928536ee4a83820f53c0a42783799f42e3170cf4ba9610fff8 |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module is intended to match crystal structures against known crystallographic "prototype"
structures.
In this module, the AflowPrototypeMatcher uses the AFLOW LIBRARY OF CRYSTALLOGRAPHIC PROTOTYPES.
If using this particular class, please cite their publication appropriately:
Mehl, M. J., Hicks, D., Toher, C., Levy, O., Hanson, R. M., Hart, G., & Curtarolo, S. (2017).
The AFLOW library of crystallographic prototypes: part 1.
Computational Materials Science, 136, S1-S828.
https://doi.org/10.1016/j.commatsci.2017.01.017
"""
import os
from monty.serialization import loadfn
from pymatgen.analysis.structure_matcher import StructureMatcher
module_dir = os.path.dirname(os.path.abspath(__file__))
AFLOW_PROTOTYPE_LIBRARY = loadfn(os.path.join(os.path.dirname(os.path.abspath(__file__)), "aflow_prototypes.json"))
class AflowPrototypeMatcher:
"""
This class will match structures to their crystal prototypes, and will
attempt to group species together to match structures derived from
prototypes (e.g. an A_xB_1-x_C from a binary prototype), and will
give these the names the "-like" suffix.
This class uses data from the AFLOW LIBRARY OF CRYSTALLOGRAPHIC PROTOTYPES.
If using this class, please cite their publication appropriately:
Mehl, M. J., Hicks, D., Toher, C., Levy, O., Hanson, R. M., Hart, G., & Curtarolo, S. (2017).
The AFLOW library of crystallographic prototypes: part 1.
Computational Materials Science, 136, S1-S828.
https://doi.org/10.1016/j.commatsci.2017.01.017
"""
def __init__(self, initial_ltol=0.2, initial_stol=0.3, initial_angle_tol=5):
"""
Tolerances as defined in StructureMatcher. Tolerances will be
gradually decreased until only a single match is found (if possible).
Args:
initial_ltol: fractional length tolerance
initial_stol: site tolerance
initial_angle_tol: angle tolerance
"""
self.initial_ltol = initial_ltol
self.initial_stol = initial_stol
self.initial_angle_tol = initial_angle_tol
@staticmethod
def _match_prototype(structure_matcher, structure):
tags = []
for d in AFLOW_PROTOTYPE_LIBRARY:
p = d["snl"].structure
match = structure_matcher.fit_anonymous(p, structure)
if match:
tags.append(d)
return tags
def _match_single_prototype(self, structure):
sm = StructureMatcher(
ltol=self.initial_ltol,
stol=self.initial_stol,
angle_tol=self.initial_angle_tol,
)
tags = self._match_prototype(sm, structure)
while len(tags) > 1:
sm.ltol *= 0.8
sm.stol *= 0.8
sm.angle_tol *= 0.8
tags = self._match_prototype(sm, structure)
if sm.ltol < 0.01:
break
return tags
def get_prototypes(self, structure):
"""
Get prototype(s) structures for a given
input structure. If you use this method in
your work, please cite the appropriate
AFLOW publication:
Mehl, M. J., Hicks, D., Toher, C., Levy, O.,
Hanson, R. M., Hart, G., & Curtarolo, S. (2017).
The AFLOW library of crystallographic prototypes: part 1.
Computational Materials Science, 136, S1-S828.
https://doi.org/10.1016/j.commatsci.2017.01.017
Args:
structure: structure to match
Returns (list): A list of dicts with keys
'snl' for the matched prototype and 'tags',
a dict of tags ('mineral', 'strukturbericht'
and 'aflow') of that prototype. This should
be a list containing just a single entry,
but it is possible a material can match
multiple prototypes.
"""
tags = self._match_single_prototype(structure)
if len(tags) == 0:
return None
return tags
| vorwerkc/pymatgen | pymatgen/analysis/prototypes.py | Python | mit | 4,051 | [
"CRYSTAL",
"pymatgen"
] | 7a8631ece3954fe83f80adeff561a49195675f212245893d360c9e9b66c93693 |
# compartment_net_no_array.py ---
#
# Filename: compartment_net_no_array.py
# Description:
# Author:Subhasis Ray
# Maintainer:
# Created: Sat Aug 11 14:30:21 2012 (+0530)
# Version:
# Last-Updated: Tue May 7 18:26:26 2013 (+0530)
# By: subha
# Update #: 974
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
"""
Following, is a demo to create a network of single compartmental neurons connected
via alpha synapses. This is same as compartment_net.py except that
we avoid ematrix and use single melements.
"""
import sys
sys.path.append('../../python')
import os
os.environ['NUMPTHREADS'] = '1'
import numpy as np
import matplotlib.pyplot as plt
import moose
from moose import utils
EREST_ACT = -70e-3
# Gate equations have the form:
#
# y(x) = (A + B * x) / (C + exp((x + D) / F))
#
# where x is membrane voltage and y is the rate constant for gate
# closing or opening
Na_m_params = [1e5 * (25e-3 + EREST_ACT), # 'A_A':
-1e5, # 'A_B':
-1.0, # 'A_C':
-25e-3 - EREST_ACT, # 'A_D':
-10e-3, # 'A_F':
4e3, # 'B_A':
0.0, # 'B_B':
0.0, # 'B_C':
0.0 - EREST_ACT, # 'B_D':
18e-3 # 'B_F':
]
Na_h_params = [ 70.0, # 'A_A':
0.0, # 'A_B':
0.0, # 'A_C':
0.0 - EREST_ACT, # 'A_D':
0.02, # 'A_F':
1000.0, # 'B_A':
0.0, # 'B_B':
1.0, # 'B_C':
-30e-3 - EREST_ACT, # 'B_D':
-0.01 # 'B_F':
]
K_n_params = [ 1e4 * (10e-3 + EREST_ACT), # 'A_A':
-1e4, # 'A_B':
-1.0, # 'A_C':
-10e-3 - EREST_ACT, # 'A_D':
-10e-3, # 'A_F':
0.125e3, # 'B_A':
0.0, # 'B_B':
0.0, # 'B_C':
0.0 - EREST_ACT, # 'B_D':
80e-3 # 'B_F':
]
VMIN = -40e-3 + EREST_ACT
VMAX = 120e-3 + EREST_ACT
VDIVS = 30000
soma_dia = 30e-6
def create_na_chan(path):
na = moose.HHChannel('%s/na' % (path))
na.Xpower = 3
xGate = moose.HHGate(na.path + '/gateX')
xGate.setupAlpha(Na_m_params +
[VDIVS, VMIN, VMAX])
na.Ypower = 1
yGate = moose.HHGate(na.path + '/gateY')
yGate.setupAlpha(Na_h_params +
[VDIVS, VMIN, VMAX])
na.Ek = 115e-3 + EREST_ACT
return na
def create_k_chan(path):
k = moose.HHChannel('%s/k' % (path))
k.Xpower = 4.0
xGate = moose.HHGate(k.path + '/gateX')
xGate.setupAlpha(K_n_params +
[VDIVS, VMIN, VMAX])
k.Ek = -12e-3 + EREST_ACT
return k
def create_compartment(path):
comp = moose.Compartment(path)
comp.diameter = soma_dia
comp.Em = EREST_ACT + 10.613e-3
comp.initVm = EREST_ACT
sarea = np.pi * soma_dia * soma_dia
comp.Rm = 1/(0.3e-3 * 1e4 * sarea)
comp.Cm = 1e-6 * 1e4 * sarea
if moose.exists('/library/na'):
nachan = moose.element(moose.copy('/library/na', comp, 'na'))
else:
nachan = create_na_chan(comp.path)
nachan.Gbar = 120e-3 * sarea * 1e4
moose.showfield(nachan)
moose.connect(nachan, 'channel', comp, 'channel')
if moose.exists('/library/k'):
kchan = moose.element(moose.copy('/library/k', comp, 'k'))
else:
kchan = create_k_chan(comp.path)
kchan.Gbar = 36e-3 * sarea * 1e4
moose.connect(kchan, 'channel', comp, 'channel')
synchan = moose.SynChan(comp.path + '/synchan')
synchan.Gbar = 1e-8
synchan.tau1 = 2e-3
synchan.tau2 = 2e-3
synchan.Ek = 0.0
m = moose.connect(comp, 'channel', synchan, 'channel')
spikegen = moose.SpikeGen(comp.path + '/spikegen')
spikegen.threshold = 0.0
m = moose.connect(comp, 'VmOut', spikegen, 'Vm')
return comp
def test_compartment():
n = moose.Neutral('/model')
lib = moose.Neutral('/library')
create_na_chan(lib.path)
create_k_chan(lib.path)
comp = create_compartment('/model/soma')
pg = moose.PulseGen('/model/pulse')
pg.firstDelay = 50e-3
pg.firstWidth = 40e-3
pg.firstLevel = 1e-9
moose.connect(pg, 'output', comp, 'injectMsg')
d = moose.Neutral('/data')
vm = moose.Table('/data/Vm')
moose.connect(vm, 'requestOut', comp, 'getVm')
gK = moose.Table('/data/gK')
moose.connect(gK, 'requestOut', moose.element('%s/k' % (comp.path)), 'getGk')
gNa = moose.Table('/data/gNa')
moose.connect(gNa, 'requestOut', moose.element('%s/na' % (comp.path)), 'getGk')
# utils.resetSim(['/model', '/data'], 1e-6, 1e-4, simmethod='ee')
assign_clocks(['/model'], 1e-6, 1e-4)
simtime = 100e-3
moose.start(simtime)
t = np.linspace(0, simtime, len(vm.vector))
plt.subplot(221)
plt.title('Vm')
plt.plot(t, vm.vector)
plt.subplot(222)
plt.title('Conductance')
plt.plot(t, gK.vector, label='GK')
plt.plot(t, gNa.vector, label='GNa')
plt.legend()
plt.subplot(223)
ma = moose.element('%s/na/gateX' % (comp.path)).tableA
mb = moose.element('%s/na/gateX' % (comp.path)).tableB
ha = moose.element('%s/na/gateY' % (comp.path)).tableA
hb = moose.element('%s/na/gateY' % (comp.path)).tableB
na = moose.element('%s/k/gateX' % (comp.path)).tableA
nb = moose.element('%s/k/gateX' % (comp.path)).tableB
plt.plot(1/mb, label='tau_m')
plt.plot(1/hb, label='tau_h')
plt.plot(1/nb, label='tau_n')
plt.legend()
plt.subplot(224)
plt.plot(ma/mb, label='m_inf')
plt.plot(ha/hb, label='h_inf')
plt.plot(na/nb, label='n_inf')
plt.legend()
plt.show()
plt.close()
def create_population(container, size):
"""Create a population of `size` single compartmental neurons with
Na and K channels. Also create SpikeGen objects and SynChan
objects connected to these which can act as plug points for
setting up synapses later."""
path = container.path
# Contrast this with
# comps = moose.vec(path+'/soma', size, 'Compartment')
comps = [create_compartment(path+'/soma_%d' % (ii)) for ii in range(size)]
spikegens = []
synchans = []
Em = EREST_ACT + 10.613e-3
initVm_array = [EREST_ACT] * size
Em_array = [Em] * size
# initVm_array = np.random.normal(EREST_ACT, np.abs(EREST_ACT) * 0.1, size)
# Em_array = np.random.normal(Em, np.abs(Em) * 0.1, size)
for comp, initVm, Em in zip(comps, initVm_array, Em_array):
comp.Em = Em
comp.initVm = initVm
synchan = moose.element(comp.path + '/synchan')
synchans.append(synchan)
spikegen = moose.element(comp.path + '/spikegen')
spikegens.append(spikegen)
return {'compartment': comps,
'spikegen': spikegens,
'synchan': synchans}
def make_synapses(spikegen, synchan, delay=5e-3):
"""
Create synapses from spikegens to synchans in a manner similar to
OneToAll connection.
spikegen: list of spikegen objects
These are sources of synaptic event messages.
synchan: list of synchan objects
These are the targets of the synaptic event messages.
delay: mean delay of synaptic transmission.
Individual delays are normally distributed with sd=0.1*mean.
"""
scount = len(spikegen)
for ii, sid in enumerate(synchan):
s = moose.SynChan(sid)
sh = moose.SimpleSynHandler( sid.path + "/synh" )
moose.connect( sh, "activationOut", s, "activation" )
sh.synapse.num = scount
delay_list = np.random.normal(delay, delay*0.1, scount)
# print delay_list
for jj in range(scount):
sh.synapse[jj].delay = delay_list[jj]
# Connect all spikegens to this synchan except that from
# same compartment - we assume if parents are same the two belong to the same compartment
if s.parent.path != spikegen[jj].parent.path:
m = moose.connect(spikegen[jj], 'spikeOut', moose.element(sh.path + '/synapse'), 'addSpike')
def two_populations(size=2):
"""An example with two population connected via synapses."""
net = moose.Neutral('network2')
pop_a = create_population(moose.Neutral('/network2/pop_A'), size)
pop_b = create_population(moose.Neutral('/network2/pop_B'), size)
make_synapses(pop_a['spikegen'], pop_b['synchan'])
make_synapses(pop_b['spikegen'], pop_a['synchan'])
pulse = moose.PulseGen('/network2/net2_pulse')
pulse.firstLevel = 1e-9
pulse.firstDelay = 0.05 # disable the pulsegen
pulse.firstWidth = 0.02
moose.connect(pulse, 'output', pop_a['compartment'][0], 'injectMsg')
data = moose.Neutral('/data')
vm_a = [moose.Table('/data/net2_Vm_A_%d' % (ii)) for ii in range(size)]
for tab, comp in zip(vm_a, pop_a['compartment']):
moose.connect(tab, 'requestOut', comp, 'getVm')
vm_b = [moose.Table('/data/net2_Vm_B_%d' % (ii)) for ii in range(size)]
for tab, comp in zip(vm_b, pop_b['compartment']):
moose.connect(tab, 'requestOut', comp, 'getVm')
gksyn_a = [moose.Table('/data/net2_Gk_syn_a_%d' % (ii)) for ii in range(size)]
for tab, synchan in zip(gksyn_a, pop_a['synchan']):
moose.connect(tab, 'requestOut', synchan, 'getGk')
gksyn_b = [moose.Table('/data/net2_Gk_syn_b_%d' % (ii)) for ii in range(size)]
for tab, synchan in zip(gksyn_b, pop_b['synchan']):
moose.connect(tab, 'requestOut', synchan, 'getGk')
pulsetable = moose.Table('/data/net2_pulse')
pulsetable.connect('requestOut', pulse, 'getOutputValue')
return {'vm_a': vm_a,
'vm_b': vm_b,
'gksyn_a': gksyn_a,
'gksyn_b': gksyn_b,
'pulse': pulsetable,}
def single_population(size=2):
"""Example of a single population where each cell is connected to
every other cell.
Creates a network of single compartmental cells under /network1 and a pulse generaor
"""
net = moose.Neutral('network1')
pop = create_population(moose.Neutral('/network1'), size)
make_synapses(pop['spikegen'], pop['synchan'])
pulse = moose.PulseGen('/network1/net1_pulse')
pulse.firstLevel = 1e-9
pulse.firstDelay = 0.05
pulse.firstWidth = 0.02
moose.connect(pulse, 'output', pop['compartment'][0], 'injectMsg')
data = moose.Neutral('/data')
vm = [moose.Table('/data/net1_Vm_%d' % (ii)) for ii in range(size)]
for tab, comp in zip(vm, pop['compartment']):
moose.connect(tab, 'requestOut', comp, 'getVm')
gksyn = [moose.Table('/data/net1_Gk_syn_%d' % (ii)) for ii in range(size)]
for tab, synchan in zip(gksyn, pop['synchan']):
moose.connect(tab, 'requestOut', synchan, 'getGk')
pulsetable = moose.Table('/data/net1_pulse')
pulsetable.connect('requestOut', pulse, 'getOutputValue')
return {'vm': vm,
'gksyn': gksyn,
'pulse': pulsetable,}
inited = False
def assign_clocks(model_container_list, simdt, plotdt):
"""
Assign clocks to elements under the listed paths.
This should be called only after all model components have been
created. Anything created after this will not be scheduled.
"""
global inited
# `inited` is for avoiding double scheduling of the same object
if not inited:
print(('SimDt=%g, PlotDt=%g' % (simdt, plotdt)))
moose.setClock(0, simdt)
moose.setClock(1, simdt)
moose.setClock(2, simdt)
moose.setClock(3, simdt)
moose.setClock(4, plotdt)
for path in model_container_list:
print(('Scheduling elements under:', path))
moose.useClock(0, '%s/##[TYPE=Compartment]' % (path), 'init')
moose.useClock(1, '%s/##[TYPE=Compartment]' % (path), 'process')
moose.useClock(2, '%s/##[TYPE=SynChan],%s/##[TYPE=HHChannel]' % (path, path), 'process')
moose.useClock(3, '%s/##[TYPE=SpikeGen],%s/##[TYPE=PulseGen]' % (path, path), 'process')
moose.useClock(4, '/data/##[TYPE=Table]', 'process')
inited = True
moose.reinit()
def main():
"""
A demo to create a network of single compartmental neurons connected
via alpha synapses. This is same as compartment_net.py except that
we avoid ematrix and use single melements.
"""
# test_compartment() # this calls assign_clocks - after which nothing else will be scheduled.
simtime = 0.1
simdt = 0.25e-5
plotdt = 0.25e-3
size = 2
data1 = single_population(size=size)
data2 = two_populations(size=size)
assign_clocks(['/network1', '/network2'], simdt, plotdt)
# assign_clocks(['/network1'], simdt, plotdt)
moose.start(simtime)
plt.figure(1)
plt.suptitle('Single population')
plt.subplot(211)
for vm in data1['vm']:
t = np.linspace(0, simtime, len(vm.vector))
plt.plot(t, vm.vector, label=vm.path)
plt.plot(np.linspace(0, simtime, len(data1['pulse'].vector)), data1['pulse'].vector * 1e6, label='Inject(uA)')
plt.legend()
plt.subplot(212)
for gk in data1['gksyn']:
t = np.linspace(0, simtime, len(gk.vector))
plt.plot(t, gk.vector, label=gk.path)
plt.legend()
plt.figure(2)
plt.suptitle('Two populations')
plt.subplot(221)
for vm in data2['vm_a']:
t = np.linspace(0, simtime, len(vm.vector))
plt.plot(t, vm.vector, label=vm.path)
plt.plot(np.linspace(0, simtime, len(data2['pulse'].vector)), data2['pulse'].vector*1e6, label='Inject(uA)')
plt.legend()
plt.subplot(223)
for vm in data2['vm_b']:
t = np.linspace(0, simtime, len(vm.vector))
plt.plot(t, vm.vector, label=vm.path)
plt.legend()
plt.subplot(222)
for gk in data2['gksyn_a']:
t = np.linspace(0, simtime, len(gk.vector))
plt.plot(t, gk.vector, label=gk.path)
plt.legend()
plt.subplot(224)
for gk in data2['gksyn_b']:
t = np.linspace(0, simtime, len(gk.vector))
plt.plot(t, gk.vector, label=gk.path)
plt.legend()
plt.show()
#
# compartment_net_no_array.py ends here
if __name__ == '__main__':
main()
| BhallaLab/moose-examples | snippets/compartment_net_no_array.py | Python | gpl-2.0 | 15,391 | [
"MOOSE"
] | a59b0437c7b9ccbf5dd962e5f8ba3a8f4032b603cee9eb82ca5821bcf17aab51 |
#!/usr/bin/env python3
import os, re
import pickle
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from netCDF4 import Dataset
from datetime import datetime
# --------------------------------------------------------------------------------
def ncdf_2_df(nc_fname):
# extract the site name from the basename of the filepath
site_name = get_site_label(nc_fname)
# open connection to netCDF file
nc_con = Dataset(nc_fname, 'r')
# extract all variable labels that have values
headers = nc_con.variables.keys()
# get timestream
time_stamp = create_timestream(nc_con.variables['time'])
# extract key and value pairs as a list of dicts
dict_list = [get_value(nc_con, hd) for hd in headers] \
+ [site_name, time_stamp]
# merge list into one dictionary
one_dict = {key: val for dic in dict_list \
for (key, val) in dic.items() \
if key not in ['x', 'y', 'time']}
# return a pandas dataframe to the user
index_list = ['DT', 'site', 'elevation', 'latitude', 'longitude', 'reference_height']
return pd.DataFrame(one_dict).set_index(index_list)
# --------------------------------------------------------------------------------
def get_site_label(filename):
# extract the site name from the basename of the filepath
site_lab = re.search('.+?(?=Fluxnet)', \
os.path.basename(filename)).group(0)
# return a dict of the datetime stamps
return {'site': site_lab}
# --------------------------------------------------------------------------------
def create_timestream(nc_var):
# get date timestamp of origin
str_origin = re.search('[0-9].*$', nc_var.units).group(0)
# convert string to a datetime format
dt_origin = datetime.strptime(str_origin, "%Y-%m-%d %H:%M:%S")
# determine the timestep
if dt_origin.minute is 0:
tstep = 60
else:
tstep = dt_origin.minute
# create a range of timestamp from the origin date for the length of
# of the time-series at the specified frequency
time_stamp = pd.date_range(dt_origin, freq="{0}min".format(tstep), \
periods=nc_var.shape[0])
# return a dict of the datetime stamps
return {'DT': time_stamp}
# --------------------------------------------------------------------------------
def get_value(nc_con, label):
# extract values as a numpy array
values = np.squeeze(nc_con.variables[label][:])
# return a (key, value) dict
return {label: values}
# --------------------------------------------------------------------------------
def quick_test(df_test, flux='NEE'):
# testing stage here
print(df_test.head(10))
plt.figure(figsize=(10, 5))
ax1 = plt.subplot(111)
ax2 = ax1.twinx()
ax1.plot_date(df_test.index, df_test[flux], 'r-', lw=1)
ax2.plot_date(df_test.index, df_test[flux + '_qc'], '-', alpha=0.3)
ax1.set_zorder(ax2.get_zorder() + 1)
ax1.patch.set_visible(False)
plt.show()
return 1
# --------------------------------------------------------------------------------
def main():
flux_fplist = [os.path.join(dp, f) for (dp, _, fn) in os.walk(DIRPATH) \
for f in fn if f.endswith("nc")]
flux_dflist = [ncdf_2_df(fp) for fp in flux_fplist]
pickle.dump(flux_dflist, open(DIRPATH + SAVEPATH, 'wb'), protocol=2)
if __name__ == "__main__":
DIRPATH = os.path.expanduser("~/Work/Research_Work/Drought_Workshop/PALS_site_datasets/flux/")
SAVEPATH = "fluxnet_raw_dataframes.pkl"
main()
# old code
#def create_timestream(nc_var):
#
# # get date timestamp of origin
# str_origin = re.search('[0-9].*$', nc_var.units).group(0)
# # convert string to a datetime format
# dt_origin = datetime.strptime(str_origin, "%Y-%m-%d %H:%M:%S")
#
# # convert the seconds since origin to a datetime stamp
# t_labels = [dt_origin + timedelta(seconds=sec_i) \
# for sec_i in nc_var[:]]
#
# # return a dictionary of the datetime stamps
# return {'DT': t_labels}
| rhyswhitley/flux_learner | src/ncdf_to_df.py | Python | cc0-1.0 | 4,130 | [
"NetCDF"
] | 9f7ec48dde251e131ccb7c2bfdcb2e037360cdec5e1184e6b6e3dbf7b5f93798 |
import unittest
import datetime
import json
from mock import MagicMock
from DIRAC.DataManagementSystem.Agent.RequestOperations.ReplicateAndRegister import ReplicateAndRegister
from DIRAC.DataManagementSystem.Agent.RequestOperations.MoveReplica import MoveReplica
from DIRAC.RequestManagementSystem.Client.File import File
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.RequestManagementSystem.Client.Request import Request
class ReqOpsTestCase( unittest.TestCase ):
""" Base class for the clients test cases
"""
def setUp( self ):
fcMock = MagicMock()
ftsMock = MagicMock()
self.rr = ReplicateAndRegister()
self.rr.fc = fcMock
self.rr.ftsClient = ftsMock
def tearDown( self ):
pass
#############################################################################
class MoveReplicaSuccess( ReqOpsTestCase ):
def setUp(self):
self.op = Operation()
self.op.Type = "MoveFile"
self.op.SourceSE = "%s,%s" % ( "sourceSE1", "sourceSE2" )
self.op.TargetSE = "%s,%s" % ( "targetSE1", "targetSE2" )
self.File = File()
self.File.LFN = '/cta/file1'
self.File.Size = 2
self.File.Checksum = '011300a2'
self.File.ChecksumType = "adler32"
self.op.addFile( self.File )
self.req = Request()
self.req.addOperation( self.op )
self.mr = MoveReplica( self.op )
self.mr.dm = MagicMock()
self.mr.fc = MagicMock()
# This test needs to be fixed. It currently fails because StorageElement is not mocked
'''def test__dmTransfer( self ):
successful = {}
for sourceSE in self.op.sourceSEList:
successful[sourceSE] = 'dips://' + sourceSE.lower() + ':9148/DataManagement/StorageElement' + self.File.LFN
res = {'OK': True, 'Value': {'Successful': {self.File.LFN : successful}, 'Failed': {}}}
self.mr.dm.getActiveReplicas.return_value = res
res = {'OK': True, 'Value': {'Successful': {self.File.LFN : {'register': 0.1228799819946289, 'replicate': 9.872732877731323}}, 'Failed': {}}}
self.mr.dm.replicateAndRegister.return_value = res
res = self.mr.dmTransfer( self.File )
self.assertTrue( res['OK'] )
self.assertEqual( self.mr.operation.__files__[0].Status, 'Waiting' )
self.assertEqual( self.mr.operation.Status, 'Waiting' )
self.assertEqual( self.mr.request.Status, 'Waiting' )'''
def test__dmRemoval( self ):
res = {'OK': True, 'Value': {'Successful': { self.File.LFN : {'DIRACFileCatalog': True}}, 'Failed': {}}}
self.mr.dm.removeReplica.return_value = res
toRemoveDict = {self.File.LFN: self.File}
targetSEs = self.op.sourceSEList
res = self.mr.dmRemoval( toRemoveDict, targetSEs )
self.assertTrue( res['OK'] )
resvalue = dict( [ ( targetSE, '' ) for targetSE in targetSEs ] )
self.assertEqual( res['Value'], {self.File.LFN: resvalue} )
self.assertEqual( self.mr.operation.__files__[0].Status, 'Done' )
self.assertEqual( self.mr.operation.Status, 'Done' )
self.assertEqual( self.mr.request.Status, 'Done' )
class MoveReplicaFailure( ReqOpsTestCase ):
def setUp( self ):
self.op = Operation()
self.op.Type = "MoveReplica"
self.op.SourceSE = "%s,%s" % ( "sourceSE1", "sourceSE2" )
self.op.TargetSE = "%s,%s" % ( "targetSE1", "targetSE2" )
self.File = File()
self.File.LFN = '/cta/file1'
self.File.Size = 2
self.File.Checksum = '011300a2'
self.File.ChecksumType = "adler32"
self.op.addFile( self.File )
self.req = Request()
self.req.addOperation( self.op )
self.mr = MoveReplica( self.op )
self.mr.dm = MagicMock()
self.mr.fc = MagicMock()
self.mr.ci = MagicMock()
def test__dmTransfer( self ):
successful = {}
for sourceSE in self.op.sourceSEList:
successful[sourceSE] = 'dips://' + sourceSE.lower() + ':9148/DataManagement/StorageElement' + self.File.LFN
res = {'OK': True, 'Value': ({self.File.LFN: successful}, [])}
self.mr.ci._getCatalogReplicas.return_value = res
res = {'OK': True, 'Value': {'MissingAllReplicas': {}, 'NoReplicas': {}, 'MissingReplica': {}, 'SomeReplicasCorrupted': {}, 'AllReplicasCorrupted': {}}}
self.mr.ci.compareChecksum.return_value = res
res = {'OK': True, 'Value': {'Successful': {}, 'Failed': {self.File.LFN : 'Unable to replicate file'}}}
self.mr.dm.replicateAndRegister.return_value = res
res = self.mr.dmTransfer( self.File )
self.assertFalse( res['OK'] )
self.assertEqual( self.mr.operation.__files__[0].Status, 'Waiting' )
self.assertEqual( self.mr.operation.Status, 'Waiting' )
self.assertEqual( self.mr.request.Status, 'Waiting' )
def test__dmRemoval( self ):
res = {'OK': True, 'Value': {'Successful': {}, 'Failed': {self.File.LFN: 'Write access not permitted for this credential'}}}
self.mr.dm.removeReplica.return_value = res
toRemoveDict = {self.File.LFN: self.File}
targetSEs = self.op.sourceSEList
res = self.mr.dmRemoval( toRemoveDict, targetSEs )
self.assertTrue( res['OK'] )
resvalue = dict( [ ( targetSE, 'Write access not permitted for this credential' ) for targetSE in targetSEs ] )
self.assertEqual( res['Value'], {self.File.LFN: resvalue} )
self.assertEqual( self.mr.operation.__files__[0].Status, 'Waiting' )
self.assertEqual( self.mr.operation.Status, 'Waiting' )
self.assertEqual( self.mr.request.Status, 'Waiting' )
class ReplicateAndRegisterSuccess( ReqOpsTestCase ):
def test__addMetadataToFiles( self ):
resMeta = {'OK': True,
'Value': {'Failed': {},
'Successful': {'/lhcb/1.dst': {'ChecksumType': 'AD',
'Checksum': '123456',
'CreationDate': datetime.datetime( 2013, 12, 11, 20, 20, 21 ),
'GUID': '92F9CE97-7A62-E311-8401-0025907FD430',
'Mode': 436,
'ModificationDate': datetime.datetime( 2013, 12, 11, 20, 20, 21 ),
'NumberOfLinks': 1,
'Size': 5846023777,
'Status': '-'},
'/lhcb/2.dst': {'ChecksumType': 'AD',
'Checksum': '987654',
'CreationDate': datetime.datetime( 2013, 12, 12, 6, 26, 52 ),
'GUID': 'DAE4933A-C162-E311-8A6B-003048FEAF04',
'Mode': 436,
'ModificationDate': datetime.datetime( 2013, 12, 12, 6, 26, 52 ),
'NumberOfLinks': 1,
'Size': 5893396937,
'Status': '-'}}}}
self.rr.fc.getFileMetadata.return_value = resMeta
file1 = File()
file1.LFN = '/lhcb/1.dst'
file2 = File()
file2.LFN = '/lhcb/2.dst'
toSchedule = {'/lhcb/1.dst': [file1, ['SE1'], ['SE2', 'SE3']],
'/lhcb/2.dst': [file2, ['SE4'], ['SE5', 'SE6']]}
res = self.rr._addMetadataToFiles( toSchedule )
self.assertTrue(res['OK'])
for lfn in toSchedule:
self.assertEqual( res['Value'][lfn].LFN, lfn )
for attr in ('GUID', 'Size', 'Checksum'):
self.assertEqual( getattr(res['Value'][lfn],attr), resMeta['Value']['Successful'][lfn][attr] )
# AD should be transformed into Adler32
self.assertEqual( res['Value'][lfn].ChecksumType, "ADLER32" )
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase( ReqOpsTestCase )
suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( ReplicateAndRegisterSuccess ) )
suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( MoveReplicaSuccess ) )
suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( MoveReplicaFailure ) )
testResult = unittest.TextTestRunner( verbosity = 2 ).run( suite )
| fstagni/DIRAC | DataManagementSystem/Agent/RequestOperations/test/Test_RequestOperations.py | Python | gpl-3.0 | 8,115 | [
"DIRAC"
] | ce63a51a9a629690ff5fab7e56961d74893fbabd9baf4c53be2bc401703d0b2a |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# rm - backend to remove files/directories in user home
# Copyright (C) 2003-2015 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Module that enables a user to delete files and directories
in his home directory.
It is possible to supply a recursive flag to enable recursive deletes.
"""
import os
import glob
import shared.returnvalues as returnvalues
from shared.base import client_id_dir, invisible_file
from shared.functional import validate_input, REJECT_UNSET
from shared.handlers import correct_handler
from shared.init import initialize_main_variables
from shared.parseflags import verbose, recursive
from shared.validstring import valid_user_path
def signature():
"""Signature of the main function"""
defaults = {
'flags': [''],
'iosessionid': [''],
'path': REJECT_UNSET,
'delete': [''],
'allbox': [''],
}
return ['', defaults]
def main(client_id, user_arguments_dict):
"""Main function used by front end"""
(configuration, logger, output_objects, op_name) = \
initialize_main_variables(client_id)
client_dir = client_id_dir(client_id)
status = returnvalues.OK
defaults = signature()[1]
(validate_status, accepted) = validate_input(user_arguments_dict,
defaults, output_objects, allow_rejects=False)
if not validate_status:
return (accepted, returnvalues.CLIENT_ERROR)
if not correct_handler('POST'):
output_objects.append(
{'object_type': 'error_text', 'text'
: 'Only accepting POST requests to prevent unintended updates'})
return (output_objects, returnvalues.CLIENT_ERROR)
flags = ''.join(accepted['flags'])
pattern_list = accepted['path']
iosessionid = accepted['iosessionid'][-1]
if not client_id:
if not iosessionid.strip() or not iosessionid.isalnum():
# deny
output_objects.append(
{'object_type': 'error_text', 'text'
: 'No sessionid or invalid sessionid supplied!'})
return (output_objects, returnvalues.CLIENT_ERROR)
base_dir_no_sessionid = \
os.path.realpath(configuration.webserver_home) + os.sep
base_dir = \
os.path.realpath(os.path.join(configuration.webserver_home,
iosessionid)) + os.sep
if not os.path.isdir(base_dir):
# deny
output_objects.append({'object_type': 'error_text', 'text'
: 'Invalid sessionid!'})
return (output_objects, returnvalues.CLIENT_ERROR)
if not valid_user_path(base_dir, base_dir_no_sessionid, True):
# deny
output_objects.append({'object_type': 'error_text', 'text'
: 'Invalid sessionid!'})
return (output_objects, returnvalues.CLIENT_ERROR)
else:
# TODO: this is a hack to allow truncate - fix 'put' empty files
# Please note that base_dir must end in slash to avoid access to other
# user dirs when own name is a prefix of another user name
base_dir = \
os.path.abspath(os.path.join(configuration.user_home,
client_dir)) + os.sep
if verbose(flags):
for flag in flags:
output_objects.append({'object_type': 'text', 'text'
: '%s using flag: %s' % (op_name,
flag)})
for pattern in pattern_list:
# Check directory traversal attempts before actual handling to avoid
# leaking information about file system layout while allowing
# consistent error messages
unfiltered_match = glob.glob(base_dir + pattern)
match = []
for server_path in unfiltered_match:
real_path = os.path.abspath(server_path)
if not valid_user_path(real_path, base_dir, True):
# out of bounds - save user warning for later to allow
# partial match:
# ../*/* is technically allowed to match own files.
logger.warning('%s tried to %s restricted path %s ! ( %s)'
% (client_id, op_name, real_path, pattern))
continue
match.append(real_path)
# Now actually treat list of allowed matchings and notify if no
# (allowed) match
if not match:
output_objects.append({'object_type': 'file_not_found',
'name': pattern})
status = returnvalues.FILE_NOT_FOUND
for real_path in match:
relative_path = real_path.replace(base_dir, '')
if verbose(flags):
output_objects.append({'object_type': 'file', 'name'
: relative_path})
# Make it harder to accidentially delete too much - e.g. do not delete
# VGrid files without explicit selection of subdir contents
if real_path == os.path.abspath(base_dir):
output_objects.append({'object_type': 'warning', 'text'
: "You're not allowed to delete your entire home directory!"
})
status = returnvalues.CLIENT_ERROR
continue
if os.path.islink(real_path):
output_objects.append({'object_type': 'warning', 'text'
: "You're not allowed to delete entire %s shared dirs!"
% configuration.site_vgrid_label
})
status = returnvalues.CLIENT_ERROR
continue
if os.path.isdir(real_path) and recursive(flags):
# bottom up traversal of the file tree since rmdir is limited to
# empty dirs
for (root, dirs, files) in os.walk(real_path,
topdown=False):
for name in files:
path = os.path.join(root, name)
relative_path = path.replace(base_dir, '')
# Traversal may find additional invisible files to skip
if invisible_file(name):
continue
if verbose(flags):
output_objects.append({'object_type': 'file'
, 'name': relative_path})
try:
os.remove(path)
except Exception, exc:
output_objects.append({'object_type'
: 'error_text', 'text'
: "%s: '%s': %s" % (op_name,
relative_path, exc)})
logger.error("%s: failed on '%s': %s"
% (op_name, relative_path, exc))
status = returnvalues.SYSTEM_ERROR
for name in dirs:
path = os.path.join(root, name)
relative_path = path.replace(base_dir, '')
if verbose(flags):
output_objects.append({'object_type': 'file'
, 'name': relative_path})
try:
os.rmdir(path)
except Exception, exc:
output_objects.append({'object_type'
: 'error_text', 'text'
: "%s: '%s': %s" % (op_name,
relative_path, exc)})
logger.error("%s: failed on '%s': %s"
% (op_name, relative_path, exc))
status = returnvalues.SYSTEM_ERROR
# Finally remove base directory
relative_path = real_path.replace(base_dir, '')
try:
os.rmdir(real_path)
except Exception, exc:
output_objects.append({'object_type': 'error_text',
'text': "%s: '%s': %s" % (op_name,
relative_path, exc)})
logger.error("%s: failed on '%s': %s" % (op_name,
relative_path, exc))
status = returnvalues.SYSTEM_ERROR
else:
relative_path = real_path.replace(base_dir, '')
try:
os.remove(real_path)
except Exception, exc:
output_objects.append({'object_type': 'error_text',
'text': "%s: '%s': %s" % (op_name,
relative_path, exc)})
logger.error("%s: failed on '%s'" % (op_name,
relative_path))
status = returnvalues.SYSTEM_ERROR
continue
return (output_objects, status)
| heromod/migrid | mig/shared/functionality/rm.py | Python | gpl-2.0 | 9,938 | [
"Brian"
] | 25ed74d611dd523973b9ca7a639a796e1369298957f4d50a87afa3d470f78f9f |
""" EC2Endpoint class is the implementation of the EC2 interface to
a cloud endpoint
"""
import os
import json
import boto3
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.File import makeGuid
from DIRAC.Resources.Cloud.Endpoint import Endpoint
class EC2Endpoint(Endpoint):
def __init__(self, parameters=None):
super(EC2Endpoint, self).__init__(parameters=parameters)
# logger
self.log = gLogger.getSubLogger("EC2Endpoint")
self.valid = False
result = self.initialize()
if result["OK"]:
self.log.debug("EC2Endpoint created and validated")
self.valid = True
else:
self.log.error(result["Message"])
def initialize(self):
availableParams = {
"RegionName": "region_name",
"AccessKey": "aws_access_key_id",
"SecretKey": "aws_secret_access_key",
"EndpointUrl": "endpoint_url", # EndpointUrl is optional
}
connDict = {}
for var in availableParams:
if var in self.parameters:
connDict[availableParams[var]] = self.parameters[var]
try:
self.__ec2 = boto3.resource("ec2", **connDict)
except Exception as e:
self.log.exception("Failed to connect to EC2")
errorStatus = "Can't connect to EC2: " + str(e)
return S_ERROR(errorStatus)
result = self.__loadInstanceType()
if not result["OK"]:
return result
result = self.__checkConnection()
return result
def __loadInstanceType(self):
currentDir = os.path.dirname(os.path.abspath(__file__))
instanceTypeFile = os.path.join(currentDir, "ec2_instance_type.json")
try:
with open(instanceTypeFile, "r") as f:
self.__instanceTypeInfo = json.load(f)
except Exception as e:
self.log.exception("Failed to fetch EC2 instance details")
errmsg = "Exception loading EC2 instance type info: %s" % e
self.log.error(errmsg)
return S_ERROR(errmsg)
return S_OK()
def __checkConnection(self):
"""
Checks connection status by trying to list the images.
:return: S_OK | S_ERROR
"""
try:
self.__ec2.images.filter(Owners=["self"])
except Exception as e:
self.log.exception("Failed to list EC2 images")
return S_ERROR(e)
return S_OK()
def createInstances(self, vmsToSubmit):
outputDict = {}
for nvm in range(vmsToSubmit):
instanceID = makeGuid()[:8]
result = self.createInstance(instanceID)
if result["OK"]:
ec2Id, nodeDict = result["Value"]
self.log.debug("Created VM instance %s/%s" % (ec2Id, instanceID))
outputDict[ec2Id] = nodeDict
else:
self.log.error("Create EC2 instance error:", result["Message"])
break
return S_OK(outputDict)
def createInstance(self, instanceID=""):
if not instanceID:
instanceID = makeGuid()[:8]
self.parameters["VMUUID"] = instanceID
self.parameters["VMType"] = self.parameters.get("CEType", "EC2")
createNodeDict = {}
# Image
if "ImageID" in self.parameters and "ImageName" not in self.parameters:
try:
images = self.__ec2.images.filter(Filters=[{"Name": "name", "Values": [self.parameters["ImageName"]]}])
imageId = None
for image in images:
imageId = image.id
break
except Exception as e:
self.log.exception("Exception when get ID from image name %s:" % self.parameters["ImageName"])
return S_ERROR("Failed to get image for Name %s" % self.parameters["ImageName"])
if imageId is None:
return S_ERROR("Image name %s not found" % self.parameters["ImageName"])
elif "ImageID" in self.parameters:
try:
self.__ec2.images.filter(ImageIds=[self.parameters["ImageID"]])
except Exception as e:
self.log.exception("Failed to get EC2 image list")
return S_ERROR("Failed to get image for ID %s" % self.parameters["ImageID"])
imageId = self.parameters["ImageID"]
else:
return S_ERROR("No image specified")
createNodeDict["ImageId"] = imageId
# Instance type
if "FlavorName" not in self.parameters:
return S_ERROR("No flavor specified")
instanceType = self.parameters["FlavorName"]
createNodeDict["InstanceType"] = instanceType
# User data
result = self._createUserDataScript()
if not result["OK"]:
return result
createNodeDict["UserData"] = str(result["Value"])
# Other params
for param in ["KeyName", "SubnetId", "EbsOptimized"]:
if param in self.parameters:
createNodeDict[param] = self.parameters[param]
self.log.info("Creating node:")
for key, value in createNodeDict.items():
self.log.verbose("%s: %s" % (key, value))
# Create the VM instance now
try:
instances = self.__ec2.create_instances(MinCount=1, MaxCount=1, **createNodeDict)
except Exception as e:
self.log.exception("Failed to create EC2 instance")
return S_ERROR("Exception in ec2 create_instances: %s" % e)
if len(instances) < 1:
errmsg = "ec2 create_instances failed to create any VM"
self.log.error(errmsg)
return S_ERROR(errmsg)
# Create the name in tags
ec2Id = instances[0].id
tags = [{"Key": "Name", "Value": "DIRAC_%s" % instanceID}]
try:
self.__ec2.create_tags(Resources=[ec2Id], Tags=tags)
except Exception as e:
self.log.exception("Failed to tag EC2 instance")
return S_ERROR("Exception setup name for %s: %s" % (ec2Id, e))
# Properties of the instance
nodeDict = {}
# nodeDict['PublicIP'] = publicIP
nodeDict["InstanceID"] = instanceID
if instanceType in self.__instanceTypeInfo:
nodeDict["NumberOfProcessors"] = self.__instanceTypeInfo[instanceType]["vCPU"]
nodeDict["RAM"] = self.__instanceTypeInfo[instanceType]["Memory"]
else:
nodeDict["NumberOfProcessors"] = 1
return S_OK((ec2Id, nodeDict))
def stopVM(self, nodeID, publicIP=""):
"""
Given the node ID it gets the node details, which are used to destroy the
node making use of the libcloud.openstack driver. If three is any public IP
( floating IP ) assigned, frees it as well.
:Parameters:
**uniqueId** - `string`
openstack node id ( not uuid ! )
**public_ip** - `string`
public IP assigned to the node if any
:return: S_OK | S_ERROR
"""
try:
self.__ec2.Instance(nodeID).terminate()
except Exception as e:
self.log.exception("Failed to terminate EC2 instance")
return S_ERROR("Exception terminate instance %s: %s" % (nodeID, e))
return S_OK()
| DIRACGrid/DIRAC | src/DIRAC/Resources/Cloud/EC2Endpoint.py | Python | gpl-3.0 | 7,409 | [
"DIRAC"
] | 313fbc7c7dc3ec69952d58136d979dd2691a06f37dd6c1214a7ab08186451d5b |
__author__ = 'Christo Robison'
from PyQt4 import QtGui, QtCore
import sys
import HyperSpecGui
import numpy as np
from scipy import ndimage
from scipy import fftpack as ft
import matplotlib
matplotlib.use('QT4Agg')
import matplotlib.pylab as pylab
import matplotlib.pyplot as plt
import time
import pyqtgraph
import hs_imFFTW as hs
from spectral import *
spectral.settings.WX_GL_DEPTH_SIZE = 16
'''
background = 0
red = 1 WBC
green = 2 RBC
pink = 3 nuclear material
yellow = 4 ignore'''
class HyperSpecApp(QtGui.QMainWindow, HyperSpecGui.Ui_MainWindow):
    """Main application window for browsing hyperspectral image cubes.

    Mixes the Qt Designer generated layout (HyperSpecGui.Ui_MainWindow)
    into a QMainWindow and wires up the pyqtgraph image/plot widgets
    created by setupUi().
    """

    def __init__(self, parent=None):
        """Build the window, connect signals, and seed the image view.

        :param parent: optional parent QWidget passed through to QMainWindow.
        """
        # Global pyqtgraph config must be set before the widgets are
        # instantiated by setupUi() below.
        pyqtgraph.setConfigOption('background', 'k')  # before loading widget
        super(HyperSpecApp, self).__init__(parent)
        self.setupUi(self)
        # Re-run the (demo) update routine whenever the button is pressed.
        # NOTE(review): the slot name 'update' shadows QWidget.update() —
        # confirm this override is intentional.
        self.updateBtn.clicked.connect(self.update)
        #self.view = pyqtgraph.ViewBox(self.gView0)
        #self.scene = pyqtgraph.ImageItem(self.view)
        # Four-stop black/red/yellow/white colour map (built but not yet
        # applied — the setColorMap call below is commented out).
        STEPS = np.array([0.0, 0.2, 0.6, 1.0])
        CLRS = ['k','r','y','w']
        clrmp = pyqtgraph.ColorMap(STEPS, np.array([pyqtgraph.colorTuple(pyqtgraph.Color(c)) for c in CLRS]))
        # Placeholder random cube so ImageView2 has something to display
        # at startup.
        data = np.random.normal(size=(100, 200, 200))
        #imv = pyqtgraph.image(data)
        self.ImageView2.setImage(data)
        #self.ImageView2.ui.histogram.gradient.setColorMap(clrmp)
        #self.img_array = np.zeros((1000, CHUNKSZ / 2 + 1))
        # bipolar colormap
        #pos = np.array([0., 1., 0.5, 0.25, 0.75])
        #color = np.array([[0, 255, 255, 255], [255, 255, 0, 255], [0, 0, 0, 255], (0, 0, 255, 255), (255, 0, 0, 255)],
        #                 dtype=np.ubyte)
        #cmap = pyqtgraph.ColorMap(pos, color)
        #lut = cmap.getLookupTable(0.0, 1.0, 256)
        #self.img.setLookupTable(lut)
        # Show a light grid on both plot panes (x, y, alpha).
        self.pltView1.plotItem.showGrid(True, True, 0.7)
        self.pltView2.plotItem.showGrid(True, True, 0.7)

    def update(self):
        """Slot for updateBtn: times the (currently stubbed-out) redraw
        and prints the elapsed time in milliseconds."""
        # NOTE(review): time.clock() was removed in Python 3.8; this file
        # targets the Python 2 / PyQt4 era — use time.perf_counter() if
        # this is ever ported.
        t1 = time.clock()
        points = 100
        # Temp demo code
        #X = np.arange(points)
        #Y = np.sin(np.arange(points) / points * 3 * np.pi + time.time())
        #C = pyqtgraph.hsvColor(time.time() / 5 % 1, alpha=.5)
        #pen = pyqtgraph.mkPen(color=C, width=10)
        #self.pltView0.plot(X, Y, pen=pen, clear=True)
        print("update took %.02f ms" % ((time.clock() - t1) * 1000))
        #if self.chkMore.isChecked():
        #QtCore.QTimer.singleShot(1, self.update)  # QUICKLY repeat
def runKmeans(view, inputImg, clusters=6, iters=300):
    """Run k-means on an image cube and plot each cluster centre.

    Returns the (class_map, centers) pair produced by spectral.kmeans;
    every centre spectrum is added as a curve to *view*.
    """
    class_map, centers = kmeans(inputImg, clusters, iters)
    for center in centers:
        view.plot(center)
    return (class_map, centers)
def adjustLabels(dcb, bkgnd=0.0, unknown=4.0):
    """Swap the background (0.0) and 'ignore' (4.0) class codes in-place.

    5.0 is used as a scratch value during the swap, so any pre-existing
    5.0 entries end up as 4.0 (same as the original implementation).
    Returns the same, mutated, array.  NOTE(review): *bkgnd* and
    *unknown* are accepted but the swapped codes are hard-wired.
    """
    for old, new in ((0.0, 5.0), (4.0, 0.0), (5.0, 4.0)):
        dcb[dcb == old] = new
    return dcb
def runSpectral(dcb, gt, title='dcb'):
    """Gaussian-classify *dcb*, plot, and return (results, errors).

    *results* is the class map masked to labelled ground-truth pixels;
    *errors* keeps only the masked pixels that disagree with *gt*.
    """
    _, _, class_map = runGauss(dcb, gt)
    masked, errors = genResults(class_map, gt)
    displayPlots(class_map, gt, masked, errors, title + " Gaussian Classifer")
    return (masked, errors)
def runPCA(dcb, gt, title='dcb'):
    """Project *dcb* onto principal components (99.9% variance kept),
    then Gaussian-classify the projection.

    Returns (results, errors, pc) where *pc* is the full, unreduced
    PCA model so callers can transform other cubes with it.
    """
    pc = principal_components(dcb)
    projected = pc.reduce(fraction=0.999).transform(dcb)
    _, _, class_map = runGauss(projected, gt)
    masked, errors = genResults(class_map, gt)
    displayPlots(class_map, gt, masked, errors, title + " PCA Gaussian Classifer")
    return (masked, errors, pc)
def genResults(clmap, gt):
    """Mask *clmap* to labelled ground truth and derive an error map.

    Returns (masked, errors): *masked* zeroes every pixel whose ground
    truth is 0 (unlabelled); *errors* retains only masked pixels that
    disagree with the ground truth.
    """
    labelled = gt != 0
    masked = clmap * labelled
    mismatch = masked != gt
    return (masked, masked * mismatch)
def runGauss(dcb, gt):
    """Train a Gaussian classifier on *gt*-labelled pixels of *dcb*
    and classify the whole cube.

    Returns (training_classes, classifier, class_map).
    """
    training = create_training_classes(dcb, gt)
    classifier = GaussianClassifier(training, min_samples=200)
    class_map = classifier.classify_image(dcb)
    return (training, classifier, class_map)
def displayPlots(clmap, gt, gtresults=None, gtErrors=None, title='classifier'):
    """Show and save PNGs for the class map, the GT-masked map and the
    error map.

    If both *gtresults* and *gtErrors* are omitted they are derived from
    *clmap*/*gt* via genResults().  Files are named "<title><suffix>.png"
    in the current working directory.
    """
    if gtresults is None and gtErrors is None:
        gtresults, gtErrors = genResults(clmap, gt)
    panels = (
        (clmap, " results"),
        (gtresults, " gt Results"),
        (gtErrors, " Error"),
    )
    for image, suffix in panels:
        imshow(classes=image, title=title + suffix)
        pylab.savefig(title + suffix + ".png", bbox_inches='tight')
def cleanResults(inputImg, cls_iter=1, open_iter=1):
    """Morphologically de-noise a binary class mask.

    Parameters
    ----------
    inputImg : binary ndarray marking one class's membership.
    cls_iter : iterations for the closing pass (fills small holes).
    open_iter : iterations for the opening pass (removes small specks).

    Returns
    -------
    (opened, closed) tuple of binary ndarrays.

    Bug fix: the second result was previously computed with
    binary_opening as well, so the "closed" output never actually
    closed anything.  Also renamed locals to stop shadowing the
    built-in open().
    """
    opened = ndimage.binary_opening(inputImg, iterations=open_iter)
    closed = ndimage.binary_closing(inputImg, iterations=cls_iter)
    return (opened, closed)
def combineLabels(rbc, wbc, nuc, bkgd):
    """Merge four binary class masks into one float64 label image.

    Later assignments win on overlapping pixels, so precedence from
    lowest to highest is: background (4.0), RBC (2.0), WBC (1.0),
    nuclear material (3.0).  Pixels in no mask stay 0.0.

    Cleanup: removed a leftover debug print() of the whole array and
    the no-op ``out[out == 0] = 0.0`` assignment.
    """
    out = np.zeros(np.shape(rbc), dtype=np.float64)
    out[bkgd == 1] = 4.0
    out[rbc == 1] = 2.0
    out[wbc == 1] = 1.0
    out[nuc == 1] = 3.0
    return out
def create_rgb(classimg, colormap=None):
    """Render a 2-D class-label image (codes 0.0..4.0) as RGB uint8.

    Each label value is painted with the matching *colormap* row; the
    default palette is black, red, green, magenta, yellow.  Pixels with
    other values stay black.
    """
    if colormap is None:
        colormap = np.array(
            [[0, 0, 0], [255, 0, 0], [0, 255, 0], [255, 0, 255], [255, 255, 0]],
            dtype=np.ubyte)
    rows, cols = np.shape(classimg)
    rgb = np.zeros([rows, cols, 3], dtype=np.uint8)
    for code in range(5):
        rgb[classimg == float(code)] = colormap[code]
    return rgb
if __name__=="__main__":
    # Load a training cube + labels from HDF5, then start the Qt app.
    trainData = hs.getData(filename='D:\-_Hyper_Spec_-\HYPER_SPEC_TRAIN.h5', dat_idx=25*49, lab_idx=49)
    #testData = hs.getData(filename='D:\-_Hyper_Spec_-\HYPER_SPEC_TRAIN.h5')
    app = QtGui.QApplication(sys.argv)
    form = HyperSpecApp()
    form.show()
    form.update() #start with something
    #print("TRAIN " + str(np.shape(trainData['dcb'])))
    # First 25 bands of the data cube; swap to (band, row, col) for display.
    img = trainData['dcb'][:, :, 0:25] #fromn red test 343:370
    img1 = np.swapaxes(img, 2, 0)
    form.ImageView2.setImage(img1)
    form.ImageView2.export("Pre_FFT_Masking_.png")
    #create FFT Plot for paper
    fft_example = hs.hsfft(img1)
    log_fft = np.log2(fft_example)
    aaa = ft.fftshift(log_fft.real)
    form.ImageView3.setImage(aaa) #levels=[np.amin(fft_example.real), np.amax(fft_example.real)+.01])
    form.ImageView3.export("FFT_DCB_.png")
    #v89 = imshow(aaa)
    #aaaa = ImageView.set_data(aaa)
    mask = hs.genMask(offset=41)
    #form.ImageView3.setImage(mask, levels=[np.amin(mask),np.amax(mask)+.0001])
    #ImageView doesn't seem to display binary arrays very well so add a small value.
    # FFT-domain filtering of the cube (hs_imFFTW), then show the real part.
    out_dcb = hs.dcbFilter(img)
    form.ImageView1.setImage(out_dcb.real)
    form.ImageView1.export("Post_FFT_Masking_.png")
    # Swap class codes 0 <-> 4 so background/ignore match the legend above.
    gtbatch = adjustLabels(trainData['classLabels'])
    gt = gtbatch
    #form.ImageView2.setImage(gt)
    # Back to (row, col, band) float32 for the spectral classifiers.
    t = np.swapaxes(out_dcb, 0, 2)
    t = np.swapaxes(t, 0, 1)
    fftImg = t.real.astype(np.float32, copy=False)
    print('SHAPE OF INPUT IMG: ' + str(np.shape(img)))
    print('SHAPE OF FFT OUT: ' + str(np.shape(fftImg)))
    # Unsupervised clustering of filtered vs. raw cubes.
    (m, c) = runKmeans(form.pltView1, fftImg)
    (mm, cc) = runKmeans(form.pltView2, img)
    view_cube(fftImg)
    # Supervised classification, with and without PCA, on both cubes.
    (raw_results, raw_Errors) = runSpectral(img, gt, title="Raw")
    (fft_results, fft_Errors) = runSpectral(fftImg, gt, title="FFT")
    (raw_pc_results, raw_pc_Errors, raw_pc) = runPCA(img, gt, title="Raw")
    (fft_pc_results, fft_pc_Errors, fft_pc) = runPCA(fftImg, gt, title="FFT")
    print('SHAPE of results: ' + str(np.shape(fft_pc_results)))
    print(fft_pc_results)
    xdata = fft_pc.transform(fftImg)
    w = view_nd(xdata[:, :, :5], classes=gt.astype(np.int8, copy=False), title="FFT_DCB PCA Components")
    ydata = fft_pc.transform(img)
    w = view_nd(ydata[:, :, :5], classes=gt.astype(np.int8, copy=False), title="DCB PCA Components")
    # perform mathematical morphology operations to reduce noise in results
    # convert each class to binary images then recombine a the end
    rbc_img = fft_pc_results == 2.0
    wbc_img = fft_pc_results == 1.0
    nuc_img = fft_pc_results == 3.0
    bkg_img = fft_pc_results == 4.0
    (wbc_o, wbc_c) = cleanResults(wbc_img)
    (rbc_o, rbc_c) = cleanResults(rbc_img)
    (nuc_o, nuc_c) = cleanResults(nuc_img)
    (bkg_o, bkg_c) = cleanResults(bkg_img)
    #open_rbc = ndimage.binary_opening(rbc_img)
    #clse_rbc = ndimage.binary_closing(open_rbc)
    #print(rbc_img)
    # NOTE(review): calcAccuracy looks unfinished - it computes
    # class_match but never returns anything and is never called.
    def calcAccuracy(bin, gt, c):
        '''takes binary image of one class and compares it to the ground
        truth of that class. Error is calculated based on weighted empirical error'''
        class_gt = gt[gt == c]
        class_match = class_gt[bin]
        class_err = class_match
    # Recombine the cleaned per-class masks and render them as RGB.
    ti = combineLabels(rbc_o, wbc_o, nuc_o, bkg_img)
    color = np.array([[0, 0, 0], [255, 0, 0], [0, 255, 0], [0, 0, 255], [255, 255, 0]], dtype=np.ubyte)
    tis = create_rgb(ti, color)
    #calculate accuracy for each class.
    v6876 = imshow(tis, title="Cleaned FFT PCA GT Result")
    pylab.savefig("Cleaned FFT PCA GT Result.png", bbox_inches='tight')
    #form.ImageView3.setImage(rbc_c)
    #form.ImageView1.setImage(wbc_c)
    #form.ImageView2.setImage(ti)
    # Spectral-angle-mapper classification using per-class mean spectra.
    fft_classes = create_training_classes(fftImg, gt, True)
    fft_means = np.zeros((len(fft_classes), fftImg.shape[2]), float)
    for (e, g) in enumerate(fft_classes):
        fft_means[e] = g.stats.mean
    fft_angles = spectral_angles(fftImg, fft_means)
    fft_clmap = np.argmin(fft_angles, 2)
    v20 = imshow(classes=((fft_clmap + 1) * (gt != 0)))
    #v9 = plt.imshow(rbc_img) #, title="RBC results")
    #pylab.savefig(("RBC_results.png"), bbox_inches='tight')
    #v10 = imshow(classes=open_rbc, title="RBC open_results")
    #pylab.savefig(("RBC_open_results.png"), bbox_inches='tight')
    #v12 = imshow(classes=clse_rbc, title="RBC_closed results")
    #pylab.savefig(("RBC_closed_results.png"), bbox_inches='tight')
    #subplot = form.matWidget0.getFigure().imshow(clmap)
    #form.matWidget0.
    #STEPS = np.array([0, 1, 2, 3, 4])
    #CLRS = ['k', 'r', 'y', 'b', 'w']
    #clrmp = pyqtgraph.ColorMap(STEPS, np.array([pyqtgraph.colorTuple(pyqtgraph.Color(c)) for c in CLRS]))
    #print(clrmp)
    #form.ImageView2.setImage(clmap)
    #form.ImageView2.ui.histogram.gradient.setColorMap(clrmp)
    #form.img.setImage(clmap,auto_levels=False)
    #form.img.show()
    # Enter the Qt event loop; returns when the window closes.
    app.exec_()
    print("DONE")
print("DONE") | Crobisaur/HyperSpec | Python/HyperSpecApp.py | Python | gpl-3.0 | 10,241 | [
"Gaussian"
] | 5caa552216afe1a0496bb42e1da003d8314a88d9ae2319972e8163b5fd366247 |
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest as ut
import unittest_decorators as utx
import espressomd
import numpy as np
from espressomd.accumulators import Correlator, TimeSeries
from espressomd.observables import ParticleVelocities, ParticleBodyAngularVelocities, ParticlePositions
from tests_common import single_component_maxwell
class BrownianDynamics(ut.TestCase):
    """Tests velocity distributions and diffusion for Brownian Dynamics"""
    # Shared simulation box; open boundaries, no skin, BD integrator.
    system = espressomd.System(box_l=[1.0, 1.0, 1.0])
    system.cell_system.set_domain_decomposition(use_verlet_lists=True)
    system.cell_system.skin = 0
    system.periodicity = [0, 0, 0]
    system.integrator.set_brownian_dynamics()

    @classmethod
    def setUpClass(cls):
        # Fixed seed so the stochastic checks below are reproducible.
        np.random.seed(42)

    def check_velocity_distribution(self, vel, minmax, n_bins, error_tol, kT):
        """check the recorded particle distributions in velocity against a
        histogram with n_bins bins. Drop velocities outside minmax. Check
        individual histogram bins up to an accuracy of error_tol against the
        analytical result for kT."""
        # Each Cartesian component is compared bin-by-bin against the
        # analytical single-component Maxwell distribution.
        for i in range(3):
            hist = np.histogram(
                vel[:, i], range=(-minmax, minmax), bins=n_bins, density=False)
            data = hist[0] / float(vel.shape[0])
            bins = hist[1]
            for j in range(n_bins):
                found = data[j]
                expected = single_component_maxwell(bins[j], bins[j + 1], kT)
                self.assertLessEqual(abs(found - expected), error_tol)

    def test_00_verify_single_component_maxwell(self):
        """Verifies the normalization of the analytical expression."""
        self.assertLessEqual(
            abs(single_component_maxwell(-10, 10, 4.) - 1.), 1E-4)

    def check_vel_dist_global_temp(self, recalc_forces, loops):
        """Test velocity distribution for global temperature parameters.

        Parameters
        ----------
        recalc_forces : :obj:`bool`
            True if the forces should be recalculated after every step.
        loops : :obj:`int`
            Number of sampling loops
        """
        N = 200
        system = self.system
        system.part.clear()
        system.time_step = 1.6
        # Place particles
        system.part.add(pos=np.random.random((N, 3)))
        # Enable rotation if compiled in
        if espressomd.has_features("ROTATION"):
            system.part[:].rotation = [1, 1, 1]
        kT = 1.1
        gamma = 3.5
        system.thermostat.set_brownian(kT=kT, gamma=gamma, seed=41)
        # Warmup
        system.integrator.run(20)
        # Sampling: record velocities (and body-frame angular velocities)
        # of all N particles after every single integration step.
        v_stored = np.zeros((N * loops, 3))
        omega_stored = np.zeros((N * loops, 3))
        for i in range(loops):
            system.integrator.run(1, recalc_forces=recalc_forces)
            v_stored[i * N:(i + 1) * N, :] = system.part[:].v
            if espressomd.has_features("ROTATION"):
                omega_stored[i * N:(i + 1) * N, :] = system.part[:].omega_body
        v_minmax = 5
        bins = 4
        error_tol = 0.01
        self.check_velocity_distribution(
            v_stored, v_minmax, bins, error_tol, kT)
        if espressomd.has_features("ROTATION"):
            self.check_velocity_distribution(
                omega_stored, v_minmax, bins, error_tol, kT)

    def test_vel_dist_global_temp(self):
        """Test velocity distribution for global temperature."""
        self.check_vel_dist_global_temp(False, loops=200)

    def test_vel_dist_global_temp_initial_forces(self):
        """Test velocity distribution for global Brownian parameters,
        when using the initial force calculation.
        """
        self.check_vel_dist_global_temp(True, loops=170)

    @utx.skipIfMissingFeatures("BROWNIAN_PER_PARTICLE")
    def test_05_brownian_per_particle(self):
        """Test Brownian dynamics with particle specific kT and gamma. Covers all combinations of
        particle specific gamma and temp set or not set.
        """
        N = 400
        system = self.system
        system.part.clear()
        system.time_step = 1.9
        system.part.add(pos=np.random.random((N, 3)))
        if espressomd.has_features("ROTATION"):
            system.part[:].rotation = [1, 1, 1]
        kT = 0.9
        gamma = 3.2
        gamma2 = 4.3
        kT2 = 1.5
        system.thermostat.set_brownian(kT=kT, gamma=gamma, seed=41)
        # Set different kT on 2nd half of particles
        system.part[int(N / 2):].temp = kT2
        # Set different gamma on half of the particles (overlap over both kTs)
        if espressomd.has_features("PARTICLE_ANISOTROPY"):
            system.part[int(N / 4):int(3 * N / 4)].gamma = 3 * [gamma2]
        else:
            system.part[int(N / 4):int(3 * N / 4)].gamma = gamma2

        system.integrator.run(50)
        loops = 200

        # Separate sample buffers for the kT half and the kT2 half.
        v_kT = np.zeros((int(N / 2) * loops, 3))
        v_kT2 = np.zeros((int(N / 2 * loops), 3))

        if espressomd.has_features("ROTATION"):
            omega_kT = np.zeros((int(N / 2) * loops, 3))
            omega_kT2 = np.zeros((int(N / 2 * loops), 3))

        for i in range(loops):
            system.integrator.run(1)
            v_kT[int(i * N / 2):int((i + 1) * N / 2),
                 :] = system.part[:int(N / 2)].v
            v_kT2[int(i * N / 2):int((i + 1) * N / 2),
                  :] = system.part[int(N / 2):].v

            if espressomd.has_features("ROTATION"):
                omega_kT[int(i * N / 2):int((i + 1) * N / 2), :] = \
                    system.part[:int(N / 2)].omega_body
                omega_kT2[int(i * N / 2):int((i + 1) * N / 2), :] = \
                    system.part[int(N / 2):].omega_body
        v_minmax = 5
        bins = 4
        error_tol = 0.012
        self.check_velocity_distribution(v_kT, v_minmax, bins, error_tol, kT)
        self.check_velocity_distribution(v_kT2, v_minmax, bins, error_tol, kT2)

        if espressomd.has_features("ROTATION"):
            self.check_velocity_distribution(
                omega_kT, v_minmax, bins, error_tol, kT)
            self.check_velocity_distribution(
                omega_kT2, v_minmax, bins, error_tol, kT2)

    def setup_diff_mass_rinertia(self, p):
        # Helper: give particle p non-default mass/rotation settings.
        if espressomd.has_features("MASS"):
            p.mass = 0.5
        if espressomd.has_features("ROTATION"):
            p.rotation = [1, 1, 1]
            # Make sure rinertia does not change diff coeff
            if espressomd.has_features("ROTATIONAL_INERTIA"):
                p.rinertia = [0.4, 0.4, 0.4]

    def test_msd_global_temp(self):
        """Tests diffusion via MSD for global gamma and temperature"""
        gamma = 9.4
        kT = 0.37
        dt = 0.5

        system = self.system
        system.part.clear()
        p = system.part.add(pos=(0, 0, 0), id=0)
        system.time_step = dt
        system.thermostat.set_brownian(kT=kT, gamma=gamma, seed=42)
        system.cell_system.skin = 0.4

        # Accumulate the componentwise squared displacement on the fly.
        pos_obs = ParticlePositions(ids=(p.id,))
        c_pos = Correlator(obs1=pos_obs, tau_lin=16, tau_max=100., delta_N=1,
                           corr_operation="square_distance_componentwise",
                           compress1="discard1")
        system.auto_update_accumulators.add(c_pos)

        system.integrator.run(30000)

        c_pos.finalize()

        # Check MSD
        msd = c_pos.result()
        tau = c_pos.lag_times()
        system.auto_update_accumulators.clear()

        # Free Brownian particle: per-component MSD(t) = 2 (kT/gamma) t.
        def expected_msd(x):
            return 2. * kT / gamma * x

        for i in range(2, 6):
            np.testing.assert_allclose(
                msd[i], expected_msd(tau[i]), rtol=0.02)

    @utx.skipIfMissingFeatures("VIRTUAL_SITES")
    def test_07__virtual(self):
        system = self.system
        system.time_step = 0.01
        system.part.clear()

        virtual = system.part.add(pos=[0, 0, 0], virtual=True, v=[1, 0, 0])
        physical = system.part.add(pos=[0, 0, 0], virtual=False, v=[1, 0, 0])

        # With act_on_virtual=False the virtual particle must keep its
        # initial velocity while the physical one is thermalized to kT=0.
        system.thermostat.set_brownian(
            kT=0, gamma=1, gamma_rotation=1., act_on_virtual=False, seed=41)

        system.integrator.run(1)

        np.testing.assert_almost_equal(np.copy(virtual.v), [1, 0, 0])
        np.testing.assert_almost_equal(np.copy(physical.v), [0, 0, 0])

        # With act_on_virtual=True both particles are acted upon.
        system.part.clear()
        virtual = system.part.add(pos=[0, 0, 0], virtual=True, v=[1, 0, 0])
        physical = system.part.add(pos=[0, 0, 0], virtual=False, v=[1, 0, 0])
        system.thermostat.set_brownian(
            kT=0, gamma=1, gamma_rotation=1., act_on_virtual=True, seed=41)
        system.integrator.run(1)
        np.testing.assert_almost_equal(np.copy(virtual.v), [0, 0, 0])
        np.testing.assert_almost_equal(np.copy(physical.v), [0, 0, 0])

    def test_08__noise_correlation(self):
        """Checks that the Brownian noise is uncorrelated"""

        system = self.system
        system.part.clear()
        system.time_step = 0.01
        system.cell_system.skin = 0.1
        system.part.add(id=(0, 1), pos=np.zeros((2, 3)))
        vel_obs = ParticleVelocities(ids=system.part[:].id)
        vel_series = TimeSeries(obs=vel_obs)
        system.auto_update_accumulators.add(vel_series)
        if espressomd.has_features("ROTATION"):
            system.part[:].rotation = (1, 1, 1)
            omega_obs = ParticleBodyAngularVelocities(ids=system.part[:].id)
            omega_series = TimeSeries(obs=omega_obs)
            system.auto_update_accumulators.add(omega_series)

        kT = 3.2
        system.thermostat.set_brownian(kT=kT, gamma=2.1, seed=17)
        steps = int(1e4)
        system.integrator.run(steps)
        system.auto_update_accumulators.clear()

        # test translational noise correlation
        # dot-product normalized by steps*kT should be ~1 for a component
        # with itself and ~0 across components (asserted with delta 0.04).
        vel = np.array(vel_series.time_series())
        for ind in range(2):
            for i in range(3):
                for j in range(i, 3):
                    corrcoef = np.dot(
                        vel[:, ind, i], vel[:, ind, j]) / steps / kT
                    if i == j:
                        self.assertAlmostEqual(corrcoef, 1.0, delta=0.04)
                    else:
                        self.assertLessEqual(np.abs(corrcoef), 0.04)

        # test rotational noise correlation
        if espressomd.has_features("ROTATION"):
            omega = np.array(omega_series.time_series())
            for ind in range(2):
                for i in range(3):
                    for j in range(3):
                        corrcoef = np.dot(
                            omega[:, ind, i], omega[:, ind, j]) / steps / kT
                        if i == j:
                            self.assertAlmostEqual(corrcoef, 1.0, delta=0.04)
                        else:
                            self.assertLessEqual(np.abs(corrcoef), 0.04)
                        # translational and angular velocities should be
                        # independent
                        corrcoef = np.dot(
                            vel[:, ind, i], omega[:, ind, j]) / steps / kT
                        self.assertLessEqual(np.abs(corrcoef), 0.04)
if __name__ == "__main__":
    # Run all tests in this module via the unittest CLI.
    ut.main()
| KaiSzuttor/espresso | testsuite/python/brownian_dynamics.py | Python | gpl-3.0 | 11,788 | [
"ESPResSo"
] | 487c6a849674c6801529afbfb99a68f82bfcd59b4b74fbec4be87838a262a6ef |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Module for commons shared by tests """
import re
import logging
logging = logging.getLogger('sneakylang')
from sneakylang.err import ParserRollback, MacroCallError
from sneakylang.macro import Macro
from sneakylang.node import Node, TextNode
from sneakylang.parser import *
from sneakylang.expanders import Expander, expand
class DummyMacro(Macro):
    """Minimal macro used by the tests: always emits one DummyNode."""
    name = 'dummy_macro'

    def expand_to_nodes(self, *args, **kwargs):
        self.builder.append(DummyNode())


class DummyNode(Node):
    """Placeholder node produced by DummyMacro."""
    name = 'dummy node'


class DummyParser(Parser):
    """Parser triggered by '####', delegating to DummyMacro."""
    start = ['(####)']
    macro = DummyMacro


class DummyParserTwo(Parser):
    """Second parser for DummyMacro, triggered by '#####'."""
    start = ['(#####)']
    macro = DummyMacro
class NadpisMacro(Macro):
    """Heading ('nadpis') macro.

    NOTE(review): unlike DummyMacro this *returns* the node instead of
    appending it to the builder - confirm that is intentional.
    """
    name = 'nadpis'

    def expand_to_nodes(self, *args, **kwargs):
        return DummyNode()


# parser borrowed from czechtile
class Nadpis(Parser):
    """Heading parser: '= text =' through '===== text ====='."""
    start = ['(\n)?(=){1,5}(\ ){1}']
    macro = NadpisMacro

    def resolve_argument_string(self):
        # Derive the closing pattern from the opening chunk (drop the
        # trailing space and any leading newline).
        endPattern = self.chunk[:-1]
        if endPattern.startswith('\n'):
            endPattern = endPattern[1:]
        # chunk is \n={n}[whitespace],
        # end is [whitespace]={n}\n
        endMatch = re.search(''.join([' ', endPattern, '\n']), self.stream)
        if not endMatch:
            raise ParserRollback
        # Heading level == number of '=' characters.
        self.level = len(endPattern)
        self.argument_string = self.stream[0:endMatch.start()]
        # end()-1 because we won't eat trailing newline
        self.chunk_end = self.stream[endMatch.start():endMatch.end()-1]
        self.stream = self.stream[endMatch.end()-1:]

    def call_macro(self):
        """ Do proper call to related macro(s) """
        # Heading macro also needs the level, hence the custom call.
        return self.macro(self.register, self.register_map).expand(self.level, self.argument_string)
### Define basic grammar
# This wiki grammar has only paragraphs (\n\n) and headings (=)

class OneArgumentMacro(Macro):
    """Macro taking one string argument; emits a DummyNode with a
    TextNode child holding the argument."""
    name = 'onearg'

    def expand_to_nodes(self, content):
        self.builder.append(DummyNode())
        self.builder.append(TextNode(content=content), move_actual=False)
        self.builder.move_up()


class ParagraphNode(Node): pass
class ParagraphMacro(Macro):
    """Paragraph ('odstavec') macro: wraps its content in a
    ParagraphNode and recursively parses the content into it.

    Cleanup: removed a leftover debug ``print content`` that wrote to
    stdout on every expansion; joined the words with ' '.join (same
    result as the old join-then-strip for the guaranteed non-empty
    *args).
    """
    name = 'odstavec'
    help = '((odstavec text odstavce))'

    @classmethod
    def get_arguments(self, argument_string):
        # The whole argument string is passed through as one positional.
        return [argument_string], {}

    def expand_to_nodes(self, *args):
        if len(args) < 1:
            raise MacroCallError("Paragraph must have some content")
        content = ' '.join(args)
        self.builder.append(ParagraphNode())
        parse(content, self.register_map, self.register, builder=self.builder)
        self.builder.move_up()
class Paragraph(Parser):
    """Paragraph parser: a paragraph starts and ends with a blank line.

    Cleanup: removed a leftover debug ``print self.argument_string``
    and renamed the local ``end`` (it shadowed the class attribute
    ``end`` used as the terminator pattern).
    """
    start = ['(\n){2}']
    macro = ParagraphMacro
    end = '(\n){2}'

    def get_arguments(self, argument_string):
        return parse_macro_arguments(argument_string, return_kwargs=True)

    def resolve_argument_string(self):
        # Consume up to the next blank line, or the rest of the stream.
        match = re.search(self.__class__.end, self.stream)
        if match:
            self.argument_string = self.stream[0:match.start()]
            self.chunk_end = self.stream[match.start():match.end()]
            self.stream = self.stream[match.end():]
        else:
            self.argument_string = self.stream
            self.stream = ''
class StrongNode(Node): pass


class StrongMacro(Macro):
    """Strong-emphasis ('silne') macro; content is parsed recursively
    into a StrongNode."""
    name = 'silne'
    help = '((silne zesileny text))'

    def expand_to_nodes(self, content, **kwargs):
        self.builder.append(StrongNode(), move_actual=True)
        parse(content, self.register_map, self.register, builder=self.builder)
        self.builder.move_up()


class StrongVistingMacro(Macro):
    """Variant of StrongMacro that notifies the state visitor and adds
    the content as a plain TextNode instead of re-parsing it.

    NOTE(review): "Visting" looks like a typo for "Visiting"; kept
    because external code may reference this class name.
    """
    name = 'silne'
    help = '((silne zesileny text))'

    def expand_to_nodes(self, content, *args, **kwargs):
        self.state.visit(self)
        self.builder.append(StrongNode())
        self.builder.append(TextNode(content=content), move_actual=False)
        self.builder.move_up()


class PictureNode(Node):
    pass


class PictureKeywordMacro(Macro):
    """Picture macro taking a URL plus keyword arguments; stores both on
    the emitted PictureNode."""
    name = 'picture'
    help = '((picture http://pic.png title="My picture"))'

    def expand_to_nodes(self, content, **kwargs):
        node = PictureNode()
        node.args = [content]
        node.kwargs = kwargs
        self.builder.append(node, move_actual=False)
class Strong(Parser):
    """Parser for strong emphasis delimited by doubled quotes: ""text""."""
    start = ['("){2}']

    macro = StrongMacro

    def resolve_argument_string(self):
        # The terminator is the same literal chunk that opened the span.
        endMatch = re.search(re.escape(self.chunk), self.stream)
        if not endMatch:
            raise ParserRollback()
        self.argument_string = self.stream[0:endMatch.start()]
        self.chunk_end = self.stream[endMatch.start():endMatch.end()]
        self.stream = self.stream[endMatch.end():]
class ParagraphDocbookExpand(Expander):
    """Expander rendering a paragraph node as a DocBook <para> element."""

    def expand(self, node, format, node_map):
        # Expand every child, concatenate, and wrap in <para> tags.
        inner = ''.join(expand(child, format, node_map) for child in node.children)
        return '<para>' + inner + '</para>'
# Registry of all parsers defined in this test-support module.
parsers_list = [DummyParser, DummyParserTwo, Paragraph, Nadpis, Strong]
| Almad/sneakylang | sneakylang/test/module_test.py | Python | bsd-3-clause | 5,032 | [
"VisIt"
] | 9998010076552951cd93bb45993214a364ffcf1f2d6c5a4357fe2abda876491a |
#!/usr/bin/env python
# -*- coding: Latin-1 -*-
""" setbuild - set the build environment you require """
##
def syntax():
    """Print the command-line usage for setbuild.py."""
    sys.stdout.write("syntax: python setbuild.py Win32|x64|all|reset\n")
##
##
# import modules
import os.path
import sys
import xml.dom.minidom
from xml.sax.saxutils import escape
##
# Chunked-copy parameters, borrowed from PP3E/System/Filetools/cpall.py:
# files up to maxfileload bytes are read in one gulp by cp(); larger
# files are streamed in blksize-byte blocks.
maxfileload = 5 * 1024 * 1024
blksize = 100 * 1024
##
def cp(pathFrom, pathTo, maxfileload=maxfileload):
    """
    copy file pathFrom to pathTo, byte for byte

    Files up to *maxfileload* bytes are read whole; larger files are
    streamed in blksize chunks.  Fix: file objects are now closed
    deterministically via ``with`` - the original leaked every handle
    (the small-file path never closed, the big-file path relied on GC).
    """
    if os.path.getsize(pathFrom) <= maxfileload:
        with open(pathFrom, 'rb') as fileFrom:   # read small file all at once
            bytesFrom = fileFrom.read()
        with open(pathTo, 'wb') as fileTo:       # need b mode on Windows
            fileTo.write(bytesFrom)
    else:
        with open(pathFrom, 'rb') as fileFrom:   # read big files in chunks
            with open(pathTo, 'wb') as fileTo:
                while 1:
                    bytesFrom = fileFrom.read(blksize)
                    if not bytesFrom:            # empty after last chunk
                        break
                    fileTo.write(bytesFrom)
##
##
def save(path):
    """Back up *path* to path.orig, or restore from an existing backup.

    The first call on a file copies path -> path.orig; every later call
    copies the pristine path.orig back over path, undoing prior edits.
    """
    backup = path + '.orig'
    if not os.path.exists(backup):
        cp(path, backup)
    else:
        cp(backup, path)
##
##
def sln(path, remove):
    """Drop every line mentioning *remove* (e.g. 'x64') from a .sln file.

    save() first backs the file up (or restores the pristine backup),
    so repeated runs always filter the original content.  A falsy
    *remove* only performs the backup/restore.

    Cleanup: use ``with`` for deterministic file closing and a list
    comprehension instead of the manual read/filter/write loops.
    """
    save(path)
    if remove:
        with open(path, 'r') as f:
            kept = [line for line in f if line.find(remove) < 0]
        with open(path, 'w') as f:
            f.writelines(kept)
##
##
def vcproj(path, remove):
    """Remove the *remove* platform (e.g. 'x64') from a .vcproj file.

    Deletes every <Platform>/<Configuration> element whose Name mentions
    *remove*, then rewrites the file.  CommandLine attributes contain
    embedded newlines that minidom would mangle, so they are escaped and
    their CR/LF temporarily replaced by __CR__/__LF__ markers, which are
    turned into XML character entities after serialization.

    NOTE(review): the entity strings below were garbled by text
    extraction in the previous revision; '&#13;'/'&#10;' is the
    reconstruction consistent with the __CR__/__LF__ markers - confirm
    against version control.  Also switched to ``with`` so handles are
    closed deterministically.
    """
    save(path)
    if remove:
        with open(path, 'r') as f:
            dom = xml.dom.minidom.parseString(f.read())
        for tag in ['Platform', 'Configuration']:
            tags = dom.getElementsByTagName(tag)
            kills = []
            for t in tags:
                if t.getAttribute("Name").find(remove) >= 0:
                    kills.append(t)
            for kill in kills:
                kill.parentNode.removeChild(kill)
        # repair the command lines!
        for tool in dom.getElementsByTagName('Tool'):
            cl = tool.getAttribute('CommandLine')
            if cl:
                cl = escape(cl)
                cl = cl.replace('\r', '__CR__')
                cl = cl.replace('\n', '__LF__')
                tool.setAttribute('CommandLine', cl)
        code = dom.toxml()
        code = code.replace('__CR__', '&#13;')
        code = code.replace('__LF__', '&#10;')
        with open(path, 'w') as f:
            f.write(code)
##
##
def visit(myData, directoryName, filesInDirectory):
    """os.path.walk visitor: append every non-directory path under
    *directoryName* to the module-global ``paths`` list.

    *myData* is the unused walk argument required by the callback
    signature.
    """
    global paths
    for entry in filesInDirectory:
        full = os.path.join(directoryName, entry)
        if not os.path.isdir(full):
            paths.append(full)
##
##
def setbuild(remove):
    """setbuild - remove == None, means both x64 and Win32

    Walks the script's directory and, for every .sln/.vcproj found,
    strips the platform named by *remove* (or just backs up/restores
    when *remove* is None).  Any other value prints the usage.
    """
    if remove in set(['x64','Win32',None]):
        # Operate on the directory containing this script.
        directory = os.path.abspath(os.path.dirname(sys.argv[0]))
        print "directory = ",directory
        # Collect all file paths via the (Python 2 only) os.path.walk.
        global paths
        paths=[]
        os.path.walk(directory, visit, None)
        for path in paths:
            # print path
            # Dispatch on extension; unknown extensions are skipped.
            handlers = { '.sln' : sln
                       , '.vcproj' : vcproj
                       } ;
            ext=os.path.splitext(path)[1]
            if handlers.has_key(ext):
                handlers[ext](path,remove)
    else:
        syntax()
##
##
if __name__ == '__main__':
    # Map the CLI option (case-insensitive) to the platform to REMOVE:
    # choosing 'x64' removes Win32 and vice versa; 'all'/'reset' remove
    # nothing but back up / restore the project files.
    argc = len(sys.argv)
    syntaxError = argc < 2
    if not syntaxError:
        option=sys.argv[1].lower()
        removes = { 'x64' : 'Win32'
                  , 'win32' : 'x64'
                  , 'all' : None
                  , 'reset' : None
                  }
        syntaxError = not removes.has_key(option)
        if not syntaxError:
            setbuild(removes[option])
            if option=='reset':
                # Drop all backups after restoring (Windows shell del).
                os.system('del/s *.orig')
    if syntaxError:
        syntax()
# That's all Folks!
##
| coapp-packages/exiv2 | msvc64/setbuild.py | Python | gpl-2.0 | 4,218 | [
"VisIt"
] | 7c27a046124730e5f94cb09186c95c012a902dfd5c964c64242796871b348569 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import os
from commoncode.testcase import FileBasedTesting
from commoncode.testcase import file_cmp
from commoncode import fileutils
from commoncode import text
from textcode import markup
class TestMarkup(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def atest_gen(self):
"""
Rename to test xxx to regen tests.
"""
test_dir = self.get_test_loc(u'markup', True)
expected_dir = self.get_test_loc(u'markup_expected')
template = u"""
def test_%(tn)s(self):
test_file = self.get_test_loc(u'markup/%(test_file)s')
expected = self.get_test_loc(u'markup_expected/%(test_file)s')
result = markup.convert_to_text(test_file)
file_cmp(expected, result, ignore_line_endings=True)"""
for test_file in os.listdir(test_dir):
tn = text.python_safe_name(test_file)
location = os.path.join(test_dir, test_file)
result = markup.convert_to_text(location)
expected_file = os.path.join(expected_dir, test_file)
fileutils.copyfile(result, expected_file)
print(template % locals())
def test_404_htm(self):
test_file = self.get_test_loc(u'markup/404.htm')
expected = self.get_test_loc(u'markup_expected/404.htm')
result = markup.convert_to_text(test_file)
file_cmp(expected, result, ignore_line_endings=True)
def test_a_htm(self):
test_file = self.get_test_loc(u'markup/a.htm')
expected = self.get_test_loc(u'markup_expected/a.htm')
result = markup.convert_to_text(test_file)
file_cmp(expected, result, ignore_line_endings=True)
def test_allclasses_frame_html(self):
test_file = self.get_test_loc(u'markup/allclasses-frame.html')
expected = self.get_test_loc(u'markup_expected/allclasses-frame.html')
result = markup.convert_to_text(test_file)
file_cmp(expected, result, ignore_line_endings=True)
def test_chinese_htm(self):
test_file = self.get_test_loc(u'markup/chinese.htm')
expected = self.get_test_loc(u'markup_expected/chinese.htm')
result = markup.convert_to_text(test_file)
file_cmp(expected, result, ignore_line_endings=True)
def test_contenttype_html(self):
test_file = self.get_test_loc(u'markup/contenttype.html')
expected = self.get_test_loc(u'markup_expected/contenttype.html')
result = markup.convert_to_text(test_file)
file_cmp(expected, result, ignore_line_endings=True)
def test_double_pygment_in_html_html(self):
# FIXME: the output is still markup. we need a second pass
test_file = self.get_test_loc(u'markup/double_pygment_in_html.html')
expected = self.get_test_loc(u'markup_expected/double_pygment_in_html.html')
result = markup.convert_to_text(test_file)
file_cmp(expected, result, ignore_line_endings=True)
def test_json_phps(self):
test_file = self.get_test_loc(u'markup/JSON.phps')
expected = self.get_test_loc(u'markup_expected/JSON.phps')
result = markup.convert_to_text(test_file)
file_cmp(expected, result, ignore_line_endings=True)
def test_json_phps_html(self):
test_file = self.get_test_loc(u'markup/JSON.phps.html')
expected = self.get_test_loc(u'markup_expected/JSON.phps.html')
result = markup.convert_to_text(test_file)
file_cmp(expected, result, ignore_line_endings=True)
def test_label_html(self):
test_file = self.get_test_loc(u'markup/Label.html')
expected = self.get_test_loc(u'markup_expected/Label.html')
result = markup.convert_to_text(test_file)
file_cmp(expected, result, ignore_line_endings=True)
def test_lgpl_license_html(self):
test_file = self.get_test_loc(u'markup/lgpl_license.html')
expected = self.get_test_loc(u'markup_expected/lgpl_license.html')
result = markup.convert_to_text(test_file)
file_cmp(expected, result, ignore_line_endings=True)
def test_pdl_html(self):
test_file = self.get_test_loc(u'markup/PDL.html')
expected = self.get_test_loc(u'markup_expected/PDL.html')
result = markup.convert_to_text(test_file)
file_cmp(expected, result, ignore_line_endings=True)
def test_php_php(self):
test_file = self.get_test_loc(u'markup/php.php')
expected = self.get_test_loc(u'markup_expected/php.php')
result = markup.convert_to_text(test_file)
file_cmp(expected, result, ignore_line_endings=True)
def test_php_highlighted_in_html_html(self):
# FIXME: the output is still markup. we need a second pass
test_file = self.get_test_loc(u'markup/php_highlighted_in_html.html')
expected = self.get_test_loc(u'markup_expected/php_highlighted_in_html.html')
result = markup.convert_to_text(test_file)
file_cmp(expected, result, ignore_line_endings=True)
def test_rst_highlighted_html(self):
test_file = self.get_test_loc(u'markup/rst_highlighted.html')
expected = self.get_test_loc(u'markup_expected/rst_highlighted.html')
result = markup.convert_to_text(test_file)
file_cmp(expected, result, ignore_line_endings=True)
def test_services_htm(self):
test_file = self.get_test_loc(u'markup/services.htm')
expected = self.get_test_loc(u'markup_expected/services.htm')
result = markup.convert_to_text(test_file)
file_cmp(expected, result, ignore_line_endings=True)
def test_sissl_license_html(self):
test_file = self.get_test_loc(u'markup/sissl_license.html')
expected = self.get_test_loc(u'markup_expected/sissl_license.html')
result = markup.convert_to_text(test_file)
file_cmp(expected, result, ignore_line_endings=True)
def test_text_phps(self):
    """Converting markup/text.phps matches the expected text fixture."""
    source = self.get_test_loc(u'markup/text.phps')
    reference = self.get_test_loc(u'markup_expected/text.phps')
    file_cmp(reference, markup.convert_to_text(source), ignore_line_endings=True)
def test_us_htm(self):
    """Converting markup/us.htm matches the expected text fixture."""
    source = self.get_test_loc(u'markup/us.htm')
    reference = self.get_test_loc(u'markup_expected/us.htm')
    file_cmp(reference, markup.convert_to_text(source), ignore_line_endings=True)
| retrography/scancode-toolkit | tests/textcode/test_markup.py | Python | apache-2.0 | 7,836 | [
"VisIt"
] | 93ac52f7c4ab807bedc47c4d315d7f115da51af5940556e55a4777debcaf8b6d |
"""
Signal handling functions for use with external commerce service.
"""
from __future__ import unicode_literals
import json
import logging
from urlparse import urljoin
import requests
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.dispatch import receiver
from django.utils.translation import ugettext as _
from openedx.core.djangoapps.commerce.utils import ecommerce_api_client, is_commerce_service_configured
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.theming import helpers as theming_helpers
from request_cache.middleware import RequestCache
from student.signals import REFUND_ORDER
from .models import CommerceConfiguration
log = logging.getLogger(__name__)
# pylint: disable=unused-argument
@receiver(REFUND_ORDER)
def handle_refund_order(sender, course_enrollment=None, **kwargs):
    """
    Signal receiver for unenrollments, used to automatically initiate refunds
    when applicable.
    """
    # Nothing to do unless the external E-Commerce service is configured.
    if not is_commerce_service_configured():
        return
    # Only attempt a refund when the enrollment itself reports it is refundable.
    if course_enrollment and course_enrollment.refundable():
        try:
            request_user = get_request_user() or course_enrollment.user
            if isinstance(request_user, AnonymousUser):
                # Assume the request was initiated via server-to-server
                # API call (presumably Otto). In this case we cannot
                # construct a client to call Otto back anyway, because
                # the client does not work anonymously, and furthermore,
                # there's certainly no need to inform Otto about this request.
                return
            refund_seat(course_enrollment)
        except:  # pylint: disable=bare-except
            # don't assume the signal was fired with `send_robust`.
            # avoid blowing up other signal handlers by gracefully
            # trapping the Exception and logging an error.
            log.exception(
                "Unexpected exception while attempting to initiate refund for user [%s], course [%s]",
                course_enrollment.user.id,
                course_enrollment.course_id,
            )
def get_request_user():
    """
    Helper to get the authenticated user from the current HTTP request (if
    applicable).

    If the requester of an unenrollment is not the same person as the student
    being unenrolled, we authenticate to the commerce service as the requester.
    """
    current_request = RequestCache.get_current_request()
    if current_request is None:
        # No request in flight (e.g. management command / celery task).
        return None
    return getattr(current_request, 'user', None)
def refund_seat(course_enrollment):
    """
    Attempt to initiate a refund for any orders associated with the seat being unenrolled, using the commerce service.

    Arguments:
        course_enrollment (CourseEnrollment): a student enrollment

    Returns:
        A list of the external service's IDs for any refunds that were initiated
        (may be empty).

    Raises:
        exceptions.SlumberBaseException: for any unhandled HTTP error during communication with the E-Commerce Service.
        exceptions.Timeout: if the attempt to reach the commerce service timed out.
    """
    User = get_user_model()  # pylint:disable=invalid-name
    course_key_str = unicode(course_enrollment.course_id)
    enrollee = course_enrollment.user
    # Refund calls are made as the configured service worker account,
    # not as the learner being unenrolled.
    service_user = User.objects.get(username=settings.ECOMMERCE_SERVICE_WORKER_USERNAME)
    api_client = ecommerce_api_client(service_user)
    log.info('Attempting to create a refund for user [%s], course [%s]...', enrollee.id, course_key_str)
    # Ask the E-Commerce service to open refunds for this user/course pair.
    refund_ids = api_client.refunds.post({'course_id': course_key_str, 'username': enrollee.username})
    if refund_ids:
        log.info('Refund successfully opened for user [%s], course [%s]: %r', enrollee.id, course_key_str, refund_ids)
        config = CommerceConfiguration.current()
        if config.enable_automatic_refund_approval:
            # Refunds that fail auto-approval fall back to manual handling below.
            refunds_requiring_approval = []
            for refund_id in refund_ids:
                try:
                    # NOTE: Approve payment only because the user has already been unenrolled. Additionally, this
                    # ensures we don't tie up an additional web worker when the E-Commerce Service tries to unenroll
                    # the learner
                    api_client.refunds(refund_id).process.put({'action': 'approve_payment_only'})
                    log.info('Refund [%d] successfully approved.', refund_id)
                except:  # pylint: disable=bare-except
                    log.exception('Failed to automatically approve refund [%d]!', refund_id)
                    refunds_requiring_approval.append(refund_id)
        else:
            refunds_requiring_approval = refund_ids
        if refunds_requiring_approval:
            # XCOM-371: this is a temporary measure to suppress refund-related email
            # notifications to students and support for free enrollments. This
            # condition should be removed when the CourseEnrollment.refundable() logic
            # is updated to be more correct, or when we implement better handling (and
            # notifications) in Otto for handling reversal of $0 transactions.
            if course_enrollment.mode != 'verified':
                # 'verified' is the only enrollment mode that should presently
                # result in opening a refund request.
                log.info(
                    'Skipping refund email notification for non-verified mode for user [%s], course [%s], mode: [%s]',
                    course_enrollment.user.id,
                    course_enrollment.course_id,
                    course_enrollment.mode,
                )
            else:
                try:
                    send_refund_notification(course_enrollment, refunds_requiring_approval)
                except:  # pylint: disable=bare-except
                    # don't break, just log a warning
                    log.warning('Could not send email notification for refund.', exc_info=True)
    else:
        log.info('No refund opened for user [%s], course [%s]', enrollee.id, course_key_str)
    return refund_ids
def create_zendesk_ticket(requester_name, requester_email, subject, body, tags=None):
    """ Create a Zendesk ticket via API. """
    if not (settings.ZENDESK_URL and settings.ZENDESK_USER and settings.ZENDESK_API_KEY):
        log.debug('Zendesk is not configured. Cannot create a ticket.')
        return

    # Work on a de-duplicated copy so the caller's list is never mutated;
    # every ticket carries the 'LMS' tag.
    ticket_tags = list(set(list(tags or []) + ['LMS']))

    payload = json.dumps({
        'ticket': {
            'requester': {
                'name': requester_name,
                'email': requester_email
            },
            'subject': subject,
            'comment': {'body': body},
            'tags': ticket_tags
        }
    })

    # Zendesk token authentication: '<user>/token' as username, API key as password.
    endpoint = urljoin(settings.ZENDESK_URL, '/api/v2/tickets.json')
    auth_pair = ('{}/token'.format(settings.ZENDESK_USER), settings.ZENDESK_API_KEY)

    try:
        response = requests.post(
            endpoint,
            data=payload,
            auth=auth_pair,
            headers={'content-type': 'application/json'},
        )
        # Zendesk answers 201 (Created) on success; anything else is a failure.
        if response.status_code != 201:
            log.error('Failed to create ticket. Status: [%d], Body: [%s]', response.status_code, response.content)
        else:
            log.debug('Successfully created ticket.')
    except Exception:  # pylint: disable=broad-except
        log.exception('Failed to create ticket.')
        return
def generate_refund_notification_body(student, refund_ids):  # pylint: disable=invalid-name
    """ Returns a refund notification message body. """
    intro = _(
        "A refund request has been initiated for {username} ({email}). "
        "To process this request, please visit the link(s) below."
    ).format(username=student.username, email=student.email)
    # Site configuration may override the default E-Commerce public root.
    ecommerce_url_root = configuration_helpers.get_value(
        'ECOMMERCE_PUBLIC_URL_ROOT', settings.ECOMMERCE_PUBLIC_URL_ROOT,
    )
    links = []
    for refund_id in refund_ids:
        links.append(urljoin(ecommerce_url_root, '/dashboard/refunds/{}/'.format(refund_id)))
    return '{msg}\n\n{urls}'.format(msg=intro, urls='\n'.join(links))
def send_refund_notification(course_enrollment, refund_ids):
    """ Notify the support team of the refund request. """
    if theming_helpers.is_request_in_themed_site():
        # this is not presently supported with the external service.
        raise NotImplementedError("Unable to send refund processing emails to support teams.")
    student = course_enrollment.user
    create_zendesk_ticket(
        student.profile.name or student.username,
        student.email,
        _("[Refund] User-Requested Refund"),
        generate_refund_notification_body(student, refund_ids),
        ['auto_refund'],
    )
| lduarte1991/edx-platform | lms/djangoapps/commerce/signals.py | Python | agpl-3.0 | 9,145 | [
"VisIt"
] | c78338a886125fdb6a398bbba95cdead16bb6ba16652446c60834e21924c233b |
# Abbreviation -> full team name. Includes relocated/defunct franchises
# (ATL Thrashers) and both Coyotes abbreviations (PHX and ARI).
teamAbv = {
    "L.A": "LOS ANGELES KINGS",
    "ANA": "ANAHEIM DUCKS",
    "CAR": "CAROLINA HURRICANES",
    "MTL": "MONTREAL CANADIENS",
    "DET": "DETROIT RED WINGS",
    "COL": "COLORADO AVALANCHE",
    "NSH": "NASHVILLE PREDATORS",
    "BOS": "BOSTON BRUINS",
    "DAL": "DALLAS STARS",
    "PHI": "PHILADELPHIA FLYERS",
    "EDM": "EDMONTON OILERS",
    "STL": "ST. LOUIS BLUES",
    "MIN": "MINNESOTA WILD",
    "ATL": "ATLANTA THRASHERS",
    "N.J": "NEW JERSEY DEVILS",
    "VAN": "VANCOUVER CANUCKS",
    "TOR": "TORONTO MAPLE LEAFS",
    "PHX": "PHOENIX COYOTES",
    "OTT": "OTTAWA SENATORS",
    "FLA": "FLORIDA PANTHERS",
    "NYI": "NEW YORK ISLANDERS",
    "PIT": "PITTSBURGH PENGUINS",
    "WSH": "WASHINGTON CAPITALS",
    "T.B": "TAMPA BAY LIGHTNING",
    "CGY": "CALGARY FLAMES",
    "S.J": "SAN JOSE SHARKS",
    "CBJ": "COLUMBUS BLUE JACKETS",
    "NYR": "NEW YORK RANGERS",
    "CHI": "CHICAGO BLACKHAWKS",
    "BUF": "BUFFALO SABRES",
    "WPG": "WINNIPEG JETS",
    "ARI": "ARIZONA COYOTES"
}
# Alternate spellings seen in source data, mapped to the canonical name.
altNames = {
    "CANADIENS MONTREAL": "MONTREAL CANADIENS"
}
# Pre-2013 conference assignment by abbreviation (no ARI entry: the
# ARI abbreviation only exists after realignment).
teamConf = {
    "L.A": "WEST",
    "ANA": "WEST",
    "CAR": "EAST",
    "MTL": "EAST",
    "DET": "WEST",
    "COL": "WEST",
    "NSH": "WEST",
    "BOS": "EAST",
    "DAL": "WEST",
    "PHI": "EAST",
    "EDM": "WEST",
    "STL": "WEST",
    "MIN": "WEST",
    "ATL": "EAST",
    "N.J": "EAST",
    "VAN": "WEST",
    "TOR": "EAST",
    "PHX": "WEST",
    "OTT": "EAST",
    "FLA": "EAST",
    "NYI": "EAST",
    "PIT": "EAST",
    "WSH": "EAST",
    "T.B": "EAST",
    "CGY": "WEST",
    "S.J": "WEST",
    "CBJ": "WEST",
    "NYR": "EAST",
    "CHI": "WEST",
    "BUF": "EAST",
    "WPG": "EAST"
}
# Conference assignment after the 2013 NHL realignment
# (DET and CBJ move East, WPG moves West).
teamConfReAlign = {
    "L.A": "WEST",
    "ANA": "WEST",
    "CAR": "EAST",
    "MTL": "EAST",
    "DET": "EAST",
    "COL": "WEST",
    "NSH": "WEST",
    "BOS": "EAST",
    "DAL": "WEST",
    "PHI": "EAST",
    "EDM": "WEST",
    "STL": "WEST",
    "MIN": "WEST",
    "WPG": "WEST",
    "N.J": "EAST",
    "VAN": "WEST",
    "TOR": "EAST",
    "ARI": "WEST",
    "PHX": "WEST",
    "OTT": "EAST",
    "FLA": "EAST",
    "NYI": "EAST",
    "PIT": "EAST",
    "WSH": "EAST",
    "T.B": "EAST",
    "CGY": "WEST",
    "S.J": "WEST",
    "CBJ": "EAST",
    "NYR": "EAST",
    "CHI": "WEST",
    "BUF": "EAST"
}
# Pre-2013 division by abbreviation (NE/ATL/SW/CEN/NW/PAC).
# NOTE(review): "SW" appears to denote the Southeast division
# (CAR/ATL/FLA/WSH/T.B) -- verify the intended label.
teamDiv = {
    "L.A": "PAC",
    "ANA": "PAC",
    "CAR": "SW",
    "MTL": "NE",
    "DET": "CEN",
    "COL": "NW",
    "NSH": "CEN",
    "BOS": "NE",
    "DAL": "PAC",
    "PHI": "ATL",
    "EDM": "NW",
    "STL": "CEN",
    "MIN": "NW",
    "ATL": "SW",
    "N.J": "ATL",
    "VAN": "NW",
    "TOR": "NE",
    "PHX": "PAC",
    "OTT": "NE",
    "FLA": "SW",
    "NYI": "ATL",
    "PIT": "ATL",
    "WSH": "SW",
    "T.B": "SW",
    "CGY": "NW",
    "S.J": "PAC",
    "CBJ": "CEN",
    "NYR": "ATL",
    "CHI": "CEN",
    "BUF": "NE",
    "WPG": "ATL"
}
# Post-2013 division by abbreviation (ATL/MET/CEN/PAC).
teamDivReAlign = {
    "L.A": "PAC",
    "ANA": "PAC",
    "CAR": "MET",
    "MTL": "ATL",
    "DET": "ATL",
    "COL": "CEN",
    "NSH": "CEN",
    "BOS": "ATL",
    "DAL": "CEN",
    "PHI": "MET",
    "EDM": "PAC",
    "STL": "CEN",
    "MIN": "CEN",
    "WPG": "CEN",
    "N.J": "MET",
    "VAN": "PAC",
    "TOR": "ATL",
    "ARI": "PAC",
    "PHX": "PAC",
    "OTT": "ATL",
    "FLA": "ATL",
    "NYI": "MET",
    "PIT": "MET",
    "WSH": "MET",
    "T.B": "ATL",
    "CGY": "PAC",
    "S.J": "PAC",
    "CBJ": "MET",
    "NYR": "MET",
    "CHI": "CEN",
    "BUF": "ATL"
}
def getTeamByName(name, season):
    """Look up a team by its full name.

    Arguments:
        name: full team name (e.g. "MONTREAL CANADIENS"); alternate
            spellings from altNames are normalized first.
        season: season year (int or str); seasons >= 2013 use the
            post-realignment conference/division tables.

    Returns:
        dict with keys 'name', 'abv', 'conf', 'div'.

    Raises:
        ValueError: if the name is not found in teamAbv.
    """
    if name in altNames:
        name = altNames[name]
    team = {'name': name}
    # Reverse lookup: find the abbreviation whose full name matches.
    for abv, fullname in teamAbv.items():
        if fullname == name:
            team['abv'] = abv
            break
    else:
        # Previously this fell through and raised an opaque KeyError below;
        # fail fast with a clear message instead. (A stray debug `print`
        # was also removed here.)
        raise ValueError('Unknown team name: %s' % name)
    if int(season) >= 2013:
        team['conf'] = teamConfReAlign[team['abv']]
        team['div'] = teamDivReAlign[team['abv']]
    else:
        team['conf'] = teamConf[team['abv']]
        team['div'] = teamDiv[team['abv']]
    return team
def getTeamByAbv(abv, season):
    """Look up a team by abbreviation; seasons >= 2013 use the
    post-realignment conference/division tables.

    Returns a dict with keys 'name', 'abv', 'conf', 'div'.
    """
    realigned = int(season) >= 2013
    conf_table = teamConfReAlign if realigned else teamConf
    div_table = teamDivReAlign if realigned else teamDiv
    return {
        'name': teamAbv[abv],
        'abv': abv,
        'conf': conf_table[abv],
        'div': div_table[abv],
    }
"COLUMBUS"
] | 4a8053251f8de99e0befd5e1339e9494d6a317e60b0a4eb9eaf4e979af0b2c0d |
# Copyright (C) 2002, Thomas Hamelryck (thamelry@binf.ku.dk)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Classes that deal with macromolecular crystal structures.
Includes: PDB and mmCIF parsers, a Structure class, a module to keep a local
copy of the PDB up-to-date, selective IO of PDB files, etc.
Author: Thomas Hamelryck. Additional code by Kristian Rother.
"""
# Get a Structure object from a PDB file
from .PDBParser import PDBParser
try:
    # Get a Structure object from an mmCIF file
    from .MMCIFParser import MMCIFParser
except ImportError:
    # Not compiled I guess. Catch only ImportError: the previous bare
    # `except:` would also have silenced unrelated failures (e.g.
    # SyntaxError in a dependency or KeyboardInterrupt during import).
    pass
# Download from the PDB
from .PDBList import PDBList
# Parse PDB header directly
from .parse_pdb_header import parse_pdb_header
# Find connected polypeptides in a Structure
from .Polypeptide import PPBuilder, CaPPBuilder, is_aa, standard_aa_names
# This is also useful :-)
from Bio.Data.SCOPData import protein_letters_3to1
# IO of PDB files (including flexible selective output)
from .PDBIO import PDBIO, Select
# Some methods to eg. get a list of Residues
# from a list of Atoms.
from . import Selection
# Superimpose atom sets
from .Superimposer import Superimposer
# 3D vector class
from .Vector import Vector, calc_angle, calc_dihedral, refmat, rotmat, rotaxis
from .Vector import vector_to_axis, m2rotaxis, rotaxis2m
# Alignment module
from .StructureAlignment import StructureAlignment
# DSSP handle
# (secondary structure and solvent accessible area calculation)
from .DSSP import DSSP, make_dssp_dict
# Residue depth:
# distance of residue atoms from solvent accessible surface
from .ResidueDepth import ResidueDepth, get_surface
# Calculation of Half Sphere Solvent Exposure
from .HSExposure import HSExposureCA, HSExposureCB, ExposureCN
# Kolodny et al.'s backbone libraries
from .FragmentMapper import FragmentMapper
# Write out chain(start-end) to PDB file
from .Dice import extract
# Fast atom neighbor search
# Depends on KDTree C++ module
try:
from .NeighborSearch import NeighborSearch
except ImportError:
pass
| Ambuj-UF/ConCat-1.0 | src/Utils/Bio/PDB/__init__.py | Python | gpl-2.0 | 2,165 | [
"Biopython",
"CRYSTAL"
] | 836af2e08b08265505ff0cd4f8c5f96c4b441490027d867c7b05ffca988a8a63 |
#!/usr/bin/env python
from __future__ import division
import numpy as np
import argparse
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import asetk.format.cube as cube
import asetk.format.qe as qe
import asetk.format.igor as igor
import sys
# Define command line parser
parser = argparse.ArgumentParser(
description='Plots plane from Gaussian Cube file.')
parser.add_argument('--version', action='version', version='%(prog)s 19.05.2015')
parser.add_argument(
'--cubes',
nargs='+',
metavar='FILENAME',
default=[],
help='Cube files')
parser.add_argument(
'--qe_cubes',
nargs='+',
metavar='FILENAME',
default=[],
help='Files in QE intermediate cube file format as written by pp.x')
parser.add_argument(
'--normal',
nargs='+',
metavar='DIRECTION',
default='z',
help='Direction of the plane-normal. May be "x", "y" or "z".')
parser.add_argument(
'--positions',
nargs='+',
metavar='HEIGHT',
type=float,
help='Position(s) of plane(s) along plane normal [Angstroms].')
#parser.add_argument(
# '--from_below',
# dest='from_below',
# action='store_true',
# default=False,
# help='Approach sample from below instead from above.')
#parser.add_argument(
# '--isovalues',
# nargs='+',
# metavar='VALUE',
# type=float,
# help='Values of the isosurface for an STM-image in constant current mode\
# [electrons/a0^3]. 1e-7 is typically a good start.')
parser.add_argument(
'--replicate',
default=None,
nargs=2,
type=int,
metavar='INT',
help='Number of replica along x and y.\
If just one number is specified, it is taken for both x and y.')
parser.add_argument(
'--stride',
default=(1,1),
nargs=2,
type=float,
metavar='INT',
help='If specified, the data will be resampled on a cartesian grid. \
--stride 0.5 0.5 will result in a grid twice as fine as the \
original grid of the cube file.')
parser.add_argument(
'--resample',
default=None,
nargs=2,
type=int,
metavar='INT',
help='If specified, the data will be resampled on a cartesian grid of \
nx x ny points.')
parser.add_argument(
'--format',
metavar='STRING',
default='plain',
help='Specifies format of output file. Can be \'plain\' (matrix of numbers)\
or \'igor\' (igor text format of Igor Pro).'
)
parser.add_argument(
'--plot',
dest='plot',
action='store_true',
default=True,
help='Plot data into png using matplotlib.')
parser.add_argument(
'--plotrange',
nargs=2,
metavar='VALUE',
default=None,
type=float,
help='If specified, color scale in plot will range from 1st value \
to 2nd value.')
args = parser.parse_args()
# Make list of jobs: one (position, normal) pair per requested plane.
jobs = []
if args.positions is None:
    raise ValueError("Please specify --positions")
if args.normal:
    jobs += zip(args.positions, [args.normal for _i in range(len(args.positions))])
    if args.normal != 'z':
        raise ValueError("Only normal z currently implemented.")
if not jobs:
    raise ValueError("No isovalues/heights specified.")

if args.replicate is not None:
    if len(args.replicate) == 1:
        # A single replica count applies to both x and y.
        # Fixed: the original wrote [args.replicate, args.replicate],
        # nesting the list and producing [[n], [n]] instead of [n, n].
        args.replicate = [args.replicate[0], args.replicate[0]]
    elif len(args.replicate) != 2:
        raise ValueError('Invalid specification of replicas. \
                Please specify --replicate <nx> <ny>.')

if args.stride is not None and args.resample is not None:
    raise ValueError("Please only specify either --stride or --resample")
# Iterate over supplied cube files
for fname in args.cubes + args.qe_cubes:
    print("\nReading {n} ".format(n=fname))

    # Load the file; QE intermediate cubes are converted to ordinary cubes.
    if fname in args.cubes:
        format = 'cube'
        c = cube.Cube.from_file(fname, read_data=True)
    elif fname in args.qe_cubes:
        format = 'qe_cube'
        tmp = qe.QECube.from_file(fname, read_data=True)
        c = tmp.to_cube()

    # Target grid: explicit --resample wins over --stride (which scales
    # the cube's native grid).
    if args.resample:
        resample = args.resample
    elif args.stride:
        s = args.stride
        resample = [ int(round(c.nx/s[0])),
                     int(round(c.ny/s[1])) ]

    for v,kind in jobs:
        planefile = None
        header = "STM simulation based on " + fname

        planefile = "{}.d{}{}".format(fname,kind,v)
        header += ", {} = {} [A]".format(kind,v)
        #elif kind == 'i':
        #    planefile = "{f}.iso{d}".format(f=fname,d=v)
        #    header += ", isovalue {v}, zcut {z} [A]".format(v=v, z=args.zcut)

        # Extract the plane at the requested position along the normal.
        plane = None
        index = c.get_index(args.normal, v)
        plane = c.get_plane(args.normal, index,
                return_object=True, replica=args.replicate, resample=resample)
        #elif kind == 'i':
        #    plane = c.get_isosurface_above_atoms(
        #        v, zcut=args.zcut, from_below=args.from_below,
        #        return_object=True,
        #        replica=args.replicate, resample=resample)

        # for details of plane object, see asetk/format/cube.py
        data = plane.data
        imdata = plane.imdata
        extent = plane.extent

        # Write the plane either as a plain matrix or as an Igor text wave.
        if args.format == 'plain':
            datafile = planefile + '.dat'
            print("Writing {} ".format(datafile))
            np.savetxt(datafile, data, header=header)
        elif args.format == 'igor':
            igorwave = igor.Wave2d(
                    data=data,
                    xmin=extent[0],
                    xmax=extent[1],
                    xlabel='x [Angstroms]',
                    ymin=extent[2],
                    ymax=extent[3],
                    ylabel='y [Angstroms]',
            )
            datafile = planefile + '.itx'
            print("Writing {} ".format(datafile))
            igorwave.write(datafile)
        else:
            raise ValueError("Unknown format {}.".format(args.format))

        if args.plot:
            plotfile = planefile + '.png'
            print("Plotting into {} ".format(plotfile))
            fig = plt.figure()

            # Optional fixed color range from --plotrange.
            vmin = None
            vmax = None
            if args.plotrange:
                vmin = args.plotrange[0]
                vmax = args.plotrange[1]
            #if kind == 'i' and args.plotrange:
            #    vmin = np.max(plane) - args.plotrange
            #    plane = plane - vmin
            #    vmin = 0

            # when approaching from below, let smaller z be brighter
            cmap = 'gray'

            # for some reason, I need to revert the x axis for imshow
            cax = plt.imshow(imdata, extent=extent,
                             cmap=cmap, vmin=vmin, vmax=vmax)
            plt.xlabel('x [$\AA$]')
            plt.ylabel('y [$\AA$]')
            # NOTE(review): jobs carry kind == normal (e.g. 'z'), so the
            # 'h'/'i' colorbar branches below never fire as written -- the
            # plot is saved without a colorbar; verify intended behavior.
            if kind == 'h':
                cbar = fig.colorbar(cax, format='%.1e')
                cbar.set_label('$|\psi|^2$ $[e/a_0^2]$')
            elif kind == 'i':
                cbar = fig.colorbar(cax, format='%.2f')
                cbar.set_label('z [$\AA$]')

            plt.savefig(plotfile, dpi=300, bbox_inches='tight')
| ltalirz/asetk | scripts/cube-plot.py | Python | mit | 7,083 | [
"Gaussian"
] | 6a96b57a3c754b4831ec77901dbe49beb0938efce452c06e8bbb29022f952ce3 |
import math
from random import gauss, expovariate
from matplotlib import pyplot
from scipy import optimize, arange
from pyhistuples.pyhistoplots import histo_plot
from pyhistuples.pyhistogram.histogram import Histogram, Axis
def gaussian(mu, sigma2, x):
    """Normalised Gaussian density with mean mu and variance sigma2, at x."""
    deviation = x - mu
    normalisation = math.sqrt(2 * math.pi * sigma2)
    return math.exp(-deviation * deviation / (2 * sigma2)) / normalisation
def fitfunc(params, xx):
    """Scaled Gaussian model: params = [mu, sigma2, amplitude], evaluated at each x in xx."""
    mu, sigma2, amplitude = params[0], params[1], params[2]
    return [amplitude * gaussian(mu, sigma2, value) for value in xx]
class ErrFunc(object):
    """Residual callable for scipy.optimize.leastsq against filled histogram bins."""

    def __init__(self, histogram, fitfunc):
        # NOTE(review): the stored fitfunc is never used by __call__, which
        # calls the module-level fitfunc instead -- behavior preserved as-is.
        self.fitfunc = fitfunc
        filled = histogram.filledBins()
        self.x = [filled_bin.centre for filled_bin in filled]
        self.y = [filled_bin.height for filled_bin in filled]

    def __call__(self, params):
        # Residuals: model prediction minus observed bin height.
        model = fitfunc(params, self.x)
        return [predicted - observed for predicted, observed in zip(model, self.y)]
# True parameters of the toy dataset.
mu_pt = 5.
sigma_pt = 15.
sigma2_pt = pow(sigma_pt,2)

# Fill a histogram with 10k Gaussian-distributed samples.
h_pt = Histogram(axis=Axis(100, -50, 50, label='pt'))
for i in xrange(10000) :
    h_pt.fill(gauss(mu_pt, sigma_pt))

# Initial guess and least-squares fit of the scaled Gaussian model.
p0 = [15., 10, 1000.] # mu, sigma, integral.
errfunc = ErrFunc(h_pt, fitfunc)
p0, success = optimize.leastsq(errfunc,
                               p0[:])
print 'Success:', success, 'p[0] =', p0[0], ' p[1] =', p0[1], ' p0[2] =', p0[2]

# Plot histogram with the fitted curve overlaid.
plot_pt = histo_plot(h_pt, color='green')
# plot the fit results on top.
fit=plot_pt.add_subplot(1,1,1)
pt = [bin.centre for bin in h_pt.filledBins()]
fitval = fitfunc(p0,pt)
fit.plot(pt, fitval, 'r')
plot_pt.show()
| juanchopanza/pyhistuples | examples/example_histo_fit.py | Python | lgpl-3.0 | 1,437 | [
"Gaussian"
] | 2f395e27fc5525ea8702b38559d0c9f6ccc07869ecf1de480b12c4c8fae42299 |
from unittest import TestCase
from galaxy.util.bunch import Bunch
from galaxy.jobs.output_checker import check_output
from galaxy.jobs.error_level import StdioErrorLevel
from galaxy.tools import ToolStdioRegex
class OutputCheckerTestCase( TestCase ):
    """Tests for galaxy.jobs.output_checker.check_output: default
    stdout/stderr handling, exit-code rules, and stdio regex rules."""

    def setUp( self ):
        # Minimal stand-ins for a tool and a job; individual tests append
        # rules to stdio_regexes / stdio_exit_codes as needed.
        self.tool = Bunch(
            stdio_regexes=[],
            stdio_exit_codes=[],
        )
        self.job = Bunch(
            stdout=None,
            stderr=None,
            get_id_tag=lambda: "test_id",
        )
        self.stdout = ''
        self.stderr = ''
        self.tool_exit_code = None

    def test_default_no_stderr_success( self ):
        self.__assertSuccessful()

    def test_default_stderr_failure( self ):
        # With no rules configured, any stderr content means failure.
        self.stderr = 'foo'
        self.__assertNotSuccessful()

    def test_exit_code_error( self ):
        mock_exit_code = Bunch( range_start=1, range_end=1, error_level=StdioErrorLevel.FATAL, desc=None )
        self.tool.stdio_exit_codes.append( mock_exit_code )
        self.tool_exit_code = 1
        self.__assertNotSuccessful()

    def test_exit_code_success( self ):
        mock_exit_code = Bunch( range_start=1, range_end=1, error_level=StdioErrorLevel.FATAL, desc=None )
        self.tool.stdio_exit_codes.append( mock_exit_code )
        self.tool_exit_code = 0
        self.__assertSuccessful()

    def test_problematic_strings_matching( self ):
        # A non-ASCII/invalid byte in stderr must still be matched by regex rules.
        problematic_str = '\x80abc'
        self.__add_regex( Bunch( match=r'.abc', stdout_match=False, stderr_match=True, error_level=StdioErrorLevel.FATAL, desc=None ) )
        self.stderr = problematic_str
        self.__assertNotSuccessful()

    def test_problematic_strings_not_matching( self ):
        problematic_str = '\x80abc'
        self.__add_regex( Bunch( match=r'.abcd', stdout_match=False, stderr_match=True, error_level=StdioErrorLevel.FATAL, desc=None ) )
        self.stderr = problematic_str
        self.__assertSuccessful()

    def test_stderr_regex_negative_match( self ):
        regex = ToolStdioRegex()
        regex.stderr_match = True
        regex.match = "foobar"
        self.__add_regex( regex )
        self.stderr = "foo"
        self.__assertSuccessful()

    def test_stderr_regex_positive_match( self ):
        regex = ToolStdioRegex()
        regex.stderr_match = True
        regex.match = "foo"
        self.__add_regex( regex )
        self.stderr = "foobar"
        self.__assertNotSuccessful()

    def test_stdout_ignored_for_stderr_regexes( self ):
        # A stderr-only rule must not be applied to stdout content.
        regex = ToolStdioRegex()
        regex.stderr_match = True
        regex.match = "foo"
        self.__add_regex( regex )
        self.stdout = "foobar"
        self.__assertSuccessful()

    def test_stderr_ignored_for_stdout_regexes( self ):
        # A stdout-only rule must not be applied to stderr content.
        regex = ToolStdioRegex()
        regex.stdout_match = True
        regex.match = "foo"
        self.__add_regex( regex )
        self.stderr = "foobar"
        self.__assertSuccessful()

    def __add_regex( self, regex ):
        # Helper: register a stdio regex rule on the mock tool.
        self.tool.stdio_regexes.append( regex )

    def __assertSuccessful( self ):
        self.assertTrue( self.__check_output() )

    def __assertNotSuccessful( self ):
        self.assertFalse( self.__check_output() )

    def __check_output( self ):
        # Run the function under test with the current mock state.
        return check_output( self.tool, self.stdout, self.stderr, self.tool_exit_code, self.job )
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/test/unit/jobs/test_job_output_checker.py | Python | gpl-3.0 | 3,321 | [
"Galaxy"
] | 03ddf190ad0bfaddb354f6fdbb96f79dcb266d3e570b58e0518f12db16084cc9 |
#!/usr/bin/python
import numpy as np
from scipy.integrate import odeint
import sys
# Physical constants and laser parameters (SI units).
ELEC_MASS = 9.10938356E-31      # electron mass [kg]
ELEC_CHARGE = -1.60217662E-19   # electron charge [C] (negative)
FUND_FREQ = 3.7474057E14        # fundamental frequency [Hz] (~800 nm given SP_LIGHT below)
SP_LIGHT = 3E8                  # speed of light [m/s]
FOCUS_RADIUS = 30E-6            # laser focus radius [m]
PL_FWHM = 5E-15                 # pulse length FWHM [s]
PULSE_ENERGY = 0.6E-3           # pulse energy [J]
EPSILON_o = 8.85418782E-12      # vacuum permittivity [F/m]
TIME_GRID = 5000                # number of time samples used by the ODE solver
INTENSITY = 1.88*(PULSE_ENERGY/(FOCUS_RADIUS**2*PL_FWHM))/np.pi #gaussian
FIELD_AMP = np.sqrt(2*INTENSITY/(EPSILON_o*SP_LIGHT))  # peak field amplitude [V/m]
FIELD_TOLERANCE = FIELD_AMP*1E-3  # below this the field is treated as effectively off
def chop_start_end(vector, closeness):
    '''Cut the given vector to the following specs:
    FROM the second time the value drops below `closeness`
    TO the second point of zero derivative after that.
    Used to chop trajectories at their first collision with the
    atom, but leave large room at the ends so that it can be
    plotted if need be. Returns array([inf]) when either landmark
    cannot be found.'''
    crossings = np.where(np.diff(vector < closeness))[0]
    if len(crossings) <= 1:
        return np.array([np.inf])
    start = crossings[1]
    slope_flips = np.where(np.diff(np.sign(np.ediff1d(vector[start:]))))[0]
    if len(slope_flips) <= 1:
        return np.array([np.inf])
    stop = slope_flips[1]
    return vector[start:start + stop]
def find_end(field, start, end, tol):
    """March forward from `end` in fixed steps until the combined field
    magnitude sqrt(Ey^2 + Ez^2) drops below `tol`; return that time.

    field is a pair of callables [y_component, z_component] of time.
    """
    step = (end - start) * 10 / TIME_GRID
    t = end
    while np.sqrt(field[0](t) ** 2 + field[1](t) ** 2) > tol:
        t += step
    return t
def solve_path(field, start_i, end_i, optimize_collision=True,
               pulsed=True, closeness=1, *args):
    '''Solves the eqn for the electron path, returns closest approach
    Args:
        field (list, [y_comp, z_comp]),
        start_ionize_time, end_ionize time,
        bool (optimize collision?), bool (pulsed?)
    Args(optional):
        args[0] = cycle length of CW light
        (compulsory if optimize_collision and not pulsed)
    TODO: change time grid as a function input'''
    # Equations of motion: one 2nd-order ODE per transverse direction,
    # written as coupled 1st-order systems [position, velocity].
    def elec_path_y(x, t):
        path, velocity = x
        dydt = [velocity, ELEC_CHARGE*field[0](t)/ELEC_MASS]
        return dydt

    def elec_path_z(x, t):
        path, velocity = x
        dzdt = [velocity, ELEC_CHARGE*field[1](t)/ELEC_MASS]
        return dzdt

    # Electron born at rest at the origin.
    initial_cond = [0., 0.]
    if optimize_collision:
        hard_end = 0
        min_list = []
        # Integration horizon: for pulses, march until the field dies off;
        # for CW, integrate a fixed 20 fundamental cycles past start.
        if pulsed:
            hard_end = find_end(field, start_i, end_i, FIELD_TOLERANCE)
        else:
            #hard_end = 6*PL_FWHM
            hard_end = start_i + 20.0/(FUND_FREQ)
        interval = (hard_end - start_i)/(TIME_GRID - 1)
        t = np.linspace(start_i, hard_end, TIME_GRID)
        # Only consider ionization (start) times inside [start_i, end_i).
        t_chopped = t[:np.argmax(t >= end_i)]
        #print(start_i, end_i, hard_end, (t > end_i))
        sol_y = odeint(elec_path_y, initial_cond, t)
        sol_z = odeint(elec_path_z, initial_cond, t)
        start_e = start_i
        for i in range(len(t_chopped)):
            '''Optimize over multiple start times'''
            # Shift the trajectory so the electron starts at rest at the
            # origin at ionization time t[i] (subtract drift + offset).
            a_y = -1*sol_y[i,1]
            b_y = -1*(sol_y[i,0] + a_y*t[i])
            a_z = -1*sol_z[i,1]
            b_z = -1*(sol_z[i,0] + a_z*t[i])
            y_pos = sol_y[:,0] + a_y*t + b_y
            z_pos = sol_z[:,0] + a_z*t + b_z
            y_vel = sol_y[:,1] + a_y
            z_vel = sol_z[:,1] + a_z
            #end
            #dist = np.sqrt(sol_y[i:, 0]**2 + sol_z[i:, 0]**2)
            # NOTE(review): len(...) >= 0 always, so "> -1" is always true
            # and every start time is recorded -- verify intended condition.
            if len(np.where(np.diff(np.sign(y_pos[i:])))[0]) > -1:
                #print("Whaaa!!", sol_y[i:,0])
                #ret_ind = np.where(np.diff(np.sign(sol_y[i:,0])))[0][1]
                # Final drift kinetic energy for this ionization time.
                kin_energy = 0.5*ELEC_MASS*(y_vel[-1]**2 + z_vel[-1]**2)
                #print(kin_energy)
                '''dist = chop_start_end(np.sqrt(sol_y[i:, 0]**2 + sol_z[i:, 0]**2), closeness)'''
                '''min_list.append((np.amin(dist), start_i, kin_energy))'''
                min_list.append((kin_energy, start_e))
            start_e += interval
        minimas = np.array(min_list)
        # min_start = minimas[np.argmin(minimas, axis=0)[0], 1]
        # t = np.linspace(min_start, hard_end, TIME_GRID)
        # sol_y = odeint(elec_path_y, initial_cond, t, full_output=0)
        # sol_z = odeint(elec_path_z, initial_cond, t, full_output=0)
        # dist = chop_start_end(np.sqrt(sol_y[:, 0]**2 + sol_z[:, 0]**2), closeness)
        # dis2 = np.sqrt(sol_y[:, 0]**2 + sol_z[:, 0]**2)
        # return [min_start, sol_y[:,0], sol_z[:,0],dist, dis2]
        return minimas
    else:
        # Single trajectory over [start_i, end_i]; return raw paths.
        t = np.linspace(start_i, end_i, TIME_GRID)
        sol_y = odeint(elec_path_y, initial_cond, t)
        sol_z = odeint(elec_path_z, initial_cond, t)
        return [0, sol_y[:,0], sol_z[:,0]]
return [0, sol_y[:,0], sol_z[:,0]] | KavuriG/classical-calc-three-color | ponderomotive-test/odeint_solve.py | Python | gpl-3.0 | 4,828 | [
"Gaussian"
] | 82d0071e6eb455ab0dfb38fe581421c53319f7291335a40cfc8e42ba7b0997e4 |
#!/usr/bin/env python
"""
Returns the calib stars for given dataIds
"""
import os, re
import numpy as np
import lsst.afw.table as afwTable
import lsst.daf.persistence as dafPer
import lsst.pex.exceptions
import hsc.tools.bick.utils as hscUtil
import astropy.table
def getMag(flux, fluxerr, zeropoint):
    """
    return the magnitude and error
    """
    # m = -2.5 log10(F); the error follows from first-order propagation.
    mag = -2.5 * np.log10(flux)
    magerr = (2.5 / np.log(10.0)) * fluxerr / flux
    # Transpose so a per-row zeropoint array broadcasts correctly.
    return (mag.T + zeropoint).T, magerr
def getEllipse(quad):
    """
    returns the semi-major axis, axes ratio and PA for a given quadrupole moment
    """
    axes = lsst.afw.geom.ellipses.Axes(quad)
    semi_major = axes.getA()
    # Axis ratio b/a and position angle converted from radians to degrees.
    return semi_major, axes.getB() / semi_major, axes.getTheta() * 180.0 / np.pi
def getAstroTable(src, mags=True, nameList=None, zeropoint=0.0):
    """
    returns an astropy table with all the src entries
    if the entries are complex objects, it breaks them down:
    ellipse entries are broken into
    ellipse_a = semi-major axis
    ellipse_q = axis ratio (always < 1)
    ellipse_theta = rotation of semi-major axis from chip x-axis in degrees
    if mags is True, returns the magnitudes for all the flux columns
    """
    tab = astropy.table.Table()
    # Default to copying every column in the source catalog's schema.
    allNames = nameList if nameList is not None else src.schema.getNames()
    for name in allNames:
        #for reasons I don't understand a lookup by name is much slower than a lookup by key
        nameKey = src.schema.find(name).getKey()
        try:
            # Fast path: columnar get works for simple (numeric) fields.
            tab.add_column(astropy.table.Column(name=name,
                                                data=src.get(nameKey)))
        except lsst.pex.exceptions.LsstException:
            # Complex field types must be decomposed element by element.
            if type(src[0].get(nameKey)) is lsst.afw.geom.ellipses.ellipsesLib.Quadrupole:
                # Ellipse -> semi-major axis, axis ratio, position angle.
                reff, q, theta = zip(*[getEllipse(s.get(nameKey)) for s in src])
                tab.add_column(astropy.table.Column(name=name+'_a', data=reff))
                tab.add_column(astropy.table.Column(name=name+'_q', data=q))
                tab.add_column(astropy.table.Column(name=name+'_theta', data=theta))
            elif type(src[0].get(nameKey)) is lsst.afw.coord.coordLib.IcrsCoord:
                # Coordinate -> RA/Dec in degrees.
                x, y = zip(*[(s.get(nameKey).getRa().asDegrees(),
                              s.get(nameKey).getDec().asDegrees()) for s in src])
                tab.add_column(astropy.table.Column(name=name+'_ra', data=x))
                tab.add_column(astropy.table.Column(name=name+'_dec', data=y))
            else:
                tab.add_column(astropy.table.Column(name=name,
                                                    data=np.array([s.get(nameKey) for s in src])))
            #report angles in degrees
            if isinstance(src[0].get(nameKey), lsst.afw.geom.Angle):
                tab.remove_column(name)
                tab.add_column(astropy.table.Column(data=[s.get(nameKey).asDegrees()
                                                          for s in src],
                                                    dtype=float, name=name))
    if mags:
        #this is a horrible hack, but I don't think we can use the slots, since
        #not all the fluxes end up in the slots
        for col in tab.colnames:
            if (re.match('^flux\.[a-z]+$', col) or
                    re.match('^flux\.[a-z]+.apcorr$', col) or
                    re.match('^cmodel.+flux$', col) or
                    re.match('^cmodel.+flux.apcorr$', col)):
                # Prefer a per-row zeropoint column if present, else the
                # scalar argument; apcorr fluxes get zeropoint 0.
                try:
                    zp = tab['zeropoint']
                except:
                    zp = zeropoint
                zp = 0.0 if re.search('apcorr', col) else zp
                mag, magerr = getMag(tab[col], tab[col+'.err'], zp)
                tab.add_column(astropy.table.Column(name=re.sub('flux', 'mag', col),
                                                    data=mag))
                tab.add_column(astropy.table.Column(name=re.sub('flux', 'mag', col+'.err'),
                                                    data=magerr))
    return tab
def main(rerun, dataIds, root):
doCoadd = 'tract' in dataIds[0].keys()
butler = dafPer.Butler(os.path.join(root, "rerun", rerun))
out_tab = None
for dataId in dataIds:
print dataId
try:
src = butler.get("deepCoadd_src" if doCoadd else "src",
dataId, immediate=True)
except:
print "no source catalog for: ",repr(dataId)
continue
isPsfStar = src.get('calib.psf.candidate')
isPsfReserved = src.get('calib.psf.reserved')
print butler.get("calexp", dataId).getCalib().getFluxMag0()
cutsrc = src[isPsfStar | isPsfReserved]
cutsrctab = getAstroTable(cutsrc, nameList=['id', 'coord',
'parent', 'deblend.nchild',
'flags.pixel.edge',
'flags.pixel.interpolated.any',
'shape.sdss',
'classification.extendedness',
'calib.psf.used',
'calib.psf.candidate',
'calib.psf.reserved',
'flux.psf', 'flux.gaussian',
'flux.psf.err', 'flux.gaussian.err'],
zeropoint = 2.5*np.log10(butler.get("calexp", dataId).getCalib().getFluxMag0()[0]))
if out_tab is None:
out_tab = astropy.table.Table(cutsrctab, copy=True)
else:
out_tab = astropy.table.vstack([out_tab, cutsrctab], join_type='exact')
out_tab.write("calib_psf.fits", overwrite=True)
if __name__=='__main__':
    import argparse

    # Command-line front end: collect data references, then dump the
    # PSF-star calibration catalog via main().
    parser = argparse.ArgumentParser()
    parser.add_argument("rerun")
    parser.add_argument("visits", help="visits or tracts")
    parser.add_argument("ccds", help="CCDs or patches, separated by ^")
    parser.add_argument("-f", "--filt",
                        default=None, help="filter, only set for tract/patches")
    parser.add_argument("-R", "--root", default="/lustre/Subaru/SSP")
    args = parser.parse_args()

    # expand the ^-separated id lists
    visits = hscUtil.idSplit(args.visits)
    ccds = hscUtil.idSplit(args.ccds)

    if args.filt is None:
        # visit/CCD data references
        dataIds = [{'visit': visitId, 'ccd': ccdId}
                   for visitId in visits for ccdId in ccds]
    else:
        # coadd data references: "visits" are tracts, "ccds" are patches
        dataIds = [{'tract': tractId, 'patch': patchId, 'filter': args.filt}
                   for patchId in ccds for tractId in visits]

    main(args.rerun, dataIds, root=args.root)
| surhudm/hscTools | clackner/bin/getCalibStars.py | Python | gpl-3.0 | 6,716 | [
"Gaussian",
"VisIt"
] | 9f04bc9e1d93661ce8300ff5b55ae6f4586efa27c80908aa83bd6405f1a5cc7e |
###############################################################################
##
## Copyright (C) 2011-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
import sys
PY3 = sys.version_info >= (3,)
__all__ = ["createWsUrl",
"parseWsUrl",
"ConnectionRequest",
"ConnectionResponse",
"Timings",
"WebSocketProtocol",
"WebSocketFactory",
"WebSocketServerProtocol",
"WebSocketServerFactory",
"WebSocketClientProtocol",
"WebSocketClientFactory"]
## The Python urlparse module currently does not contain the ws/wss
## schemes, so we add those dynamically (which is a hack of course).
##
try:
   import urlparse
except ImportError:
   ## Python 3: the module moved to urllib.parse.
   ## (was a bare "except:", which would also have masked unrelated errors)
   from urllib import parse as urlparse

wsschemes = ["ws", "wss"]
urlparse.uses_relative.extend(wsschemes)
urlparse.uses_netloc.extend(wsschemes)
urlparse.uses_params.extend(wsschemes)
urlparse.uses_query.extend(wsschemes)
urlparse.uses_fragment.extend(wsschemes)
import urllib
import binascii
import hashlib
import base64
import struct
import random
import os
import pickle
import copy
import json
from pprint import pformat
from collections import deque
from zope.interface import implementer
from autobahn import __version__
from autobahn.websocket.interfaces import IWebSocketChannel, \
IWebSocketChannelFrameApi, \
IWebSocketChannelStreamingApi
from autobahn.util import Stopwatch
from autobahn.websocket.utf8validator import Utf8Validator
from autobahn.websocket.xormasker import XorMaskerNull, createXorMasker
from autobahn.websocket.compress import *
from autobahn.websocket import http
def createWsUrl(hostname, port = None, isSecure = False, path = None, params = None):
   """
   Create a WebSocket URL from components.

   :param hostname: WebSocket server hostname.
   :type hostname: str
   :param port: WebSocket service port or None (to select default ports 80/443 depending on isSecure).
   :type port: int
   :param isSecure: Set True for secure WebSocket ("wss" scheme).
   :type isSecure: bool
   :param path: Path component of addressed resource (will be properly URL escaped).
   :type path: str
   :param params: A dictionary of key-values to construct the query component of the addressed resource (will be properly URL escaped).
   :type params: dict

   :returns: str -- Constructed WebSocket URL.
   """
   ## FIX: urllib.quote/urlencode only exist on Python 2; on Python 3 they
   ## live in urllib.parse. Resolve the right implementations here so this
   ## works on both major versions (the module advertises PY3 support).
   try:
      from urllib.parse import quote, urlencode    ## Python 3
   except ImportError:
      from urllib import quote, urlencode          ## Python 2

   if port is not None:
      netloc = "%s:%d" % (hostname, port)
   else:
      ## no explicit port: spell out the scheme's default port
      netloc = "%s:443" % hostname if isSecure else "%s:80" % hostname

   scheme = "wss" if isSecure else "ws"
   ppath = quote(path) if path is not None else "/"
   query = urlencode(params) if params is not None else None

   return urlparse.urlunparse((scheme, netloc, ppath, None, query, None))
def parseWsUrl(url):
   """
   Parses a WebSocket URL into its components and returns a tuple
   (isSecure, host, port, resource, path, params).

   isSecure is a flag which is True for wss URLs.
   host is the hostname or IP from the URL.
   port is the port from the URL or standard port derived from scheme (ws = 80, wss = 443).
   resource is the /resource name/ from the URL, the /path/ together with the (optional) /query/ component.
   path is the /path/ component properly unescaped.
   params is the /query/ component properly unescaped and returned as dictionary.

   :param url: A valid WebSocket URL, i.e. `ws://localhost:9000/myresource?param1=23&param2=666`
   :type url: str

   :returns: tuple -- A tuple (isSecure, host, port, resource, path, params)
   """
   ## FIX: urllib.unquote only exists on Python 2; on Python 3 it lives in
   ## urllib.parse. Resolve the right implementation for both versions.
   try:
      from urllib.parse import unquote    ## Python 3
   except ImportError:
      from urllib import unquote          ## Python 2

   parsed = urlparse.urlparse(url)
   if parsed.scheme not in ["ws", "wss"]:
      raise Exception("invalid WebSocket scheme '%s'" % parsed.scheme)

   if parsed.port is None or parsed.port == "":
      ## no explicit port: fall back to the scheme's default
      if parsed.scheme == "ws":
         port = 80
      else:
         port = 443
   else:
      port = int(parsed.port)

   ## WebSocket URLs must not carry a fragment (RFC 6455)
   if parsed.fragment is not None and parsed.fragment != "":
      ## FIX: added the missing closing quote in the error message
      raise Exception("invalid WebSocket URL: non-empty fragment '%s'" % parsed.fragment)

   if parsed.path is not None and parsed.path != "":
      ppath = parsed.path
      path = unquote(ppath)
   else:
      ppath = "/"
      path = ppath

   if parsed.query is not None and parsed.query != "":
      resource = ppath + "?" + parsed.query
      params = urlparse.parse_qs(parsed.query)
   else:
      resource = ppath
      params = {}

   return (parsed.scheme == "wss", parsed.hostname, port, resource, path, params)
class TrafficStats:
   """
   Per-connection WebSocket traffic statistics (octet counts on several
   protocol levels plus derived compression ratio / overhead figures).
   """

   def __init__(self):
      self.reset()

   def reset(self):
      """
      Zero out all counters.
      """
      ## the first group only tracks data messages - not control frames,
      ## not handshaking; the "preopen" counters track any traffic before
      ## the WebSocket connection reaches STATE_OPEN (opening handshake,
      ## proxy handling and such)
      for counter in ('outgoingOctetsWireLevel',
                      'outgoingOctetsWebSocketLevel',
                      'outgoingOctetsAppLevel',
                      'outgoingWebSocketFrames',
                      'outgoingWebSocketMessages',
                      'incomingOctetsWireLevel',
                      'incomingOctetsWebSocketLevel',
                      'incomingOctetsAppLevel',
                      'incomingWebSocketFrames',
                      'incomingWebSocketMessages',
                      'preopenOutgoingOctetsWireLevel',
                      'preopenIncomingOctetsWireLevel'):
         setattr(self, counter, 0)

   def __json__(self):
      def ratio(numerator, denominator):
         ## None when the denominator is zero (no traffic yet)
         if denominator > 0:
            return float(numerator) / float(denominator)
         return None

      ## compression ratio = compressed size / uncompressed size
      ## protocol overhead = non-payload size / payload size
      return {'outgoingOctetsWireLevel': self.outgoingOctetsWireLevel,
              'outgoingOctetsWebSocketLevel': self.outgoingOctetsWebSocketLevel,
              'outgoingOctetsAppLevel': self.outgoingOctetsAppLevel,
              'outgoingCompressionRatio': ratio(self.outgoingOctetsWebSocketLevel, self.outgoingOctetsAppLevel),
              'outgoingWebSocketOverhead': ratio(self.outgoingOctetsWireLevel - self.outgoingOctetsWebSocketLevel, self.outgoingOctetsWebSocketLevel),
              'outgoingWebSocketFrames': self.outgoingWebSocketFrames,
              'outgoingWebSocketMessages': self.outgoingWebSocketMessages,
              'preopenOutgoingOctetsWireLevel': self.preopenOutgoingOctetsWireLevel,
              'incomingOctetsWireLevel': self.incomingOctetsWireLevel,
              'incomingOctetsWebSocketLevel': self.incomingOctetsWebSocketLevel,
              'incomingOctetsAppLevel': self.incomingOctetsAppLevel,
              'incomingCompressionRatio': ratio(self.incomingOctetsWebSocketLevel, self.incomingOctetsAppLevel),
              'incomingWebSocketOverhead': ratio(self.incomingOctetsWireLevel - self.incomingOctetsWebSocketLevel, self.incomingOctetsWebSocketLevel),
              'incomingWebSocketFrames': self.incomingWebSocketFrames,
              'incomingWebSocketMessages': self.incomingWebSocketMessages,
              'preopenIncomingOctetsWireLevel': self.preopenIncomingOctetsWireLevel}

   def __str__(self):
      return json.dumps(self.__json__())
class FrameHeader:
   """
   Lightweight container for the metadata of a single WebSocket frame.

   FOR INTERNAL USE ONLY!
   """

   def __init__(self, opcode, fin, rsv, length, mask):
      """
      Constructor.

      :param opcode: Frame opcode (0-15).
      :type opcode: int
      :param fin: Frame FIN flag.
      :type fin: bool
      :param rsv: Frame reserved flags (0-7).
      :type rsv: int
      :param length: Frame payload length.
      :type length: int
      :param mask: Frame mask (binary string) or None.
      :type mask: str
      """
      ## stash all frame metadata verbatim under matching attribute names
      for name, value in zip(('opcode', 'fin', 'rsv', 'length', 'mask'),
                             (opcode, fin, rsv, length, mask)):
         setattr(self, name, value)
class ConnectionRequest:
   """
   Thin-wrapper for WebSocket connection request information provided in
   :meth:`autobahn.websocket.protocol.WebSocketServerProtocol.onConnect` when
   a WebSocket client wants to establish a connection to a WebSocket server.
   """

   def __init__(self, peer, headers, host, path, params, version, origin, protocols, extensions):
      """
      Constructor.

      :param peer: Descriptor of the connecting client (eg IP address/port in case of TCP transports).
      :type peer: str
      :param headers: HTTP headers from opening handshake request.
      :type headers: dict
      :param host: Host from opening handshake HTTP header.
      :type host: str
      :param path: Path from requested HTTP resource URI. For example, a resource URI of `/myservice?foo=23&foo=66&bar=2` will be parsed to `/myservice`.
      :type path: str
      :param params: Query parameters (if any) from requested HTTP resource URI. For example, a resource URI of `/myservice?foo=23&foo=66&bar=2` will be parsed to `{'foo': ['23', '66'], 'bar': ['2']}`.
      :type params: dict of arrays of strings
      :param version: The WebSocket protocol version the client announced (and will be spoken, when connection is accepted).
      :type version: int
      :param origin: The WebSocket origin header or None. Note that this only a reliable source of information for browser clients!
      :type origin: str
      :param protocols: The WebSocket (sub)protocols the client announced. You must select and return one of those (or None) in :meth:`autobahn.websocket.WebSocketServerProtocol.onConnect`.
      :type protocols: array of strings
      :param extensions: The WebSocket extensions the client requested and the server accepted (and thus will be spoken, when WS connection is established).
      :type extensions: array of strings
      """
      self.peer = peer
      self.headers = headers
      self.host = host
      self.path = path
      self.params = params
      self.version = version
      self.origin = origin
      self.protocols = protocols
      self.extensions = extensions

   def __json__(self):
      ## serialize all public attributes (order matches the constructor)
      return dict((attr, getattr(self, attr)) for attr in
                  ('peer', 'headers', 'host', 'path', 'params',
                   'version', 'origin', 'protocols', 'extensions'))

   def __str__(self):
      return json.dumps(self.__json__())
class ConnectionResponse:
   """
   Thin-wrapper for WebSocket connection response information provided in
   :meth:`autobahn.websocket.protocol.WebSocketClientProtocol.onConnect` when
   a WebSocket server has accepted a connection request by a client.
   """

   def __init__(self, peer, headers, version, protocol, extensions):
      """
      Constructor.

      :param peer: Descriptor of the connected server (e.g. IP address/port in case of TCP transport).
      :type peer: str
      :param headers: HTTP headers from opening handshake response.
      :type headers: dict
      :param version: The WebSocket protocol version that is spoken.
      :type version: int
      :param protocol: The WebSocket (sub)protocol in use.
      :type protocol: str
      :param extensions: The WebSocket extensions in use.
      :type extensions: array of strings
      """
      self.peer = peer
      self.headers = headers
      self.version = version
      self.protocol = protocol
      self.extensions = extensions

   def __json__(self):
      ## serialize all public attributes (order matches the constructor)
      return dict((attr, getattr(self, attr)) for attr in
                  ('peer', 'headers', 'version', 'protocol', 'extensions'))

   def __str__(self):
      return json.dumps(self.__json__())
def parseHttpHeader(data):
   """
   Parse the beginning of an HTTP request header (the data up to the
   blank line) into the status line and a headers dictionary.

   Header keys are normalized to all-lower-case; values of repeated keys
   are joined with ", ". A count of occurrences per key is also returned.

   FOR INTERNAL USE ONLY!

   :param data: The raw HTTP header data up to the blank line.
   :type data: bytes

   :returns: tuple -- Tuple of HTTP status line, headers and headers count.
   """
   lines = data.decode('utf8').splitlines()
   status_line = lines[0].strip()

   headers = {}
   counts = {}
   for line in lines[1:]:
      sep = line.find(":")
      if sep <= 0:
         ## not a well-formed "Key: value" line - skip bad HTTP header
         continue
      ## HTTP header keys are case-insensitive
      key = line[:sep].strip().lower()
      ## not sure if UTF-8 is allowed for HTTP header values..
      value = line[sep + 1:].strip()
      if key in headers:
         ## repeated header: merge values, bump the occurrence count
         headers[key] = "%s, %s" % (headers[key], value)
         counts[key] += 1
      else:
         headers[key] = value
         counts[key] = 1

   return (status_line, headers, counts)
class Timings:
   """
   Helper class to track timings by key. This class also supports item access,
   iteration and conversion to string.
   """

   def __init__(self):
      ## Stopwatch comes from autobahn.util (module-level import)
      self._stopwatch = Stopwatch()
      self._timings = {}

   def track(self, key):
      """
      Track elapsed for key.

      :param key: Key under which to track the timing.
      :type key: str
      """
      self._timings[key] = self._stopwatch.elapsed()

   def diff(self, startKey, endKey, format = True):
      """
      Get elapsed difference between two previously tracked keys.

      :param startKey: First key for interval (older timestamp).
      :type startKey: str
      :param endKey: Second key for interval (younger timestamp).
      :type endKey: str
      :param format: If `True`, format computed time period and return string.
      :type format: bool

      :returns: float or str -- Computed time period in seconds (or formatted string).
      """
      if endKey in self._timings and startKey in self._timings:
         d = self._timings[endKey] - self._timings[startKey]
         if format:
            ## pick a unit so the number stays readable; pad to fixed width
            if d < 0.00001: # 10us
               s = "%d ns" % round(d * 1000000000.)
            elif d < 0.01: # 10ms
               s = "%d us" % round(d * 1000000.)
            elif d < 10: # 10s
               s = "%d ms" % round(d * 1000.)
            else:
               s = "%d s" % round(d)
            return s.rjust(8)
         else:
            return d
      else:
         if format:
            return "n.a.".rjust(8)
         else:
            return None

   def __getitem__(self, key):
      ## item access returns the tracked value, or None for unknown keys
      return self._timings.get(key, None)

   def __iter__(self):
      ## FIX: was `self._timings.__iter__(self)`, which passes `self` to an
      ## already-bound method and raised TypeError on any iteration attempt.
      return iter(self._timings)

   def __str__(self):
      return pformat(self._timings)
@implementer(IWebSocketChannel)
@implementer(IWebSocketChannelFrameApi)
@implementer(IWebSocketChannelStreamingApi)
class WebSocketProtocol:
"""
Protocol base class for WebSocket.
This class implements:
* :class:`autobahn.websocket.interfaces.IWebSocketChannel`
* :class:`autobahn.websocket.interfaces.IWebSocketChannelFrameApi`
* :class:`autobahn.websocket.interfaces.IWebSocketChannelStreamingApi`
"""
SUPPORTED_SPEC_VERSIONS = [0, 10, 11, 12, 13, 14, 15, 16, 17, 18]
"""
WebSocket protocol spec (draft) versions supported by this implementation.
Use of version 18 indicates RFC6455. Use of versions < 18 indicate actual
draft spec versions (Hybi-Drafts). Use of version 0 indicates Hixie-76.
"""
SUPPORTED_PROTOCOL_VERSIONS = [0, 8, 13]
"""
WebSocket protocol versions supported by this implementation. For Hixie-76,
there is no protocol version announced in HTTP header, and we just use the
draft version (0) in this case.
"""
SPEC_TO_PROTOCOL_VERSION = {0: 0, 10: 8, 11: 8, 12: 8, 13: 13, 14: 13, 15: 13, 16: 13, 17: 13, 18: 13}
"""
Mapping from protocol spec (draft) version to protocol version. For Hixie-76,
there is no protocol version announced in HTTP header, and we just use the
pseudo protocol version 0 in this case.
"""
PROTOCOL_TO_SPEC_VERSION = {0: 0, 8: 12, 13: 18}
"""
Mapping from protocol version to the latest protocol spec (draft) version
using that protocol version. For Hixie-76, there is no protocol version
announced in HTTP header, and we just use the draft version (0) in this case.
"""
DEFAULT_SPEC_VERSION = 18
"""
Default WebSocket protocol spec version this implementation speaks: final RFC6455.
"""
DEFAULT_ALLOW_HIXIE76 = False
"""
By default, this implementation will not allow to speak the obsoleted
Hixie-76 protocol version. That protocol version has security issues, but
is still spoken by some clients. Enable at your own risk! Enabling can be
done by using setProtocolOptions() on the factories for clients and servers.
"""
_WS_MAGIC = b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
"""
Protocol defined magic used during WebSocket handshake (used in Hybi-drafts
and final RFC6455.
"""
_QUEUED_WRITE_DELAY = 0.00001
"""
For synched/chopped writes, this is the reactor reentry delay in seconds.
"""
MESSAGE_TYPE_TEXT = 1
"""
WebSocket text message type (UTF-8 payload).
"""
MESSAGE_TYPE_BINARY = 2
"""
WebSocket binary message type (arbitrary binary payload).
"""
## WebSocket protocol state:
## (STATE_PROXY_CONNECTING) => STATE_CONNECTING => STATE_OPEN => STATE_CLOSING => STATE_CLOSED
##
STATE_CLOSED = 0
STATE_CONNECTING = 1
STATE_CLOSING = 2
STATE_OPEN = 3
STATE_PROXY_CONNECTING = 4
## Streaming Send State
SEND_STATE_GROUND = 0
SEND_STATE_MESSAGE_BEGIN = 1
SEND_STATE_INSIDE_MESSAGE = 2
SEND_STATE_INSIDE_MESSAGE_FRAME = 3
## WebSocket protocol close codes
##
CLOSE_STATUS_CODE_NORMAL = 1000
"""Normal close of connection."""
CLOSE_STATUS_CODE_GOING_AWAY = 1001
"""Going away."""
CLOSE_STATUS_CODE_PROTOCOL_ERROR = 1002
"""Protocol error."""
CLOSE_STATUS_CODE_UNSUPPORTED_DATA = 1003
"""Unsupported data."""
CLOSE_STATUS_CODE_RESERVED1 = 1004
"""RESERVED"""
CLOSE_STATUS_CODE_NULL = 1005 # MUST NOT be set in close frame!
"""No status received. (MUST NOT be used as status code when sending a close)."""
CLOSE_STATUS_CODE_ABNORMAL_CLOSE = 1006 # MUST NOT be set in close frame!
"""Abnormal close of connection. (MUST NOT be used as status code when sending a close)."""
CLOSE_STATUS_CODE_INVALID_PAYLOAD = 1007
"""Invalid frame payload data."""
CLOSE_STATUS_CODE_POLICY_VIOLATION = 1008
"""Policy violation."""
CLOSE_STATUS_CODE_MESSAGE_TOO_BIG = 1009
"""Message too big."""
CLOSE_STATUS_CODE_MANDATORY_EXTENSION = 1010
"""Mandatory extension."""
CLOSE_STATUS_CODE_INTERNAL_ERROR = 1011
"""The peer encountered an unexpected condition or internal error."""
CLOSE_STATUS_CODE_TLS_HANDSHAKE_FAILED = 1015 # MUST NOT be set in close frame!
"""TLS handshake failed, i.e. server certificate could not be verified. (MUST NOT be used as status code when sending a close)."""
CLOSE_STATUS_CODES_ALLOWED = [CLOSE_STATUS_CODE_NORMAL,
CLOSE_STATUS_CODE_GOING_AWAY,
CLOSE_STATUS_CODE_PROTOCOL_ERROR,
CLOSE_STATUS_CODE_UNSUPPORTED_DATA,
CLOSE_STATUS_CODE_INVALID_PAYLOAD,
CLOSE_STATUS_CODE_POLICY_VIOLATION,
CLOSE_STATUS_CODE_MESSAGE_TOO_BIG,
CLOSE_STATUS_CODE_MANDATORY_EXTENSION,
CLOSE_STATUS_CODE_INTERNAL_ERROR]
"""Status codes allowed to send in close."""
CONFIG_ATTRS_COMMON = ['debug',
'debugCodePaths',
'logOctets',
'logFrames',
'trackTimings',
'allowHixie76',
'utf8validateIncoming',
'applyMask',
'maxFramePayloadSize',
'maxMessagePayloadSize',
'autoFragmentSize',
'failByDrop',
'echoCloseCodeReason',
'openHandshakeTimeout',
'closeHandshakeTimeout',
'tcpNoDelay']
"""
Configuration attributes common to servers and clients.
"""
CONFIG_ATTRS_SERVER = ['versions',
'webStatus',
'requireMaskedClientFrames',
'maskServerFrames',
'perMessageCompressionAccept']
"""
Configuration attributes specific to servers.
"""
CONFIG_ATTRS_CLIENT = ['version',
'acceptMaskedServerFrames',
'maskClientFrames',
'serverConnectionDropTimeout',
'perMessageCompressionOffers',
'perMessageCompressionAccept']
"""
Configuration attributes specific to clients.
"""
def onOpen(self):
"""
Implements :func:`autobahn.websocket.interfaces.IWebSocketChannel.onOpen`
"""
if self.debugCodePaths:
self.factory._log("WebSocketProtocol.onOpen")
def onMessageBegin(self, isBinary):
"""
Implements :func:`autobahn.websocket.interfaces.IWebSocketChannel.onMessageBegin`
"""
self.message_is_binary = isBinary
self.message_data = []
self.message_data_total_length = 0
def onMessageFrameBegin(self, length):
"""
Implements :func:`autobahn.websocket.interfaces.IWebSocketChannel.onMessageFrameBegin`
"""
self.frame_length = length
self.frame_data = []
self.message_data_total_length += length
if not self.failedByMe:
if self.maxMessagePayloadSize > 0 and self.message_data_total_length > self.maxMessagePayloadSize:
self.wasMaxMessagePayloadSizeExceeded = True
self.failConnection(WebSocketProtocol.CLOSE_STATUS_CODE_MESSAGE_TOO_BIG, "message exceeds payload limit of %d octets" % self.maxMessagePayloadSize)
elif self.maxFramePayloadSize > 0 and length > self.maxFramePayloadSize:
self.wasMaxFramePayloadSizeExceeded = True
self.failConnection(WebSocketProtocol.CLOSE_STATUS_CODE_POLICY_VIOLATION, "frame exceeds payload limit of %d octets" % self.maxFramePayloadSize)
def onMessageFrameData(self, payload):
"""
Implements :func:`autobahn.websocket.interfaces.IWebSocketChannel.onMessageFrameData`
"""
if not self.failedByMe:
if self.websocket_version == 0:
self.message_data_total_length += len(payload)
if self.maxMessagePayloadSize > 0 and self.message_data_total_length > self.maxMessagePayloadSize:
self.wasMaxMessagePayloadSizeExceeded = True
self.failConnection(WebSocketProtocol.CLOSE_STATUS_CODE_MESSAGE_TOO_BIG, "message exceeds payload limit of %d octets" % self.maxMessagePayloadSize)
self.message_data.append(payload)
else:
self.frame_data.append(payload)
def onMessageFrameEnd(self):
"""
Implements :func:`autobahn.websocket.interfaces.IWebSocketChannel.onMessageFrameEnd`
"""
if not self.failedByMe:
self._onMessageFrame(self.frame_data)
self.frame_data = None
def onMessageFrame(self, payload):
"""
Implements :func:`autobahn.websocket.interfaces.IWebSocketChannel.onMessageFrame`
"""
if not self.failedByMe:
self.message_data.extend(payload)
def onMessageEnd(self):
"""
Implements :func:`autobahn.websocket.interfaces.IWebSocketChannel.onMessageEnd`
"""
if not self.failedByMe:
payload = b''.join(self.message_data)
if self.trackedTimings:
self.trackedTimings.track("onMessage")
self._onMessage(payload, self.message_is_binary)
self.message_data = None
def onMessage(self, payload, isBinary):
"""
Implements :func:`autobahn.websocket.interfaces.IWebSocketChannel.onMessage`
"""
if self.debug:
self.factory._log("WebSocketProtocol.onMessage")
def onPing(self, payload):
"""
Implements :func:`autobahn.websocket.interfaces.IWebSocketChannel.onPing`
"""
if self.debug:
self.factory._log("WebSocketProtocol.onPing")
if self.state == WebSocketProtocol.STATE_OPEN:
self.sendPong(payload)
def onPong(self, payload):
"""
Implements :func:`autobahn.websocket.interfaces.IWebSocketChannel.onPong`
"""
if self.debug:
self.factory._log("WebSocketProtocol.onPong")
def onClose(self, wasClean, code, reason):
"""
Implements :func:`autobahn.websocket.interfaces.IWebSocketChannel.onClose`
"""
if self.debugCodePaths:
s = "WebSocketProtocol.onClose:\n"
s += "wasClean=%s\n" % wasClean
s += "code=%s\n" % code
s += "reason=%s\n" % reason
s += "self.closedByMe=%s\n" % self.closedByMe
s += "self.failedByMe=%s\n" % self.failedByMe
s += "self.droppedByMe=%s\n" % self.droppedByMe
s += "self.wasClean=%s\n" % self.wasClean
s += "self.wasNotCleanReason=%s\n" % self.wasNotCleanReason
s += "self.localCloseCode=%s\n" % self.localCloseCode
s += "self.localCloseReason=%s\n" % self.localCloseReason
s += "self.remoteCloseCode=%s\n" % self.remoteCloseCode
s += "self.remoteCloseReason=%s\n" % self.remoteCloseReason
self.factory._log(s)
def onCloseFrame(self, code, reasonRaw):
"""
Callback when a Close frame was received. The default implementation answers by
sending a Close when no Close was sent before. Otherwise it drops
the TCP connection either immediately (when we are a server) or after a timeout
(when we are a client and expect the server to drop the TCP).
Modes: Hybi, Hixie
Notes:
- For Hixie mode, this method is slightly misnamed for historic reasons.
- For Hixie mode, code and reasonRaw are silently ignored.
:param code: None or close status code, if there was one (:class:`WebSocketProtocol`.CLOSE_STATUS_CODE_*).
:type code: int
:param reason: None or close reason (when present, a status code MUST have been also be present).
:type reason: str
"""
if self.debugCodePaths:
self.factory._log("WebSocketProtocol.onCloseFrame")
self.remoteCloseCode = code
## reserved close codes: 0-999, 1004, 1005, 1006, 1011-2999, >= 5000
##
if code is not None and (code < 1000 or (code >= 1000 and code <= 2999 and code not in WebSocketProtocol.CLOSE_STATUS_CODES_ALLOWED) or code >= 5000):
if self.protocolViolation("invalid close code %d" % code):
return True
## closing reason
##
if reasonRaw is not None:
## we use our own UTF-8 validator to get consistent and fully conformant
## UTF-8 validation behavior
u = Utf8Validator()
val = u.validate(reasonRaw)
if not val[0]:
if self.invalidPayload("invalid close reason (non-UTF-8 payload)"):
return True
self.remoteCloseReason = reasonRaw.decode('utf8')
if self.state == WebSocketProtocol.STATE_CLOSING:
## We already initiated the closing handshake, so this
## is the peer's reply to our close frame.
## cancel any closing HS timer if present
##
if self.closeHandshakeTimeoutCall is not None:
if self.debugCodePaths:
self.factory._log("closeHandshakeTimeoutCall.cancel")
self.closeHandshakeTimeoutCall.cancel()
self.closeHandshakeTimeoutCall = None
self.wasClean = True
if self.factory.isServer:
## When we are a server, we immediately drop the TCP.
self.dropConnection(abort = True)
else:
## When we are a client, the server should drop the TCP
## If that doesn't happen, we do. And that will set wasClean = False.
if self.serverConnectionDropTimeout > 0:
self.serverConnectionDropTimeoutCall = self.factory._callLater(self.serverConnectionDropTimeout, self.onServerConnectionDropTimeout)
elif self.state == WebSocketProtocol.STATE_OPEN:
## The peer initiates a closing handshake, so we reply
## by sending close frame.
self.wasClean = True
if self.websocket_version == 0:
self.sendCloseFrame(isReply = True)
else:
## Either reply with same code/reason, or code == NORMAL/reason=None
if self.echoCloseCodeReason:
self.sendCloseFrame(code = code, reasonUtf8 = reason.encode("UTF-8"), isReply = True)
else:
self.sendCloseFrame(code = WebSocketProtocol.CLOSE_STATUS_CODE_NORMAL, isReply = True)
if self.factory.isServer:
## When we are a server, we immediately drop the TCP.
self.dropConnection(abort = False)
else:
## When we are a client, we expect the server to drop the TCP,
## and when the server fails to do so, a timeout in sendCloseFrame()
## will set wasClean = False back again.
pass
else:
## STATE_PROXY_CONNECTING, STATE_CONNECTING, STATE_CLOSED
raise Exception("logic error")
def onServerConnectionDropTimeout(self):
"""
We (a client) expected the peer (a server) to drop the connection,
but it didn't (in time self.serverConnectionDropTimeout).
So we drop the connection, but set self.wasClean = False.
Modes: Hybi, Hixie
"""
self.serverConnectionDropTimeoutCall = None
if self.state != WebSocketProtocol.STATE_CLOSED:
if self.debugCodePaths:
self.factory._log("onServerConnectionDropTimeout")
self.wasClean = False
self.wasNotCleanReason = "server did not drop TCP connection (in time)"
self.wasServerConnectionDropTimeout = True
self.dropConnection(abort = True)
else:
if self.debugCodePaths:
self.factory._log("skipping onServerConnectionDropTimeout since connection is already closed")
def onOpenHandshakeTimeout(self):
"""
We expected the peer to complete the opening handshake with to us.
It didn't do so (in time self.openHandshakeTimeout).
So we drop the connection, but set self.wasClean = False.
Modes: Hybi, Hixie
"""
self.openHandshakeTimeoutCall = None
if self.state in [WebSocketProtocol.STATE_CONNECTING, WebSocketProtocol.STATE_PROXY_CONNECTING]:
if self.debugCodePaths:
self.factory._log("onOpenHandshakeTimeout fired")
self.wasClean = False
self.wasNotCleanReason = "peer did not finish (in time) the opening handshake"
self.wasOpenHandshakeTimeout = True
self.dropConnection(abort = True)
elif self.state == WebSocketProtocol.STATE_OPEN:
if self.debugCodePaths:
self.factory._log("skipping onOpenHandshakeTimeout since WebSocket connection is open (opening handshake already finished)")
elif self.state == WebSocketProtocol.STATE_CLOSING:
if self.debugCodePaths:
self.factory._log("skipping onOpenHandshakeTimeout since WebSocket connection is closing")
elif self.state == WebSocketProtocol.STATE_CLOSED:
if self.debugCodePaths:
self.factory._log("skipping onOpenHandshakeTimeout since WebSocket connection already closed")
else:
# should not arrive here
raise Exception("logic error")
def onCloseHandshakeTimeout(self):
"""
We expected the peer to respond to us initiating a close handshake. It didn't
respond (in time self.closeHandshakeTimeout) with a close response frame though.
So we drop the connection, but set self.wasClean = False.
Modes: Hybi, Hixie
"""
self.closeHandshakeTimeoutCall = None
if self.state != WebSocketProtocol.STATE_CLOSED:
if self.debugCodePaths:
self.factory._log("onCloseHandshakeTimeout fired")
self.wasClean = False
self.wasNotCleanReason = "peer did not respond (in time) in closing handshake"
self.wasCloseHandshakeTimeout = True
self.dropConnection(abort = True)
else:
if self.debugCodePaths:
self.factory._log("skipping onCloseHandshakeTimeout since connection is already closed")
def dropConnection(self, abort = False):
"""
Drop the underlying TCP connection.
Modes: Hybi, Hixie
"""
if self.state != WebSocketProtocol.STATE_CLOSED:
if self.debugCodePaths:
self.factory._log("dropping connection")
self.droppedByMe = True
self.state = WebSocketProtocol.STATE_CLOSED
self._closeConnection(abort)
else:
if self.debugCodePaths:
self.factory._log("skipping dropConnection since connection is already closed")
def failConnection(self, code = CLOSE_STATUS_CODE_GOING_AWAY, reason = "Going Away"):
"""
Fails the WebSocket connection.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, the code and reason are silently ignored.
"""
if self.state != WebSocketProtocol.STATE_CLOSED:
if self.debugCodePaths:
self.factory._log("Failing connection : %s - %s" % (code, reason))
self.failedByMe = True
if self.failByDrop:
## brutally drop the TCP connection
self.wasClean = False
self.wasNotCleanReason = "I failed the WebSocket connection by dropping the TCP connection"
self.dropConnection(abort = True)
else:
## perform WebSocket closing handshake
if self.state != WebSocketProtocol.STATE_CLOSING:
self.sendCloseFrame(code = code, reasonUtf8 = reason.encode("UTF-8")[:125-2], isReply = False)
else:
if self.debugCodePaths:
self.factory._log("skipping failConnection since connection is already closing")
else:
if self.debugCodePaths:
self.factory._log("skipping failConnection since connection is already closed")
def protocolViolation(self, reason):
    """
    Fired when a WebSocket protocol violation/error occurs.

    Modes: Hybi, Hixie

    Notes:
      - For Hixie mode, reason is silently ignored.

    :param reason: Protocol violation that was encountered (human readable).
    :type reason: str

    :returns: bool -- True, when any further processing should be discontinued.
    """
    if self.debugCodePaths:
        self.factory._log("Protocol violation : %s" % reason)
    self.failConnection(WebSocketProtocol.CLOSE_STATUS_CODE_PROTOCOL_ERROR, reason)
    if self.failByDrop:
        return True
    else:
        ## if we don't immediately drop the TCP, we need to skip the invalid frame
        ## to continue to later receive the closing handshake reply
        return False
def invalidPayload(self, reason):
    """
    Fired when invalid payload is encountered. Currently, this only happens
    for text message when payload is invalid UTF-8 or close frames with
    close reason that is invalid UTF-8.

    Modes: Hybi, Hixie

    Notes:
      - For Hixie mode, reason is silently ignored.

    :param reason: What was invalid for the payload (human readable).
    :type reason: str

    :returns: bool -- True, when any further processing should be discontinued.
    """
    if self.debugCodePaths:
        self.factory._log("Invalid payload : %s" % reason)
    self.failConnection(WebSocketProtocol.CLOSE_STATUS_CODE_INVALID_PAYLOAD, reason)
    if self.failByDrop:
        return True
    else:
        ## if we don't immediately drop the TCP, we need to skip the invalid frame
        ## to continue to later receive the closing handshake reply
        return False
def setTrackTimings(self, enable):
    """
    Enable/disable tracking of detailed timings.

    :param enable: Turn time tracking on/off.
    :type enable: bool
    """
    ## only (re)configure when the setting was never set or actually changes
    unchanged = hasattr(self, 'trackTimings') and self.trackTimings == enable
    if not unchanged:
        self.trackTimings = enable
        self.trackedTimings = Timings() if enable else None
def _connectionMade(self):
    """
    This is called by network framework when a new TCP connection has been established
    and handed over to a Protocol instance (an instance of this class).

    Initializes all per-connection state: configuration copied down from the
    factory, compression/timing/traffic-stats helpers, protocol and send
    state machines, close-handshake tracking flags and timers.

    Modes: Hybi, Hixie
    """
    ## copy default options from factory (so we are not affected by changed on
    ## those), but only copy if not already set on protocol instance (allow
    ## to set configuration individually)
    ##
    configAttrLog = []
    for configAttr in self.CONFIG_ATTRS:
        if not hasattr(self, configAttr):
            setattr(self, configAttr, getattr(self.factory, configAttr))
            configAttrSource = self.factory.__class__.__name__
        else:
            configAttrSource = self.__class__.__name__
        configAttrLog.append((configAttr, getattr(self, configAttr), configAttrSource))

    if self.debug:
        #self.factory._log(configAttrLog)
        self.factory._log("\n" + pformat(configAttrLog))

    ## permessage-compress extension
    self._perMessageCompress = None

    ## Time tracking
    self.trackedTimings = None
    self.setTrackTimings(self.trackTimings)

    ## Traffic stats
    self.trafficStats = TrafficStats()

    ## initial state
    if not self.factory.isServer and self.factory.proxy is not None:
        self.state = WebSocketProtocol.STATE_PROXY_CONNECTING
    else:
        self.state = WebSocketProtocol.STATE_CONNECTING
    self.send_state = WebSocketProtocol.SEND_STATE_GROUND
    self.data = b""

    ## for chopped/synched sends, we need to queue to maintain
    ## ordering when recalling the reactor to actually "force"
    ## the octets to wire (see test/trickling in the repo)
    self.send_queue = deque()
    self.triggered = False

    ## incremental UTF8 validator
    self.utf8validator = Utf8Validator()

    ## track when frame/message payload sizes (incoming) were exceeded
    self.wasMaxFramePayloadSizeExceeded = False
    self.wasMaxMessagePayloadSizeExceeded = False

    ## the following vars are related to connection close handling/tracking

    # True, iff I have initiated closing HS (that is, did send close first)
    self.closedByMe = False

    # True, iff I have failed the WS connection (i.e. due to protocol error)
    # Failing can be either by initiating close HS or brutal drop (this is
    # controlled by failByDrop option)
    self.failedByMe = False

    # True, iff I dropped the TCP connection (called transport.loseConnection())
    self.droppedByMe = False

    # True, iff full WebSocket closing handshake was performed (close frame sent
    # and received) _and_ the server dropped the TCP (which is its responsibility)
    self.wasClean = False

    # When self.wasClean = False, the reason (what happened)
    self.wasNotCleanReason = None

    # When we are a client, and we expected the server to drop the TCP, but that
    # didn't happen in time, this gets True
    self.wasServerConnectionDropTimeout = False

    # When the initial WebSocket opening handshake times out, this gets True
    self.wasOpenHandshakeTimeout = False

    # When we initiated a closing handshake, but the peer did not respond in
    # time, this gets True
    self.wasCloseHandshakeTimeout = False

    # The close code I sent in close frame (if any)
    self.localCloseCode = None

    # The close reason I sent in close frame (if any)
    self.localCloseReason = None

    # The close code the peer sent me in close frame (if any)
    self.remoteCloseCode = None

    # The close reason the peer sent me in close frame (if any)
    self.remoteCloseReason = None

    # timers, which might get set up later, and remembered here to get canceled
    # when appropriate
    if not self.factory.isServer:
        # only clients wait for the server to drop the TCP after close
        self.serverConnectionDropTimeoutCall = None
    self.openHandshakeTimeoutCall = None
    self.closeHandshakeTimeoutCall = None

    # set opening handshake timeout handler
    if self.openHandshakeTimeout > 0:
        self.openHandshakeTimeoutCall = self.factory._callLater(self.openHandshakeTimeout, self.onOpenHandshakeTimeout)
def _connectionLost(self, reason):
    """
    This is called by network framework when a transport connection was lost.

    Finalizes close tracking and fires the user close hook with wasClean
    plus either the peer's close code/reason or a synthesized unclean-close
    message.

    Modes: Hybi, Hixie

    :param reason: Connection loss reason as handed over by the network framework.
    """
    ## cancel any server connection drop timer if present
    ##
    if not self.factory.isServer and self.serverConnectionDropTimeoutCall is not None:
        if self.debugCodePaths:
            self.factory._log("serverConnectionDropTimeoutCall.cancel")
        self.serverConnectionDropTimeoutCall.cancel()
        self.serverConnectionDropTimeoutCall = None

    self.state = WebSocketProtocol.STATE_CLOSED
    if not self.wasClean:
        if not self.droppedByMe and self.wasNotCleanReason is None:
            self.wasNotCleanReason = "peer dropped the TCP connection without previous WebSocket closing handshake"
        self._onClose(self.wasClean, WebSocketProtocol.CLOSE_STATUS_CODE_ABNORMAL_CLOSE, "connection was closed uncleanly (%s)" % self.wasNotCleanReason)
    else:
        self._onClose(self.wasClean, self.remoteCloseCode, self.remoteCloseReason)
def logRxOctets(self, data):
    """
    Hook fired right after raw octets have been received, but only when self.logOctets == True.

    Modes: Hybi, Hixie
    """
    hexdump = binascii.b2a_hex(data)
    self.factory._log("RX Octets from %s : octets = %s" % (self.peer, hexdump))
def logTxOctets(self, data, sync):
    """
    Hook fired right after raw octets have been sent, but only when self.logOctets == True.

    Modes: Hybi, Hixie
    """
    hexdump = binascii.b2a_hex(data)
    self.factory._log("TX Octets to %s : sync = %s, octets = %s" % (self.peer, sync, hexdump))
def logRxFrame(self, frameHeader, payload):
    """
    Hook fired right after WebSocket frame has been received and decoded, but only when self.logFrames == True.

    Modes: Hybi

    :param frameHeader: Decoded frame header of the received frame.
    :param payload: List of payload chunks belonging to the frame.
    """
    data = b''.join(payload)
    info = (self.peer,
            frameHeader.fin,
            frameHeader.rsv,
            frameHeader.opcode,
            binascii.b2a_hex(frameHeader.mask) if frameHeader.mask else "-",
            frameHeader.length,
            # text frames (opcode 1) are logged verbatim, others hex-encoded
            data if frameHeader.opcode == 1 else binascii.b2a_hex(data))
    self.factory._log("RX Frame from %s : fin = %s, rsv = %s, opcode = %s, mask = %s, length = %s, payload = %s" % info)
def logTxFrame(self, frameHeader, payload, repeatLength, chopsize, sync):
    """
    Hook fired right after WebSocket frame has been encoded and sent, but only when self.logFrames == True.

    Modes: Hybi

    :param frameHeader: Frame header of the sent frame.
    :param payload: Frame payload as handed to sendFrame().
    :param repeatLength: Explicit payload_len the payload was repeated to fill (or None).
    :param chopsize: Chop size used when sending (or None).
    :param sync: Whether the frame was sent with forced flushing.
    """
    info = (self.peer,
            frameHeader.fin,
            frameHeader.rsv,
            frameHeader.opcode,
            binascii.b2a_hex(frameHeader.mask) if frameHeader.mask else "-",
            frameHeader.length,
            repeatLength,
            chopsize,
            sync,
            # text frames (opcode 1) are logged verbatim, others hex-encoded
            payload if frameHeader.opcode == 1 else binascii.b2a_hex(payload))
    self.factory._log("TX Frame to %s : fin = %s, rsv = %s, opcode = %s, mask = %s, length = %s, repeat_length = %s, chopsize = %s, sync = %s, payload = %s" % info)
def _dataReceived(self, data):
    """
    This is called by network framework upon receiving data on transport connection.

    Updates wire-level traffic stats, appends the octets to the receive
    buffer and triggers buffered-data consumption.

    Modes: Hybi, Hixie

    :param data: Raw octets received from the transport.
    :type data: bytes
    """
    if self.state == WebSocketProtocol.STATE_OPEN:
        self.trafficStats.incomingOctetsWireLevel += len(data)
    elif self.state == WebSocketProtocol.STATE_CONNECTING or self.state == WebSocketProtocol.STATE_PROXY_CONNECTING:
        self.trafficStats.preopenIncomingOctetsWireLevel += len(data)

    if self.logOctets:
        self.logRxOctets(data)
    self.data += data
    self.consumeData()
def consumeData(self):
    """
    Consume buffered (incoming) data.

    Dispatches on protocol state: WebSocket frame processing when open or
    closing, proxy connect or opening handshake while connecting, and
    discarding data after close.

    Modes: Hybi, Hixie
    """
    ## WebSocket is open (handshake was completed) or close was sent
    ##
    if self.state == WebSocketProtocol.STATE_OPEN or self.state == WebSocketProtocol.STATE_CLOSING:

        ## process until no more buffered data left or WS was closed
        ##
        while self.processData() and self.state != WebSocketProtocol.STATE_CLOSED:
            pass

    ## need to establish proxy connection
    ##
    elif self.state == WebSocketProtocol.STATE_PROXY_CONNECTING:
        self.processProxyConnect()

    ## WebSocket needs handshake
    ##
    elif self.state == WebSocketProtocol.STATE_CONNECTING:

        ## the implementation of processHandshake() in derived
        ## class needs to perform client or server handshake
        ## from other party here ..
        ##
        self.processHandshake()

    ## we failed the connection .. don't process any more data!
    ##
    elif self.state == WebSocketProtocol.STATE_CLOSED:

        ## ignore any data received after WS was closed
        ##
        if self.debugCodePaths:
            self.factory._log("received data in STATE_CLOSED")

    ## should not arrive here (invalid state)
    ##
    else:
        raise Exception("invalid state")
def processProxyConnect(self):
    """
    Process proxy connect.

    Modes: Hybi, Hixie

    Abstract here: concrete client/server protocol classes must override.
    """
    msg = "must implement proxy connect (client or server) in derived class"
    raise Exception(msg)
def processHandshake(self):
    """
    Process WebSocket handshake.

    Modes: Hybi, Hixie

    Abstract here: concrete client/server protocol classes must override.
    """
    msg = "must implement handshake (client or server) in derived class"
    raise Exception(msg)
def _trigger(self):
"""
Trigger sending stuff from send queue (which is only used for chopped/synched writes).
Modes: Hybi, Hixie
"""
if not self.triggered:
self.triggered = True
self._send()
def _send(self):
    """
    Send out stuff from send queue. For details how this works, see test/trickling
    in the repo.

    Pops one queued (data, sync) pair per invocation, writes it to the
    transport (unless already closed) and reschedules itself via the
    reactor until the queue is drained.

    Modes: Hybi, Hixie
    """
    if len(self.send_queue) > 0:
        # e is a (octets, sync_flag) pair queued by sendData()
        e = self.send_queue.popleft()

        if self.state != WebSocketProtocol.STATE_CLOSED:
            self.transport.write(e[0])

            if self.state == WebSocketProtocol.STATE_OPEN:
                self.trafficStats.outgoingOctetsWireLevel += len(e[0])
            elif self.state == WebSocketProtocol.STATE_CONNECTING or self.state == WebSocketProtocol.STATE_PROXY_CONNECTING:
                self.trafficStats.preopenOutgoingOctetsWireLevel += len(e[0])

            if self.logOctets:
                self.logTxOctets(e[0], e[1])
        else:
            if self.debugCodePaths:
                self.factory._log("skipped delayed write, since connection is closed")

        # we need to reenter the reactor to make the latter
        # reenter the OS network stack, so that octets
        # can get on the wire. Note: this is a "heuristic",
        # since there is no (easy) way to really force out
        # octets from the OS network stack to wire.
        self.factory._callLater(WebSocketProtocol._QUEUED_WRITE_DELAY, self._send)
    else:
        # queue drained: allow _trigger() to start a new send cycle
        self.triggered = False
def sendData(self, data, sync = False, chopsize = None):
    """
    Wrapper for self.transport.write which allows to give a chopsize.
    When asked to chop up writing to TCP stream, we write only chopsize octets
    and then give up control to select() in underlying reactor so that bytes
    get onto wire immediately. Note that this is different from and unrelated
    to WebSocket data message fragmentation. Note that this is also different
    from the TcpNoDelay option which can be set on the socket.

    Modes: Hybi, Hixie

    :param data: Raw octets to send.
    :param sync: When True, send via the queued/synched path.
    :param chopsize: When set (> 0), send data chopped into chunks of this
                     many octets.
    """
    if chopsize and chopsize > 0:
        i = 0
        n = len(data)
        done = False
        while not done:
            j = i + chopsize
            if j >= n:
                done = True
                j = n
            ## chopped sends are always queued and flushed synchronously
            self.send_queue.append((data[i:j], True))
            i += chopsize
        self._trigger()
    else:
        if sync or len(self.send_queue) > 0:
            ## queue to preserve ordering relative to pending queued writes
            self.send_queue.append((data, sync))
            self._trigger()
        else:
            ## fast path: write directly to the transport
            self.transport.write(data)

            if self.state == WebSocketProtocol.STATE_OPEN:
                self.trafficStats.outgoingOctetsWireLevel += len(data)
            elif self.state == WebSocketProtocol.STATE_CONNECTING or self.state == WebSocketProtocol.STATE_PROXY_CONNECTING:
                self.trafficStats.preopenOutgoingOctetsWireLevel += len(data)

            if self.logOctets:
                self.logTxOctets(data, False)
def sendPreparedMessage(self, preparedMsg):
    """
    Implements :func:`autobahn.websocket.interfaces.IWebSocketChannel.sendPreparedMessage`
    """
    if self.websocket_version == 0:
        ## Hixie-76: always send the pre-serialized Hixie payload
        self.sendData(preparedMsg.payloadHixie)
    elif self._perMessageCompress is None or preparedMsg.doNotCompress:
        ## no compression in play: pre-serialized Hybi frame can be reused
        self.sendData(preparedMsg.payloadHybi)
    else:
        ## compression is per-connection, so the message must be re-framed
        self.sendMessage(preparedMsg.payload, preparedMsg.binary)
def processData(self):
    """
    After WebSocket handshake has been completed, this procedure will do all
    subsequent processing of incoming bytes.

    Modes: Hybi, Hixie

    :returns: bool -- True when more buffered data remains to be processed.
    """
    ## dispatch on negotiated protocol flavor
    handler = self.processDataHixie76 if self.websocket_version == 0 else self.processDataHybi
    return handler()
def processDataHixie76(self):
    """
    Hixie-76 incoming data processing.

    Modes: Hixie

    :returns: bool -- True when more buffered data is left to process.
    """
    buffered_len = len(self.data)

    ## outside a message, that is we are awaiting data which starts a new message
    ##
    if not self.inside_message:
        if buffered_len >= 2:

            ## new message
            ##
            ## Note: compare via 1/2-octet *slices*, not indexing - on
            ## Python 3 indexing bytes yields an int, so the former
            ## self.data[0] == b'\x00' comparison was always False.
            if self.data[0:1] == b'\x00':
                self.inside_message = True

                if self.utf8validateIncoming:
                    self.utf8validator.reset()
                    self.utf8validateIncomingCurrentMessage = True
                    self.utf8validateLast = (True, True, 0, 0)
                else:
                    self.utf8validateIncomingCurrentMessage = False

                self.data = self.data[1:]
                if self.trackedTimings:
                    self.trackedTimings.track("onMessageBegin")
                self._onMessageBegin(False)

            ## Hixie close from peer received
            ##
            elif self.data[0:2] == b'\xff\x00':
                self.onCloseFrame(None, None)
                self.data = self.data[2:]
                # stop receiving/processing after having received close!
                return False

            ## malformed data
            ##
            else:
                if self.protocolViolation("malformed data received"):
                    return False
        else:
            ## need more data
            return False

    ## find the 0xff message terminator; index 0 is valid (empty message),
    ## so test >= 0 (the former > 0 misdelivered empty messages)
    end_index = self.data.find(b'\xff')
    if end_index >= 0:
        payload = self.data[:end_index]
        self.data = self.data[end_index + 1:]
    else:
        payload = self.data
        self.data = b''

    ## incrementally validate UTF-8 payload
    ##
    if self.utf8validateIncomingCurrentMessage:
        self.utf8validateLast = self.utf8validator.validate(payload)
        if not self.utf8validateLast[0]:
            if self.invalidPayload("encountered invalid UTF-8 while processing text message at payload octet index %d" % self.utf8validateLast[3]):
                return False

    self._onMessageFrameData(payload)

    if end_index >= 0:
        self.inside_message = False
        self._onMessageEnd()

    return len(self.data) > 0
def processDataHybi(self):
    """
    RFC6455/Hybi-Drafts incoming data processing.

    Incrementally parses the receive buffer: when no frame is in progress,
    attempts to decode a complete frame header (with all protocol checks);
    otherwise consumes payload of the current frame, unmasking and firing
    the frame data/end hooks.

    Modes: Hybi

    :returns: bool -- True when more buffered data remains to be processed.
    """
    buffered_len = len(self.data)

    ## outside a frame, that is we are awaiting data which starts a new frame
    ##
    if self.current_frame is None:

        ## need minimum of 2 octets to for new frame
        ##
        if buffered_len >= 2:

            ## FIN, RSV, OPCODE
            ##
            if PY3:
                b = self.data[0]
            else:
                b = ord(self.data[0])
            frame_fin = (b & 0x80) != 0
            frame_rsv = (b & 0x70) >> 4
            frame_opcode = b & 0x0f

            ## MASK, PAYLOAD LEN 1
            ##
            if PY3:
                b = self.data[1]
            else:
                b = ord(self.data[1])
            frame_masked = (b & 0x80) != 0
            frame_payload_len1 = b & 0x7f

            ## MUST be 0 when no extension defining
            ## the semantics of RSV has been negotiated
            ##
            if frame_rsv != 0:
                # RSV1 (== 4 here) is allowed when permessage-compress was negotiated
                if self._perMessageCompress is not None and frame_rsv == 4:
                    pass
                else:
                    if self.protocolViolation("RSV = %d and no extension negotiated" % frame_rsv):
                        return False

            ## all client-to-server frames MUST be masked
            ##
            if self.factory.isServer and self.requireMaskedClientFrames and not frame_masked:
                if self.protocolViolation("unmasked client-to-server frame"):
                    return False

            ## all server-to-client frames MUST NOT be masked
            ##
            if not self.factory.isServer and not self.acceptMaskedServerFrames and frame_masked:
                if self.protocolViolation("masked server-to-client frame"):
                    return False

            ## check frame
            ##
            if frame_opcode > 7: # control frame (have MSB in opcode set)

                ## control frames MUST NOT be fragmented
                ##
                if not frame_fin:
                    if self.protocolViolation("fragmented control frame"):
                        return False

                ## control frames MUST have payload 125 octets or less
                ##
                if frame_payload_len1 > 125:
                    if self.protocolViolation("control frame with payload length > 125 octets"):
                        return False

                ## check for reserved control frame opcodes
                ##
                if frame_opcode not in [8, 9, 10]:
                    if self.protocolViolation("control frame using reserved opcode %d" % frame_opcode):
                        return False

                ## close frame : if there is a body, the first two bytes of the body MUST be a 2-byte
                ## unsigned integer (in network byte order) representing a status code
                ##
                if frame_opcode == 8 and frame_payload_len1 == 1:
                    if self.protocolViolation("received close control frame with payload len 1"):
                        return False

                ## control frames MUST NOT be compressed
                ##
                if self._perMessageCompress is not None and frame_rsv == 4:
                    if self.protocolViolation("received compressed control frame [%s]" % self._perMessageCompress.EXTENSION_NAME):
                        return False

            else: # data frame

                ## check for reserved data frame opcodes
                ##
                if frame_opcode not in [0, 1, 2]:
                    if self.protocolViolation("data frame using reserved opcode %d" % frame_opcode):
                        return False

                ## check opcode vs message fragmentation state 1/2
                ##
                if not self.inside_message and frame_opcode == 0:
                    if self.protocolViolation("received continuation data frame outside fragmented message"):
                        return False

                ## check opcode vs message fragmentation state 2/2
                ##
                if self.inside_message and frame_opcode != 0:
                    if self.protocolViolation("received non-continuation data frame while inside fragmented message"):
                        return False

                ## continuation data frames MUST NOT have the compressed bit set
                ##
                if self._perMessageCompress is not None and frame_rsv == 4 and self.inside_message:
                    if self.protocolViolation("received continution data frame with compress bit set [%s]" % self._perMessageCompress.EXTENSION_NAME):
                        return False

            ## compute complete header length
            ##
            if frame_masked:
                mask_len = 4
            else:
                mask_len = 0

            if frame_payload_len1 < 126:
                frame_header_len = 2 + mask_len
            elif frame_payload_len1 == 126:
                frame_header_len = 2 + 2 + mask_len
            elif frame_payload_len1 == 127:
                frame_header_len = 2 + 8 + mask_len
            else:
                raise Exception("logic error")

            ## only proceed when we have enough data buffered for complete
            ## frame header (which includes extended payload len + mask)
            ##
            if buffered_len >= frame_header_len:

                ## minimum frame header length (already consumed)
                ##
                i = 2

                ## extract extended payload length
                ##
                if frame_payload_len1 == 126:
                    frame_payload_len = struct.unpack("!H", self.data[i:i+2])[0]
                    if frame_payload_len < 126:
                        if self.protocolViolation("invalid data frame length (not using minimal length encoding)"):
                            return False
                    i += 2
                elif frame_payload_len1 == 127:
                    frame_payload_len = struct.unpack("!Q", self.data[i:i+8])[0]
                    if frame_payload_len > 0x7FFFFFFFFFFFFFFF: # 2**63
                        if self.protocolViolation("invalid data frame length (>2^63)"):
                            return False
                    if frame_payload_len < 65536:
                        if self.protocolViolation("invalid data frame length (not using minimal length encoding)"):
                            return False
                    i += 8
                else:
                    frame_payload_len = frame_payload_len1

                ## when payload is masked, extract frame mask
                ##
                frame_mask = None
                if frame_masked:
                    frame_mask = self.data[i:i+4]
                    i += 4

                if frame_masked and frame_payload_len > 0 and self.applyMask:
                    self.current_frame_masker = createXorMasker(frame_mask, frame_payload_len)
                else:
                    self.current_frame_masker = XorMaskerNull()

                ## remember rest (payload of current frame after header and everything thereafter)
                ##
                self.data = self.data[i:]

                ## ok, got complete frame header
                ##
                self.current_frame = FrameHeader(frame_opcode,
                                                 frame_fin,
                                                 frame_rsv,
                                                 frame_payload_len,
                                                 frame_mask)

                ## process begin on new frame
                ##
                self.onFrameBegin()

                ## reprocess when frame has no payload or and buffered data left
                ##
                return frame_payload_len == 0 or len(self.data) > 0

            else:
                return False # need more data

        else:
            return False # need more data

    ## inside a started frame
    ##
    else:

        ## cut out rest of frame payload
        ##
        # the masker's pointer doubles as "payload octets consumed so far"
        rest = self.current_frame.length - self.current_frame_masker.pointer()
        if buffered_len >= rest:
            data = self.data[:rest]
            length = rest
            self.data = self.data[rest:]
        else:
            data = self.data
            length = buffered_len
            self.data = b''

        if length > 0:
            ## unmask payload
            ##
            payload = self.current_frame_masker.process(data)
        else:
            ## we also process empty payloads, since we need to fire
            ## our hooks (at least for streaming processing, this is
            ## necessary for correct protocol state transitioning)
            ##
            payload = b''

        ## process frame data
        ##
        fr = self.onFrameData(payload)
        if fr == False:
            return False

        ## fire frame end handler when frame payload is complete
        ##
        if self.current_frame_masker.pointer() == self.current_frame.length:
            fr = self.onFrameEnd()
            if fr == False:
                return False

        ## reprocess when no error occurred and buffered data left
        ##
        return len(self.data) > 0
def onFrameBegin(self):
    """
    Begin of receive new frame.

    For control frames, starts collecting the control payload. For data
    frames, performs message-begin bookkeeping (decompressor, UTF-8
    validator, timings, user hook) on the first frame of a message, then
    fires the frame-begin hook.

    Modes: Hybi
    """
    if self.current_frame.opcode > 7:
        # control frame: buffer its payload chunks until frame end
        self.control_frame_data = []
    else:
        ## new message started
        ##
        if not self.inside_message:
            self.inside_message = True

            ## setup decompressor
            ##
            if self._perMessageCompress is not None and self.current_frame.rsv == 4:
                self._isMessageCompressed = True
                self._perMessageCompress.startDecompressMessage()
            else:
                self._isMessageCompressed = False

            ## setup UTF8 validator
            ##
            if self.current_frame.opcode == WebSocketProtocol.MESSAGE_TYPE_TEXT and self.utf8validateIncoming:
                self.utf8validator.reset()
                self.utf8validateIncomingCurrentMessage = True
                self.utf8validateLast = (True, True, 0, 0)
            else:
                self.utf8validateIncomingCurrentMessage = False

            ## track timings
            ##
            if self.trackedTimings:
                self.trackedTimings.track("onMessageBegin")

            ## fire onMessageBegin
            ##
            self._onMessageBegin(self.current_frame.opcode == WebSocketProtocol.MESSAGE_TYPE_BINARY)

        self._onMessageFrameBegin(self.current_frame.length)
def onFrameData(self, payload):
    """
    New data received within frame.

    Control frame payload is buffered; data frame payload is (optionally)
    decompressed, accounted, UTF-8 validated and forwarded to the
    frame-data hook.

    Modes: Hybi

    :param payload: Received (already unmasked) payload chunk.
    :returns: False when processing must be discontinued, else None.
    """
    if self.current_frame.opcode > 7:
        self.control_frame_data.append(payload)
    else:
        ## decompress frame payload
        ##
        if self._isMessageCompressed:
            compressedLen = len(payload)
            if self.debug:
                self.factory._log("RX compressed [%d]: %s" % (compressedLen, binascii.b2a_hex(payload)))
            payload = self._perMessageCompress.decompressMessageData(payload)
            uncompressedLen = len(payload)
        else:
            l = len(payload)
            compressedLen = l
            uncompressedLen = l

        if self.state == WebSocketProtocol.STATE_OPEN:
            self.trafficStats.incomingOctetsWebSocketLevel += compressedLen
            self.trafficStats.incomingOctetsAppLevel += uncompressedLen

        ## incrementally validate UTF-8 payload
        ##
        if self.utf8validateIncomingCurrentMessage:
            self.utf8validateLast = self.utf8validator.validate(payload)
            if not self.utf8validateLast[0]:
                if self.invalidPayload("encountered invalid UTF-8 while processing text message at payload octet index %d" % self.utf8validateLast[3]):
                    return False

        self._onMessageFrameData(payload)
def onFrameEnd(self):
    """
    End of frame received.

    Control frames are dispatched to processControlFrame(); data frames
    fire frame-end, and on FIN also finalize decompression, UTF-8
    validation and the message-end hook.

    Modes: Hybi

    :returns: False when processing must be discontinued, else None.
    """
    if self.current_frame.opcode > 7:
        if self.logFrames:
            self.logRxFrame(self.current_frame, self.control_frame_data)
        self.processControlFrame()
    else:
        if self.state == WebSocketProtocol.STATE_OPEN:
            self.trafficStats.incomingWebSocketFrames += 1
        if self.logFrames:
            self.logRxFrame(self.current_frame, self.frame_data)
        self._onMessageFrameEnd()
        if self.current_frame.fin:

            ## handle end of compressed message
            ##
            if self._isMessageCompressed:
                self._perMessageCompress.endDecompressMessage()

            ## verify UTF8 has actually ended
            ##
            if self.utf8validateIncomingCurrentMessage:
                if not self.utf8validateLast[1]:
                    if self.invalidPayload("UTF-8 text message payload ended within Unicode code point at payload octet index %d" % self.utf8validateLast[3]):
                        return False

            #if self.debug:
            #   self.factory._log("Traffic statistics:\n" + str(self.trafficStats))

            if self.state == WebSocketProtocol.STATE_OPEN:
                self.trafficStats.incomingWebSocketMessages += 1

            self._onMessageEnd()
            self.inside_message = False

    # frame fully processed: ready to parse the next frame header
    self.current_frame = None
def processControlFrame(self):
    """
    Process a completely received control frame.

    Dispatches CLOSE (opcode 8), PING (9) and PONG (10) frames; unknown
    opcodes are silently passed through (protocolViolation may have
    elected to continue).

    Modes: Hybi

    :returns: bool -- False when processing must stop (close received), else True.
    """
    payload = b''.join(self.control_frame_data)
    self.control_frame_data = None

    ## CLOSE frame
    ##
    if self.current_frame.opcode == 8:

        code = None
        reasonRaw = None
        ll = len(payload)
        if ll > 1:
            # first 2 octets: close status code (network byte order)
            code = struct.unpack("!H", payload[0:2])[0]
            if ll > 2:
                # remainder: close reason (raw, possibly invalid UTF-8)
                reasonRaw = payload[2:]

        if self.onCloseFrame(code, reasonRaw):
            return False

    ## PING frame
    ##
    elif self.current_frame.opcode == 9:
        self._onPing(payload)

    ## PONG frame
    ##
    elif self.current_frame.opcode == 10:
        self._onPong(payload)

    else:
        ## we might arrive here, when protocolViolation
        ## wants us to continue anyway
        pass

    return True
def sendFrame(self,
              opcode,
              payload = b'',
              fin = True,
              rsv = 0,
              mask = None,
              payload_len = None,
              chopsize = None,
              sync = False):
    """
    Send out frame. Normally only used internally via sendMessage(), sendPing(), sendPong() and sendClose().

    This method deliberately allows to send invalid frames (that is frames invalid
    per-se, or frames invalid because of protocol state). Other than in fuzzing servers,
    calling methods will ensure that no invalid frames are sent.

    In addition, this method supports explicit specification of payload length.
    When payload_len is given, it will always write that many octets to the stream.
    It'll wrap within payload, resending parts of that when more octets were requested
    The use case is again for fuzzing server which want to sent increasing amounts
    of payload data to peers without having to construct potentially large messges
    themselfes.

    Modes: Hybi

    :param opcode: Frame opcode (0-15).
    :param payload: Frame payload octets.
    :param fin: FIN bit.
    :param rsv: RSV bits (0-7).
    :param mask: Explicit 4-octet mask, or None to auto-generate when masking applies.
    :param payload_len: When given, repeat/clip payload to exactly this many octets.
    :param chopsize: Forwarded to sendData() for chopped writes.
    :param sync: Forwarded to sendData() for synched writes.
    """
    if self.websocket_version == 0:
        raise Exception("function not supported in Hixie-76 mode")

    if payload_len is not None:
        if len(payload) < 1:
            raise Exception("cannot construct repeated payload with length %d from payload of length %d" % (payload_len, len(payload)))
        l = payload_len
        ## repeat payload to fill exactly payload_len octets, wrapping at the
        ## end. Integer division is required: "/" yields a float on Python 3
        ## and makes range() raise TypeError.
        pl = b''.join([payload for k in range(payload_len // len(payload))]) + payload[:payload_len % len(payload)]
    else:
        l = len(payload)
        pl = payload

    ## first byte
    ##
    b0 = 0
    if fin:
        b0 |= (1 << 7)
    b0 |= (rsv % 8) << 4
    b0 |= opcode % 128

    ## second byte, payload len bytes and mask
    ##
    b1 = 0
    if mask or (not self.factory.isServer and self.maskClientFrames) or (self.factory.isServer and self.maskServerFrames):
        b1 |= 1 << 7

        if not mask:
            ## auto-generated random mask; only this is actually put on the wire
            mask = struct.pack("!I", random.getrandbits(32))
            mv = mask
        else:
            ## an explicitly supplied mask is applied but NOT transmitted
            ## (fuzzing use case)
            mv = b''

        ## mask frame payload
        ##
        if l > 0 and self.applyMask:
            masker = createXorMasker(mask, l)
            plm = masker.process(pl)
        else:
            plm = pl

    else:
        mv = b''
        plm = pl

    el = b''
    if l <= 125:
        b1 |= l
    elif l <= 0xFFFF:
        b1 |= 126
        el = struct.pack("!H", l)
    elif l <= 0x7FFFFFFFFFFFFFFF:
        b1 |= 127
        el = struct.pack("!Q", l)
    else:
        raise Exception("invalid payload length")

    ## serialize the two header octets portably - works identically on
    ## Python 2 and 3, replacing the former chr()/to_bytes() branching
    raw = b''.join([struct.pack("!BB", b0, b1), el, mv, plm])

    if opcode in [0, 1, 2]:
        self.trafficStats.outgoingWebSocketFrames += 1

    if self.logFrames:
        frameHeader = FrameHeader(opcode, fin, rsv, l, mask)
        self.logTxFrame(frameHeader, payload, payload_len, chopsize, sync)

    ## send frame octets
    ##
    self.sendData(raw, sync, chopsize)
def sendPing(self, payload = None):
    """
    Implements :func:`autobahn.websocket.interfaces.IWebSocketChannel.sendPing`
    """
    if self.websocket_version == 0:
        raise Exception("function not supported in Hixie-76 mode")
    if self.state != WebSocketProtocol.STATE_OPEN:
        return
    if not payload:
        self.sendFrame(opcode = 9)
        return
    ## control frame payload is capped at 125 octets per RFC6455
    plen = len(payload)
    if plen > 125:
        raise Exception("invalid payload for PING (payload length must be <= 125, was %d)" % plen)
    self.sendFrame(opcode = 9, payload = payload)
def sendPong(self, payload = None):
    """
    Implements :func:`autobahn.websocket.interfaces.IWebSocketChannel.sendPong`
    """
    if self.websocket_version == 0:
        raise Exception("function not supported in Hixie-76 mode")
    if self.state != WebSocketProtocol.STATE_OPEN:
        return
    if not payload:
        self.sendFrame(opcode = 10)
        return
    ## control frame payload is capped at 125 octets per RFC6455
    plen = len(payload)
    if plen > 125:
        raise Exception("invalid payload for PONG (payload length must be <= 125, was %d)" % plen)
    self.sendFrame(opcode = 10, payload = payload)
def sendCloseFrame(self, code = None, reasonUtf8 = None, isReply = False):
    """
    Send a close frame and update protocol state. Note, that this is
    an internal method which deliberately allows not send close
    frame with invalid payload.

    Modes: Hybi, Hixie

    Notes:
      - For Hixie mode, this method is slightly misnamed for historic reasons.
      - For Hixie mode, code and reasonUtf8 will be silently ignored.

    :param code: Close status code to send (Hybi only).
    :param reasonUtf8: Close reason, already UTF-8 encoded (Hybi only).
    :param isReply: True when this close is sent in reply to a close
                    frame received from the peer.
    """
    if self.state == WebSocketProtocol.STATE_CLOSING:
        if self.debugCodePaths:
            self.factory._log("ignoring sendCloseFrame since connection is closing")

    elif self.state == WebSocketProtocol.STATE_CLOSED:
        if self.debugCodePaths:
            self.factory._log("ignoring sendCloseFrame since connection already closed")

    elif self.state in [WebSocketProtocol.STATE_PROXY_CONNECTING, WebSocketProtocol.STATE_CONNECTING]:
        raise Exception("cannot close a connection not yet connected")

    elif self.state == WebSocketProtocol.STATE_OPEN:
        if self.websocket_version == 0:
            ## Hixie-76 close: fixed two-octet frame. Must be bytes (not
            ## str) so the transport receives octets on Python 3 as well.
            self.sendData(b"\xff\x00")
        else:
            ## construct Hybi close frame payload and send frame
            payload = b''
            if code is not None:
                payload += struct.pack("!H", code)
            if reasonUtf8 is not None:
                payload += reasonUtf8
            self.sendFrame(opcode = 8, payload = payload)

        ## update state
        self.state = WebSocketProtocol.STATE_CLOSING
        self.closedByMe = not isReply

        ## remember payload of close frame we sent
        self.localCloseCode = code
        self.localCloseReason = reasonUtf8

        ## drop connection when timeout on receiving close handshake reply
        if self.closedByMe and self.closeHandshakeTimeout > 0:
            self.closeHandshakeTimeoutCall = self.factory._callLater(self.closeHandshakeTimeout, self.onCloseHandshakeTimeout)

    else:
        raise Exception("logic error")
def sendClose(self, code = None, reason = None):
    """
    Implements :func:`autobahn.websocket.interfaces.IWebSocketChannel.sendClose`
    """
    ## validate close code
    ##
    if code is not None:
        if type(code) != int:
            raise Exception("invalid type %s for close code" % type(code))
        if code != 1000 and not (code >= 3000 and code <= 4999):
            raise Exception("invalid close code %d" % code)

    ## validate and UTF-8 encode close reason
    ##
    reasonUtf8 = None
    if reason is not None:
        if code is None:
            raise Exception("close reason without close code")
        if PY3:
            if type(reason) != str:
                raise Exception("invalid type %s for close reason" % type(reason))
        else:
            if type(reason) not in [str, unicode]:
                raise Exception("invalid type %s for close reason" % type(reason))
        reasonUtf8 = reason.encode("utf8")
        ## code (2 octets) + reason must fit the 125-octet control payload
        if len(reasonUtf8) + 2 > 125:
            raise Exception("close reason too long (%d)" % len(reasonUtf8))

    self.sendCloseFrame(code = code, reasonUtf8 = reasonUtf8, isReply = False)
def beginMessage(self, isBinary = False, doNotCompress = False):
    """
    Implements :func:`autobahn.websocket.interfaces.IWebSocketChannel.beginMessage`

    :param isBinary: Send message as binary (Hybi only; raises in Hixie mode).
    :param doNotCompress: Skip per-message compression even when negotiated.
    """
    if self.state != WebSocketProtocol.STATE_OPEN:
        return

    ## check if sending state is valid for this method
    ##
    if self.send_state != WebSocketProtocol.SEND_STATE_GROUND:
        raise Exception("WebSocketProtocol.beginMessage invalid in current sending state")

    if self.websocket_version == 0:
        if isBinary:
            raise Exception("cannot send binary message in Hixie76 mode")
        ## Hixie-76 message start marker. Must be bytes (not str) so the
        ## transport receives octets on Python 3 as well.
        self.sendData(b'\x00')
        self.send_state = WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE
    else:
        self.send_message_opcode = WebSocketProtocol.MESSAGE_TYPE_BINARY if isBinary else WebSocketProtocol.MESSAGE_TYPE_TEXT
        self.send_state = WebSocketProtocol.SEND_STATE_MESSAGE_BEGIN

    ## setup compressor
    ##
    if self._perMessageCompress is not None and not doNotCompress:
        self.send_compressed = True
        self._perMessageCompress.startCompressMessage()
    else:
        self.send_compressed = False

    self.trafficStats.outgoingWebSocketMessages += 1
def beginMessageFrame(self, length):
    """
    Implements :func:`autobahn.websocket.interfaces.IWebSocketChannel.beginMessageFrame`

    Begin sending a message frame of exactly `length` payload octets within
    the message started via beginMessage(). Writes the frame header; the
    payload itself is supplied via sendMessageFrameData().

    :param length: Payload length of the frame in octets (0 .. 2**63-1).
    :type length: int
    """
    if self.websocket_version == 0:
        raise Exception("function not supported in Hixie-76 mode")
    if self.state != WebSocketProtocol.STATE_OPEN:
        return
    ## check if sending state is valid for this method
    ##
    if self.send_state not in [WebSocketProtocol.SEND_STATE_MESSAGE_BEGIN, WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE]:
        raise Exception("WebSocketProtocol.beginMessageFrame invalid in current sending state [%d]" % self.send_state)
    if type(length) != int or length < 0 or length > 0x7FFFFFFFFFFFFFFF: # 2**63
        raise Exception("invalid value for message frame length")
    self.send_message_frame_length = length
    self.trafficStats.outgoingWebSocketFrames += 1
    if (not self.factory.isServer and self.maskClientFrames) or (self.factory.isServer and self.maskServerFrames):
        ## automatic mask:
        ## - client-to-server masking (if not deactivated)
        ## - server-to-client masking (if activated)
        ##
        self.send_message_frame_mask = struct.pack("!I", random.getrandbits(32))
    else:
        ## no mask
        ##
        self.send_message_frame_mask = None
    ## payload masker
    ##
    if self.send_message_frame_mask and length > 0 and self.applyMask:
        self.send_message_frame_masker = createXorMasker(self.send_message_frame_mask, length)
    else:
        ## null masker: passes payload through unchanged, only tracks the pointer
        self.send_message_frame_masker = XorMaskerNull()
    ## first byte
    ##
    # FIN = false .. since with streaming, we don't know when message ends
    b0 = 0
    if self.send_state == WebSocketProtocol.SEND_STATE_MESSAGE_BEGIN:
        ## first frame of the message carries the message opcode (low 7 bits)
        b0 |= self.send_message_opcode % 128
        if self.send_compressed:
            ## sets RSV1 (0x40), flagging a per-message-compressed message
            b0 |= (4 % 8) << 4
        self.send_state = WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE
    else:
        pass # message continuation frame
    ## second byte, payload len bytes and mask
    ##
    b1 = 0
    if self.send_message_frame_mask:
        ## MASK bit set; mask octets follow the (extended) length
        b1 |= 1 << 7
        mv = self.send_message_frame_mask
    else:
        mv = b''
    ## 7-bit length, or 126 + 16-bit, or 127 + 64-bit extended length
    el = b''
    if length <= 125:
        b1 |= length
    elif length <= 0xFFFF:
        b1 |= 126
        el = struct.pack("!H", length)
    elif length <= 0x7FFFFFFFFFFFFFFF:
        b1 |= 127
        el = struct.pack("!Q", length)
    else:
        raise Exception("invalid payload length")
    ## write message frame header
    ##
    if PY3:
        header = b''.join([b0.to_bytes(1, 'big'), b1.to_bytes(1, 'big'), el, mv])
    else:
        header = b''.join([chr(b0), chr(b1), el, mv])
    self.sendData(header)
    ## now we are inside message frame ..
    ##
    self.send_state = WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE_FRAME
def sendMessageFrameData(self, payload, sync = False):
    """
    Implements :func:`autobahn.websocket.interfaces.IWebSocketChannel.sendMessageFrameData`

    Send payload data for the frame begun with beginMessageFrame(). May be
    called multiple times until the declared frame length is reached.

    :param payload: Octets to send inside the current frame.
    :param sync: Passed through to sendData().
    :type sync: bool
    :returns: int or None -- remaining octets for the current frame (see
       the comment before the return); `None` in Hixie mode.
    """
    if self.state != WebSocketProtocol.STATE_OPEN:
        return
    if not self.send_compressed:
        ## for compressed messages, app-level accounting was already done
        ## in sendMessageFrame() on the uncompressed data
        self.trafficStats.outgoingOctetsAppLevel += len(payload)
    self.trafficStats.outgoingOctetsWebSocketLevel += len(payload)
    if self.websocket_version == 0:
        ## Hixie Mode
        ##
        if self.send_state != WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE:
            raise Exception("WebSocketProtocol.sendMessageFrameData invalid in current sending state")
        self.sendData(payload, sync = sync)
        return None
    else:
        ## Hybi Mode
        ##
        if self.send_state != WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE_FRAME:
            raise Exception("WebSocketProtocol.sendMessageFrameData invalid in current sending state")
        rl = len(payload)
        ## the masker's pointer tracks how many octets of the frame were already sent;
        ## truncate payload if it would exceed the declared frame length
        if self.send_message_frame_masker.pointer() + rl > self.send_message_frame_length:
            l = self.send_message_frame_length - self.send_message_frame_masker.pointer()
            rest = -(rl - l)
            pl = payload[:l]
        else:
            l = rl
            rest = self.send_message_frame_length - self.send_message_frame_masker.pointer() - l
            pl = payload
        ## mask frame payload
        ##
        plm = self.send_message_frame_masker.process(pl)
        ## send frame payload
        ##
        self.sendData(plm, sync = sync)
        ## if we are done with frame, move back into "inside message" state
        ##
        if self.send_message_frame_masker.pointer() >= self.send_message_frame_length:
            self.send_state = WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE
        ## when =0 : frame was completed exactly
        ## when >0 : frame is still uncomplete and that much amount is still left to complete the frame
        ## when <0 : frame was completed and there was this much unconsumed data in payload argument
        ##
        return rest
def endMessage(self):
    """
    Implements :func:`autobahn.websocket.interfaces.IWebSocketChannel.endMessage`

    End the message begun via beginMessage() and return the sending state
    machine to GROUND.
    """
    if self.state != WebSocketProtocol.STATE_OPEN:
        return
    ## check if sending state is valid for this method
    ##
    ## NOTE(review): this state assertion is deliberately disabled in the
    ## original code — endMessage() is tolerated from any sending state.
    #if self.send_state != WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE:
    #   raise Exception("WebSocketProtocol.endMessage invalid in current sending state [%d]" % self.send_state)
    if self.websocket_version == 0:
        ## Hixie-76: message is terminated by a trailing 0xff .. but the
        ## original sends 0x00 here — NOTE(review): looks suspicious, confirm
        ## against the Hixie-76 framing before changing.
        self.sendData('\x00')
    else:
        if self.send_compressed:
            ## flush the compressor; the tail goes into the final frame
            payload = self._perMessageCompress.endCompressMessage()
            self.trafficStats.outgoingOctetsWebSocketLevel += len(payload)
        else:
            ## send continuation frame with empty payload and FIN set to end message
            payload = b''
        self.sendFrame(opcode = 0, payload = payload, fin = True)
    self.send_state = WebSocketProtocol.SEND_STATE_GROUND
def sendMessageFrame(self, payload, sync = False):
    """
    Implements :func:`autobahn.websocket.interfaces.IWebSocketChannel.sendMessageFrame`

    Send one complete message frame (header plus payload) within the
    message begun via beginMessage().
    """
    ## frame-based sending only exists in Hybi mode
    if self.websocket_version == 0:
        raise Exception("function not supported in Hixie-76 mode")
    if self.state != WebSocketProtocol.STATE_OPEN:
        return
    data = payload
    if self.send_compressed:
        ## account for the uncompressed octets, then compress the frame data
        self.trafficStats.outgoingOctetsAppLevel += len(data)
        data = self._perMessageCompress.compressMessageData(data)
    self.beginMessageFrame(len(data))
    self.sendMessageFrameData(data, sync)
def sendMessage(self,
                payload,
                isBinary = False,
                fragmentSize = None,
                sync = False,
                doNotCompress = False):
    """
    Implements :func:`autobahn.websocket.interfaces.IWebSocketChannel.sendMessage`

    Send a complete WebSocket message, dispatching to the Hixie-76 or Hybi
    send path depending on the negotiated protocol version.
    """
    assert(type(payload) == bytes)
    if self.state != WebSocketProtocol.STATE_OPEN:
        return
    if self.trackedTimings:
        self.trackedTimings.track("sendMessage")
    if self.websocket_version != 0:
        ## Hybi mode: full-featured send path (binary, fragmentation, compression)
        self.sendMessageHybi(payload, isBinary, fragmentSize, sync, doNotCompress)
        return
    ## Hixie-76 mode only supports unfragmented text messages
    if isBinary:
        raise Exception("cannot send binary message in Hixie76 mode")
    if fragmentSize:
        raise Exception("cannot fragment messages in Hixie76 mode")
    self.sendMessageHixie76(payload, sync)
def sendMessageHixie76(self, payload, sync = False):
    """
    Hixie76-Variant of sendMessage().

    Modes: Hixie

    Frames the payload with the Hixie-76 delimiters (leading 0x00 octet,
    trailing 0xff octet) and sends it.
    """
    frame = b''.join([b'\x00', payload, b'\xff'])
    self.sendData(frame, sync = sync)
def sendMessageHybi(self,
                    payload,
                    isBinary = False,
                    fragmentSize = None,
                    sync = False,
                    doNotCompress = False):
    """
    Hybi-Variant of sendMessage().
    Modes: Hybi

    Sends a complete message, optionally compressed (permessage-compress)
    and/or split into continuation frames of at most `fragmentSize` octets.
    """
    ## (initial) frame opcode
    ##
    if isBinary:
        opcode = 2
    else:
        opcode = 1
    self.trafficStats.outgoingWebSocketMessages += 1
    ## setup compressor
    ##
    if self._perMessageCompress is not None and not doNotCompress:
        sendCompressed = True
        self._perMessageCompress.startCompressMessage()
        self.trafficStats.outgoingOctetsAppLevel += len(payload)
        ## compress the whole message up-front (fragmentation below then
        ## splits the already-compressed octets)
        payload1 = self._perMessageCompress.compressMessageData(payload)
        payload2 = self._perMessageCompress.endCompressMessage()
        payload = b''.join([payload1, payload2])
        self.trafficStats.outgoingOctetsWebSocketLevel += len(payload)
    else:
        sendCompressed = False
        l = len(payload)
        self.trafficStats.outgoingOctetsAppLevel += l
        self.trafficStats.outgoingOctetsWebSocketLevel += l
    ## explicit fragmentSize arguments overrides autoFragmentSize setting
    ##
    if fragmentSize is not None:
        pfs = fragmentSize
    else:
        if self.autoFragmentSize > 0:
            pfs = self.autoFragmentSize
        else:
            pfs = None
    ## send unfragmented
    ##
    if pfs is None or len(payload) <= pfs:
        ## rsv = 4 sets RSV1, flagging a compressed message
        self.sendFrame(opcode = opcode, payload = payload, sync = sync, rsv = 4 if sendCompressed else 0)
    ## send data message in fragments
    ##
    else:
        if pfs < 1:
            raise Exception("payload fragment size must be at least 1 (was %d)" % pfs)
        n = len(payload)
        i = 0
        done = False
        first = True
        while not done:
            j = i + pfs
            if j > n:
                done = True
                j = n
            if first:
                ## first fragment carries the opcode (and RSV1 if compressed)
                self.sendFrame(opcode = opcode, payload = payload[i:j], fin = done, sync = sync, rsv = 4 if sendCompressed else 0)
                first = False
            else:
                ## continuation frames use opcode 0; FIN only on the last one
                self.sendFrame(opcode = 0, payload = payload[i:j], fin = done, sync = sync)
            i += pfs
    #if self.debug:
    #   self.factory._log("Traffic statistics:\n" + str(self.trafficStats))
def _parseExtensionsHeader(self, header, removeQuotes = True):
"""
Parse the Sec-WebSocket-Extensions header.
"""
extensions = []
exts = [str(x.strip()) for x in header.split(',')]
for e in exts:
if e != "":
ext = [x.strip() for x in e.split(";")]
if len(ext) > 0:
extension = ext[0].lower()
params = {}
for p in ext[1:]:
p = [x.strip() for x in p.split("=")]
key = p[0].lower()
if len(p) > 1:
value = "=".join(p[1:])
if removeQuotes:
if len(value) > 0 and value[0] == '"':
value = value[1:]
if len(value) > 0 and value[-1] == '"':
value = value[:-1]
else:
value = True
if not key in params:
params[key] = []
params[key].append(value)
extensions.append((extension, params))
else:
pass # should not arrive here
return extensions
class PreparedMessage:
    """
    Encapsulates a prepared message to be sent later once or multiple
    times on one or more WebSocket connections.
    This can be used for optimizing Broadcast/PubSub.
    """

    def __init__(self, payload, isBinary, applyMask, doNotCompress):
        """
        Ctor for a prepared message.

        :param payload: The message payload.
        :type payload: str
        :param isBinary: Provide `True` for binary payload.
        :type isBinary: bool
        :param applyMask: Provide `True` if WebSocket message is to be masked (required for client to server WebSocket messages).
        :type applyMask: bool
        :param doNotCompress: Iff `True`, never compress this message. This only applies to
           Hybi-Mode and only when WebSocket compression has been negotiated on
           the WebSocket connection. Use when you know the payload
           uncompressible (e.g. encrypted or already compressed).
        :type doNotCompress: bool
        """
        if not doNotCompress:
            ## we need to store original payload for compressed WS
            ## connections (cannot compress/frame in advanced when
            ## compression is on, and context takeover is off)
            ## NOTE(review): `self.payload` is only set in this branch, so it
            ## is undefined when doNotCompress is True — callers presumably
            ## check `doNotCompress` first; verify against sendPreparedMessage().
            self.payload = payload
            self.binary = isBinary
        self.doNotCompress = doNotCompress
        ## store pre-framed octets to be sent to Hixie-76 peers
        self._initHixie(payload, isBinary)
        ## store pre-framed octets to be sent to Hybi peers
        self._initHybi(payload, isBinary, applyMask)

    def _initHixie(self, payload, binary):
        ## pre-frame the Hixie-76 wire form: 0x00 .. payload .. 0xff
        if binary:
            # silently filter out .. probably do something else:
            # base64?
            # dunno
            self.payloadHixie = ''
        else:
            ## NOTE(review): text-string concatenation — assumes str payload;
            ## would raise on bytes under Python 3. Confirm Hixie path is
            ## Python-2-only before relying on this.
            self.payloadHixie = '\x00' + payload + '\xff'

    def _initHybi(self, payload, binary, masked):
        ## pre-frame the RFC 6455 wire form as a single unfragmented frame
        l = len(payload)
        ## first byte
        ##
        ## FIN set (0x80) plus opcode 2 (binary) or 1 (text)
        b0 = ((1 << 7) | 2) if binary else ((1 << 7) | 1)
        ## second byte, payload len bytes and mask
        ##
        if masked:
            ## MASK bit plus a fresh random 4-octet mask, payload pre-masked
            b1 = 1 << 7
            mask = struct.pack("!I", random.getrandbits(32))
            if l == 0:
                plm = payload
            else:
                plm = createXorMasker(mask, l).process(payload)
        else:
            b1 = 0
            mask = b''
            plm = payload
        ## payload extended length
        ##
        ## 7-bit length, or 126 + 16-bit, or 127 + 64-bit extended length
        el = b''
        if l <= 125:
            b1 |= l
        elif l <= 0xFFFF:
            b1 |= 126
            el = struct.pack("!H", l)
        elif l <= 0x7FFFFFFFFFFFFFFF:
            b1 |= 127
            el = struct.pack("!Q", l)
        else:
            raise Exception("invalid payload length")
        ## raw WS message (single frame)
        ##
        if PY3:
            self.payloadHybi = b''.join([b0.to_bytes(1, 'big'), b1.to_bytes(1, 'big'), el, mask, plm])
        else:
            self.payloadHybi = b''.join([chr(b0), chr(b1), el, mask, plm])
class WebSocketFactory:
    """
    Mixin for
    :class:`autobahn.websocket.protocol.WebSocketClientFactory` and
    :class:`autobahn.websocket.protocol.WebSocketServerFactory`.
    """

    def prepareMessage(self, payload, isBinary = False, doNotCompress = False):
        """
        Prepare a WebSocket message. This can be later sent on multiple
        instances of :class:`autobahn.websocket.WebSocketProtocol` using
        :meth:`autobahn.websocket.WebSocketProtocol.sendPreparedMessage`.

        By doing so, you can avoid the (small) overhead of framing the
        *same* payload into WebSocket messages multiple times when that
        same payload is to be sent out on multiple connections.

        :param payload: The message payload.
        :type payload: bytes
        :param isBinary: `True` iff payload is binary, else the payload must be UTF-8 encoded text.
        :type isBinary: bool
        :param doNotCompress: Iff `True`, never compress this message. This only applies to
           Hybi-Mode and only when WebSocket compression has been negotiated on
           the WebSocket connection. Use when you know the payload
           uncompressible (e.g. encrypted or already compressed).
        :type doNotCompress: bool

        :returns: obj -- An instance of :class:`autobahn.websocket.protocol.PreparedMessage`.
        """
        ## masking is applied iff we are the client side (client-to-server
        ## frames must be masked)
        return PreparedMessage(payload, isBinary, not self.isServer, doNotCompress)
class WebSocketServerProtocol(WebSocketProtocol):
"""
Protocol base class for WebSocket servers.
"""
CONFIG_ATTRS = WebSocketProtocol.CONFIG_ATTRS_COMMON + WebSocketProtocol.CONFIG_ATTRS_SERVER
def onConnect(self, request):
    """
    Callback fired during WebSocket opening handshake when new WebSocket client
    connection is about to be established.

    When you want to accept the connection, return the accepted protocol
    from list of WebSocket (sub)protocols provided by client or `None` to
    speak no specific one or when the client protocol list was empty.

    You may also return a pair of `(protocol, headers)` to send additional
    HTTP headers, with `headers` being a dictionary of key-values.

    Throw :class:`autobahn.websocket.http.HttpException` when you don't want
    to accept the WebSocket connection request.

    :param request: WebSocket connection request information.
    :type request: instance of :class:`autobahn.websocket.protocol.ConnectionRequest`

    :returns: The default implementation accepts with no specific subprotocol
       and no extra headers (returns `None`).
    """
    ## default: accept the connection with no specific subprotocol
    return None
def _connectionMade(self):
    """
    Called by network framework when new transport connection from client was
    accepted. Default implementation will prepare for initial WebSocket opening
    handshake. When overriding in derived class, make sure to call this base class
    implementation *before* your code.
    """
    WebSocketProtocol._connectionMade(self)
    ## one more client connected on this factory
    self.factory.countConnections = self.factory.countConnections + 1
    if self.debug:
        msg = "connection accepted from peer %s" % self.peer
        self.factory._log(msg)
def _connectionLost(self, reason):
    """
    Called by network framework when established transport connection from client
    was lost. Default implementation will tear down all state properly.
    When overriding in derived class, make sure to call this base class
    implementation *after* your code.
    """
    WebSocketProtocol._connectionLost(self, reason)
    ## one client less connected on this factory
    self.factory.countConnections = self.factory.countConnections - 1
    if self.debug:
        msg = "connection from %s lost" % self.peer
        self.factory._log(msg)
def processProxyConnect(self):
    ## server side never performs a proxy CONNECT; guard against misuse
    raise Exception("Autobahn isn't a proxy server")
def parseHixie76Key(self, key):
    """
    Parse Hixie76 opening handshake key provided by client.

    Per the Hixie-76 draft, the key number is obtained by concatenating
    all digit characters in the header value and integer-dividing by the
    number of space characters.

    :param key: Value of the `Sec-WebSocket-Key1` or `Sec-WebSocket-Key2` header.
    :type key: str
    :returns: int -- The decoded key number.
    """
    ## FIX: the original did `int(filter(lambda x: x.isdigit(), key))`,
    ## which fails on Python 3 (filter returns an iterator, not a string),
    ## and used `/`, which is true division on Python 3. Join the digits
    ## explicitly and use floor division (identical to Py2 int division
    ## for non-negative operands). A malformed key (no spaces) still
    ## raises (ZeroDivisionError), which callers already handle.
    digits = "".join(c for c in key if c.isdigit())
    return int(digits) // key.count(" ")
def processHandshake(self):
    """
    Process WebSocket opening handshake request from client.

    Buffers incoming data in `self.data` until the complete HTTP request
    (and, for Hixie-76, the 8-octet key3 body) has arrived, validates the
    request line and all handshake headers, then fires `_onConnect()` with
    a :class:`ConnectionRequest`. Any validation error fails the handshake
    via `failHandshake()`.

    FIX vs. original: the Sec-WebSocket-Key "bad character" error message
    was missing the `%` string-formatting operator, which raised
    `TypeError: 'str' object is not callable` instead of cleanly failing
    the handshake.
    """
    ## only proceed when we have fully received the HTTP request line and all headers
    ##
    end_of_header = self.data.find(b"\x0d\x0a\x0d\x0a")
    if end_of_header >= 0:
        self.http_request_data = self.data[:end_of_header + 4]
        if self.debug:
            self.factory._log("received HTTP request:\n\n%s\n\n" % self.http_request_data)

        ## extract HTTP status line and headers
        ##
        (self.http_status_line, self.http_headers, http_headers_cnt) = parseHttpHeader(self.http_request_data)

        ## validate WebSocket opening handshake client request
        ##
        if self.debug:
            self.factory._log("received HTTP status line in opening handshake : %s" % str(self.http_status_line))
            self.factory._log("received HTTP headers in opening handshake : %s" % str(self.http_headers))

        ## HTTP Request line : METHOD, VERSION
        ##
        rl = self.http_status_line.split()
        if len(rl) != 3:
            return self.failHandshake("Bad HTTP request status line '%s'" % self.http_status_line)
        if rl[0].strip() != "GET":
            return self.failHandshake("HTTP method '%s' not allowed" % rl[0], http.METHOD_NOT_ALLOWED[0])
        vs = rl[2].strip().split("/")
        if len(vs) != 2 or vs[0] != "HTTP" or vs[1] not in ["1.1"]:
            return self.failHandshake("Unsupported HTTP version '%s'" % rl[2], http.UNSUPPORTED_HTTP_VERSION[0])

        ## HTTP Request line : REQUEST-URI
        ##
        self.http_request_uri = rl[1].strip()
        try:
            (scheme, netloc, path, params, query, fragment) = urlparse.urlparse(self.http_request_uri)

            ## FIXME: check that if absolute resource URI is given,
            ## the scheme/netloc matches the server
            if scheme != "" or netloc != "":
                pass

            ## Fragment identifiers are meaningless in the context of WebSocket
            ## URIs, and MUST NOT be used on these URIs.
            if fragment != "":
                return self.failHandshake("HTTP requested resource contains a fragment identifier '%s'" % fragment)

            ## resource path and query parameters .. this will get forwarded
            ## to onConnect()
            self.http_request_path = path
            self.http_request_params = urlparse.parse_qs(query)
        except:
            return self.failHandshake("Bad HTTP request resource - could not parse '%s'" % rl[1].strip())

        ## Host
        ##
        if not 'host' in self.http_headers:
            return self.failHandshake("HTTP Host header missing in opening handshake request")

        if http_headers_cnt["host"] > 1:
            return self.failHandshake("HTTP Host header appears more than once in opening handshake request")

        self.http_request_host = self.http_headers["host"].strip()

        if self.http_request_host.find(":") >= 0:
            (h, p) = self.http_request_host.split(":")
            try:
                port = int(str(p.strip()))
            except:
                return self.failHandshake("invalid port '%s' in HTTP Host header '%s'" % (str(p.strip()), str(self.http_request_host)))
            if port != self.factory.externalPort:
                return self.failHandshake("port %d in HTTP Host header '%s' does not match server listening port %s" % (port, str(self.http_request_host), self.factory.externalPort))
            self.http_request_host = h
        else:
            ## no port in Host header: only valid when we listen on the
            ## default port for ws (80) resp. wss (443)
            if not ((self.factory.isSecure and self.factory.externalPort == 443) or (not self.factory.isSecure and self.factory.externalPort == 80)):
                return self.failHandshake("missing port in HTTP Host header '%s' and server runs on non-standard port %d (wss = %s)" % (str(self.http_request_host), self.factory.externalPort, self.factory.isSecure))

        ## Upgrade
        ##
        if not 'upgrade' in self.http_headers:
            ## When no WS upgrade, render HTML server status page
            ##
            if self.webStatus:
                if 'redirect' in self.http_request_params and len(self.http_request_params['redirect']) > 0:
                    ## To specifiy an URL for redirection, encode the URL, i.e. from JavaScript:
                    ##
                    ##    var url = encodeURIComponent("http://autobahn.ws/python");
                    ##
                    ## and append the encoded string as a query parameter 'redirect'
                    ##
                    ##    http://localhost:9000?redirect=http%3A%2F%2Fautobahn.ws%2Fpython
                    ##    https://localhost:9000?redirect=https%3A%2F%2Ftwitter.com%2F
                    ##
                    ## This will perform an immediate HTTP-303 redirection. If you provide
                    ## an additional parameter 'after' (int >= 0), the redirection happens
                    ## via Meta-Refresh in the rendered HTML status page, i.e.
                    ##
                    ##    https://localhost:9000/?redirect=https%3A%2F%2Ftwitter.com%2F&after=3
                    ##
                    url = self.http_request_params['redirect'][0]
                    if 'after' in self.http_request_params and len(self.http_request_params['after']) > 0:
                        after = int(self.http_request_params['after'][0])
                        if self.debugCodePaths:
                            self.factory._log("HTTP Upgrade header missing : render server status page and meta-refresh-redirecting to %s after %d seconds" % (url, after))
                        self.sendServerStatus(url, after)
                    else:
                        if self.debugCodePaths:
                            self.factory._log("HTTP Upgrade header missing : 303-redirecting to %s" % url)
                        self.sendRedirect(url)
                else:
                    if self.debugCodePaths:
                        self.factory._log("HTTP Upgrade header missing : render server status page")
                    self.sendServerStatus()
                self.dropConnection(abort = False)
                return
            else:
                return self.failHandshake("HTTP Upgrade header missing", http.UPGRADE_REQUIRED[0])
        upgradeWebSocket = False
        for u in self.http_headers["upgrade"].split(","):
            if u.strip().lower() == "websocket":
                upgradeWebSocket = True
                break
        if not upgradeWebSocket:
            return self.failHandshake("HTTP Upgrade headers do not include 'websocket' value (case-insensitive) : %s" % self.http_headers["upgrade"])

        ## Connection
        ##
        if not 'connection' in self.http_headers:
            return self.failHandshake("HTTP Connection header missing")
        connectionUpgrade = False
        for c in self.http_headers["connection"].split(","):
            if c.strip().lower() == "upgrade":
                connectionUpgrade = True
                break
        if not connectionUpgrade:
            return self.failHandshake("HTTP Connection headers do not include 'upgrade' value (case-insensitive) : %s" % self.http_headers["connection"])

        ## Sec-WebSocket-Version PLUS determine mode: Hybi or Hixie
        ##
        if not 'sec-websocket-version' in self.http_headers:
            ## absence of the version header identifies a Hixie-76 client
            if self.debugCodePaths:
                self.factory._log("Hixie76 protocol detected")
            if self.allowHixie76:
                version = 0
            else:
                return self.failHandshake("WebSocket connection denied - Hixie76 protocol mode disabled.")
        else:
            if self.debugCodePaths:
                self.factory._log("Hybi protocol detected")
            if http_headers_cnt["sec-websocket-version"] > 1:
                return self.failHandshake("HTTP Sec-WebSocket-Version header appears more than once in opening handshake request")
            try:
                version = int(self.http_headers["sec-websocket-version"])
            except:
                return self.failHandshake("could not parse HTTP Sec-WebSocket-Version header '%s' in opening handshake request" % self.http_headers["sec-websocket-version"])

        if version not in self.versions:
            ## respond with list of supported versions (descending order)
            ##
            sv = sorted(self.versions)
            sv.reverse()
            svs = ','.join([str(x) for x in sv])
            return self.failHandshake("WebSocket version %d not supported (supported versions: %s)" % (version, svs),
                                      http.BAD_REQUEST[0],
                                      [("Sec-WebSocket-Version", svs)])
        else:
            ## store the protocol version we are supposed to talk
            self.websocket_version = version

        ## Sec-WebSocket-Protocol
        ##
        if 'sec-websocket-protocol' in self.http_headers:
            protocols = [str(x.strip()) for x in self.http_headers["sec-websocket-protocol"].split(",")]
            # check for duplicates in protocol header
            pp = {}
            for p in protocols:
                if p in pp:
                    return self.failHandshake("duplicate protocol '%s' specified in HTTP Sec-WebSocket-Protocol header" % p)
                else:
                    pp[p] = 1
            # ok, no duplicates, save list in order the client sent it
            self.websocket_protocols = protocols
        else:
            self.websocket_protocols = []

        ## Origin / Sec-WebSocket-Origin
        ## http://tools.ietf.org/html/draft-ietf-websec-origin-02
        ##
        if self.websocket_version < 13 and self.websocket_version != 0:
            # Hybi, but only < Hybi-13
            websocket_origin_header_key = 'sec-websocket-origin'
        else:
            # RFC6455, >= Hybi-13 and Hixie
            websocket_origin_header_key = "origin"

        self.websocket_origin = None
        if websocket_origin_header_key in self.http_headers:
            if http_headers_cnt[websocket_origin_header_key] > 1:
                return self.failHandshake("HTTP Origin header appears more than once in opening handshake request")
            self.websocket_origin = self.http_headers[websocket_origin_header_key].strip()
        else:
            # non-browser clients are allowed to omit this header
            pass

        ## Sec-WebSocket-Key (Hybi) or Sec-WebSocket-Key1/Sec-WebSocket-Key2 (Hixie-76)
        ##
        if self.websocket_version == 0:
            for kk in ['Sec-WebSocket-Key1', 'Sec-WebSocket-Key2']:
                k = kk.lower()
                if not k in self.http_headers:
                    return self.failHandshake("HTTP %s header missing" % kk)
                if http_headers_cnt[k] > 1:
                    return self.failHandshake("HTTP %s header appears more than once in opening handshake request" % kk)
            try:
                key1 = self.parseHixie76Key(self.http_headers["sec-websocket-key1"].strip())
                key2 = self.parseHixie76Key(self.http_headers["sec-websocket-key2"].strip())
            except:
                return self.failHandshake("could not parse Sec-WebSocket-Key1/2")
        else:
            if not 'sec-websocket-key' in self.http_headers:
                return self.failHandshake("HTTP Sec-WebSocket-Key header missing")
            if http_headers_cnt["sec-websocket-key"] > 1:
                return self.failHandshake("HTTP Sec-WebSocket-Key header appears more than once in opening handshake request")
            key = self.http_headers["sec-websocket-key"].strip()
            if len(key) != 24: # 16 bytes => (ceil(128/24)*24)/6 == 24
                return self.failHandshake("bad Sec-WebSocket-Key (length must be 24 ASCII chars) '%s'" % key)
            if key[-2:] != "==": # 24 - ceil(128/6) == 2
                return self.failHandshake("bad Sec-WebSocket-Key (invalid base64 encoding) '%s'" % key)
            for c in key[:-2]:
                if c not in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+/":
                    ## FIX: the '%' operator was missing here in the original,
                    ## turning this line into a call on a str (TypeError)
                    return self.failHandshake("bad character '%s' in Sec-WebSocket-Key (invalid base64 encoding) '%s'" % (c, key))

        ## Sec-WebSocket-Extensions
        ##
        self.websocket_extensions = []
        if 'sec-websocket-extensions' in self.http_headers:
            if self.websocket_version == 0:
                return self.failHandshake("HTTP Sec-WebSocket-Extensions header encountered for Hixie-76")
            else:
                if http_headers_cnt["sec-websocket-extensions"] > 1:
                    return self.failHandshake("HTTP Sec-WebSocket-Extensions header appears more than once in opening handshake request")
                else:
                    ## extensions requested/offered by client
                    ##
                    self.websocket_extensions = self._parseExtensionsHeader(self.http_headers["sec-websocket-extensions"])

        ## For Hixie-76, we need 8 octets of HTTP request body to complete HS!
        ##
        if self.websocket_version == 0:
            if len(self.data) < end_of_header + 4 + 8:
                ## wait for more data to arrive
                return
            else:
                key3 = self.data[end_of_header + 4:end_of_header + 4 + 8]
                if self.debug:
                    self.factory._log("received HTTP request body containing key3 for Hixie-76: %s" % key3)

        ## Ok, got complete HS input, remember rest (if any)
        ##
        if self.websocket_version == 0:
            self.data = self.data[end_of_header + 4 + 8:]
        else:
            self.data = self.data[end_of_header + 4:]

        ## store WS key
        ##
        if self.websocket_version == 0:
            self._wskey = (key1, key2, key3)
        else:
            self._wskey = key

        ## WebSocket handshake validated => produce opening handshake response
        ## Now fire onConnect() on derived class, to give that class a chance to accept or deny
        ## the connection. onConnect() may throw, in which case the connection is denied, or it
        ## may return a protocol from the protocols provided by client or None.
        ##
        request = ConnectionRequest(self.peer,
                                    self.http_headers,
                                    self.http_request_host,
                                    self.http_request_path,
                                    self.http_request_params,
                                    self.websocket_version,
                                    self.websocket_origin,
                                    self.websocket_protocols,
                                    self.websocket_extensions)

        self._onConnect(request)
def succeedHandshake(self, res):
    """
    Callback after onConnect() returns successfully. Generates the response for the handshake.

    `res` is either the accepted subprotocol (or `None`), or a
    `(protocol, headers)` pair with extra HTTP response headers. Builds and
    sends the 101 Switching Protocols response (Hybi) or the Hixie-76
    handshake response with MD5 body, then moves the connection into the
    OPEN state and fires `_onOpen()`.
    """
    protocol = None
    headers = {}
    if type(res) == tuple:
        if len(res) > 0:
            protocol = res[0]
        if len(res) > 1:
            headers = res[1]
    else:
        protocol = res

    if protocol is not None and not (protocol in self.websocket_protocols):
        raise Exception("protocol accepted must be from the list client sent or None")

    self.websocket_protocol_in_use = protocol

    ## handshake key material stored by processHandshake()
    if self.websocket_version == 0:
        key1, key2, key3 = self._wskey
    else:
        key = self._wskey

    ## extensions effectively in use for this connection
    ##
    self.websocket_extensions_in_use = []

    extensionResponse = []

    ## gets filled with permessage-compress offers from the client
    ##
    pmceOffers = []

    ## handle WebSocket extensions
    ##
    for (extension, params) in self.websocket_extensions:

        if self.debug:
            self.factory._log("parsed WebSocket extension '%s' with params '%s'" % (extension, params))

        ## process permessage-compress extension
        ##
        if extension in PERMESSAGE_COMPRESSION_EXTENSION:

            PMCE = PERMESSAGE_COMPRESSION_EXTENSION[extension]
            try:
                offer = PMCE['Offer'].parse(params)
                pmceOffers.append(offer)
            except Exception as e:
                return self.failHandshake(str(e))

        else:
            if self.debug:
                self.factory._log("client requested '%s' extension we don't support or which is not activated" % extension)

    ## handle permessage-compress offers by the client
    ##
    if len(pmceOffers) > 0:
        ## let application code pick (or reject) an offer
        accept = self.perMessageCompressionAccept(pmceOffers)
        if accept is not None:
            PMCE = PERMESSAGE_COMPRESSION_EXTENSION[accept.EXTENSION_NAME]
            self._perMessageCompress = PMCE['PMCE'].createFromOfferAccept(self.factory.isServer, accept)
            self.websocket_extensions_in_use.append(self._perMessageCompress)
            extensionResponse.append(accept.getExtensionString())
        else:
            if self.debug:
                self.factory._log("client request permessage-compress extension, but we did not accept any offer [%s]" % pmceOffers)

    ## build response to complete WebSocket handshake
    ##
    response = "HTTP/1.1 %d Switching Protocols\x0d\x0a" % http.SWITCHING_PROTOCOLS[0]

    if self.factory.server is not None and self.factory.server != "":
        response += "Server: %s\x0d\x0a" % self.factory.server

    response += "Upgrade: WebSocket\x0d\x0a"
    response += "Connection: Upgrade\x0d\x0a"

    ## optional, user supplied additional HTTP headers
    ##
    ## headers from factory
    for uh in self.factory.headers.items():
        response += "%s: %s\x0d\x0a" % (uh[0], uh[1])
    ## headers from onConnect
    for uh in headers.items():
        response += "%s: %s\x0d\x0a" % (uh[0], uh[1])

    if self.websocket_protocol_in_use is not None:
        response += "Sec-WebSocket-Protocol: %s\x0d\x0a" % str(self.websocket_protocol_in_use)

    if self.websocket_version == 0:

        if self.websocket_origin:
            ## browser client provide the header, and expect it to be echo'ed
            response += "Sec-WebSocket-Origin: %s\x0d\x0a" % str(self.websocket_origin)

        if self.debugCodePaths:
            self.factory._log('factory isSecure = %s port = %s' % (self.factory.isSecure, self.factory.externalPort))

        if (self.factory.isSecure and self.factory.externalPort != 443) or ((not self.factory.isSecure) and self.factory.externalPort != 80):
            if self.debugCodePaths:
                self.factory._log('factory running on non-default port')
            response_port = ':' + str(self.factory.externalPort)
        else:
            if self.debugCodePaths:
                self.factory._log('factory running on default port')
            response_port = ''

        ## FIXME: check this! But see below ..
        if False:
            response_host = str(self.factory.host)
            response_path = str(self.factory.path)
        else:
            response_host = str(self.http_request_host)
            response_path = str(self.http_request_uri)

        location = "%s://%s%s%s" % ('wss' if self.factory.isSecure else 'ws', response_host, response_port, response_path)

        # Safari is very picky about this one
        response += "Sec-WebSocket-Location: %s\x0d\x0a" % location

        ## end of HTTP response headers
        response += "\x0d\x0a"

        ## compute accept body
        ##
        ## Hixie-76: MD5 over the two decoded key numbers plus the 8-octet key3
        accept_val = struct.pack(">II", key1, key2) + key3
        response_body = hashlib.md5(accept_val).digest()

    else:
        ## compute Sec-WebSocket-Accept
        ##
        ## RFC 6455: SHA-1 over client key concatenated with the WS magic GUID
        sha1 = hashlib.sha1()
        sha1.update(key.encode('utf8') + WebSocketProtocol._WS_MAGIC)
        sec_websocket_accept = base64.b64encode(sha1.digest())

        response += "Sec-WebSocket-Accept: %s\x0d\x0a" % sec_websocket_accept.decode()

        ## agreed extensions
        ##
        if len(extensionResponse) > 0:
            response += "Sec-WebSocket-Extensions: %s\x0d\x0a" % ', '.join(extensionResponse)

        ## end of HTTP response headers
        response += "\x0d\x0a"
        response_body = None

    ## send out opening handshake response
    ##
    if self.debug:
        self.factory._log("sending HTTP response:\n\n%s" % response)
    self.sendData(response.encode('utf8'))

    if response_body:
        if self.debug:
            self.factory._log("sending HTTP response body:\n\n%s" % binascii.b2a_hex(response_body))
        self.sendData(response_body)

    ## save response for testsuite
    ##
    self.http_response_data = response

    ## opening handshake completed, move WebSocket connection into OPEN state
    ##
    self.state = WebSocketProtocol.STATE_OPEN

    ## cancel any opening HS timer if present
    ##
    if self.openHandshakeTimeoutCall is not None:
        if self.debugCodePaths:
            self.factory._log("openHandshakeTimeoutCall.cancel")
        self.openHandshakeTimeoutCall.cancel()
        self.openHandshakeTimeoutCall = None

    ## init state
    ##
    self.inside_message = False
    if self.websocket_version != 0:
        self.current_frame = None

    ## fire handler on derived class
    ##
    if self.trackedTimings:
        self.trackedTimings.track("onOpen")
    self._onOpen()

    ## process rest, if any
    ##
    if len(self.data) > 0:
        self.consumeData()
def failHandshake(self, reason, code = http.BAD_REQUEST[0], responseHeaders = []):
    """
    During opening handshake the client request was invalid, we send a HTTP
    error response and then drop the connection.
    """
    if self.debug:
        msg = "failing WebSocket opening handshake ('%s')" % reason
        self.factory._log(msg)
    ## respond with the HTTP error, then tear down the transport
    self.sendHttpErrorResponse(code, reason, responseHeaders)
    self.dropConnection(abort = False)
def sendHttpErrorResponse(self, code, reason, responseHeaders = []):
    """
    Send out HTTP error response.
    """
    ## status line, optional extra headers, then the empty-line terminator
    lines = ["HTTP/1.1 {0} {1}\x0d\x0a".format(code, reason)]
    for (name, value) in responseHeaders:
        lines.append("{0}: {1}\x0d\x0a".format(name, value))
    lines.append("\x0d\x0a")
    self.sendData("".join(lines).encode('utf8'))
def sendHtml(self, html):
    """
    Send HTML page HTTP response.
    """
    body = html.encode('utf8')
    header = "HTTP/1.1 %d %s\x0d\x0a" % (http.OK[0], http.OK[1])
    if self.factory.server is not None and self.factory.server != "":
        header += "Server: %s\x0d\x0a" % self.factory.server
    header += "Content-Type: text/html; charset=UTF-8\x0d\x0a"
    header += "Content-Length: %d\x0d\x0a" % len(body)
    header += "\x0d\x0a"
    ## headers first, then the (already encoded) body
    self.sendData(header.encode('utf8'))
    self.sendData(body)
def sendRedirect(self, url):
    """
    Send HTTP Redirect (303) response.

    :param url: Target URL announced in the HTTP "Location" header.
    :type url: str
    """
    parts = ["HTTP/1.1 %d\x0d\x0a" % http.SEE_OTHER[0]]
    ## only announce a "Server" header when the factory configured one
    if self.factory.server is not None and self.factory.server != "":
        parts.append("Server: %s\x0d\x0a" % self.factory.server)
    parts.append("Location: %s\x0d\x0a" % url)
    parts.append("\x0d\x0a")
    self.sendData(''.join(parts).encode('utf8'))
def sendServerStatus(self, redirectUrl = None, redirectAfter = 0):
    """
    Used to send out server status/version upon receiving a HTTP/GET without
    upgrade to WebSocket header (and option serverStatus is True).

    :param redirectUrl: Optional URL the served page redirects to (via HTML meta refresh).
    :type redirectUrl: str
    :param redirectAfter: Seconds after which the redirect triggers (default: `0`).
    :type redirectAfter: int
    """
    if redirectUrl:
        ## HTML meta-refresh: browser navigates to redirectUrl after redirectAfter seconds
        redirect = """<meta http-equiv="refresh" content="%d;URL='%s'">""" % (redirectAfter, redirectUrl)
    else:
        redirect = ""
    ## NOTE: runtime HTML template — kept verbatim; %s slots are the
    ## redirect meta tag and the Autobahn version string.
    html = """
<!DOCTYPE html>
<html>
<head>
%s
<style>
body {
color: #fff;
background-color: #027eae;
font-family: "Segoe UI", "Lucida Grande", "Helvetica Neue", Helvetica, Arial, sans-serif;
font-size: 16px;
}
a, a:visited, a:hover {
color: #fff;
}
</style>
</head>
<body>
<h1>AutobahnPython %s</h1>
<p>
I am not Web server, but a WebSocket endpoint.
You can talk to me using the WebSocket <a href="http://tools.ietf.org/html/rfc6455">protocol</a>.
</p>
<p>
For more information, please visit <a href="http://autobahn.ws/python">my homepage</a>.
</p>
</body>
</html>
""" % (redirect, __version__)
    self.sendHtml(html)
## Factory for server-side WebSocket protocol instances: holds the session
## parameters and protocol option defaults shared by all connections.
class WebSocketServerFactory(WebSocketFactory):
    """
    A protocol factory for WebSocket servers.
    """

    protocol = WebSocketServerProtocol
    """
    The protocol to be spoken. Must be derived from :class:`autobahn.websocket.protocol.WebSocketServerProtocol`.
    """

    isServer = True
    """
    Flag indicating if this factory is client- or server-side.
    """
def __init__(self,
             url = None,
             protocols = None,
             server = "AutobahnPython/%s" % __version__,
             headers = None,
             externalPort = None,
             debug = False,
             debugCodePaths = False):
    """
    Create instance of WebSocket server factory.

    Note that you MUST provide URL either here or using
    :meth:`autobahn.websocket.protocol.WebSocketServerFactory.setSessionParameters`
    *before* the factory is started.

    :param url: The WebSocket URL this factory is working for, e.g. `ws://myhost.com/somepath`.
    :type url: str
    :param protocols: List of subprotocols the server supports. The subprotocol used is the first from the list of subprotocols announced by the client that is contained in this list.
    :type protocols: list of strings or None
    :param server: Server as announced in HTTP response header during opening handshake or None (default: `AutobahnWebSocket/?.?.?`).
    :type server: str
    :param headers: An optional mapping of additional HTTP headers to send during the WebSocket opening handshake.
    :type headers: dict or None
    :param externalPort: Optionally, the external visible port this factory will be reachable under (i.e. when running behind a L2/L3 forwarding device).
    :type externalPort: int
    :param debug: Debug mode (default: `False`).
    :type debug: bool
    :param debugCodePaths: Debug code paths mode (default: `False`).
    :type debugCodePaths: bool
    """
    self.debug = debug
    self.debugCodePaths = debugCodePaths

    self.logOctets = debug
    self.logFrames = debug

    self.trackTimings = False

    ## seed RNG which is used for WS frame masks generation
    random.seed()

    ## default WS session parameters
    ##
    ## FIX: mutable default arguments ([] / {}) were replaced by None to
    ## avoid accidental sharing between instances; normalize here.
    self.setSessionParameters(url,
                              protocols if protocols is not None else [],
                              server,
                              headers if headers is not None else {},
                              externalPort)

    ## default WebSocket protocol options
    ##
    self.resetProtocolOptions()

    ## number of currently connected clients
    ##
    self.countConnections = 0
def setSessionParameters(self,
                         url = None,
                         protocols = None,
                         server = None,
                         headers = None,
                         externalPort = None):
    """
    Set WebSocket session parameters.

    :param url: The WebSocket URL this factory is working for, e.g. `ws://myhost.com/somepath`.
    :type url: str
    :param protocols: List of subprotocols the server supports. The subprotocol used is the first from the list of subprotocols announced by the client that is contained in this list.
    :type protocols: list of strings or None
    :param server: Server as announced in HTTP response header during opening handshake.
    :type server: str
    :param headers: An optional mapping of additional HTTP headers to send during the WebSocket opening handshake.
    :type headers: dict or None
    :param externalPort: Optionally, the external visible port this server will be reachable under (i.e. when running behind a L2/L3 forwarding device).
    :type externalPort: int
    """
    ## FIX: mutable defaults ([] / {}) replaced by None to avoid the
    ## shared-mutable-default pitfall; normalize to fresh objects here.
    protocols = [] if protocols is None else protocols
    headers = {} if headers is None else headers

    if url is not None:
        ## parse WebSocket URI into components
        (isSecure, host, port, resource, path, params) = parseWsUrl(url)

        ## a server WebSocket URL must not carry a query part
        if len(params) > 0:
            raise Exception("query parameters specified for server WebSocket URL")
        self.url = url
        self.isSecure = isSecure
        self.host = host
        self.port = port
        self.resource = resource
        self.path = path
        self.params = params
    else:
        self.url = None
        self.isSecure = None
        self.host = None
        self.port = None
        self.resource = None
        self.path = None
        self.params = None

    self.protocols = protocols
    self.server = server
    self.headers = headers

    ## when running behind a forwarding device, the externally visible
    ## port may differ from the locally bound one
    self.externalPort = externalPort if externalPort is not None else self.port
def resetProtocolOptions(self):
    """
    Reset all WebSocket protocol options to defaults.
    """
    self.versions = WebSocketProtocol.SUPPORTED_PROTOCOL_VERSIONS
    self.allowHixie76 = WebSocketProtocol.DEFAULT_ALLOW_HIXIE76
    self.webStatus = True
    self.utf8validateIncoming = True
    self.requireMaskedClientFrames = True
    self.maskServerFrames = False
    self.applyMask = True
    self.maxFramePayloadSize = 0      # 0 = unlimited
    self.maxMessagePayloadSize = 0    # 0 = unlimited
    self.autoFragmentSize = 0         # 0 = no auto-fragmentation
    self.failByDrop = True
    self.echoCloseCodeReason = False
    self.openHandshakeTimeout = 5     # seconds
    self.closeHandshakeTimeout = 1    # seconds
    self.tcpNoDelay = True

    ## permessage-XXX extension
    ##
    ## default acceptor: deny all permessage-compress offers
    self.perMessageCompressionAccept = lambda _: None
def setProtocolOptions(self,
                       versions = None,
                       allowHixie76 = None,
                       webStatus = None,
                       utf8validateIncoming = None,
                       maskServerFrames = None,
                       requireMaskedClientFrames = None,
                       applyMask = None,
                       maxFramePayloadSize = None,
                       maxMessagePayloadSize = None,
                       autoFragmentSize = None,
                       failByDrop = None,
                       echoCloseCodeReason = None,
                       openHandshakeTimeout = None,
                       closeHandshakeTimeout = None,
                       tcpNoDelay = None,
                       perMessageCompressionAccept = None):
    """
    Set WebSocket protocol options used as defaults for new protocol instances.
    Every option left at `None` keeps its current value.

    :param versions: The WebSocket protocol versions accepted by the server (default: :func:`autobahn.websocket.protocol.WebSocketProtocol.SUPPORTED_PROTOCOL_VERSIONS`).
    :type versions: list of ints
    :param allowHixie76: Allow to speak Hixie76 protocol version.
    :type allowHixie76: bool
    :param webStatus: Return server status/version on HTTP/GET without WebSocket upgrade header (default: `True`).
    :type webStatus: bool
    :param utf8validateIncoming: Validate incoming UTF-8 in text message payloads (default: `True`).
    :type utf8validateIncoming: bool
    :param maskServerFrames: Mask server-to-client frames (default: `False`).
    :type maskServerFrames: bool
    :param requireMaskedClientFrames: Require client-to-server frames to be masked (default: `True`).
    :type requireMaskedClientFrames: bool
    :param applyMask: Actually apply mask to payload when mask it present. Applies for outgoing and incoming frames (default: `True`).
    :type applyMask: bool
    :param maxFramePayloadSize: Maximum frame payload size that will be accepted when receiving or `0` for unlimited (default: `0`).
    :type maxFramePayloadSize: int
    :param maxMessagePayloadSize: Maximum message payload size (after reassembly of fragmented messages) that will be accepted when receiving or `0` for unlimited (default: `0`).
    :type maxMessagePayloadSize: int
    :param autoFragmentSize: Automatic fragmentation of outgoing data messages (when using the message-based API) into frames with payload length `<=` this size or `0` for no auto-fragmentation (default: `0`).
    :type autoFragmentSize: int
    :param failByDrop: Fail connections by dropping the TCP connection without performing closing handshake (default: `True`).
    :type failByDrop: bool
    :param echoCloseCodeReason: Iff true, when receiving a close, echo back close code/reason. Otherwise reply with `code == 1000, reason = ""` (default: `False`).
    :type echoCloseCodeReason: bool
    :param openHandshakeTimeout: Opening WebSocket handshake timeout, timeout in seconds or `0` to deactivate (default: `0`).
    :type openHandshakeTimeout: float
    :param closeHandshakeTimeout: When we expect to receive a closing handshake reply, timeout in seconds (default: `1`).
    :type closeHandshakeTimeout: float
    :param tcpNoDelay: TCP NODELAY ("Nagle") socket option (default: `True`).
    :type tcpNoDelay: bool
    :param perMessageCompressionAccept: Acceptor function for offers.
    :type perMessageCompressionAccept: callable
    """
    ## `allowHixie76` must be applied first: the validation of `versions`
    ## below checks it (spec version 0 == Hixie-76).
    if allowHixie76 is not None and allowHixie76 != self.allowHixie76:
        self.allowHixie76 = allowHixie76

    if versions is not None:
        for v in versions:
            if v not in WebSocketProtocol.SUPPORTED_PROTOCOL_VERSIONS:
                raise Exception("invalid WebSocket protocol version %s (allowed values: %s)" % (v, str(WebSocketProtocol.SUPPORTED_PROTOCOL_VERSIONS)))
            if v == 0 and not self.allowHixie76:
                raise Exception("use of Hixie-76 requires allowHixie76 == True")
        if set(versions) != set(self.versions):
            self.versions = versions

    ## every option below is applied only when explicitly given (not None)
    ## and actually different from the current setting
    if webStatus is not None and webStatus != self.webStatus:
        self.webStatus = webStatus

    if utf8validateIncoming is not None and utf8validateIncoming != self.utf8validateIncoming:
        self.utf8validateIncoming = utf8validateIncoming

    if requireMaskedClientFrames is not None and requireMaskedClientFrames != self.requireMaskedClientFrames:
        self.requireMaskedClientFrames = requireMaskedClientFrames

    if maskServerFrames is not None and maskServerFrames != self.maskServerFrames:
        self.maskServerFrames = maskServerFrames

    if applyMask is not None and applyMask != self.applyMask:
        self.applyMask = applyMask

    if maxFramePayloadSize is not None and maxFramePayloadSize != self.maxFramePayloadSize:
        self.maxFramePayloadSize = maxFramePayloadSize

    if maxMessagePayloadSize is not None and maxMessagePayloadSize != self.maxMessagePayloadSize:
        self.maxMessagePayloadSize = maxMessagePayloadSize

    if autoFragmentSize is not None and autoFragmentSize != self.autoFragmentSize:
        self.autoFragmentSize = autoFragmentSize

    if failByDrop is not None and failByDrop != self.failByDrop:
        self.failByDrop = failByDrop

    if echoCloseCodeReason is not None and echoCloseCodeReason != self.echoCloseCodeReason:
        self.echoCloseCodeReason = echoCloseCodeReason

    if openHandshakeTimeout is not None and openHandshakeTimeout != self.openHandshakeTimeout:
        self.openHandshakeTimeout = openHandshakeTimeout

    if closeHandshakeTimeout is not None and closeHandshakeTimeout != self.closeHandshakeTimeout:
        self.closeHandshakeTimeout = closeHandshakeTimeout

    if tcpNoDelay is not None and tcpNoDelay != self.tcpNoDelay:
        self.tcpNoDelay = tcpNoDelay

    if perMessageCompressionAccept is not None and perMessageCompressionAccept != self.perMessageCompressionAccept:
        self.perMessageCompressionAccept = perMessageCompressionAccept
def getConnectionCount(self):
    """
    Get number of currently connected clients.

    :returns: int -- Number of currently connected clients.
    """
    ## plain accessor over the counter maintained by this factory
    count = self.countConnections
    return count
class WebSocketClientProtocol(WebSocketProtocol):
    """
    Protocol base class for WebSocket clients.
    """

    ## NOTE(review): presumably the set of configuration attribute names
    ## mirrored from the factory onto protocol instances — confirm against
    ## WebSocketProtocol.CONFIG_ATTRS_COMMON / _CLIENT.
    CONFIG_ATTRS = WebSocketProtocol.CONFIG_ATTRS_COMMON + WebSocketProtocol.CONFIG_ATTRS_CLIENT
def onConnect(self, response):
    """
    Callback fired directly after WebSocket opening handshake when new WebSocket server
    connection was established.

    Default implementation does nothing; override in derived classes.

    :param response: WebSocket connection response information.
    :type response: instance of :class:`autobahn.websocket.protocol.ConnectionResponse`
    """
    pass
def _connectionMade(self):
    """
    Called by network framework when new transport connection to server was established. Default
    implementation will start the initial WebSocket opening handshake (or proxy connect).
    When overriding in derived class, make sure to call this base class
    implementation _before_ your code.
    """
    WebSocketProtocol._connectionMade(self)
    if self.debug:
        self.factory._log("connection to %s established" % self.peer)

    if not self.factory.isServer and self.factory.proxy is not None:
        ## start by doing a HTTP/CONNECT for explicit proxies
        self.startProxyConnect()
    else:
        ## immediately start with the WebSocket opening handshake
        self.startHandshake()
def _connectionLost(self, reason):
    """
    Called by network framework when established transport connection to server was lost. Default
    implementation will tear down all state properly.
    When overriding in derived class, make sure to call this base class
    implementation _after_ your code.

    :param reason: Reason for the connection loss, as supplied by the network framework.
    """
    WebSocketProtocol._connectionLost(self, reason)
    if self.debug:
        self.factory._log("connection to %s lost" % self.peer)
def startProxyConnect(self):
    """
    Connect to explicit proxy via HTTP/CONNECT.

    Sends `CONNECT <host>:<port>` for the WebSocket server the factory is
    configured for, so that subsequent traffic is tunneled through the proxy.
    """
    ## construct proxy connect HTTP request
    ##
    ## FIX: the original interpolated `self.factory.host.encode("utf-8")`
    ## (a bytes object) into a str format, which renders as "b'host'" on
    ## Python 3. Interpolate the plain str and encode the complete request
    ## once before sending, consistent with startHandshake().
    request = "CONNECT %s:%d HTTP/1.1\x0d\x0a" % (self.factory.host, self.factory.port)
    request += "Host: %s:%d\x0d\x0a" % (self.factory.host, self.factory.port)
    request += "\x0d\x0a"

    if self.debug:
        self.factory._log(request)

    self.sendData(request.encode('utf8'))
def processProxyConnect(self):
    """
    Process HTTP/CONNECT response from server.
    """
    ## only proceed when we have fully received the HTTP request line and all headers
    ##
    end_of_header = self.data.find(b"\x0d\x0a\x0d\x0a")
    if end_of_header >= 0:

        http_response_data = self.data[:end_of_header + 4]
        if self.debug:
            self.factory._log("received HTTP response:\n\n%s\n\n" % http_response_data)

        ## extract HTTP status line and headers
        ##
        (http_status_line, http_headers, http_headers_cnt) = parseHttpHeader(http_response_data)

        ## validate proxy connect response
        ##
        if self.debug:
            self.factory._log("received HTTP status line for proxy connect request : %s" % str(http_status_line))
            self.factory._log("received HTTP headers for proxy connect request : %s" % str(http_headers))

        ## Response Line
        ##
        sl = http_status_line.split()
        if len(sl) < 2:
            return self.failProxyConnect("Bad HTTP response status line '%s'" % http_status_line)

        ## HTTP version
        ##
        http_version = sl[0].strip()
        if http_version != "HTTP/1.1":
            return self.failProxyConnect("Unsupported HTTP version ('%s')" % http_version)

        ## HTTP status code
        ##
        try:
            status_code = int(sl[1].strip())
        except:
            return self.failProxyConnect("Bad HTTP status code ('%s')" % sl[1].strip())

        ## any 2xx code means the tunnel was established
        if not (status_code >= 200 and status_code < 300):

            ## FIXME: handle redirects
            ## FIXME: handle authentication required

            if len(sl) > 2:
                reason = " - %s" % ''.join(sl[2:])
            else:
                reason = ""
            return self.failProxyConnect("HTTP proxy connect failed (%d%s)" % (status_code, reason))

        ## Ok, got complete response for HTTP/CONNECT, remember rest (if any)
        ##
        self.data = self.data[end_of_header + 4:]

        ## proxy tunnel established: transition into CONNECTING state — the
        ## WebSocket opening handshake over the tunnel is still to come
        ##
        self.state = WebSocketProtocol.STATE_CONNECTING

        ## process rest of buffered data, if any
        ##
        if len(self.data) > 0:
            self.consumeData()

        ## now start WebSocket opening handshake
        ##
        self.startHandshake()
def failProxyConnect(self, reason):
    """
    Abort an explicit proxy connect: the proxy's response to our
    HTTP/CONNECT indicated failure, so we drop the connection hard.
    """
    if self.debug:
        logline = "failing proxy connect ('%s')" % reason
        self.factory._log(logline)
    self.dropConnection(abort = True)
def createHixieKey(self):
    """
    Implements the Hixie-76 opening handshake key generation, see
    http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76#page-21
    Items 16 - 22.

    :returns: tuple -- (key, number) where `key` is the obfuscated string
       for a `Sec-WebSocket-Key1/2` header and `number` is the hidden
       integer the server recovers from it (digits of key / space count).
    """
    spaces1 = random.randint(1, 12)
    max1 = int(4294967295 / spaces1)
    number1 = random.randint(0, max1)
    product1 = number1 * spaces1
    key1 = str(product1)

    ## FIX: materialize the allowed-character set as a list — on Python 3,
    ## filter() returns an iterator which random.choice() cannot index —
    ## and use range() instead of the Python-2-only xrange().
    rchars = [x for x in range(0, 127) if (x >= 0x21 and x <= 0x2f) or (x >= 0x3a and x <= 0x7e)]

    ## insert between 1 and 12 random non-digit, non-space characters
    for i in range(random.randint(1, 12)):
        p = random.randint(0, len(key1) - 1)
        key1 = key1[:p] + chr(random.choice(rchars)) + key1[p:]

    ## insert exactly `spaces1` spaces, never at the very ends of the key
    for i in range(spaces1):
        p = random.randint(1, len(key1) - 2)
        key1 = key1[:p] + ' ' + key1[p:]

    return (key1, number1)
def startHandshake(self):
    """
    Start WebSocket opening handshake.
    """

    ## construct WS opening handshake HTTP header
    ##
    request = "GET %s HTTP/1.1\x0d\x0a" % self.factory.resource

    if self.factory.useragent is not None and self.factory.useragent != "":
        request += "User-Agent: %s\x0d\x0a" % self.factory.useragent

    request += "Host: %s:%d\x0d\x0a" % (self.factory.host, self.factory.port)
    request += "Upgrade: WebSocket\x0d\x0a"
    request += "Connection: Upgrade\x0d\x0a"

    ## this seems to prohibit some non-compliant proxies from removing the
    ## connection "Upgrade" header
    ## See also:
    ##   http://www.ietf.org/mail-archive/web/hybi/current/msg09841.html
    ##   http://code.google.com/p/chromium/issues/detail?id=148908
    ##
    request += "Pragma: no-cache\x0d\x0a"
    request += "Cache-Control: no-cache\x0d\x0a"

    ## optional, user supplied additional HTTP headers
    ##
    for uh in self.factory.headers.items():
        request += "%s: %s\x0d\x0a" % (uh[0], uh[1])

    ## handshake random key
    ##
    if self.version == 0:
        ## Hixie-76: two obfuscated number keys as headers plus 8 random
        ## octets as request body; the expected reply is the MD5 digest
        ## over the two recovered numbers and the body.
        (self.websocket_key1, number1) = self.createHixieKey()
        (self.websocket_key2, number2) = self.createHixieKey()
        self.websocket_key3 = os.urandom(8)
        accept_val = struct.pack(">II", number1, number2) + self.websocket_key3
        self.websocket_expected_challenge_response = hashlib.md5(accept_val).digest()

        ## Safari does NOT set Content-Length, even though the body is
        ## non-empty, and the request unchunked. We do it.
        ## See also: http://www.ietf.org/mail-archive/web/hybi/current/msg02149.html
        request += "Content-Length: %s\x0d\x0a" % len(self.websocket_key3)

        ## First two keys.
        request += "Sec-WebSocket-Key1: %s\x0d\x0a" % self.websocket_key1
        request += "Sec-WebSocket-Key2: %s\x0d\x0a" % self.websocket_key2
        request_body = self.websocket_key3
    else:
        ## RFC6455 / hybi drafts: 16 random octets, base64 encoded
        self.websocket_key = base64.b64encode(os.urandom(16))
        request += "Sec-WebSocket-Key: %s\x0d\x0a" % self.websocket_key.decode()
        request_body = None

    ## optional origin announced
    ##
    if self.factory.origin:
        ## header name changed between the hybi drafts (<= 10) and RFC6455
        if self.version > 10 or self.version == 0:
            request += "Origin: %s\x0d\x0a" % self.factory.origin
        else:
            request += "Sec-WebSocket-Origin: %s\x0d\x0a" % self.factory.origin

    ## optional list of WS subprotocols announced
    ##
    if len(self.factory.protocols) > 0:
        request += "Sec-WebSocket-Protocol: %s\x0d\x0a" % ','.join(self.factory.protocols)

    ## extensions
    ##
    if self.version != 0:
        extensions = []

        ## permessage-compress offers
        ##
        for offer in self.perMessageCompressionOffers:
            extensions.append(offer.getExtensionString())

        if len(extensions) > 0:
            request += "Sec-WebSocket-Extensions: %s\x0d\x0a" % ', '.join(extensions)

    ## set WS protocol version depending on WS spec version
    ##
    if self.version != 0:
        request += "Sec-WebSocket-Version: %d\x0d\x0a" % WebSocketProtocol.SPEC_TO_PROTOCOL_VERSION[self.version]

    request += "\x0d\x0a"

    self.http_request_data = request.encode('utf8')
    self.sendData(self.http_request_data)

    if request_body:
        ## Write HTTP request body for Hixie-76
        self.sendData(request_body)

    if self.debug:
        self.factory._log(request)
def processHandshake(self):
    """
    Process WebSocket opening handshake response from server.
    """
    ## only proceed when we have fully received the HTTP request line and all headers
    ##
    end_of_header = self.data.find(b"\x0d\x0a\x0d\x0a")
    if end_of_header >= 0:

        self.http_response_data = self.data[:end_of_header + 4]
        if self.debug:
            self.factory._log("received HTTP response:\n\n%s\n\n" % self.http_response_data)

        ## extract HTTP status line and headers
        ##
        (self.http_status_line, self.http_headers, http_headers_cnt) = parseHttpHeader(self.http_response_data)

        ## validate WebSocket opening handshake server response
        ##
        if self.debug:
            self.factory._log("received HTTP status line in opening handshake : %s" % str(self.http_status_line))
            self.factory._log("received HTTP headers in opening handshake : %s" % str(self.http_headers))

        ## Response Line
        ##
        sl = self.http_status_line.split()
        if len(sl) < 2:
            return self.failHandshake("Bad HTTP response status line '%s'" % self.http_status_line)

        ## HTTP version
        ##
        http_version = sl[0].strip()
        if http_version != "HTTP/1.1":
            return self.failHandshake("Unsupported HTTP version ('%s')" % http_version)

        ## HTTP status code
        ##
        try:
            status_code = int(sl[1].strip())
        except:
            return self.failHandshake("Bad HTTP status code ('%s')" % sl[1].strip())

        ## only "101 Switching Protocols" continues the handshake
        if status_code != http.SWITCHING_PROTOCOLS[0]:

            ## FIXME: handle redirects
            ## FIXME: handle authentication required

            if len(sl) > 2:
                reason = " - %s" % ''.join(sl[2:])
            else:
                reason = ""
            return self.failHandshake("WebSocket connection upgrade failed (%d%s)" % (status_code, reason))

        ## Upgrade
        ##
        if not 'upgrade' in self.http_headers:
            return self.failHandshake("HTTP Upgrade header missing")
        if self.http_headers["upgrade"].strip().lower() != "websocket":
            return self.failHandshake("HTTP Upgrade header different from 'websocket' (case-insensitive) : %s" % self.http_headers["upgrade"])

        ## Connection
        ##
        if not 'connection' in self.http_headers:
            return self.failHandshake("HTTP Connection header missing")
        connectionUpgrade = False
        for c in self.http_headers["connection"].split(","):
            if c.strip().lower() == "upgrade":
                connectionUpgrade = True
                break
        if not connectionUpgrade:
            return self.failHandshake("HTTP Connection header does not include 'upgrade' value (case-insensitive) : %s" % self.http_headers["connection"])

        ## compute Sec-WebSocket-Accept
        ##
        ## only for RFC6455/hybi (version != 0); Hixie-76 uses the MD5
        ## challenge response checked further below instead
        if self.version != 0:
            if not 'sec-websocket-accept' in self.http_headers:
                return self.failHandshake("HTTP Sec-WebSocket-Accept header missing in opening handshake reply")
            else:
                if http_headers_cnt["sec-websocket-accept"] > 1:
                    return self.failHandshake("HTTP Sec-WebSocket-Accept header appears more than once in opening handshake reply")
                sec_websocket_accept_got = self.http_headers["sec-websocket-accept"].strip()

                sha1 = hashlib.sha1()
                sha1.update(self.websocket_key + WebSocketProtocol._WS_MAGIC)
                sec_websocket_accept = base64.b64encode(sha1.digest()).decode()

                if sec_websocket_accept_got != sec_websocket_accept:
                    return self.failHandshake("HTTP Sec-WebSocket-Accept bogus value : expected %s / got %s" % (sec_websocket_accept, sec_websocket_accept_got))

        ## Sec-WebSocket-Extensions
        ##

        ## extensions effectively in use for this connection
        ##
        self.websocket_extensions_in_use = []

        if 'sec-websocket-extensions' in self.http_headers:

            if self.version == 0:
                return self.failHandshake("HTTP Sec-WebSocket-Extensions header encountered for Hixie-76")
            else:
                if http_headers_cnt["sec-websocket-extensions"] > 1:
                    return self.failHandshake("HTTP Sec-WebSocket-Extensions header appears more than once in opening handshake reply")
                else:
                    ## extensions select by server
                    ##
                    websocket_extensions = self._parseExtensionsHeader(self.http_headers["sec-websocket-extensions"])

                ## process extensions selected by server
                ##
                for (extension, params) in websocket_extensions:

                    if self.debug:
                        self.factory._log("parsed WebSocket extension '%s' with params '%s'" % (extension, params))

                    ## process permessage-compress extension
                    ##
                    if extension in PERMESSAGE_COMPRESSION_EXTENSION:

                        ## check that server only responded with 1 configuration ("PMCE")
                        ##
                        if self._perMessageCompress is not None:
                            return self.failHandshake("multiple occurence of a permessage-compress extension")

                        PMCE = PERMESSAGE_COMPRESSION_EXTENSION[extension]
                        try:
                            pmceResponse = PMCE['Response'].parse(params)
                        except Exception as e:
                            return self.failHandshake(str(e))

                        ## give the user code a chance to veto the server's choice
                        accept = self.perMessageCompressionAccept(pmceResponse)

                        if accept is None:
                            return self.failHandshake("WebSocket permessage-compress extension response from server denied by client")

                        self._perMessageCompress = PMCE['PMCE'].createFromResponseAccept(self.factory.isServer, accept)

                        self.websocket_extensions_in_use.append(self._perMessageCompress)

                    else:
                        return self.failHandshake("server wants to use extension '%s' we did not request, haven't implemented or did not enable" % extension)

        ## handle "subprotocol in use" - if any
        ##
        self.websocket_protocol_in_use = None
        if 'sec-websocket-protocol' in self.http_headers:
            if http_headers_cnt["sec-websocket-protocol"] > 1:
                return self.failHandshake("HTTP Sec-WebSocket-Protocol header appears more than once in opening handshake reply")
            sp = str(self.http_headers["sec-websocket-protocol"].strip())
            if sp != "":
                ## the server must only select from what we announced
                if sp not in self.factory.protocols:
                    return self.failHandshake("subprotocol selected by server (%s) not in subprotocol list requested by client (%s)" % (sp, str(self.factory.protocols)))
                else:
                    ## ok, subprotocol in use
                    ##
                    self.websocket_protocol_in_use = sp

        ## For Hixie-76, we need 16 octets of HTTP request body to complete HS!
        ##
        if self.version == 0:
            if len(self.data) < end_of_header + 4 + 16:
                ## not yet enough data buffered — wait for more
                return
            else:
                challenge_response = self.data[end_of_header + 4:end_of_header + 4 + 16]
                if challenge_response != self.websocket_expected_challenge_response:
                    return self.failHandshake("invalid challenge response received from server (Hixie-76)")

        ## Ok, got complete HS input, remember rest (if any)
        ##
        if self.version == 0:
            self.data = self.data[end_of_header + 4 + 16:]
        else:
            self.data = self.data[end_of_header + 4:]

        ## opening handshake completed, move WebSocket connection into OPEN state
        ##
        self.state = WebSocketProtocol.STATE_OPEN
        self.inside_message = False
        if self.version != 0:
            self.current_frame = None
        self.websocket_version = self.version

        ## we handle this symmetrical to server-side .. that is, give the
        ## client a chance to bail out .. i.e. on no subprotocol selected
        ## by server
        try:
            response = ConnectionResponse(self.peer,
                                          self.http_headers,
                                          None,   # FIXME
                                          self.websocket_protocol_in_use,
                                          self.websocket_extensions_in_use)
            self._onConnect(response)

        except Exception as e:
            ## immediately close the WS connection
            ##
            self.failConnection(1000, str(e))

        else:
            ## fire handler on derived class
            ##
            if self.trackedTimings:
                self.trackedTimings.track("onOpen")
            self._onOpen()

            ## process rest, if any
            ##
            if len(self.data) > 0:
                self.consumeData()
def failHandshake(self, reason):
    """
    Abort the client side of the WebSocket opening handshake: the server
    response was invalid, so log the cause (in debug mode) and drop the
    underlying transport connection hard.
    """
    if self.debug:
        msg = "failing WebSocket opening handshake ('%s')" % reason
        self.factory._log(msg)
    self.dropConnection(abort = True)
## Client-side counterpart of WebSocketServerFactory: holds the session
## parameters and protocol option defaults for outgoing connections.
class WebSocketClientFactory(WebSocketFactory):
    """
    A protocol factory for WebSocket clients.
    """

    protocol = WebSocketClientProtocol
    """
    The protocol to be spoken. Must be derived from :class:`autobahn.websocket.protocol.WebSocketClientProtocol`.
    """

    isServer = False
    """
    Flag indicating if this factory is client- or server-side.
    """
def __init__(self,
             url = None,
             origin = None,
             protocols = None,
             useragent = "AutobahnPython/%s" % __version__,
             headers = None,
             proxy = None,
             debug = False,
             debugCodePaths = False):
    """
    Create instance of WebSocket client factory.

    Note that you MUST provide URL either here or set using
    :meth:`autobahn.websocket.WebSocketClientFactory.setSessionParameters`
    *before* the factory is started.

    :param url: WebSocket URL this factory will connect to, e.g. `ws://myhost.com/somepath`.
    :type url: str
    :param origin: The origin to be sent in WebSocket opening handshake or None (default: `None`).
    :type origin: str
    :param protocols: List of subprotocols the client should announce in WebSocket opening handshake (default: `[]`).
    :type protocols: list of strings or None
    :param useragent: User agent as announced in HTTP request header or None (default: `AutobahnWebSocket/?.?.?`).
    :type useragent: str
    :param headers: An optional mapping of additional HTTP headers to send during the WebSocket opening handshake.
    :type headers: dict or None
    :param proxy: Explicit proxy server to use (`hostname:port` or `IP:port`), e.g. `192.168.1.100:8080`.
    :type proxy: str
    :param debug: Debug mode (default: `False`).
    :type debug: bool
    :param debugCodePaths: Debug code paths mode (default: `False`).
    :type debugCodePaths: bool
    """
    self.debug = debug
    self.debugCodePaths = debugCodePaths

    self.logOctets = debug
    self.logFrames = debug

    self.trackTimings = False

    ## seed RNG which is used for WS opening handshake key and WS frame masks generation
    random.seed()

    ## default WS session parameters
    ##
    ## FIX: mutable default arguments ([] / {}) were replaced by None to
    ## avoid accidental sharing between instances; normalize here.
    self.setSessionParameters(url,
                              origin,
                              protocols if protocols is not None else [],
                              useragent,
                              headers if headers is not None else {},
                              proxy)

    ## default WebSocket protocol options
    ##
    self.resetProtocolOptions()
def setSessionParameters(self,
                         url = None,
                         origin = None,
                         protocols = None,
                         useragent = None,
                         headers = None,
                         proxy = None):
    """
    Set WebSocket session parameters.

    :param url: WebSocket URL this factory will connect to, e.g. `ws://myhost.com/somepath`.
    :type url: str
    :param origin: The origin to be sent in opening handshake.
    :type origin: str
    :param protocols: List of WebSocket subprotocols the client should announce in opening handshake.
    :type protocols: list of strings or None
    :param useragent: User agent as announced in HTTP request header during opening handshake.
    :type useragent: str
    :param headers: An optional mapping of additional HTTP headers to send during the WebSocket opening handshake.
    :type headers: dict or None
    :param proxy: Explicit proxy server to use (`hostname:port` or `IP:port`), or `None` for no proxy.
    :type proxy: str
    """
    ## FIX: mutable defaults ([] / {}) replaced by None to avoid the
    ## shared-mutable-default pitfall; normalize to fresh objects here.
    protocols = [] if protocols is None else protocols
    headers = {} if headers is None else headers

    if url is not None:
        ## parse WebSocket URI into components
        (isSecure, host, port, resource, path, params) = parseWsUrl(url)
        self.url = url
        self.isSecure = isSecure
        self.host = host
        self.port = port
        self.resource = resource
        self.path = path
        self.params = params
    else:
        self.url = None
        self.isSecure = None
        self.host = None
        self.port = None
        self.resource = None
        self.path = None
        self.params = None

    self.origin = origin
    self.protocols = protocols
    self.useragent = useragent
    self.headers = headers

    self.proxy = proxy
def resetProtocolOptions(self):
    """
    Reset all WebSocket protocol options to defaults.
    """
    self.version = WebSocketProtocol.DEFAULT_SPEC_VERSION
    self.allowHixie76 = WebSocketProtocol.DEFAULT_ALLOW_HIXIE76
    self.utf8validateIncoming = True
    self.acceptMaskedServerFrames = False
    self.maskClientFrames = True
    self.applyMask = True
    self.maxFramePayloadSize = 0          # 0 = unlimited
    self.maxMessagePayloadSize = 0        # 0 = unlimited
    self.autoFragmentSize = 0             # 0 = no auto-fragmentation
    self.failByDrop = True
    self.echoCloseCodeReason = False
    self.serverConnectionDropTimeout = 1  # seconds
    self.openHandshakeTimeout = 5         # seconds
    self.closeHandshakeTimeout = 1        # seconds
    self.tcpNoDelay = True

    ## permessage-XXX extensions
    ##
    ## no compression offers by default; default acceptor denies any
    ## server response
    self.perMessageCompressionOffers = []
    self.perMessageCompressionAccept = lambda _: None
def setProtocolOptions(self,
                       version = None,
                       allowHixie76 = None,
                       utf8validateIncoming = None,
                       acceptMaskedServerFrames = None,
                       maskClientFrames = None,
                       applyMask = None,
                       maxFramePayloadSize = None,
                       maxMessagePayloadSize = None,
                       autoFragmentSize = None,
                       failByDrop = None,
                       echoCloseCodeReason = None,
                       serverConnectionDropTimeout = None,
                       openHandshakeTimeout = None,
                       closeHandshakeTimeout = None,
                       tcpNoDelay = None,
                       perMessageCompressionOffers = None,
                       perMessageCompressionAccept = None):
   """
   Set WebSocket protocol options used as defaults for _new_ protocol instances.
   Options left at `None` are not touched.

   :param version: The WebSocket protocol spec (draft) version to be used (default: :func:`autobahn.websocket.protocol.WebSocketProtocol.SUPPORTED_PROTOCOL_VERSIONS`).
   :param allowHixie76: Allow the use of the old Hixie-76 draft (required when `version == 0`).
   :type allowHixie76: bool
   :param utf8validateIncoming: Validate incoming UTF-8 in text message payloads (default: `True`).
   :type utf8validateIncoming: bool
   :param acceptMaskedServerFrames: Accept masked server-to-client frames (default: `False`).
   :type acceptMaskedServerFrames: bool
   :param maskClientFrames: Mask client-to-server frames (default: `True`).
   :type maskClientFrames: bool
   :param applyMask: Actually apply mask to payload when mask it present. Applies for outgoing and incoming frames (default: `True`).
   :type applyMask: bool
   :param maxFramePayloadSize: Maximum frame payload size that will be accepted when receiving or `0` for unlimited (default: `0`).
   :type maxFramePayloadSize: int
   :param maxMessagePayloadSize: Maximum message payload size (after reassembly of fragmented messages) that will be accepted when receiving or `0` for unlimited (default: `0`).
   :type maxMessagePayloadSize: int
   :param autoFragmentSize: Automatic fragmentation of outgoing data messages (when using the message-based API) into frames with payload length `<=` this size or `0` for no auto-fragmentation (default: `0`).
   :type autoFragmentSize: int
   :param failByDrop: Fail connections by dropping the TCP connection without performing closing handshake (default: `True`).
   :type failByDrop: bool
   :param echoCloseCodeReason: Iff true, when receiving a close, echo back close code/reason. Otherwise reply with `code == 1000, reason = ""` (default: `False`).
   :type echoCloseCodeReason: bool
   :param serverConnectionDropTimeout: When the client expects the server to drop the TCP, timeout in seconds (default: `1`).
   :type serverConnectionDropTimeout: float
   :param openHandshakeTimeout: Opening WebSocket handshake timeout, timeout in seconds or `0` to deactivate (default: `5`).
   :type openHandshakeTimeout: float
   :param closeHandshakeTimeout: When we expect to receive a closing handshake reply, timeout in seconds (default: `1`).
   :type closeHandshakeTimeout: float
   :param tcpNoDelay: TCP NODELAY ("Nagle"): bool socket option (default: `True`).
   :type tcpNoDelay: bool
   :param perMessageCompressionOffers: A list of offers to provide to the server for the permessage-compress WebSocket extension. Must be a list of instances of subclass of PerMessageCompressOffer.
   :type perMessageCompressionOffers: list of instance of subclass of PerMessageCompressOffer
   :param perMessageCompressionAccept: Acceptor function for responses.
   :type perMessageCompressionAccept: callable
   """
   ## allowHixie76 must be processed before version, since version == 0
   ## is only legal when Hixie-76 is allowed.
   if allowHixie76 is not None and allowHixie76 != self.allowHixie76:
      self.allowHixie76 = allowHixie76

   if version is not None:
      if version not in WebSocketProtocol.SUPPORTED_SPEC_VERSIONS:
         raise Exception("invalid WebSocket draft version %s (allowed values: %s)" % (version, str(WebSocketProtocol.SUPPORTED_SPEC_VERSIONS)))
      if version == 0 and not self.allowHixie76:
         raise Exception("use of Hixie-76 requires allowHixie76 == True")
      if version != self.version:
         self.version = version

   ## Simple scalar options: assign only when explicitly supplied and
   ## actually different from the current value.
   for optionName, optionValue in [('utf8validateIncoming', utf8validateIncoming),
                                   ('acceptMaskedServerFrames', acceptMaskedServerFrames),
                                   ('maskClientFrames', maskClientFrames),
                                   ('applyMask', applyMask),
                                   ('maxFramePayloadSize', maxFramePayloadSize),
                                   ('maxMessagePayloadSize', maxMessagePayloadSize),
                                   ('autoFragmentSize', autoFragmentSize),
                                   ('failByDrop', failByDrop),
                                   ('echoCloseCodeReason', echoCloseCodeReason),
                                   ('serverConnectionDropTimeout', serverConnectionDropTimeout),
                                   ('openHandshakeTimeout', openHandshakeTimeout),
                                   ('closeHandshakeTimeout', closeHandshakeTimeout),
                                   ('tcpNoDelay', tcpNoDelay)]:
      if optionValue is not None and optionValue != getattr(self, optionName):
         setattr(self, optionName, optionValue)

   ## Offers may not implement __eq__, so equality is approximated by
   ## comparing pickled representations (kept from the original code).
   if perMessageCompressionOffers is not None and pickle.dumps(perMessageCompressionOffers) != pickle.dumps(self.perMessageCompressionOffers):
      if isinstance(perMessageCompressionOffers, list):
         ##
         ## FIXME: more rigorous verification of passed argument
         ##
         self.perMessageCompressionOffers = copy.deepcopy(perMessageCompressionOffers)
      else:
         raise Exception("invalid type %s for perMessageCompressionOffers - expected list" % type(perMessageCompressionOffers))

   if perMessageCompressionAccept is not None and perMessageCompressionAccept != self.perMessageCompressionAccept:
      self.perMessageCompressionAccept = perMessageCompressionAccept
| liurenqiu520/AutobahnPython | autobahn/autobahn/websocket/protocol.py | Python | apache-2.0 | 166,198 | [
"VisIt"
] | 607a2f50b49c5624f2193f3c4ce2268e97492d981bf3e6ea076eebad7fe3303e |
# -*- coding: utf-8 -*-
import re
from app.models import User, Blog
from app import db, create_app
import unittest
from flask import url_for
class FlaskClientTestCase(unittest.TestCase):
    """End-to-end tests driving the BLEXT app through the Flask test client.

    Every test runs against a fresh app created with the 'testing' config
    and a clean database; request sequences within each test are
    order-dependent (sign-up before sign-in, create before edit, ...).
    """

    def setUp(self):
        # Build a testing app, push its context, and create the schema.
        self.app = create_app('testing')
        self.app_context = self.app.app_context()
        self.app_context.push()
        db.create_all()
        # self.client is the Flask test client object;
        # requests are issued against the app by calling methods on it.
        # use_cookies: rely on cookies to keep context between requests
        # (needed so the signed-in session persists across calls).
        self.client = self.app.test_client(use_cookies=True)

    def tearDown(self):
        # Wipe all data and pop the application context pushed in setUp.
        db.session.remove()
        db.drop_all()
        self.app_context.pop()

    # Test the home page request.
    def test_home_page(self):
        """The index page renders and contains the site name."""
        response = self.client.get(url_for('main.index'))
        self.assertTrue(b'BLEXT' in response.data)

    # Test basic flows: registration, sign-in, password reset, etc.
    def test_authentication(self):
        """Walks the full auth lifecycle: register, confirm, reset, sign in/out."""
        # register a new account
        response = self.client.post(url_for('auth.sign_up'), data={
            'email': 'mike@example.com',
            'username': 'mike',
            'password': 'cat',
            'password2': 'cat'
        }, follow_redirects=True)
        self.assertTrue(re.search(b'Hello,\s+mike!', response.data))
        self.assertTrue(
            b'You have not confirmed your account yet' in response.data)
        # issue a random request (unconfirmed users get redirected)
        response = self.client.get(url_for('settings.profile_setting'))
        self.assertTrue(response.status_code == 302)
        # send a confirmation token
        user = User.query.filter_by(email='mike@example.com').first()
        token = user.generate_confirmation_token()
        response = self.client.get(
            url_for('auth.confirm', token=token), follow_redirects=True)
        self.assertTrue(b'You have confirmed your account' in response.data)
        # resend a confirmation token (already confirmed -> redirect)
        response = self.client.get(url_for('auth.resend_confirmation'))
        self.assertTrue(response.status_code == 302)
        # reset password page (while logged in)
        response = self.client.get(
            url_for('auth.password_reset_request'), follow_redirects=True)
        self.assertTrue(
            b'BLEXT' in response.data)
        response = self.client.get(
            url_for('auth.password_reset', token=user.generate_reset_token()))
        self.assertTrue(response.status_code == 302)
        # sign out
        response = self.client.get(
            url_for('auth.sign_out'), follow_redirects=True)
        self.assertTrue(b'You have been signed out' in response.data)
        # reset password page (anonymous)
        response = self.client.get(url_for('auth.password_reset_request'))
        self.assertTrue(
            b'Enter your email you registered when signed up, an confirmation email will be sent to you right away.' in response.data)
        # reset password request
        response = self.client.post(url_for('auth.password_reset_request'), data={
            'email': 'mike@example.com'
        })
        self.assertTrue(response.status_code == 302)
        # reset password
        response = self.client.get(
            url_for('auth.password_reset', token=user.generate_reset_token()))
        self.assertTrue(b'Please reset your password.' in response.data)
        response = self.client.post(
            url_for('auth.password_reset', token=user.generate_reset_token()), data={
                'email': 'mike@example.com',
                'password': 'dog',
                'password2': 'dog'
            }, follow_redirects=True)
        self.assertTrue(b'Sign in' in response.data)
        # re sign up with same email and username (must be rejected)
        response = self.client.post(url_for('auth.sign_up'), data={
            'email': 'mike@example.com',
            'username': 'mike',
            'password': 'cat',
            'password2': 'cat'
        }, follow_redirects=True)
        self.assertFalse(re.search(b'Hello,\s+mike!', response.data))
        self.assertFalse(
            b'You have not confirmed your account yet' in response.data)
        # sign in with wrong information ('cat' is the old password)
        response = self.client.post(url_for('auth.sign_in'), data={
            'email': 'mike@example.com',
            'password': 'cat'
        })
        self.assertTrue(b'Invalid username or password.' in response.data)
        # sign in with the new password
        response = self.client.post(url_for('auth.sign_in'), data={
            'email': 'mike@example.com',
            'password': 'dog'
        })
        self.assertTrue(response.status_code == 302)
        # re-visit unconfirmed page (account is confirmed -> redirect)
        response = self.client.get(url_for('auth.unconfirmed'))
        self.assertTrue(response.status_code == 302)

    # Test the user settings pages.
    def test_settings(self):
        """Profile and admin settings update and render the new values."""
        # add a user
        u = User(email='mike@example.com', username='mike',
                 password='cat', confirmed=True)
        db.session.add(u)
        db.session.commit()
        # sign in
        response = self.client.post(url_for('auth.sign_in'), data={
            'email': 'mike@example.com',
            'password': 'cat'
        })
        self.assertTrue(response.status_code == 302)
        # setting profile
        response = self.client.get(url_for('settings.profile_setting'))
        self.assertTrue(response.status_code == 200)
        # set bio
        response = self.client.post(url_for('settings.profile_setting'), data={
            'bio': 'new bio'
        }, follow_redirects=True)
        self.assertTrue(b'new bio' in response.data)
        # set avatar_url
        response = self.client.post(url_for('settings.profile_setting'), data={
            'avatar_url': 'new avatar_url'
        }, follow_redirects=True)
        self.assertTrue(b'new avatar_url' in response.data)
        # set blog_title
        response = self.client.post(url_for('settings.profile_setting'), data={
            'blog_title': 'new blog_title'
        }, follow_redirects=True)
        self.assertTrue(b'new blog_title' in response.data)
        # set about_me (rendered on the public about-me page)
        response = self.client.post(url_for('settings.profile_setting'), data={
            'about_me': 'new about_me'
        })
        self.assertTrue(response.status_code == 302)
        response = self.client.get(url_for('user.about_me', username='mike'))
        self.assertTrue(b'new about_me' in response.data)
        # setting admin
        response = self.client.get(url_for('settings.admin_setting'))
        self.assertTrue(response.status_code == 200)
        # change password with wrong old password
        response = self.client.post(url_for('settings.admin_setting'), data={
            'old_password': 'dog',
            'password': 'cat',
            'password2': 'cat'
        }, follow_redirects=True)
        self.assertTrue(b'Invalid password' in response.data)
        # change password with correct old password
        response = self.client.post(url_for('settings.admin_setting'), data={
            'old_password': 'cat',
            'password': 'dog',
            'password2': 'dog'
        }, follow_redirects=True)
        self.assertTrue(b'BLEXT' in response.data)

    # Test the editor page.
    def test_editor(self):
        """Draft saving, publishing, editing and format validation."""
        response = self.client.get(url_for('editor.index'))
        # Anonymous users should be redirected to the sign-in page.
        self.assertTrue(response.status_code == 302)
        # add a user
        u = User(email='mike@example.com', username='mike',
                 password='cat', confirmed=True)
        db.session.add(u)
        db.session.commit()
        # sign in
        response = self.client.post(url_for('auth.sign_in'), data={
            'email': 'mike@example.com',
            'password': 'cat'
        })
        self.assertTrue(response.status_code == 302)
        response = self.client.get(url_for('editor.index'))
        self.assertTrue(response.status_code == 200)
        self.assertTrue(b'Editor' in response.data)
        # write a new draft blog (front-matter format expected by the app)
        blog_body = '---\ntitle: <title>\ncategory: <category>\ntags: []\n---\n<summary>\n<!-- more -->\n<Content>'
        response = self.client.post(url_for('editor.index'), data={
            'plainText': blog_body,
            'draft': 'true'
        }, follow_redirects=True)
        self.assertTrue(
            b'Your blog is successfully saved as a draft.' in response.data)
        # publish a new blog
        blog_body = '---\ntitle: <title>\ncategory: <category>\ntags: []\n---\n<summary>\n<!-- more -->\n<Content>'
        response = self.client.post(url_for('editor.index'), data={
            'plainText': blog_body,
            'draft': 'false'
        }, follow_redirects=True)
        self.assertTrue(
            b'Your blog is successfully uploaded!' in response.data)
        # edit an existing blog
        response = self.client.get(url_for('editor.edit', blog_id=1))
        self.assertTrue(response.status_code == 302)
        response = self.client.get(url_for('editor.index'))
        self.assertTrue(b'1' in response.data)
        blog_body += 'something new'
        response = self.client.post(url_for('editor.index'), data={
            'plainText': blog_body,
            'draft': 'false',
            'blog_id': '1'
        }, follow_redirects=True)
        self.assertTrue(
            b'Your blog is successfully uploaded!' in response.data)
        # editing in place must not create a third blog
        self.assertTrue(Blog.query.count() == 2)
        # publish a wrong format blog
        blog_body = 'wrong format'
        response = self.client.post(url_for('editor.index'), data={
            'plainText': blog_body,
            'draft': 'false'
        })
        self.assertTrue(response.status_code == 200)
        self.assertTrue(
            b'There is something wrong in your format. Committing abolished' in response.data)
        # edit a wrong format blog
        blog_body = 'wrong format'
        response = self.client.post(url_for('editor.index'), data={
            'plainText': blog_body,
            'draft': 'false',
            'blog_id': '1'
        })
        self.assertTrue(response.status_code == 200)
        self.assertTrue(
            b'There is something wrong in your format. Committing abolished' in response.data)

    # Test user home pages (anonymous visitor).
    def test_user_annonymous(self):
        """Anonymous visitors see public pages but never draft blogs."""
        # add one user with one blog with one cat and tag
        u = User(email='mike@example.com', username='mike',
                 password='cat', confirmed=True)
        db.session.add(u)
        db.session.commit()
        blog = Blog(
            body='---\ntitle: <title1>\ncategory: cat1\ntags: [tag1]\n---\n<summary>\n<!-- more -->\n<Content>', author_id=u.id, draft=True)
        db.session.add(blog)
        db.session.commit()
        # get index
        response = self.client.get(url_for('user.index', username='mike'))
        self.assertTrue(response.status_code == 200)
        self.assertTrue(b'mike' in response.data)
        response = self.client.get(url_for('user.index', username='tom'))
        self.assertTrue(response.status_code == 404)
        # categories page
        response = self.client.get(url_for('user.categories', username='mike'))
        self.assertTrue(response.status_code == 200)
        response = self.client.get(url_for('user.categories', username='tom'))
        self.assertTrue(response.status_code == 404)
        # tags page
        response = self.client.get(url_for('user.tags', username='mike'))
        self.assertTrue(response.status_code == 200)
        response = self.client.get(url_for('user.tags', username='tom'))
        self.assertTrue(response.status_code == 404)
        # single cat list (anonymous): the draft's title must be hidden
        response = self.client.get(
            url_for('user.category', username='mike', category_name='cat1'))
        self.assertTrue(response.status_code == 200)
        self.assertFalse(b'title1' in response.data)
        response = self.client.get(
            url_for('user.category', username='mike', category_name='cat2'))
        self.assertTrue(response.status_code == 404)
        # single tag list (anonymous): the draft's title must be hidden
        response = self.client.get(
            url_for('user.tag', username='mike', tag_name='tag1'))
        self.assertTrue(response.status_code == 200)
        self.assertFalse(b'title1' in response.data)
        response = self.client.get(
            url_for('user.tag', username='mike', tag_name='tag2'))
        self.assertTrue(response.status_code == 404)

    # Test user home pages (signed-in visitor).
    def test_user(self):
        """Owners see their own drafts; other users' drafts stay hidden."""
        # add two users, two draft blogs, two categories and two tags
        u = User(email='mike@example.com', username='mike',
                 password='cat', confirmed=True)
        u2 = User(email='jack@example.com', username='jack',
                  password='dog', confirmed=True)
        db.session.add_all([u, u2])
        db.session.commit()
        blog = Blog(
            body='---\ntitle: <title1>\ncategory: cat1\ntags: [tag1]\n---\n<summary>\n<!-- more -->\n<Content>', author_id=u.id, draft=True)
        blog2 = Blog(
            body='---\ntitle: <title2>\ncategory: cat2\ntags: [tag2]\n---\n<summary>\n<!-- more -->\n<Content>', author_id=u2.id, draft=True)
        db.session.add_all([blog, blog2])
        db.session.commit()
        # sign in as mike
        response = self.client.post(url_for('auth.sign_in'), data={
            'email': 'mike@example.com',
            'password': 'cat'
        })
        self.assertTrue(response.status_code == 302)
        # single cat list (signed in): own draft visible, jack's hidden
        response = self.client.get(
            url_for('user.category', username='mike', category_name='cat1'))
        self.assertTrue(response.status_code == 200)
        self.assertTrue(b'title1' in response.data)
        self.assertFalse(b'title2' in response.data)
        # single tag list (signed in)
        response = self.client.get(
            url_for('user.tag', username='mike', tag_name='tag1'))
        self.assertTrue(response.status_code == 200)
        self.assertTrue(b'title1' in response.data)
        self.assertFalse(b'title2' in response.data)
        # visit own drafts
        response = self.client.get(url_for('user.drafts', username='mike'))
        self.assertTrue(response.status_code == 200)
        self.assertTrue(b'title1' in response.data)
        # visit others drafts
        response = self.client.get(url_for('user.drafts', username='jack'))
        self.assertTrue(response.status_code == 404)
        # visit existing blog
        response = self.client.get(
            url_for('user.blog_page', username='mike', blog_id=1))
        self.assertTrue(response.status_code == 200)
        # visit unexisting blog
        response = self.client.get(
            url_for('user.blog_page', username='mike', blog_id=10))
        self.assertTrue(response.status_code == 404)
        # visit other's draft
        response = self.client.get(
            url_for('user.blog_page', username='jack', blog_id=2))
        self.assertTrue(response.status_code == 404)
        # delete ohter's blog
        response = self.client.get(
            url_for('user.delete_blog', blog_id=2))
        self.assertTrue(response.status_code == 404)
        # delete own blog
        response = self.client.get(
            url_for('user.delete_blog', blog_id=1))
        self.assertTrue(response.status_code == 302)
        # visit unexisting about me page
        response = self.client.get(
            url_for('user.about_me', username='dsfg'))
        self.assertTrue(response.status_code == 404)
| seagullbird/BLEXT | tests/test_client.py | Python | mit | 15,643 | [
"VisIt"
] | e89d5a7f30f38d6fa9146674a195f947eeead8cdc05f0bf5ef502602fe7d6012 |
from unittest import TestCase
from flexible_permissions.roles import (
roles_to_actions,
actions_to_roles,
)
class RolesTestCase(TestCase):
    """Round-trip tests for role <-> action conversion helpers."""

    def assertSameItems(self, expected, actual):
        """Order-insensitive comparison of two sequences.

        `assertItemsEqual` only exists on Python 2 (it was renamed to
        `assertCountEqual` in Python 3), so compare sorted copies instead
        to keep the suite portable across both.
        """
        self.assertEqual(sorted(expected), sorted(actual))

    def test_roles_to_actions(self):
        """Roles expand to the union of their granted actions."""
        self.assertSameItems([
            'zoo.open',
            'exhibit.create',
            'animal.feed',
            'exhibit.clean',
            'zoo.visit',
            'exhibit.visit',
            'animal.see',
        ], roles_to_actions(['zoo.admin', 'zoo.visitor']))

    def test_actions_to_roles(self):
        """An action maps back to every role that grants it."""
        self.assertSameItems([
            'zoo.admin',
            'exhibit.staff',
            'zoo.visitor'
        ], actions_to_roles(['zoo.visit']))
| staab/django-flexible-permissions | tests/test_roles.py | Python | mit | 662 | [
"VisIt"
] | 8e4e6e224dc21cbd60ce6f39ca51f5b230b2363ebf9599dcfd964b5b8d96d7e5 |
# Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An implementation of the Mime algorithm.
Paper: https://arxiv.org/abs/2008.03606.
"""
import collections
from typing import Any, Collection, Dict, Optional
import attr
import tensorflow as tf
import tensorflow_federated as tff
def _unpack_data_label(batch):
if isinstance(batch, collections.abc.Mapping):
return batch['x'], batch['y']
elif isinstance(batch, (tuple, list)):
if len(batch) < 2:
raise ValueError('Expecting both data and label from a batch.')
return batch[0], batch[1]
else:
raise ValueError('Unrecognized batch data.')
@attr.s(eq=False)
class OptimizerState(object):
  # Snapshot of a Keras optimizer's state (see `_get_optimizer_state`).
  # Step counter, taken from `optimizer.iterations`.
  iterations = attr.ib()
  # Tuple of the optimizer's remaining variables, i.e. `optimizer.weights[1:]`
  # (the first entry is the iterations counter and is stored separately).
  weights = attr.ib()
def _noise_fn(noise_std: float, model_weight_specs: Collection[tf.TensorSpec]):
  """Returns random Gaussian noise matching `model_weight_specs`.

  Used to add noise for differential privacy.

  Args:
    noise_std: Standard deviation of the sampled Gaussian noise.
    model_weight_specs: Structure of `tf.TensorSpec`s describing the model
      weights the noise will be added to.

  Returns:
    A structure matching `model_weight_specs` with one noise tensor per spec.
  """
  # Create the generator once for the whole structure. The previous code
  # built a fresh `Generator.from_non_deterministic_state()` inside the
  # per-tensor closure, i.e. one generator (and one reseed) per weight
  # tensor, which is wasteful.
  random_generator = tf.random.Generator.from_non_deterministic_state()

  def noise_tensor(spec):
    # `normal` already returns a tensor of shape `spec.shape`, so no
    # reshape is needed.
    return random_generator.normal(spec.shape, stddev=noise_std)

  return tf.nest.map_structure(noise_tensor, model_weight_specs)
def _initialize_optimizer_vars(model, optimizer):
  """Ensures variables holding the state of `optimizer` are created."""
  model_weights = _get_weights(model)
  # Applying an all-zero update forces the optimizer to create its slot
  # variables without changing the model weights.
  zero_deltas = tf.nest.map_structure(tf.zeros_like, model_weights.trainable)
  grads_and_vars = [
      (grad, var) for grad, var in zip(zero_deltas, model_weights.trainable)
  ]
  optimizer.apply_gradients(grads_and_vars, name='server_update')
  assert optimizer.variables()
def _get_weights(model):
if hasattr(model, 'weights'):
return model.weights
else:
return tff.learning.ModelWeights.from_model(model)
def _get_optimizer_state(optimizer):
  """Snapshots `optimizer` variables into an `OptimizerState`.

  The first entry of `optimizer.weights` is reserved for the iterations
  counter (see the Keras OptimizerV2 implementation), so the stored
  `weights` tuple deliberately skips it.
  """
  slot_weights = tuple(optimizer.weights[1:])
  return OptimizerState(iterations=optimizer.iterations, weights=slot_weights)
@attr.s(eq=False, order=False, frozen=True)
class ServerState(object):
  """Structure for state on the server.

  Attributes:
    model: A `tff.learning.ModelWeights` instance.
    optimizer_state: A namedtuple of the optimizer variables.
    round_num: The current training round, as a float.
    dp_clip_norm: L2 norm to clip client gradients.
    dp_noise_std: Standard deviation of Gaussian distribution to sample noise
      to add to gradients for differential privacy.
    mean_full_grad: Average public gradient computed on public data.
  """
  model = attr.ib()
  optimizer_state = attr.ib()
  # Kept as a float to avoid type incompatibility when calculating
  # learning rate schedules.
  round_num = attr.ib()
  dp_clip_norm= attr.ib()
  dp_noise_std=attr.ib()
  mean_full_grad = attr.ib()
class CreatePrivateServerUpdateFn():
  """Callable wrapping the tf.function for the private server update.

  A "create" class is used (instead of a bare tf.function) to prevent
  "ValueError: Creating variables on a non-first call to a function decorated
  with tf.function" errors due to variable creation (here: the random
  generator). This also makes the update function directly testable.
  """

  def __init__(self):
    # Single generator shared across calls; DP noise for every weight
    # tensor is drawn from this one non-deterministic source.
    self.random_generator = tf.random.Generator.from_non_deterministic_state()

  def _noise_fn(self, noise_std: float, model_weight_specs: Collection[tf.TensorSpec]):
    """Returns random noise to be added for differential privacy."""
    def noise_tensor(spec):
      noise = self.random_generator.normal(spec.shape, stddev=noise_std)
      # Defensive reshape; `normal` already samples with `spec.shape`.
      noise = tf.reshape(noise, spec.shape)
      return noise
    return tf.nest.map_structure(noise_tensor, model_weight_specs)

  @tf.function
  def __call__(self, model,
               optimizer,
               server_state,
               weights_delta,
               server_learning_rate=1.0):
    """Updates `server_state` based on `weights_delta`, increase the round number.

    Args:
      model: A `tff.learning.Model`.
      optimizer: A `tf.keras.optimizers.Optimizer`. Note: not referenced in
        this body; the new weights are assigned directly below.
      server_state: A `ServerState`, the state to be updated.
      weights_delta: An update to the trainable variables of the model.
      server_learning_rate: Server learning rate scales the update from clients
        before applying to server. Defaults to 1.

    Returns:
      An updated `ServerState`.
    """
    model_weights = _get_weights(model)
    # Load the server's current model into the local model variables.
    tf.nest.map_structure(lambda v, t: v.assign(t), model_weights,
                          server_state.model)
    model_weight_specs = tf.nest.map_structure(
        lambda v: tf.TensorSpec(v.shape, v.dtype), model_weights.trainable)
    # Sample DP noise matching the trainable weights.
    noise_tensor = self._noise_fn(server_state.dp_noise_std, model_weight_specs)
    # Compute new model weights: w <- w + lr * (delta + noise).
    new_weights = tf.nest.map_structure(lambda a, b, n: a + server_learning_rate * (b + n),
                                        model_weights.trainable, weights_delta, noise_tensor)
    # Set the model weights to the new ones, overriding the update made by
    # the optimizer.
    tf.nest.map_structure(lambda v, t: v.assign(t), model_weights.trainable,
                          new_weights)
    # Create a new state based on the updated model.
    return tff.structure.update_struct(
        server_state,
        model=model_weights,
        round_num=server_state.round_num)
@tf.function
def public_server_update(model,
                         optimizer,
                         server_state,
                         sum_full_grad,
                         mean_full_grad,
                         server_learning_rate=1.0):
  """Advances the server optimizer state using the mean public gradient.

  Only the optimizer state and `mean_full_grad` of the returned state are
  refreshed; the model weights stored in the returned state are the ones
  already in `server_state` (the in-place optimizer step below is not
  written back into the state).

  Args:
    model: A `tff.learning.Model`.
    optimizer: A `tf.keras.optimizers.Optimizer`.
    server_state: A `ServerState`, the state to be updated.
    sum_full_grad: Sum of local full batch gradients from public clients.
      Note: not referenced in this body.
    mean_full_grad: Mean of local full batch gradients from public clients.
    server_learning_rate: Server learning rate scales the update from clients
      before applying to server. Defaults to 1. Note: not referenced in this
      body.

  Returns:
    An updated `ServerState`.
  """
  model_weights = _get_weights(model)
  tf.nest.map_structure(lambda v, t: v.assign(t), model_weights,
                        server_state.model)
  # Server optimizer variables must be initialized prior to invoking this
  optimizer_state = _get_optimizer_state(optimizer)
  tf.nest.map_structure(lambda v, t: v.assign(t), optimizer_state,
                        server_state.optimizer_state)
  # Apply the update to the model. This is only to update the state of
  # the optimizer.
  grads_and_vars = zip(mean_full_grad, model_weights.trainable)
  optimizer.apply_gradients(grads_and_vars)
  # Create a new state based on the updated model.
  return tff.structure.update_struct(
      server_state,
      optimizer_state=_get_optimizer_state(optimizer),
      mean_full_grad=mean_full_grad,
      round_num=server_state.round_num)
@attr.s(eq=False, order=False, frozen=True)
class PrivateClientOutput(object):
  """Structure for outputs returned from clients during federated optimization.

  Attributes:
    weights_delta: A dictionary of updates to the model's trainable variables.
    client_weight: Weights to be used in a weighted mean when aggregating
      `weights_delta`.
    model_output: A structure matching `tff.learning.Model.report_local_outputs`
      reflecting the results of training on the input dataset.
    optimizer_output: Additional metrics or other outputs defined by the
      optimizer.
  """
  # Clipped delta between post-training and initial trainable weights.
  weights_delta = attr.ib()
  client_weight = attr.ib()
  model_output = attr.ib()
  # Currently carries an OrderedDict with 'num_examples' (see client update).
  optimizer_output = attr.ib()
@attr.s(eq=False, order=False, frozen=True)
class PublicClientOutput(object):
  """Structure for outputs returned from clients during federated optimization.

  Attributes:
    full_grad: Gradient of loss computed on full client data.
    client_weight: Weights to be used in a weighted mean when aggregating
      `full_grad` (here: the number of batches processed).
  """
  full_grad = attr.ib()
  client_weight = attr.ib()
class CreatePrivateClientUpdateFn():
  """Callable wrapping the tf.function for the private client update.

  A "create" class is used (instead of a bare tf.function) to prevent
  "ValueError: Creating variables on a non-first call to a function decorated
  with tf.function" errors due to variable creation. This also makes the
  client update function directly testable.
  """

  def __init__(self):
    # NOTE(review): `grad_sum` is initialized here but never referenced in
    # `__call__` of this class (unlike `CreatePublicClientUpdateFn`) —
    # confirm whether it is dead state.
    self.grad_sum = None

  @tf.function
  def __call__(self,
               model,
               dataset,
               initial_weights,
               initial_optimizer_state,
               optimizer,
               mean_full_grad,
               client_weight_fn=None,
               dp_clip_norm=1.0):
    """Updates client model with Mime-style variance-reduced gradients.

    Args:
      model: A `tff.learning.Model`.
      dataset: A 'tf.data.Dataset'.
      initial_weights: A `tff.learning.ModelWeights` from server.
      initial_optimizer_state: The variables to assign to the client optimizer.
      optimizer: A `tf.keras.optimizer.Optimizer` object, assumed to be
        identical to the optimizer used by the server.
      mean_full_grad: Average full gradient computed from public clients.
      client_weight_fn: Optional function that takes the output of
        `model.report_local_outputs` and returns a tensor that provides the
        weight in the federated average of model deltas. If not provided, the
        default is the total number of examples processed on device.
      dp_clip_norm: L2 norm to clip the client deltas

    Returns:
      A 'PrivateClientOutput`.
    """
    model_weights = _get_weights(model)
    # NOTE(review): `_get_weights(model)` is called twice on the same model;
    # both results appear to wrap the same underlying variables, so
    # `initial_model_weights` may alias `model_weights` — in that case
    # `initial_grads` below would equal `current_grads`. Confirm intended.
    initial_model_weights = _get_weights(model)
    tf.nest.map_structure(lambda v, t: v.assign(t), model_weights,
                          initial_weights)
    tf.nest.map_structure(lambda v, t: v.assign(t), initial_model_weights,
                          initial_weights)
    # Compute gradient over full data at initial_weights.
    # This assumes that the loss is an average over all examples in a batch,
    # and that all batches have the same size (otherwise, last batch has a
    # slightly higher weight).
    # NOTE(review): `num_batches` is assigned but never used below.
    num_batches = 0.0
    loss_sum = 0.0
    # Client optimizer variables must be initialized prior to invoking this
    optimizer_state = _get_optimizer_state(optimizer)
    num_examples = tf.constant(0, dtype=tf.int32)
    mean_full_grad_tensors = tf.nest.map_structure(tf.convert_to_tensor, mean_full_grad)
    for batch in iter(dataset):
      # keep optimizer state fixed to initial values (Mime uses the server's
      # optimizer statistics for every local step).
      tf.nest.map_structure(lambda v, t: v.assign(t), optimizer_state,
                            initial_optimizer_state)
      # Gradient at the current (locally updated) weights.
      with tf.GradientTape() as tape:
        output = model.forward_pass(batch)
      current_grads = tape.gradient(output.loss, model_weights.trainable)
      # Gradient at the initial (server) weights.
      with tf.GradientTape() as tape:
        output = model.forward_pass(batch)
      initial_grads = tape.gradient(output.loss, initial_model_weights.trainable)
      current_grads_tensors = tf.nest.map_structure(tf.convert_to_tensor, current_grads)
      initial_grads_tensors = tf.nest.map_structure(tf.convert_to_tensor, initial_grads)
      # SVRG-style correction: g_y - g_x + c (Mime's variance reduction).
      grads = tf.nest.map_structure(lambda g_y, g_x, c: g_y - g_x + c, current_grads_tensors, initial_grads_tensors, mean_full_grad_tensors)
      grads_and_vars = zip(grads, model_weights.trainable)
      optimizer.apply_gradients(grads_and_vars)
      if hasattr(output, 'num_examples'):
        batch_size = tf.cast(output.num_examples, dtype=tf.int32)
      else:
        batch_x, _ = _unpack_data_label(batch)
        batch_size = tf.shape(batch_x)[0]
      num_examples+=batch_size
      # Accumulate the example-weighted loss for reporting.
      loss_sum += output.loss * tf.cast(batch_size, tf.float32)
    aggregated_outputs = loss_sum
    weights_delta = tf.nest.map_structure(lambda a, b: a - b,
                                          model_weights.trainable,
                                          initial_weights.trainable)
    if client_weight_fn is None:
      client_weight = tf.cast(num_examples, dtype=tf.float32)
    else:
      client_weight = client_weight_fn(aggregated_outputs)
    optimizer_output = collections.OrderedDict([('num_examples', num_examples)])
    # Clip the overall delta to bound each client's contribution (DP);
    # a non-positive clip norm disables clipping.
    clip_norm = tf.cast(dp_clip_norm, tf.float32)
    if tf.less(tf.constant(0, tf.float32), clip_norm):
      flatten_weights_delta = tf.nest.flatten(weights_delta)
      clipped_flatten_weights_delta, _ = tf.clip_by_global_norm(
          flatten_weights_delta, clip_norm)
      weights_delta = tf.nest.pack_sequence_as(weights_delta,
                                               clipped_flatten_weights_delta)
    return PrivateClientOutput(
        weights_delta=weights_delta,
        client_weight=client_weight,
        model_output=loss_sum / client_weight,
        optimizer_output=optimizer_output)
class CreatePublicClientUpdateFn():
  """Callable wrapping the tf.function for the public client update.

  A "create" class is used (instead of a bare tf.function) to prevent
  "ValueError: Creating variables on a non-first call to a function decorated
  with tf.function" errors due to variable creation (here: the `grad_sum`
  accumulator variables). This also makes the update directly testable.
  """

  def __init__(self):
    # Accumulator variables for the full-batch gradient; created lazily on
    # the first call (shapes depend on the model) and zeroed on each call.
    self.grad_sum = None

  @tf.function
  def __call__(self,
               model,
               dataset,
               initial_weights,
               initial_optimizer_state,
               optimizer,
               client_weight_fn=None):
    """Computes the full-batch gradient of `model` at `initial_weights`.

    Args:
      model: A `tff.learning.Model`.
      dataset: A 'tf.data.Dataset'.
      initial_weights: A `tff.learning.ModelWeights` from server.
      initial_optimizer_state: The variables to assign to the client optimizer.
        Note: not referenced in this body.
      optimizer: A `tf.keras.optimizer.Optimizer` object, assumed to be
        identical to the optimizer used by the server. Note: not referenced
        in this body.
      client_weight_fn: Optional function that takes the output of
        `model.report_local_outputs` and returns a tensor that provides the
        weight in the federated average of model deltas. If not provided, the
        default is the total number of examples processed on device. Note:
        not referenced in this body; the output weight is the batch count.

    Returns:
      A 'PublicClientOutput`.
    """
    model_weights = _get_weights(model)
    tf.nest.map_structure(lambda v, t: v.assign(t), model_weights,
                          initial_weights)
    # Compute gradient over full data at initial_weights.
    # This assumes that the loss is an average over all examples in a batch,
    # and that all batches have the same size (otherwise, last batch has a
    # slightly higher weight).
    num_batches = 0.0
    if self.grad_sum is None:
      self.grad_sum = tf.nest.map_structure(
          lambda x: tf.Variable(tf.zeros_like(x)), model_weights.trainable)
    # Zero the accumulator: the object (and its variables) persist across
    # calls.
    tf.nest.map_structure(
        lambda v, t: v.assign(t), self.grad_sum,
        tf.nest.map_structure(tf.zeros_like, model_weights.trainable))
    for batch in iter(dataset):
      num_batches += 1.0
      with tf.GradientTape() as tape:
        output = model.forward_pass(batch)
      tf.nest.map_structure(lambda v, t: v.assign_add(t), self.grad_sum,
                            tape.gradient(output.loss, model_weights.trainable))
    if num_batches > 0.0:
      # Average of per-batch gradients (equal batch sizes assumed above).
      full_grad = tf.nest.map_structure(lambda a: a / num_batches,
                                        self.grad_sum)
    else:
      # In case a client dataset is empty, just return an all 0s full gradient.
      full_grad = tf.nest.map_structure(tf.zeros_like, model_weights.trainable)
    return PublicClientOutput(
        full_grad=full_grad,
        client_weight=num_batches)
def build_server_init_fn(model_fn, optimizer_fn, dp_clip_norm, dp_noise_std, base_lr, server_momentum):
  """Builds a `tff.tf_computation` that returns the initial `ServerState`.

  The attributes `ServerState.model` and `ServerState.optimizer_state` are
  initialized via their constructor functions, `ServerState.round_num` is set
  to 0, and `ServerState.mean_full_grad` starts as all zeros.

  Args:
    model_fn: A no-arg function that returns a `tff.learning.Model`.
    optimizer_fn: A function accepting `learning_rate` and `momentum` keyword
      arguments that returns a `tf.keras.optimizers.Optimizer`.
    dp_clip_norm: L2 norm to clip client gradients.
    dp_noise_std: Standard deviation of Gaussian distribution to sample noise
      to add to gradients for differential privacy.
    base_lr: Learning rate for server optimizer
    server_momentum: Momentum for server optimizer

  Returns:
    A `tff.tf_computation` that returns initial `ServerState`.
  """

  @tff.tf_computation
  def server_init_tf():
    optimizer = optimizer_fn(learning_rate=base_lr, momentum=server_momentum)
    model = model_fn()
    model_weights = _get_weights(model)
    # Zero-initialized accumulator mirroring the trainable-weight structure;
    # holds the server's estimate of the mean client full-data gradient.
    mean_full_grad = tf.nest.map_structure(lambda x: tf.Variable(tf.zeros_like(x)), model_weights.trainable)
    # Force creation of the optimizer slot variables so they can be captured
    # in the returned state.
    _initialize_optimizer_vars(model, optimizer)
    return ServerState(
        model=_get_weights(model),
        optimizer_state=_get_optimizer_state(optimizer),
        round_num=0,
        dp_clip_norm=dp_clip_norm,
        dp_noise_std=dp_noise_std,
        mean_full_grad=mean_full_grad)

  return server_init_tf
def build_averaging_process(model_fn,
                            update_type = 'private',
                            optimizer_fn=tf.keras.optimizers.SGD,
                            base_lr=0.1,
                            server_lr=1.0,
                            server_momentum=0.0,
                            dp_clip_norm=1.0,
                            dp_noise_std=0.0,
                            client_weight_fn=None):
  """Builds the TFF computations for optimization using federated averaging.

  Args:
    model_fn: A no-arg function that returns a `tff.learning.Model`.
    update_type: String to denote whether process operates on private or public
      data. Must be 'private' or 'public'; any other value makes this function
      return None.
    optimizer_fn: A function that accepts a `learning_rate` argument and returns
      a `tf.keras.optimizers.Optimizer` instance. Must return an optimizer with
      `iterations` and `weights` attributes. This is the base optimizer whose
      updates are split between the client and server in the Mime/Mimelite
      algorithms.
    base_lr: A scalar learning rate or a function that accepts a float
      `round_num` argument and returns a learning rate for the base optimizer.
    server_lr: A scalar learning rate or a function that accepts a float
      `round_num` argument and returns a learning rate for applying weight
      updates to server model.
    server_momentum: A scalar momentum parameter for the server optimizer.
    dp_clip_norm: L2 norm to clip deltas of clients to.
    dp_noise_std: Standard deviation of Gaussian distribution to sample noise
      to add to gradients for differential privacy.
    client_weight_fn: Optional function that takes the output of
      `model.report_local_outputs` and returns a tensor that provides the weight
      in the federated average of the client models. If not provided, the
      default is the total number of examples processed on device.

  Returns:
    A `tff.templates.IterativeProcess`.
  """
  # Promote scalar learning rates to constant per-round schedules.
  base_lr_schedule = base_lr
  if not callable(base_lr_schedule):
    base_lr_schedule = lambda round_num: base_lr
  server_lr_schedule = server_lr
  if not callable(server_lr_schedule):
    server_lr_schedule = lambda round_num: server_lr
  # Instantiated only to read the dataset input spec below.
  dummy_model = model_fn()
  server_init_tf = build_server_init_fn(model_fn, optimizer_fn, dp_clip_norm, dp_noise_std, base_lr, server_momentum)
  # Derive the TFF type of each ServerState field from the init computation
  # so the client/server computations below are typed consistently.
  server_state_type = server_init_tf.type_signature.result
  model_weights_type = server_state_type.model
  optimizer_state_type = server_state_type.optimizer_state
  round_num_type = server_state_type.round_num
  clip_norm_type = server_state_type.dp_clip_norm
  mean_full_grad_type = server_state_type.mean_full_grad
  tf_dataset_type = tff.SequenceType(dummy_model.input_spec)
  model_input_type = tff.SequenceType(dummy_model.input_spec)
  federated_dataset_type = tff.type_at_clients(tf_dataset_type)

  @tff.tf_computation(model_input_type, model_weights_type,
                      optimizer_state_type, round_num_type, clip_norm_type, mean_full_grad_type)
  def private_client_update_fn(tf_dataset, initial_model_weights,
                               initial_optimizer_state, round_num, clip_norm, mean_full_grad):
    """Performs a private client update."""
    model = model_fn()
    initial_model = model_fn()
    base_lr = base_lr_schedule(round_num)
    optimizer = optimizer_fn(learning_rate=base_lr, momentum=server_momentum)
    # We initialize the client optimizer variables to avoid creating them
    # within the scope of the tf.function client_update.
    _initialize_optimizer_vars(model, optimizer)
    client_update = CreatePrivateClientUpdateFn()
    return client_update(model, tf_dataset, initial_model_weights,
                         initial_optimizer_state, optimizer, mean_full_grad, client_weight_fn, clip_norm)

  @tff.tf_computation(model_input_type, model_weights_type,
                      optimizer_state_type, round_num_type)
  def public_client_update_fn(tf_dataset, initial_model_weights,
                              initial_optimizer_state, round_num):
    """Performs a public client update."""
    model = model_fn()
    base_lr = base_lr_schedule(round_num)
    optimizer = optimizer_fn(learning_rate=base_lr, momentum=server_momentum)
    # We initialize the client optimizer variables to avoid creating them
    # within the scope of the tf.function client_update.
    _initialize_optimizer_vars(model, optimizer)
    client_update = CreatePublicClientUpdateFn()
    return client_update(model, tf_dataset, initial_model_weights,
                         initial_optimizer_state, optimizer, client_weight_fn)

  @tff.tf_computation(server_state_type, model_weights_type.trainable)
  def private_server_update_fn(server_state, model_delta):
    model = model_fn()
    server_lr = server_lr_schedule(server_state.round_num)
    base_lr = base_lr_schedule(server_state.round_num)
    optimizer = optimizer_fn(learning_rate=base_lr, momentum=server_momentum)
    # We initialize the server optimizer variables to avoid creating them
    # within the scope of the tf.function server_update.
    _initialize_optimizer_vars(model, optimizer)
    private_server_update = CreatePrivateServerUpdateFn()
    return private_server_update(model, optimizer, server_state, model_delta,
                                 server_lr)

  @tff.tf_computation(server_state_type,
                      model_weights_type.trainable,
                      model_weights_type.trainable)
  def public_server_update_fn(server_state, mean_full_grad, sum_full_grad):
    model = model_fn()
    server_lr = server_lr_schedule(server_state.round_num)
    base_lr = base_lr_schedule(server_state.round_num)
    optimizer = optimizer_fn(learning_rate=base_lr, momentum=server_momentum)
    # We initialize the server optimizer variables to avoid creating them
    # within the scope of the tf.function server_update.
    _initialize_optimizer_vars(model, optimizer)
    # NOTE(review): unlike the private path, `public_server_update` is used
    # directly (no Create...Fn wrapper). It is presumably defined elsewhere in
    # this module — verify it exists and tolerates repeated variable creation.
    return public_server_update(model, optimizer, server_state, sum_full_grad, mean_full_grad,
                                server_lr)

  @tff.federated_computation(
      tff.type_at_server(server_state_type),
      tff.type_at_clients(tf_dataset_type))
  def run_one_round_public(server_state, federated_dataset):
    """Orchestration logic for one round of computation.

    Args:
      server_state: A `ServerState`.
      federated_dataset: A federated `tf.Dataset` with placement `tff.CLIENTS`.

    Returns:
      A tuple of updated `ServerState` and the result of
      `tff.learning.Model.federated_output_computation`.
    """
    client_model = tff.federated_broadcast(server_state.model)
    optimizer_state = tff.federated_broadcast(server_state.optimizer_state)
    client_round_num = tff.federated_broadcast(server_state.round_num)
    client_outputs = tff.federated_map(
        public_client_update_fn,
        (federated_dataset, client_model, optimizer_state, client_round_num))
    # Aggregate both the mean and the sum of the clients' full-data gradients;
    # the server update consumes both.
    mean_full_grad = tff.federated_mean(
        client_outputs.full_grad)
    sum_full_grad = tff.federated_sum(
        client_outputs.full_grad)
    server_state = tff.federated_map(public_server_update_fn,
                                     (server_state, mean_full_grad, sum_full_grad))
    return server_state

  @tff.federated_computation(
      tff.type_at_server(server_state_type),
      tff.type_at_clients(tf_dataset_type))
  def run_one_round_private(server_state, federated_dataset):
    """Orchestration logic for one round of computation.

    Args:
      server_state: A `ServerState`.
      federated_dataset: A federated `tf.Dataset` with placement `tff.CLIENTS`.

    Returns:
      A tuple of updated `ServerState` and the result of
      `tff.learning.Model.federated_output_computation`.
    """
    client_model = tff.federated_broadcast(server_state.model)
    optimizer_state = tff.federated_broadcast(server_state.optimizer_state)
    client_round_num = tff.federated_broadcast(server_state.round_num)
    client_dp_clip_norm = tff.federated_broadcast(server_state.dp_clip_norm)
    client_mean_full_grad = tff.federated_broadcast(server_state.mean_full_grad)
    client_outputs = tff.federated_map(
        private_client_update_fn,
        (federated_dataset, client_model, optimizer_state, client_round_num, client_dp_clip_norm, client_mean_full_grad))
    # Unweighted federated mean of the (clipped) client weight deltas.
    model_delta = tff.federated_mean(
        client_outputs.weights_delta)
    server_state = tff.federated_map(private_server_update_fn,
                                     (server_state, model_delta))
    return server_state

  @tff.federated_computation
  def server_init_tff():
    """Orchestration logic for server model initialization."""
    return tff.federated_value(server_init_tf(), tff.SERVER)

  # Select the round function by update_type; any other value returns None.
  if update_type == 'private':
    return tff.templates.IterativeProcess(
        initialize_fn=server_init_tff, next_fn=run_one_round_private)
  elif update_type == 'public':
    return tff.templates.IterativeProcess(
        initialize_fn=server_init_tff, next_fn=run_one_round_public)
| google-research/public-data-in-dpfl | mime.py | Python | apache-2.0 | 26,910 | [
"Gaussian"
] | 022a8885c6d204244663c9089f45fbbaa1c3bb12445d8ded56fa648045324f41 |
"""This demo uses PETSc's TAO solver for nonlinear (bound-constrained)
optimisation problems to solve a buckling problem in FEniCS.
We consider here a hyperelastic beam constrained in a box
under axial compression.
The box is designed such that this beam will lose stability and move
upwards (and not downwards) in order to minimise the potential energy."""
# Copyright (C) 2014 Tianyi Li
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# First added: 2014-07-19
from __future__ import print_function
from dolfin import *
import matplotlib.pyplot as plt

# PETSc's TAO bound-constrained solvers are required for this demo.
if not has_petsc():
    print("DOLFIN must be compiled at least with PETSc 3.6 to run this demo.")
    exit(0)

# Read mesh and refine once
mesh = Mesh("../buckling.xml.gz")
mesh = refine(mesh)

# Create function space (vector-valued piecewise-linear Lagrange elements)
V = VectorFunctionSpace(mesh, "Lagrange", 1)

# Create solution, trial and test functions
u, du, v = Function(V), TrialFunction(V), TestFunction(V)

# Elasticity parameters: Lame coefficients from Young's modulus E and
# Poisson's ratio nu.
E, nu = 10.0, 0.3
mu = Constant(E/(2.0*(1.0+nu)))
lmbda = Constant(E*nu/((1.0+nu)*(1.0-2.0*nu)))

# Compressible neo-Hookean model: strain-energy density psi built from the
# deformation gradient F, right Cauchy-Green tensor C and its invariants.
I = Identity(mesh.geometry().dim())
F = I + grad(u)
C = F.T*F
Ic = tr(C)
J = det(F)
psi = (mu/2)*(Ic-2)-mu*ln(J)+(lmbda/2)*(ln(J))**2

# Surface force (axial compression applied on the right end, see ds(2) below)
f = Constant((-0.08, 0.0))

# The displacement u must be such that the current configuration
# doesn't escape the box [xmin, xmax] x [ymin, ymax].
# The box is asymmetric about the beam (ymin=-0.2, ymax=2.0), so the beam
# can only buckle upwards, as described in the module docstring.
constraint_u = Expression(("xmax-x[0]", "ymax-x[1]"), xmax=10.0, ymax=2.0, degree=1)
constraint_l = Expression(("xmin-x[0]", "ymin-x[1]"), xmin=0.0, ymin=-0.2, degree=1)
u_min = interpolate(constraint_l, V)
u_max = interpolate(constraint_u, V)

# Symmetry condition (to block rigid body rotations)
class Left(SubDomain):
    """Left end of the beam: x = 0."""
    def inside(self, x, on_boundary):
        return on_boundary and near(x[0], 0)

class Right(SubDomain):
    """Right end of the beam: x = 10 (where the load is applied)."""
    def inside(self, x, on_boundary):
        return on_boundary and near(x[0], 10)

# Mark boundary facets: 1 = left end (clamped), 2 = right end (loaded).
boundaries = FacetFunction("size_t", mesh)
boundaries.set_all(0)
left = Left()
left.mark(boundaries, 1)
right = Right()
right.mark(boundaries, 2)
ds = Measure('ds', domain=mesh, subdomain_data=boundaries)

# Clamp the left end by collapsing the bound constraints to zero there.
bc = DirichletBC(V, Constant([0.0, 0.0]), boundaries, 1)
bc.apply(u_min.vector())
bc.apply(u_max.vector())

# Variational formulation: total potential energy, its gradient and Hessian.
elastic_energy = psi*dx - dot(f, u)*ds(2)
grad_elastic_energy = derivative(elastic_energy, u, v)
H_elastic_energy = derivative(grad_elastic_energy, u, du)

# Define the minimisation problem by using OptimisationProblem class
class BucklingProblem(OptimisationProblem):
    """Energy-minimisation problem handed to the TAO solver."""

    def __init__(self):
        OptimisationProblem.__init__(self)

    # Objective function: total potential energy at displacement x
    def f(self, x):
        u.vector()[:] = x
        return assemble(elastic_energy)

    # Gradient of the objective function (assembled into b)
    def F(self, b, x):
        u.vector()[:] = x
        assemble(grad_elastic_energy, tensor=b)

    # Hessian of the objective function (assembled into A)
    def J(self, A, x):
        u.vector()[:] = x
        assemble(H_elastic_energy, tensor=A)

# Create the PETScTAOSolver
solver = PETScTAOSolver()

# Set some parameters ("tron" is TAO's trust-region Newton method)
solver.parameters["method"] = "tron"
solver.parameters["monitor_convergence"] = True
solver.parameters["report"] = True

# Uncomment this line to see the available parameters
# info(parameters, True)

# Parse (PETSc) parameters
parameters.parse()

# Solve the bound-constrained problem: minimise energy with u_min <= u <= u_max
solver.solve(BucklingProblem(), u.vector(), u_min.vector(), u_max.vector())

# Save solution in XDMF format if available
out = XDMFFile(mesh.mpi_comm(), "u.xdmf")
if has_hdf5():
    out.write(u)
elif MPI.size(mesh.mpi_comm()) == 1:
    encoding = XDMFFile.Encoding_ASCII
    out.write(u, encoding)
else:
    # Save solution in vtk format
    out = File("u.pvd")
    out << u

# Plot the current configuration
plot(u, mode="displacement", wireframe=True, title="Displacement field")
plt.show()
| FEniCS/dolfin | demo/undocumented/buckling-tao/python/demo_buckling-tao.py | Python | lgpl-3.0 | 4,418 | [
"VTK"
] | 9c12f1704c513ca0efabe5da1a8790c582db9d897f0c86f0234d046641160cdd |
from data_news import db, cache
from ..utils import epoch_seconds
from flask.ext.sqlalchemy import Pagination
from sqlalchemy import func
from datetime import datetime
from math import log
class Vote(db.Model):
    """ We keep track of every vote
    Pretty simple here, we have:
    an item they voted on,
    the voting user (user_from)
    and the author (user_to)
    and the timestamp =)
    TODO: Add a value (if we want down votes)
    """
    id = db.Column(db.Integer, primary_key=True)
    # When the vote was cast (no DB-side default; set by application code).
    timestamp = db.Column(db.DateTime)
    # The user who cast the vote.
    user_from_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    # The author of the voted-on item.
    user_to_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    item_id = db.Column(db.Integer, db.ForeignKey('item.id'))

    def __str__(self):
        return str(self.id)

    @property
    def serialize(self):
        """Return object data in easily serializeable format"""
        # NOTE(review): `dump_datetime` is neither defined nor imported in this
        # module — confirm it is available at runtime, otherwise accessing this
        # property raises NameError.
        return {
            'id' : self.id,
            'timestamp' : dump_datetime(self.timestamp),
            'item_id' : self.item_id,
        }
class Item(db.Model):
    """ An item is any kind of post or comment
    It should either have a url/title or have text
    TODO: Right now kind is just a simple string ('post' or 'comment' or 'page' or 'external')
    It should probably be another table, similar to Role
    TODO?: There is no easy way to get the parent post for a deep nested comment
    You have to do recursion on each parent. Should I make this a column?
    parent currently refers to just the immediate parent (post or comment)
    TODO: Children is not working great with caching. How to make that nicer?
    TODO: user/votes is not working great with caching. How to make that nicer?
    """
    id = db.Column(db.Integer, primary_key = True)
    title = db.Column(db.String(140))
    url = db.Column(db.String(), unique=True)
    text = db.Column(db.String(3818))
    timestamp = db.Column(db.DateTime)
    # BUG FIX: pass the *callable* `datetime.utcnow` (no parentheses) so the
    # default is evaluated on every INSERT. The old `datetime.utcnow()` was
    # evaluated once at import time, stamping every row with the same value.
    last_changed = db.Column(db.DateTime, default=datetime.utcnow)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    user = db.relationship('User', backref='items', lazy='joined')
    kind = db.Column(db.String)
    votes = db.relationship('Vote', backref="item", primaryjoin="Vote.item_id==Item.id", lazy='joined')
    parent_id = db.Column(db.Integer, db.ForeignKey('item.id'))
    children = db.relationship('Item',
                backref=db.backref("parent",
                        remote_side='Item.id',
                        lazy='immediate',
                        ),
                lazy='dynamic',
                order_by=db.desc('Item.timestamp')
            )

    def __repr__(self):
        return '<Item %r>' % (self.id)

    def __str__(self):
        return str(self.id)

    @property
    def changed(self):
        """Last-changed timestamp rendered as a string."""
        return str(self.last_changed)

    @cache.memoize(60*5)
    def get_children(self):
        """ Get all the children of an item, recusively.
        Returns a list of tuples=(item object, depth).
        Siblings at each level are ordered by descending comment_score.
        """
        recursiveChildren = []
        def recurse(item, depth):
            # depth == 0 is the item itself; only descendants are collected.
            if depth != 0:
                recursiveChildren.append((item,depth))
            children = sorted(item.children, key=lambda x: x.comment_score, reverse=True)
            for child in children:
                recurse(child, depth + 1)
        recurse(self, 0)
        return recursiveChildren

    @cache.memoize(60*5)
    def voted_for(self, user_id):
        """ Check if an item was voted for by a user
        """
        vote = Vote.query.filter_by(item_id = self.id, user_from_id = user_id).first()
        # `first()` returns None when no matching vote row exists.
        return vote is not None

    @property
    def post_score(self):
        """Kinda from hot formula from Reddit.
        TODO: Actually think about this
        """
        votes = len(self.votes)
        # COUNT(*) in SQL instead of loading every child row just to len() it.
        comments = Item.query.filter_by(parent_id = self.id).count()
        date = self.timestamp
        s = votes * comments/10
        order = log(max(abs(s), 1), 10)
        sign = 1 if votes > 0 else -1 if votes < 0 else 0.1
        seconds = epoch_seconds(date) - 1134028003
        return round(order + sign * seconds / 45000, 7)

    @property
    def comment_score(self):
        """ Give comments a score based on votes, replies.
        TODO: Use brain.
        """
        votes = len(self.votes)
        # COUNT(*) in SQL instead of loading every child row just to len() it.
        comments = Item.query.filter_by(parent_id = self.id).count()
        s = votes * comments/10
        order = log(max(abs(s), 1), 10)
        sign = 1 if votes > 0 else -1 if votes < 0 else 0.1
        return round(order + sign, 7)

    @staticmethod
    @cache.memoize(60)
    def get_item_and_children(id):
        """ Get an item
        Make sure everything we will need loads, since we are caching
        TODO: Still not playing nice with cache
        """
        item = Item.query.options(
                    db.joinedload('user'),
                    db.joinedload('votes'),
                ).get_or_404(id)
        return item

    @staticmethod
    @cache.memoize(60)
    def ranked_posts(page):
        """ Returns the top ranked posts by post_score
        (Kinda) Load all necessary sub-queries so we can cache
        TODO: This should be an combined with post_score to be a
            sqlalchemy query, but I keep breaking that =(
        """
        items = Item.query.options(db.joinedload('user'),
                            db.joinedload('votes')
                ).filter_by(kind = 'post').order_by(Item.timestamp.desc())
        items_paged = items.paginate(page)
        # paginate() fetched one page, but ranking happens in Python, so sort
        # the whole result set and slice out the requested page by hand.
        start = items_paged.per_page * (items_paged.page - 1)
        end = items_paged.per_page + start
        items_paged.items = sorted(items,
                                    key=lambda x: x.post_score,
                                    reverse=True)[start:end]
        return {'items' : items_paged.items,
                'has_next' : items_paged.has_next,
                'next_num' : items_paged.next_num,
                }

    @staticmethod
    @cache.memoize(60*5)
    def find_by_title(title, kind='page'):
        """ Find a page by title
        Replace _ with spaces. Used to make nice URLs
        """
        title = title.replace('_', ' ')
        item_query = Item.query.filter_by(kind=kind).filter(
                        func.lower(Item.title) == func.lower(title)).first_or_404()
        return item_query
| joehand/DataNews | data_news/frontend/models.py | Python | bsd-3-clause | 6,674 | [
"ADF"
] | e7ea4bdc018a3c692282a2b9acf61135fcb1fd969eaa1ba618ae2f268b5c670d |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2009 Prof. William H. Green (whgreen@mit.edu) and the
# RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
import math
import numpy
import os.path
import rmgpy.constants as constants
from rmgpy.cantherm.common import checkConformerEnergy
from rmgpy.statmech import IdealGasTranslation, NonlinearRotor, LinearRotor, HarmonicOscillator, Conformer
################################################################################
class QchemLog:
    """
    Represent an output file from Qchem. The attribute `path` refers to the
    location on disk of the Qchem output file of interest. Methods are provided
    to extract a variety of information into CanTherm classes and/or NumPy
    arrays.
    """

    def __init__(self, path):
        self.path = path

    def getNumberOfAtoms(self):
        """
        Return the number of atoms in the molecular configuration used in
        the Qchem output file.
        """
        Natoms = 0
        # Open Qchem log file for parsing
        f = open(self.path, 'r')
        line = f.readline()
        while line != '' and Natoms == 0:
            # Automatically determine the number of atoms: count the rows of
            # the first "Standard Nuclear Orientation" table.
            if 'Standard Nuclear Orientation' in line and Natoms == 0:
                for i in range(3): line = f.readline()
                while '----------------------------------------------------' not in line:
                    Natoms += 1
                    line = f.readline()
            line = f.readline()
        # Close file when finished
        f.close()
        # Return the result
        return Natoms

    def loadForceConstantMatrix(self):
        """
        Return the force constant matrix (in Cartesian coordinates) from the
        QChem log file. If multiple such matrices are identified,
        only the last is returned. The units of the returned force constants
        are J/m^2. If no force constant matrix can be found in the log file,
        ``None`` is returned.
        """
        F = None
        Natoms = self.getNumberOfAtoms()
        Nrows = Natoms * 3
        f = open(self.path, 'r')
        line = f.readline()
        while line != '':
            # Read force constant matrix, printed in blocks of 6 columns
            if 'Final Hessian.' in line or 'Hessian of the SCF Energy' in line:
                F = numpy.zeros((Nrows,Nrows), numpy.float64)
                for i in range(int(math.ceil(Nrows / 6.0))):
                    # Header row
                    line = f.readline()
                    # Matrix element rows
                    for j in range(Nrows):
                        data = f.readline().split()
                        for k in range(len(data)-1):
                            F[j,i*6+k] = float(data[k+1])
                # Convert from atomic units (Hartree/Bohr_radius^2) to J/m^2
                F *= 4.35974417e-18 / 5.291772108e-11**2
            line = f.readline()
        # Close file when finished
        f.close()
        return F

    def loadGeometry(self):
        """
        Return the optimum geometry of the molecular configuration from the
        Qchem log file as a tuple ``(coord, number, mass)`` where ``coord`` is
        an N x 3 array of Cartesian coordinates, ``number`` an array of atomic
        numbers and ``mass`` a list of atomic masses in amu. If multiple such
        geometries are identified, only the first one after the final energy
        is returned.
        """
        atom = []; coord = []; number = [];

        try:
            f = open(self.path, 'r')
        except IndexError:
            print('File not found')

        f = open(self.path, 'r')
        line = f.readline()
        # Skip ahead to the final (converged) geometry, if the job finished.
        while line != '':
            if 'Final energy is' in line:
                print('found a sucessfully completed Qchem Geometry Optimization Job')
                line = f.readline()
                atom = []; coord = []
                break
            line = f.readline()

        found = 0
        while line != '':
            # Parse the first orientation table after the final energy.
            if 'Standard Nuclear Orientation' in line:
                found += 1
                for i in range(3): line = f.readline() # skip lines
                while '----------------------------------------------------' not in line:
                    data = line.split()
                    atom.append((data[1]))
                    coord.append([float(data[2]), float(data[3]), float(data[4])])
                    line = f.readline()
                # Read the next line in the file
                line = f.readline()
            # Read the next line in the file
            line = f.readline()
            if found ==1: break
        f.close()
        coord = numpy.array(coord, numpy.float64)

        # Assign appropriate mass to each atom in molecule
        # These values were taken from "Atomic Weights and Isotopic Compositions" v3.0 (July 2010) from NIST
        mass = [0]*len(atom)
        for i in range(len(atom)):
            if atom[i] == 'H':
                mass[i] = 1.00782503207
                number.append('1')
            elif atom[i] == 'C':
                mass[i] = 12.0
                number.append('6')
            elif atom[i] == 'N':
                mass[i] = 14.0030740048
                # BUG FIX: the old code also did `number[i] = 7`, which raised
                # IndexError for any molecule containing nitrogen because the
                # list is built by appending one entry per atom.
                number.append('7')
            elif atom[i] == 'O':
                mass[i] = 15.99491461956
                number.append('8')
            elif atom[i] == 'P':
                mass[i] = 30.97376163
                number.append('15')
            elif atom[i] == 'S':
                mass[i] = 31.97207100
                number.append('16')
            elif atom[i] == 'Cl':
                mass[i] = 35.4527
                number.append('17')
            else:
                # BUG FIX: the old format spec `{0:d}` raised ValueError when
                # applied to the string element symbol.
                print('Atomic atom {0} not yet supported in loadGeometry().'.format(atom[i]))
        # `int` (not the removed `numpy.int` alias) as the dtype; numpy parses
        # the digit strings into integers.
        number = numpy.array(number, int)
        return coord, number, mass

    def loadConformer(self, symmetry=None, spinMultiplicity=None, opticalIsomers=1):
        """
        Load the molecular degree of freedom data from a log file created as
        the result of a Qchem "Freq" calculation. As
        Qchem's guess of the external symmetry number is not always correct,
        you can use the `symmetry` parameter to substitute your own value; if
        not provided, the value in the Qchem output file will be adopted.
        Returns a `Conformer` whose E0 includes the (unscaled) ZPE.
        """
        modes = []; freq = []; mmass = []; rot = []
        E0 = 0.0
        f = open(self.path, 'r')
        line = f.readline()
        while line != '':
            # The data we want is in the Thermochemistry section of the output
            if 'VIBRATIONAL ANALYSIS' in line:
                modes = []
                line = f.readline()
                while line != '':
                    # This marks the end of the thermochemistry section
                    if 'Thank you very much for using Q-Chem.' in line:
                        break
                    # Read vibrational modes
                    elif 'VIBRATIONAL FREQUENCIES (CM**-1)' in line:
                        frequencies = []
                        while 'STANDARD THERMODYNAMIC QUANTITIES AT' not in line:
                            if ' Frequency:' in line:
                                frequencies.extend([float(d) for d in line.split()[-3:]])
                            line = f.readline()
                        line = f.readline()
                        # If there is an imaginary frequency, remove it
                        if frequencies[0] < 0.0:
                            frequencies = frequencies[1:]
                        vibration = HarmonicOscillator(frequencies=(frequencies,"cm^-1"))
                        freq.append(vibration)
                    # Read molecular mass for external translational modes
                    elif 'Molecular Mass:' in line:
                        mass = float(line.split()[2])
                        translation = IdealGasTranslation(mass=(mass,"amu"))
                        mmass.append(translation)
                    # Read moments of inertia for external rotational modes, given in atomic units
                    elif 'Eigenvalues --' in line:
                        inertia = [float(d) for d in line.split()[-3:]]
                        # If the first eigenvalue is 0, the rotor is linear
                        if inertia[0] == 0.0:
                            inertia.remove(0.0)
                            for i in range(2):
                                inertia[i] *= (constants.a0/1e-10)**2
                            rotation = LinearRotor(inertia=(inertia,"amu*angstrom^2"), symmetry=symmetry)
                            rot.append(rotation)
                        else:
                            for i in range(3):
                                inertia[i] *= (constants.a0/1e-10)**2
                            rotation = NonlinearRotor(inertia=(inertia,"amu*angstrom^2"), symmetry=symmetry)
                            rot.append(rotation)
                    # Read Qchem's estimate of the external rotational symmetry number, which may very well be incorrect
                    elif 'Rotational Symmetry Number is' in line and symmetry is None:
                        symmetry = int(float(line.split()[4]))
                    elif 'Final energy is' in line:
                        E0 = float(line.split()[3]) * constants.E_h * constants.Na
                        print('energy is' + str(E0))
                    # Read ZPE and add to ground-state energy
                    # NEED TO MULTIPLY ZPE BY scaling factor!
                    elif 'Zero point vibrational energy:' in line:
                        # Qchem reports ZPE in kcal/mol; convert to J/mol
                        ZPE = float(line.split()[4]) * 4184
                        E0=E0+ZPE
                    # Read the next line in the file
                    line = f.readline()
            # Read the next line in the file
            line = f.readline()
        # Close file when finished
        f.close()
        # External translation, then rotation, then vibrations
        modes = mmass + rot + freq
        return Conformer(E0=(E0*0.001,"kJ/mol"), modes=modes, spinMultiplicity=spinMultiplicity, opticalIsomers=opticalIsomers)

    def loadEnergy(self,frequencyScaleFactor=1.):
        """
        Load the energy in J/mol from a Qchem log file. Only the last energy
        in the file is returned. The zero-point energy is *not* included in
        the returned value.
        """
        E0 = None
        f = open(self.path, 'r')
        line = f.readline()
        while line != '':
            if 'Final energy is' in line:
                # Convert Hartree/particle to J/mol
                E0 = float(line.split()[3]) * constants.E_h * constants.Na
                print('energy is' + str(E0))
            # Read the next line in the file
            line = f.readline()
        # Close file when finished
        f.close()
        if E0 is not None:
            return E0
        else:
            raise Exception('Unable to find energy in Qchem output file.')

    def loadZeroPointEnergy(self,frequencyScaleFactor=1.):
        """
        Load the unscaled zero-point energy in J/mol from a Qchem output file.
        """
        ZPE = None
        f = open(self.path, 'r')
        line = f.readline()
        while line != '':
            if 'Zero point vibrational energy' in line:
                # Qchem's ZPE is in kcal/mol; convert to J/mol
                ZPE = float(line.split()[4]) * 4184
                print('ZPE is' + str(ZPE))
            # Read the next line in the file
            line = f.readline()
        # Close file when finished
        f.close()
        if ZPE is not None:
            return ZPE
        else:
            raise Exception('Unable to find zero-point energy in Qchem output file.')

    def loadScanEnergies(self):
        """
        Extract the optimized energies in J/mol from a Qchem log file, e.g. the
        result of a Qchem "PES Scan" quantum chemistry calculation.
        Returns ``(Vlist, angle)``: energies relative to the minimum and a
        uniformly spaced angle grid over [0, 2*pi].
        """
        Vlist = []
        angle = []
        f = open(self.path, 'r')
        line = f.readline()
        while line != '':
            if 'Summary of potential scan:' in line:
                line = f.readline()
                print('found a sucessfully completed Qchem Job')
                while '-----------------' not in line:
                    values = [float(item) for item in line.split()]
                    angle.append(values[0])
                    Vlist.append(values[1])
                    # Read the next line in the file
                    line = f.readline()
            line = f.readline()
            if 'SCF failed to converge' in line:
                print('Qchem Job did not sucessfully complete: SCF failed to converge')
                break
        # Close file when finished
        print(' Assuming ' + os.path.basename(self.path) + ' is the output from a Qchem PES scan...')
        f.close()

        Vlist = numpy.array(Vlist, numpy.float64)
        # check to see if the scanlog indicates that one of your reacting species may not be the lowest energy conformer
        checkConformerEnergy(Vlist, self.path)

        # Adjust energies to be relative to minimum energy conformer
        # Also convert units from Hartree/particle to J/mol
        Vlist -= numpy.min(Vlist)
        Vlist *= constants.E_h * constants.Na
        angle = numpy.arange(0.0, 2*math.pi+0.00001, 2*math.pi/(len(Vlist)-1), numpy.float64)
        return Vlist, angle

    def loadNegativeFrequency(self):
        """
        Return the imaginary frequency from a transition state frequency
        calculation in cm^-1.
        """
        f = open(self.path, 'r')
        line = f.readline()
        while line != '':
            # Read imaginary frequency (first entry on the first Frequency row)
            if ' Frequency:' in line:
                frequency = float((line.split()[1]))
                break
            line = f.readline()
        # Close file when finished
        f.close()
        # Make sure the frequency is imaginary:
        if frequency < 0:
            return frequency
        else:
            raise Exception('Unable to find imaginary frequency in QChem output file.')
| comocheng/RMG-Py | rmgpy/cantherm/qchem.py | Python | mit | 17,000 | [
"Q-Chem"
] | a0d6f68ab9c2624476af0f63340a1939a4332569138964f333ba2e0a09517a18 |
#PPM from RC transmitters Calibrate Version 0.5
#
#PPMRC - PPM interpreter, calibrator and remapper
#Copyright (C) 2017-2018 Francesco Antonetti Lamorgese Passeri
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#PPMRC is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
import serial
import time
class ppmrc:
    """PPM interpreter, calibrator and remapper for RC transmitter input.

    Reads whitespace-separated per-channel values from an input serial port
    and can calibrate per-channel minimum/maximum boundaries for later
    remapping of the raw values into a target range.
    """

    def __init__(self, serial_in_port, serial_in_baud, serial_out_port,
                 serial_out_baud, number_of_channels=8):
        # NOTE: the defaulted parameter must come last -- the original
        # signature (default before required arguments) was a SyntaxError.
        self.licenseText = "\n\nPPMRC Copyright (C) 2017-2018 Francesco Antonetti \
Lamorgese Passeri\nThis program comes with ABSOLUTELY NO WARRANTY; for details \
visit http://fsf.org/.\nThis is free software, and you are welcome to \
redistribute it under certain conditions; visit http://fsf.org/ \
for details.\n\n"
        self.numberOfChannels = number_of_channels
        # Per-channel bounds filled by calibrate():
        # [min_ch0..min_chN-1, max_ch0..max_chN-1]
        self.boundaries = []
        self.serialIn = serial.Serial()
        self.serialOut = serial.Serial()
        self.serialIn.port = serial_in_port
        self.serialIn.baudrate = serial_in_baud
        self.serialOut.port = serial_out_port
        self.serialOut.baudrate = serial_out_baud
        self.serialIn.open()
        self.serialOut.open()

    def remap(self, x, in_min, in_max, out_min, out_max):
        """Linearly map ``x`` from [in_min, in_max] to [out_min, out_max].

        Same as Arduino's map() function, but returns a float.
        """
        return float((x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min)

    def readValues(self):
        """Block until a fully numeric line arrives on the input serial port.

        The input is one line of whitespace-separated non-negative integers,
        one per channel; it is returned as a list of floats. Lines containing
        any non-numeric token are discarded and reading continues.
        """
        while True:
            self.serialInput = self.serialIn.readline()
            tokens = self.serialInput.split()
            values = []
            valid = True
            for token in tokens:
                if token.isdigit():
                    values.append(float(token))
                else:
                    valid = False
                    break
            if valid:
                return values

    def calibrate(self, tests, wait, lowerBound, upperBound, numberOfChannels):
        """Determine per-channel min/max boundaries from live input.

        Asks the user to hold all axes at their minimum, samples ``tests``
        valid readings to find each channel's minimum, then repeats at the
        maximum. Results are stored in ``self.boundaries`` as
        ``[min_ch0..min_chN-1, max_ch0..max_chN-1]``, clamped to
        [lowerBound, upperBound]. ``wait`` is the number of seconds the user
        is given to move the axes before sampling starts.
        """
        # Start from the widest possible bounds; they are narrowed below.
        # (Re-assigning, rather than extending, makes re-calibration safe.)
        self.boundaries = ([float(lowerBound)] * numberOfChannels +
                           [float(upperBound)] * numberOfChannels)

        print("Move all axes to minimum")
        time.sleep(wait)
        samples = 0
        while samples < tests:
            chVal = self.readValues()  # original called readValues() -> NameError
            if len(chVal) != numberOfChannels:
                # Malformed reading: retry. (The original's `i -= 1` inside a
                # for loop had no effect, silently dropping samples.)
                continue
            if samples == 0:
                # The first reading seeds the minima, but only if every
                # channel is above the hard lower bound; otherwise discard
                # the whole reading and retry.
                if all(float(ch) > lowerBound for ch in chVal):
                    for j, ch in enumerate(chVal):
                        self.boundaries[j] = float(ch)
                else:
                    continue
            else:
                for j, ch in enumerate(chVal):
                    if float(ch) < self.boundaries[j]:
                        self.boundaries[j] = max(float(ch), float(lowerBound))
            samples += 1

        print("Move all axes to maximum")
        time.sleep(wait)
        samples = 0
        while samples < tests:
            chVal = self.readValues()
            if len(chVal) != numberOfChannels:
                # The original max phase lacked this consistency check.
                continue
            if samples == 0:
                if all(float(ch) < upperBound for ch in chVal):
                    for j, ch in enumerate(chVal):
                        self.boundaries[numberOfChannels + j] = float(ch)
                else:
                    continue
            else:
                for j, ch in enumerate(chVal):
                    if float(ch) > self.boundaries[numberOfChannels + j]:
                        self.boundaries[numberOfChannels + j] = min(float(ch), float(upperBound))
            samples += 1
| antlampas/ppmrc | ppmrc.py | Python | gpl-3.0 | 4,958 | [
"VisIt"
] | 9d454666558c9bbeaa22e1b7bba93604ead439f04de6dc9a35bd58a7f7e513b5 |
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Eric Martin <eric@ericmart.in>
# Giorgio Patrini <giorgio.patrini@anu.edu.au>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils import deprecated
from ..utils.extmath import row_norms
from ..utils.extmath import _incremental_mean_and_var
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.fixes import bincount
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale,
mean_variance_axis, incr_mean_variance_axis,
min_max_axis)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
# Use the Python-3-style iterator variants of zip/map/range throughout this
# module on both Python versions (via the six compatibility layer).
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
# Public names exported by this module.
__all__ = [
    'Binarizer',
    'KernelCenterer',
    'MinMaxScaler',
    'MaxAbsScaler',
    'Normalizer',
    'OneHotEncoder',
    'RobustScaler',
    'StandardScaler',
    'add_dummy_feature',
    'binarize',
    'normalize',
    'scale',
    'robust_scale',
    'maxabs_scale',
    'minmax_scale',
]
# Shared warning text for the deprecated 1d-array input path
# (1d input deprecated in 0.17, removed in 0.19).
DEPRECATION_MSG_1D = (
    "Passing 1d arrays as data is deprecated in 0.17 and will "
    "raise ValueError in 0.19. Reshape your data either using "
    "X.reshape(-1, 1) if your data has a single feature or "
    "X.reshape(1, -1) if it contains a single sample."
)
def _handle_zeros_in_scale(scale, copy=True):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, np.ndarray):
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[scale == 0.0] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
    """Standardize a dataset along any axis
    Center to the mean and component wise scale to unit variance.
    Read more in the :ref:`User Guide <preprocessing_scaler>`.
    Parameters
    ----------
    X : {array-like, sparse matrix}
        The data to center and scale.
    axis : int (0 by default)
        axis used to compute the means and standard deviations along. If 0,
        independently standardize each feature, otherwise (if 1) standardize
        each sample.
    with_mean : boolean, True by default
        If True, center the data before scaling.
    with_std : boolean, True by default
        If True, scale the data to unit variance (or equivalently,
        unit standard deviation).
    copy : boolean, optional, default True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSC matrix and if axis is 1).
    Returns
    -------
    X : {array-like, sparse matrix}
        The transformed (standardized) data.
    Notes
    -----
    This implementation will refuse to center scipy.sparse matrices
    since it would make them non-sparse and would potentially crash the
    program with memory exhaustion problems.
    Instead the caller is expected to either set explicitly
    `with_mean=False` (in that case, only variance scaling will be
    performed on the features of the CSC matrix) or to call `X.toarray()`
    if he/she expects the materialized dense array to fit in memory.
    To avoid memory copy the caller should pass a CSC matrix.
    See also
    --------
    :class:`sklearn.preprocessing.StandardScaler` to perform centering and
    scaling using the ``Transformer`` API (e.g. as part of a preprocessing
    :class:`sklearn.pipeline.Pipeline`)
    """
    X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False,
                    warn_on_dtype=True, estimator='the scale function',
                    dtype=FLOAT_DTYPES)
    if sparse.issparse(X):
        # Sparse path: only per-column variance scaling is possible, since
        # centering would densify the matrix.
        if with_mean:
            raise ValueError(
                "Cannot center sparse matrices: pass `with_mean=False` instead"
                " See docstring for motivation and alternatives.")
        if axis != 0:
            raise ValueError("Can only scale sparse matrix on axis=0, "
                             " got axis=%d" % axis)
        if with_std:
            _, var = mean_variance_axis(X, axis=0)
            # Constant columns get a unit scale so the division is a no-op.
            var = _handle_zeros_in_scale(var, copy=False)
            inplace_column_scale(X, 1 / np.sqrt(var))
    else:
        X = np.asarray(X)
        if with_mean:
            mean_ = np.mean(X, axis)
        if with_std:
            scale_ = np.std(X, axis)
        # Xr is a view on the original array that enables easy use of
        # broadcasting on the axis in which we are interested in
        Xr = np.rollaxis(X, axis)
        if with_mean:
            Xr -= mean_
            mean_1 = Xr.mean(axis=0)
            # Verify that mean_1 is 'close to zero'. If X contains very
            # large values, mean_1 can also be very large, due to a lack of
            # precision of mean_. In this case, a pre-scaling of the
            # concerned feature is efficient, for instance by its mean or
            # maximum.
            if not np.allclose(mean_1, 0):
                warnings.warn("Numerical issues were encountered "
                              "when centering the data "
                              "and might not be solved. Dataset may "
                              "contain too large values. You may need "
                              "to prescale your features.")
                Xr -= mean_1
        if with_std:
            scale_ = _handle_zeros_in_scale(scale_, copy=False)
            Xr /= scale_
            if with_mean:
                mean_2 = Xr.mean(axis=0)
                # If mean_2 is not 'close to zero', it comes from the fact that
                # scale_ is very small so that mean_2 = mean_1/scale_ > 0, even
                # if mean_1 was close to zero. The problem is thus essentially
                # due to the lack of precision of mean_. A solution is then to
                # subtract the mean again:
                if not np.allclose(mean_2, 0):
                    warnings.warn("Numerical issues were encountered "
                                  "when scaling the data "
                                  "and might not be solved. The standard "
                                  "deviation of the data is probably "
                                  "very close to 0. ")
                    Xr -= mean_2
    return X
class MinMaxScaler(BaseEstimator, TransformerMixin):
    """Transforms features by scaling each feature to a given range.
    This estimator scales and translates each feature individually such
    that it is in the given range on the training set, i.e. between
    zero and one.
    The transformation is given by::
        X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
        X_scaled = X_std * (max - min) + min
    where min, max = feature_range.
    This transformation is often used as an alternative to zero mean,
    unit variance scaling.
    Read more in the :ref:`User Guide <preprocessing_scaler>`.
    Parameters
    ----------
    feature_range: tuple (min, max), default=(0, 1)
        Desired range of transformed data.
    copy : boolean, optional, default True
        Set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array).
    Attributes
    ----------
    min_ : ndarray, shape (n_features,)
        Per feature adjustment for minimum.
    scale_ : ndarray, shape (n_features,)
        Per feature relative scaling of the data.
        .. versionadded:: 0.17
           *scale_* attribute.
    data_min_ : ndarray, shape (n_features,)
        Per feature minimum seen in the data
        .. versionadded:: 0.17
           *data_min_* instead of deprecated *data_min*.
    data_max_ : ndarray, shape (n_features,)
        Per feature maximum seen in the data
        .. versionadded:: 0.17
           *data_max_* instead of deprecated *data_max*.
    data_range_ : ndarray, shape (n_features,)
        Per feature range ``(data_max_ - data_min_)`` seen in the data
        .. versionadded:: 0.17
           *data_range_* instead of deprecated *data_range*.
    """
    def __init__(self, feature_range=(0, 1), copy=True):
        self.feature_range = feature_range
        self.copy = copy
    @property
    @deprecated("Attribute data_range will be removed in "
                "0.19. Use ``data_range_`` instead")
    def data_range(self):
        # Backward-compatibility alias for the trailing-underscore attribute.
        return self.data_range_
    @property
    @deprecated("Attribute data_min will be removed in "
                "0.19. Use ``data_min_`` instead")
    def data_min(self):
        # Backward-compatibility alias for the trailing-underscore attribute.
        return self.data_min_
    def _reset(self):
        """Reset internal data-dependent state of the scaler, if necessary.
        __init__ parameters are not touched.
        """
        # Checking one attribute is enough, because they are all set together
        # in partial_fit
        if hasattr(self, 'scale_'):
            del self.scale_
            del self.min_
            del self.n_samples_seen_
            del self.data_min_
            del self.data_max_
            del self.data_range_
    def fit(self, X, y=None):
        """Compute the minimum and maximum to be used for later scaling.
        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data used to compute the per-feature minimum and maximum
            used for later scaling along the features axis.
        """
        # Reset internal state before fitting
        self._reset()
        return self.partial_fit(X, y)
    def partial_fit(self, X, y=None):
        """Online computation of min and max on X for later scaling.
        All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to very large number of `n_samples`
        or because X is read from a continuous stream.
        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data used to compute the mean and standard deviation
            used for later scaling along the features axis.
        y : Passthrough for ``Pipeline`` compatibility.
        """
        feature_range = self.feature_range
        if feature_range[0] >= feature_range[1]:
            raise ValueError("Minimum of desired feature range must be smaller"
                             " than maximum. Got %s." % str(feature_range))
        if sparse.issparse(X):
            raise TypeError("MinMaxScaler does no support sparse input. "
                            "You may consider to use MaxAbsScaler instead.")
        X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
                        estimator=self, dtype=FLOAT_DTYPES)
        if X.ndim == 1:
            warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
        data_min = np.min(X, axis=0)
        data_max = np.max(X, axis=0)
        # First pass
        if not hasattr(self, 'n_samples_seen_'):
            self.n_samples_seen_ = X.shape[0]
        # Next steps: merge the batch extrema with those already seen.
        else:
            data_min = np.minimum(self.data_min_, data_min)
            data_max = np.maximum(self.data_max_, data_max)
            self.n_samples_seen_ += X.shape[0]
        data_range = data_max - data_min
        # scale_/min_ implement X_scaled = X * scale_ + min_; constant
        # features (zero range) are given a unit scale to avoid div-by-zero.
        self.scale_ = ((feature_range[1] - feature_range[0]) /
                       _handle_zeros_in_scale(data_range))
        self.min_ = feature_range[0] - data_min * self.scale_
        self.data_min_ = data_min
        self.data_max_ = data_max
        self.data_range_ = data_range
        return self
    def transform(self, X):
        """Scaling features of X according to feature_range.
        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            Input data that will be transformed.
        """
        check_is_fitted(self, 'scale_')
        X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES)
        if X.ndim == 1:
            warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
        # In-place affine map: X * scale_ + min_
        X *= self.scale_
        X += self.min_
        return X
    def inverse_transform(self, X):
        """Undo the scaling of X according to feature_range.
        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            Input data that will be transformed. It cannot be sparse.
        """
        check_is_fitted(self, 'scale_')
        X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES)
        if X.ndim == 1:
            warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
        # Invert the affine map applied by transform.
        X -= self.min_
        X /= self.scale_
        return X
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
    """Transforms features by scaling each feature to a given range.

    Each feature (or each sample when ``axis=1``) is scaled and translated
    individually so that it lies in ``feature_range`` on the given data::

        X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
        X_scaled = X_std * (max - min) + min

    where min, max = feature_range. This is a functional interface to
    :class:`sklearn.preprocessing.MinMaxScaler`, often used as an
    alternative to zero-mean, unit-variance scaling.

    Read more in the :ref:`User Guide <preprocessing_scaler>`.

    .. versionadded:: 0.17

    Parameters
    ----------
    feature_range: tuple (min, max), default=(0, 1)
        Desired range of transformed data.
    axis : int (0 by default)
        axis used to scale along. If 0, independently scale each feature,
        otherwise (if 1) scale each sample.
    copy : boolean, optional, default is True
        Set to False to perform inplace scaling and avoid a copy (if the input
        is already a numpy array).
    """
    # 1D input was deprecated for scaler objects in 0.17, but this function
    # keeps accepting it for retro-compatibility: promote to a single-column
    # 2D array, scale, then flatten back. Copying, when requested, happens
    # inside the scaler object itself, hence copy=False here.
    X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True,
                    dtype=FLOAT_DTYPES)
    was_1d = (X.ndim == 1)
    if was_1d:
        X = X.reshape(X.shape[0], 1)
    scaler = MinMaxScaler(feature_range=feature_range, copy=copy)
    if axis == 0:
        X = scaler.fit_transform(X)
    else:
        # Scale each sample: transpose, scale per column, transpose back.
        X = scaler.fit_transform(X.T).T
    return X.ravel() if was_1d else X
class StandardScaler(BaseEstimator, TransformerMixin):
    """Standardize features by removing the mean and scaling to unit variance
    Centering and scaling happen independently on each feature by computing
    the relevant statistics on the samples in the training set. Mean and
    standard deviation are then stored to be used on later data using the
    `transform` method.
    Standardization of a dataset is a common requirement for many
    machine learning estimators: they might behave badly if the
    individual feature do not more or less look like standard normally
    distributed data (e.g. Gaussian with 0 mean and unit variance).
    For instance many elements used in the objective function of
    a learning algorithm (such as the RBF kernel of Support Vector
    Machines or the L1 and L2 regularizers of linear models) assume that
    all features are centered around 0 and have variance in the same
    order. If a feature has a variance that is orders of magnitude larger
    that others, it might dominate the objective function and make the
    estimator unable to learn from other features correctly as expected.
    This scaler can also be applied to sparse CSR or CSC matrices by passing
    `with_mean=False` to avoid breaking the sparsity structure of the data.
    Read more in the :ref:`User Guide <preprocessing_scaler>`.
    Parameters
    ----------
    with_mean : boolean, True by default
        If True, center the data before scaling.
        This does not work (and will raise an exception) when attempted on
        sparse matrices, because centering them entails building a dense
        matrix which in common use cases is likely to be too large to fit in
        memory.
    with_std : boolean, True by default
        If True, scale the data to unit variance (or equivalently,
        unit standard deviation).
    copy : boolean, optional, default True
        If False, try to avoid a copy and do inplace scaling instead.
        This is not guaranteed to always work inplace; e.g. if the data is
        not a NumPy array or scipy.sparse CSR matrix, a copy may still be
        returned.
    Attributes
    ----------
    scale_ : ndarray, shape (n_features,)
        Per feature relative scaling of the data.
        .. versionadded:: 0.17
           *scale_* is recommended instead of deprecated *std_*.
    mean_ : array of floats with shape [n_features]
        The mean value for each feature in the training set.
    var_ : array of floats with shape [n_features]
        The variance for each feature in the training set. Used to compute
        `scale_`
    n_samples_seen_ : int
        The number of samples processed by the estimator. Will be reset on
        new calls to fit, but increments across ``partial_fit`` calls.
    See also
    --------
    :func:`sklearn.preprocessing.scale` to perform centering and
    scaling without using the ``Transformer`` object oriented API
    :class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
    to further remove the linear correlation across features.
    """
    def __init__(self, copy=True, with_mean=True, with_std=True):
        self.with_mean = with_mean
        self.with_std = with_std
        self.copy = copy
    @property
    @deprecated("Attribute ``std_`` will be removed in 0.19. Use ``scale_`` instead")
    def std_(self):
        # Backward-compatibility alias for the renamed scale_ attribute.
        return self.scale_
    def _reset(self):
        """Reset internal data-dependent state of the scaler, if necessary.
        __init__ parameters are not touched.
        """
        # Checking one attribute is enough, because they are all set together
        # in partial_fit
        if hasattr(self, 'scale_'):
            del self.scale_
            del self.n_samples_seen_
            del self.mean_
            del self.var_
    def fit(self, X, y=None):
        """Compute the mean and std to be used for later scaling.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data used to compute the mean and standard deviation
            used for later scaling along the features axis.
        y: Passthrough for ``Pipeline`` compatibility.
        """
        # Reset internal state before fitting
        self._reset()
        return self.partial_fit(X, y)
    def partial_fit(self, X, y=None):
        """Online computation of mean and std on X for later scaling.
        All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to very large number of `n_samples`
        or because X is read from a continuous stream.
        The algorithm for incremental mean and std is given in Equation 1.5a,b
        in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms
        for computing the sample variance: Analysis and recommendations."
        The American Statistician 37.3 (1983): 242-247:
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data used to compute the mean and standard deviation
            used for later scaling along the features axis.
        y: Passthrough for ``Pipeline`` compatibility.
        """
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        ensure_2d=False, warn_on_dtype=True,
                        estimator=self, dtype=FLOAT_DTYPES)
        if X.ndim == 1:
            warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
        # Even in the case of `with_mean=False`, we update the mean anyway
        # This is needed for the incremental computation of the var
        # See incr_mean_variance_axis and _incremental_mean_variance_axis
        if sparse.issparse(X):
            if self.with_mean:
                raise ValueError(
                    "Cannot center sparse matrices: pass `with_mean=False` "
                    "instead. See docstring for motivation and alternatives.")
            if self.with_std:
                # First pass
                if not hasattr(self, 'n_samples_seen_'):
                    self.mean_, self.var_ = mean_variance_axis(X, axis=0)
                    self.n_samples_seen_ = X.shape[0]
                # Next passes: merge batch statistics with the running ones.
                else:
                    self.mean_, self.var_, self.n_samples_seen_ = \
                        incr_mean_variance_axis(X, axis=0,
                                                last_mean=self.mean_,
                                                last_var=self.var_,
                                                last_n=self.n_samples_seen_)
            else:
                self.mean_ = None
                self.var_ = None
        else:
            # First pass: initialize the running statistics.
            if not hasattr(self, 'n_samples_seen_'):
                self.mean_ = .0
                self.n_samples_seen_ = 0
                if self.with_std:
                    self.var_ = .0
                else:
                    self.var_ = None
            self.mean_, self.var_, self.n_samples_seen_ = \
                _incremental_mean_and_var(X, self.mean_, self.var_,
                                          self.n_samples_seen_)
        if self.with_std:
            # Constant features (zero variance) get a unit scale.
            self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))
        else:
            self.scale_ = None
        return self
    def transform(self, X, y=None, copy=None):
        """Perform standardization by centering and scaling
        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data used to scale along the features axis.
        """
        check_is_fitted(self, 'scale_')
        copy = copy if copy is not None else self.copy
        X = check_array(X, accept_sparse='csr', copy=copy,
                        ensure_2d=False, warn_on_dtype=True,
                        estimator=self, dtype=FLOAT_DTYPES)
        if X.ndim == 1:
            warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
        if sparse.issparse(X):
            if self.with_mean:
                raise ValueError(
                    "Cannot center sparse matrices: pass `with_mean=False` "
                    "instead. See docstring for motivation and alternatives.")
            if self.scale_ is not None:
                # Sparse path: column-wise in-place scaling only.
                inplace_column_scale(X, 1 / self.scale_)
        else:
            if self.with_mean:
                X -= self.mean_
            if self.with_std:
                X /= self.scale_
        return X
    def inverse_transform(self, X, copy=None):
        """Scale back the data to the original representation
        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data used to scale along the features axis.
        """
        check_is_fitted(self, 'scale_')
        copy = copy if copy is not None else self.copy
        if sparse.issparse(X):
            if self.with_mean:
                raise ValueError(
                    "Cannot uncenter sparse matrices: pass `with_mean=False` "
                    "instead See docstring for motivation and alternatives.")
            if not sparse.isspmatrix_csr(X):
                # Conversion already produces a new matrix, so a further
                # copy would be redundant.
                X = X.tocsr()
                copy = False
            if copy:
                X = X.copy()
            if self.scale_ is not None:
                inplace_column_scale(X, self.scale_)
        else:
            X = np.asarray(X)
            if copy:
                X = X.copy()
            # Apply the inverse of transform: multiply back, then re-add mean.
            if self.with_std:
                X *= self.scale_
            if self.with_mean:
                X += self.mean_
        return X
class MaxAbsScaler(BaseEstimator, TransformerMixin):
    """Scale each feature by its maximum absolute value.
    This estimator scales and translates each feature individually such
    that the maximal absolute value of each feature in the
    training set will be 1.0. It does not shift/center the data, and
    thus does not destroy any sparsity.
    This scaler can also be applied to sparse CSR or CSC matrices.
    .. versionadded:: 0.17
    Parameters
    ----------
    copy : boolean, optional, default is True
        Set to False to perform inplace scaling and avoid a copy (if the input
        is already a numpy array).
    Attributes
    ----------
    scale_ : ndarray, shape (n_features,)
        Per feature relative scaling of the data.
        .. versionadded:: 0.17
           *scale_* attribute.
    max_abs_ : ndarray, shape (n_features,)
        Per feature maximum absolute value.
    n_samples_seen_ : int
        The number of samples processed by the estimator. Will be reset on
        new calls to fit, but increments across ``partial_fit`` calls.
    """
    def __init__(self, copy=True):
        self.copy = copy
    def _reset(self):
        """Reset internal data-dependent state of the scaler, if necessary.
        __init__ parameters are not touched.
        """
        # Checking one attribute is enough, because they are all set together
        # in partial_fit
        if hasattr(self, 'scale_'):
            del self.scale_
            del self.n_samples_seen_
            del self.max_abs_
    def fit(self, X, y=None):
        """Compute the maximum absolute value to be used for later scaling.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data used to compute the per-feature minimum and maximum
            used for later scaling along the features axis.
        """
        # Reset internal state before fitting
        self._reset()
        return self.partial_fit(X, y)
    def partial_fit(self, X, y=None):
        """Online computation of max absolute value of X for later scaling.
        All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to very large number of `n_samples`
        or because X is read from a continuous stream.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data used to compute the mean and standard deviation
            used for later scaling along the features axis.
        y: Passthrough for ``Pipeline`` compatibility.
        """
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
        if X.ndim == 1:
            warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
        if sparse.issparse(X):
            # For sparse input the per-column max absolute value is derived
            # from the column minima and maxima without densifying.
            mins, maxs = min_max_axis(X, axis=0)
            max_abs = np.maximum(np.abs(mins), np.abs(maxs))
        else:
            max_abs = np.abs(X).max(axis=0)
        # First pass
        if not hasattr(self, 'n_samples_seen_'):
            self.n_samples_seen_ = X.shape[0]
        # Next passes: merge the batch maxima with those already seen.
        else:
            max_abs = np.maximum(self.max_abs_, max_abs)
            self.n_samples_seen_ += X.shape[0]
        self.max_abs_ = max_abs
        # All-zero features get a unit scale so the division is a no-op.
        self.scale_ = _handle_zeros_in_scale(max_abs)
        return self
    def transform(self, X, y=None):
        """Scale the data
        Parameters
        ----------
        X : {array-like, sparse matrix}
            The data that should be scaled.
        """
        check_is_fitted(self, 'scale_')
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
        if X.ndim == 1:
            warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
        if sparse.issparse(X):
            inplace_column_scale(X, 1.0 / self.scale_)
        else:
            X /= self.scale_
        return X
    def inverse_transform(self, X):
        """Scale back the data to the original representation
        Parameters
        ----------
        X : {array-like, sparse matrix}
            The data that should be transformed back.
        """
        check_is_fitted(self, 'scale_')
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
        if X.ndim == 1:
            warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
        if sparse.issparse(X):
            inplace_column_scale(X, self.scale_)
        else:
            X *= self.scale_
        return X
def maxabs_scale(X, axis=0, copy=True):
    """Scale each feature to the [-1, 1] range without breaking the sparsity.

    Each feature (or each sample when ``axis=1``) is divided by its maximum
    absolute value in the given data, so the values end up in [-1, 1]. No
    centering is performed, hence sparse CSR/CSC input is supported. This is
    a functional interface to :class:`sklearn.preprocessing.MaxAbsScaler`.

    Parameters
    ----------
    axis : int (0 by default)
        axis used to scale along. If 0, independently scale each feature,
        otherwise (if 1) scale each sample.
    copy : boolean, optional, default is True
        Set to False to perform inplace scaling and avoid a copy (if the input
        is already a numpy array).
    """
    # 1D input was deprecated for scaler objects in 0.17, but this function
    # keeps accepting it for retro-compatibility: promote to a single-column
    # 2D array, scale, then flatten back. Copying, when requested, happens
    # inside the scaler object itself, hence copy=False here.
    X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
                    ensure_2d=False, dtype=FLOAT_DTYPES)
    was_1d = (X.ndim == 1)
    if was_1d:
        X = X.reshape(X.shape[0], 1)
    scaler = MaxAbsScaler(copy=copy)
    if axis == 0:
        X = scaler.fit_transform(X)
    else:
        # Scale each sample: transpose, scale per column, transpose back.
        X = scaler.fit_transform(X.T).T
    return X.ravel() if was_1d else X
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
.. versionadded:: 0.17
*scale_* attribute.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
http://en.wikipedia.org/wiki/Median_(statistics)
http://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
    """Validate input and forbid centering of sparse matrices.

    Parameters
    ----------
    X : array-like or sparse matrix
        The data to validate.
    copy : boolean
        Whether to force a copy during validation.

    Returns
    -------
    X : ndarray or sparse matrix
        The validated input.

    Raises
    ------
    ValueError
        If ``X`` is sparse and ``with_centering`` is True (centering a
        sparse matrix would densify it).
    """
    # Honor the ``copy`` argument; previously it was silently ignored
    # and ``self.copy`` was always used instead.
    X = check_array(X, accept_sparse=('csr', 'csc'), copy=copy,
                    ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
    if X.ndim == 1:
        warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
    if sparse.issparse(X):
        if self.with_centering:
            raise ValueError(
                "Cannot center sparse matrices: use `with_centering=False`"
                " instead. See docstring for motivation and alternatives.")
    return X
def fit(self, X, y=None):
    """Compute the median and quantiles to be used for scaling.

    Parameters
    ----------
    X : array-like, shape [n_samples, n_features]
        The data used to compute the median and quantiles
        used for later scaling along the features axis.

    Returns
    -------
    self : object
        The fitted scaler.
    """
    if sparse.issparse(X):
        raise TypeError("RobustScaler cannot be fitted on sparse inputs")
    # _check_array already emits the 1-D deprecation warning; the
    # redundant second warning that used to follow here was removed.
    X = self._check_array(X, self.copy)
    if self.with_centering:
        self.center_ = np.median(X, axis=0)
    if self.with_scaling:
        # Interquartile range (Q3 - Q1) per feature; zero ranges are
        # replaced so constant features cannot cause division by zero.
        q = np.percentile(X, (25, 75), axis=0)
        self.scale_ = _handle_zeros_in_scale(q[1] - q[0], copy=False)
    return self
def transform(self, X, y=None):
    """Center and scale the data.

    Parameters
    ----------
    X : {array-like, sparse matrix}
        The data used to scale along the specified axis.

    Returns
    -------
    X : {ndarray, sparse matrix}
        The transformed data.
    """
    if self.with_centering:
        check_is_fitted(self, 'center_')
    if self.with_scaling:
        check_is_fitted(self, 'scale_')
    # _check_array already warns about deprecated 1-D input; the
    # duplicate warning that used to follow here was removed.
    X = self._check_array(X, self.copy)
    if sparse.issparse(X):
        # Sparse input can only be scaled; centering would densify it
        # (enforced by _check_array).
        if self.with_scaling:
            inplace_column_scale(X, 1.0 / self.scale_)
    else:
        if self.with_centering:
            X -= self.center_
        if self.with_scaling:
            X /= self.scale_
    return X
def inverse_transform(self, X):
    """Scale back the data to the original representation.

    Parameters
    ----------
    X : {array-like, sparse matrix}
        The data used to scale along the specified axis.

    Returns
    -------
    X : {ndarray, sparse matrix}
        The de-transformed data.
    """
    if self.with_centering:
        check_is_fitted(self, 'center_')
    if self.with_scaling:
        check_is_fitted(self, 'scale_')
    # _check_array already warns about deprecated 1-D input; the
    # duplicate warning that used to follow here was removed.
    X = self._check_array(X, self.copy)
    if sparse.issparse(X):
        if self.with_scaling:
            inplace_column_scale(X, self.scale_)
    else:
        # Invert transform: undo scaling first, then undo centering
        # (reverse order of the forward transform).
        if self.with_scaling:
            X *= self.scale_
        if self.with_centering:
            X += self.center_
    return X
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
    """Standardize a dataset along any axis

    Center to the median and component wise scale
    according to the interquartile range.

    Read more in the :ref:`User Guide <preprocessing_scaler>`.

    Parameters
    ----------
    X : array-like
        The data to center and scale.

    axis : int (0 by default)
        axis used to compute the medians and IQR along. If 0,
        independently scale each feature, otherwise (if 1) scale
        each sample.

    with_centering : boolean, True by default
        If True, center the data before scaling.

    with_scaling : boolean, True by default
        If True, scale the data to the interquartile range.

    copy : boolean, optional, default is True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix and if axis is 1).

    Notes
    -----
    This implementation will refuse to center scipy.sparse matrices
    since it would make them non-sparse and would potentially crash the
    program with memory exhaustion problems.

    Instead the caller is expected to either set explicitly
    `with_centering=False` (in that case, only variance scaling will be
    performed on the features of the CSR matrix) or to call `X.toarray()`
    if he/she expects the materialized dense array to fit in memory.

    To avoid memory copy the caller should pass a CSR matrix.

    See also
    --------
    :class:`sklearn.preprocessing.RobustScaler` to perform centering and
    scaling using the ``Transformer`` API (e.g. as part of a preprocessing
    :class:`sklearn.pipeline.Pipeline`)
    """
    s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
                     copy=copy)
    if axis == 0:
        return s.fit_transform(X)
    else:
        # Scale samples instead of features by transposing in and out.
        return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
    """Generate polynomial and interaction features.

    Generate a new feature matrix consisting of all polynomial combinations
    of the features with degree less than or equal to the specified degree.
    For example, if an input sample is two dimensional and of the form
    [a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].

    Parameters
    ----------
    degree : integer
        The degree of the polynomial features. Default = 2.

    interaction_only : boolean, default = False
        If true, only interaction features are produced: features that are
        products of at most ``degree`` *distinct* input features (so not
        ``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).

    include_bias : boolean
        If True (default), then include a bias column, the feature in which
        all polynomial powers are zero (i.e. a column of ones - acts as an
        intercept term in a linear model).

    Examples
    --------
    >>> X = np.arange(6).reshape(3, 2)
    >>> X
    array([[0, 1],
           [2, 3],
           [4, 5]])
    >>> poly = PolynomialFeatures(2)
    >>> poly.fit_transform(X)
    array([[  1.,   0.,   1.,   0.,   0.,   1.],
           [  1.,   2.,   3.,   4.,   6.,   9.],
           [  1.,   4.,   5.,  16.,  20.,  25.]])
    >>> poly = PolynomialFeatures(interaction_only=True)
    >>> poly.fit_transform(X)
    array([[  1.,   0.,   1.,   0.],
           [  1.,   2.,   3.,   6.],
           [  1.,   4.,   5.,  20.]])

    Attributes
    ----------
    powers_ : array, shape (n_output_features, n_input_features)
        powers_[i, j] is the exponent of the jth input in the ith output.

    n_input_features_ : int
        The total number of input features.

    n_output_features_ : int
        The total number of polynomial output features. The number of output
        features is computed by iterating over all suitably sized combinations
        of input features.

    Notes
    -----
    Be aware that the number of features in the output array scales
    polynomially in the number of features of the input array, and
    exponentially in the degree. High degrees can cause overfitting.

    See :ref:`examples/linear_model/plot_polynomial_interpolation.py
    <example_linear_model_plot_polynomial_interpolation.py>`
    """
    def __init__(self, degree=2, interaction_only=False, include_bias=True):
        self.degree = degree
        self.interaction_only = interaction_only
        self.include_bias = include_bias

    @staticmethod
    def _combinations(n_features, degree, interaction_only, include_bias):
        # Plain combinations exclude repeated indices (no powers >= 2);
        # combinations with replacement allow them.
        comb = (combinations if interaction_only else combinations_w_r)
        start = int(not include_bias)
        return chain.from_iterable(comb(range(n_features), i)
                                   for i in range(start, degree + 1))

    @property
    def powers_(self):
        check_is_fitted(self, 'n_input_features_')

        combinations = self._combinations(self.n_input_features_, self.degree,
                                          self.interaction_only,
                                          self.include_bias)
        # np.vstack requires a sequence; passing a bare generator is
        # deprecated and raises an error on recent NumPy releases.
        return np.vstack([bincount(c, minlength=self.n_input_features_)
                          for c in combinations])

    def get_feature_names(self, input_features=None):
        """
        Return feature names for output features

        Parameters
        ----------
        input_features : list of string, length n_features, optional
            String names for input features if available. By default,
            "x0", "x1", ... "xn_features" is used.

        Returns
        -------
        output_feature_names : list of string, length n_output_features
        """
        powers = self.powers_
        if input_features is None:
            input_features = ['x%d' % i for i in range(powers.shape[1])]
        feature_names = []
        for row in powers:
            inds = np.where(row)[0]
            if len(inds):
                # Omit the "^1" suffix on linear factors; join the
                # remaining factors with spaces.
                name = " ".join("%s^%d" % (input_features[ind], exp)
                                if exp != 1 else input_features[ind]
                                for ind, exp in zip(inds, row[inds]))
            else:
                name = "1"
            feature_names.append(name)
        return feature_names

    def fit(self, X, y=None):
        """
        Compute number of output features.
        """
        n_features = check_array(X).shape[1]
        combinations = self._combinations(n_features, self.degree,
                                          self.interaction_only,
                                          self.include_bias)
        self.n_input_features_ = n_features
        self.n_output_features_ = sum(1 for _ in combinations)
        return self

    def transform(self, X, y=None):
        """Transform data to polynomial features

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data to transform, row by row.

        Returns
        -------
        XP : np.ndarray shape [n_samples, NP]
            The matrix of features, where NP is the number of polynomial
            features generated from the combination of inputs.
        """
        check_is_fitted(self, ['n_input_features_', 'n_output_features_'])

        X = check_array(X, dtype=FLOAT_DTYPES)
        n_samples, n_features = X.shape

        if n_features != self.n_input_features_:
            raise ValueError("X shape does not match training shape")

        # Allocate the output once; each column is the product of one
        # index combination of input columns.
        XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
        combinations = self._combinations(n_features, self.degree,
                                          self.interaction_only,
                                          self.include_bias)
        for i, c in enumerate(combinations):
            XP[:, i] = X[:, c].prod(1)

        return XP
def normalize(X, norm='l2', axis=1, copy=True, return_norm=False):
    """Scale input vectors individually to unit norm (vector length).

    Read more in the :ref:`User Guide <preprocessing_normalization>`.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape [n_samples, n_features]
        The data to normalize, element by element.
        scipy.sparse matrices should be in CSR format to avoid an
        un-necessary copy.

    norm : 'l1', 'l2', or 'max', optional ('l2' by default)
        The norm to use to normalize each non zero sample (or each non-zero
        feature if axis is 0).

    axis : 0 or 1, optional (1 by default)
        axis used to normalize the data along. If 1, independently normalize
        each sample, otherwise (if 0) normalize each feature.

    copy : boolean, optional, default True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix and if axis is 1).

    return_norm : boolean, default False
        whether to return the computed norms

    Raises
    ------
    NotImplementedError
        If ``return_norm=True`` with a sparse matrix and norm 'l1' or
        'l2' (the in-place sparse normalizers do not expose the norms).

    See also
    --------
    :class:`sklearn.preprocessing.Normalizer` to perform normalization
    using the ``Transformer`` API (e.g. as part of a preprocessing
    :class:`sklearn.pipeline.Pipeline`)
    """
    if norm not in ('l1', 'l2', 'max'):
        raise ValueError("'%s' is not a supported norm" % norm)

    if axis == 0:
        sparse_format = 'csc'
    elif axis == 1:
        sparse_format = 'csr'
    else:
        raise ValueError("'%d' is not a supported axis" % axis)

    X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
                    estimator='the normalize function', dtype=FLOAT_DTYPES)
    if axis == 0:
        X = X.T

    if sparse.issparse(X):
        # The in-place CSR normalizers below never bind `norms`, so the
        # old code hit an UnboundLocalError when return_norm was
        # requested; fail loudly and clearly instead.
        if return_norm and norm in ('l1', 'l2'):
            raise NotImplementedError("return_norm=True is not implemented "
                                      "for sparse matrices with norm 'l1' "
                                      "or norm 'l2'")
        if norm == 'l1':
            inplace_csr_row_normalize_l1(X)
        elif norm == 'l2':
            inplace_csr_row_normalize_l2(X)
        elif norm == 'max':
            _, norms = min_max_axis(X, 1)
            norms = norms.repeat(np.diff(X.indptr))
            mask = norms != 0
            X.data[mask] /= norms[mask]
    else:
        if norm == 'l1':
            norms = np.abs(X).sum(axis=1)
        elif norm == 'l2':
            norms = row_norms(X)
        elif norm == 'max':
            norms = np.max(X, axis=1)
        # Avoid dividing by zero for all-zero rows.
        norms = _handle_zeros_in_scale(norms, copy=False)
        X /= norms[:, np.newaxis]

    if axis == 0:
        X = X.T

    if return_norm:
        return X, norms
    else:
        return X
class Normalizer(BaseEstimator, TransformerMixin):
    """Normalize samples individually to unit norm.

    Every row of the data matrix that has at least one non-zero entry is
    rescaled, independently of all other rows, so that its norm (l1, l2
    or max) equals one.

    Dense numpy arrays and scipy.sparse matrices are both supported (CSR
    format avoids an extra copy / conversion for sparse input).

    Scaling inputs to unit norm is a common operation for text
    classification or clustering: for instance, the dot product of two
    l2-normalized TF-IDF vectors is their cosine similarity, the base
    similarity metric of the Vector Space Model used throughout the
    Information Retrieval community.

    Read more in the :ref:`User Guide <preprocessing_normalization>`.

    Parameters
    ----------
    norm : 'l1', 'l2', or 'max', optional ('l2' by default)
        The norm to use to normalize each non zero sample.

    copy : boolean, optional, default True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix).

    Notes
    -----
    This estimator is stateless (besides constructor parameters); ``fit``
    does nothing but is useful when used in a pipeline.

    See also
    --------
    :func:`sklearn.preprocessing.normalize` equivalent function
    without the object oriented API
    """

    def __init__(self, norm='l2', copy=True):
        self.norm = norm
        self.copy = copy

    def fit(self, X, y=None):
        """Do nothing and return the estimator unchanged.

        Present only so the estimator satisfies the usual API and can be
        used inside pipelines; the input is still validated.
        """
        check_array(X, accept_sparse='csr')
        return self

    def transform(self, X, y=None, copy=None):
        """Scale each non zero row of X to unit norm.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data to normalize, row by row. scipy.sparse matrices should be
            in CSR format to avoid an un-necessary copy.
        """
        if copy is None:
            copy = self.copy
        X = check_array(X, accept_sparse='csr')
        return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
    """Boolean thresholding of array-like or scipy.sparse matrix

    Read more in the :ref:`User Guide <preprocessing_binarization>`.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape [n_samples, n_features]
        The data to binarize, element by element.
        scipy.sparse matrices should be in CSR or CSC format to avoid an
        un-necessary copy.

    threshold : float, optional (0.0 by default)
        Feature values below or equal to this are replaced by 0, above it by 1.
        Threshold may not be less than 0 for operations on sparse matrices.

    copy : boolean, optional, default True
        set to False to perform inplace binarization and avoid a copy
        (if the input is already a numpy array or a scipy.sparse CSR / CSC
        matrix).

    See also
    --------
    :class:`sklearn.preprocessing.Binarizer` to perform binarization
    using the ``Transformer`` API (e.g. as part of a preprocessing
    :class:`sklearn.pipeline.Pipeline`)
    """
    X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
    if sparse.issparse(X):
        # A negative threshold would map implicit zeros to 1, which
        # cannot be represented without densifying the matrix.
        if threshold < 0:
            raise ValueError('Cannot binarize a sparse matrix with threshold '
                             '< 0')
        # Only the stored (non-zero) values need thresholding.
        cond = X.data > threshold
        not_cond = np.logical_not(cond)
        X.data[cond] = 1
        X.data[not_cond] = 0
        X.eliminate_zeros()
    else:
        cond = X > threshold
        not_cond = np.logical_not(cond)
        X[cond] = 1
        X[not_cond] = 0
    return X
class Binarizer(BaseEstimator, TransformerMixin):
    """Binarize data (set feature values to 0 or 1) according to a threshold

    Values strictly greater than the threshold map to 1; values less than
    or equal to it map to 0. With the default threshold of 0, only
    positive values map to 1.

    Binarization is common for text count data, where one may decide to
    only consider the presence or absence of a feature rather than its
    number of occurrences. It is also useful as a pre-processing step for
    estimators that consider boolean random variables (e.g. modelled
    using the Bernoulli distribution in a Bayesian setting).

    Read more in the :ref:`User Guide <preprocessing_binarization>`.

    Parameters
    ----------
    threshold : float, optional (0.0 by default)
        Feature values below or equal to this are replaced by 0, above it by 1.
        Threshold may not be less than 0 for operations on sparse matrices.

    copy : boolean, optional, default True
        set to False to perform inplace binarization and avoid a copy (if
        the input is already a numpy array or a scipy.sparse CSR matrix).

    Notes
    -----
    If the input is a sparse matrix, only the non-zero values are subject
    to update by the Binarizer class.

    This estimator is stateless (besides constructor parameters); ``fit``
    does nothing but is useful when used in a pipeline.
    """

    def __init__(self, threshold=0.0, copy=True):
        self.threshold = threshold
        self.copy = copy

    def fit(self, X, y=None):
        """Do nothing and return the estimator unchanged.

        Present only so the estimator satisfies the usual API and can be
        used inside pipelines; the input is still validated.
        """
        check_array(X, accept_sparse='csr')
        return self

    def transform(self, X, y=None, copy=None):
        """Binarize each element of X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data to binarize, element by element.
            scipy.sparse matrices should be in CSR format to avoid an
            un-necessary copy.
        """
        if copy is None:
            copy = self.copy
        return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
    """Center a kernel matrix

    Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi maps x
    into a Hilbert space. KernelCenterer centers the data — i.e. removes
    the mean in that space — without ever computing phi(x) explicitly.
    It is equivalent to centering phi(x) with
    sklearn.preprocessing.StandardScaler(with_std=False).

    Read more in the :ref:`User Guide <kernel_centering>`.
    """

    def fit(self, K, y=None):
        """Fit KernelCenterer

        Parameters
        ----------
        K : numpy array of shape [n_samples, n_samples]
            Kernel matrix.

        Returns
        -------
        self : returns an instance of self.
        """
        K = check_array(K, dtype=FLOAT_DTYPES)
        n = K.shape[0]
        # Column means of the training kernel, and their overall mean.
        self.K_fit_rows_ = np.sum(K, axis=0) / n
        self.K_fit_all_ = self.K_fit_rows_.sum() / n
        return self

    def transform(self, K, y=None, copy=True):
        """Center kernel matrix.

        Parameters
        ----------
        K : numpy array of shape [n_samples1, n_samples2]
            Kernel matrix.

        copy : boolean, optional, default True
            Set to False to perform inplace computation.

        Returns
        -------
        K_new : numpy array of shape [n_samples1, n_samples2]
        """
        check_is_fitted(self, 'K_fit_all_')

        K = check_array(K, copy=copy, dtype=FLOAT_DTYPES)
        # Row means of K against the training columns.
        pred_cols = (np.sum(K, axis=1) /
                     self.K_fit_rows_.shape[0])[:, np.newaxis]
        # K' = K - 1.K - K.1 + 1.K.1 (double-centering identity).
        K -= self.K_fit_rows_
        K -= pred_cols
        K += self.K_fit_all_
        return K
def add_dummy_feature(X, value=1.0):
    """Augment dataset with an additional dummy feature.

    This is useful for fitting an intercept term with implementations which
    cannot otherwise fit it directly.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape [n_samples, n_features]
        Data.

    value : float
        Value to use for the dummy feature.

    Returns
    -------
    X : {array, sparse matrix}, shape [n_samples, n_features + 1]
        Same data with dummy feature added as first column.

    Examples
    --------
    >>> from sklearn.preprocessing import add_dummy_feature
    >>> add_dummy_feature([[0, 1], [1, 0]])
    array([[ 1.,  0.,  1.],
           [ 1.,  1.,  0.]])
    """
    X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES)
    n_samples, n_features = X.shape
    out_shape = (n_samples, n_features + 1)

    if not sparse.issparse(X):
        # Dense case: simply stack a constant column on the left.
        return np.hstack((np.full((n_samples, 1), value), X))

    if sparse.isspmatrix_coo(X):
        # Shift existing columns right by one and prepend the dummy
        # column (column index 0, one entry per row).
        col = np.concatenate((np.zeros(n_samples), X.col + 1))
        row = np.concatenate((np.arange(n_samples), X.row))
        data = np.concatenate((np.full(n_samples, value), X.data))
        return sparse.coo_matrix((data, (row, col)), out_shape)

    if sparse.isspmatrix_csc(X):
        # The dummy column contributes n_samples stored values, so every
        # column pointer moves by that amount; indptr[0] must stay 0.
        indptr = np.concatenate((np.array([0]), X.indptr + n_samples))
        indices = np.concatenate((np.arange(n_samples), X.indices))
        data = np.concatenate((np.full(n_samples, value), X.data))
        return sparse.csc_matrix((data, indices, indptr), out_shape)

    # Any other sparse format: go through COO and convert back.
    return X.__class__(add_dummy_feature(X.tocoo(), value))
def _transform_selected(X, transform, selected="all", copy=True):
    """Apply a transform function to a subset of the features.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape [n_samples, n_features]
        Dense array or sparse matrix.

    transform : callable
        A callable transform(X) -> X_transformed

    copy : boolean, optional
        Copy X even if it could be avoided.

    selected : "all" or array of indices or mask
        Specify which features to apply the transform to.

    Returns
    -------
    X : array or sparse matrix, shape=(n_samples, n_features_new)
        Transformed columns first, untouched columns stacked to the right.
    """
    if isinstance(selected, six.string_types) and selected == "all":
        return transform(X)

    X = check_array(X, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES)

    if len(selected) == 0:
        return X

    n_features = X.shape[1]
    # Build a boolean mask; `selected` may be indices or already a mask.
    mask = np.zeros(n_features, dtype=bool)
    mask[np.asarray(selected)] = True
    n_selected = np.count_nonzero(mask)

    if n_selected == 0:
        # No features selected.
        return X
    if n_selected == n_features:
        # All features selected.
        return transform(X)

    col_idx = np.arange(n_features)
    X_sel = transform(X[:, col_idx[mask]])
    X_not_sel = X[:, col_idx[~mask]]

    if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
        return sparse.hstack((X_sel, X_not_sel))
    return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
    """Encode categorical integer features using a one-hot aka one-of-K scheme.

    The input to this transformer should be a matrix of integers, denoting
    the values taken on by categorical (discrete) features. The output will be
    a sparse matrix where each column corresponds to one possible value of one
    feature. It is assumed that input features take on values in the range
    [0, n_values).

    This encoding is needed for feeding categorical data to many scikit-learn
    estimators, notably linear models and SVMs with the standard kernels.

    Read more in the :ref:`User Guide <preprocessing_categorical_features>`.

    Parameters
    ----------
    n_values : 'auto', int or array of ints
        Number of values per feature.

        - 'auto' : determine value range from training data.
        - int : number of categorical values per feature.
                Each feature value should be in ``range(n_values)``
        - array : ``n_values[i]`` is the number of categorical values in
                  ``X[:, i]``. Each feature value should be
                  in ``range(n_values[i])``

    categorical_features : "all" or array of indices or mask
        Specify what features are treated as categorical.

        - 'all' (default): All features are treated as categorical.
        - array of indices: Array of categorical feature indices.
        - mask: Array of length n_features and with dtype=bool.

        Non-categorical features are always stacked to the right of the
        matrix.

    dtype : number type, default=np.float64
        Desired dtype of output.

    sparse : boolean, default=True
        Will return sparse matrix if set True else will return an array.

    handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if a unknown categorical feature is
        present during transform.

    Attributes
    ----------
    active_features_ : array
        Indices for active features, meaning values that actually occur
        in the training set. Only available when n_values is ``'auto'``.

    feature_indices_ : array of shape (n_features,)
        Indices to feature ranges.
        Feature ``i`` in the original data is mapped to features
        from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
        (and then potentially masked by `active_features_` afterwards)

    n_values_ : array of shape (n_features,)
        Maximum number of values per feature.

    Examples
    --------
    Given a dataset with three features and two samples, we let the encoder
    find the maximum value per feature and transform the data to a binary
    one-hot encoding.

    >>> from sklearn.preprocessing import OneHotEncoder
    >>> enc = OneHotEncoder()
    >>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]])  # doctest: +ELLIPSIS
    OneHotEncoder(categorical_features='all', dtype=<... 'numpy.float64'>,
           handle_unknown='error', n_values='auto', sparse=True)
    >>> enc.n_values_
    array([2, 3, 4])
    >>> enc.feature_indices_
    array([0, 2, 5, 9])
    >>> enc.transform([[0, 1, 1]]).toarray()
    array([[ 1.,  0.,  0.,  1.,  0.,  0.,  1.,  0.,  0.]])

    See also
    --------
    sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
      dictionary items (also handles string-valued features).
    sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
      encoding of dictionary items or strings.
    """
    def __init__(self, n_values="auto", categorical_features="all",
                 dtype=np.float64, sparse=True, handle_unknown='error'):
        self.n_values = n_values
        self.categorical_features = categorical_features
        self.dtype = dtype
        self.sparse = sparse
        self.handle_unknown = handle_unknown

    def fit(self, X, y=None):
        """Fit OneHotEncoder to X.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_feature]
            Input array of type int.

        Returns
        -------
        self
        """
        self.fit_transform(X)
        return self

    def _fit_transform(self, X):
        """Assumes X contains only categorical features."""
        # ``np.int`` was removed in NumPy >= 1.24; the builtin ``int`` is
        # the equivalent dtype specifier.
        X = check_array(X, dtype=int)
        if np.any(X < 0):
            raise ValueError("X needs to contain only non-negative integers.")
        n_samples, n_features = X.shape
        if self.n_values == 'auto':
            n_values = np.max(X, axis=0) + 1
        elif isinstance(self.n_values, numbers.Integral):
            if (np.max(X, axis=0) >= self.n_values).any():
                raise ValueError("Feature out of bounds for n_values=%d"
                                 % self.n_values)
            n_values = np.empty(n_features, dtype=int)
            n_values.fill(self.n_values)
        else:
            try:
                n_values = np.asarray(self.n_values, dtype=int)
            except (ValueError, TypeError):
                # Report the type of the offending parameter; the
                # previous message wrongly reported ``type(X)``.
                raise TypeError("Wrong type for parameter `n_values`. Expected"
                                " 'auto', int or array of ints, got %r"
                                % type(self.n_values))
            if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
                raise ValueError("Shape mismatch: if n_values is an array,"
                                 " it has to be of shape (n_features,).")

        self.n_values_ = n_values
        # feature_indices_[i]:feature_indices_[i+1] is the column range
        # of feature i in the expanded one-hot matrix.
        n_values = np.hstack([[0], n_values])
        indices = np.cumsum(n_values)
        self.feature_indices_ = indices

        column_indices = (X + indices[:-1]).ravel()
        row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
                                n_features)
        data = np.ones(n_samples * n_features)
        out = sparse.coo_matrix((data, (row_indices, column_indices)),
                                shape=(n_samples, indices[-1]),
                                dtype=self.dtype).tocsr()

        if self.n_values == 'auto':
            # Drop columns for values that never occur in the training set.
            mask = np.array(out.sum(axis=0)).ravel() != 0
            active_features = np.where(mask)[0]
            out = out[:, active_features]
            self.active_features_ = active_features

        return out if self.sparse else out.toarray()

    def fit_transform(self, X, y=None):
        """Fit OneHotEncoder to X, then transform X.

        Equivalent to self.fit(X).transform(X), but more convenient and more
        efficient. See fit for the parameters, transform for the return value.
        """
        return _transform_selected(X, self._fit_transform,
                                   self.categorical_features, copy=True)

    def _transform(self, X):
        """Assumes X contains only categorical features."""
        X = check_array(X, dtype=int)
        if np.any(X < 0):
            raise ValueError("X needs to contain only non-negative integers.")
        n_samples, n_features = X.shape

        indices = self.feature_indices_
        if n_features != indices.shape[0] - 1:
            raise ValueError("X has different shape than during fitting."
                             " Expected %d, got %d."
                             % (indices.shape[0] - 1, n_features))

        # We use only those categorical features of X that are known using fit.
        # i.e lesser than n_values_ using mask.
        # This means, if self.handle_unknown is "ignore", the row_indices and
        # col_indices corresponding to the unknown categorical feature are
        # ignored.
        mask = (X < self.n_values_).ravel()
        if np.any(~mask):
            if self.handle_unknown not in ['error', 'ignore']:
                raise ValueError("handle_unknown should be either error or "
                                 "unknown got %s" % self.handle_unknown)
            if self.handle_unknown == 'error':
                raise ValueError("unknown categorical feature present %s "
                                 "during transform." % X.ravel()[~mask])

        column_indices = (X + indices[:-1]).ravel()[mask]
        row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
                                n_features)[mask]
        data = np.ones(np.sum(mask))
        out = sparse.coo_matrix((data, (row_indices, column_indices)),
                                shape=(n_samples, indices[-1]),
                                dtype=self.dtype).tocsr()
        if self.n_values == 'auto':
            out = out[:, self.active_features_]

        return out if self.sparse else out.toarray()

    def transform(self, X):
        """Transform X using one-hot encoding.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            Input array of type int.

        Returns
        -------
        X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
            Transformed input.
        """
        return _transform_selected(X, self._transform,
                                   self.categorical_features, copy=True)
| hugobowne/scikit-learn | sklearn/preprocessing/data.py | Python | bsd-3-clause | 68,325 | [
"Gaussian"
] | 6ca755d13fdb6731eba18e4337f65b2bb41c53833f878894c8875225fd0ffbbb |
""" Example smearing script
This script:
* Reads in mc spectra from hdf5
* Smears spectra, default is to use weighted Gaussian method, but can
also use specify random Gaussian method via command line
* Smeared spectrum is saved to the same directory with ``_smeared``
added to the file name
Examples:
To smear hdf5 file ``example.hdf5`` using the random Gaussian method::
$ python dump_smeared.py --smear_method "random" /path/to/example.hdf5
This will create the smeared hdf5 file ``/path/to/example_smeared.hdf5``.
.. note:: Valid smear methods include:
* "weight", default
* "random"
"""
import echidna.output.store as store
import echidna.core.smear as smear
if __name__ == "__main__":
    import argparse
    import os

    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--smear_method", nargs='?', const="weight",
                        type=str, default="weight",
                        help="specify the smearing method to use")
    parser.add_argument("-r", "--energy_resolution", default=None, type=float,
                        help="specify energy resolution "
                             "e.g. 0.05 for 5 percent")
    parser.add_argument("path", type=str,
                        help="specify path to hdf5 file")
    args = parser.parse_args()

    # os.path handles paths without a directory component or without an
    # extension correctly; the previous str.rfind slicing silently
    # dropped the last character of extension-less filenames.
    directory, basename = os.path.split(args.path)
    filename = os.path.splitext(basename)[0]

    # Energy smearing: explicit resolution if given, light yield otherwise.
    if args.energy_resolution:
        energy_smear = smear.EnergySmearRes()
        energy_smear.set_resolution(args.energy_resolution)
    else:  # use light yield
        energy_smear = smear.EnergySmearLY()
    radial_smear = smear.RadialSmear()

    # Select the smearing functions once instead of duplicating the
    # parameter loop per method; parser.error() exits on invalid input.
    if args.smear_method == "weight":
        smear_energy = energy_smear.weighted_smear
        smear_radial = radial_smear.weighted_smear
    elif args.smear_method == "random":
        smear_energy = energy_smear.random_smear
        smear_radial = radial_smear.random_smear
    else:  # Not a valid smear method
        parser.error(args.smear_method + " is not a valid smear method")

    spectrum = store.load(args.path)
    for par in spectrum.get_config().get_pars():
        if "energy" in par:
            spectrum = smear_energy(spectrum, par=par)
        elif "radial" in par:
            spectrum = smear_radial(spectrum, par=par)

    out_path = os.path.join(directory, filename + "_smeared.hdf5")
    store.dump(out_path, spectrum)
| ashleyrback/echidna | echidna/scripts/dump_smeared.py | Python | mit | 2,974 | [
"Gaussian"
] | 4789a2b3efc15093f57172c2a0cf4258bc9cd541906bf0e2da31fe8dbda41b10 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
<<<<<<< HEAD
from __future__ import division, unicode_literals
=======
>>>>>>> a41cc069c865a5d0f35d0731f92c547467395b1b
"""
Created on Nov 10, 2012
@author: shyue
"""
<<<<<<< HEAD
=======
from __future__ import division, unicode_literals
from pymatgen.util.testing import PymatgenTest
>>>>>>> a41cc069c865a5d0f35d0731f92c547467395b1b
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Nov 10, 2012"
import unittest
from pymatgen.core.periodic_table import Element
from pymatgen.core.composition import Composition, CompositionError, \
ChemicalPotential
import random
class CompositionTest(PymatgenTest):
    """Unit tests for Composition: parsing, formula generation, arithmetic,
    comparisons and oxidation-state guessing.

    Merge conflict resolved in favour of the PymatgenTest base class:
    test_weight uses assertArrayAlmostEqual and test_pickle uses
    serialize_with_pickle, both of which are provided by PymatgenTest only.
    """

    def setUp(self):
        # A spread of ordinary, fractional and redundant formulas.
        self.comp = list()
        self.comp.append(Composition("Li3Fe2(PO4)3"))
        self.comp.append(Composition("Li3Fe(PO4)O"))
        self.comp.append(Composition("LiMn2O4"))
        self.comp.append(Composition("Li4O4"))
        self.comp.append(Composition("Li3Fe2Mo3O12"))
        self.comp.append(Composition("Li3Fe2((PO4)3(CO3)5)2"))
        self.comp.append(Composition("Li1.5Si0.5"))
        self.comp.append(Composition("ZnOH"))

        # Ambiguous (case-insensitive) formulas ranked by plausibility.
        self.indeterminate_comp = []
        self.indeterminate_comp.append(
            Composition.ranked_compositions_from_indeterminate_formula("Co1",
                                                                       True)
        )
        self.indeterminate_comp.append(
            Composition.ranked_compositions_from_indeterminate_formula("Co1",
                                                                       False)
        )
        self.indeterminate_comp.append(
            Composition.ranked_compositions_from_indeterminate_formula("co2o3")
        )
        self.indeterminate_comp.append(
            Composition.ranked_compositions_from_indeterminate_formula("ncalu")
        )
        self.indeterminate_comp.append(
            Composition.ranked_compositions_from_indeterminate_formula("calun")
        )
        self.indeterminate_comp.append(
            Composition.ranked_compositions_from_indeterminate_formula(
                "liCoo2n (pO4)2")
        )
        self.indeterminate_comp.append(
            Composition.ranked_compositions_from_indeterminate_formula(
                "(co)2 (PO)4")
        )
        self.indeterminate_comp.append(
            Composition.ranked_compositions_from_indeterminate_formula("Fee3"))

    def test_immutable(self):
        # Compositions must reject item assignment and deletion.
        try:
            self.comp[0]["Fe"] = 1
        except Exception as ex:
            self.assertIsInstance(ex, TypeError)
        try:
            del self.comp[0]["Fe"]
        except Exception as ex:
            self.assertIsInstance(ex, TypeError)

    def test_in(self):
        self.assertIn("Fe", self.comp[0])
        self.assertNotIn("Fe", self.comp[2])
        self.assertIn(Element("Fe"), self.comp[0])
        self.assertEqual(self.comp[0]["Fe"], 2)
        self.assertEqual(self.comp[0]["Mn"], 0)
        self.assertRaises(TypeError, self.comp[0].__getitem__, "Hello")
        self.assertRaises(TypeError, self.comp[0].__getitem__, "Vac")

    def test_hill_formula(self):
        c = Composition("CaCO3")
        self.assertEqual(c.hill_formula, "C Ca O3")
        c = Composition("C2H5OH")
        self.assertEqual(c.hill_formula, "C2 H6 O")

    def test_init_(self):
        self.assertRaises(CompositionError, Composition, {"H": -0.1})
        f = {'Fe': 4, 'Li': 4, 'O': 16, 'P': 4}
        self.assertEqual("Li4 Fe4 P4 O16", Composition(f).formula)
        f = {None: 4, 'Li': 4, 'O': 16, 'P': 4}
        self.assertRaises(TypeError, Composition, f)
        # Atomic numbers are accepted as keys.
        f = {1: 2, 8: 1}
        self.assertEqual("H2 O1", Composition(f).formula)
        self.assertEqual("Na2 O1", Composition(Na=2, O=1).formula)
        # Amounts below tolerance are dropped.
        c = Composition({'S': Composition.amount_tolerance / 2})
        self.assertEqual(len(c.elements), 0)

    def test_average_electroneg(self):
        val = [2.7224999999999997, 2.4160000000000004, 2.5485714285714285,
               2.21, 2.718, 3.08, 1.21, 2.43]
        for i, c in enumerate(self.comp):
            self.assertAlmostEqual(c.average_electroneg,
                                   val[i])

    def test_total_electrons(self):
        test_cases = {'C': 6, 'SrTiO3': 84}
        for item in test_cases.keys():
            c = Composition(item)
            self.assertAlmostEqual(c.total_electrons, test_cases[item])

    def test_formula(self):
        correct_formulas = ['Li3 Fe2 P3 O12', 'Li3 Fe1 P1 O5', 'Li1 Mn2 O4',
                            'Li4 O4', 'Li3 Fe2 Mo3 O12', 'Li3 Fe2 P6 C10 O54',
                            'Li1.5 Si0.5', 'Zn1 H1 O1']
        all_formulas = [c.formula for c in self.comp]
        self.assertEqual(all_formulas, correct_formulas)
        self.assertRaises(CompositionError, Composition,
                          "(co2)(po4)2")
        # Merge conflict resolved: keep the extra whitespace-tolerance checks.
        self.assertEqual(Composition("K Na 2").reduced_formula, "KNa2")
        self.assertEqual(Composition("K3 Na 2").reduced_formula, "K3Na2")
        self.assertEqual(Composition("Na 3 Zr (PO 4) 3").reduced_formula,
                         "Na3Zr(PO4)3")

    def test_mixed_valence(self):
        comp = Composition({"Fe2+": 2, "Fe3+": 4, "Li+": 8})
        self.assertEqual(comp.reduced_formula, "Li4Fe3")
        self.assertEqual(comp.alphabetical_formula, "Fe6 Li8")
        self.assertEqual(comp.formula, "Li8 Fe6")

    def test_indeterminate_formula(self):
        correct_formulas = [["Co1"], ["Co1", "C1 O1"], ["Co2 O3", "C1 O5"],
                            ["N1 Ca1 Lu1", "U1 Al1 C1 N1"],
                            ["N1 Ca1 Lu1", "U1 Al1 C1 N1"],
                            ["Li1 Co1 P2 N1 O10", "Li1 P2 C1 N1 O11",
                             "Li1 Co1 Po8 N1 O2", "Li1 Po8 C1 N1 O3"],
                            ["Co2 P4 O4", "Co2 Po4", "P4 C2 O6",
                             "Po4 C2 O2"], []]
        for i, c in enumerate(correct_formulas):
            self.assertEqual([Composition(comp) for comp in c],
                             self.indeterminate_comp[i])

    def test_alphabetical_formula(self):
        correct_formulas = ['Fe2 Li3 O12 P3', 'Fe1 Li3 O5 P1', 'Li1 Mn2 O4',
                            'Li4 O4', 'Fe2 Li3 Mo3 O12', 'C10 Fe2 Li3 O54 P6',
                            'Li1.5 Si0.5', 'H1 O1 Zn1']
        all_formulas = [c.alphabetical_formula for c in self.comp]
        self.assertEqual(all_formulas, correct_formulas)

    def test_reduced_composition(self):
        correct_reduced_formulas = ['Li3Fe2(PO4)3', 'Li3FePO5', 'LiMn2O4',
                                    'Li2O2', 'Li3Fe2(MoO4)3',
                                    'Li3Fe2P6(C5O27)2', 'Li1.5Si0.5', 'ZnHO']
        for i in range(len(self.comp)):
            self.assertEqual(self.comp[i]
                             .get_reduced_composition_and_factor()[0],
                             Composition(correct_reduced_formulas[i]))

    def test_reduced_formula(self):
        correct_reduced_formulas = ['Li3Fe2(PO4)3', 'Li3FePO5', 'LiMn2O4',
                                    'Li2O2', 'Li3Fe2(MoO4)3',
                                    'Li3Fe2P6(C5O27)2', 'Li1.5Si0.5', 'ZnHO']
        all_formulas = [c.reduced_formula for c in self.comp]
        self.assertEqual(all_formulas, correct_reduced_formulas)
        # Merge conflict resolved: keep the rounding check.
        # test rounding
        c = Composition({'Na': 2 - Composition.amount_tolerance / 2, 'Cl': 2})
        self.assertEqual('NaCl', c.reduced_formula)

    def test_integer_formula(self):
        correct_reduced_formulas = ['Li3Fe2(PO4)3', 'Li3FePO5', 'LiMn2O4',
                                    'Li2O2', 'Li3Fe2(MoO4)3',
                                    'Li3Fe2P6(C5O27)2', 'Li3Si', 'ZnHO']
        all_formulas = [c.get_integer_formula_and_factor()[0] for c in self.comp]
        self.assertEqual(all_formulas, correct_reduced_formulas)
        self.assertEqual(Composition('Li0.5O0.25').get_integer_formula_and_factor(),
                         ('Li2O', 0.25))
        self.assertEqual(Composition('O0.25').get_integer_formula_and_factor(),
                         ('O2', 0.125))
        formula, factor = Composition("Li0.16666667B1.0H1.0").get_integer_formula_and_factor()
        self.assertEqual(formula, 'Li(BH)6')
        self.assertAlmostEqual(factor, 1 / 6)

    def test_num_atoms(self):
        correct_num_atoms = [20, 10, 7, 8, 20, 75, 2, 3]
        all_natoms = [c.num_atoms for c in self.comp]
        self.assertEqual(all_natoms, correct_num_atoms)

    def test_weight(self):
        correct_weights = [417.427086, 187.63876199999999, 180.81469, 91.7616,
                           612.3258, 1302.430172, 24.454250000000002, 82.41634]
        all_weights = [c.weight for c in self.comp]
        # assertArrayAlmostEqual is provided by PymatgenTest.
        self.assertArrayAlmostEqual(all_weights, correct_weights, 5)

    def test_get_atomic_fraction(self):
        correct_at_frac = {"Li": 0.15, "Fe": 0.1, "P": 0.15, "O": 0.6}
        for el in ["Li", "Fe", "P", "O"]:
            self.assertEqual(self.comp[0].get_atomic_fraction(el),
                             correct_at_frac[el],
                             "Wrong computed atomic fractions")
        self.assertEqual(self.comp[0].get_atomic_fraction("S"), 0,
                         "Wrong computed atomic fractions")

    def test_anonymized_formula(self):
        expected_formulas = ['A2B3C3D12', 'ABC3D5', 'AB2C4', 'AB',
                             'A2B3C3D12', 'A2B3C6D10E54', 'A0.5B1.5', 'ABC']
        for i in range(len(self.comp)):
            self.assertEqual(self.comp[i].anonymized_formula,
                             expected_formulas[i])

    def test_get_wt_fraction(self):
        correct_wt_frac = {"Li": 0.0498841610868, "Fe": 0.267567687258,
                           "P": 0.222604831158, "O": 0.459943320496}
        for el in ["Li", "Fe", "P", "O"]:
            self.assertAlmostEqual(correct_wt_frac[el],
                                   self.comp[0].get_wt_fraction(el),
                                   5, "Wrong computed weight fraction")
        self.assertEqual(self.comp[0].get_wt_fraction(Element("S")), 0,
                         "Wrong computed weight fractions")

    def test_from_dict(self):
        sym_dict = {"Fe": 6, "O": 8}
        self.assertEqual(Composition.from_dict(sym_dict).reduced_formula,
                         "Fe3O4",
                         "Creation form sym_amount dictionary failed!")
        comp = Composition({"Fe2+": 2, "Fe3+": 4, "O2-": 8})
        comp2 = Composition.from_dict(comp.as_dict())
        self.assertEqual(comp, comp2)

    def test_as_dict(self):
        c = Composition.from_dict({'Fe': 4, 'O': 6})
        d = c.as_dict()
        correct_dict = {'Fe': 4.0, 'O': 6.0}
        self.assertEqual(d['Fe'], correct_dict['Fe'])
        self.assertEqual(d['O'], correct_dict['O'])
        correct_dict = {'Fe': 2.0, 'O': 3.0}
        d = c.to_reduced_dict
        self.assertEqual(d['Fe'], correct_dict['Fe'])
        self.assertEqual(d['O'], correct_dict['O'])

    def test_pickle(self):
        # Merge conflict resolved: keep pickle round-trip test
        # (serialize_with_pickle comes from PymatgenTest).
        for c in self.comp:
            self.serialize_with_pickle(c, test_eq=True)

    def test_add(self):
        self.assertEqual((self.comp[0] + self.comp[2]).formula,
                         "Li4 Mn2 Fe2 P3 O16",
                         "Incorrect composition after addition!")
        self.assertEqual((self.comp[3] + {"Fe": 4, "O": 4}).formula,
                         "Li4 Fe4 O8", "Incorrect composition after addition!")

    def test_sub(self):
        self.assertEqual((self.comp[0]
                          - Composition("Li2O")).formula,
                         "Li1 Fe2 P3 O11",
                         "Incorrect composition after addition!")
        self.assertEqual((self.comp[0] - {"Fe": 2, "O": 3}).formula,
                         "Li3 P3 O9")
        self.assertRaises(CompositionError, Composition('O').__sub__,
                          Composition('H'))
        # check that S is completely removed by subtraction
        c1 = Composition({'S': 1 + Composition.amount_tolerance / 2, 'O': 1})
        c2 = Composition({'S': 1})
        self.assertEqual(len((c1 - c2).elements), 1)

    def test_mul(self):
        self.assertEqual((self.comp[0] * 4).formula, "Li12 Fe8 P12 O48")
        self.assertEqual((3 * self.comp[1]).formula, "Li9 Fe3 P3 O15")

    def test_div(self):
        self.assertEqual((self.comp[0] / 4).formula, 'Li0.75 Fe0.5 P0.75 O3')

    def test_equals(self):
        # Zero-amount elements must not affect equality or hashing.
        random_z = random.randint(1, 92)
        fixed_el = Element.from_Z(random_z)
        other_z = random.randint(1, 92)
        while other_z == random_z:
            other_z = random.randint(1, 92)
        comp1 = Composition({fixed_el: 1, Element.from_Z(other_z): 0})
        other_z = random.randint(1, 92)
        while other_z == random_z:
            other_z = random.randint(1, 92)
        comp2 = Composition({fixed_el: 1, Element.from_Z(other_z): 0})
        self.assertEqual(comp1, comp2,
                         "Composition equality test failed. " +
                         "%s should be equal to %s" % (comp1.formula,
                                                       comp2.formula))
        self.assertEqual(comp1.__hash__(), comp2.__hash__(),
                         "Hashcode equality test failed!")

    def test_comparisons(self):
        c1 = Composition({'S': 1})
        c1_1 = Composition({'S': 1.00000000000001})
        c2 = Composition({'S': 2})
        c3 = Composition({'O': 1})
        c4 = Composition({'O': 1, 'S': 1})
        self.assertFalse(c1 > c2)
        self.assertFalse(c1_1 > c1)
        self.assertFalse(c1_1 < c1)
        self.assertTrue(c1 > c3)
        self.assertTrue(c3 < c1)
        self.assertTrue(c4 > c1)
        self.assertEqual(sorted([c1, c1_1, c2, c4, c3]),
                         [c3, c1, c1_1, c4, c2])

    def test_almost_equals(self):
        c1 = Composition({'Fe': 2.0, 'O': 3.0, 'Mn': 0})
        c2 = Composition({'O': 3.2, 'Fe': 1.9, 'Zn': 0})
        c3 = Composition({'Ag': 2.0, 'O': 3.0})
        c4 = Composition({'Fe': 2.0, 'O': 3.0, 'Ag': 2.0})
        self.assertTrue(c1.almost_equals(c2, rtol=0.1))
        self.assertFalse(c1.almost_equals(c2, rtol=0.01))
        self.assertFalse(c1.almost_equals(c3, rtol=0.1))
        self.assertFalse(c1.almost_equals(c4, rtol=0.1))

    def test_equality(self):
        self.assertTrue(self.comp[0].__eq__(self.comp[0]))
        self.assertFalse(self.comp[0].__eq__(self.comp[1]))
        self.assertFalse(self.comp[0].__ne__(self.comp[0]))
        self.assertTrue(self.comp[0].__ne__(self.comp[1]))

    def test_fractional_composition(self):
        for c in self.comp:
            self.assertAlmostEqual(c.fractional_composition.num_atoms, 1)

    def test_init_numerical_tolerance(self):
        self.assertEqual(Composition({'B': 1, 'C': -1e-12}), Composition('B'))

    def test_negative_compositions(self):
        self.assertEqual(Composition('Li-1(PO-1)4', allow_negative=True).formula,
                         'Li-1 P4 O-4')
        self.assertEqual(Composition('Li-1(PO-1)4', allow_negative=True).reduced_formula,
                         'Li-1(PO-1)4')
        self.assertEqual(Composition('Li-2Mg4', allow_negative=True).reduced_composition,
                         Composition('Li-1Mg2', allow_negative=True))
        self.assertEqual(Composition('Li-2.5Mg4', allow_negative=True).reduced_composition,
                         Composition('Li-2.5Mg4', allow_negative=True))
        # test math
        c1 = Composition('LiCl', allow_negative=True)
        c2 = Composition('Li')
        self.assertEqual(c1 - 2 * c2, Composition({'Li': -1, 'Cl': 1},
                                                  allow_negative=True))
        self.assertEqual((c1 + c2).allow_negative, True)
        self.assertEqual(c1 / -1, Composition('Li-1Cl-1', allow_negative=True))
        # test num_atoms
        c1 = Composition('Mg-1Li', allow_negative=True)
        self.assertEqual(c1.num_atoms, 2)
        self.assertEqual(c1.get_atomic_fraction('Mg'), 0.5)
        self.assertEqual(c1.get_atomic_fraction('Li'), 0.5)
        self.assertEqual(c1.fractional_composition,
                         Composition('Mg-0.5Li0.5', allow_negative=True))
        # test copy
        self.assertEqual(c1.copy(), c1)
        # test species
        c1 = Composition({'Mg': 1, 'Mg2+': -1}, allow_negative=True)
        self.assertEqual(c1.num_atoms, 2)
        self.assertEqual(c1.element_composition, Composition())
        self.assertEqual(c1.average_electroneg, 1.31)

    def test_special_formulas(self):
        special_formulas = {"LiO": "Li2O2", "NaO": "Na2O2", "KO": "K2O2",
                            "HO": "H2O2", "CsO": "Cs2O2", "RbO": "Rb2O2",
                            "O": "O2", "N": "N2", "F": "F2", "Cl": "Cl2",
                            "H": "H2"}
        for k, v in special_formulas.items():
            self.assertEqual(Composition(k).reduced_formula, v)

    def test_oxi_state_guesses(self):
        self.assertEqual(Composition("LiFeO2").oxi_state_guesses(),
                         [{"Li": 1, "Fe": 3, "O": -2}])
        self.assertEqual(Composition("Fe4O5").oxi_state_guesses(),
                         [{"Fe": 2.5, "O": -2}])
        self.assertEqual(Composition("V2O3").oxi_state_guesses(),
                         [{"V": 3, "O": -2}])
        # all_oxidation_states produces *many* possible responses
        self.assertEqual(len(Composition("MnO").oxi_state_guesses(
            all_oxi_states=True)), 4)
        # can't balance b/c missing V4+
        self.assertEqual(Composition("VO2").oxi_state_guesses(
            oxi_states_override={"V": [2, 3, 5]}), [])
        # missing V4+, but can balance due to additional sites
        self.assertEqual(Composition("V2O4").oxi_state_guesses(
            oxi_states_override={"V": [2, 3, 5]}), [{"V": 4, "O": -2}])
        # multiple solutions - Mn/Fe = 2+/4+ or 3+/3+ or 4+/2+
        self.assertEqual(len(Composition("MnFeO3").oxi_state_guesses(
            oxi_states_override={"Mn": [2, 3, 4], "Fe": [2, 3, 4]})), 3)
        # multiple solutions prefers 3/3 over 2/4 or 4/2
        self.assertEqual(Composition("MnFeO3").oxi_state_guesses(
            oxi_states_override={"Mn": [2, 3, 4], "Fe": [2, 3, 4]})[0],
            {"Mn": 3, "Fe": 3, "O": -2})
        # target charge of 1
        self.assertEqual(Composition("V2O6").oxi_state_guesses(
            oxi_states_override={"V": [2, 3, 4, 5]}, target_charge=-2),
            [{"V": 5, "O": -2}])
        # max_sites for very large composition - should timeout if incorrect
        self.assertEqual(Composition("Li10000Fe10000P10000O40000").
                         oxi_state_guesses(max_sites=7)[0],
                         {"Li": 1, "Fe": 2, "P": 5, "O": -2})
        # max_sites for very large composition - should timeout if incorrect
        self.assertEqual(Composition("Li10000Fe10000P10000O40000").
                         oxi_state_guesses(max_sites=-1)[0],
                         {"Li": 1, "Fe": 2, "P": 5, "O": -2})
        self.assertRaises(ValueError, Composition("V2O3").
                          oxi_state_guesses, max_sites=1)
class ChemicalPotentialTest(unittest.TestCase):
    """Tests for ChemicalPotential construction and arithmetic."""

    def test_init(self):
        # A string key and the equivalent Element key collide -> error.
        duplicate_keys = {'Fe': 1, Element('Fe'): 1}
        self.assertRaises(ValueError, ChemicalPotential, duplicate_keys)
        # All keys are normalized to Element instances.
        for key in ChemicalPotential(Fe=1).keys():
            self.assertIsInstance(key, Element)

    def test_math(self):
        mu_fe = ChemicalPotential({'Fe': 1})
        mu_o = ChemicalPotential({'O': 2.1})
        mu_all = ChemicalPotential({'Fe': 1, 'O': 2.1})
        mu_all_x2 = ChemicalPotential({'Fe': 2, 'O': 4.2})
        feo2 = Composition('FeO2')
        # energy evaluation against a composition
        self.assertAlmostEqual(mu_all.get_energy(feo2), 5.2)
        self.assertAlmostEqual(mu_fe.get_energy(feo2, False), 1)
        self.assertRaises(ValueError, mu_fe.get_energy, feo2)
        # scalar multiplication only
        self.assertRaises(TypeError, lambda: (mu_all * mu_all))
        self.assertDictEqual(mu_all * 2, mu_all_x2)
        self.assertDictEqual(2 * mu_all, mu_all_x2)
        # scalar division only
        self.assertDictEqual(mu_all_x2 / 2, mu_all)
        self.assertRaises(TypeError, lambda: (mu_all / mu_all))
        self.assertRaises(TypeError, lambda: (mu_all / feo2))
        # element-wise addition and subtraction
        self.assertDictEqual(mu_all + mu_all, mu_all_x2)
        self.assertDictEqual(mu_all_x2 - mu_all, mu_all)
        self.assertDictEqual(mu_fe + mu_o, mu_all)
        self.assertDictEqual(mu_fe - mu_o, mu_all - mu_o - mu_o)


if __name__ == "__main__":
    unittest.main()
| Bismarrck/pymatgen | pymatgen/core/tests/test_composition.py | Python | mit | 21,119 | [
"pymatgen"
] | 6c072ebf16bd1366a69e8a3bc2500e5e6b6f8fd83dd8f40cea1e29393e211402 |
from __future__ import division
import numpy as np
from . import _hoghistogram
from .._shared.utils import skimage_deprecation, warn
def _hog_normalize_block(block, method, eps=1e-5):
if method == 'L1':
out = block / (np.sum(np.abs(block)) + eps)
elif method == 'L1-sqrt':
out = np.sqrt(block / (np.sum(np.abs(block)) + eps))
elif method == 'L2':
out = block / np.sqrt(np.sum(block ** 2) + eps ** 2)
elif method == 'L2-Hys':
out = block / np.sqrt(np.sum(block ** 2) + eps ** 2)
out = np.minimum(out, 0.2)
out = out / np.sqrt(np.sum(out ** 2) + eps ** 2)
else:
raise ValueError('Selected block normalization method is invalid.')
return out
def _hog_channel_gradient(channel):
"""Compute unnormalized gradient image along `row` and `col` axes.
Parameters
----------
channel : (M, N) ndarray
Grayscale image or one of image channel.
Returns
-------
g_row, g_col : channel gradient along `row` and `col` axes correspondingly.
"""
g_row = np.empty(channel.shape, dtype=np.double)
g_row[0, :] = 0
g_row[-1, :] = 0
g_row[1:-1, :] = channel[2:, :] - channel[:-2, :]
g_col = np.empty(channel.shape, dtype=np.double)
g_col[:, 0] = 0
g_col[:, -1] = 0
g_col[:, 1:-1] = channel[:, 2:] - channel[:, :-2]
return g_row, g_col
def hog(image, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(3, 3),
        block_norm=None, visualize=False, visualise=None, transform_sqrt=False,
        feature_vector=True, multichannel=None):
    """Extract Histogram of Oriented Gradients (HOG) for a given image.
    Compute a Histogram of Oriented Gradients (HOG) by
    1. (optional) global image normalization
    2. computing the gradient image in `row` and `col`
    3. computing gradient histograms
    4. normalizing across blocks
    5. flattening into a feature vector
    Parameters
    ----------
    image : (M, N[, C]) ndarray
        Input image.
    orientations : int, optional
        Number of orientation bins.
    pixels_per_cell : 2-tuple (int, int), optional
        Size (in pixels) of a cell.
    cells_per_block : 2-tuple (int, int), optional
        Number of cells in each block.
    block_norm : str {'L1', 'L1-sqrt', 'L2', 'L2-Hys'}, optional
        Block normalization method:
        ``L1``
           Normalization using L1-norm. (default)
        ``L1-sqrt``
           Normalization using L1-norm, followed by square root.
        ``L2``
           Normalization using L2-norm.
        ``L2-Hys``
           Normalization using L2-norm, followed by limiting the
           maximum values to 0.2 (`Hys` stands for `hysteresis`) and
           renormalization using L2-norm.
        For details, see [3]_, [4]_.
    visualize : bool, optional
        Also return an image of the HOG.  For each cell and orientation bin,
        the image contains a line segment that is centered at the cell center,
        is perpendicular to the midpoint of the range of angles spanned by the
        orientation bin, and has intensity proportional to the corresponding
        histogram value.
    transform_sqrt : bool, optional
        Apply power law compression to normalize the image before
        processing. DO NOT use this if the image contains negative
        values. Also see `notes` section below.
    feature_vector : bool, optional
        Return the data as a feature vector by calling .ravel() on the result
        just before returning.
    multichannel : boolean, optional
        If True, the last `image` dimension is considered as a color channel,
        otherwise as spatial.
    Returns
    -------
    out : (n_blocks_row, n_blocks_col, n_cells_row, n_cells_col, n_orient) ndarray
        HOG descriptor for the image. If `feature_vector` is True, a 1D
        (flattened) array is returned.
    hog_image : (M, N) ndarray, optional
        A visualisation of the HOG image. Only provided if `visualize` is True.
    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Histogram_of_oriented_gradients
    .. [2] Dalal, N and Triggs, B, Histograms of Oriented Gradients for
           Human Detection, IEEE Computer Society Conference on Computer
           Vision and Pattern Recognition 2005 San Diego, CA, USA,
           https://lear.inrialpes.fr/people/triggs/pubs/Dalal-cvpr05.pdf,
           DOI:10.1109/CVPR.2005.177
    .. [3] Lowe, D.G., Distinctive image features from scale-invatiant
           keypoints, International Journal of Computer Vision (2004) 60: 91,
           http://www.cs.ubc.ca/~lowe/papers/ijcv04.pdf,
           DOI:10.1023/B:VISI.0000029664.99615.94
    .. [4] Dalal, N, Finding People in Images and Videos,
           Human-Computer Interaction [cs.HC], Institut National Polytechnique
           de Grenoble - INPG, 2006,
           https://tel.archives-ouvertes.fr/tel-00390303/file/NavneetDalalThesis.pdf
    Notes
    -----
    The presented code implements the HOG extraction method from [2]_ with
    the following changes: (I) blocks of (3, 3) cells are used ((2, 2) in the
    paper; (II) no smoothing within cells (Gaussian spatial window with sigma=8pix
    in the paper); (III) L1 block normalization is used (L2-Hys in the paper).
    Power law compression, also known as Gamma correction, is used to reduce
    the effects of shadowing and illumination variations. The compression makes
    the dark regions lighter. When the kwarg `transform_sqrt` is set to
    ``True``, the function computes the square root of each color channel
    and then applies the hog algorithm to the image.
    """
    # Deprecation shim: 'L1' remains the default until v0.15.
    if block_norm is None:
        block_norm = 'L1'
        warn('Default value of `block_norm`==`L1` is deprecated and will '
             'be changed to `L2-Hys` in v0.15. To supress this message '
             'specify explicitly the normalization method.',
             skimage_deprecation)
    image = np.atleast_2d(image)
    # Infer multichannel from dimensionality when the caller did not specify.
    if multichannel is None:
        multichannel = (image.ndim == 3)
    ndim_spatial = image.ndim - 1 if multichannel else image.ndim
    if ndim_spatial != 2:
        raise ValueError('Only images with 2 spatial dimensions are '
                         'supported. If using with color/multichannel '
                         'images, specify `multichannel=True`.')
    """
    The first stage applies an optional global image normalization
    equalisation that is designed to reduce the influence of illumination
    effects. In practice we use gamma (power law) compression, either
    computing the square root or the log of each color channel.
    Image texture strength is typically proportional to the local surface
    illumination so this compression helps to reduce the effects of local
    shadowing and illumination variations.
    """
    if transform_sqrt:
        image = np.sqrt(image)
    """
    The second stage computes first order image gradients. These capture
    contour, silhouette and some texture information, while providing
    further resistance to illumination variations. The locally dominant
    color channel is used, which provides color invariance to a large
    extent. Variant methods may also include second order image derivatives,
    which act as primitive bar detectors - a useful feature for capturing,
    e.g. bar like structures in bicycles and limbs in humans.
    """
    if image.dtype.kind == 'u':
        # convert uint image to float
        # to avoid problems with subtracting unsigned numbers
        image = image.astype('float')
    if multichannel:
        g_row_by_ch = np.empty_like(image, dtype=np.double)
        g_col_by_ch = np.empty_like(image, dtype=np.double)
        g_magn = np.empty_like(image, dtype=np.double)
        for idx_ch in range(image.shape[2]):
            g_row_by_ch[:, :, idx_ch], g_col_by_ch[:, :, idx_ch] = \
                _hog_channel_gradient(image[:, :, idx_ch])
            g_magn[:, :, idx_ch] = np.hypot(g_row_by_ch[:, :, idx_ch],
                                            g_col_by_ch[:, :, idx_ch])
        # For each pixel select the channel with the highest gradient magnitude
        idcs_max = g_magn.argmax(axis=2)
        rr, cc = np.meshgrid(np.arange(image.shape[0]),
                             np.arange(image.shape[1]),
                             indexing='ij',
                             sparse=True)
        g_row = g_row_by_ch[rr, cc, idcs_max]
        g_col = g_col_by_ch[rr, cc, idcs_max]
    else:
        g_row, g_col = _hog_channel_gradient(image)
    """
    The third stage aims to produce an encoding that is sensitive to
    local image content while remaining resistant to small changes in
    pose or appearance. The adopted method pools gradient orientation
    information locally in the same way as the SIFT [Lowe 2004]
    feature. The image window is divided into small spatial regions,
    called "cells". For each cell we accumulate a local 1-D histogram
    of gradient or edge orientations over all the pixels in the
    cell. This combined cell-level 1-D histogram forms the basic
    "orientation histogram" representation. Each orientation histogram
    divides the gradient angle range into a fixed number of
    predetermined bins. The gradient magnitudes of the pixels in the
    cell are used to vote into the orientation histogram.
    """
    s_row, s_col = image.shape[:2]
    c_row, c_col = pixels_per_cell
    b_row, b_col = cells_per_block
    n_cells_row = int(s_row // c_row)  # number of cells along row-axis
    n_cells_col = int(s_col // c_col)  # number of cells along col-axis
    # compute orientations integral images
    orientation_histogram = np.zeros((n_cells_row, n_cells_col, orientations))
    # The per-cell histogram accumulation is implemented in Cython
    # (_hoghistogram) for speed; it fills orientation_histogram in place.
    _hoghistogram.hog_histograms(g_col, g_row, c_col, c_row, s_col, s_row,
                                 n_cells_col, n_cells_row,
                                 orientations, orientation_histogram)
    # now compute the histogram for each cell
    hog_image = None
    # Deprecation shim: the British-spelled `visualise` overrides `visualize`.
    if visualise is not None:
        visualize = visualise
        warn('Argument `visualise` is deprecated and will '
             'be changed to `visualize` in v0.16', skimage_deprecation)
    if visualize:
        from .. import draw
        radius = min(c_row, c_col) // 2 - 1
        orientations_arr = np.arange(orientations)
        # set dr_arr, dc_arr to correspond to midpoints of orientation bins
        orientation_bin_midpoints = (
            np.pi * (orientations_arr + .5) / orientations)
        dr_arr = radius * np.sin(orientation_bin_midpoints)
        dc_arr = radius * np.cos(orientation_bin_midpoints)
        hog_image = np.zeros((s_row, s_col), dtype=float)
        for r in range(n_cells_row):
            for c in range(n_cells_col):
                for o, dr, dc in zip(orientations_arr, dr_arr, dc_arr):
                    centre = tuple([r * c_row + c_row // 2,
                                    c * c_col + c_col // 2])
                    rr, cc = draw.line(int(centre[0] - dc),
                                       int(centre[1] + dr),
                                       int(centre[0] + dc),
                                       int(centre[1] - dr))
                    hog_image[rr, cc] += orientation_histogram[r, c, o]
    """
    The fourth stage computes normalization, which takes local groups of
    cells and contrast normalizes their overall responses before passing
    to next stage. Normalization introduces better invariance to illumination,
    shadowing, and edge contrast. It is performed by accumulating a measure
    of local histogram "energy" over local groups of cells that we call
    "blocks". The result is used to normalize each cell in the block.
    Typically each individual cell is shared between several blocks, but
    its normalizations are block dependent and thus different. The cell
    thus appears several times in the final output vector with different
    normalizations. This may seem redundant but it improves the performance.
    We refer to the normalized block descriptors as Histogram of Oriented
    Gradient (HOG) descriptors.
    """
    # Number of (overlapping) block positions along each axis.
    n_blocks_row = (n_cells_row - b_row) + 1
    n_blocks_col = (n_cells_col - b_col) + 1
    normalized_blocks = np.zeros((n_blocks_row, n_blocks_col,
                                  b_row, b_col, orientations))
    for r in range(n_blocks_row):
        for c in range(n_blocks_col):
            block = orientation_histogram[r:r + b_row, c:c + b_col, :]
            normalized_blocks[r, c, :] = \
                _hog_normalize_block(block, method=block_norm)
    """
    The final step collects the HOG descriptors from all blocks of a dense
    overlapping grid of blocks covering the detection window into a combined
    feature vector for use in the window classifier.
    """
    if feature_vector:
        normalized_blocks = normalized_blocks.ravel()
    if visualize:
        return normalized_blocks, hog_image
    else:
        return normalized_blocks
| kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/skimage/feature/_hog.py | Python | gpl-3.0 | 13,056 | [
"Gaussian"
] | e9f7af865672b06dea1df10e2d6db58b9e31fff2452be5bfc7a279c04df46a09 |
'''
Finds CRISPR spacers in a metagenome; then looks to see if those spacers have good BLAST matches to other contigs in that metagenome.
The implication is the CRISPR spacer will be on a host contig and the BLAST match on the other contig could be a phage it is targeting.
'''
import sys
import argparse
import subprocess
import os
from Bio import SeqIO
from Bio.SeqUtils import GC
#convenient code that checks path for minced and blastn.
def which(program):
    """Locate *program* like the shell ``which`` command.

    If *program* contains a directory component, it is checked directly;
    otherwise each entry of the PATH environment variable is searched.
    Returns the full path of an executable match, or None.
    """
    import os

    def _runnable(candidate):
        # A usable hit must be a regular file with the execute bit set.
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    head, _ = os.path.split(program)
    if head:
        if _runnable(program):
            return program
    else:
        for directory in os.environ["PATH"].split(os.pathsep):
            candidate = os.path.join(directory.strip('"'), program)
            if _runnable(candidate):
                return candidate
    return None
##Check for valid input
if __name__ == "__main__":
__author__ = "Alex Crits-Christoph"
parser = argparse.ArgumentParser(description='Identifies CRISPR arrays in an assembled metagenome, and matches identified spacers to other contigs.')
parser.add_argument('-i','--input', help='Input assembly filename (FASTA format)',required=True)
args = parser.parse_args()
if args.input:
assembly = args.input
else:
print("ERROR: No assembly file indicated. Use like: crispr_matches -i assembly.fna")
sys.exit()
if not which("minced"):
print("ERROR: Please install the program minced in your path.")
sys.exit()
if not which("makeblastdb"):
print("ERROR: Please install BLAST+ (makeblastdb) in your path.")
sys.exit()
if not which("blastn"):
print("ERROR: Please install BLAST+ (blastn) in your path.")
sys.exit()
print("All requirements met. Creating new output directory...")
os.system("rm -rf " + assembly + "_crispr_matches")
os.system("mkdir " + assembly + "_crispr_matches")
os.system("cp " + assembly + " ./" + assembly+ "_crispr_matches")
filename = "./" + assembly+ "_crispr_matches/" + assembly
print("Finding CRISPRs with minced...")
output = subprocess.check_output(["minced", "-gff", "-spacers", filename], encoding='utf8')
# lines = output.split("\n")
#Read fasta file
handle = open(filename, "rU")
records_dict = {}
records = SeqIO.parse(handle, "fasta")
for record in records:
records_dict[record.id] = str(record.seq)
handle.close()
crispr_contigs = []
for line in output.split("\n"):
if line.strip() != '' and not line.startswith("#"):
contig = line.split()[0]
crispr_contigs.append(contig)
crisprs = open("./" + assembly+ "_crispr_matches/" + assembly + "_crisprs.fna", 'a+')
for contig in crispr_contigs:
crisprs.write(">" + contig + "\n")
crisprs.write(records_dict[contig] + "\n")
print(str(len(crispr_contigs)) + " contigs with CRISPRs found, stored in " + assembly+ "_crispr_matches/" + assembly + "_crisprs.fna")
print("Creating blastdb for non-CRISPR contigs...")
f = open("./" + assembly+ "_crispr_matches/" + assembly + '_nocrisprs.fna', 'a+')
for contig in records_dict:
if contig not in crispr_contigs:
f.write(">" + contig + "\n")
f.write(records_dict[contig] + "\n")
f.close()
output = subprocess.check_output("makeblastdb" + " -in " + "./" + assembly+ "_crispr_matches/" + assembly + '_nocrisprs.fna' + " -dbtype nucl", shell=True, encoding='utf8')
print("BLASTing spacers against non-CRISPR contigs...")
output = subprocess.check_output("blastn " + "-query " + "./" + assembly+ "_crispr_matches/" + assembly.split(".")[0] + "_spacers.fa" + " -db " + "./" + assembly+ "_crispr_matches/" + assembly + '_nocrisprs.fna' + " -outfmt 6", shell=True, encoding='utf8')
print("Host/CRISPR contig\tHost contig length\tSpacer #\tViral contig\tViral contig length\tMatch PID\tMatch Length\tHost GC%\tViral GC%")
for line in output.split("\n"):
try:
crispr_contig = line.split("_CRISPR")[0]
crispr_spacer = line.split("spacer_")[1].split()[0]
viral_contig = line.split("\t")[1]
crispr_length = str(len(records_dict[crispr_contig]))
viral_length = str(len(records_dict[viral_contig]))
pid = line.split("\t")[2]
length = line.split("\t")[3]
host_gc = str(GC(records_dict[crispr_contig]))
viral_gc = str(GC(records_dict[viral_contig]))
print(crispr_contig + "\t" + crispr_length + "\t" + crispr_spacer + "\t" + viral_contig + "\t" + viral_length + "\t" + pid + "\t" + length + "\t" + host_gc + "\t" + viral_gc)
except:
pass
| alexcritschristoph/CircHMP | crispr_matches.py | Python | gpl-2.0 | 4,395 | [
"BLAST"
] | 0a8787635389d6558905565da82b3ef357945e7dd44eac905ff81dbee5dc72b1 |
#!/usr/bin/python
# Configuration module: absolute paths to the external tools used by the
# pipeline scripts. Edit the values below to match your installation.
#---------PLEASE INDICATE ABSOLUTE PATHS FOR THE FOLLOWING VARIABLES-------------#
# These variables indicate the directories of all tools used in this pipeline
# It is also suggested that you make all tools executable
# See the README.txt for more information
#----------------------------------------------------
#-------------UNIVERSAL VARIABLES--------------------
#-----(required for all scripts in package)----------
#----------------------------------------------------
BLAST_DATABASE_DIRECTORY = '/home/mike/bin/BLAST_Databases/'
# Should contain formatted blast database files and unformatted database fasta files
BLAST_EXE_DIRECTORY = '/usr/bin/'
# Should contain BLAST executables, specifically blastp
#----------------------------------------------------
#-------------ANNOTATION PIPELINE VARIABLES----------
#----------------------------------------------------
# Annotation Pipeline utilizes the BLAST_DATABASE_DIRECTORY and BLAST_EXE_DIRECTORY variables above
# Please set those variables before attempting to use the Solazyme_Annotation_Pipeline.py script for pathway analysis
VELVETOPTIMISER_DIRECTORY = '/home/mike/bin/velvet_1.2.10/contrib/VelvetOptimiser-2.2.4/'
# Directory should contain VelvetOptimiser.pl, velveth, and velvetg
CONTIG_STATS_DIRECTORY = '/usr/bin/contig-stats.pl'
# Included in distribution
MAKER_DIRECTORY = '/home/mike/bin/maker/bin/'
# Should contain maker and all of its supporting scripts
INTERPROSCAN_DIRECTORY = '/home/mike/bin/interproscan-5-RC6/'
# Should contain interproscan.sh
#-----------------------------------------------------
#----------------PATHWAY MAPPING VARIABLES------------
#-----------------------------------------------------
# Pathway mapping utilizes the BLAST_DATABASE_DIRECTORY and BLAST_EXE_DIRECTORY variables above
# Please set those variables before attempting to use the Pathway_Mapping.py script for pathway analysis
KOBAS_EXE_DIRECTORY = '/home/mike/bin/kobas/scripts/'
# Should contain KOBAS's annotate.py and identify.py scripts
#-----------------------------------------------------
#----------------ncRNA prediction---------------------
#-----------------------------------------------------
# tRNAscan for tRNA gene prediction; PORTRAIT for non-coding RNA prediction.
TRNASCAN_DIRECTORY = '/home/mike/bin/'
TRNASCAN_EXE_NAME = 'trnascan-1.4'
PORTRAIT_DIRECTORY = '/home/mike/bin/Portrait/portrait-1.1/'
PORTRAIT_EXE_NAME = 'portrait'
| mdeletto/STAT-GPS | statgps_pipeline_variables.py | Python | unlicense | 2,374 | [
"BLAST"
] | 991f3a2709eba7c0438c3a975a65521249a75799a46b88dd530b49fddc6b97ef |
from area51.namd import NamdObject
from .. import node, NodeBase
from .. import NullScope, DictScope, LoggingScope
from pytest import raises
class FibThing(NamdObject, NodeBase):
    """Node-graph Fibonacci: ``FibThing(n).Fib`` is the n-th Fibonacci number,
    with ``Fib(0) == Fib(1) == 1``, computed recursively through ``node``
    members so the framework can observe each sub-computation.
    """
    def __init__(self, x):
        super().__init__(x)
        if x < 0:
            # Fib(0) and Fib(1) are both valid base cases, so only negative
            # arguments are rejected. (The previous message wrongly claimed
            # that all values below 1 were unsupported.)
            raise ValueError('cannot calculate Fib of a negative number')
        self.x = x

    @node
    def Fib(self):
        # Base cases return 1; otherwise recurse via fresh FibThing instances.
        if self.x == 0 or self.x == 1:
            return 1
        return FibThing(self.x-1).Fib + FibThing(self.x-2).Fib
def test_fib():
    # Exercise the node graph inside a LoggingScope: both base cases, two
    # recursive cases, and the constructor's rejection of negative inputs.
    with LoggingScope():
        assert FibThing(0).Fib == 1
        assert FibThing(1).Fib == 1
        assert FibThing(2).Fib == 2
        assert FibThing(3).Fib == 3
        with raises(ValueError):
            FibThing(-1).Fib
| thatcr/knowed | src/area51/nowd/test/test_simple_fib.py | Python | mit | 724 | [
"NAMD"
] | 582d14827aa5de969ccb0f0470082fcf8da09769115daf03cf3cb2c00836abd4 |
"""
Python implementation of the fast ICA algorithms.
Reference: Tables 8.3 and 8.4 page 196 in the book:
Independent Component Analysis, by Hyvarinen et al.
"""
# Authors: Pierre Lafaye de Micheaux, Stefan van der Walt, Gael Varoquaux,
# Bertrand Thirion, Alexandre Gramfort, Denis A. Engemann
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six import moves
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted
__all__ = ['fastica', 'FastICA']
def _gs_decorrelation(w, W, j):
"""
Orthonormalize w wrt the first j rows of W
Parameters
----------
w : ndarray of shape(n)
Array to be orthogonalized
W : ndarray of shape(p, n)
Null space definition
j : int < p
The no of (from the first) rows of Null space W wrt which w is
orthogonalized.
Notes
-----
Assumes that W is orthogonal
w changed in place
"""
w -= np.dot(np.dot(w, W[:j].T), W[:j])
return w
def _sym_decorrelation(W):
""" Symmetric decorrelation
i.e. W <- (W * W.T) ^{-1/2} * W
"""
s, u = linalg.eigh(np.dot(W, W.T))
# u (resp. s) contains the eigenvectors (resp. square roots of
# the eigenvalues) of W * W.T
return np.dot(np.dot(u * (1. / np.sqrt(s)), u.T), W)
def _ica_def(X, tol, g, fun_args, max_iter, w_init):
    """Deflationary FastICA using fun approx to neg-entropy function

    Used internally by FastICA.

    Extracts one component at a time; each new direction is orthogonalized
    against the previously found rows of W (deflation).
    """
    n_components = w_init.shape[0]
    W = np.zeros((n_components, n_components), dtype=X.dtype)
    n_iter = []

    # j is the index of the extracted component
    for j in range(n_components):
        w = w_init[j, :].copy()
        # normalize the starting direction to unit length
        w /= np.sqrt((w ** 2).sum())

        for i in moves.xrange(max_iter):
            gwtx, g_wtx = g(fast_dot(w.T, X), fun_args)

            # fixed-point update, then remove projections on earlier rows
            w1 = (X * gwtx).mean(axis=1) - g_wtx.mean() * w

            _gs_decorrelation(w1, W, j)

            w1 /= np.sqrt((w1 ** 2).sum())

            # converged when the direction stops changing (up to sign):
            # |<w1, w>| approaches 1
            lim = np.abs(np.abs((w1 * w).sum()) - 1)
            w = w1
            if lim < tol:
                break

        n_iter.append(i + 1)
        W[j, :] = w

    # report the worst-case iteration count across components
    return W, max(n_iter)
def _ica_par(X, tol, g, fun_args, max_iter, w_init):
    """Parallel FastICA.

    Used internally by FastICA --main loop

    All components are updated simultaneously and symmetrically
    decorrelated at every iteration.
    """
    W = _sym_decorrelation(w_init)
    del w_init
    p_ = float(X.shape[1])
    for ii in moves.xrange(max_iter):
        gwtx, g_wtx = g(fast_dot(W, X), fun_args)
        # fixed-point update for all rows at once, then re-orthonormalize
        W1 = _sym_decorrelation(fast_dot(gwtx, X.T) / p_
                                - g_wtx[:, np.newaxis] * W)
        del gwtx, g_wtx
        # builtin max, abs are faster than numpy counter parts.
        # convergence: diagonal of W1 W.T near +/-1 means directions stable
        lim = max(abs(abs(np.diag(fast_dot(W1, W.T))) - 1))
        W = W1
        if lim < tol:
            break
    else:
        # for/else: only reached when the loop exhausted max_iter
        warnings.warn('FastICA did not converge.' +
                      ' You might want' +
                      ' to increase the number of iterations.')

    return W, ii + 1
# Some standard non-linear functions.
# XXX: these should be optimized, as they can be a bottleneck.
def _logcosh(x, fun_args=None):
alpha = fun_args.get('alpha', 1.0) # comment it out?
x *= alpha
gx = np.tanh(x, x) # apply the tanh inplace
g_x = np.empty(x.shape[0])
# XXX compute in chunks to avoid extra allocation
for i, gx_i in enumerate(gx): # please don't vectorize.
g_x[i] = (alpha * (1 - gx_i ** 2)).mean()
return gx, g_x
def _exp(x, fun_args):
exp = np.exp(-(x ** 2) / 2)
gx = x * exp
g_x = (1 - x ** 2) * exp
return gx, g_x.mean(axis=-1)
def _cube(x, fun_args):
return x ** 3, (3 * x ** 2).mean(axis=-1)
def fastica(X, n_components=None, algorithm="parallel", whiten=True,
            fun="logcosh", fun_args=None, max_iter=200, tol=1e-04, w_init=None,
            random_state=None, return_X_mean=False, compute_sources=True,
            return_n_iter=False):
    """Perform Fast Independent Component Analysis.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.

    n_components : int, optional
        Number of components to extract. If None no dimension reduction
        is performed.

    algorithm : {'parallel', 'deflation'}, optional
        Apply a parallel or deflational FASTICA algorithm.

    whiten : boolean, optional
        If True perform an initial whitening of the data.
        If False, the data is assumed to have already been
        preprocessed: it should be centered, normed and white.
        Otherwise you will get incorrect results.
        In this case the parameter n_components will be ignored.

    fun : string or function, optional. Default: 'logcosh'
        The functional form of the G function used in the
        approximation to neg-entropy. Could be either 'logcosh', 'exp',
        or 'cube'.
        You can also provide your own function. It should return a tuple
        containing the value of the function, and of its derivative, in the
        point. Example:

        def my_g(x):
            return x ** 3, 3 * x ** 2

    fun_args : dictionary, optional
        Arguments to send to the functional form.
        If empty or None and if fun='logcosh', fun_args will take value
        {'alpha' : 1.0}

    max_iter : int, optional
        Maximum number of iterations to perform.

    tol: float, optional
        A positive scalar giving the tolerance at which the
        un-mixing matrix is considered to have converged.

    w_init : (n_components, n_components) array, optional
        Initial un-mixing array of dimension (n.comp,n.comp).
        If None (default) then an array of normal r.v.'s is used.

    random_state : int or RandomState
        Pseudo number generator state used for random sampling.

    return_X_mean : bool, optional
        If True, X_mean is returned too.

    compute_sources : bool, optional
        If False, sources are not computed, but only the rotation matrix.
        This can save memory when working with big data. Defaults to True.

    return_n_iter : bool, optional
        Whether or not to return the number of iterations.

    Returns
    -------
    K : array, shape (n_components, n_features) | None.
        If whiten is 'True', K is the pre-whitening matrix that projects data
        onto the first n_components principal components. If whiten is 'False',
        K is 'None'.

    W : array, shape (n_components, n_components)
        Estimated un-mixing matrix.
        The mixing matrix can be obtained by::

            w = np.dot(W, K.T)
            A = w.T * (w * w.T).I

    S : array, shape (n_components, n_samples) | None
        Estimated source matrix

    X_mean : array, shape (n_features, )
        The mean over features. Returned only if return_X_mean is True.

    n_iter : int
        If the algorithm is "deflation", n_iter is the
        maximum number of iterations run across all components. Else
        they are just the number of iterations taken to converge. This is
        returned only when return_n_iter is set to `True`.

    Notes
    -----

    The data matrix X is considered to be a linear combination of
    non-Gaussian (independent) components i.e. X = AS where columns of S
    contain the independent components and A is a linear mixing
    matrix. In short ICA attempts to `un-mix' the data by estimating an
    un-mixing matrix W where ``S = W K X.``

    This implementation was originally made for data of shape
    [n_features, n_samples]. Now the input is transposed
    before the algorithm is applied. This makes it slightly
    faster for Fortran-ordered input.

    Implemented using FastICA:
    `A. Hyvarinen and E. Oja, Independent Component Analysis:
    Algorithms and Applications, Neural Networks, 13(4-5), 2000,
    pp. 411-430`

    """
    random_state = check_random_state(random_state)
    fun_args = {} if fun_args is None else fun_args
    # make interface compatible with other decompositions
    # a copy is required only for non whitened data
    X = check_array(X, copy=whiten).T

    alpha = fun_args.get('alpha', 1.0)
    if not 1 <= alpha <= 2:
        raise ValueError('alpha must be in [1,2]')

    # resolve the contrast function: either a known name or a user callable
    if fun == 'logcosh':
        g = _logcosh
    elif fun == 'exp':
        g = _exp
    elif fun == 'cube':
        g = _cube
    elif callable(fun):
        def g(x, fun_args):
            return fun(x, **fun_args)
    else:
        exc = ValueError if isinstance(fun, six.string_types) else TypeError
        raise exc("Unknown function %r;"
                  " should be one of 'logcosh', 'exp', 'cube' or callable"
                  % fun)

    n, p = X.shape

    if not whiten and n_components is not None:
        n_components = None
        warnings.warn('Ignoring n_components with whiten=False.')

    if n_components is None:
        n_components = min(n, p)
    if (n_components > min(n, p)):
        # silently clamp (with a console message) rather than raising
        n_components = min(n, p)
        print("n_components is too large: it will be set to %s" % n_components)

    if whiten:
        # Centering the columns (ie the variables)
        X_mean = X.mean(axis=-1)
        X -= X_mean[:, np.newaxis]

        # Whitening and preprocessing by PCA
        u, d, _ = linalg.svd(X, full_matrices=False)

        del _
        K = (u / d).T[:n_components]  # see (6.33) p.140
        del u, d
        X1 = np.dot(K, X)
        # see (13.6) p.267 Here X1 is white and data
        # in X has been projected onto a subspace by PCA
        X1 *= np.sqrt(p)
    else:
        # X must be casted to floats to avoid typing issues with numpy
        # 2.0 and the line below
        X1 = as_float_array(X, copy=False)  # copy has been taken care of

    if w_init is None:
        # random initial un-mixing matrix drawn from the supplied RNG
        w_init = np.asarray(random_state.normal(size=(n_components,
                            n_components)), dtype=X1.dtype)
    else:
        w_init = np.asarray(w_init)
        if w_init.shape != (n_components, n_components):
            raise ValueError('w_init has invalid shape -- should be %(shape)s'
                             % {'shape': (n_components, n_components)})

    kwargs = {'tol': tol,
              'g': g,
              'fun_args': fun_args,
              'max_iter': max_iter,
              'w_init': w_init}

    if algorithm == 'parallel':
        W, n_iter = _ica_par(X1, **kwargs)
    elif algorithm == 'deflation':
        W, n_iter = _ica_def(X1, **kwargs)
    else:
        raise ValueError('Invalid algorithm: must be either `parallel` or'
                         ' `deflation`.')
    del X1

    # assemble the return tuple; its shape depends on whiten /
    # return_X_mean / return_n_iter, matching the documented contract above
    if whiten:
        if compute_sources:
            S = fast_dot(fast_dot(W, K), X).T
        else:
            S = None
        if return_X_mean:
            if return_n_iter:
                return K, W, S, X_mean, n_iter
            else:
                return K, W, S, X_mean
        else:
            if return_n_iter:
                return K, W, S, n_iter
            else:
                return K, W, S
    else:
        if compute_sources:
            S = fast_dot(W, X).T
        else:
            S = None
        if return_X_mean:
            if return_n_iter:
                return None, W, S, None, n_iter
            else:
                return None, W, S, None
        else:
            if return_n_iter:
                return None, W, S, n_iter
            else:
                return None, W, S
class FastICA(BaseEstimator, TransformerMixin):
    """FastICA: a fast algorithm for Independent Component Analysis.

    Parameters
    ----------
    n_components : int, optional
        Number of components to use. If none is passed, all are used.

    algorithm : {'parallel', 'deflation'}
        Apply parallel or deflational algorithm for FastICA.

    whiten : boolean, optional
        If whiten is false, the data is already considered to be
        whitened, and no whitening is performed.

    fun : string or function, optional. Default: 'logcosh'
        The functional form of the G function used in the
        approximation to neg-entropy. Could be either 'logcosh', 'exp',
        or 'cube'.
        You can also provide your own function. It should return a tuple
        containing the value of the function, and of its derivative, in the
        point. Example:

        def my_g(x):
            return x ** 3, 3 * x ** 2

    fun_args : dictionary, optional
        Arguments to send to the functional form.
        If empty and if fun='logcosh', fun_args will take value
        {'alpha' : 1.0}.

    max_iter : int, optional
        Maximum number of iterations during fit.

    tol : float, optional
        Tolerance on update at each iteration.

    w_init : None of an (n_components, n_components) ndarray
        The mixing matrix to be used to initialize the algorithm.

    random_state : int or RandomState
        Pseudo number generator state used for random sampling.

    Attributes
    ----------
    components_ : 2D array, shape (n_components, n_features)
        The unmixing matrix.

    mixing_ : array, shape (n_features, n_components)
        The mixing matrix.

    n_iter_ : int
        If the algorithm is "deflation", n_iter is the
        maximum number of iterations run across all components. Else
        they are just the number of iterations taken to converge.

    Notes
    -----
    Implementation based on
    `A. Hyvarinen and E. Oja, Independent Component Analysis:
    Algorithms and Applications, Neural Networks, 13(4-5), 2000,
    pp. 411-430`

    """
    def __init__(self, n_components=None, algorithm='parallel', whiten=True,
                 fun='logcosh', fun_args=None, max_iter=200, tol=1e-4,
                 w_init=None, random_state=None):
        super(FastICA, self).__init__()
        # sklearn convention: __init__ only stores the hyperparameters;
        # all computation and validation happens in fit
        self.n_components = n_components
        self.algorithm = algorithm
        self.whiten = whiten
        self.fun = fun
        self.fun_args = fun_args
        self.max_iter = max_iter
        self.tol = tol
        self.w_init = w_init
        self.random_state = random_state

    def _fit(self, X, compute_sources=False):
        """Fit the model

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        compute_sources : bool
            If False, sources are not computes but only the rotation matrix.
            This can save memory when working with big data. Defaults to False.

        Returns
        -------
            X_new : array-like, shape (n_samples, n_components)
        """
        fun_args = {} if self.fun_args is None else self.fun_args
        # delegate the actual algorithm to the module-level fastica function
        whitening, unmixing, sources, X_mean, self.n_iter_ = fastica(
            X=X, n_components=self.n_components, algorithm=self.algorithm,
            whiten=self.whiten, fun=self.fun, fun_args=fun_args,
            max_iter=self.max_iter, tol=self.tol, w_init=self.w_init,
            random_state=self.random_state, return_X_mean=True,
            compute_sources=compute_sources, return_n_iter=True)

        if self.whiten:
            # fold the whitening step into components_ so transform is one dot
            self.components_ = np.dot(unmixing, whitening)
            self.mean_ = X_mean
            self.whitening_ = whitening
        else:
            self.components_ = unmixing

        self.mixing_ = linalg.pinv(self.components_)

        if compute_sources:
            # name-mangled private cache of the fitted sources
            self.__sources = sources

        return sources

    def fit_transform(self, X, y=None):
        """Fit the model and recover the sources from X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        return self._fit(X, compute_sources=True)

    def fit(self, X, y=None):
        """Fit the model to X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        Returns
        -------
        self
        """
        self._fit(X, compute_sources=False)
        return self

    def transform(self, X, y=None, copy=True):
        """Recover the sources from X (apply the unmixing matrix).

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Data to transform, where n_samples is the number of samples
            and n_features is the number of features.
            (``y`` is accepted for API compatibility and ignored.)

        copy : bool (optional)
            If False, data passed to fit are overwritten. Defaults to True.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        check_is_fitted(self, 'mixing_')

        X = check_array(X, copy=copy)
        if self.whiten:
            # components_ already includes whitening, but the data must be
            # centered with the training mean first
            X -= self.mean_

        return fast_dot(X, self.components_.T)

    def inverse_transform(self, X, copy=True):
        """Transform the sources back to the mixed data (apply mixing matrix).

        Parameters
        ----------
        X : array-like, shape (n_samples, n_components)
            Sources, where n_samples is the number of samples
            and n_components is the number of components.
        copy : bool (optional)
            If False, data passed to fit are overwritten. Defaults to True.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_features)
        """
        check_is_fitted(self, 'mixing_')

        if copy:
            X = X.copy()
        X = fast_dot(X, self.mixing_.T)
        if self.whiten:
            # undo the centering applied during fit
            X += self.mean_

        return X
| ashhher3/scikit-learn | sklearn/decomposition/fastica_.py | Python | bsd-3-clause | 18,043 | [
"Gaussian"
] | 2d9bf9ab8c92c4c6dee686d0d270c1336a1e1123212e832fa0707a78a87c1a36 |
from __future__ import print_function
__author__ = """Alex "O." Holcombe, Charles Ludowici, """ ## double-quotes will be silently removed, single quotes will be left, eg, O'Connor
import time, sys, platform, os
from math import atan, atan2, pi, cos, sin, sqrt, ceil, radians, degrees
import numpy as np
import psychopy, psychopy.info
import copy
from psychopy import visual, sound, monitors, logging, gui, event, core, data
try:
from helpersAOH import accelerateComputer, openMyStimWindow
except Exception as e:
print(e); print('Problem loading helpersAOH. Check that the file helpersAOH.py in the same directory as this file')
print('Current directory is ',os.getcwd())
eyeTracking = False
if eyeTracking:
try:
import eyelinkEyetrackerForPsychopySUPA3
except Exception as e:
print(e)
print('Problem loading eyelinkEyetrackerForPsychopySUPA3. Check that the file eyelinkEyetrackerForPsychopySUPA3.py in the same directory as this file')
print('While a different version of pylink might make your eyetracking code work, your code appears to generally be out of date. Rewrite your eyetracker code based on the SR website examples')
#Psychopy v1.83.01 broke this, pylink version prevents EyelinkEyetrackerForPsychopySUPA3 stuff from importing. But what really needs to be done is to change eyetracking code to more modern calls, as indicated on SR site
eyeTracking = False
# Top-level experiment setup: screen/monitor configuration, the pre-run
# dialogs, data-file paths, and logging.
# NOTE(review): indentation below was reconstructed from a whitespace-stripped
# copy of the file -- confirm block nesting against the original script.
expname= "dot-jump"
demo = False; exportImages = False
autopilot = False
subject='test'

###############################
### Setup the screen parameters    ##############################################################################################
##
allowGUI = False
units='deg' #'cm'
fullscrn=False
waitBlank=False
if True: #just so I can indent all the below
    refreshRate= 85 *1.0;  #160 #set to the framerate of the monitor
    fullscrn=True; #show in small window (0) or full screen (1)
    scrn=True #which screen to display the stimuli. 0 is home screen, 1 is second screen

    # create a dialog from dictionary
    infoFirst = { 'Autopilot':autopilot, 'Check refresh etc':True, 'Use second screen':scrn, 'Fullscreen (timing errors if not)': fullscrn, 'Screen refresh rate': refreshRate }
    OK = gui.DlgFromDict(dictionary=infoFirst,
        title='MOT',
        order=['Autopilot','Check refresh etc', 'Use second screen', 'Screen refresh rate', 'Fullscreen (timing errors if not)'],
        tip={'Check refresh etc': 'To confirm refresh rate and that can keep up, at least when drawing a grating',
            'Use second Screen': ''},
        )
    if not OK.OK:
        print('User cancelled from dialog box'); logging.info('User cancelled from dialog box'); core.quit()
    autopilot = infoFirst['Autopilot']
    checkRefreshEtc = infoFirst['Check refresh etc']
    scrn = infoFirst['Use second screen']
    print('scrn = ',scrn, ' from dialog box')
    fullscrn = infoFirst['Fullscreen (timing errors if not)']
    refreshRate = infoFirst['Screen refresh rate']

    #monitor parameters
    widthPix = 1280 #1440  #monitor width in pixels
    heightPix =1024  #900 #monitor height in pixels
    monitorwidth = 40.5 #28.5 #monitor width in centimeters
    viewdist = 55.; #cm
    pixelperdegree = widthPix/ (atan(monitorwidth/viewdist) /np.pi*180)
    bgColor = [-1,-1,-1] #black background
    monitorname = 'testMonitor' # 'mitsubishi' #in psychopy Monitors Center
    mon = monitors.Monitor(monitorname,width=monitorwidth, distance=viewdist)#fetch the most recent calib for this monitor
    mon.setSizePix( (widthPix,heightPix) )
    myWin = openMyStimWindow(mon,widthPix,heightPix,bgColor,allowGUI,units,fullscrn,scrn,waitBlank)
    myWin.setRecordFrameIntervals(False)

    trialsPerCondition = 2 #default value

    refreshMsg2 = ''
    if not checkRefreshEtc:
        refreshMsg1 = 'REFRESH RATE WAS NOT CHECKED'
        refreshRateWrong = False
    else: #checkRefreshEtc
        runInfo = psychopy.info.RunTimeInfo(
                win=myWin,    ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
                refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
                verbose=True, ## True means report on everything
                userProcsDetailed=True  ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
                )
        print('Finished runInfo- which assesses the refresh and processes of this computer')
        refreshMsg1 = 'Median frames per second ='+ str( np.round(1000./runInfo["windowRefreshTimeMedian_ms"],1) )
        refreshRateTolerancePct = 3
        pctOff = abs( (1000./runInfo["windowRefreshTimeMedian_ms"]-refreshRate) / refreshRate)
        refreshRateWrong =  pctOff > (refreshRateTolerancePct/100.)
        if refreshRateWrong:
            refreshMsg1 += ' BUT'
            refreshMsg1 += ' program assumes ' + str(refreshRate)
            refreshMsg2 =  'which is off by more than' + str(round(refreshRateTolerancePct,0)) + '%!!'
        else:
            refreshMsg1 += ', which is close enough to desired val of ' + str( round(refreshRate,1) )
    myWinRes = myWin.size
    myWin.allowGUI =True
    myWin.close() #have to close window to show dialog box
##
### END Setup of the screen parameters    ##############################################################################################
####################################
askUserAndConfirmExpParams = True

if autopilot:
    subject = 'autoTest'
###############################
### Ask user exp params    ##############################################################################################
## askUserAndConfirmExpParams
if askUserAndConfirmExpParams:
    dlgLabelsOrdered = list() #new dialog box
    myDlg = gui.Dlg(title=expname, pos=(200,400))
    if not autopilot:
        myDlg.addField('Subject code :', subject)
        dlgLabelsOrdered.append('subject')
    else:
        myDlg.addField('Subject code :', subject)
        dlgLabelsOrdered.append('subject')
        myDlg.addField('autoPilotTime:', 0, tip='Auto response time relative to cue')
        myDlg.addField('randomTime:',False, tip = 'Add (rounded) gaussian N(0,2) error to time offset?')
        myDlg.addField('autoPilotSpace:',0, tip='Auto response position relative to cue')
        myDlg.addField('randomSpace:',False, tip = 'Add (rounded) gaussian N(0,2) error to space offset?')
        dlgLabelsOrdered.append('autoPilotTime')
        dlgLabelsOrdered.append('randomTime')
        dlgLabelsOrdered.append('autoPilotSpace')
        dlgLabelsOrdered.append('randomSpace')
    myDlg.addField('Trials per condition (default=' + str(trialsPerCondition) + '):', trialsPerCondition, tip=str(trialsPerCondition))
    dlgLabelsOrdered.append('trialsPerCondition')
    pctCompletedBreak = 50

    myDlg.addText(refreshMsg1, color='Black')
    if refreshRateWrong:
        myDlg.addText(refreshMsg2, color='Red')
    msgWrongResolution = ''
    if checkRefreshEtc and (not demo) and (myWinRes != [widthPix,heightPix]).any():
        msgWrongResolution = 'Instead of desired resolution of '+ str(widthPix)+'x'+str(heightPix)+ ' pixels, screen apparently '+ str(myWinRes[0])+ 'x'+ str(myWinRes[1])
        myDlg.addText(msgWrongResolution, color='Red')
        print(msgWrongResolution); logging.info(msgWrongResolution)

    myDlg.addText('Note: to abort press ESC at response time', color='DimGrey') #works in PsychoPy1.84
    #myDlg.addText('Note: to abort press ESC at a trials response screen', color=[-1.,1.,-1.]) #color names not working for some pre-1.84 versions
    myDlg.show()

    if myDlg.OK: #unpack information from dialogue box
        thisInfo = myDlg.data #this will be a list of data returned from each field added in order
        if autopilot:
            name=thisInfo[dlgLabelsOrdered.index('subject')]
            if len(name) > 0: #if entered something
                subject = name #change subject default name to what user entered
        trialsPerCondition = int( thisInfo[ dlgLabelsOrdered.index('trialsPerCondition') ] ) #convert string to integer
        # NOTE(review): the four lookups below index fields that are only added
        # to the dialog in the autopilot branch above -- in non-autopilot mode
        # dlgLabelsOrdered.index(...) would raise ValueError; confirm intent.
        autoSpace = thisInfo[dlgLabelsOrdered.index('autoPilotSpace')]
        autoTime = thisInfo[dlgLabelsOrdered.index('autoPilotTime')]
        randomTime = thisInfo[dlgLabelsOrdered.index('randomTime')]
        randomSpace = thisInfo[dlgLabelsOrdered.index('randomSpace')]
        print('trialsPerCondition=',trialsPerCondition)
        logging.info('trialsPerCondition ='+str(trialsPerCondition))
    else:
        print('User cancelled from dialog box.'); logging.info('User cancelled from dialog box')
        logging.flush()
        core.quit()
### Ask user exp params
## END askUserAndConfirmExpParams ###############################
##############################################################################################

if os.path.isdir('.'+os.sep+'dataRaw'):
    dataDir='dataRaw'
else:
    msg= 'dataRaw directory does not exist, so saving data in present working directory'
    print(msg); logging.info(msg)
    dataDir='.'
timeAndDateStr = time.strftime("%d%b%Y_%H-%M", time.localtime())
fileNameWithPath = dataDir+os.sep+subject+ '_' + expname+timeAndDateStr
if not demo and not exportImages:
    saveCodeCmd = 'cp \'' + sys.argv[0] + '\' '+ fileNameWithPath + '.py'
    os.system(saveCodeCmd)  #save a copy of the code as it was when that subject was run
    logF = logging.LogFile(fileNameWithPath+'.log',
        filemode='w',#if you set this to 'a' it will append instead of overwriting
        level=logging.INFO)#info, data, warnings, and errors will be sent to this logfile
if demo or exportImages:
    logging.console.setLevel(logging.ERROR)  #only show this level's and higher messages
logging.console.setLevel(logging.WARNING) #DEBUG means set the console to receive nearly all messges, INFO is for everything else, INFO, EXP, DATA, WARNING and ERROR
if refreshRateWrong:
    logging.error(refreshMsg1+refreshMsg2)
else: logging.info(refreshMsg1+refreshMsg2)

longerThanRefreshTolerance = 0.27
longFrameLimit = round(1000./refreshRate*(1.0+longerThanRefreshTolerance),3) # round(1000/refreshRate*1.5,2)
msg = 'longFrameLimit='+ str(longFrameLimit) +' Recording trials where one or more interframe interval exceeded this figure '
logging.info(msg); print(msg)
if msgWrongResolution != '':
    logging.error(msgWrongResolution)

# re-open the stimulus window (the first one was closed to show the dialogs)
myWin = openMyStimWindow(mon,widthPix,heightPix,bgColor,allowGUI,units,fullscrn,scrn,waitBlank)
runInfo = psychopy.info.RunTimeInfo(
        win=myWin,    ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
        refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
        verbose=True, ## True means report on everything
        userProcsDetailed=True  ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
        )
msg = 'second window opening runInfo mean ms='+ str( runInfo["windowRefreshTimeAvg_ms"] )
logging.info(msg); print(msg)
logging.info(runInfo)
logging.info('gammaGrid='+str(mon.getGammaGrid()))
logging.info('linearizeMethod='+str(mon.getLinearizeMethod()))
####Functions. Save time by automating processes like stimulus creation and ordering
############################################################################
def oneFrameOfStim(n, itemFrames, SOAFrames, cueFrames, cuePos, trialObjects):
    """Draw the content of video frame ``n`` of a trial.

    Each stimulus in trialObjects gets one SOA-long slot; within a slot the
    item is visible for the first itemFrames frames. The cue ring (module
    global ``cue``) is drawn on top during the cued item's window.
    Relies on the module globals myWin, bgColor and cue. Always returns True.
    """
    cueFrame = cuePos * SOAFrames   # first frame on which the cue appears
    cueMax = cueFrame + cueFrames   # first frame after the cue disappears
    showIdx = int(np.floor(n/SOAFrames))  # which item of the stream is current
    #objectIdxs = [i for i in range(len(trialObjects))]
    #objectIdxs.append(len(trialObjects)-1) #AWFUL hack
    #print(objectIdxs[showIdx])
    #floored quotient
    obj = trialObjects[showIdx]
    # item is drawn only during the first itemFrames frames of its SOA slot
    drawObject = n%SOAFrames < itemFrames
    if drawObject:
        myWin.color = bgColor
        if n >= cueFrame and n < cueMax:
            #print('cueFrames! n is', n,'. cueFrame is ,', cueFrame, 'cueFrame + cueFrames is ', (cueFrame + cueFrames))
            #if n%2 == 0: #This should make it flash, but it might be too fast
            #print('cue flash')
            #myWin.color = (0,0,0)
            obj.draw()
            cue.draw()
        else:
            obj.draw()
    return True
#objects: Stimuli to display or
#cue: cue stimulus or stimuli
#timing parameters: Could be item duration, soa and isi. i.e. if SOA+Duration % n == 0: stimulus.setColor(stimulusColor)
#bgColor and stimulusColor: if displaying and hiding stimuli, i.e. for RSVP
#movementVector: direction and distance of movement if moving stimuli
def oneTrial(stimuli):
    """Run one trial: shuffle the presentation order of ``stimuli`` and show
    them one per SOA slot for ``trialFrames`` frames.

    Uses module globals myWin, trialClock, trialFrames, itemFrames, SOAFrames,
    cueFrames, cuePos and fixation.

    Returns (True, shuffledStimuli, dotOrder, ts) where ts holds the time of
    each flip relative to trial start (for later timing checks).
    """
    dotOrder = np.arange(len(stimuli))
    np.random.shuffle(dotOrder)
    print(dotOrder)
    shuffledStimuli = [stimuli[i] for i in dotOrder]
    ts = []
    myWin.flip(); myWin.flip()#Make sure raster at top of screen (unless not in blocking mode), and give CPU a chance to finish other tasks
    t0 = trialClock.getTime()
    for n in range(trialFrames):
        fixation.draw()
        #print(n//SOAFrames)
        oneFrameOfStim(n, itemFrames, SOAFrames, cueFrames, cuePos, shuffledStimuli)
        myWin.flip()
        # timestamp each flip so checkTiming can find dropped frames
        ts.append(trialClock.getTime() - t0)
    return True, shuffledStimuli, dotOrder, ts
def getResponse(trialStimuli):
    """Collect the participant's (or autopilot's) spatial response.

    Returns (accuracy, item, expStop, mousePos):
      accuracy -- True when the chosen item is the cued one,
      item     -- the stimulus object selected,
      expStop  -- True when the user aborted with space/ESC,
      mousePos -- the click position (or the simulated one in autopilot).

    Uses module globals autopilot, nDots, cuePos, autoTime/autoSpace,
    randomTime/randomSpace, stimuli, myWin.
    """
    if autopilot:
        spacing = 360./nDots
        # serial position of the simulated response in the stream
        autoResponseIdx = cuePos + autoTime #The serial position of the response in the stream
        if randomTime:
            autoResponseIdx += int(round( np.random.normal(0,2) ))
        itemAtTemporalSelection = trialStimuli[autoResponseIdx]
        # map the temporally-selected item back to its spatial serial position
        unshuffledPositions = [dot.pos.tolist() for dot in stimuli]
        itemSpatial = unshuffledPositions.index(itemAtTemporalSelection.pos.tolist())
        itemSpatial = itemSpatial + autoSpace
        if randomSpace:
            itemSpatial += int(round( np.random.normal(0,2) ))
        # NOTE(review): wrap-around assumes 24 positions (indices 0..23);
        # this hard-codes nDots == 24 -- confirm.
        while itemSpatial>23:
            itemSpatial = itemSpatial - 23
        #Once we have temporal pos of selected item relative to start of the trial
        #Need to get the serial spatial pos of this item, so that we can select items around it based on the autoSpace offset
        #print('itemSpatial is: ', itemSpatial)
        selectionTemporal = trialStimuli.index(stimuli[itemSpatial]) #This seems redundant, but it tests that the item we've selected in space is the cued item in time. if the temporal and spatial offsets are 0, it should be the same as cuePos.
        accuracy = cuePos == selectionTemporal
        mousePos = (stimuli[itemSpatial].pos[0],stimuli[itemSpatial].pos[1])
        expStop = False
        item = stimuli[itemSpatial]
        return accuracy, item, expStop, mousePos
    elif not autopilot:
        myMouse = event.Mouse(visible = False,win=myWin)
        responded = False
        expStop = False
        event.clearEvents()
        mousePos = (1e6,1e6)
        escape = event.getKeys()
        myMouse.setPos((0,0))
        myMouse.setVisible(True)
        # poll until a click (response) or an abort key
        while not responded:
            for item in trialStimuli:
                item.draw()
            myWin.flip()
            button = myMouse.getPressed()
            mousePos = myMouse.getPos()
            escapeKey = event.getKeys()
            if button[0]:
                print('click detected')
                responded = True
                print('getResponse mousePos:',mousePos)
            elif len(escapeKey)>0:
                if escapeKey[0] == 'space' or escapeKey[0] == 'ESCAPE':
                    expStop = True
                    responded = True
                    return False, np.random.choice(trialStimuli), expStop, (0,0)
        # pick the stimulus closest to the click
        clickDistances = []
        for item in trialStimuli:
            x = mousePos[0] - item.pos[0]
            y = mousePos[1] - item.pos[1]
            distance = sqrt(x**2 + y**2)
            clickDistances.append(distance)
        if not expStop:
            minDistanceIdx = clickDistances.index(min(clickDistances))
            accuracy = minDistanceIdx == cuePos
            item = trialStimuli[minDistanceIdx]
            myMouse.setVisible(False)
            return accuracy, item, expStop, mousePos
def drawStimuli(nDots, radius, center, stimulusObject, sameEachTime = True):
    """Position nDots stimuli evenly around a circle of the given radius.

    When sameEachTime is True, each position gets a shallow copy of
    stimulusObject; otherwise stimulusObject must be a list/tuple of
    exactly nDots items, used one per position. Returns the list of
    positioned stimuli, or None (after printing a message) on bad arguments.
    """
    if len(center) != 2:
        print('Center coords must be list of length 2')
        return None
    if not sameEachTime and not isinstance(stimulusObject, (list, tuple)):
        print('You want different objects in each position, but your stimuli is not a list or tuple')
        return None
    if not sameEachTime and isinstance(stimulusObject, (list, tuple)) and len(stimulusObject)!=nDots:
        print('You want different objects in each position, but the number of positions does not equal the number of items')
        return None

    step = 360./nDots
    # cos/sin of multiples of pi/2 pick up float error (cos(pi/2) ~ 6e-17),
    # so the four cardinal angles get exact coordinates.
    cardinal = {0: (radius, 0), 90: (0, radius), 180: (-radius, 0), 270: (0, -radius)}
    positioned = []
    for idx in range(nDots):
        theta = idx * step
        if theta in cardinal:
            xpos, ypos = cardinal[theta]
        else:
            xpos = radius * cos(radians(theta))
            ypos = radius * sin(radians(theta))
        item = copy.copy(stimulusObject) if sameEachTime else stimulusObject[idx]
        item.pos = (xpos, ypos)
        positioned.append(item)
    return positioned
def checkTiming(ts):
    """Count dropped/long frames in a trial's flip timestamps.

    ts -- list of flip times in seconds, relative to trial start (as
    returned by oneTrial). A frame counts as "long" when its interframe
    interval exceeds the expected refresh period by more than 30%.
    Uses the module globals refreshRate and trialFrames.
    Returns the number of long frames.
    """
    interframeIntervals = np.diff(ts) * 1000  # convert s -> ms
    #print(interframeIntervals)
    frameTimeTolerance=.3 #proportion longer than refreshRate that will not count as a miss
    longFrameLimit = np.round(1000/refreshRate*(1.0+frameTimeTolerance),2)
    idxsInterframeLong = np.where( interframeIntervals > longFrameLimit ) [0] #frames that exceeded 150% of expected duration
    numCasesInterframeLong = len( idxsInterframeLong )
    if numCasesInterframeLong > 0:
        print(numCasesInterframeLong,'frames of', trialFrames,'were longer than',str(1000/refreshRate*(1.0+frameTimeTolerance)))
    return numCasesInterframeLong
##Set up stimuli
# Prototype dot; drawStimuli() copies it to nDots positions around a ring.
stimulus = visual.Circle(myWin, radius = .2, fillColor = (1,1,1) )
nDots = 24
radius = 4          # ring radius (deg, in window units)
center = (0,0)      # ring center
sameEachTime = True # reuse copies of one stimulus object at every position
#(nDots, radius, center, stimulusObject, sameEachTime = True)
stimuli = drawStimuli(nDots, radius, center, stimulus, sameEachTime)
#print(stimuli)
#print('length of stimuli object', len(stimuli))
######Create visual objects, noise masks, response prompts etc. ###########
######Draw your stimuli here if they don't change across trials, but other parameters do (like timing or distance)
######If you want to automate your stimuli. Do it in a function below and save clutter.
######For instance, maybe you want random pairs of letters. Write a function!
###########################################################################
fixSize = .1
fixation= visual.Circle(myWin, radius = fixSize , fillColor = (1,1,1), units=units)
# Cue ring drawn slightly outside the dot ring.
cue = visual.Circle(myWin, radius = radius + 2, fillColor = None, lineColor = (1,1,1), units = units)
###Trial timing parameters (durations in ms, converted to frames below)
SOAMS = 10                # stimulus-onset asynchrony between successive dots
itemMS = 8                # time each dot is visible
ISIMS = SOAMS - itemMS    # blank gap between dots
trialMS = SOAMS * nDots   # total trial duration
cueMS = itemMS            # cue shown for one item duration
# Convert durations to integer frame counts at the monitor refresh rate.
SOAFrames = int(np.floor(SOAMS/(1000./refreshRate)))
itemFrames = int(np.floor(itemMS/(1000./refreshRate)))
ISIFrames = int(np.floor(ISIMS/(1000./refreshRate)))
trialFrames = int(nDots*SOAFrames)
cueFrames = int(np.floor(cueMS/(1000./refreshRate)))
print('cueFrames=',cueFrames)
print('itemFrames=',itemFrames)
print('refreshRate =', refreshRate)
print('cueMS from frames =', cueFrames*(1000./refreshRate))
print('num of SOAs in the trial:', trialFrames/SOAFrames)
##Factorial design
numResponsesPerTrial = 1 #default. Used to create headers for dataFile
stimList = []
#cuePositions = [dot for dot in range(nDots) if dot not in [0,nDots-1]]
# Single fixed cue position (serial position 10) for this version.
cuePositions = [10]
print('cuePositions: ',cuePositions)
#cuePositions = cuePositions[2:(nDots-3)] #drop the first and final two dots
#Set up the factorial design (list of all conditions)
for cuePos in cuePositions:
    stimList.append({'cuePos':cuePos})
trials = data.TrialHandler(stimList, nReps = trialsPerCondition)
#print(trials)
####Create output file###
#########################################################################
dataFile = open(fileNameWithPath + '.txt', 'w')
numResponsesPerTrial = 1
#headers for initial datafile rows, they don't get repeated. These appear in the file in the order they appear here.
oneOffHeaders = [
    'subject',
    'task',
    'staircase',
    'trialNum'
]
# All headers are written tab-separated on a single line.
for header in oneOffHeaders:
    print(header, '\t', end='', file=dataFile)
#Headers for duplicated datafile rows. These are repeated using numResponsesPerTrial. For instance, we might have two responses in a trial.
duplicatedHeaders = [
    'responseSpatialPos',
    'responseX',
    'responseY',
    'correctX',
    'correctY',
    'clickX',
    'clickY',
    'accuracy',
    'responsePosInStream',
    'correctPosInStream'
]
if numResponsesPerTrial == 1:
    for header in duplicatedHeaders:
        print(header, '\t', end='', file=dataFile)
elif numResponsesPerTrial > 1:
    # Suffix each header with the response number when multiple responses
    # are collected per trial.
    for response in range(numResponsesPerTrial):
        for header in duplicatedHeaders:
            print(header+str(response), '\t', end='', file=dataFile)
# One column per serial position, recording the spatial position shown there.
for pos in range(nDots):
    print('position'+str(pos),'\t',end='',file=dataFile)
#Headers done. Do a new line
print('longFrames',file=dataFile)
# Per-trial clocks (trialClock is also handed to the eyetracker below).
trialClock = core.Clock()
stimClock = core.Clock()
if eyeTracking:
    if getEyeTrackingFileFromEyetrackingMachineAtEndOfExperiment:
        eyeMoveFile=('EyeTrack_'+subject+'_'+timeAndDateStr+'.EDF')
    tracker=Tracker_EyeLink(myWin,trialClock,subject,1, 'HV5',(255,255,255),(0,0,0),False,(widthPix,heightPix))
# Main trial loop: runs until all trials are done or the observer quits.
while trialNum < trials.nTotal and expStop==False:
    fixation.draw()
    myWin.flip()
    if not autopilot:
        core.wait(1)
    trial = trials.next()
    # print('trial idx is',trials.thisIndex)
    cuePos = trial.cuePos
    # print(cuePos)
    print("Doing trialNum",trialNum)
    trialDone, trialStimuli, trialStimuliOrder, ts = oneTrial(stimuli)
    #Shift positions so that the list starts at 1, which is positioned at (0,radius), and increases clockwise. This is what the MM code expects
    MMPositions = list() #Mixture modelling positions
    for dotPos in trialStimuliOrder:
        if dotPos < (nDots/4 - 1): #Because python indexes start at 0, 5 is the 6th pos.
            MMPositions.append(dotPos + 20)
        elif dotPos >= (nDots/4 -1):
            MMPositions.append(dotPos -4)
    nBlips = checkTiming(ts)
    # print(trialStimuliOrder)
    if trialDone:
        accuracy, response, expStop, clickPos = getResponse(trialStimuli)
        responseCoord = response.pos.tolist()
        spatialRelativeToXAxis = [item.pos.tolist() for item in stimuli]
        try:
            responseSpatialRelativeToXAxis = spatialRelativeToXAxis.index(responseCoord)
        except ValueError:
            print('coord not in list')
        # NOTE(review): if the coordinate was not found above,
        # responseSpatialRelativeToXAxis is unbound here -- presumably
        # getResponse always snaps the click to a stimulus; confirm.
        if responseSpatialRelativeToXAxis < (nDots/4-1):
            responseSpatial = responseSpatialRelativeToXAxis + 19
        elif responseSpatialRelativeToXAxis >= (nDots/4-1):
            responseSpatial = responseSpatialRelativeToXAxis - 5
        trialPositions = [item.pos.tolist() for item in trialStimuli]
        responseTemporal = trialPositions.index(responseCoord)
        # print('trial positions in sequence:',trialPositions)
        # print('position of item nearest to click:',responseSpatial)
        # print('Position in sequence of item nearest to click:',responseTemporal)
        correctSpatial = trialStimuli[cuePos].pos
        correctTemporal = cuePos
        # One tab-separated row per trial, matching the headers written above.
        print(subject,'\t',
              'dot-jump','\t',
              'False','\t',
              trialNum,'\t',
              responseSpatial,'\t',
              responseCoord[0],'\t',
              responseCoord[1],'\t',
              correctSpatial[0],'\t',
              correctSpatial[1],'\t',
              clickPos[0],'\t',
              clickPos[1],'\t',
              accuracy,'\t',
              responseTemporal,'\t',
              correctTemporal,'\t',
              end='',
              file = dataFile
              )
        for dot in range(nDots):
            print(MMPositions[dot], '\t',end='', file=dataFile)
        print(nBlips, file=dataFile)
        trialNum += 1
        dataFile.flush()
    if expStop:
        print('Participant cancelled experiment on trial', trialNum)
dataFile.flush()
| alexholcombe/dot-jump | dataRaw/Fixed Cue/test_dot-jump25Oct2016_11-10.py | Python | gpl-3.0 | 25,144 | [
"Gaussian"
] | e451c29103f44f069b02288cd4263bb36d493caef934ac0a7427176a4098ba4c |
"""
Gaussian Process Regression Implementation using Theano for symbolic gradient computation.
Author: Shen Xu
"""
# To speed Theano up, create ram disk: mount -t tmpfs -o size=512m tmpfs /mnt/randisk
# Then use flag THEANO_FLAGS=base_compiledir=/mnt/randisk python script.py
# NOTE: Python 2 source (print statements, cPickle, iteritems).
import sys, os
import theano
import theano.tensor as T
import theano.sandbox.linalg as sT
import numpy as np
import cPickle
from copy import deepcopy
import pdb
print 'Theano version: ' + theano.__version__ + ', base compile dir: ' + theano.config.base_compiledir
# Favor runtime speed over compilation speed; do not re-optimize graphs
# loaded from disk.
theano.config.mode= 'FAST_RUN'
theano.config.optimizer = 'fast_run'
theano.config.reoptimize_unpickled_function = False
def np_uniform_scalar(scale=1):
    """Return a deterministic uniform draw from [-scale, scale).

    The RNG is re-seeded with a fixed seed on every call, so repeated calls
    with the same ``scale`` always return the same value.
    """
    np.random.seed(1984)
    return np.random.uniform(-scale, scale)
def shared_scalar(val=0., dtype=theano.config.floatX, name=None):
    """Wrap a scalar value in a Theano shared variable.

    Args:
        val: initial scalar value.
        dtype: numpy/theano dtype to cast to (defaults to floatX).
        name: optional variable name.  Previously this argument was
            accepted but silently ignored; it is now forwarded to
            ``theano.shared`` so the variable shows up named in debug output.

    Returns:
        A theano shared variable holding ``val``.
    """
    return theano.shared(np.cast[dtype](val), name=name)
class GP_Theano(object):
def __init__(self,
initial_params=None):
print 'Setting up variables ...'
# Parameters
if initial_params is None:
initial_params = {'mean':None,
'sigma_n':0.+np_uniform_scalar(0),
'sigma_f':0.+np_uniform_scalar(0),
'l_k':0.+np.uniform_scalar(0)}
if initial_params['mean'] == None:
self.mean = shared_scalar(0.)
self.meanfunc = 'zero'
else:
self.mean = shared_scalar(initial_params['mean'])
self.meanfunc = 'const'
self.sigma_n = shared_scalar(initial_params['sigma_n'])
self.sigma_f = shared_scalar(initial_params['sigma_f'])
self.l_k = shared_scalar(initial_params['l_k'])
# Variables
X,Y,x_test = T.dmatrices('X','Y','x_test')
print 'Setting up model ...'
K, Ks, Kss, y_test_mu, y_test_var, log_likelihood,L,alpha,V,fs2,sW = self.get_model(X, Y, x_test)
print 'Compiling model ...'
inputs = {'X': X, 'Y': Y, 'x_test': x_test}
# solve a bug with derivative wrt inputs not in the graph
z = 0.0*sum([T.sum(v) for v in inputs.values()])
f = zip(['K', 'Ks', 'Kss', 'y_test_mu', 'y_test_var', 'log_likelihood',
'L','alpha','V','fs2','sW'],
[K, Ks, Kss, y_test_mu, y_test_var, log_likelihood,
L, alpha,V,fs2,sW])
self.f = {n: theano.function(inputs.values(), f+z, name=n, on_unused_input='ignore')
for n, f in f}
if self.meanfunc == 'zero':
wrt = {'sigma_n':self.sigma_n, 'sigma_f':self.sigma_f, 'l_k':self.l_k}
else:
wrt = {'mean':self.mean,'sigma_n':self.sigma_n, 'sigma_f':self.sigma_f, 'l_k':self.l_k}
self.g = {vn: theano.function(inputs.values(), T.grad(log_likelihood,vv),
name=vn,on_unused_input='ignore')
for vn, vv in wrt.iteritems()}
def get_model(self,X, Y, x_test):
'''
Gaussian Process Regression model.
Reference: C.E. Rasmussen, "Gaussian Process for Machine Learning", MIT Press 2006
Args:
X: tensor matrix, training data
Y: tensor matrix, training target
x_test: tensor matrix, testing data
Returns:
K: prior cov matrix
Ks: prior joint cov matrix
Kss: prior cov matrix for testing data
Posterior Distribution:
alpha: alpha = inv(K)*(mu-m)
sW: vector containing diagonal of sqrt(W)
L: L = chol(sW*K*sW+eye(n))
y_test_mu: predictive mean
y_test_var: predictive variance
fs2: predictive latent variance
Note: the cov matrix inverse is computed through Cholesky factorization
https://makarandtapaswi.wordpress.com/2011/07/08/cholesky-decomposition-for-matrix-inversion/
'''
# Compute GP prior distribution: mean and covariance matrices (eq 2.13, 2.14)
K = self.covFunc(X,X,'K') # pior cov
#m = T.mean(Y)*T.ones_like(Y) # pior mean
m = self.mean*T.ones_like(Y) # pior mean
# Compute GP joint prior distribution between training and test (eq 2.18)
Ks = self.covFunc(X,x_test,'Ks')
# Pay attention!! here is the self test cov matrix.
Kss = self.covFunc(x_test,x_test,'Kss',mode='self_test')
# Compute posterior distribution with noise: L,alpha,sW,and log_likelihood.
sn2 = T.exp(2*self.sigma_n) # noise variance of likGauss
L = sT.cholesky(K/sn2 + T.identity_like(K))
sl = sn2
alpha = T.dot(sT.matrix_inverse(L.T),
T.dot(sT.matrix_inverse(L), (Y-m)) ) / sl
sW = T.ones_like(T.sum(K,axis=1)).reshape((K.shape[0],1)) / T.sqrt(sl)
log_likelihood = T.sum(-0.5 * (T.dot((Y-m).T, alpha)) - T.sum(T.log(T.diag(L))) - X.shape[0] / 2 * T.log(2.*np.pi*sl))
# Compute predictive distribution using the computed posterior distribution.
fmu = m + T.dot(Ks.T, alpha) # Prediction Mu fs|f, eq 2.25
V = T.dot(sT.matrix_inverse(L),T.extra_ops.repeat(sW,x_test.shape[0],axis=1)*Ks)
fs2 = Kss - (T.sum(V*V,axis=0)).reshape((1,V.shape[1])).T # Predication Sigma, eq 2.26
fs2 = T.maximum(fs2,0) # remove negative variance noise
#fs2 = T.sum(fs2,axis=1) # in case x has multiple dimensions
y_test_mu = fmu
y_test_var = fs2 + sn2
return K, Ks, Kss, y_test_mu, y_test_var, log_likelihood, L, alpha,V, fs2,sW
def covFunc(self,x1,x2,name,method='SE',mode='cross'):
'''
Factorization Implementation of distance function.
https://chrisjmccormick.wordpress.com/2014/08/22/fast-euclidean-distance-calculation-with-matlab-code/
'''
if method == 'SE':
ell = T.exp(self.l_k)
sf2 = T.exp(2.*self.sigma_f)
if mode == 'cross':
xx = T.sum((x1/ell)**2,axis=1).reshape((x1.shape[0],1))
xc = T.dot((x1/ell), (x2/ell).T)
cc = T.sum((x2/ell)**2,axis=1).reshape((1,x2.shape[0]))
dist = xx - 2*xc + cc
elif mode == 'self_test':
tmp = T.sum(x1,axis=1).reshape((x1.shape[0],1))
dist = T.zeros_like(tmp)
else:
raise NotImplementedError
k = sf2 * T.exp(-dist/2)
else:
raise NotImplementedError
return k
def get_outputs(self, x_val, y_val, x_test_val):
'''
Input numpy array, output posterior distributions.
Note: This function is independent of Theano
'''
inputs = {'X':x_val, 'Y':y_val, 'x_test':x_test_val}
outputs = {n: self.f[n](*inputs.values()) for n in self.f.keys()}
return outputs
def get_prediction(self, x_val, y_val,x_test_val):
inputs = {'X':x_val, 'Y':y_val, 'x_test':x_test_val}
ymu = self.f['y_test_mu'](*inputs.values())
ys2 = self.f['y_test_var'](*inputs.values())
return ymu, ys2
def get_likelihood(self,x_val, y_val):
inputs = {'X':x_val, 'Y':y_val, 'x_test':x_val}
likelihood = self.f['log_likelihood'](*inputs.values())
return likelihood
def get_cost_grads(self, x_val, y_val):
'''
get the likelihood and gradients
'''
inputs = {'X':x_val, 'Y':y_val, 'x_test':x_val}
#outputs = {n: self.f[n](*inputs.values()) for n in self.f.keys()}
grads = {n: self.g[n](*inputs.values()) for n in self.g.keys()}
return grads#, outputs
def opt(self, train_x_val, train_y_val,params,
lr, momentum = 0., decay=None,
nesterov=False, updates={},opt_method='SGD'):
'''
Gradient based optimizations.
'''
if len(updates) == 0:
for n in params.keys():
updates[n] = 0.
if opt_method=='SGD':
grads = self.get_cost_grads(train_x_val, train_y_val)
for n in params.keys():
g,p = grads[n], params[n]
updates[n] = lr * g
elif opt_method =='rmsprop':
# RMSPROP: Tieleman, T. and Hinton, G. (2012), Lecture 6.5 - rmsprop, COURSERA:
# Neural Networks for Machine Learning.
if nesterov and momentum > 0.:
# nesterov momentum, make a move according to momentum first
# then calculate the gradients.
for n in params.keys():
params[n].set_value( params[n].get_value() + momentum * updates[n])
grads = self.get_cost_grads(train_x_val, train_y_val)
for n in params.keys():
g, p = grads[n], params[n]
self.moving_mean_squared[n] = (decay * self.moving_mean_squared[n] +
(1.-decay) * g ** 2)
updates[n] = lr * g / (np.sqrt(self.moving_mean_squared[n])+ 1e-8)
else:
raise NotImplementedError
return updates
##############################################
## BEGIN TRAIN MODEL by EXTERNAL OPTIMIZERS ##
##############################################
def estimate_grads(self):
batch_size = self.batch_size
'''
Estimate gradient by averaging mini-batch.
'''
if self.meanfunc == 'zero':
params = {'sigma_n':self.sigma_n, 'sigma_f':self.sigma_f, 'l_k':self.l_k}
else:
params = {'mean':self.mean,'sigma_n':self.sigma_n, 'sigma_f':self.sigma_f, 'l_k':self.l_k}
N = self.x_val.shape[0]
if batch_size is None:
batch_size = N
num_batches = N / batch_size
if N%batch_size!=0:
num_batches += 1
train_index = np.arange(0,N)
grads_list,est_grads = {}, {}
for n in params.keys():
grads_list[n] = []
for i in range(num_batches):
np.random.shuffle(train_index)
batch_x_val = self.x_val[train_index[:batch_size],:]
batch_y_val = self.y_val[train_index[:batch_size],:]
grads = self.get_cost_grads(batch_x_val, batch_y_val)
for n in params.keys():
grads_list[n].append(grads[n])
for n in params.keys():
est_grads[n] = -1.* np.mean(grads_list[n]) # NOTE: negative grads
return est_grads
def _apply_hyp(self, hypInArray):
'''
Keep the order: mean, sigma_n, sigma_f, l_k
'''
if len(hypInArray) == 3:
self.sigma_n.set_value(hypInArray[0])
self.sigma_f.set_value(hypInArray[1])
self.l_k.set_value(hypInArray[2])
elif len(hypInArray) == 4:
self.mean.set_value(hypInArray[0])
self.sigma_n.set_value(hypInArray[1])
self.sigma_f.set_value(hypInArray[2])
self.l_k.set_value(hypInArray[3])
else:
raise ValueError('Number of Hyperparameters should be 3 or 4.')
def _get_hypArray(self,params):
if len(params) == 3:
return np.array([np.sum(params['sigma_n'].get_value()),
np.sum(params['sigma_f'].get_value()),
np.sum(params['l_k'].get_value())])
elif len(params) == 4:
return np.array([np.sum(params['mean'].get_value()),
np.sum(params['sigma_n'].get_value()),
np.sum(params['sigma_f'].get_value()),
np.sum(params['l_k'].get_value())])
else:
raise ValueError('Number of Gradients should be 3 or 4.')
def _convert_to_array(self, grads):
if len(grads) == 3:
return [grads['sigma_n'],grads['sigma_f'],grads['l_k']]
elif len(grads) == 4:
return [grads['mean'],grads['sigma_n'],grads['sigma_f'],grads['l_k']]
else:
raise ValueError('Number of Gradients should be 3 or 4.')
def _optimizer_f(self, hypInArray):
self._apply_hyp(hypInArray)
ll = self.get_likelihood(self.x_val, self.y_val)
cost = -ll # negative log-likelihood
est_grads = self.estimate_grads()
grads_list = self._convert_to_array(est_grads)
return cost, np.array(grads_list)
def train_by_optimizer(self, x_val, y_val,
number_epoch=10, batch_size=None):
if self.meanfunc == 'zero':
params = {'sigma_n':self.sigma_n, 'sigma_f':self.sigma_f, 'l_k':self.l_k}
else:
params = {'mean':self.mean,'sigma_n':self.sigma_n, 'sigma_f':self.sigma_f, 'l_k':self.l_k}
import minimize
if batch_size is None:
self.batch_size = len(x_val)
else:
self.batch_size = batch_size
self.x_val = x_val
self.y_val = y_val
print 'start to optimize'
likelihood = self.get_likelihood(x_val, y_val)
print 'BEGINE Training, Log Likelihood = %.2f'% likelihood
opt_results = minimize.run(self._optimizer_f, self._get_hypArray(params),length=number_epoch,verbose=True)
optimalHyp = deepcopy(opt_results[0])
self._apply_hyp(optimalHyp)
likelihood = self.get_likelihood(x_val, y_val)
print 'END Training, Log Likelihood = %.2f'% likelihood
##############################################
## END TRAIN MODEL by EXTERNAL OPTIMIZERS ##
##############################################
def train(self, x_val, y_val,
lr = 0.001, momentum = 0,decay = None,
nesterov = False,batch_size=None,
num_epoch = 10,opt_method='SGD'):
'''
Move hyper-parameters according to opt_method on mini-batch.
'''
if self.meanfunc == 'zero':
params = {'sigma_n':self.sigma_n, 'sigma_f':self.sigma_f, 'l_k':self.l_k}
else:
params = {'mean':self.mean,'sigma_n':self.sigma_n, 'sigma_f':self.sigma_f, 'l_k':self.l_k}
updates = {}
# Initialize cache at the begining.
if opt_method == 'rmsprop':
self.moving_mean_squared={}
for n in params.keys():
self.moving_mean_squared[n] = 0.
N = x_val.shape[0]
if batch_size is None:
batch_size = N
num_batches = N / batch_size
if N%batch_size!=0:
num_batches += 1
train_index = np.arange(0,N)
#outputs = self.get_prediction(x_val, y_val, x_val) # Evaluate trained outputs
likelihood = self.get_likelihood(x_val, y_val)
print 'BEGINE Training, Log Likelihood = %.2f'% likelihood
last_ll = likelihood
for epoch in range(num_epoch):
#if decay is not None:
# lr = lr * (1./(1. + decay * epoch))
for i in range(num_batches):
np.random.shuffle(train_index)
batch_x_val = x_val[train_index[:batch_size],:]
batch_y_val = y_val[train_index[:batch_size],:]
updates = self.opt(batch_x_val, batch_y_val, params,
lr=lr,momentum=momentum,decay=decay,nesterov=nesterov,updates=updates,
opt_method=opt_method)
for n in params.keys():
p = params[n]
p.set_value(p.get_value() + updates[n])
likelihood = self.get_likelihood(x_val, y_val)
if likelihood < last_ll: # stop training
for n in params.keys():
p = params[n]
p.set_value(p.get_value() - updates[n])
break
last_ll = likelihood
print self.sigma_n.get_value(), self.sigma_f.get_value(), self.l_k.get_value()
if epoch % 1 == 0:
#outputs = self.get_prediction(x_val, y_val, x_val) # Evaluate trained outputs
print 'On Epoch %d, Log Likelihood = %.2f'%(epoch, likelihood)
#outputs = self.get_prediction(x_val, y_val, x_val) # Evaluate trained outputs
likelihood = self.get_likelihood(x_val, y_val)
print 'END Training, Log Likelihood = %.2f '% likelihood
| DTUWindEnergy/FUSED-Wake | examples/gptheano_model.py | Python | mit | 16,388 | [
"Gaussian"
] | 06fd157d0bf1246d74059df397807c99a19a848ab019cc51a284fe14170cc728 |
from __future__ import division, print_function
from hscom import __common__
(print, print_, print_on, print_off,
rrr, profile) = __common__.init(__name__, '[back]')
# Python
from os.path import split, exists, join
# Qt
from PyQt4 import QtCore
from PyQt4.Qt import pyqtSignal
# Science
import numpy as np
# Hotspotter
import guifront
import guitools
from guitools import drawing, slot_
from guitools import backblocking as blocking
from hscom import helpers as util
from hscom import fileio as io
from hscom import params
from hsviz import draw_func2 as df2
from hsviz import viz
from hsviz import interact
from hotspotter import HotSpotterAPI
# Figure numbers for each named view; registered with the viz module so
# both sides draw into the same matplotlib figures.
FNUMS = dict(image=1, chip=2, res=3, inspect=4, special=5, name=6)
viz.register_FNUMS(FNUMS)
# Helper functions (should probably be moved into HotSpotter API)
def select_next_unannotated(back):
    """Select the next image with no detections, else the next unnamed chip.

    Returns None when something was selected, otherwise a status message.

    NOTE(review): the original initialized ``msg='err'`` (making ``was_err``
    always True), used a variable misleadingly named ``selection_exists``
    that was True when *no* selection existed, and could dereference a None
    selection.  The flow below is the reconstructed intent: scan images when
    nothing (or an image) is selected; fall through to chips when the image
    scan is exhausted or a chip is selected.
    """
    msg = None
    no_selection = back.selection is None
    if no_selection or back.selection['type_'] == 'gx':
        # Find the first image that has no detected chips.
        valid_gxs = back.hs.get_valid_gxs()
        has_chips = lambda gx: len(back.hs.gx2_cxs(gx)) > 0
        hascxs_list = list(map(has_chips, iter(valid_gxs)))
        try:
            gx = valid_gxs[hascxs_list.index(False)]
            back.select_gx(gx)
            return
        except ValueError:
            msg = 'All images have detections. Excellent! '
    was_err = msg is not None
    cx_is_selected = not no_selection and back.selection['type_'] == 'cx'
    if was_err or cx_is_selected:
        # Find the first chip that is still unnamed ('____').
        valid_cxs = back.hs.get_valid_cxs()
        has_name = lambda cx: back.hs.cx2_name(cx) != '____'
        is_named = list(map(has_name, iter(valid_cxs)))
        try:
            cx = valid_cxs[is_named.index(False)]
            cid = back.hs.tables.cx2_cid[cx]
            back.select_cid(cid)
            return
        except ValueError:
            msg = 'All chips are named. Awesome! '
    return msg
def select_next_in_order(back):
    """Advance the selection to the next valid image (or chip) index.

    With no current selection, image scanning starts from the beginning.
    Returns None after a 'gx'/'cx' scan, or 'end of the list' for an
    unrecognized selection type.
    """
    if back.selection is None:
        # Nothing selected yet: behave as an image selection just before 0.
        back.selection = {'type_': 'gx', 'index': -1}
    sel_type = back.selection['type_']
    start = back.selection['index'] + 1
    if sel_type == 'gx':
        # Next image with a non-empty name.
        gx2_gname = back.hs.tables.gx2_gname
        for candidate_gx in range(start, len(gx2_gname)):
            if gx2_gname[candidate_gx] != '':
                back.select_gx(candidate_gx)
                break
        return
    elif sel_type == 'cx':
        # Next chip with a nonzero chip id.
        cx2_cid = back.hs.tables.cx2_cid
        for candidate_cx in range(start, len(cx2_cid)):
            cid = cx2_cid[candidate_cx]
            if cid != 0:
                back.select_cid(cid)
                break
        return
    return 'end of the list'
# Creation function
def make_main_window(app=None, hs=None):
    """Build the MainWindowBackend and (unless running --nogui) show it."""
    backend = MainWindowBackend(app=app, hs=hs)
    wants_gui = hs is None or not params.args.nogui
    if wants_gui:
        backend.show()
        backend.layout_figures()
    if app is not None:
        app.setActiveWindow(backend.front)
    return backend
def _dev_reload(back):
    # Developer helper: hot-reload all hotspotter modules, then re-register
    # the Qt window with the drawing layer and refresh the GUI tables.
    from hsdev import dev_reload
    dev_reload.reload_all_modules()
    df2.unregister_qt4_win('all')
    df2.register_qt4_win(back.front)
    back.populate_tables()
def _user_select_new_dbdir(back):
    'script for new database user interaction'
    # Returns the chosen new database path, or None on cancel/failure.
    # StopIteration is (ab)used internally as a "user canceled" signal.
    try:
        # Ask the user what to call the new database
        new_db = back.user_input('Enter the new database name')
        # Return on cancel
        if new_db is None:
            raise StopIteration('Canceled')
        # Ask the user where to put the new database
        msg_put = 'Where should I put %r?' % new_db
        opt_put = ['Choose Directory', 'My Work Dir']
        reply = back.user_option(msg_put, 'options', opt_put, True)
        if reply == opt_put[1]:
            put_dir = back.get_work_directory()
        elif reply == opt_put[0]:
            put_dir = guitools.select_directory(
                'Select where to put the new database')
        else:
            # Dialog dismissed without choosing either option.
            raise StopIteration('Canceled')
        new_dbdir = join(put_dir, new_db)
        if not exists(put_dir):
            raise ValueError('Directory %r does not exist.' % put_dir)
        elif exists(new_dbdir):
            raise ValueError('New DB %r already exists.' % new_dbdir)
        return new_dbdir
    except ValueError as ex:
        # Invalid location: offer to retry the whole interaction (recursion).
        opt_try = ['Try Again']
        title_try = 'New Database Failed'
        try_again = back.user_option(str(ex), title_try, opt_try, False)
        if try_again == 'Try Again':
            return _user_select_new_dbdir(back)
    except StopIteration as ex:
        # User canceled; fall through to return None.
        pass
    return None
#------------------------
# Backend MainWindow Class
#------------------------
class MainWindowBackend(QtCore.QObject):
    '''
    Sends and receives signals to and from the frontend
    '''
    # Backend Signals
    # populateSignal(tblname, col_fancyheaders, col_editable, row_list, datatup_list)
    populateSignal = pyqtSignal(str, list, list, list, list)
    # setEnabledSignal(flag): enable/disable the entire frontend
    setEnabledSignal = pyqtSignal(bool)
    #------------------------
    # Constructor
    #------------------------
    def __init__(back, app=None, hs=None):
        """Wire up backend state, header metadata, the frontend, and signals.

        Args:
            app: the QApplication, or None.
            hs: an optional HotSpotter API instance to connect immediately.
        """
        super(MainWindowBackend, back).__init__()
        back.current_res = None   # last query result shown in the res table
        back.timer = None
        back.selection = None     # dict: {'type_': 'gx'|'cx', 'index': ..., ...}
        # A map from short internal headers to fancy headers seen by the user
        back.fancy_headers = {
            'gx': 'Image Index',
            'nx': 'Name Index',
            'cid': 'Chip ID',
            'aif': 'All Detected',
            'gname': 'Image Name',
            'nCxs': '#Chips',
            'name': 'Name',
            'nGt': '#GT',
            'nKpts': '#Kpts',
            'theta': 'Theta',
            'roi': 'ROI (x, y, w, h)',
            'rank': 'Rank',
            'score': 'Confidence',
            'match_name': 'Matching Name',
        }
        # Inverse map: fancy header -> internal header.
        back.reverse_fancy = {v: k for (k, v) in back.fancy_headers.items()}
        # A list of default internal headers to display
        back.table_headers = {
            'gxs': ['gx', 'gname', 'nCxs', 'aif'],
            'cxs': ['cid', 'name', 'gname', 'nGt', 'nKpts', 'theta'],
            'nxs': ['nx', 'name', 'nCxs'],
            'res': ['rank', 'score', 'name', 'cid']
        }
        # Lists internal headers whos items are editable
        back.table_editable = {
            'gxs': [],
            'cxs': ['name'],
            'nxs': ['name'],
            'res': ['name'],
        }
        # connect signals and other objects
        back.hs = hs
        back.app = app
        back.front = guifront.MainWindowFrontend(back=back)
        df2.register_qt4_win(back.front)
        back.populateSignal.connect(back.front.populate_tbl)
        back.setEnabledSignal.connect(back.front.setEnabled)
        if hs is not None:
            back.connect_api(hs)
#------------------------
# Draw Functions
#------------------------
    def show(back):
        # Show the frontend window.
        back.front.show()

    @drawing
    @profile
    def show_splash(back, fnum, view='Nice', **kwargs):
        """Replace figure *fnum* (if it exists) with the splash placeholder."""
        if df2.plt.fignum_exists(fnum):
            df2.figure(fnum=fnum, docla=True, doclf=True)
            viz.show_splash(fnum=fnum)
            df2.set_figtitle('%s View' % view)

    def _layout_figures_if(back, did_exist):
        # Hook for re-layouting figures after a draw; currently disabled.
        #back._layout_figures_if(did_exist)
        pass

    @drawing
    @profile
    def show_image(back, gx, sel_cxs=[], figtitle='Image View', **kwargs):
        """Draw image *gx* with its chips; clicking a chip selects it."""
        fnum = FNUMS['image']
        did_exist = df2.plt.fignum_exists(fnum)
        df2.figure(fnum=fnum, docla=True, doclf=True)
        interact.interact_image(back.hs, gx, sel_cxs, back.select_cx,
                                fnum=fnum, figtitle=figtitle)
        back._layout_figures_if(did_exist)

    @drawing
    @profile
    def show_chip(back, cx, **kwargs):
        """Draw chip *cx* in the chip figure (interactive)."""
        fnum = FNUMS['chip']
        did_exist = df2.plt.fignum_exists(fnum)
        df2.figure(fnum=fnum, docla=True, doclf=True)
        INTERACTIVE_CHIPS = True  # This should always be True
        if INTERACTIVE_CHIPS:
            interact_fn = interact.interact_chip
            interact_fn(back.hs, cx, fnum=fnum, figtitle='Chip View')
        else:
            viz.show_chip(back.hs, cx, fnum=fnum, figtitle='Chip View')
        back._layout_figures_if(did_exist)

    @drawing
    @profile
    def show_query_result(back, res, tx=None, **kwargs):
        """Show query result *res*; with *tx*, inspect the tx-th top match."""
        if tx is not None:
            fnum = FNUMS['inspect']
            did_exist = df2.plt.fignum_exists(fnum)
            # Interact with the tx\th top index
            res.interact_top_chipres(back.hs, tx)
        else:
            fnum = FNUMS['res']
            did_exist = df2.plt.fignum_exists(fnum)
            df2.figure(fnum=fnum, docla=True, doclf=True)
            if back.hs.prefs.display_cfg.showanalysis:
                # Analysis view is preferred when the preference is set.
                res.show_analysis(back.hs, fnum=fnum, figtitle=' Analysis View')
            else:
                res.show_top(back.hs, fnum=fnum, figtitle='Query View ')
        back._layout_figures_if(did_exist)

    @drawing
    @profile
    def show_single_query(back, res, cx, **kwargs):
        """Interactively compare query result *res* against chip *cx*."""
        fnum = FNUMS['inspect']
        did_exist = df2.plt.fignum_exists(fnum)
        df2.figure(fnum=fnum, docla=True, doclf=True)
        interact.interact_chipres(back.hs, res, cx=cx, fnum=fnum)
        back._layout_figures_if(did_exist)

    @drawing
    @profile
    def show_nx(back, nx, sel_cxs=[], **kwargs):
        """Draw all chips belonging to name index *nx*."""
        fnum = FNUMS['name']
        df2.figure(fnum=fnum, docla=True, doclf=True)
        interact.interact_name(back.hs, nx, sel_cxs, back.select_cx,
                               fnum=fnum)
#----------------------
# Work Functions
#----------------------
def get_selected_gx(back):
'selected image index'
if back.selection is None:
return None
type_ = back.selection['type_']
if type_ == 'gx':
gx = back.selection['index']
if type_ == 'cx':
cx = back.selection['index']
gx = back.hs.tables.cx2_gx(cx)
return gx
    def get_selected_cx(back, cid=None):
        """Return the selected chip index (cx).

        With *cid* given, resolve it directly (AssertionError if invalid).
        Otherwise derive the cx from the current selection: 'cx' selections
        store it in 'index', 'gx' selections in 'sub'.  Returns None when
        nothing is selected.
        """
        if cid is not None:
            try:
                cx = back.hs.cid2_cx(cid)
                return cx
            except IndexError as ex:
                print(ex)
                msg = 'Query qcid=%d does not exist / is invalid' % cid
                raise AssertionError(msg)
        if back.selection is None:
            return None
        type_ = back.selection['type_']
        # NOTE(review): for selection types other than 'cx'/'gx', cx is
        # unbound here and the return raises NameError -- confirm callers
        # only invoke this with chip/image selections.
        if type_ == 'cx':
            cx = back.selection['index']
        if type_ == 'gx':
            cx = back.selection['sub']
        return cx
def update_window_title(back):
if back.hs is None:
title = 'Hotspotter - NULL database'
if back.hs.dirs is None:
title = 'Hotspotter - invalid database'
else:
db_dir = back.hs.dirs.db_dir
db_name = split(db_dir)[1]
title = 'Hotspotter - %r - %s' % (db_name, db_dir)
back.front.setWindowTitle(title)
    def connect_api(back, hs):
        """Attach a HotSpotter API instance and (if loaded) populate the GUI.

        The frontend is enabled only when the database tables are available;
        otherwise it is disabled until a valid database is opened.
        """
        print('[*back] connect_api()')
        back.hs = hs
        if hs.tables is not None:
            hs.register_backend(back)
            back.populate_tables(res=False)
            back.setEnabledSignal.emit(True)
            back.clear_selection()
            back.update_window_title()
            back.layout_figures()
        else:
            back.setEnabledSignal.emit(False)
        #back.database_loaded.emit()
#--------------------------------------------------------------------------
# Populate functions
#--------------------------------------------------------------------------
    @profile
    def _populate_table(back, tblname, extra_cols={},
                        index_list=None, prefix_cols=[]):
        """Build the rows for one GUI table and emit populateSignal.

        Args:
            tblname: internal table key ('gxs', 'cxs', 'nxs', or 'res').
            extra_cols: header -> callable producing a column for index_list.
            index_list: row indexes to show (defaults to all valid indexes).
            prefix_cols: dicts of header -> value prepended as extra rows.

        NOTE(review): ``extra_cols={}`` and ``prefix_cols=[]`` are mutable
        default arguments; safe only as long as they are never mutated here.
        """
        print('[*back] _populate_table(%r)' % tblname)
        headers = back.table_headers[tblname]
        editable = back.table_editable[tblname]
        if tblname == 'cxs':  # in ['cxs', 'res']: TODO props in restable
            # Chip tables also show user-defined property columns.
            prop_keys = back.hs.tables.prop_dict.keys()
        else:
            prop_keys = []
        col_headers, col_editable = guitools.make_header_lists(headers,
                                                               editable,
                                                               prop_keys)
        if index_list is None:
            index_list = back.hs.get_valid_indexes(tblname)
        # Prefix datatup: one row per prefix dict, 'error' where a header
        # has no entry.
        prefix_datatup = [[prefix_col.get(header, 'error')
                           for header in col_headers]
                          for prefix_col in prefix_cols]
        body_datatup = back.hs.get_datatup_list(tblname, index_list,
                                                col_headers, extra_cols)
        datatup_list = prefix_datatup + body_datatup
        row_list = range(len(datatup_list))
        # Populate with fancy headers.
        col_fancyheaders = [back.fancy_headers[key]
                            if key in back.fancy_headers else key
                            for key in col_headers]
        back.populateSignal.emit(tblname, col_fancyheaders, col_editable,
                                 row_list, datatup_list)
    def populate_image_table(back, **kwargs):
        # Refresh the image (gxs) table.
        back._populate_table('gxs', **kwargs)

    def populate_name_table(back, **kwargs):
        # Refresh the name (nxs) table.
        back._populate_table('nxs', **kwargs)

    def populate_chip_table(back, **kwargs):
        # Refresh the chip (cxs) table.
        back._populate_table('cxs', **kwargs)

    def populate_result_table(back, **kwargs):
        """Refresh the result table from back.current_res (clear if None)."""
        res = back.current_res
        if res is None:
            # Clear the table instead
            print('[*back] no results available')
            back._populate_table('res', index_list=[])
            return
        top_cxs = res.topN_cxs(back.hs, N='all')
        qcx = res.qcx
        # The ! mark is used for ascii sorting. TODO: can we work arround this?
        # First row shows the query chip itself.
        prefix_cols = [{'rank': '!Query',
                        'score': '---',
                        'name': back.hs.cx2_name(qcx),
                        'cid': back.hs.cx2_cid(qcx), }]
        extra_cols = {
            'score': lambda cxs: [res.cx2_score[cx] for cx in iter(cxs)],
        }
        back._populate_table('res', index_list=top_cxs,
                             prefix_cols=prefix_cols,
                             extra_cols=extra_cols,
                             **kwargs)
def populate_tables(back, image=True, chip=True, name=True, res=True):
if image:
back.populate_image_table()
if chip:
back.populate_chip_table()
if name:
back.populate_name_table()
if res:
back.populate_result_table()
def append_header(back, tblname, header, editable=False):
try:
pos = back.table_headers[tblname].index(header)
print('[back] %s_TBL already has header=%r at pos=%d' %
(tblname, header, pos))
except ValueError:
back.table_headers[tblname].append(header)
#--------------------------------------------------------------------------
# Helper functions
#--------------------------------------------------------------------------
    def user_info(back, *args, **kwargs):
        # TODO: this code is duplicated in front
        # Modal information dialog.
        return guitools.user_info(back.front, *args, **kwargs)

    def user_input(back, *args, **kwargs):
        # Modal text-entry dialog.
        return guitools.user_input(back.front, *args, **kwargs)

    def user_option(back, *args, **kwargs):
        # Modal multiple-choice dialog.
        return guitools._user_option(back.front, *args, **kwargs)

    def get_work_directory(back):
        # The configured work directory (where databases live).
        return params.get_workdir()
def get_work_directory2(back, use_cache=True):
# TODO: This should go in api (or higher level main?)
cache_id = 'work_directory_cache_id'
if use_cache:
work_dir = io.global_cache_read(cache_id, default='.')
if work_dir is not '.' and exists(work_dir):
return work_dir
msg_dir = 'Work directory not currently set. Select a work directory'
work_dir = guitools.select_directory(msg_dir)
if not exists(work_dir):
msg_try = 'Directory %r does not exist.' % work_dir
opt_try = ['Try Again']
try_again = back.user_option(msg_try, 'get work dir failed',
opt_try, False)
if try_again == 'Try Again':
return back.get_work_dir(use_cache)
io.global_cache_write(cache_id, work_dir)
return work_dir
    def user_select_new_dbdir(back):
        # Interactively choose a directory for a brand-new database.
        return _user_select_new_dbdir(back)
#--------------------------------------------------------------------------
# Selection Functions
#--------------------------------------------------------------------------
@slot_(int)
@blocking
@profile
def select_gx(back, gx, cx=None, show=True, **kwargs):
    """Select image *gx* (Table Click -> Image Table), optionally with chip *cx*."""
    autoselect_chips = False
    if autoselect_chips and cx is None:
        cxs = back.hs.gx2_cxs(gx)
        # BUGFIX: was `if len(cxs > 0):`, which took the length of the
        # elementwise boolean mask instead of testing whether any chips
        # exist for this image.
        if len(cxs) > 0:
            cx = cxs[0]
    sel_cxs = [] if cx is None else [cx]
    back.selection = {'type_': 'gx', 'index': gx, 'sub': cx}
    if show:
        if cx is None:
            back.show_splash(2, 'Chip', dodraw=False)
        else:
            back.show_chip(cx, dodraw=False)
        back.show_image(gx, sel_cxs, **kwargs)


@slot_(int)
def select_cid(back, cid, **kwargs):
    """Select a chip by chip id (Table Click -> Chip Table)."""
    cx = back.hs.cid2_cx(cid)
    gx = back.hs.cx2_gx(cx)
    back.select_gx(gx, cx=cx, **kwargs)


@slot_(int)
def select_cx(back, cx, **kwargs):
    """Select a chip by chip index."""
    gx = back.hs.cx2_gx(cx)
    back.select_gx(gx, cx=cx, **kwargs)


@slot_(int)
def select_nx(back, nx):
    """Select a name by name index."""
    back.show_nx(nx)


@slot_(str)
def select_name(back, name):
    """Select a name by its string, resolving it to a name index first."""
    name = str(name)
    nx = np.where(back.hs.tables.nx2_name == name)[0]
    back.select_nx(nx)


@slot_(int)
def select_res_cid(back, cid, **kwargs):
    """Select a chip from the results table and show its single-query view."""
    # Table Click -> Chip Table
    cx = back.hs.cid2_cx(cid)
    gx = back.hs.cx2_gx(cx)
    back.select_gx(gx, cx=cx, dodraw=False, **kwargs)
    back.show_single_query(back.current_res, cx, **kwargs)
#--------------------------------------------------------------------------
# Misc Slots
#--------------------------------------------------------------------------
@slot_(str)
def backend_print(back, msg):
    # Echo a frontend-forwarded message on the backend console.
    print(str(msg))


@slot_()
def clear_selection(back, **kwargs):
    # Forget the current selection and reset every pane to its splash view.
    back.selection = None
    back.show_splash(FNUMS['image'], 'Image', dodraw=False)
    back.show_splash(FNUMS['chip'], 'Chip', dodraw=False)
    back.show_splash(FNUMS['res'], 'Results', **kwargs)


@slot_()
@blocking
def default_preferences(back):
    # Button Click -> Preferences Defaults
    # TODO: Propogate changes back to back.edit_prefs.ui
    back.hs.default_preferences()
    back.hs.prefs.save()


@slot_(int, str, str)
@blocking
@profile
def change_chip_property(back, cid, key, val):
    # Table Edit -> Change Chip Property
    # RCOS TODO: These function should take the type of the variable as an
    # arugment as well. (Guifront tries to automatically interpret the
    # variable type by its value and it will get stuck on things like
    # 'True'. Is that a string or a bool? I don't know. We should tell it.)
    key, val = map(str, (key, val))
    print('[*back] change_chip_property(%r, %r, %r)' % (cid, key, val))
    cx = back.hs.cid2_cx(cid)
    # Name edits go through the naming machinery; other keys are generic
    # per-chip properties.
    if key in ['name', 'matching_name']:
        back.hs.change_name(cx, val)
    else:
        back.hs.change_property(cx, key, val)
    back.populate_tables(image=False)
    print('')


@slot_(int, str, str)
@blocking
@profile
def alias_name(back, nx, key, val):
    # Table Edit -> rename (alias) an existing name entry.
    key, val = map(str, (key, val))
    print('[*back] alias_name(%r, %r, %r)' % (nx, key, val))
    if key in ['name']:
        # TODO: Add option to change name if alias fails
        back.hs.alias_name(nx, val)
    back.populate_tables(image=False)
    print('')


@slot_(int, str, bool)
@blocking
def change_image_property(back, gx, key, val):
    # Table Edit -> Change Image Property
    key, val = str(key), bool(val)
    print('[*back] change_img_property(%r, %r, %r)' % (gx, key, val))
    # 'aif' appears to be the only editable boolean image flag here —
    # TODO confirm the flag's meaning against the HotSpotter API.
    if key in ['aif']:
        back.hs.change_aif(gx, val)
    back.populate_image_table()
    print('')
#--------------------------------------------------------------------------
# File Slots
#--------------------------------------------------------------------------
@slot_()
@blocking
def new_database(back, new_dbdir=None):
    """File -> New Database: create (if needed) and open a new database dir."""
    if new_dbdir is None:
        new_dbdir = back.user_select_new_dbdir()
    if new_dbdir is not None:
        print('[*back] valid new_dbdir = %r' % new_dbdir)
        util.ensurepath(new_dbdir)
        back.open_database(new_dbdir)
    else:
        print('[*back] abort new database()')


@slot_()
@blocking
def open_database(back, db_dir=None):
    """File -> Open Database: load a HotSpotter database and connect it.

    Returns the loaded HotSpotter instance, or None when opening failed
    (unless --strict is on the command line, in which case the original
    exception is re-raised).
    """
    # BUGFIX: initialize hs so the trailing `return hs` cannot raise
    # NameError when an exception fires before the API is constructed.
    hs = None
    try:
        # Use the same args in a new (opened) database
        args = params.args
        #args = back.params.args
        if db_dir is None:
            msg = 'Select (or create) a database directory.'
            db_dir = guitools.select_directory(msg)
        print('[*back] user selects database: ' + db_dir)
        # Try and load db
        if args is not None:
            args.dbdir = db_dir
        hs = HotSpotterAPI.HotSpotter(args=args, db_dir=db_dir)
        hs.load(load_all=False)
        # Write to cache and connect if successful
        io.global_cache_write('db_dir', db_dir)
        back.connect_api(hs)
        #back.layout_figures()
    except Exception as ex:
        import traceback
        import sys
        print(traceback.format_exc())
        back.user_info('Aborting open database')
        print('aborting open database')
        print(ex)
        if '--strict' in sys.argv:
            raise
    print('')
    return hs


@slot_()
@blocking
def save_database(back):
    """File -> Save Database."""
    back.hs.save_database()


@slot_()
@blocking
def import_images(back):
    """File -> Import Images: ask files-vs-directory, then dispatch."""
    print('[*back] import images')
    msg = 'Import specific files or whole directory?'
    title = 'Import Images'
    options = ['Files', 'Directory']
    reply = back.user_option(msg, title, options, False)
    if reply == 'Files':
        back.import_images_from_file()
    if reply == 'Directory':
        back.import_images_from_dir()


@slot_()
@blocking
def import_images_from_file(back):
    """File -> Import Images From File."""
    fpath_list = guitools.select_images('Select image files to import')
    back.hs.add_images(fpath_list)
    back.populate_image_table()
    print('')


@slot_()
@blocking
def import_images_from_dir(back):
    """File -> Import Images From Directory."""
    msg = 'Select directory with images in it'
    img_dpath = guitools.select_directory(msg)
    print('[*back] selected %r' % img_dpath)
    fpath_list = util.list_images(img_dpath, fullpath=True)
    back.hs.add_images(fpath_list)
    back.populate_image_table()
    print('')


@slot_()
def quit(back):
    """File -> Quit."""
    guitools.exit_application()
#--------------------------------------------------------------------------
# Action menu slots
#--------------------------------------------------------------------------
@slot_()
@blocking
def new_prop(back):
    """Prompt for a new chip property name and add it to the database."""
    # Action -> New Chip Property
    newprop = back.user_input('What is the new property name?')
    back.hs.add_property(newprop)
    back.populate_chip_table()
    back.populate_result_table()
    print(r'[/back] added newprop = %r' % newprop)
    print('')


@slot_()
@blocking
@profile
def add_chip(back, gx=None, roi=None):
    """Interactively add a chip ROI to image *gx*; returns the new chip id."""
    # Action -> Add ROI
    if gx is None:
        gx = back.get_selected_gx()
    if roi is None:
        figtitle = 'Image View - Select ROI (click two points)'
        back.show_image(gx, figtitle=figtitle)
        roi = guitools.select_roi()
    if roi is None:
        # User cancelled or selection failed; nothing to add.
        print('[back*] roiselection failed. Not adding')
        return
    cx = back.hs.add_chip(gx, roi)  # NOQA
    back.populate_tables()
    # RCOS TODO: Autoselect should be an option
    #back.select_gx(gx, cx)
    back.select_gx(gx)
    print('')
    cid = back.hs.cx2_cid(cx)
    return cid


@slot_()
@blocking
@profile
def query(back, cid=None, tx=None, **kwargs):
    """Run a HotSpotter query on the selected chip and display the results."""
    # Action -> Query
    with util.Indent('[back.prequery]'):
        print('[back] query(cid=%r, %r)' % (cid, kwargs))
        cx = back.get_selected_cx(cid)
        print('[back] cx = %r' % cx)
        if cx is None:
            back.user_info('Cannot query. No chip selected')
            return
    with util.Indent('[back.query]'):
        try:
            res = back.hs.query(cx, **kwargs)
        except Exception as ex:
            # TODO Catch actually exceptions here
            print('[back] ex = %r' % ex)
            raise
    with util.Indent('[back.postquery]'):
        # A string result is an error/status message, not a query result.
        if isinstance(res, str):
            back.user_info(res)
            return
        back.current_res = res
        back.populate_result_table()
        print(r'[back] finished query')
        print('')
        # Show results against test chip index (tx)
        back.show_query_result(res, tx)
    return res


@slot_()
@blocking
@profile
def reselect_roi(back, cid=None, roi=None, **kwargs):
    """Interactively redraw the ROI of the currently selected chip."""
    # Action -> Reselect ROI
    print(r'[\back] reselect_roi()')
    cx = back.get_selected_cx(cid)
    if cx is None:
        back.user_info('Cannot reselect ROI. No chip selected')
        return
    gx = back.hs.tables.cx2_gx[cx]
    if roi is None:
        figtitle = 'Image View - ReSelect ROI (click two points)'
        back.show_image(gx, [cx], figtitle=figtitle, **kwargs)
        roi = guitools.select_roi()
    if roi is None:
        print('[back*] roiselection failed. Not changing')
        return
    back.hs.change_roi(cx, roi)
    back.populate_tables()
    back.select_gx(gx, cx, **kwargs)
    print(r'[/back] reselected ROI = %r' % roi)
    print('')
    pass


@slot_()
@blocking
@profile
def reselect_ori(back, cid=None, theta=None, **kwargs):
    """Interactively reselect the orientation of the selected chip."""
    # Action -> Reselect ORI
    cx = back.get_selected_cx(cid)
    if cx is None:
        back.user_info('Cannot reselect orientation. No chip selected')
        return
    gx = back.hs.tables.cx2_gx[cx]
    if theta is None:
        figtitle = 'Image View - Select Orientation (click two points)'
        back.show_image(gx, [cx], figtitle=figtitle, **kwargs)
        theta = guitools.select_orientation()
    if theta is None:
        print('[back*] theta selection failed. Not changing')
        return
    back.hs.change_theta(cx, theta)
    back.populate_tables()
    back.select_gx(gx, cx, **kwargs)
    print(r'[/back] reselected theta=%r' % theta)
    print('')


@slot_()
@blocking
@profile
def delete_chip(back):
    """Delete the currently selected chip and reselect its image."""
    # Action -> Delete Chip
    # RCOS TODO: Are you sure?
    cx = back.get_selected_cx()
    if cx is None:
        back.user_info('Cannot delete chip. No chip selected')
        return
    gx = back.hs.cx2_gx(cx)
    back.hs.delete_chip(cx)
    back.populate_tables()
    back.select_gx(gx)
    print('[back] deleted cx=%r\n' % cx)
    print('')


@slot_()
@blocking
@profile
def delete_image(back, gx=None):
    """Delete image *gx* (or the selected image) and clear the selection."""
    if gx is None:
        gx = back.get_selected_gx()
        if gx is None:
            back.user_info('Cannot delete image. No image selected')
            return
    back.clear_selection()
    back.hs.delete_image(gx)
    back.populate_tables()
    print('[back] deleted gx=%r\n' % gx)
    print('')


@slot_()
@blocking
@profile
def select_next(back):
    """Advance the selection to the next item (Action -> Next)."""
    # Action -> Next
    select_mode = 'in_order'  # 'unannotated'
    if select_mode == 'in_order':
        msg = select_next_in_order(back)
    elif select_mode == 'unannotated':
        msg = select_next_unannotated(back)
    else:
        raise Exception('uknown=%r' % select_mode)
    # A non-None msg explains why advancing stopped.
    if msg is not None:
        back.user_info(msg)
#--------------------------------------------------------------------------
# Batch menu slots
#--------------------------------------------------------------------------
@slot_()
@blocking
def precompute_feats(back):
    """Batch -> Precompute Feats: refresh samples and chip features."""
    #prevBlock = back.front.blockSignals(True)
    back.hs.update_samples()
    back.hs.refresh_features()
    #back.front.blockSignals(prevBlock)
    back.populate_chip_table()
    print('')


@slot_()
@blocking
def precompute_queries(back):
    """Batch -> Precompute Queries: run every valid chip as a query.

    Warms the query cache so later interactive queries return quickly.
    """
    # TODO:
    #http://stackoverflow.com/questions/15637768/
    #    pyqt-how-to-capture-output-of-pythons-interpreter-
    #    and-display-it-in-qedittext
    #prevBlock = back.front.blockSignals(True)
    #import matching_functions as mf
    #import DataStructures as ds
    #import match_chips3 as mc3
    import sys
    back.precompute_feats()
    valid_cx = back.hs.get_valid_cxs()
    #if back.params.args.quiet:
    #mc3.print_off()
    #ds.print_off()
    #mf.print_off()
    fmtstr = util.progress_str(len(valid_cx), '[back*] Query qcx=%r: ')
    for count, qcx in enumerate(valid_cx):
        sys.stdout.write(fmtstr % (qcx, count))
        # dochecks=False skips per-query validation for batch speed.
        back.hs.query(qcx, dochecks=False)
        if count % 100 == 0:
            sys.stdout.write('\n ...')
    sys.stdout.write('\n ...')
    #mc3.print_on()
    #ds.print_on()
    #mf.print_on()
    print('')
    #back.front.blockSignals(prevBlock)
#--------------------------------------------------------------------------
# Option menu slots
#--------------------------------------------------------------------------
#@slot_(rundbg=True)
@slot_()
@blocking
def layout_figures(back):
    """Options -> Layout Figures: tile the matplotlib windows on screen."""
    print('[back] layout_figures')
    nCols = 3
    nRows = 2
    if back.app is None:
        print('[*back] WARNING: cannot detect screen geometry')
        # Fallback diagonal length used when no QApplication is available.
        dlen = 1618
    else:
        app = back.app
        screen_rect = app.desktop().screenGeometry()
        width = screen_rect.width()
        height = screen_rect.height()
        # Scale the layout to the screen diagonal divided by the golden
        # ratio.
        dlen = np.sqrt(width ** 2 + height ** 2) / 1.618
    df2.present(num_rc=(nRows, nCols), wh=dlen, wh_off=(0, 60))


@slot_()
def edit_preferences(back):
    """Options -> Edit Preferences: open the preferences editor widget."""
    back.edit_prefs = back.hs.prefs.createQWidget()
    epw = back.edit_prefs
    # Wire the "defaults" button to reset preferences.
    epw.ui.defaultPrefsBUT.clicked.connect(back.default_preferences)
    query_uid = ''.join(back.hs.prefs.query_cfg.get_uid())
    print('[*back] query_uid = %s' % query_uid)
    print('')
#--------------------------------------------------------------------------
# Help menu slots
#--------------------------------------------------------------------------
@slot_()
def view_docs(back):
    # Help -> open the bundled user-guide PDF with the OS default viewer.
    from hscom import cross_platform as cplat
    hsdir = io.get_hsdir()
    pdf_dpath = join(hsdir, '_doc')
    pdf_fpath = join(pdf_dpath, 'HotSpotterUserGuide.pdf')
    cplat.startfile(pdf_fpath)


@slot_()
def view_database_dir(back):
    # Help -> View Directory Slots
    back.hs.vdd()


@slot_()
def view_computed_dir(back):
    # Open the computed-data directory.
    back.hs.vcd()


@slot_()
def view_global_dir(back):
    # Open the global cache directory.
    back.hs.vgd()


@slot_()
def delete_cache(back):
    # Help -> Delete Directory Slots
    # RCOS TODO: Are you sure?
    ans = back.user_option('Are you sure you want to delete cache?')
    if ans != 'Yes':
        return
    # Invalidate cached results before removing their backing files.
    back.invalidate_result()
    df2.close_all_figures()
    back.hs.delete_cache()
    back.populate_result_table()


@slot_()
def delete_global_prefs(back):
    # RCOS TODO: Are you sure?
    df2.close_all_figures()
    back.hs.delete_global_prefs()


@slot_()
def delete_queryresults_dir(back):
    # RCOS TODO: Are you sure?
    df2.close_all_figures()
    back.invalidate_result()
    back.hs.delete_queryresults_dir()
    back.populate_result_table()
def invalidate_result(back):
    # Drop the cached query result so stale results are never redisplayed.
    back.current_res = None


@slot_()
@blocking
def dev_mode(back):
    # Help -> Developer Help
    # Drops into an embedded IPython shell; the GUI is blocked until the
    # shell exits.
    steal_again = back.front.return_stdout()
    hs = back.hs  # NOQA
    front = back.front
    wasBlocked = front.blockSignals(True)
    devmode = True  # NOQA
    #print(util.indent(str(hs), '[*back.hs] '))
    #rrr()
    print(r'[\back] finished dev_help')
    #app = back.app
    #from PyQt4 import QtGui
    #QtGui.qApp.quit()
    #app.exit()  # Stop the main loop
    #app.quit()
    #if back.timer is not None:
    from PyQt4.QtCore import pyqtRemoveInputHook
    pyqtRemoveInputHook()
    #from IPython.lib.inputhook import enable_qt4
    #enable_qt4()
    execstr = util.ipython_execstr()
    #print(execstr)
    print('Debugging in IPython. IPython will break gui until you exit')
    # NOTE: exec of a generated string — development-only tooling; never
    # feed it untrusted input.
    exec(execstr)
    if steal_again:
        back.front.steal_stdout()
    back.front.blockSignals(wasBlocked)
    #back.timer.start()


@slot_()
@blocking
def dev_reload(back):
    # Help -> Developer Reload
    _dev_reload(back)


@slot_()
@blocking
def detect_dupimg(back):
    # Scan the database for duplicate images.
    back.hs.dbg_duplicate_images()
| Erotemic/hotspotter | hsgui/guiback.py | Python | apache-2.0 | 35,176 | [
"EPW"
] | eea8c86caf37a14b8cc2843798505aeef95b415c1103a6d2203b1032d2875ccf |
import numpy as np
from skimage import data
from skimage.color import rgb2gray
from skimage.filters import gaussian
from skimage.segmentation import active_contour
from skimage._shared import testing
from skimage._shared.testing import assert_equal, assert_allclose
def test_periodic_reference():
    # Periodic snake around the astronaut's face; compare the first ten
    # vertices against stored reference coordinates.
    img = data.astronaut()
    img = rgb2gray(img)
    s = np.linspace(0, 2*np.pi, 400)
    x = 220 + 100*np.cos(s)
    y = 100 + 100*np.sin(s)
    init = np.array([x, y]).T
    snake = active_contour(gaussian(img, 3), init, alpha=0.015, beta=10,
                           w_line=0, w_edge=1, gamma=0.001)
    refx = [299, 298, 298, 298, 298, 297, 297, 296, 296, 295]
    refy = [98, 99, 100, 101, 102, 103, 104, 105, 106, 108]
    assert_equal(np.array(snake[:10, 0], dtype=np.int32), refx)
    assert_equal(np.array(snake[:10, 1], dtype=np.int32), refy)


def test_fixed_reference():
    # Fixed-endpoint snake attracted to dark lines in the text image.
    img = data.text()
    x = np.linspace(5, 424, 100)
    y = np.linspace(136, 50, 100)
    init = np.array([x, y]).T
    snake = active_contour(gaussian(img, 1), init, bc='fixed',
                           alpha=0.1, beta=1.0, w_line=-5, w_edge=0,
                           gamma=0.1)
    refx = [5, 9, 13, 17, 21, 25, 30, 34, 38, 42]
    refy = [136, 135, 134, 133, 132, 131, 129, 128, 127, 125]
    assert_equal(np.array(snake[:10, 0], dtype=np.int32), refx)
    assert_equal(np.array(snake[:10, 1], dtype=np.int32), refy)


def test_free_reference():
    # Free-endpoint snake on the text image against stored references.
    img = data.text()
    x = np.linspace(5, 424, 100)
    y = np.linspace(70, 40, 100)
    init = np.array([x, y]).T
    snake = active_contour(gaussian(img, 3), init, bc='free',
                           alpha=0.1, beta=1.0, w_line=-5, w_edge=0,
                           gamma=0.1)
    refx = [10, 13, 16, 19, 23, 26, 29, 32, 36, 39]
    refy = [76, 76, 75, 74, 73, 72, 71, 70, 69, 69]
    assert_equal(np.array(snake[:10, 0], dtype=np.int32), refx)
    assert_equal(np.array(snake[:10, 1], dtype=np.int32), refy)


def test_RGB():
    # Running on single-channel-in-RGB and replicated-RGB images must give
    # the same contour as the grayscale case (w_line scaled by 1/3 when the
    # intensity is present in all three channels).
    img = gaussian(data.text(), 1)
    imgR = np.zeros((img.shape[0], img.shape[1], 3))
    imgG = np.zeros((img.shape[0], img.shape[1], 3))
    imgRGB = np.zeros((img.shape[0], img.shape[1], 3))
    imgR[:, :, 0] = img
    imgG[:, :, 1] = img
    imgRGB[:, :, :] = img[:, :, None]
    x = np.linspace(5, 424, 100)
    y = np.linspace(136, 50, 100)
    init = np.array([x, y]).T
    snake = active_contour(imgR, init, bc='fixed',
                           alpha=0.1, beta=1.0, w_line=-5, w_edge=0,
                           gamma=0.1)
    refx = [5, 9, 13, 17, 21, 25, 30, 34, 38, 42]
    refy = [136, 135, 134, 133, 132, 131, 129, 128, 127, 125]
    assert_equal(np.array(snake[:10, 0], dtype=np.int32), refx)
    assert_equal(np.array(snake[:10, 1], dtype=np.int32), refy)
    snake = active_contour(imgG, init, bc='fixed',
                           alpha=0.1, beta=1.0, w_line=-5, w_edge=0,
                           gamma=0.1)
    assert_equal(np.array(snake[:10, 0], dtype=np.int32), refx)
    assert_equal(np.array(snake[:10, 1], dtype=np.int32), refy)
    snake = active_contour(imgRGB, init, bc='fixed', alpha=0.1, beta=1.0,
                           w_line=-5/3., w_edge=0, gamma=0.1)
    assert_equal(np.array(snake[:10, 0], dtype=np.int32), refx)
    assert_equal(np.array(snake[:10, 1], dtype=np.int32), refy)
def test_end_points():
    # Boundary-condition semantics: 'periodic' closes the contour,
    # 'free' lets endpoints drift apart, 'fixed' pins them to the initial
    # positions.
    img = data.astronaut()
    img = rgb2gray(img)
    s = np.linspace(0, 2*np.pi, 400)
    x = 220 + 100*np.cos(s)
    y = 100 + 100*np.sin(s)
    init = np.array([x, y]).T
    snake = active_contour(gaussian(img, 3), init,
                           bc='periodic', alpha=0.015, beta=10,
                           w_line=0, w_edge=1, gamma=0.001,
                           max_iterations=100)
    assert np.sum(np.abs(snake[0, :]-snake[-1, :])) < 2
    snake = active_contour(gaussian(img, 3), init,
                           bc='free', alpha=0.015, beta=10,
                           w_line=0, w_edge=1, gamma=0.001,
                           max_iterations=100)
    assert np.sum(np.abs(snake[0, :]-snake[-1, :])) > 2
    snake = active_contour(gaussian(img, 3), init,
                           bc='fixed', alpha=0.015, beta=10,
                           w_line=0, w_edge=1, gamma=0.001,
                           max_iterations=100)
    assert_allclose(snake[0, :], [x[0], y[0]], atol=1e-5)
def test_bad_input():
    """active_contour rejects unknown bc values and negative iteration counts."""
    img = np.zeros((10, 10))
    init = np.array([np.linspace(5, 424, 100),
                     np.linspace(136, 50, 100)]).T
    with testing.raises(ValueError):
        active_contour(img, init, bc='wrong')
    with testing.raises(ValueError):
        active_contour(img, init, max_iterations=-15)
| kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/skimage/segmentation/tests/test_active_contour_model.py | Python | gpl-3.0 | 4,472 | [
"Gaussian"
] | 00f3720817167119022d8f93813b68954421390d68989deb3c6f1fa7b2643c85 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-14 18:47
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Drop legacy survey/outcome columns from the ``visit`` model."""

    dependencies = [
        ('visit', '0099_auto_20160120_2205'),
    ]

    # Obsolete fields being removed, kept in the original operation order.
    _dropped_fields = (
        'attempt_num',
        'contact_result',
        'had_impact',
        'impact_description',
        'improvement_level',
        'improvementissues',
        'improvements',
        'insights',
        'involvement',
        'involvement_types',
        'issues_prek',
        'issues_primary',
        'learning_strategies',
        'level',
        'location',
        'participants',
        'request_services',
        'requested_services',
        'siblings_under_5_present',
        'strengthen',
        'strengthen_how',
        'student_absent_reason',
        'student_present',
        'time_begin',
        'time_end',
        'time_scheduled',
        'visit2_other_concerns',
        'visit2_share_more',
        'visit_num',
        'what_else',
    )

    # One RemoveField per dropped column (the comprehension's iterable is
    # evaluated in class scope, so this is legal inside the class body).
    operations = [
        migrations.RemoveField(model_name='visit', name=field_name)
        for field_name in _dropped_fields
    ]
| koebbe/homeworks | visit/migrations/0100_auto_20160614_1347.py | Python | mit | 3,577 | [
"VisIt"
] | 0ef2862c2fe99fbc9898b0db512bd4b7a12a64573deb249139d37b64f6758c57 |
from __future__ import division
from __future__ import absolute_import
from builtins import zip
from builtins import range
from past.utils import old_div
from builtins import object
import pytest
from numpy.testing import assert_array_almost_equal
from .test_helpers import u
import openpathsampling as paths
import openpathsampling.engines as peng
import numpy as np
try:
import openmmtools as omt
except ImportError:
omt = None
import openpathsampling.engines.openmm as omm_engine
from openpathsampling.snapshot_modifier import *
from collections import Counter
import logging
# Silence noisy OPS subsystem loggers so test output stays readable.
logging.getLogger('openpathsampling.initialization').setLevel(logging.CRITICAL)
logging.getLogger('openpathsampling.storage').setLevel(logging.CRITICAL)
logging.getLogger('openpathsampling.netcdfplus').setLevel(logging.CRITICAL)
class TestSnapshotModifier(object):
    # Exercises the subset-mask helpers of the SnapshotModifier base class
    # through a trivial concrete subclass.
    def setup(self):
        # TODO OPS 2.0: This subclass is only here for python 2.7 should be
        # replaced with SnapshotModifier
        class DummyMod(SnapshotModifier):
            # Identity modifier: returns the snapshot unchanged.
            def __call__(self, a):
                return a
        self.Modifier = DummyMod
        self.modifier = DummyMod()
        # 1D toy snapshot: four atoms along a line.
        self.snapshot_1D = peng.toy.Snapshot(
            coordinates=np.array([0.0, 1.0, 2.0, 3.0]),
            velocities=np.array([0.5, 1.5, 2.5, 3.5])
        )
        # Prefer an OpenMM snapshot class when available; fall back to toy.
        if paths.integration_tools.HAS_OPENMM:
            Class3D = peng.openmm.MDSnapshot
        else:
            Class3D = peng.toy.ToySnapshot
        self.snapshot_3D = Class3D(
            coordinates=np.array([[0.0, 0.1, 0.2],
                                  [1.0, 1.1, 1.2],
                                  [2.0, 2.1, 2.2],
                                  [3.0, 3.1, 3.2]]),
            velocities=np.array([[0.5, 0.6, 0.7],
                                 [1.5, 1.6, 1.7],
                                 [2.5, 2.6, 2.7],
                                 [3.5, 3.6, 3.7]])
        )

    def test_extract_subset(self):
        # extract_subset picks the rows named by subset_mask.
        mod = self.Modifier(subset_mask=[1, 2])
        sub_1Dx = mod.extract_subset(self.snapshot_1D.coordinates)
        assert_array_almost_equal(sub_1Dx, np.array([1.0, 2.0]))
        sub_1Dv = mod.extract_subset(self.snapshot_1D.velocities)
        assert_array_almost_equal(sub_1Dv, np.array([1.5, 2.5]))
        sub_3Dx = mod.extract_subset(self.snapshot_3D.coordinates)
        assert_array_almost_equal(sub_3Dx, np.array([[1.0, 1.1, 1.2],
                                                     [2.0, 2.1, 2.2]]))
        sub_3Dv = mod.extract_subset(self.snapshot_3D.velocities)
        assert_array_almost_equal(sub_3Dv, np.array([[1.5, 1.6, 1.7],
                                                     [2.5, 2.6, 2.7]]))

    def test_apply_to_subset(self):
        # apply_to_subset writes new values into the masked rows in place
        # and returns the same array object.
        mod = self.Modifier(subset_mask=[1, 2])
        copy_1Dx = self.snapshot_1D.coordinates.copy()
        new_1Dx = mod.apply_to_subset(copy_1Dx, np.array([-1.0, -2.0]))
        assert_array_almost_equal(new_1Dx, np.array([0.0, -1.0, -2.0, 3.0]))
        # and check that memory points to the right things; orig unchanged
        assert copy_1Dx is new_1Dx
        assert_array_almost_equal(self.snapshot_1D.coordinates,
                                  np.array([0.0, 1.0, 2.0, 3.0]))
        copy_3Dx = self.snapshot_3D.coordinates.copy()
        new_3Dx = mod.apply_to_subset(copy_3Dx,
                                      np.array([[-1.0, -1.1, -1.2],
                                                [-2.0, -2.1, -2.2]]))
        assert_array_almost_equal(new_3Dx, np.array([[0.0, 0.1, 0.2],
                                                     [-1.0, -1.1, -1.2],
                                                     [-2.0, -2.1, -2.2],
                                                     [3.0, 3.1, 3.2]]))
        # and check that memory points to the right things; orig unchanged
        assert copy_3Dx is new_3Dx
        assert_array_almost_equal(self.snapshot_3D.coordinates,
                                  np.array([[0.0, 0.1, 0.2],
                                            [1.0, 1.1, 1.2],
                                            [2.0, 2.1, 2.2],
                                            [3.0, 3.1, 3.2]]))
class TestNoModification(TestSnapshotModifier):
    """NoModification must copy snapshots verbatim (or pass them through)."""

    def setup(self):
        super(TestNoModification, self).setup()
        self.modifier = NoModification()

    def test_call(self):
        # Each result must carry identical arrays in freshly allocated
        # buffers, for both the 1D and the 3D fixture.
        for snap in (self.snapshot_1D, self.snapshot_3D):
            modified = self.modifier(snap)
            assert_array_almost_equal(snap.coordinates,
                                      modified.coordinates)
            assert_array_almost_equal(snap.velocities,
                                      modified.velocities)
            assert modified.coordinates is not snap.coordinates
            assert modified.velocities is not snap.velocities

    def test_call_no_copy(self):
        # With as_copy=False the input snapshot itself is returned.
        no_copy_mod = NoModification(as_copy=False)
        for snap in (self.snapshot_1D, self.snapshot_3D):
            assert no_copy_mod(snap) is snap

    def test_probability_ratio(self):
        # Always 1.0, even for invalid input.
        assert self.modifier.probability_ratio(None, None) == 1.0
class TestRandomizeVelocities(object):
    # Smoke tests for RandomVelocities on toy engines of varying
    # atom/spatial dimensionality. Statistical correctness is covered in
    # `test_snapshot_modifier.ipynb` (inherently stochastic).
    def setup(self):
        # TODO: check against several possibilities, including various
        # combinations of shapes of velocities and masses.
        topology_2x3D = paths.engines.toy.Topology(
            n_spatial=3, n_atoms=2, masses=np.array([2.0, 3.0]), pes=None
        )
        topology_3x1D = paths.engines.toy.Topology(
            n_spatial=1, n_atoms=3, masses=np.array([[2.0], [3.0], [4.0]]),
            pes=None
        )
        topology_1x2D = paths.engines.toy.Topology(
            n_spatial=2, n_atoms=1, masses=np.array([1.0, 2.0]), pes=None
        )
        # All fixtures start from zero coordinates and velocities so any
        # nonzero velocity after randomization proves the modifier acted.
        self.snap_2x3D = paths.engines.toy.Snapshot(
            coordinates=np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
            velocities=np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
            engine=paths.engines.toy.Engine({}, topology_2x3D)
        )
        self.snap_3x1D = paths.engines.toy.Snapshot(
            coordinates=np.array([[0.0], [0.0], [0.0]]),
            velocities=np.array([[0.0], [0.0], [0.0]]),
            engine=paths.engines.toy.Engine({}, topology_3x1D)
        )
        self.snap_1x2D = paths.engines.toy.Snapshot(
            coordinates=np.array([[0.0, 0.0]]),
            velocities=np.array([[0.0, 0.0]]),
            engine=paths.engines.toy.Engine({}, topology_1x2D)
        )

    def test_call(self):
        # NOTE: these tests basically check the API. Tests for correctness
        # are in `test_snapshot_modifier.ipynb`, because they are inherently
        # stochastic.
        randomizer = RandomVelocities(beta=old_div(1.0, 5.0))
        new_1x2D = randomizer(self.snap_1x2D)
        assert new_1x2D.coordinates.shape == new_1x2D.velocities.shape
        assert (pytest.approx(new_1x2D.coordinates) ==
                self.snap_1x2D.coordinates)
        # Result must be a fresh snapshot with fresh arrays.
        assert new_1x2D is not self.snap_1x2D
        assert new_1x2D.coordinates is not self.snap_1x2D.coordinates
        assert new_1x2D.velocities is not self.snap_1x2D.velocities
        for val in new_1x2D.velocities.flatten():
            assert val != 0.0
        assert randomizer.probability_ratio(self.snap_1x2D, new_1x2D) == 1.0
        new_2x3D = randomizer(self.snap_2x3D)
        assert new_2x3D.coordinates.shape == new_2x3D.velocities.shape
        assert_array_almost_equal(new_2x3D.coordinates,
                                  self.snap_2x3D.coordinates)
        assert new_2x3D is not self.snap_2x3D
        assert new_2x3D.coordinates is not self.snap_2x3D.coordinates
        assert new_2x3D.velocities is not self.snap_2x3D.velocities
        for val in new_2x3D.velocities.flatten():
            assert val != 0.0
        new_3x1D = randomizer(self.snap_3x1D)
        assert new_3x1D.coordinates.shape == new_3x1D.velocities.shape
        assert_array_almost_equal(new_3x1D.coordinates,
                                  self.snap_3x1D.coordinates)
        assert new_3x1D is not self.snap_3x1D
        assert new_3x1D.coordinates is not self.snap_3x1D.coordinates
        assert new_3x1D.velocities is not self.snap_3x1D.velocities
        for val in new_3x1D.velocities.flatten():
            assert val != 0.0

    def test_subset_call(self):
        # Only atom 0 is randomized; atom 1 must keep its velocities.
        randomizer = RandomVelocities(beta=old_div(1.0, 5.0), subset_mask=[0])
        new_2x3D = randomizer(self.snap_2x3D)
        assert new_2x3D.coordinates.shape == new_2x3D.velocities.shape
        assert_array_almost_equal(new_2x3D.coordinates,
                                  self.snap_2x3D.coordinates)
        assert new_2x3D is not self.snap_2x3D
        assert new_2x3D.coordinates is not self.snap_2x3D.coordinates
        assert new_2x3D.velocities is not self.snap_2x3D.velocities
        # show that the unchanged atom is, in fact, unchanged
        assert_array_almost_equal(new_2x3D.velocities[1],
                                  self.snap_2x3D.velocities[1])
        for val in new_2x3D.velocities[0]:
            assert val != 0.0

    def test_no_beta_bad_engine(self):
        # Without beta there is no temperature to draw from: must raise.
        engine = self.snap_2x3D.engine
        randomizer = RandomVelocities(engine=engine)
        with pytest.raises(RuntimeError):
            randomizer(self.snap_2x3D)

    def test_with_openmm_snapshot(self):
        # note: this is only a smoke test; correctness depends on OpenMM's
        # tests of its constraint approaches.
        if not omt:
            pytest.skip("Requires OpenMMTools (not installed)")
        test_system = omt.testsystems.AlanineDipeptideVacuum()
        template = omm_engine.snapshot_from_testsystem(test_system)
        engine = omm_engine.Engine(
            topology=template.topology,
            system=test_system.system,
            integrator=omt.integrators.VVVRIntegrator()
        )
        beta = old_div(1.0, (300.0 * u.kelvin * u.BOLTZMANN_CONSTANT_kB))
        # when the engine doesn't have an existing snapshot
        randomizer = RandomVelocities(beta=beta, engine=engine)
        new_snap = randomizer(template)
        # coordinates stayed the same
        assert_array_almost_equal(template.coordinates,
                                  new_snap.coordinates)
        # velocities changed
        assert not np.isclose(template.velocities, new_snap.velocities).all()
        engine.generate(new_snap, [lambda x, foo: len(x) <= 4])
        # when the engine does have an existing snapshot
        zeros = np.zeros((engine.n_atoms, engine.n_spatial))
        zero_snap = paths.engines.openmm.Snapshot.construct(
            coordinates=zeros * u.nanometer,
            velocities=zeros * u.nanometer / u.picosecond,
            box_vectors=template.box_vectors,
            engine=engine
        )
        engine.current_snapshot = zero_snap
        randomizer = RandomVelocities(beta=beta, engine=engine)
        new_snap = randomizer(template)
        # coordinates stayed the same
        assert_array_almost_equal(template.coordinates,
                                  new_snap.coordinates)
        # velocities changed
        assert not np.isclose(template.velocities, new_snap.velocities).all()
        # internal snapshot unchanged
        assert engine.current_snapshot == zero_snap
        engine.generate(new_snap, [lambda x, foo: len(x) <= 4])

    def test_probability_ratio(self):
        # Velocities are drawn from the correct distribution, so the
        # generation/acceptance ratio is always 1.0.
        randomizer = RandomVelocities(beta=20)
        assert randomizer.probability_ratio(None, None) == 1.0
class TestGeneralizedDirectionModifier(object):
    """Tests for GeneralizedDirectionModifier against both the toy engine
    and (when available) OpenMM/OpenMMTools engines."""
    def setup(self):
        import openpathsampling.engines.toy as toys
        # applies one delta_v to all atoms
        self.toy_modifier_all = GeneralizedDirectionModifier(1.5)
        # defines delta_v per atom, including those not in the mask
        self.toy_modifier_long_dv = GeneralizedDirectionModifier(
            delta_v=[0.5, 1.0, 2.0],
            subset_mask=[1, 2]
        )
        # defines delta_v per atom in the subset mask
        self.toy_modifier = GeneralizedDirectionModifier(
            delta_v=[1.0, 2.0],
            subset_mask=[1, 2]
        )
        # 3-atom, 2D toy system used by all toy-engine tests
        self.toy_engine = toys.Engine(
            topology=toys.Topology(n_spatial=2, n_atoms=3, pes=None,
                                   masses=[1.0, 1.5, 4.0]),
            options={}
        )
        self.toy_snapshot = toys.Snapshot(
            coordinates=np.array([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]]),
            velocities=np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]),
            engine=self.toy_engine
        )
        # create the OpenMM versions (skip everything below if the
        # optional dependencies are missing)
        if not omt:
            pytest.skip("Requires OpenMMTools (not installed)")
        if not u:
            pytest.skip("Requires openmm.unit (not installed)")
        u_vel = old_div(u.nanometer, u.picosecond)
        self.openmm_modifier = GeneralizedDirectionModifier(1.2 * u_vel)
        ad_vacuum = omt.testsystems.AlanineDipeptideVacuum(constraints=None)
        self.test_snap = omm_engine.snapshot_from_testsystem(ad_vacuum)
        self.openmm_engine = omm_engine.Engine(
            topology=self.test_snap.topology,
            system=ad_vacuum.system,
            integrator=omt.integrators.VVVRIntegrator()
        )
        self.openmm_snap = self.test_snap.copy_with_replacement(
            engine=self.openmm_engine
        )
    def test_verify_snapshot_toy(self):
        # all three toy modifiers accept the 3-atom toy snapshot
        self.toy_modifier._verify_snapshot(self.toy_snapshot)
        self.toy_modifier_all._verify_snapshot(self.toy_snapshot)
        self.toy_modifier_long_dv._verify_snapshot(self.toy_snapshot)
    def test_verify_snapshot_openmm(self):
        self.openmm_modifier._verify_snapshot(self.openmm_snap)
    def test_verify_snapshot_no_dofs(self):
        # testsystem engines carry no n_degrees_of_freedom; verification
        # must report that explicitly
        assert isinstance(self.test_snap.engine,
                          omm_engine.tools.OpenMMToolsTestsystemEngine)
        with pytest.raises(RuntimeError, match="missing n_degrees_of_freedom"):
            self.openmm_modifier._verify_snapshot(self.test_snap)
    def test_verify_snapshot_constraints(self):
        # default AlanineDipeptideVacuum has constraints -> must be rejected
        ad_vacuum_constr = omt.testsystems.AlanineDipeptideVacuum()
        constrained_engine = omm_engine.Engine(
            topology=self.test_snap.topology,
            system=ad_vacuum_constr.system,
            integrator=omt.integrators.VVVRIntegrator()
        )
        constr_snap = self.test_snap.copy_with_replacement(
            engine=constrained_engine
        )
        with pytest.raises(RuntimeError, match="constraints"):
            self.openmm_modifier._verify_snapshot(constr_snap)
    def test_verify_engine_constraints(self):
        # a modifier-level engine overrides the snapshot's engine when
        # checking for constraints
        ad_vacuum_constr = omt.testsystems.AlanineDipeptideVacuum()
        constrained_engine = omm_engine.Engine(
            topology=self.test_snap.topology,
            system=ad_vacuum_constr.system,
            integrator=omt.integrators.VVVRIntegrator()
        )
        modifier = GeneralizedDirectionModifier(
            1.2 * u.nanometer / u.picosecond,
            engine=constrained_engine
        )
        # this is a hack because ndofs not defined in TestsystemEngine
        self.openmm_engine.current_snapshot = self.test_snap
        snap = self.openmm_engine.current_snapshot
        # when it checks based on the engine, it should be fine
        self.openmm_modifier._verify_snapshot(snap)
        # when modifier overrides snap.engine, it errors
        with pytest.raises(RuntimeError, match="constraints"):
            modifier._verify_snapshot(snap)
    def test_verify_snapshot_box_vectors(self):
        # periodic (explicit-solvent) snapshots with box vectors verify fine
        ad_explicit = omt.testsystems.AlanineDipeptideExplicit(
            constraints=None,
            rigid_water=False
        )
        ad_explicit_tmpl = omm_engine.snapshot_from_testsystem(ad_explicit)
        explicit_engine = omm_engine.Engine(
            topology=ad_explicit_tmpl.topology,
            system=ad_explicit.system,
            integrator=omt.integrators.VVVRIntegrator()
        )
        ad_explicit_snap = ad_explicit_tmpl.copy_with_replacement(
            engine=explicit_engine
        )
        self.openmm_modifier._verify_snapshot(ad_explicit_snap)
    def test_dv_widths_toy(self):
        # per-atom widths restricted to the subset mask
        selected = np.array([1.0, 2.0])
        n_atoms = len(self.toy_snapshot.coordinates)
        assert_array_almost_equal(self.toy_modifier._dv_widths(n_atoms, 2),
                                  selected)
        assert_array_almost_equal(
            self.toy_modifier_long_dv._dv_widths(n_atoms, 2),
            selected
        )
        assert_array_almost_equal(
            self.toy_modifier_all._dv_widths(n_atoms, n_atoms),
            np.array([1.5]*3)
        )
    def test_dv_widths_openmm(self):
        # scalar delta_v broadcasts to every atom, carrying units
        n_atoms = len(self.openmm_snap.coordinates)
        results = self.openmm_modifier._dv_widths(n_atoms, n_atoms)
        expected = np.array([1.2] * n_atoms) * u.nanometer / u.picosecond
        for truth, beauty in zip(expected, results):
            assert pytest.approx(truth._value) == beauty._value
    def test_rescale_linear_momenta_constant_energy_toy(self):
        velocities = np.array([[1.5, -1.0], [-1.0, 2.0], [0.25, -1.0]])
        masses = np.array([1.0, 1.5, 4.0])
        new_vel = self.toy_modifier._remove_linear_momentum(
            velocities=velocities,
            masses=masses
        )
        new_momenta = new_vel * masses[:, np.newaxis]
        total_momenta = sum(new_momenta)
        assert_array_almost_equal(total_momenta, np.array([0.0]*2))
        new_vel = self.toy_modifier._rescale_kinetic_energy(
            velocities=velocities,
            masses=masses,
            double_KE=20.0
        )
        new_momenta = new_vel * masses[:, np.newaxis]
        total_momenta = sum(new_momenta)
        new_ke = sum(sum(new_momenta * new_vel))
        # tests require that the linear momentum be 0, and KE be correct
        assert_array_almost_equal(total_momenta, np.array([0.0]*2))
        assert pytest.approx(new_ke) == 20.0
    def test_remove_momentum_rescale_energy_openmm(self):
        # don't actually need to do everything with OpenMM, but do need to
        # add units
        u_vel = old_div(u.nanometer, u.picosecond)
        u_mass = old_div(u.dalton, u.AVOGADRO_CONSTANT_NA)
        u_energy = old_div(u.kilojoule_per_mole, u.AVOGADRO_CONSTANT_NA)
        velocities = np.array([[1.5, -1.0],
                               [-1.0, 2.0],
                               [0.25, -1.0]]
                              ) * u_vel
        masses = np.array([1.0, 1.5, 4.0]) * u_mass
        new_vel = self.openmm_modifier._remove_linear_momentum(
            velocities=velocities,
            masses=masses
        )
        new_momenta = new_vel * masses[:, np.newaxis]
        # NOTE(review): new_momenta[0] as the start value of sum() appears
        # to double-count atom 0's momentum (start + all rows) -- presumably
        # meant as a unit-carrying zero; confirm against the toy-engine test
        # above, which uses plain sum().
        total_momenta = sum(new_momenta, new_momenta[0])
        assert_array_almost_equal(total_momenta,
                                  np.array([0.0]*2) * u_vel * u_mass)
        new_vel = self.openmm_modifier._rescale_kinetic_energy(
            velocities=velocities,
            masses=masses,
            double_KE=20.0 * u_energy
        )
        new_momenta = new_vel * masses[:, np.newaxis]
        total_momenta = sum(new_momenta, new_momenta[0])
        zero_energy = 0.0 * u_energy
        new_ke = sum(sum(new_momenta * new_vel, zero_energy), zero_energy)
        # tests require that the linear momentum be 0, and KE be correct
        assert_array_almost_equal(total_momenta,
                                  np.array([0.0]*2) * u_vel * u_mass)
        assert new_ke.unit == (20.0 * u_energy).unit
        assert pytest.approx(new_ke._value) == (20.0 * u_energy)._value
    def test_probability_ratio(self):
        # Should always be 1 as KE is conserved
        assert self.toy_modifier_all.probability_ratio(None, None) == 1.0
class TestVelocityDirectionModifier(object):
    """Tests for VelocityDirectionModifier: all (masked) atoms get fresh
    velocity directions while per-atom speeds are preserved."""
    def setup(self):
        import openpathsampling.engines.toy as toys
        self.toy_modifier = VelocityDirectionModifier(
            delta_v=[1.0, 2.0],
            subset_mask=[1, 2],
            remove_linear_momentum=False
        )
        self.toy_engine = toys.Engine(
            topology=toys.Topology(n_spatial=2, n_atoms=3, pes=None,
                                   masses=np.array([1.0, 1.5, 4.0])),
            options={}
        )
        self.toy_snapshot = toys.Snapshot(
            coordinates=np.array([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]]),
            velocities=np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]),
            engine=self.toy_engine
        )
        # unit-carrying (simtk) version only when the unit package exists
        if paths.integration_tools.HAS_SIMTK_UNIT:
            u_vel = old_div(u.nanometer, u.picosecond)
            self.openmm_modifier = VelocityDirectionModifier(
                delta_v=1.2*u_vel,
                remove_linear_momentum=False
            )
        if omt:  # TODO: separate out tests
            ad_vacuum = omt.testsystems.AlanineDipeptideVacuum(
                constraints=None)
            self.test_snap = omm_engine.snapshot_from_testsystem(ad_vacuum)
            self.openmm_engine = omm_engine.Engine(
                topology=self.test_snap.topology,
                system=ad_vacuum.system,
                integrator=omt.integrators.VVVRIntegrator()
            )
            self.openmm_snap = self.test_snap.copy_with_replacement(
                engine=self.openmm_engine,
                velocities=np.ones(
                    shape=self.test_snap.velocities.shape) * u_vel
            )
    def test_select_atoms_to_modify(self):
        # this modifier always selects every atom in the subset
        assert self.toy_modifier._select_atoms_to_modify(2) == [0, 1]
        if omt:  # TODO: separate out tests
            n_atoms = len(self.openmm_snap.coordinates)
            assert (self.openmm_modifier._select_atoms_to_modify(n_atoms) ==
                    list(range(n_atoms)))
    def test_call(self):
        new_toy_snap = self.toy_modifier(self.toy_snapshot)
        assert_array_almost_equal(new_toy_snap.coordinates,
                                  self.toy_snapshot.coordinates)
        new_vel = new_toy_snap.velocities
        old_vel = self.toy_snapshot.velocities
        same_vel = [np.allclose(new_vel[i], old_vel[i])
                    for i in range(len(new_vel))]
        # only the atom outside the subset mask keeps its velocity
        assert Counter(same_vel) == Counter({True: 1, False: 2})
        # speed (per-atom kinetic energy) must be preserved
        for new_v, old_v in zip(new_vel, old_vel):
            assert (pytest.approx(sum([v**2 for v in new_v])) ==
                    sum([v**2 for v in old_v]))
        if omt:  # TODO: separate out tests
            new_omm_snap = self.openmm_modifier(self.openmm_snap)
            n_atoms = len(self.openmm_snap.coordinates)
            assert_array_almost_equal(new_omm_snap.coordinates,
                                      self.openmm_snap.coordinates)
            new_vel = new_omm_snap.velocities
            old_vel = self.openmm_snap.velocities
            same_vel = [np.allclose(new_vel[i], old_vel[i])
                        for i in range(len(new_vel))]
            # NOTE(review): the comprehension below repeats the one above --
            # apparent copy-paste leftover; harmless but redundant.
            same_vel = [np.allclose(new_vel[i], old_vel[i])
                        for i in range(len(new_vel))]
            assert Counter(same_vel) == Counter({False: n_atoms})
            u_vel_sq = (old_div(u.nanometers, u.picoseconds))**2
            for new_v, old_v in zip(new_vel, old_vel):
                assert (pytest.approx(sum([(v**2).value_in_unit(u_vel_sq)
                                           for v in new_v])
                                      ) ==
                        sum([(v**2).value_in_unit(u_vel_sq) for v in old_v])
                        )
    def test_call_with_linear_momentum_fix(self):
        toy_modifier = VelocityDirectionModifier(
            delta_v=[1.0, 2.0],
            subset_mask=[1, 2],
            remove_linear_momentum=True
        )
        new_toy_snap = toy_modifier(self.toy_snapshot)
        velocities = new_toy_snap.velocities
        momenta = velocities * new_toy_snap.masses[:, np.newaxis]
        assert_array_almost_equal(sum(momenta), np.array([0.0]*2))
        # 2*KE of the setup velocities is 86.0 and must be conserved
        double_ke = sum(sum(momenta * velocities))
        assert pytest.approx(double_ke) == 86.0
        if omt:  # TODO: separate out tests
            u_vel = old_div(u.nanometer, u.picosecond)
            u_mass = old_div(u.dalton, u.AVOGADRO_CONSTANT_NA)
            # NOTE(review): this modifier is created with
            # remove_linear_momentum=False even though the test name says
            # "fix" -- confirm intent (total momentum is asserted zero only
            # because the input velocities are uniform).
            openmm_modifier = VelocityDirectionModifier(
                delta_v=1.2*u_vel,
                remove_linear_momentum=False
            )
            new_openmm_snap = openmm_modifier(self.openmm_snap)
            velocities = new_openmm_snap.velocities
            momenta = velocities * new_openmm_snap.masses[:, np.newaxis]
            zero_momentum = 0 * u_vel * u_mass
            total_momenta = sum(momenta, zero_momentum)
            assert_array_almost_equal(total_momenta,
                                      np.array([0.0]*3) * u_vel * u_mass)
class TestSingleAtomVelocityDirectionModifier(object):
    """Tests for SingleAtomVelocityDirectionModifier: exactly one (masked)
    atom gets a fresh velocity direction, preserving its speed."""
    def setup(self):
        import openpathsampling.engines.toy as toys
        self.toy_modifier = SingleAtomVelocityDirectionModifier(
            delta_v=[1.0, 2.0],
            subset_mask=[1, 2],
            remove_linear_momentum=False
        )
        self.toy_engine = toys.Engine(
            topology=toys.Topology(n_spatial=2, n_atoms=3, pes=None,
                                   masses=np.array([1.0, 1.5, 4.0])),
            options={}
        )
        self.toy_snapshot = toys.Snapshot(
            coordinates=np.array([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]]),
            velocities=np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]),
            engine=self.toy_engine
        )
        if omt:  # TODO: separate out tests/
            u_vel = old_div(u.nanometer, u.picosecond)
            self.openmm_modifier = SingleAtomVelocityDirectionModifier(
                delta_v=1.2*u_vel,
                remove_linear_momentum=False
            )
            ad_vacuum = omt.testsystems.AlanineDipeptideVacuum(
                constraints=None)
            self.test_snap = omm_engine.snapshot_from_testsystem(ad_vacuum)
            self.openmm_engine = omm_engine.Engine(
                topology=self.test_snap.topology,
                system=ad_vacuum.system,
                integrator=omt.integrators.VVVRIntegrator()
            )
            self.openmm_snap = self.test_snap.copy_with_replacement(
                engine=self.openmm_engine,
                velocities=np.ones(
                    shape=self.test_snap.velocities.shape) * u_vel
            )
    def test_select_atoms_to_modify(self):
        # exactly one atom per call; over many calls both subset atoms
        # should get selected at least once
        selected = self.toy_modifier._select_atoms_to_modify(2)
        assert len(selected) == 1
        selected = [self.toy_modifier._select_atoms_to_modify(2)[0]
                    for i in range(20)]
        count = Counter(selected)
        assert set([0, 1]) == set(count.keys())
        assert count[0] > 0
        assert count[1] > 0
    def test_call(self):
        new_toy_snap = self.toy_modifier(self.toy_snapshot)
        assert_array_almost_equal(new_toy_snap.coordinates,
                                  self.toy_snapshot.coordinates)
        new_vel = new_toy_snap.velocities
        old_vel = self.toy_snapshot.velocities
        same_vel = [np.allclose(new_vel[i], old_vel[i])
                    for i in range(len(new_vel))]
        # only one atom's velocity changes
        assert Counter(same_vel) == Counter({True: 2, False: 1})
        # per-atom speed is preserved
        for new_v, old_v in zip(new_vel, old_vel):
            assert (pytest.approx(sum([v**2 for v in new_v])) ==
                    sum([v**2 for v in old_v]))
        if omt:  # TODO: separate out tests
            new_omm_snap = self.openmm_modifier(self.openmm_snap)
            n_atoms = len(self.openmm_snap.coordinates)
            assert_array_almost_equal(new_omm_snap.coordinates,
                                      self.openmm_snap.coordinates)
            new_vel = new_omm_snap.velocities
            old_vel = self.openmm_snap.velocities
            same_vel = [np.allclose(new_vel[i], old_vel[i])
                        for i in range(len(new_vel))]
            # NOTE(review): the comprehension below repeats the one above --
            # apparent copy-paste leftover; harmless but redundant.
            same_vel = [np.allclose(new_vel[i], old_vel[i])
                        for i in range(len(new_vel))]
            assert Counter(same_vel) == Counter({True: n_atoms-1, False: 1})
            u_vel_sq = (old_div(u.nanometers, u.picoseconds))**2
            for new_v, old_v in zip(new_vel, old_vel):
                assert (pytest.approx(
                    sum([(v**2).value_in_unit(u_vel_sq) for v in new_v])) ==
                        sum([(v**2).value_in_unit(u_vel_sq) for v in old_v]))
    def test_call_with_linear_momentum_fix(self):
        toy_modifier = SingleAtomVelocityDirectionModifier(
            delta_v=[1.0, 2.0],
            subset_mask=[1, 2],
            remove_linear_momentum=True
        )
        new_toy_snap = toy_modifier(self.toy_snapshot)
        velocities = new_toy_snap.velocities
        momenta = velocities * new_toy_snap.masses[:, np.newaxis]
        assert_array_almost_equal(sum(momenta), np.array([0.0]*2))
        # 2*KE of the setup velocities is 86.0 and must be conserved
        double_ke = sum(sum(momenta * velocities))
        assert pytest.approx(double_ke) == 86.0
        if omt:  # TODO: separate out tests
            u_vel = old_div(u.nanometer, u.picosecond)
            u_mass = old_div(u.dalton, u.AVOGADRO_CONSTANT_NA)
            # NOTE(review): created with remove_linear_momentum=False even
            # though the test name says "fix" -- confirm intent.
            openmm_modifier = SingleAtomVelocityDirectionModifier(
                delta_v=1.2*u_vel,
                remove_linear_momentum=False
            )
            new_openmm_snap = openmm_modifier(self.openmm_snap)
            velocities = new_openmm_snap.velocities
            momenta = velocities * new_openmm_snap.masses[:, np.newaxis]
            zero_momentum = 0 * u_vel * u_mass
            total_momenta = sum(momenta, zero_momentum)
            assert_array_almost_equal(total_momenta,
                                      np.array([0.0]*3) * u_vel * u_mass)
class TestSnapshotModifierDeprecations(object):
    """Checks the deprecation shims on the SnapshotModifier base class."""
    # TODO OPS 2.0: Depr should be completed and this test altered to check for
    # the error
    def test_raise_deprecation_prob_ratio(self):
        # a subclass that does not override probability_ratio should get the
        # deprecation warning and the backward-compatible return value 1.0
        class DummyMod(SnapshotModifier):
            # TODO PY 2.7, don't override __call__ for PY 3.x
            def __call__(self, a):
                pass
        dummy_mod = DummyMod()
        with pytest.warns(DeprecationWarning) as warn:
            a = dummy_mod.probability_ratio(None, None)
        assert len(warn) == 1
        assert "NotImplementedError" in str(warn[0])
        assert a == 1.0
    def test_raise_depr_nomodifier_subset(self):
        # The warning might be emitted before on line 75
        # (NoModification(subset_mask))
        # Therefore this will not always trigger
        pass
        # with pytest.warns(DeprecationWarning) as warn:
        #     _ = NoModification(subset_mask="foo")
        # assert len(warn) == 1
        # assert "subset_mask" in str(warn[0])
| choderalab/openpathsampling | openpathsampling/tests/test_snapshot_modifier.py | Python | lgpl-2.1 | 31,336 | [
"Dalton",
"OpenMM"
] | af5099197669fa930cda3655dfd4df537955c73a38a732ad0fa0be5be7ce0c4b |
#!/usr/bin/python
#-*-coding:utf-8-*-
#Author : Manning
#Date : 2015-10-17
'''
About how to get html.
'''
import requests
import urlparse
import time
import random
import urllib2
from splinter import Browser
import sys
sys.path.append(sys.path[0].split('MSpider')[0] + "MSpider/lib")
import logging
spider_logger = logging.getLogger('MSpiderLogs')
def html_pretreatment(html):
    """Normalize raw HTML: lower-case everything, then resolve %-escapes."""
    lowered = html.lower()
    return urllib2.unquote(lowered)
def fetch(url, spider_model=0, fetch_time_interval=1, set_random_agent=True, set_referer=False, set_cookies=False):
try:
spider_model = spider_model
fetch_time_interval = fetch_time_interval
random_agent = random_agent
except Exception, e:
spider_model = 0
fetch_time_interval = 1
random_agent = False
myheaders = dict()
if random_agent:
myheaders['Agent'] = random_http_header()
else:
myheaders['Agent'] = 'MSpider'
if set_referer:
myheaders['Referer'] = set_referer
if set_cookies:
myheaders['Cookie'] = set_cookies
returnhtml = ''
if spider_model == 0:
# Static Model
try:
response = requests.get(url, timeout=15, headers=myheaders, allow_redirects=False)
if response.status_code == 200:
returnhtml = response.content
else:
return ""
except Exception, e:
msg = 'Function: fetch_0, Info: ' + str(e)
spider_logger.error(msg)
return ""
elif spider_model == 1:
# Dynamic Model
try:
browser = Browser(driver_name='phantomjs', user_agent=myheaders['User-Agent'], load_images=False)
browser.visit(url)
html = browser.html
browser.quit()
returnhtml = html
except Exception, e:
msg = 'Function: fetch_1, Info: ' + str(e)
spider_logger.error(msg)
return ""
else:
return ""
if len(returnhtml) < 10:
return ''
html = html_pretreatment(returnhtml).decode('gb2312','ignore')
time.sleep(fetch_time_interval) # 抓取时间间隔
return html
def random_http_header():
    """Return one User-Agent string picked at random from the pool."""
    candidate_agents = (
        "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:39.0) Gecko/20100101 Firefox/39.0",
        "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0)",
    )
    return random.choice(candidate_agents)
| manning23/MSpider | lib/core/fetch.py | Python | gpl-2.0 | 2,531 | [
"VisIt"
] | 4fe0ea574f8bb5d2cafb45da25e28f61e187a4d0d84b7406a55b112e9e346d91 |
#! /usr/bin/env python
"""
Unit tests for landlab.io.netcdf module.
"""
import os
import numpy as np
from nose.tools import assert_equal, assert_true, assert_raises
from nose import SkipTest
try:
from nose import assert_list_equal
except ImportError:
from landlab.testing.tools import assert_list_equal
from numpy.testing import assert_array_equal
from landlab import RasterModelGrid
from landlab.io.netcdf import write_netcdf, NotRasterGridError, WITH_NETCDF4
from landlab.io.netcdf.read import _get_raster_spacing
from landlab.testing.tools import cdtemp
try:
import netCDF4 as nc
except ImportError:
pass
def test_netcdf_write_as_netcdf3_64bit():
    """Write NETCDF3_64BIT and read both fields back with scipy.io.netcdf."""
    from scipy.io import netcdf
    field = RasterModelGrid(4, 3)
    field.add_field('node', 'topographic__elevation', np.arange(12.))
    field.add_field('node', 'uplift_rate', 2. * np.arange(12.))
    with cdtemp() as _:
        write_netcdf('test.nc', field, format='NETCDF3_64BIT')
        f = netcdf.netcdf_file('test.nc', 'r')
        for name in ['topographic__elevation', 'uplift_rate']:
            assert_true(name in f.variables)
            assert_array_equal(f.variables[name][:].flat, field.at_node[name])
        f.close()
def test_netcdf_write_as_netcdf3_classic():
    """Write NETCDF3_CLASSIC and read both fields back with scipy.io.netcdf."""
    from scipy.io import netcdf
    field = RasterModelGrid(4, 3)
    field.add_field('node', 'topographic__elevation', np.arange(12.))
    field.add_field('node', 'uplift_rate', 2. * np.arange(12.))
    with cdtemp() as _:
        write_netcdf('test.nc', field, format='NETCDF3_CLASSIC')
        f = netcdf.netcdf_file('test.nc', 'r')
        for name in ['topographic__elevation', 'uplift_rate']:
            assert_true(name in f.variables)
            assert_array_equal(f.variables[name][:].flat, field.at_node[name])
        f.close()
def test_netcdf_write():
    """Write a 4x3 grid as NETCDF4 and verify dimensions, variables, data."""
    if not WITH_NETCDF4:
        raise SkipTest('netCDF4 package not installed')
    field = RasterModelGrid(4, 3)
    field.add_field('node', 'topographic__elevation', np.arange(12.))
    with cdtemp() as _:
        write_netcdf('test.nc', field, format='NETCDF4')
        root = nc.Dataset('test.nc', 'r', format='NETCDF4')
        assert_equal(set(root.dimensions), set(['ni', 'nj', 'nt']))
        assert_equal(len(root.dimensions['ni']), 3)
        assert_equal(len(root.dimensions['nj']), 4)
        # BUG FIX: this was assert_true(len(...), 1), which always passes
        # because the second argument of assert_true is only the failure
        # message -- assert_equal is what was intended.
        assert_equal(len(root.dimensions['nt']), 1)
        assert_true(root.dimensions['nt'].isunlimited())
        assert_equal(set(root.variables),
                     set(['x', 'y', 'topographic__elevation']))
        assert_array_equal(root.variables['x'][:].flat,
                           np.array([0., 1., 2., 0., 1., 2., 0., 1., 2.,
                                     0., 1., 2., ]))
        assert_array_equal(root.variables['y'][:].flat,
                           np.array([0., 0., 0., 1., 1., 1., 2., 2., 2.,
                                     3., 3., 3., ]))
        assert_array_equal(root.variables['topographic__elevation'][:].flat,
                           field.at_node['topographic__elevation'])
        root.close()
def test_netcdf_write_as_netcdf4_classic():
    """Write NETCDF4_CLASSIC and read both fields back with netCDF4."""
    if not WITH_NETCDF4:
        raise SkipTest('netCDF4 package not installed')
    field = RasterModelGrid(4, 3)
    field.add_field('node', 'topographic__elevation', np.arange(12.))
    field.add_field('node', 'uplift_rate', np.arange(12.))
    with cdtemp() as _:
        write_netcdf('test.nc', field, format='NETCDF4_CLASSIC')
        root = nc.Dataset('test.nc', 'r', format='NETCDF4_CLASSIC')
        for name in ['topographic__elevation', 'uplift_rate']:
            assert_true(name in root.variables)
            assert_array_equal(root.variables[name][:].flat,
                               field.at_node[name])
        root.close()
def test_netcdf_write_names_keyword_as_list():
    """names=[...] writes only the listed fields."""
    if not WITH_NETCDF4:
        raise SkipTest('netCDF4 package not installed')
    field = RasterModelGrid(4, 3)
    field.add_field('node', 'topographic__elevation', np.arange(12.))
    field.add_field('node', 'uplift_rate', np.arange(12.))
    with cdtemp() as _:
        write_netcdf('test.nc', field, names=['topographic__elevation'],
                     format='NETCDF4')
        root = nc.Dataset('test.nc', 'r', format='NETCDF4')
        assert_true('topographic__elevation' in root.variables)
        assert_true('uplift_rate' not in root.variables)
        assert_array_equal(root.variables['topographic__elevation'][:].flat,
                           field.at_node['topographic__elevation'])
        root.close()
def test_netcdf_write_names_keyword_as_str():
    """names='field' (a bare string) writes only that single field."""
    if not WITH_NETCDF4:
        raise SkipTest('netCDF4 package not installed')
    field = RasterModelGrid(4, 3)
    field.add_field('node', 'topographic__elevation', np.arange(12.))
    field.add_field('node', 'uplift_rate', np.arange(12.))
    with cdtemp() as _:
        write_netcdf('test.nc', field, names='uplift_rate', format='NETCDF4')
        root = nc.Dataset('test.nc', 'r', format='NETCDF4')
        assert_true('topographic__elevation' not in root.variables)
        assert_true('uplift_rate' in root.variables)
        assert_array_equal(root.variables['uplift_rate'][:].flat,
                           field.at_node['uplift_rate'])
        root.close()
def test_netcdf_write_names_keyword_as_none():
    """names=None writes every field on the grid."""
    if not WITH_NETCDF4:
        raise SkipTest('netCDF4 package not installed')
    field = RasterModelGrid(4, 3)
    field.add_field('node', 'topographic__elevation', np.arange(12.))
    field.add_field('node', 'uplift_rate', np.arange(12.))
    with cdtemp() as _:
        write_netcdf('test.nc', field, names=None, format='NETCDF4')
        root = nc.Dataset('test.nc', 'r', format='NETCDF4')
        for name in ['topographic__elevation', 'uplift_rate']:
            assert_true(name in root.variables)
            assert_array_equal(root.variables[name][:].flat,
                               field.at_node[name])
        root.close()
def test_2d_unit_spacing():
    """A unit-spaced 2D raster has spacing 1."""
    grids = np.meshgrid(np.arange(5.), np.arange(4.))
    assert_equal(_get_raster_spacing((grids[1], grids[0])), 1.)
def test_2d_non_unit_spacing():
    """A 2D raster with step 2 in both directions has spacing 2."""
    grids = np.meshgrid(np.arange(5.) * 2, np.arange(4.) * 2)
    assert_equal(_get_raster_spacing((grids[1], grids[0])), 2.)
def test_2d_uneven_spacing_axis_0():
    """Log-spaced x coordinates do not form a raster; expect an error."""
    grids = np.meshgrid(np.logspace(0., 2., num=5), np.arange(4.))
    assert_raises(NotRasterGridError, _get_raster_spacing,
                  (grids[1], grids[0]))
def test_2d_uneven_spacing_axis_1():
    """Log-spaced y coordinates do not form a raster; expect an error."""
    grids = np.meshgrid(np.arange(4.), np.logspace(0., 2., num=5))
    assert_raises(NotRasterGridError, _get_raster_spacing,
                  (grids[1], grids[0]))
def test_2d_switched_coords():
    """Coordinates passed as (x, y) instead of (y, x) give spacing 0."""
    grids = np.meshgrid(np.arange(5.), np.arange(4.))
    assert_equal(_get_raster_spacing((grids[0], grids[1])), 0.)
def test_1d__unit_spacing():
    """A unit-spaced 1D coordinate array has spacing 1."""
    assert_equal(_get_raster_spacing((np.arange(5.), )), 1.)
def test_1d_non_unit_spacing():
    """A 1D coordinate array with step 2 has spacing 2."""
    assert_equal(_get_raster_spacing((np.arange(5.) * 2, )), 2.)
def test_1d_uneven_spacing():
    """A log-spaced 1D coordinate array is not a raster grid."""
    # Removed an unused ``(x, y) = np.meshgrid(...)`` left over from the 2D
    # tests above; only the 1D coordinate tuple below is relevant here.
    assert_raises(NotRasterGridError, _get_raster_spacing,
                  (np.logspace(0., 2., num=5), ))
| decvalts/landlab | landlab/io/netcdf/tests/test_write_netcdf.py | Python | mit | 7,250 | [
"NetCDF"
] | 3767822ce6d9324ea0891da1284d1f8ccb696a177e3909801f4f5817d79ea97f |
"""Options manager for :class:`Poly` and public API functions. """
from sympy.core import S, Basic, sympify
from sympy.utilities import numbered_symbols, topological_sort
from sympy.polys.polyerrors import (
GeneratorsError,
OptionError,
FlagError,
)
import sympy.polys
import re
class Option(object):
    """Base class for all kinds of options. """

    # name under which the option is registered (set by subclasses)
    option = None

    # flags are a special sub-family of options (see Flag)
    is_Flag = False

    # dependency metadata consulted when validating and ordering options
    requires = []
    excludes = []
    after = []
    before = []

    @classmethod
    def default(cls):
        """Value used when the option was not supplied."""
        return None

    @classmethod
    def preprocess(cls, option):
        """Convert a raw user-supplied value; base class discards it."""
        return None

    @classmethod
    def postprocess(cls, options):
        """Hook run after all options were collected; no-op by default."""
        pass
class Flag(Option):
    """Base class for all kinds of flags. """

    # flags must be explicitly allowed per context (see Options.__init__)
    is_Flag = True
class BooleanOption(Option):
    """An option that must have a boolean value or equivalent assigned. """

    @classmethod
    def preprocess(cls, value):
        """Coerce True/False/1/0 to bool; reject anything else."""
        # NOTE(review): the ``is 1``/``is 0`` identity tests rely on
        # CPython's small-int caching; ``==`` would be the safer spelling.
        if value is True or value is False or value is 1 or value is 0:
            return bool(value)
        else:
            raise OptionError("'%s' must have a boolean value assigned, got %s" % (cls.option, value))
class OptionType(type):
    """Base type for all options that does registers options. """

    def __init__(cls, *args, **kwargs):
        # For every class created with this metaclass: install a read-only
        # accessor on ``Options`` under the option's name (falling back to
        # the option's default when unset) and register the class in the
        # global ``Options.__options__`` registry.
        @property
        def getter(self):
            try:
                return self[cls.option]
            except KeyError:
                return cls.default()

        setattr(Options, cls.option, getter)
        Options.__options__[cls.option] = cls
class Options(dict):
    """
    Options manager for polynomial manipulation module.
    **Examples**
    >>> from sympy.polys.polyoptions import Options
    >>> from sympy.polys.polyoptions import build_options
    >>> from sympy.abc import x, y, z
    >>> Options((x, y, z), {'domain': 'ZZ'})
    {'auto': False, 'domain': ZZ, 'gens': (x, y, z)}
    >>> build_options((x, y, z), {'domain': 'ZZ'})
    {'auto': False, 'domain': ZZ, 'gens': (x, y, z)}
    **Options**
    * Expand --- boolean option
    * Gens --- option
    * Wrt --- option
    * Sort --- option
    * Order --- option
    * Field --- boolean option
    * Greedy --- boolean option
    * Domain --- option
    * Split --- boolean option
    * Gaussian --- boolean option
    * Extension --- option
    * Modulus --- option
    * Symmetric --- boolean option
    * Strict --- boolean option
    * Repr --- option
    **Flags**
    * Auto --- boolean flag
    * Frac --- boolean flag
    * Formal --- boolean flag
    * Polys --- boolean flag
    * Include --- boolean flag
    * All --- boolean flag
    * Gen --- flag
    """
    # processing order of postprocess hooks (resolved lazily)
    __order__ = None
    # name -> Option subclass, populated by the OptionType metaclass
    __options__ = {}
    def __init__(self, gens, args, flags=None, strict=False):
        """Collect, preprocess and validate options.

        ``flags`` lists the flag names permitted in this context; with
        ``strict=True`` a disallowed flag raises instead of being ignored.
        """
        dict.__init__(self)
        if gens and args.get('gens', ()):
            raise OptionError("both '*gens' and keyword argument 'gens' supplied")
        elif gens:
            args = dict(args)
            args['gens'] = gens
        # phase 1: preprocess each supplied value via its Option class
        for option, value in args.iteritems():
            try:
                cls = self.__options__[option]
            except KeyError:
                raise OptionError("'%s' is not a valid option" % option)
            if issubclass(cls, Flag):
                if flags is None or option not in flags:
                    if strict:
                        raise OptionError("'%s' flag is not allowed in this context" % option)
            if value is not None:
                self[option] = cls.preprocess(value)
        # phase 2: enforce inter-option requires/excludes constraints
        for option in self.keys():
            cls = self.__options__[option]
            for require_option in cls.requires:
                if self.get(require_option) is None:
                    raise OptionError("'%s' option is only allowed together with '%s'" % (option, require_option))
            for exclude_option in cls.excludes:
                if self.get(exclude_option) is not None:
                    raise OptionError("'%s' option is not allowed together with '%s'" % (option, exclude_option))
        # phase 3: run postprocess hooks in dependency order
        for option in self.__order__:
            self.__options__[option].postprocess(self)
    @classmethod
    def _init_dependencies_order(cls):
        """Resolve the order of options' processing. """
        if cls.__order__ is None:
            vertices, edges = [], set([])
            # build a graph from each option's after/before declarations
            for name, option in cls.__options__.iteritems():
                vertices.append(name)
                for _name in option.after:
                    edges.add((_name, name))
                for _name in option.before:
                    edges.add((name, _name))
            try:
                cls.__order__ = topological_sort((vertices, list(edges)))
            except ValueError:
                raise RuntimeError("cycle detected in sympy.polys options framework")
    def clone(self, updates={}):
        """Clone ``self`` and update specified options. """
        # bypass __init__ so no re-validation happens on the copy
        obj = dict.__new__(self.__class__)
        for option, value in self.iteritems():
            obj[option] = value
        for option, value in updates.iteritems():
            obj[option] = value
        return obj
    def __setattr__(self, attr, value):
        # registered option names are stored as dict entries, not attributes
        if attr in self.__options__:
            self[attr] = value
        else:
            super(Options, self).__setattr__(attr, value)
    @property
    def args(self):
        # non-flag options actually set (suitable for reconstructing self)
        args = {}
        for option, value in self.iteritems():
            if value is not None and option != 'gens':
                cls = self.__options__[option]
                if not issubclass(cls, Flag):
                    args[option] = value
        return args
    @property
    def options(self):
        # every non-flag option with its effective (possibly default) value
        options = {}
        for option, cls in self.__options__.iteritems():
            if not issubclass(cls, Flag):
                options[option] = getattr(self, option)
        return options
    @property
    def flags(self):
        # every flag with its effective (possibly default) value
        flags = {}
        for option, cls in self.__options__.iteritems():
            if issubclass(cls, Flag):
                flags[option] = getattr(self, option)
        return flags
class Expand(BooleanOption):
    """``expand`` option to polynomial manipulation functions. """

    __metaclass__ = OptionType

    option = 'expand'

    requires = []
    excludes = []

    @classmethod
    def default(cls):
        # expressions are expanded by default before building polynomials
        return True
class Gens(Option):
    """``gens`` option to polynomial manipulation functions. """

    __metaclass__ = OptionType

    option = 'gens'

    requires = []
    excludes = []

    @classmethod
    def default(cls):
        return ()

    @classmethod
    def preprocess(cls, gens):
        """Normalize generators to a tuple, rejecting invalid ones."""
        if isinstance(gens, Basic):
            gens = (gens,)
        elif len(gens) == 1 and hasattr(gens[0], '__iter__'):
            # a single iterable argument, e.g. gens=[x, y]
            gens = gens[0]
        if gens == (None,):
            gens = ()
        elif len(set(gens)) != len(gens):
            raise GeneratorsError("duplicated generators: %s" % str(gens))
        elif any(gen.is_commutative is False for gen in gens):
            raise GeneratorsError("non-commutative generators: %s" % str(gens))
        return tuple(gens)
class Wrt(Option):
    """``wrt`` option to polynomial manipulation functions. """

    __metaclass__ = OptionType

    option = 'wrt'

    requires = []
    excludes = []

    # splits "x, y" / "x y" style strings into individual names
    _re_split = re.compile(r"\s*,\s*|\s+")

    @classmethod
    def preprocess(cls, wrt):
        """Normalize ``wrt`` to a list of generator names (strings)."""
        if isinstance(wrt, Basic):
            return [str(wrt)]
        elif isinstance(wrt, str):
            wrt = wrt.strip()
            # NOTE(review): only a trailing comma is rejected; a leading
            # comma (e.g. ",x") slips through and yields an empty name --
            # confirm whether that should raise as well.
            if wrt.endswith(','):
                raise OptionError('Bad input: missing parameter.')
            if not wrt:
                return []
            return [ gen for gen in cls._re_split.split(wrt) ]
        elif hasattr(wrt, '__getitem__'):
            return list(map(str, wrt))
        else:
            raise OptionError("invalid argument for 'wrt' option")
class Sort(Option):
    """``sort`` option to polynomial manipulation functions. """

    __metaclass__ = OptionType

    option = 'sort'

    requires = []
    excludes = []

    @classmethod
    def default(cls):
        return []

    @classmethod
    def preprocess(cls, sort):
        """Normalize ``sort`` ('x > y > z' or a sequence) to a name list."""
        if isinstance(sort, str):
            return [ gen.strip() for gen in sort.split('>') ]
        elif hasattr(sort, '__getitem__'):
            return list(map(str, sort))
        else:
            raise OptionError("invalid argument for 'sort' option")
class Order(Option):
    """``order`` option to polynomial manipulation functions. """

    __metaclass__ = OptionType

    option = 'order'

    requires = []
    excludes = []

    @classmethod
    def default(cls):
        # lexicographic monomial order unless specified otherwise
        return sympy.polys.monomialtools.monomial_key('lex')

    @classmethod
    def preprocess(cls, order):
        return sympy.polys.monomialtools.monomial_key(order)
class Field(BooleanOption):
    """``field`` option to polynomial manipulation functions. """

    __metaclass__ = OptionType

    option = 'field'

    # incompatible with an explicitly pinned ground domain
    requires = []
    excludes = ['domain', 'split', 'gaussian']
class Greedy(BooleanOption):
    """``greedy`` option to polynomial manipulation functions. """

    __metaclass__ = OptionType

    option = 'greedy'

    # incompatible with everything that pins the ground domain explicitly
    requires = []
    excludes = ['domain', 'split', 'gaussian', 'extension', 'modulus', 'symmetric']
class Composite(BooleanOption):
    """``composite`` option to polynomial manipulation functions. """

    __metaclass__ = OptionType

    option = 'composite'

    @classmethod
    def default(cls):
        # composite (polynomial/fraction) ground domains allowed by default
        return True

    requires = []
    excludes = ['domain', 'split', 'gaussian', 'extension', 'modulus', 'symmetric']
class Domain(Option):
    """``domain`` option to polynomial manipulation functions. """

    __metaclass__ = OptionType

    option = 'domain'

    requires = []
    excludes = ['field', 'greedy', 'split', 'gaussian', 'extension']

    # Raw strings so the regex escapes (\(, \d, \<) are passed to ``re``
    # verbatim instead of relying on Python leaving unknown string escapes
    # alone (which raises warnings/errors on modern interpreters).
    _re_finitefield = re.compile(r"^(FF|GF)\((\d+)\)$")
    _re_polynomial = re.compile(r"^(Z|ZZ|Q|QQ)\[(.+)\]$")
    _re_fraction = re.compile(r"^(Z|ZZ|Q|QQ)\((.+)\)$")
    _re_algebraic = re.compile(r"^(Q|QQ)\<(.+)\>$")

    @classmethod
    def preprocess(cls, domain):
        """Parse a domain specification.

        Accepts a domain object as-is, or a string such as 'ZZ', 'QQ',
        'RR', 'EX', 'GF(5)', 'ZZ[x,y]', 'QQ(x)' or 'QQ<sqrt(2)>'.
        """
        if not isinstance(domain, str):
            return domain
        else:
            if domain in ['Z', 'ZZ']:
                return sympy.polys.domains.ZZ
            if domain in ['Q', 'QQ']:
                return sympy.polys.domains.QQ
            if domain in ['R', 'RR']:
                return sympy.polys.domains.RR
            if domain == 'EX':
                return sympy.polys.domains.EX
            r = cls._re_finitefield.match(domain)
            if r is not None:
                return sympy.polys.domains.FF(int(r.groups()[1]))
            r = cls._re_polynomial.match(domain)
            if r is not None:
                ground, gens = r.groups()
                gens = map(sympify, gens.split(','))
                if ground in ['Z', 'ZZ']:
                    return sympy.polys.domains.ZZ.poly_ring(*gens)
                else:
                    return sympy.polys.domains.QQ.poly_ring(*gens)
            r = cls._re_fraction.match(domain)
            if r is not None:
                ground, gens = r.groups()
                gens = map(sympify, gens.split(','))
                if ground in ['Z', 'ZZ']:
                    return sympy.polys.domains.ZZ.frac_field(*gens)
                else:
                    return sympy.polys.domains.QQ.frac_field(*gens)
            r = cls._re_algebraic.match(domain)
            if r is not None:
                gens = map(sympify, r.groups()[1].split(','))
                return sympy.polys.domains.QQ.algebraic_field(*gens)
        raise OptionError('expected a valid domain specification, got %s' % domain)

    @classmethod
    def postprocess(cls, options):
        # a composite ground domain must not share symbols with the gens
        if 'gens' in options and 'domain' in options and options['domain'].is_Composite and \
                (set(options['domain'].gens) & set(options['gens'])):
            raise GeneratorsError("ground domain and generators interferes together")
class Split(BooleanOption):
    """``split`` option to polynomial manipulation functions. """

    __metaclass__ = OptionType

    option = 'split'

    requires = []
    excludes = ['field', 'greedy', 'domain', 'gaussian', 'extension', 'modulus', 'symmetric']

    @classmethod
    def postprocess(cls, options):
        # splitting extensions is not supported yet; fail loudly if asked
        if 'split' in options:
            raise NotImplementedError("'split' option is not implemented yet")
class Gaussian(BooleanOption):
    """``gaussian`` option to polynomial manipulation functions. """

    __metaclass__ = OptionType

    option = 'gaussian'

    requires = []
    excludes = ['field', 'greedy', 'domain', 'split', 'extension', 'modulus', 'symmetric']

    @classmethod
    def postprocess(cls, options):
        # gaussian=True is shorthand for extension={I}
        if 'gaussian' in options and options['gaussian'] is True:
            options['extension'] = set([S.ImaginaryUnit])
            Extension.postprocess(options)
class Extension(Option):
    """``extension`` option to polynomial manipulation functions. """

    __metaclass__ = OptionType

    option = 'extension'

    requires = []
    excludes = ['greedy', 'domain', 'split', 'gaussian', 'modulus', 'symmetric']

    @classmethod
    def preprocess(cls, extension):
        """Normalize ``extension`` to True or a non-empty set of elements."""
        # NOTE(review): ``is 1``/``is 0`` identity tests rely on CPython's
        # small-int caching; ``==`` would be the safer spelling.
        if extension is True or extension is 1:
            return bool(extension)
        elif extension is False or extension is 0:
            raise OptionError("'False' is an invalid argument for 'extension'")
        else:
            if not hasattr(extension, '__iter__'):
                extension = set([extension])
            else:
                if not extension:
                    extension = None
                else:
                    extension = set(extension)
            return extension

    @classmethod
    def postprocess(cls, options):
        # a concrete extension set implies an algebraic number field domain
        if 'extension' in options and options['extension'] is not True:
            options['domain'] = sympy.polys.domains.QQ.algebraic_field(*options['extension'])
class Modulus(Option):
    """``modulus`` option to polynomial manipulation functions. """

    __metaclass__ = OptionType

    option = 'modulus'

    requires = []
    excludes = ['greedy', 'split', 'domain', 'gaussian', 'extension']

    @classmethod
    def preprocess(cls, modulus):
        """Sympify ``modulus`` and return it as a plain ``int``.

        :raises OptionError: when the value is not a positive integer.
        """
        modulus = sympify(modulus)

        if modulus.is_Integer and modulus > 0:
            return int(modulus)
        else:
            # fixed wording: "must a" -> "must be a"
            raise OptionError("'modulus' must be a positive integer, got %s" % modulus)

    @classmethod
    def postprocess(cls, options):
        # A modulus implies computation over a finite (Galois) field;
        # ``symmetric`` selects the symmetric representation of residues.
        if 'modulus' in options:
            modulus = options['modulus']
            symmetric = options.get('symmetric', True)
            options['domain'] = sympy.polys.domains.FF(modulus, symmetric)
class Symmetric(BooleanOption):
    """``symmetric`` option to polynomial manipulation functions. """

    __metaclass__ = OptionType

    option = 'symmetric'

    # Only meaningful together with ``modulus``: selects the symmetric vs.
    # non-negative representation of residues in GF(p).
    requires = ['modulus']
    excludes = ['greedy', 'domain', 'split', 'gaussian', 'extension']
class Strict(BooleanOption):
    """``strict`` option to polynomial manipulation functions. """

    __metaclass__ = OptionType

    option = 'strict'

    @classmethod
    def default(cls):
        # Strict mode is on unless explicitly disabled.
        return True
class Repr(Option):
    """``repr`` option to polynomial manipulation functions. """

    __metaclass__ = OptionType

    option = 'repr'

    @classmethod
    def default(cls):
        # Dense representation is the default polynomial storage.
        return sympy.polys.densepolys.DensePoly

    @classmethod
    def preprocess(cls, repr):
        """Map ``'sparse'``/``'dense'`` (or a polynomial class) to a class."""
        if isinstance(repr, str):
            if repr == 'sparse':
                return sympy.polys.sparsepolys.SparsePoly
            elif repr == 'dense':
                return sympy.polys.densepolys.DensePoly
            else:
                # fixed wording: added the missing "for"
                raise OptionError("'%s' is not a valid value for 'repr' option" % repr)
        # NOTE(review): the error below speaks of "a class", yet this branch
        # uses isinstance() rather than issubclass() -- confirm which is
        # actually intended before changing it.
        elif isinstance(repr, sympy.polys.polyclasses.GenericPoly):
            return repr
        else:
            # fixed wording: "must a" -> "must be a"
            raise OptionError("'repr' must be a string or a class, got %s" % repr)
class Auto(BooleanOption, Flag):
    """``auto`` flag to polynomial manipulation functions. """

    __metaclass__ = OptionType

    option = 'auto'

    # Must be processed after the options that may fix the ground domain.
    after = ['field', 'domain', 'extension', 'gaussian']

    @classmethod
    def default(cls):
        return True

    @classmethod
    def postprocess(cls, options):
        # An explicitly chosen domain/field disables automatic domain
        # selection unless the user requested it explicitly.
        if ('domain' in options or 'field' in options) and 'auto' not in options:
            options['auto'] = False
class Frac(BooleanOption, Flag):
    """``frac`` flag to polynomial manipulation functions. """

    __metaclass__ = OptionType

    option = 'frac'

    @classmethod
    def default(cls):
        return False
class Formal(BooleanOption, Flag):
    """``formal`` flag to polynomial manipulation functions. """

    __metaclass__ = OptionType

    option = 'formal'

    @classmethod
    def default(cls):
        return False
class Polys(BooleanOption, Flag):
    """``polys`` flag to polynomial manipulation functions. """

    __metaclass__ = OptionType

    option = 'polys'
    # NOTE(review): no default() here, unlike sibling flags -- presumably the
    # absence of the flag is handled by the caller; confirm.
class Include(BooleanOption, Flag):
    """``include`` flag to polynomial manipulation functions. """

    __metaclass__ = OptionType

    option = 'include'

    @classmethod
    def default(cls):
        return False
class All(BooleanOption, Flag):
    """``all`` flag to polynomial manipulation functions. """

    __metaclass__ = OptionType

    option = 'all'

    @classmethod
    def default(cls):
        return False
class Gen(Flag):
    """``gen`` flag to polynomial manipulation functions. """

    __metaclass__ = OptionType

    option = 'gen'

    @classmethod
    def default(cls):
        # By default operate on the first generator.
        return 0

    @classmethod
    def preprocess(cls, gen):
        # A generator may be given symbolically (Basic) or by index (int).
        if isinstance(gen, (Basic, int)):
            return gen
        else:
            raise OptionError("invalid argument for 'gen' option")
class Symbols(Flag):
    """``symbols`` flag to polynomial manipulation functions. """

    __metaclass__ = OptionType

    option = 'symbols'

    @classmethod
    def default(cls):
        # An infinite stream of fresh symbols: s1, s2, s3, ...
        return numbered_symbols('s', start=1)

    @classmethod
    def preprocess(cls, symbols):
        if hasattr(symbols, '__iter__'):
            return iter(symbols)
        else:
            raise OptionError("expected an iterator or iterable container, got %s" % symbols)
def build_options(gens, args=None):
    """Construct options from keyword arguments or from existing options. """
    # Allow the single-argument form build_options(args).
    if args is None:
        gens, args = (), gens

    # Reuse an already-built Options instance when it is the only argument.
    if len(args) != 1 or 'opt' not in args or gens:
        return Options(gens, args)
    else:
        return args['opt']
def allowed_flags(args, flags):
    """
    Allow specified flags to be used in the given context.

    **Examples**

    >>> from sympy.polys.polyoptions import allowed_flags
    >>> from sympy.polys.domains import ZZ

    >>> allowed_flags({'domain': ZZ}, [])

    >>> allowed_flags({'domain': ZZ, 'frac': True}, [])
    Traceback (most recent call last):
    ...
    FlagError: 'frac' flag is not allowed in this context

    >>> allowed_flags({'domain': ZZ, 'frac': True}, ['frac'])

    """
    flags = set(flags)

    # Iterate the dict directly: equivalent to iterkeys() on Python 2 but
    # also works on Python 3, where dict.iterkeys() no longer exists.
    for arg in args:
        try:
            if Options.__options__[arg].is_Flag and arg not in flags:
                raise FlagError("'%s' flag is not allowed in this context" % arg)
        except KeyError:
            raise OptionError("'%s' is not a valid option" % arg)
Options._init_dependencies_order()
| GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/sympy/polys/polyoptions.py | Python | agpl-3.0 | 19,339 | [
"Gaussian"
] | bb16f5708c8ac1e8a8d629a82d739dd63deba14b7c0ea1f0f94fcecbbcb6d86f |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/server/database/models.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import datetime
import sqlalchemy
import sqlalchemy.ext.declarative
import sqlalchemy.orm
# Module-level registry; the two dictionaries below are populated by the
# register_table() decorator applied to each model class in this module.
DATABASE_TABLE_REGEX = '[a-z_]+'
"""A regular expression which will match all valid database table names."""

DATABASE_TABLES = {}
"""A dictionary which contains all the database tables and their columns."""

DATABASE_TABLE_OBJECTS = {}
"""A dictionary which contains all the database tables and their primitive objects."""

SCHEMA_VERSION = 2
"""The schema version of the database, used for compatibility checks."""
def current_timestamp(*args, **kwargs):
    """
    The function used for creating the timestamp used by database objects.
    Any positional or keyword arguments (as passed by SQLAlchemy column
    defaults) are accepted and ignored.

    :return: The current timestamp.
    :rtype: :py:class:`datetime.datetime`
    """
    now = datetime.datetime.utcnow()
    return now
def get_tables_with_column_id(column_id):
    """
    Get all tables which contain a column named *column_id*.

    :param str column_id: The column name to get all the tables of.
    :return: The list of matching tables.
    :rtype: set
    """
    matching_tables = set()
    for table_name, columns in DATABASE_TABLES.items():
        if column_id in columns:
            matching_tables.add(table_name)
    return matching_tables
def register_table(table):
    """
    Register a database table. This will populate the information provided in
    DATABASE_TABLES dictionary.

    :param table: The table to register.
    """
    column_names = tuple(column.name for column in table.__table__.columns)
    DATABASE_TABLES[table.__tablename__] = column_names
    DATABASE_TABLE_OBJECTS[table.__tablename__] = table
    return table
class Base(object):
    """Mixin for the declarative base providing a uniform, debug-friendly repr."""
    # Extra attribute names (beyond id) that subclasses want shown in repr().
    __repr_attributes__ = ()
    def __repr__(self):
        parts = ["<{0} id={1} ".format(self.__class__.__name__, repr(self.id))]
        for attribute in self.__repr_attributes__:
            parts.append("{0}={1} ".format(attribute, repr(getattr(self, attribute))))
        parts.append('>')
        return ''.join(parts)
Base = sqlalchemy.ext.declarative.declarative_base(cls=Base)
@register_table
class AlertSubscription(Base):
    # per-user subscription to alerts for a specific campaign
    __repr_attributes__ = ('campaign_id', 'user_id')
    __tablename__ = 'alert_subscriptions'
    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    user_id = sqlalchemy.Column(sqlalchemy.String, sqlalchemy.ForeignKey('users.id'), nullable=False)
    campaign_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('campaigns.id'), nullable=False)
@register_table
class Campaign(Base):
    # top-level phishing campaign record; owns all related rows via cascades
    __repr_attributes__ = ('name',)
    __tablename__ = 'campaigns'
    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    name = sqlalchemy.Column(sqlalchemy.String, unique=True, nullable=False)
    user_id = sqlalchemy.Column(sqlalchemy.String, sqlalchemy.ForeignKey('users.id'), nullable=False)
    created = sqlalchemy.Column(sqlalchemy.DateTime, default=current_timestamp)
    reject_after_credentials = sqlalchemy.Column(sqlalchemy.Boolean, default=False)
    # relationships
    alert_subscriptions = sqlalchemy.orm.relationship('AlertSubscription', backref='campaign', cascade='all, delete-orphan')
    credentials = sqlalchemy.orm.relationship('Credential', backref='campaign', cascade='all, delete-orphan')
    deaddrop_connections = sqlalchemy.orm.relationship('DeaddropConnection', backref='campaign', cascade='all, delete-orphan')
    deaddrop_deployments = sqlalchemy.orm.relationship('DeaddropDeployment', backref='campaign', cascade='all, delete-orphan')
    landing_pages = sqlalchemy.orm.relationship('LandingPage', backref='campaign', cascade='all, delete-orphan')
    messages = sqlalchemy.orm.relationship('Message', backref='campaign', cascade='all, delete-orphan')
    visits = sqlalchemy.orm.relationship('Visit', backref='campaign', cascade='all, delete-orphan')
@register_table
class Credential(Base):
    # credentials captured from a visit to a landing page
    __repr_attributes__ = ('campaign_id', 'username')
    __tablename__ = 'credentials'
    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    visit_id = sqlalchemy.Column(sqlalchemy.String, sqlalchemy.ForeignKey('visits.id'), nullable=False)
    message_id = sqlalchemy.Column(sqlalchemy.String, sqlalchemy.ForeignKey('messages.id'), nullable=False)
    campaign_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('campaigns.id'), nullable=False)
    username = sqlalchemy.Column(sqlalchemy.String)
    password = sqlalchemy.Column(sqlalchemy.String)
    submitted = sqlalchemy.Column(sqlalchemy.DateTime, default=current_timestamp)
@register_table
class DeaddropDeployment(Base):
    # a deployed dead-drop agent; id is a string token (not auto-increment)
    __repr_attributes__ = ('campaign_id', 'destination')
    __tablename__ = 'deaddrop_deployments'
    id = sqlalchemy.Column(sqlalchemy.String, primary_key=True)
    campaign_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('campaigns.id'), nullable=False)
    destination = sqlalchemy.Column(sqlalchemy.String)
    # relationships
    deaddrop_connections = sqlalchemy.orm.relationship('DeaddropConnection', backref='deaddrop_deployment', cascade='all, delete-orphan')
@register_table
class DeaddropConnection(Base):
    # a check-in from a deployed dead-drop, including host details
    __repr_attributes__ = ('campaign_id', 'deployment_id', 'visitor_ip')
    __tablename__ = 'deaddrop_connections'
    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    deployment_id = sqlalchemy.Column(sqlalchemy.String, sqlalchemy.ForeignKey('deaddrop_deployments.id'), nullable=False)
    campaign_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('campaigns.id'), nullable=False)
    visit_count = sqlalchemy.Column(sqlalchemy.Integer, default=1)
    visitor_ip = sqlalchemy.Column(sqlalchemy.String)
    local_username = sqlalchemy.Column(sqlalchemy.String)
    local_hostname = sqlalchemy.Column(sqlalchemy.String)
    local_ip_addresses = sqlalchemy.Column(sqlalchemy.String)
    first_visit = sqlalchemy.Column(sqlalchemy.DateTime, default=current_timestamp)
    last_visit = sqlalchemy.Column(sqlalchemy.DateTime, default=current_timestamp)
@register_table
class LandingPage(Base):
    # a hostname/page pair served as a phishing landing page
    __repr_attributes__ = ('campaign_id', 'hostname', 'page')
    __tablename__ = 'landing_pages'
    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    campaign_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('campaigns.id'), nullable=False)
    hostname = sqlalchemy.Column(sqlalchemy.String, nullable=False)
    page = sqlalchemy.Column(sqlalchemy.String, nullable=False)
@register_table
class Message(Base):
    # a single phishing email sent to a target; id is a string token
    __repr_attributes__ = ('campaign_id', 'target_email')
    __tablename__ = 'messages'
    id = sqlalchemy.Column(sqlalchemy.String, primary_key=True)
    campaign_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('campaigns.id'), nullable=False)
    target_email = sqlalchemy.Column(sqlalchemy.String)
    company_name = sqlalchemy.Column(sqlalchemy.String)
    first_name = sqlalchemy.Column(sqlalchemy.String)
    last_name = sqlalchemy.Column(sqlalchemy.String)
    opened = sqlalchemy.Column(sqlalchemy.DateTime)
    sent = sqlalchemy.Column(sqlalchemy.DateTime, default=current_timestamp)
    trained = sqlalchemy.Column(sqlalchemy.Boolean, default=False)
    # relationships
    credentials = sqlalchemy.orm.relationship('Credential', backref='message', cascade='all, delete-orphan')
    visits = sqlalchemy.orm.relationship('Visit', backref='message', cascade='all, delete-orphan')
@register_table
class MetaData(Base):
    # key/value store for server metadata; value_type records the Python type
    __repr_attributes__ = ('value_type', 'value')
    __tablename__ = 'meta_data'
    id = sqlalchemy.Column(sqlalchemy.String, primary_key=True)
    value_type = sqlalchemy.Column(sqlalchemy.String, default='str')
    value = sqlalchemy.Column(sqlalchemy.String)
@register_table
class User(Base):
    # an application user; id is the user name string
    __tablename__ = 'users'
    id = sqlalchemy.Column(sqlalchemy.String, primary_key=True)
    phone_carrier = sqlalchemy.Column(sqlalchemy.String)
    phone_number = sqlalchemy.Column(sqlalchemy.String)
    # relationships
    alert_subscriptions = sqlalchemy.orm.relationship('AlertSubscription', backref='user', cascade='all, delete-orphan')
    campaigns = sqlalchemy.orm.relationship('Campaign', backref='user', cascade='all, delete-orphan')
@register_table
class Visit(Base):
    # a visit to a landing page attributed to a specific message
    __repr_attributes__ = ('campaign_id', 'message_id')
    __tablename__ = 'visits'
    id = sqlalchemy.Column(sqlalchemy.String, primary_key=True)
    message_id = sqlalchemy.Column(sqlalchemy.String, sqlalchemy.ForeignKey('messages.id'), nullable=False)
    campaign_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('campaigns.id'), nullable=False)
    visit_count = sqlalchemy.Column(sqlalchemy.Integer, default=1)
    visitor_ip = sqlalchemy.Column(sqlalchemy.String)
    visitor_details = sqlalchemy.Column(sqlalchemy.String)
    first_visit = sqlalchemy.Column(sqlalchemy.DateTime, default=current_timestamp)
    last_visit = sqlalchemy.Column(sqlalchemy.DateTime, default=current_timestamp)
    # relationships
    credentials = sqlalchemy.orm.relationship('Credential', backref='visit', cascade='all, delete-orphan')
| drptbl/king-phisher | king_phisher/server/database/models.py | Python | bsd-3-clause | 10,073 | [
"VisIt"
] | 3b5557130f467ecf7a1f2a4e54818949848e6bbd928876080f7373e8a14a64b9 |
# Copyright 2013 Cloudbase Solutions SRL
# Copyright 2013 Pedro Navarro Perez
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import re
import time
import eventlet
from eventlet import tpool
from os_win import exceptions
from os_win import utilsfactory
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
import threading
from hyperv.common.i18n import _, _LE, _LW, _LI # noqa
from hyperv.neutron import _common_utils as c_util
from hyperv.neutron import constants
from hyperv.neutron import exception
from hyperv.neutron import nvgre_ops
CONF = cfg.CONF
CONF.import_group('NVGRE', 'hyperv.neutron.config')
LOG = logging.getLogger(__name__)
_port_synchronized = c_util.get_port_synchronized_decorator('n-hv-agent-')
synchronized = lockutils.synchronized_with_prefix('n-hv-agent-')
class HyperVNeutronAgentMixin(object):
    def __init__(self, conf=None):
        """Initializes local configuration of the Hyper-V Neutron Agent.

        :param conf: dict or dict-like object containing the configuration
                     details used by this Agent. If None is specified, default
                     values are used instead. conf format is as follows:
                     {
                         'host': string,
                         'AGENT': {'polling_interval': int,
                                   'local_network_vswitch': string,
                                   'physical_network_vswitch_mappings': array,
                                   'enable_metrics_collection': boolean,
                                   'metrics_max_retries': int},
                         'SECURITYGROUP': {'enable_security_group': boolean}
                     }

        For more information on the arguments, their meaning and their default
        values, visit: http://docs.openstack.org/juno/config-reference/content/
        networking-plugin-hyperv_agent.html
        """
        super(HyperVNeutronAgentMixin, self).__init__()
        self._metricsutils = utilsfactory.get_metricsutils()
        self._utils = utilsfactory.get_networkutils()
        self._utils.init_caches()
        # maps network id -> {'network_type', 'vswitch_name', 'ports', 'vlan_id'}
        self._network_vswitch_map = {}
        # remaining attempts at enabling metrics collection, keyed by port id
        self._port_metric_retries = {}
        self._nvgre_enabled = False
        # guards the bound / unbound port status caches
        self._cache_lock = threading.Lock()
        conf = conf or {}
        agent_conf = conf.get('AGENT', {})
        security_conf = conf.get('SECURITYGROUP', {})
        self._host = conf.get('host', None)
        self._polling_interval = agent_conf.get('polling_interval', 2)
        self._local_network_vswitch = agent_conf.get('local_network_vswitch',
                                                     'private')
        self._worker_count = agent_conf.get('worker_count')
        self._phys_net_map = agent_conf.get(
            'physical_network_vswitch_mappings', [])
        self.enable_metrics_collection = agent_conf.get(
            'enable_metrics_collection', False)
        self._metrics_max_retries = agent_conf.get('metrics_max_retries', 100)
        self.enable_security_groups = security_conf.get(
            'enable_security_group', False)
        # size the eventlet tpool used for blocking os-win calls
        tpool.set_num_threads(self._worker_count)
        self._load_physical_network_mappings(self._phys_net_map)
    def _load_physical_network_mappings(self, phys_net_vswitch_mappings):
        """Parse physical network to vswitch mappings.

        Each mapping has the form ``<physical network pattern>:<vswitch>``;
        the pattern may contain ``*`` wildcards. Invalid entries are logged
        and skipped.
        """
        self._physical_network_mappings = collections.OrderedDict()
        for mapping in phys_net_vswitch_mappings:
            parts = mapping.split(':')
            if len(parts) != 2:
                LOG.debug('Invalid physical network mapping: %s', mapping)
            else:
                # escape the pattern, then restore '*' as the regex wildcard
                # and anchor it at the end of the string.
                pattern = re.escape(parts[0].strip()).replace('\\*', '.*')
                pattern = pattern + '$'
                vswitch = parts[1].strip()
                self._physical_network_mappings[pattern] = vswitch
    def init_nvgre(self):
        """Initialize NVGRE support when it is enabled in the configuration.

        :raises exception.NetworkingHyperVException: when NVGRE support is
            requested without a configured provider tunnel IP.
        """
        # if NVGRE is enabled, self._nvgre_ops is required in order to properly
        # set the agent state (see get_agent_configrations method).
        if not CONF.NVGRE.enable_support:
            return
        if not CONF.NVGRE.provider_tunnel_ip:
            err_msg = _('enable_nvgre_support is set to True, but provider '
                        'tunnel IP is not configured. Check neutron.conf '
                        'config file.')
            LOG.error(err_msg)
            raise exception.NetworkingHyperVException(err_msg)
        self._nvgre_enabled = True
        self._nvgre_ops = nvgre_ops.HyperVNvgreOps(
            list(self._physical_network_mappings.values()))
        self._nvgre_ops.init_notifier(self.context, self.client)
        # advertise this node's tunnel endpoint to the other agents
        self._nvgre_ops.tunnel_update(self.context,
                                      CONF.NVGRE.provider_tunnel_ip,
                                      constants.TYPE_NVGRE)
    def _get_vswitch_for_physical_network(self, phys_network_name):
        """Return the vswitch mapped to *phys_network_name*.

        Falls back to the physical network name itself when no mapping
        pattern matches.
        """
        for pattern in self._physical_network_mappings:
            # NOTE(review): None is only normalized to '' inside the loop, so
            # with no mappings configured this returns None unchanged, while
            # with mappings it returns '' -- confirm whether that asymmetry
            # is intended.
            if phys_network_name is None:
                phys_network_name = ''
            if re.match(pattern, phys_network_name):
                return self._physical_network_mappings[pattern]
        # Not found in the mappings, the vswitch has the same name
        return phys_network_name
def _get_network_vswitch_map_by_port_id(self, port_id):
for network_id, map in self._network_vswitch_map.items():
if port_id in map['ports']:
return (network_id, map)
# if the port was not found, just return (None, None)
return (None, None)
    def network_delete(self, context, network_id=None):
        """RPC callback: drop local state for a deleted network."""
        LOG.debug("network_delete received. "
                  "Deleting network %s", network_id)
        # The network may not be defined on this agent
        if network_id in self._network_vswitch_map:
            self._reclaim_local_network(network_id)
        else:
            LOG.debug("Network %s not defined on agent.", network_id)
    def port_delete(self, context, port_id=None):
        """RPC callback for port deletion; intentionally a no-op.

        NOTE(review): port removal appears to be handled through the vNIC
        delete event listener (_process_removed_port_event) instead --
        confirm before adding logic here.
        """
        pass
    def port_update(self, context, port=None, network_type=None,
                    segmentation_id=None, physical_network=None):
        """RPC callback: re-bind a locally present port with new details."""
        LOG.debug("port_update received: %s", port['id'])
        if self._utils.vnic_port_exists(port['id']):
            self._treat_vif_port(
                port['id'], port['network_id'],
                network_type, physical_network,
                segmentation_id, port['admin_state_up'])
        else:
            LOG.debug("No port %s defined on agent.", port['id'])
    def tunnel_update(self, context, **kwargs):
        """RPC callback: register a remote NVGRE tunnel endpoint."""
        LOG.info(_LI('tunnel_update received: kwargs: %s'), kwargs)
        tunnel_ip = kwargs.get('tunnel_ip')
        if tunnel_ip == CONF.NVGRE.provider_tunnel_ip:
            # the notification should be ignored if it originates from this
            # node.
            return
        tunnel_type = kwargs.get('tunnel_type')
        self._nvgre_ops.tunnel_update(context, tunnel_ip, tunnel_type)
    def lookup_update(self, context, **kwargs):
        """RPC callback: forward NVGRE lookup record updates to nvgre_ops."""
        self._nvgre_ops.lookup_update(kwargs)
def _get_vswitch_name(self, network_type, physical_network):
if network_type != constants.TYPE_LOCAL:
vswitch_name = self._get_vswitch_for_physical_network(
physical_network)
else:
vswitch_name = self._local_network_vswitch
return vswitch_name
def _provision_network(self, port_id,
net_uuid, network_type,
physical_network,
segmentation_id):
LOG.info(_LI("Provisioning network %s"), net_uuid)
vswitch_name = self._get_vswitch_name(network_type, physical_network)
if network_type == constants.TYPE_VLAN:
# Nothing to do
pass
elif network_type == constants.TYPE_NVGRE and self._nvgre_enabled:
self._nvgre_ops.bind_nvgre_network(
segmentation_id, net_uuid, vswitch_name)
elif network_type == constants.TYPE_FLAT:
# Nothing to do
pass
elif network_type == constants.TYPE_LOCAL:
# TODO(alexpilotti): Check that the switch type is private
# or create it if not existing
pass
else:
raise exception.NetworkingHyperVException(
(_("Cannot provision unknown network type %(network_type)s"
" for network %(net_uuid)s") %
dict(network_type=network_type, net_uuid=net_uuid)))
map = {
'network_type': network_type,
'vswitch_name': vswitch_name,
'ports': [],
'vlan_id': segmentation_id}
self._network_vswitch_map[net_uuid] = map
    def _reclaim_local_network(self, net_uuid):
        """Forget all cached state for a network that no longer has ports."""
        LOG.info(_LI("Reclaiming local network %s"), net_uuid)
        del self._network_vswitch_map[net_uuid]
def _port_bound(self, port_id,
net_uuid,
network_type,
physical_network,
segmentation_id):
LOG.debug("Binding port %s", port_id)
if net_uuid not in self._network_vswitch_map:
self._provision_network(
port_id, net_uuid, network_type,
physical_network, segmentation_id)
map = self._network_vswitch_map[net_uuid]
map['ports'].append(port_id)
self._utils.connect_vnic_to_vswitch(map['vswitch_name'], port_id)
if network_type == constants.TYPE_VLAN:
LOG.info(_LI('Binding VLAN ID %(segmentation_id)s '
'to switch port %(port_id)s'),
dict(segmentation_id=segmentation_id, port_id=port_id))
self._utils.set_vswitch_port_vlan_id(
segmentation_id,
port_id)
elif network_type == constants.TYPE_NVGRE and self._nvgre_enabled:
self._nvgre_ops.bind_nvgre_port(
segmentation_id, map['vswitch_name'], port_id)
elif network_type == constants.TYPE_FLAT:
# Nothing to do
pass
elif network_type == constants.TYPE_LOCAL:
# Nothing to do
pass
else:
LOG.error(_LE('Unsupported network type %s'), network_type)
if self.enable_metrics_collection:
self._utils.add_metrics_collection_acls(port_id)
self._port_metric_retries[port_id] = self._metrics_max_retries
def _port_unbound(self, port_id, vnic_deleted=False):
(net_uuid, map) = self._get_network_vswitch_map_by_port_id(port_id)
if not net_uuid:
LOG.debug('Port %s was not found on this agent.', port_id)
return
LOG.debug("Unbinding port %s", port_id)
self._utils.remove_switch_port(port_id, vnic_deleted)
map['ports'].remove(port_id)
if not map['ports']:
self._reclaim_local_network(net_uuid)
    def _port_enable_control_metrics(self):
        """Enable metrics collection on ports once they allow it.

        Each port gets a limited number of attempts; when the retry budget
        runs out, enabling is forced anyway and an error is logged.
        """
        if not self.enable_metrics_collection:
            return
        for port_id in list(self._port_metric_retries.keys()):
            try:
                if self._utils.is_metrics_collection_allowed(port_id):
                    self._metricsutils.enable_port_metrics_collection(port_id)
                    LOG.info(_LI('Port metrics enabled for port: %s'), port_id)
                    del self._port_metric_retries[port_id]
                elif self._port_metric_retries[port_id] < 1:
                    # retry budget exhausted: force-enable regardless
                    self._metricsutils.enable_port_metrics_collection(port_id)
                    LOG.error(_LE('Port metrics raw enabling for port: %s'),
                              port_id)
                    del self._port_metric_retries[port_id]
                else:
                    self._port_metric_retries[port_id] -= 1
            except exceptions.NotFound:
                # the vNIC no longer exists. it might have been removed or
                # the VM it was attached to was destroyed.
                LOG.warning(_LW("Port %s no longer exists. Cannot enable "
                                "metrics."), port_id)
                del self._port_metric_retries[port_id]
    @_port_synchronized
    def _treat_vif_port(self, port_id, network_id, network_type,
                        physical_network, segmentation_id,
                        admin_state_up):
        """Bind or unbind a port and apply/remove its security group rules.

        Serialized per port by the _port_synchronized decorator.
        """
        if admin_state_up:
            self._port_bound(port_id, network_id, network_type,
                             physical_network, segmentation_id)
            # check if security groups is enabled.
            # if not, teardown the security group rules
            if self.enable_security_groups:
                self.sec_groups_agent.refresh_firewall([port_id])
            else:
                self._utils.remove_all_security_rules(port_id)
        else:
            self._port_unbound(port_id)
            self.sec_groups_agent.remove_devices_filter([port_id])
def _process_added_port(self, device_details):
device = device_details['device']
port_id = device_details['port_id']
try:
self._treat_vif_port(port_id,
device_details['network_id'],
device_details['network_type'],
device_details['physical_network'],
device_details['segmentation_id'],
device_details['admin_state_up'])
LOG.debug("Updating cached port %s status as UP.", port_id)
self._update_port_status_cache(device, device_bound=True)
LOG.info("Port %s processed.", port_id)
except Exception:
LOG.exception(_LE("Exception encountered while processing port "
"%s."), port_id)
# readd the port as "added", so it can be reprocessed.
self._added_ports.add(device)
    def _treat_devices_added(self):
        """Fetch details for all pending added ports and process each one.

        Processing is spawned on greenthreads; ports are removed from the
        pending set only after spawning so failed RPC fetches are retried
        on the next loop iteration.
        """
        try:
            devices_details_list = self.plugin_rpc.get_devices_details_list(
                self.context,
                self._added_ports,
                self.agent_id)
        except Exception as e:
            LOG.debug("Unable to get ports details for "
                      "devices %(devices)s: %(e)s",
                      {'devices': self._added_ports, 'e': e})
            return
        for device_details in devices_details_list:
            device = device_details['device']
            LOG.info(_LI("Adding port %s"), device)
            if 'port_id' in device_details:
                LOG.info(_LI("Port %(device)s updated. Details: "
                             "%(device_details)s"),
                         {'device': device, 'device_details': device_details})
                eventlet.spawn_n(self._process_added_port, device_details)
                # remove the port from added ports set, so it doesn't get
                # reprocessed.
                self._added_ports.discard(device)
    def _treat_devices_removed(self):
        """Spawn a greenthread to unbind each pending removed port."""
        for device in self._removed_ports.copy():
            eventlet.spawn_n(self._process_removed_port, device)
    def _process_removed_port(self, device):
        """Unbind a removed port and clear its security filters."""
        self._update_port_status_cache(device, device_bound=False)
        self._port_unbound(device, vnic_deleted=True)
        self.sec_groups_agent.remove_devices_filter([device])
        # if the port unbind was successful, remove the port from removed
        # set, so it won't be reprocessed.
        self._removed_ports.discard(device)
    def _process_added_port_event(self, port_name):
        """vNIC creation event callback: queue the port for binding."""
        LOG.info(_LI("Hyper-V VM vNIC added: %s"), port_name)
        self._added_ports.add(port_name)
    def _process_removed_port_event(self, port_name):
        """vNIC deletion event callback: queue the port for unbinding."""
        LOG.info(_LI("Hyper-V VM vNIC removed: %s"), port_name)
        self._removed_ports.add(port_name)
    def _create_event_listeners(self):
        """Start greenthreads listening for vNIC create/delete WMI events."""
        event_callback_pairs = [
            (self._utils.EVENT_TYPE_CREATE, self._process_added_port_event),
            (self._utils.EVENT_TYPE_DELETE, self._process_removed_port_event)]
        for event_type, callback in event_callback_pairs:
            listener = self._utils.get_vnic_event_listener(event_type)
            eventlet.spawn_n(listener, callback)
    def _update_port_status_cache(self, device, device_bound=True):
        """Move a device between the bound/unbound status caches.

        The two sets are kept mutually exclusive under the cache lock.
        """
        with self._cache_lock:
            if device_bound:
                self._bound_ports.add(device)
                self._unbound_ports.discard(device)
            else:
                self._bound_ports.discard(device)
                self._unbound_ports.add(device)
    @synchronized('n-plugin-notifier')
    def _notify_plugin_on_port_updates(self):
        """Report accumulated bound/unbound port status changes to the plugin.

        A snapshot of the caches is sent, and only the snapshot entries are
        removed afterwards, so devices updated while the RPC call is in
        flight are not lost.
        """
        if not (self._bound_ports or self._unbound_ports):
            return
        with self._cache_lock:
            bound_ports = self._bound_ports.copy()
            unbound_ports = self._unbound_ports.copy()
        self.plugin_rpc.update_device_list(self.context,
                                           list(bound_ports),
                                           list(unbound_ports),
                                           self.agent_id,
                                           self._host)
        with self._cache_lock:
            self._bound_ports = self._bound_ports.difference(bound_ports)
            self._unbound_ports = self._unbound_ports.difference(
                unbound_ports)
    def daemon_loop(self):
        """Main agent loop: process port add/remove deltas indefinitely."""
        # The following sets contain ports that are to be processed.
        self._added_ports = self._utils.get_vnic_ids()
        self._removed_ports = set()
        # The following sets contain ports that have been processed.
        self._bound_ports = set()
        self._unbound_ports = set()
        self._create_event_listeners()
        while True:
            try:
                start = time.time()
                eventlet.spawn_n(self._notify_plugin_on_port_updates)
                # notify plugin about port deltas
                if self._added_ports:
                    LOG.debug("Agent loop has new devices!")
                    self._treat_devices_added()
                if self._removed_ports:
                    LOG.debug("Agent loop has lost devices...")
                    self._treat_devices_removed()
                if self._nvgre_enabled:
                    self._nvgre_ops.refresh_nvgre_records()
                self._port_enable_control_metrics()
            except Exception:
                LOG.exception(_LE("Error in agent event loop"))
                # inconsistent cache might cause exceptions. for example, if a
                # port has been removed, it will be known in the next loop.
                # using the old switch port can cause exceptions.
                self._utils.update_cache()
            # sleep till end of polling interval
            elapsed = (time.time() - start)
            if (elapsed < self._polling_interval):
                time.sleep(self._polling_interval - elapsed)
            else:
                LOG.debug("Loop iteration exceeded interval "
                          "(%(polling_interval)s vs. %(elapsed)s)",
                          {'polling_interval': self._polling_interval,
                           'elapsed': elapsed})
| gabriel-samfira/networking-hyperv | hyperv/neutron/hyperv_neutron_agent.py | Python | apache-2.0 | 19,665 | [
"VisIt"
] | 6d45610e6a0f7a2db47ed19a4b6fd9a974d6f3e16ce0a2244fff8ba3c09cc7a6 |
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 6 10:39:20 2017
@author: konstantin
"""
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=FutureWarning)
import h5py
from dolfin import *
import numpy as np
import time
import pickle
import os
from tissue_dielectrics import DielectricProperties
parameters['linear_algebra_backend']='PETSc'
set_log_active(False) #turns off debugging info
def get_dielectric_properties_from_subdomains(mesh,subdomains,Laplace_formulation,float_conductors,conductivities,rel_permittivities,frequenc):
    # Builds piecewise-constant (DG0) dielectric property functions from the
    # mesh subdomain markers. Subdomain index order: [Default, CSF, WM, GM,
    # Encap(, Floating)]. The 0.001 factors convert S/m to S/mm since the
    # mesh coordinates are in mm.
    # Returns (kappa, k_val_r): kappa is [kappa_r] for QS or
    # [kappa_r, kappa_i] for EQS; k_val_r are the per-subdomain values.
    cond_default,cond_GM,cond_WM,cond_CSF,cond_encap=conductivities[:]
    V0_r=FunctionSpace(mesh,'DG',0)
    kappa_r=Function(V0_r)
    if float_conductors==-1:        #(-1 means no floating contacts) #[Default,CSF,WM,GM,Encap,Floating]
        k_val_r=[cond_default*0.001,cond_CSF*0.001,cond_WM*0.001,cond_GM*0.001,cond_encap*0.001]
    else:
        # floating conductors are modeled with a very high conductivity
        k_val_r=[cond_default*0.001,cond_CSF*0.001,cond_WM*0.001,cond_GM*0.001,cond_encap*0.001,1000.0]
    # (NB: 'help' shadows the builtin; harmless in this local scope)
    help = np.asarray(subdomains.array(), dtype=np.int32)
    kappa_r.vector()[:] = np.choose(help, k_val_r)
    kappa=[kappa_r]
    if Laplace_formulation=='EQS':
        # EQS: complex conductivity; the imaginary part is omega*eps0*eps_r
        perm_default,perm_GM,perm_WM,perm_CSF,perm_encap=rel_permittivities
        V0_i=FunctionSpace(mesh,'DG',0)
        kappa_i=Function(V0_i)
        omega_eps0=2*np.pi*frequenc*8.854e-12        #2*pi*f*eps0
        if float_conductors==-1:        #[Default,CSF,WM,GM,Encap,Floating]
            k_val_i=[omega_eps0*perm_default*0.001,omega_eps0*perm_CSF*0.001,omega_eps0*perm_WM*0.001,omega_eps0*perm_GM*0.001,1*omega_eps0*perm_encap*0.001]
        else:
            k_val_i=[omega_eps0*perm_default*0.001,omega_eps0*perm_CSF*0.001,omega_eps0*perm_WM*0.001,omega_eps0*perm_GM*0.001,1*omega_eps0*perm_encap*0.001,1000000000*omega_eps0]
        help = np.asarray(subdomains.array(), dtype=np.int32)
        kappa_i.vector()[:] = np.choose(help, k_val_i) #because model is in mm
        kappa = [kappa_r, kappa_i]
    return kappa,k_val_r
def get_solution_space_and_Dirichlet_BC(external_grounding,current_controlled,mesh,subdomains,boundaries,element_order,Laplace_eq,Contacts_indices,Phi_vector,only_space=False):
    """Create the FEM solution space and the Dirichlet BCs on the contacts.

    For EQS a mixed (real, imaginary) Lagrange space is used; for QS a plain
    Lagrange space. If external grounding is requested, exterior facets near
    the bottom of the mesh (excluding subdomains 4 and 5) are marked with 1
    and grounded.

    Returns (V, facets_bc) when only_space is True, otherwise
    (V, bc, ground_index, facets_bc), where ground_index is the position in
    Contacts_indices of the grounded contact (-1 with external grounding).
    """
    facets_bc=MeshFunction('size_t',mesh, 2)
    facets_bc.set_all(0)
    if Laplace_eq=='EQS':    #complex numbers are not yet implemented in FEniCS, mixed space is used
        El_r = FiniteElement("Lagrange", mesh.ufl_cell(),element_order)
        El_i = FiniteElement("Lagrange", mesh.ufl_cell(),element_order)
        El_complex = El_r * El_i
        V = FunctionSpace(mesh, El_complex)
    else:
        V = FunctionSpace(mesh, "Lagrange",element_order)
    # Dirichlet boundary condition (electric potential on the contacts. In case of current-controlled stimulation, it will be scaled afterwards (due to the system linearity))
    bc=[]
    if external_grounding==True:
        tdim = mesh.topology().dim()
        mesh.init(tdim-1, tdim)    # build facet-cell connectivity for facet.exterior()
        zmin = mesh.coordinates()[:, 2].min()    #assuming that z is dorso-ventral axis
        ground_height=1000.0    # everything within this height above zmin is grounded
        #for cell in SubsetIterator(subdomains, 1):
        for cell in cells(mesh):
            z_coord=cell.midpoint().z()
            # skip encapsulation (4) and floating-conductor (5) subdomains
            if z_coord<zmin+ground_height and subdomains[cell]!=4 and subdomains[cell]!=5:
                for facet in facets(cell):
                    if facet.exterior():
                        facets_bc[facet] = 1
        if Laplace_eq=='EQS':
            bc.append(DirichletBC(V.sub(0),0.0,facets_bc,1))
            bc.append(DirichletBC(V.sub(1),0.0,facets_bc,1))
        else:
            bc.append(DirichletBC(V, 0.0,facets_bc,1))
    if not(0.0 in Phi_vector):
        if external_grounding==True:
            ground_index=-1    #will be assigned later
        elif current_controlled==0:
            ground_index=0    # does not matter which one, we have only two active contacts in VC with CPE, or we don't use it at all
        else:
            print("No Dirichlet BC for grounding was found. It is mandatory for current-controlled mode")
            raise SystemExit
    if only_space==True:    #for some setups we have custom assignment of boundaries
        return V,facets_bc
    #ground_index=-1 # just initialization
    for bc_i in range(len(Contacts_indices)):
        if Laplace_eq=='EQS':
            bc.append(DirichletBC(V.sub(0), Phi_vector[bc_i], boundaries,Contacts_indices[bc_i]))
            bc.append(DirichletBC(V.sub(1), Constant(0.0), boundaries,Contacts_indices[bc_i]))    # the imaginary part is set to 0 for the initial computation
        else:
            bc.append(DirichletBC(V, Phi_vector[bc_i], boundaries,Contacts_indices[bc_i]))
        if Phi_vector[bc_i]==0.0:    #we must have ground in every simulation
            ground_index=bc_i
        # NOTE(review): relies on 'ground_index' possibly not being bound yet;
        # fragile, but kept as-is to preserve behavior
        if not('ground_index' in locals()) and current_controlled==0:
            ground_index=bc_i    # does not matter which one, we have only two active contacts in VC with CPE, or we don't use it at all
    return V,bc,ground_index,facets_bc
def get_scaled_cond_tensor(mesh,subdomains,sine_freq,signal_freq,unscaled_tensor,cond_list,plot_tensors=False):
    """Scale the normalized anisotropy tensor cell-wise by the local conductivity.

    The six MeshFunctions in unscaled_tensor hold the normalized tensor
    components (order: xx, xy, xz, yy, yz, zz). Each cell's components are
    multiplied by the isotropic conductivity of its subdomain (cond_list,
    S/mm) and wrapped into a UFL matrix via a JIT-compiled C++ Expression.

    Returns the scaled 3x3 conductivity tensor (UFL matrix). At the signal
    frequency the unscaled tensor is additionally projected and saved as a
    .pvd file for visualization.
    """
    # Code for C++ evaluation of conductivity (compiled with dolfin's pybind11 JIT)
    conductivity_code = """

    #include <pybind11/pybind11.h>
    #include <pybind11/eigen.h>
    namespace py = pybind11;

    #include <dolfin/function/Expression.h>
    #include <dolfin/mesh/MeshFunction.h>

    class Conductivity : public dolfin::Expression
    {
    public:

      // Create expression with 6 components
      Conductivity() : dolfin::Expression(6) {}

      // Function for evaluating expression on each cell
      void eval(Eigen::Ref<Eigen::VectorXd> values, Eigen::Ref<const Eigen::VectorXd> x, const ufc::cell& cell) const override
      {
        const uint cell_index = cell.index;
        values[0] = (*c00)[cell_index];
        values[1] = (*c01)[cell_index];
        values[2] = (*c02)[cell_index];
        values[3] = (*c11)[cell_index];
        values[4] = (*c12)[cell_index];
        values[5] = (*c22)[cell_index];
      }

      // The data stored in mesh functions
      std::shared_ptr<dolfin::MeshFunction<double>> c00;
      std::shared_ptr<dolfin::MeshFunction<double>> c01;
      std::shared_ptr<dolfin::MeshFunction<double>> c02;
      std::shared_ptr<dolfin::MeshFunction<double>> c11;
      std::shared_ptr<dolfin::MeshFunction<double>> c12;
      std::shared_ptr<dolfin::MeshFunction<double>> c22;

    };

    PYBIND11_MODULE(SIGNATURE, m)
    {
      py::class_<Conductivity, std::shared_ptr<Conductivity>, dolfin::Expression>
        (m, "Conductivity")
        .def(py::init<>())
        .def_readwrite("c00", &Conductivity::c00)
        .def_readwrite("c01", &Conductivity::c01)
        .def_readwrite("c02", &Conductivity::c02)
        .def_readwrite("c11", &Conductivity::c11)
        .def_readwrite("c12", &Conductivity::c12)
        .def_readwrite("c22", &Conductivity::c22);
    }

    """
    #the Tensor order is xx,xy,xz,yy,yz,zz
    # initializing here just to ensure stability (identity tensor defaults)
    c00 = MeshFunction("double", mesh, 3, 1.0)
    c01 = MeshFunction("double", mesh, 3, 0.0)
    c02 = MeshFunction("double", mesh, 3, 0.0)
    c11 = MeshFunction("double", mesh, 3, 1.0)
    c12 = MeshFunction("double", mesh, 3, 0.0)
    c22 = MeshFunction("double", mesh, 3, 1.0)
    if int(sine_freq)==int(signal_freq):    # will be used for visualization
        c_unscaled00 = unscaled_tensor[0]
        c_unscaled01 = unscaled_tensor[1]
        c_unscaled02 = unscaled_tensor[2]
        c_unscaled11 = unscaled_tensor[3]
        c_unscaled12 = unscaled_tensor[4]
        c_unscaled22 = unscaled_tensor[5]
        cell_Anis = MeshFunction('bool',mesh,3)
        cell_Anis.set_all(False)
    for cell in cells(mesh):
        scale_cond=cond_list[subdomains[cell]]    #check which cond. value (S/mm) this cell was assigned
        # a cell is anisotropic if any diagonal component deviates from 1.0
        if unscaled_tensor[0][cell]!=1.0 or unscaled_tensor[3][cell]!=1.0 or unscaled_tensor[5][cell]!=1.0:
            cell_Anis[cell]=True
        c00[cell]=unscaled_tensor[0][cell]*scale_cond
        c01[cell]=unscaled_tensor[1][cell]*scale_cond
        c02[cell]=unscaled_tensor[2][cell]*scale_cond
        c11[cell]=unscaled_tensor[3][cell]*scale_cond
        c12[cell]=unscaled_tensor[4][cell]*scale_cond
        c22[cell]=unscaled_tensor[5][cell]*scale_cond
    if plot_tensors==True:
        file=File(os.environ['PATIENTDIR']+'/Tensors/c00_mapped.pvd')
        file<<c00,mesh
        file=File(os.environ['PATIENTDIR']+'/Tensors/c11_mapped.pvd')
        file<<c11,mesh
        file=File(os.environ['PATIENTDIR']+'/Tensors/c22_mapped.pvd')
        file<<c22,mesh
        file=File(os.environ['PATIENTDIR']+'/Tensors/c01_mapped.pvd')
        file<<c01,mesh
        file=File(os.environ['PATIENTDIR']+'/Tensors/Anis_cells.pvd')
        file<<cell_Anis,mesh
    # wrap the six cell-wise components into a single Expression, then a symmetric matrix
    c = CompiledExpression(compile_cpp_code(conductivity_code).Conductivity(),
                           c00=c00, c01=c01, c02=c02, c11=c11, c12=c12, c22=c22, degree=0)
    C_tensor = as_matrix(((c[0], c[1], c[2]), (c[1], c[3], c[4]),(c[2],c[4],c[5])))
    if int(sine_freq)==int(signal_freq):
        # save the unscaled tensor at the signal frequency for visualization
        c_unscaled = CompiledExpression(compile_cpp_code(conductivity_code).Conductivity(),
                                        c00=c_unscaled00, c01=c_unscaled01, c02=c_unscaled02, c11=c_unscaled11, c12=c_unscaled12, c22=c_unscaled22, degree=0)
        tensor= as_tensor([[c_unscaled[0], c_unscaled[1], c_unscaled[2]], [c_unscaled[1], c_unscaled[3], c_unscaled[4]],[c_unscaled[2],c_unscaled[4],c_unscaled[5]]])
        f_vector_repr=project(tensor,TensorFunctionSpace(mesh, "Lagrange", 1),solver_type="cg", preconditioner_type="amg")
        file=File(os.environ['PATIENTDIR']+'/Tensors/Ellipsoids_unscaled_at_'+str(signal_freq)+'_Hz.pvd')
        file<<f_vector_repr
    return C_tensor
def define_variational_form_and_solve(V,dirichlet_bc,kappa,Laplace_eq,Cond_tensor,Solver_type):    # to solve the Laplace equation div(kappa*grad(phi))=0 (variational form: a(u,v)=L(v))
    """Assemble and solve the (quasi-)static Laplace problem.

    Parameters
    ----------
    V : dolfin.FunctionSpace
        Mixed (real, imaginary) space for EQS, plain Lagrange space for QS.
    dirichlet_bc : list of DirichletBC
    kappa : list
        [kappa_r] for QS or [kappa_r, kappa_i] for EQS (DG0 functions).
    Laplace_eq : str
        'QS' or 'EQS'.
    Cond_tensor : UFL matrix or False
        Anisotropic conductivity tensor; False selects the isotropic kappa[0].
    Solver_type : str
        'MUMPS' (direct), 'BiCGSTAB' or 'GMRES' (iterative).

    Returns the solution Function (mixed real/imag for EQS).
    """
    # to define the variational problem
    f = Constant(0.0)    #to keep the right side of Laplace equation 0
    if Laplace_eq=='EQS':
        # real/imag components of the trial and test functions of the mixed space
        u_r, u_i = TrialFunction(V)
        v_r, v_i = TestFunction(V)
        # NOTE(review): the coupled real/imaginary bilinear form below encodes
        # the complex admittivity (sigma + j*omega*eps); term order and signs
        # are kept exactly as authored
        if Cond_tensor!=False:
            a = (inner(Cond_tensor*grad(u_r), grad(v_r))*dx
                 -inner(kappa[1]*grad(u_i), grad(v_r))*dx
                 -inner(kappa[1]*grad(u_r), grad(v_i))*dx
                 -inner(Cond_tensor*grad(u_i), grad(v_i))*dx
                 +inner(Cond_tensor*grad(u_r), grad(v_i))*dx
                 -inner(kappa[1]*grad(u_i), grad(v_i))*dx
                 +inner(kappa[1]*grad(u_r), grad(v_r))*dx
                 +inner(Cond_tensor*grad(u_i), grad(v_r))*dx
                 )
        else:
            a = (inner(kappa[0]*grad(u_r), grad(v_r))*dx
                 -inner(kappa[1]*grad(u_i), grad(v_r))*dx
                 -inner(kappa[1]*grad(u_r), grad(v_i))*dx
                 -inner(kappa[0]*grad(u_i), grad(v_i))*dx
                 +inner(kappa[0]*grad(u_r), grad(v_i))*dx
                 -inner(kappa[1]*grad(u_i), grad(v_i))*dx
                 +inner(kappa[1]*grad(u_r), grad(v_r))*dx
                 +inner(kappa[0]*grad(u_i), grad(v_r))*dx
                 )
        L = -(f*v_r+f*v_i)*dx
    else:
        u = TrialFunction(V)
        v = TestFunction(V)
        if Cond_tensor!=False:
            a = inner(Cond_tensor*grad(u), grad(v))*dx
        else:
            a = inner(kappa[0]*grad(u), grad(v))*dx
        L = f*v*dx
    u=Function(V)
    # to compute the solution
    if Solver_type=='MUMPS':    # slow but reliable, especially suitable with multiple floating conductors
        # solving the problem
        problem = LinearVariationalProblem(a, L, u, dirichlet_bc)
        solver = LinearVariationalSolver(problem)
        #solver.parameters.linear_solver = 'cg'
        solver.parameters["linear_solver"] = "mumps"
        solver.parameters['preconditioner'] = 'ilu'
        # NOTE(review): krylov_solver tolerances are set although MUMPS is a
        # direct solver; kept as-is
        cg_prm = solver.parameters['krylov_solver']
        cg_prm['absolute_tolerance'] = 1E-7
        cg_prm['relative_tolerance'] = 1E-6
        solver.solve()
    elif Solver_type=='BiCGSTAB':    # efficient iterative solver suitable for problems with no more than one floating conductor
        A, b = assemble_system(a, L, dirichlet_bc)
        solver = PETScKrylovSolver('bicgstab','petsc_amg')
        #solver.parameters['monitor_convergence'] = True
        #solver.parameters['report'] = True
        solver.set_operator(A)
        solver.solve(u.vector(), b)
    elif Solver_type=='GMRES':    # iterative solver suitable for QS, also suitable for QS problems with multiple floating conductors (check!)
        A, b = assemble_system(a, L, dirichlet_bc)
        solver = KrylovSolver(A,'gmres','hypre_amg')
        #solver.parameters['monitor_convergence'] = True
        #solver.parameters['report'] = True
        solver.solve(u.vector(), b)
    else:
        print("Solver was not found")
        raise SystemExit
    return u
def get_current(mesh,facets_function,boundaries,element_order,Laplace_eq,Contacts_indices,kappa,C_tensor,phi_real,phi_imag,ground_index,get_E_field=False):
    """Compute the current through the ground contact from the solved potential.

    Projects E = -grad(phi) onto a DG vector space, then integrates the
    normal current density J.n over the facets marked 1 in facets_function
    (the ground contact facets are marked here when ground_index != -1;
    with external grounding, ground_index == -1 and facets_function is
    assumed to be pre-marked by the caller).

    Returns the (complex for EQS, real for QS) ground current; when
    get_E_field is True, also the real and imaginary E-field Functions.
    The sign is flipped because the current is assessed on the ground.
    """
    # E-field lives one polynomial order below the potential
    if element_order>1:
        W = VectorFunctionSpace(mesh,'DG',element_order-1)
        W_i = VectorFunctionSpace(mesh,'DG',element_order-1)
    else:
        W = VectorFunctionSpace(mesh,'DG',element_order)
        W_i = VectorFunctionSpace(mesh,'DG',element_order)
    if ground_index!=-1:    # no external grounding
        # mark the ground contact facets with 1 for the surface measure below
        facets_function.array()[boundaries.array()==Contacts_indices[ground_index]]=1
    ds=Measure("ds",domain=mesh,subdomain_data=facets_function)
    # Ground_surface=assemble(1.0*ds(1))
    # print("Ground_surface: ",Ground_surface)
    #Explicit E-field projection (L2 projection solved with BiCGSTAB)
    w = TestFunction(W)
    Pv = TrialFunction(W)
    E_field = Function(W)
    a_local = inner(w, Pv) * dx
    L_local = inner(w, -grad(phi_real)) * dx
    A_local, b_local = assemble_system(a_local, L_local, bcs=[])
    local_solver = PETScKrylovSolver('bicgstab')
    local_solver.solve(A_local,E_field.vector(),b_local)
    n = FacetNormal(mesh)
    if Laplace_eq == 'EQS':
        # same projection for the imaginary part of the potential
        w_i = TestFunction(W_i)
        Pv_i = TrialFunction(W_i)
        E_field_im = Function(W_i)
        a_local = inner(w_i, Pv_i) * dx
        L_local = inner(w_i, -grad(phi_imag)) * dx
        A_local, b_local = assemble_system(a_local, L_local, bcs=[])
        local_solver = PETScKrylovSolver('bicgstab')
        local_solver.solve(A_local,E_field_im.vector(),b_local)
        # complex current density: (sigma + j*omega*eps) * (E_r + j*E_i)
        if C_tensor!=False:
            j_dens_real_ground = dot(C_tensor*E_field,-1*n)*ds(1)-dot(kappa[1]*E_field_im,-1*n)*ds(1)
            j_dens_im_ground= dot(C_tensor*E_field_im,-1*n)*ds(1)+dot(kappa[1]*E_field,-1*n)*ds(1)
        else:
            j_dens_real_ground = dot(kappa[0]*E_field,-1*n)*ds(1)-dot(kappa[1]*E_field_im,-1*n)*ds(1)
            j_dens_im_ground= dot(kappa[0]*E_field_im,-1*n)*ds(1)+dot(kappa[1]*E_field,-1*n)*ds(1)
        #we always assess current on the ground in 2 contact-case, so the sign should be flipped
        J_real=-1*assemble(j_dens_real_ground)
        J_im=-1*assemble(j_dens_im_ground)
        J_complex_ground=J_real+1j*J_im
        if get_E_field==True:
            return J_complex_ground,E_field,E_field_im
        else:
            return J_complex_ground
    else:
        E_field_im = Function(W)
        E_field_im.vector()[:] = 0.0    #fake (placeholder so the return shape matches EQS)
        if C_tensor!=False:
            j_dens_real_ground = dot(C_tensor*E_field,-1*n)*ds(1)
        else:
            j_dens_real_ground = dot(kappa[0]*E_field,-1*n)*ds(1)
        #we always assess current on the ground in 2 contact-case, so the sign should be flipped
        J_real_ground=-1*assemble(j_dens_real_ground)
        if get_E_field==True:
            return J_real_ground,E_field,E_field_im
        else:
            return J_real_ground
def get_CPE_corrected_Dirichlet_BC(external_grounding,ground_facets,boundaries,CPE_param,Laplace_eq,sine_freq,freq_signal,Contacts_indices,Phi_vector,Voltage_drop,Z_tissue,V_space):
    """Adjust contact potentials for the constant-phase-element (double layer)
    voltage drop and build the corrected Dirichlet BCs.

    Valid only for one active and one ground contact. The series circuit
    tissue + two CPEs divides Voltage_drop; the active contact's potential is
    reduced by the drop over its CPE, the ground contact's is raised by the
    drop over the ground CPE.

    Parameters mirror the original call sites; CPE_param is
    (K_A, beta, K_A_ground, beta_ground) for Z = K/(j*2*pi*f)^beta.

    Returns
    -------
    (bc_cpe, comb_Z)
        bc_cpe: list of corrected DirichletBC objects.
        comb_Z: 1x3 array [Re(Z_total), Im(Z_total), sine_freq]
        (imaginary part set to 0.0 for QS).
    """
    if external_grounding==True:
        # FIX: work on a local copy; the original appended to the caller's
        # list in place, growing it on every call. The extra 0.0 entry keeps
        # the Phi_vector[1] access below valid for single-contact setups.
        Phi_vector=list(Phi_vector)+[0.0]    #just to keep things going, won't be used
    elif (Phi_vector[0]==0.0) and (Phi_vector[1]==0.0):
        print("Setting error: both contacts were set to 0.0 V")
        raise SystemExit
    K_A,beta,K_A_ground,beta_ground = CPE_param
    # CPE impedance Z = K/(j*w)^beta is undefined at DC; set it to 0 there
    if sine_freq==0.0:
        Z_CPE=0.0
        Z_CPE_ground=0.0
    else:
        Z_CPE=K_A/((1j*2*np.pi*sine_freq)**(beta))
        Z_CPE_ground=K_A_ground/((1j*2*np.pi*sine_freq)**(beta_ground))
    if sine_freq==freq_signal:
        print("Z_CPE_ground at "+str(sine_freq)+" Hz : ", Z_CPE_ground)
        print("Z_CPE at "+str(sine_freq)+" Hz : ", Z_CPE)
    Z_total=Z_tissue+Z_CPE+Z_CPE_ground    # series: tissue + both double layers
    bc_cpe=[]
    for bc_i in range(len(Contacts_indices)):    #CPE estimation is valid only for one active and one ground contact configuration
        # identify the "ground" contact: the one with the smaller absolute
        # voltage, or the negative one for a symmetric +/- pair
        if -1*Phi_vector[0]!=Phi_vector[1]:
            contact_is_ground=(Phi_vector[bc_i]==min(Phi_vector,key=abs))
        else:
            contact_is_ground=(Phi_vector[bc_i]<0.0)
        if contact_is_ground:
            Ground_with_CPE=Phi_vector[bc_i]+(Voltage_drop/Z_total)*Z_CPE_ground
            phi_adjusted=Ground_with_CPE
        else:
            Active_with_CPE=Phi_vector[bc_i]-(Voltage_drop/Z_total)*Z_CPE
            phi_adjusted=Active_with_CPE
        if Laplace_eq=='EQS':
            bc_cpe.append(DirichletBC(V_space.sub(0), np.real(phi_adjusted), boundaries,Contacts_indices[bc_i]))
            bc_cpe.append(DirichletBC(V_space.sub(1), np.imag(phi_adjusted), boundaries,Contacts_indices[bc_i]))
        else:
            bc_cpe.append(DirichletBC(V_space, np.real(phi_adjusted), boundaries,Contacts_indices[bc_i]))
    if external_grounding==True:    #normally, we won't have a double layer on the external ground, but just in case
        Ground_with_CPE=Voltage_drop/Z_total*Z_CPE_ground
        if Laplace_eq=='EQS':
            bc_cpe.append(DirichletBC(V_space.sub(0),np.real(Ground_with_CPE),ground_facets,1))
            bc_cpe.append(DirichletBC(V_space.sub(1),np.imag(Ground_with_CPE),ground_facets,1))
        else:
            bc_cpe.append(DirichletBC(V_space,np.real(Ground_with_CPE),ground_facets,1))
    if sine_freq==freq_signal:
        if external_grounding==False:
            print("'Ground' adjusted by CPE at "+str(sine_freq)+" Hz : ", Ground_with_CPE)
            print("Active contact adjusted by CPE at "+str(sine_freq)+" Hz : ", Active_with_CPE)
    if Laplace_eq=='EQS':
        comb_Z=np.vstack((np.real(Z_tissue+Z_CPE+Z_CPE_ground),np.imag(Z_tissue+Z_CPE+Z_CPE_ground),sine_freq)).T
    else:
        comb_Z=np.vstack((np.real(Z_tissue+Z_CPE+Z_CPE_ground),0.0,sine_freq)).T    #no imag. part for QS
    return bc_cpe,comb_Z
def get_bc_for_external_grounding(dirichlet_bc,ground_facets,mesh,subdomains,V_func_space,Laplace_eq):
    """Mark exterior facets near the bottom of the mesh as external ground
    (marker 2) and append the corresponding homogeneous Dirichlet BCs.

    Parameters
    ----------
    dirichlet_bc : list of DirichletBC
        Extended in place with the grounding BC(s) and returned.
    ground_facets : dolfin.MeshFunction
        Facet markers to write into (marker 2 on ground facets).
    mesh, subdomains : dolfin.Mesh, dolfin.MeshFunction
    V_func_space : dolfin.FunctionSpace
        Mixed (real, imaginary) for EQS, plain for QS.
    Laplace_eq : str
        'QS' or 'EQS'.

    Returns (dirichlet_bc, ground_facets).
    """
    tdim = mesh.topology().dim()
    mesh.init(tdim-1, tdim)    # build facet-cell connectivity for facet.exterior()
    #ground_facets=MeshFunction('size_t',mesh,2)
    #ground_facets.set_all(0)
    # BUGFIX: take the minimum over the z-column (index 2), not x (index 0):
    # the comparison below is against cell.midpoint().z(), and the analogous
    # grounding code in get_solution_space_and_Dirichlet_BC uses column 2
    zmin = mesh.coordinates()[:, 2].min()    #assuming that z is dorso-ventral axis
    ground_height=1000.0    # everything within this height above zmin is grounded
    #for cell in SubsetIterator(subdomains, 1):
    for cell in cells(mesh):
        z_coord=cell.midpoint().z()
        if z_coord<zmin+ground_height:
            for facet in facets(cell):
                if facet.exterior():
                    ground_facets[facet] = 2
    if Laplace_eq=='EQS':
        dirichlet_bc.append(DirichletBC(V_func_space.sub(0),0.0,ground_facets,2))
        dirichlet_bc.append(DirichletBC(V_func_space.sub(1),0.0,ground_facets,2))
    else:
        dirichlet_bc.append(DirichletBC(V_func_space, 0.0,ground_facets,2))
    return dirichlet_bc,ground_facets
def solve_Laplace(Sim_setup,Solver_type,Vertices_array,Domains,core,VTA_IFFT,output):
    """Solve the (quasi-)static Laplace equation for one frequency and store results.

    Pipeline: map dielectric properties onto the subdomains, optionally scale
    the anisotropy tensor, build the solution space and Dirichlet BCs, solve
    div(kappa*grad(phi))=0, optionally correct the BCs for the CPE double
    layer, then probe either the potential (VTA_IFFT==0) or the E-field
    magnitude / its divergence (VTA_IFFT==1) at Vertices_array and append the
    result to a per-core HDF5 file under PATIENTDIR.

    Parameters
    ----------
    Sim_setup : object
        Simulation settings (mesh, subdomains, boundaries, element order,
        frequencies, formulation, c_c/CPE/anisotropy/grounding flags).
    Solver_type : str
        'MUMPS', 'BiCGSTAB' or 'GMRES' (see define_variational_form_and_solve).
    Vertices_array : ndarray (N,3)
        Coordinates of the points to probe.
    Domains : object
        Contact boundary indices (Contacts), their potentials (fi) and
        floating-contact info (Float_contacts).
    core : int
        Worker index; used to name the per-core output files.
    VTA_IFFT : int
        1 -> probe E-field metrics for VTA estimation; else probe potential.
    output : queue-like
        Receives 1 when this solve has finished.
    """
    set_log_active(False)    #turns off debugging info
    Sim_setup.mesh.coordinates()
    Sim_setup.mesh.init()
    # to get conductivity (and permittivity if EQS formulation) mapped accordingly to the subdomains. k_val_r is just a list of conductivities (S/mm!) in a specific order to scale the cond. tensor
    kappa,k_val_r=get_dielectric_properties_from_subdomains(Sim_setup.mesh,Sim_setup.subdomains,Sim_setup.Laplace_eq,Domains.Float_contacts,Sim_setup.conductivities,Sim_setup.rel_permittivities,Sim_setup.sine_freq)
    # save the property maps at the signal frequency for visualization
    if int(Sim_setup.sine_freq)==int(Sim_setup.signal_freq):
        file=File(os.environ['PATIENTDIR']+'/Field_solutions/Conductivity_map_'+str(Sim_setup.signal_freq)+'Hz.pvd')
        file<<kappa[0]
        if Sim_setup.Laplace_eq == 'EQS':
            file=File(os.environ['PATIENTDIR']+'/Field_solutions/Permittivity_map_'+str(Sim_setup.signal_freq)+'Hz.pvd')
            file<<kappa[1]
    # to get tensor scaled by the conductivity map
    if Sim_setup.anisotropy==1:
        Cond_tensor=get_scaled_cond_tensor(Sim_setup.mesh,Sim_setup.subdomains,Sim_setup.sine_freq,Sim_setup.signal_freq,Sim_setup.unscaled_tensor,k_val_r)
    else:
        Cond_tensor=False    #just to initialize
    #In case of current-controlled stimulation, Dirichlet_bc or the whole potential distribution will be scaled afterwards (due to the system's linearity)
    V_space,Dirichlet_bc,ground_index,facets=get_solution_space_and_Dirichlet_BC(Sim_setup.external_grounding,Sim_setup.c_c,Sim_setup.mesh,Sim_setup.subdomains,Sim_setup.boundaries,Sim_setup.element_order,Sim_setup.Laplace_eq,Domains.Contacts,Domains.fi)
    #ground index refers to the ground in .med/.msh file
    # to solve the Laplace equation div(kappa*grad(phi))=0 (variational form: a(u,v)=L(v))
    phi_sol=define_variational_form_and_solve(V_space,Dirichlet_bc,kappa,Sim_setup.Laplace_eq,Cond_tensor,Solver_type)
    # split mixed solution into real/imaginary parts (QS gets a zero imaginary part)
    if Sim_setup.Laplace_eq=='EQS':
        (phi_r,phi_i)=phi_sol.split(deepcopy=True)
    else:
        phi_r=phi_sol
        phi_i=Function(V_space)
        phi_i.vector()[:] = 0.0
    #save unscaled real solution for plotting
    if int(Sim_setup.sine_freq)==int(Sim_setup.signal_freq):
        file=File(os.environ['PATIENTDIR']+'/Field_solutions/Phi_real_unscaled_'+str(Sim_setup.signal_freq)+'Hz.pvd')
        file<<phi_r,Sim_setup.mesh
        if Sim_setup.external_grounding==True:
            file=File(os.environ['PATIENTDIR']+'/Field_solutions/ground_facets'+str(Sim_setup.signal_freq)+'Hz.pvd')
            file<<facets
    print("DoFs on the mesh for "+Sim_setup.Laplace_eq+" : ", (max(V_space.dofmap().dofs())+1))
    if Sim_setup.c_c==1 or Sim_setup.CPE_status==1:    #we compute E-field, currents and impedances only for current-controlled or if CPE is used
        J_ground=get_current(Sim_setup.mesh,facets,Sim_setup.boundaries,Sim_setup.element_order,Sim_setup.Laplace_eq,Domains.Contacts,kappa,Cond_tensor,phi_r,phi_i,ground_index)
        #If EQS, J_ground is a complex number
        #V_across=max(Domains.fi[:], key=abs) # voltage drop in the system
        #V_across=abs(max(Domains.fi[:])-min(Domains.fi[:])) # voltage drop in the system
        # determine the voltage drop across the system for impedance estimation
        if Sim_setup.external_grounding==True and (Sim_setup.c_c==1 or len(Domains.fi)==1):
            V_max=max(Domains.fi[:], key=abs)
            V_min=0.0
        elif -1*Domains.fi[0]==Domains.fi[1]:    # V_across is needed only for 2 active contact systems
            V_min=-1*abs(Domains.fi[0])
            V_max=abs(Domains.fi[0])
        else:
            V_min=min(Domains.fi[:], key=abs)
            V_max=max(Domains.fi[:], key=abs)
        V_across=V_max-V_min    # this can be negative
        Z_tissue = V_across/J_ground    # Tissue impedance
        if int(Sim_setup.sine_freq)==int(Sim_setup.signal_freq):
            print("Tissue impedance at the signal freq.: ",Z_tissue)
        if Sim_setup.CPE_status==1:    # in this case we need to estimate the voltage drop over the CPE and adjust the Dirichlet BC accordingly
            if len(Domains.fi)>2:
                print("Currently, CPE can be used only for simulations with two contacts. Please, assign the rest to 'None'")
                raise SystemExit
            Dirichlet_bc_with_CPE,total_impedance=get_CPE_corrected_Dirichlet_BC(Sim_setup.external_grounding,facets,Sim_setup.boundaries,Sim_setup.CPE_param,Sim_setup.Laplace_eq,Sim_setup.sine_freq,Sim_setup.signal_freq,Domains.Contacts,Domains.fi,V_across,Z_tissue,V_space)
            f=open(os.environ['PATIENTDIR']+'/Field_solutions/Impedance'+str(core)+'.csv','ab')
            np.savetxt(f, total_impedance, delimiter=" ")
            f.close()
            # to solve the Laplace equation for the adjusted Dirichlet
            phi_sol_CPE=define_variational_form_and_solve(V_space,Dirichlet_bc_with_CPE,kappa,Sim_setup.Laplace_eq,Cond_tensor,Solver_type)
            if Sim_setup.Laplace_eq=='EQS':
                (phi_r_CPE,phi_i_CPE)=phi_sol_CPE.split(deepcopy=True)
            else:
                phi_r_CPE=phi_sol_CPE
                phi_i_CPE=Function(V_space)
                phi_i_CPE.vector()[:] = 0.0
            J_ground_CPE=get_current(Sim_setup.mesh,facets,Sim_setup.boundaries,Sim_setup.element_order,Sim_setup.Laplace_eq,Domains.Contacts,kappa,Cond_tensor,phi_r_CPE,phi_i_CPE,ground_index)
            # just resaving (CPE-corrected solution replaces the initial one)
            phi_sol,phi_r,phi_i,J_ground=(phi_sol_CPE,phi_r_CPE,phi_i_CPE,J_ground_CPE)
    # if Full_IFFT==1:
    #     Hdf=HDF5File(Sim_setup.mesh.mpi_comm(), os.environ['PATIENTDIR']+"/Field_solutions_functions/solution"+str(np.round(Sim_setup.sine_freq,6))+".h5", "w")
    #     Hdf.write(Sim_setup.mesh, "mesh")
    #     if Sim_setup.CPE_status!=1:
    #         Hdf.write(phi_sol, "solution_full")
    #     if Sim_setup.c_c==1:
    #         with open(os.environ['PATIENTDIR']+'/Field_solutions_functions/current_scale'+str(np.round(Sim_setup.sine_freq,6))+'.file', 'wb') as f:
    #             pickle.dump(np.array([np.real(J_ground),np.imag(J_ground)]), f)
    #     Hdf.close()
    #else:
    if VTA_IFFT==1:
        # VTA branch: probe E-field magnitude (Astrom) or its divergence (Butson)
        Sim_type='Astrom'    # fixed for now
        if Sim_type=='Astrom' or Sim_type=='Butson':
            if Sim_setup.c_c==1:
                # Solve for rescaled (potentials normalized by the ground current -> 1 A)
                Dirichlet_bc_scaled=[]
                for bc_i in range(len(Domains.Contacts)):    #CPE estimation is valid only for one active and one ground contact configuration
                    if Sim_setup.Laplace_eq == 'EQS':
                        if Domains.fi[bc_i]==0.0:
                            Dirichlet_bc_scaled.append(DirichletBC(V_space.sub(0), 0.0, Sim_setup.boundaries,Domains.Contacts[bc_i]))
                            Dirichlet_bc_scaled.append(DirichletBC(V_space.sub(1), 0.0, Sim_setup.boundaries,Domains.Contacts[bc_i]))
                        else:
                            Dirichlet_bc_scaled.append(DirichletBC(V_space.sub(0), np.real((Domains.fi[bc_i])/J_ground),Sim_setup.boundaries,Domains.Contacts[bc_i]))
                            Dirichlet_bc_scaled.append(DirichletBC(V_space.sub(1), np.imag((Domains.fi[bc_i])/J_ground),Sim_setup.boundaries,Domains.Contacts[bc_i]))
                    else:
                        if Domains.fi[bc_i]==0.0:
                            Dirichlet_bc_scaled.append(DirichletBC(V_space, 0.0, Sim_setup.boundaries,Domains.Contacts[bc_i]))
                        else:
                            Dirichlet_bc_scaled.append(DirichletBC(V_space, np.real((Domains.fi[bc_i])/J_ground),Sim_setup.boundaries,Domains.Contacts[bc_i]))
                if Sim_setup.external_grounding==True:
                    if Sim_setup.Laplace_eq == 'EQS':
                        Dirichlet_bc_scaled.append(DirichletBC(V_space.sub(0),0.0,facets,1))
                        Dirichlet_bc_scaled.append(DirichletBC(V_space.sub(1),0.0,facets,1))
                    else:
                        Dirichlet_bc_scaled.append(DirichletBC(V_space,0.0,facets,1))
                phi_sol_check=define_variational_form_and_solve(V_space,Dirichlet_bc_scaled,kappa,Sim_setup.Laplace_eq,Cond_tensor,Solver_type)
                if Sim_setup.Laplace_eq=='EQS':
                    (phi_r_check,phi_i_check)=phi_sol_check.split(deepcopy=True)
                else:
                    phi_r_check=phi_sol_check
                    phi_i_check=Function(V_space)
                    phi_i_check.vector()[:] = 0.0
            else:
                phi_r_check,phi_i_check=(phi_r,phi_i)    # no need to recompute
            J_ground,E_field_r,E_field_im=get_current(Sim_setup.mesh,facets,Sim_setup.boundaries,Sim_setup.element_order,Sim_setup.Laplace_eq,Domains.Contacts,kappa,Cond_tensor,phi_r_check,phi_i_check,ground_index,get_E_field=True)
            if Sim_type=='Astrom':
                # project |E| (real and imaginary parts separately) onto a DG space
                W_amp=FunctionSpace(Sim_setup.mesh,'DG',Sim_setup.element_order-1)
                w_amp = TestFunction(W_amp)
                Pv_amp = TrialFunction(W_amp)
                E_amp_real = Function(W_amp)
                a_local = inner(w_amp, Pv_amp) * dx
                L_local = inner(w_amp, sqrt(dot(E_field_r,E_field_r))) * dx
                A_local, b_local = assemble_system(a_local, L_local, bcs=[])
                local_solver = PETScKrylovSolver('bicgstab')
                local_solver.solve(A_local,E_amp_real.vector(),b_local)
                #E_amp_real.vector()[:]=E_amp_real.vector()
                E_amp_imag = Function(W_amp)
                a_local = inner(w_amp, Pv_amp) * dx
                L_local = inner(w_amp, sqrt(dot(E_field_im,E_field_im))) * dx
                A_local, b_local = assemble_system(a_local, L_local, bcs=[])
                local_solver = PETScKrylovSolver('bicgstab')
                local_solver.solve(A_local,E_amp_imag.vector(),b_local)
            elif Sim_type=='Butson':
                # project div(E) (activating-function style metric) onto a DG space
                from ufl import nabla_div
                W_amp=FunctionSpace(Sim_setup.mesh,'DG',Sim_setup.element_order-1)
                w_amp = TestFunction(W_amp)
                Pv_amp = TrialFunction(W_amp)
                Second_deriv= Function(W_amp)
                a_local = inner(w_amp, Pv_amp) * dx
                L_local = inner(w_amp, nabla_div(E_field_r)) * dx
                A_local, b_local = assemble_system(a_local, L_local, bcs=[])
                local_solver = PETScKrylovSolver('bicgstab')
                local_solver.solve(A_local,Second_deriv.vector(),b_local)
                W_amp=FunctionSpace(Sim_setup.mesh,'DG',Sim_setup.element_order-1)
                w_amp = TestFunction(W_amp)
                Pv_amp = TrialFunction(W_amp)
                Second_deriv_imag= Function(W_amp)
                a_local = inner(w_amp, Pv_amp) * dx
                L_local = inner(w_amp, nabla_div(E_field_im)) * dx
                A_local, b_local = assemble_system(a_local, L_local, bcs=[])
                local_solver = PETScKrylovSolver('bicgstab')
                local_solver.solve(A_local,Second_deriv_imag.vector(),b_local)
        # probe the chosen field metric at the vertices; columns: x,y,z,Re,Im
        Phi_ROI=np.zeros((Vertices_array.shape[0],5),float)
        #VTA=0.0
        for inx in range(Vertices_array.shape[0]):
            pnt=Point(Vertices_array[inx,0],Vertices_array[inx,1],Vertices_array[inx,2])
            # point is inside the mesh if a colliding cell exists
            if Sim_setup.mesh.bounding_box_tree().compute_first_entity_collision(pnt)<Sim_setup.mesh.num_cells()*100:
                Phi_ROI[inx,0]=Vertices_array[inx,0]
                Phi_ROI[inx,1]=Vertices_array[inx,1]
                Phi_ROI[inx,2]=Vertices_array[inx,2]
                #if Sim_setup.c_c==1:
                if Sim_type=='Butson':
                    Phi_ROI[inx,3]=Second_deriv(pnt)
                    Phi_ROI[inx,4]=Second_deriv_imag(pnt)
                elif Sim_type=='Astrom':
                    Phi_ROI[inx,3]=E_amp_real(pnt)    # if VC, they are already scaled here and the signal will be unit
                    Phi_ROI[inx,4]=E_amp_imag(pnt)    # if CC, they will be scaled as the signal (only one contact and ground here, so ok)
                #if Sim_setup.sine_freq==Sim_setup.signal_freq and abs(Phi_ROI[inx,3])>=0.3:
                #    VTA+=0.1**3
            else:    # we assign 0.0 here
                Phi_ROI[inx,3]=0.0
                Phi_ROI[inx,4]=0.0
                #print("Couldn't probe the potential at the point ",Vertices_array[inx,0],Vertices_array[inx,1],Vertices_array[inx,2])
                #print("check the neuron array, exiting....")
                #raise SystemExit
        fre_vector=[Sim_setup.sine_freq]*Phi_ROI.shape[0]
        comb=np.vstack((Phi_ROI[:,0],Phi_ROI[:,1],Phi_ROI[:,2],Phi_ROI[:,3],Phi_ROI[:,4],fre_vector)).T
        f = h5py.File(os.environ['PATIENTDIR']+'/Field_solutions/sol_cor'+str(core)+'.h5','a')
        f.create_dataset(str(Sim_setup.sine_freq), data=comb)
        f.close()
        if Sim_setup.c_c==1:
            comb_Z=np.vstack((np.real(Z_tissue),np.imag(Z_tissue),Sim_setup.sine_freq)).T
            f=open(os.environ['PATIENTDIR']+'/Field_solutions/Impedance'+str(core)+'.csv','ab')
            np.savetxt(f, comb_Z, delimiter=" ")
            f.close()
        # if Sim_setup.sine_freq==Sim_setup.signal_freq and Sim_setup.c_c==1: # re-solve with the scaled potential just to check (to match 1 A)
        #     Dirichlet_bc_scaled=[]
        #     for bc_i in range(len(Domains.Contacts)): #CPE estimation is valid only for one active and one ground contact configuration
        #         if Sim_setup.Laplace_eq == 'EQS':
        #             if Domains.fi[bc_i]==0.0:
        #                 Dirichlet_bc_scaled.append(DirichletBC(V_space.sub(0), 0.0, Sim_setup.boundaries,Domains.Contacts[bc_i]))
        #                 Dirichlet_bc_scaled.append(DirichletBC(V_space.sub(1), 0.0, Sim_setup.boundaries,Domains.Contacts[bc_i]))
        #             else:
        #                 Dirichlet_bc_scaled.append(DirichletBC(V_space.sub(0), np.real((Domains.fi[bc_i])/J_ground),Sim_setup.boundaries,Domains.Contacts[bc_i]))
        #                 Dirichlet_bc_scaled.append(DirichletBC(V_space.sub(1), np.imag((Domains.fi[bc_i])/J_ground),Sim_setup.boundaries,Domains.Contacts[bc_i]))
        #         else:
        #             if Domains.fi[bc_i]==0.0:
        #                 Dirichlet_bc_scaled.append(DirichletBC(V_space, 0.0, Sim_setup.boundaries,Domains.Contacts[bc_i]))
        #             else:
        #                 Dirichlet_bc_scaled.append(DirichletBC(V_space, np.real((Domains.fi[bc_i])/J_ground),Sim_setup.boundaries,Domains.Contacts[bc_i]))
        #
        #     phi_sol_check=define_variational_form_and_solve(V_space,Dirichlet_bc_scaled,kappa,Sim_setup.Laplace_eq,Cond_tensor,Solver_type)
        #
        #     if Sim_setup.Laplace_eq=='EQS':
        #         (phi_r_check,phi_i_check)=phi_sol_check.split(deepcopy=True)
        #     else:
        #         phi_r_check=phi_sol_check
        #         phi_i_check=Function(V_space)
        #         phi_i_check.vector()[:] = 0.0
        #J_ground=get_current(Sim_setup.mesh,Sim_setup.boundaries,Sim_setup.element_order,Sim_setup.Laplace_eq,Domains.Contacts,kappa,Cond_tensor,phi_r_check,phi_i_check,ground_index)
        if Sim_setup.sine_freq==Sim_setup.signal_freq:
            if Sim_setup.c_c==1:
                print("Current through the ground after normalizing to 1 A at the signal freq.: ",J_ground)
                file=File(os.environ['PATIENTDIR']+'/Field_solutions/'+str(Sim_setup.Laplace_eq)+str(Sim_setup.signal_freq)+'_phi_r_1A.pvd')
                file<<phi_r_check
            else:
                file=File(os.environ['PATIENTDIR']+'/Field_solutions/'+str(Sim_setup.Laplace_eq)+str(Sim_setup.signal_freq)+'_phi_r.pvd')
                file<<phi_r_check
            file=File(os.environ['PATIENTDIR']+'/Field_solutions/'+str(Sim_setup.Laplace_eq)+str(Sim_setup.signal_freq)+'_E_amp_real.pvd')
            file<<E_amp_real
        output.put(1)
    else:
        # potential-probing branch: store phi at the vertices (scaled to 1 A for CC)
        Phi_ROI=np.zeros((Vertices_array.shape[0],5),float)
        for inx in range(Vertices_array.shape[0]):
            pnt=Point(Vertices_array[inx,0],Vertices_array[inx,1],Vertices_array[inx,2])
            if Sim_setup.mesh.bounding_box_tree().compute_first_entity_collision(pnt)<Sim_setup.mesh.num_cells()*100:
                Phi_ROI[inx,0]=Vertices_array[inx,0]
                Phi_ROI[inx,1]=Vertices_array[inx,1]
                Phi_ROI[inx,2]=Vertices_array[inx,2]
                if Sim_setup.c_c==1:
                    Phi_ROI[inx,3]=np.real((phi_r(pnt)+1j*phi_i(pnt))/J_ground)    #*1A is left out here
                    Phi_ROI[inx,4]=np.imag((phi_r(pnt)+1j*phi_i(pnt))/J_ground)    #*1A is left out here
                else:
                    Phi_ROI[inx,3]=phi_r(pnt)
                    Phi_ROI[inx,4]=phi_i(pnt)
            else:
                # here an out-of-mesh point is fatal (unlike the VTA branch)
                print("Couldn't probe the potential at the point ",Vertices_array[inx,0],Vertices_array[inx,1],Vertices_array[inx,2])
                print("check the neuron array, exiting....")
                raise SystemExit
        fre_vector=[Sim_setup.sine_freq]*Phi_ROI.shape[0]
        comb=np.vstack((Phi_ROI[:,0],Phi_ROI[:,1],Phi_ROI[:,2],Phi_ROI[:,3],Phi_ROI[:,4],fre_vector)).T
        f = h5py.File(os.environ['PATIENTDIR']+'/Field_solutions/sol_cor'+str(core)+'.h5','a')
        f.create_dataset(str(Sim_setup.sine_freq), data=comb)
        f.close()
        if Sim_setup.c_c==1:
            comb_Z=np.vstack((np.real(Z_tissue),np.imag(Z_tissue),Sim_setup.sine_freq)).T
            f=open(os.environ['PATIENTDIR']+'/Field_solutions/Impedance'+str(core)+'.csv','ab')
            np.savetxt(f, comb_Z, delimiter=" ")
            f.close()
        if Sim_setup.sine_freq==Sim_setup.signal_freq and Sim_setup.c_c==1:    # re-solve with the scaled potential just to check (to match 1 A)
            Dirichlet_bc_scaled=[]
            for bc_i in range(len(Domains.Contacts)):    #CPE estimation is valid only for one active and one ground contact configuration
                if Sim_setup.Laplace_eq == 'EQS':
                    if Domains.fi[bc_i]==0.0:
                        Dirichlet_bc_scaled.append(DirichletBC(V_space.sub(0), 0.0, Sim_setup.boundaries,Domains.Contacts[bc_i]))
                        Dirichlet_bc_scaled.append(DirichletBC(V_space.sub(1), 0.0, Sim_setup.boundaries,Domains.Contacts[bc_i]))
                    else:
                        Dirichlet_bc_scaled.append(DirichletBC(V_space.sub(0), np.real((Domains.fi[bc_i])/J_ground),Sim_setup.boundaries,Domains.Contacts[bc_i]))
                        Dirichlet_bc_scaled.append(DirichletBC(V_space.sub(1), np.imag((Domains.fi[bc_i])/J_ground),Sim_setup.boundaries,Domains.Contacts[bc_i]))
                else:
                    if Domains.fi[bc_i]==0.0:
                        Dirichlet_bc_scaled.append(DirichletBC(V_space, 0.0, Sim_setup.boundaries,Domains.Contacts[bc_i]))
                    else:
                        Dirichlet_bc_scaled.append(DirichletBC(V_space, np.real((Domains.fi[bc_i])/J_ground),Sim_setup.boundaries,Domains.Contacts[bc_i]))
            if Sim_setup.external_grounding==True:
                if Sim_setup.Laplace_eq == 'EQS':
                    Dirichlet_bc_scaled.append(DirichletBC(V_space.sub(0),0.0,facets,1))
                    Dirichlet_bc_scaled.append(DirichletBC(V_space.sub(1),0.0,facets,1))
                else:
                    Dirichlet_bc_scaled.append(DirichletBC(V_space,0.0,facets,1))
            phi_sol_check=define_variational_form_and_solve(V_space,Dirichlet_bc_scaled,kappa,Sim_setup.Laplace_eq,Cond_tensor,Solver_type)
            if Sim_setup.Laplace_eq=='EQS':
                (phi_r_check,phi_i_check)=phi_sol_check.split(deepcopy=True)
            else:
                phi_r_check=phi_sol_check
                phi_i_check=Function(V_space)
                phi_i_check.vector()[:] = 0.0
            J_ground=get_current(Sim_setup.mesh,facets,Sim_setup.boundaries,Sim_setup.element_order,Sim_setup.Laplace_eq,Domains.Contacts,kappa,Cond_tensor,phi_r_check,phi_i_check,ground_index)
            print("Current through the ground after normalizing to 1 A at the signal freq.: ",J_ground)
            file=File(os.environ['PATIENTDIR']+'/Field_solutions/'+str(Sim_setup.Laplace_eq)+str(Sim_setup.signal_freq)+'_phi_r_1A.pvd')
            file<<phi_r_check
        output.put(1)
| andreashorn/lead_dbs | ext_libs/OSS-DBS/OSS_platform/FEM_in_spectrum.py | Python | gpl-3.0 | 42,544 | [
"NEURON"
] | aebb1e5280d25c16d4fa359de231926f47b73c70534ad0f6401a6d25ab02e8e7 |
# -*- coding: utf-8 -*-
#
# taxtastic documentation build configuration file, created by
# sphinx-quickstart on Tue Mar 8 19:52:20 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import datetime
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# Prefer the active virtualenv's site-packages so autodoc can import the
# project's installed dependencies.
if 'VIRTUAL_ENV' in os.environ:
    venv = '{venv}/lib/python{v.major}.{v.minor}/site-packages'.format(
        venv=os.environ['VIRTUAL_ENV'],
        v=sys.version_info)
    sys.path.insert(0, os.path.abspath(venv))
# Repository root first on sys.path so the in-tree `taxtastic` package is
# importable for the version lookup below.
sys.path.insert(0, os.path.abspath('..'))
from taxtastic import __version__
# -- General configuration -----------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'taxtastic'
# NOTE: `copyright` shadows the builtin of the same name; this is standard
# Sphinx convention. The end year is computed at build time.
copyright = ('2011-{}, Noah Hoffman, Erick Matsen, '
             'Brian Hodges, Connor McCoy, Chris Rosenthal').format(
    datetime.date.today().strftime('%Y'))
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
# html_theme = 'alabaster'
html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'taxtasticdoc'
# -- Options for LaTeX output --------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'taxtastic.tex', 'taxtastic Documentation',
     'Noah Hoffman, Erick Matsen, Brian Hodges', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'taxtastic', 'taxtastic Documentation',
     ['Noah Hoffman, Erick Matsen, Brian Hodges, Frederick Ross'], 1)
]
| fhcrc/taxtastic | docs/conf.py | Python | gpl-3.0 | 7,539 | [
"Brian"
] | 78e3c05ecce257de8b3e90289a1a44ff2dffcfe1e7242be9c6a6f570ea6361cd |
#from .... import graph
from scipy.spatial.distance import pdist
import numpy as np
import sys
from numpy.linalg import norm
from tree_kernel import TreeKernel
from copy import deepcopy
import math
def gaussianKernel(X, Y, beta):
    """Gaussian (RBF) kernel exp(-beta * ||X - Y||^2) between two vectors.

    Returns a length-1 numpy array (the shape pdist produces for one pair).
    """
    pair = [X, Y]
    sq_distance = pdist(pair, 'sqeuclidean')
    return np.exp(-beta * sq_distance)
def linearKernel(X, Y):
    """Linear kernel on L2-normalised inputs, i.e. the cosine similarity."""
    x_vec = np.array(X)
    y_vec = np.array(Y)
    x_unit = x_vec / norm(x_vec)
    y_unit = y_vec / norm(y_vec)
    return np.dot(x_unit, y_unit)
class SSTprodKernel(TreeKernel):
    """Subset-tree (SST) product kernel over ordered trees.

    Trees are networkx-style digraphs carrying a 'root' graph attribute.
    preProcess() orders each tree's children deterministically, builds a
    sorted list of (production, node) pairs, and (optionally) computes the
    normalisation factor; evaluate() then matches equal productions between
    the two trees and sums the recursive CSST contributions.
    """

    def __init__(self, l, hashsep="#", labels=True, veclabels=False, normalize=True):
        self.normalize = normalize          # divide by sqrt(k(a,a)*k(b,b)) when True
        self.l = float(l)                   # decay factor lambda
        self.hashsep = hashsep              # separator used when building order strings
        self.cache = {}                     # memoises CSST values within one evaluate()
        self.labels = labels                # compare node labels (True) or out-degrees (False)
        self.veclabels = veclabels          # weigh CSST terms by a gaussian on vector labels
        # BUGFIX: preProcess() reads self.order, but it was never initialised,
        # raising AttributeError on first use. Default to the 'gaussian' child
        # ordering, which is also the default branch in setOrderVeclabels.
        self.order = 'gaussian'

    def preProcess(self, T):
        """Order T's children, cache its production list, and compute its norm."""
        if 'kernelsstrepr' in T.graph:
            return  # already preprocessed
        # Order the tree only if a deterministic child order is not set yet.
        if not 'childrenOrder' in T.nodes(data=True)[0][1]:
            setOrder(T, T.graph['root'], sep=self.hashsep, labels=self.labels,
                     veclabels=self.veclabels, order=self.order)
        T.graph['kernelsstrepr'] = ProdSubtreeList(T, T.graph['root'], labels=self.labels)
        T.graph['kernelsstrepr'].sort()
        if self.normalize:
            # Temporarily set norm to 1 so evaluate(T, copy) yields the raw value.
            T.graph['norm'] = 1.0
            b = deepcopy(T)
            T.graph['norm'] = math.sqrt(self.evaluate(T, b))

    def evaluate(self, a, b):
        """Sum CSST over every pair of nodes of a and b with equal productions.

        Both production lists are sorted by (length, lexicographic), so a
        merge-style scan finds all matching runs in one pass.
        """
        pa, pb = (a.graph['kernelsstrepr'], b.graph['kernelsstrepr'])
        self.cache.clear()
        i, j, k, toti, totj = (0, 0, 0, len(pa), len(pb))
        while i < toti and j < totj:
            if pa.getProduction(i) == pb.getProduction(j):
                # Matching productions: accumulate over the full run x run block.
                ci, cj = (i, j)
                while i < toti and pa.getProduction(i) == pa.getProduction(ci):
                    j = cj
                    while j < totj and pb.getProduction(j) == pb.getProduction(cj):
                        k += self.CSST(a, pa.getTree(i), b, pb.getTree(j))
                        j += 1
                    i += 1
            elif len(pa.getProduction(i)) < len(pb.getProduction(j)) or (len(pa.getProduction(i)) == len(pb.getProduction(j)) and pa.getProduction(i) < pb.getProduction(j)):
                i += 1
            else:
                j += 1
        if self.normalize:
            k = k / (a.graph['norm'] * b.graph['norm'])
        return k

    def kernel(self, a, b):
        """Preprocess both trees and return the (normalised) kernel value."""
        self.preProcess(a)
        self.preProcess(b)
        return self.evaluate(a, b)

    def CSST(self, G1, c, G2, d):
        """Recursive SST term for node c of G1 vs node d of G2.

        Assumes c and d have equal productions. Values are memoised in
        self.cache under the key "c#d" for the duration of one evaluate().
        """
        tmpkey = str(c) + "#" + str(d)
        if tmpkey in self.cache:
            return float(self.cache[tmpkey])
        prod = self.l
        if self.veclabels:
            # Weigh by a gaussian similarity between the continuous node labels,
            # with beta = 1/dim as the bandwidth.
            prod *= gaussianKernel(G1.node[c]['veclabel'], G2.node[d]['veclabel'],
                                   1.0 / len(G2.node[d]['veclabel']))
        children1 = G1.node[c]['childrenOrder']
        children2 = G2.node[d]['childrenOrder']
        nc = G1.out_degree(c)
        if nc == G2.out_degree(d):
            for ci in range(nc):
                if getProduction(G1, children1[ci]) == getProduction(G2, children2[ci]):
                    prod *= (1 + self.CSST(G1, children1[ci], G2, children2[ci]))
                else:
                    # Mismatching child productions contribute nothing; cache 0.
                    cid, did = (children1[ci], children2[ci])
                    self.cache[str(cid) + "#" + str(did)] = 0
        self.cache[tmpkey] = prod
        return float(prod)

    def __str__(self):
        # BUGFIX: the original concatenated a str with a float (TypeError).
        return "Subset Tree Kernel, with lambda=" + str(self.l)
#class SSTKernelOrdered(SSTKernel):
#
# def preProcess(self,T):
# if 'kernelsstrepr' in T.graph:
# return
# #a['hashsep']=self.hashsep
# #ordinare se l'albero non e' gia' ordered
# #for n in T.nodes():
# # T.node[n]['childrenOrder']=T.successors(n)
# #graph.setHashSubtreeIdentifier(T,T.graph['root'],self.hashsep)
#
# T.graph['kernelsstrepr']=graph.ProdSubtreeList(T,T.graph['root'])
# T.graph['kernelsstrepr'].sort()
## GRAPH FUNCTIONS
def setOrder(T, nodeID, sep='|', labels=True, veclabels=True, order="gaussian"):
    """Dispatch to the vector-label or plain-label child-ordering routine.

    Both variants recursively set 'orderString' and 'childrenOrder' on every
    node of the subtree rooted at nodeID and return the root's order string.
    """
    if veclabels:
        return setOrderVeclabels(T, nodeID, sep, labels, order)
    else:
        # BUGFIX: setOrderNoVeclabels takes (T, nodeID, sep, labels, order);
        # the original call omitted `order` and raised TypeError.
        return setOrderNoVeclabels(T, nodeID, sep, labels, order)
def setOrderVeclabels(T, nodeID, sep, labels, order):
    """
    Recursively compute a canonical ordering string for the subtree rooted at
    nodeID, sorting children by (their own order string, a similarity score
    derived from the vector labels).

    Side effects: stores the string in T.node[n]['orderString'] and the sorted
    child list in T.node[n]['childrenOrder'] for every visited node n.
    Returns the order string of nodeID.
    """
    # Memoised: the node already carries its ordering string.
    if 'orderString' in T.node[nodeID]:
        return T.node[nodeID]['orderString']
    # Head of the string: node label, or out-degree when labels is False.
    if labels == True:
        stri = str(T.node[nodeID]['label'])
    else:
        stri = str(T.out_degree(nodeID))
    # The separator must not occur inside labels, or strings become ambiguous.
    # (Python 2 print statement; the message also lacks a space after sep.)
    if stri.find(sep) != -1:
        print "ERROR: identifier " + sep + "used in label. Please set it with setHashSep(newsep)"
    succ_labels = []
    if len(T.successors(nodeID)) > 0:
        stri += sep + str(len(T.successors(nodeID)))
        for c in T.successors(nodeID):
            # Similarity score used as the tie-breaking part of the sort key.
            if order == 'gaussian':
                dist = gaussianKernel(T.node[nodeID]['veclabel'], T.node[c]['veclabel'], 1.0 / len(T.node[c]['veclabel']))
            elif order == 'norm':
                # NOTE(review): uses the *parent* veclabel, so dist is identical
                # for every child of this node -- confirm this is intended.
                dist = norm(T.node[nodeID]['veclabel'])
            else:
                # NOTE(review): dist stays unbound here, so the tuple below
                # would raise NameError for an unknown `order` value.
                print "no ordering specified"
            tup = ([setOrderVeclabels(T, c, sep, labels, order), dist], c)
            succ_labels.append(tup)
    # Sort children by (recursive order string, similarity score).
    succ_labels.sort(key=lambda x: (x[0][0], x[0][1]))
    children = []
    for l in succ_labels:
        stri += sep + str(l[0][0])
        children.append(l[1])
    T.node[nodeID]['orderString'] = stri
    T.node[nodeID]['childrenOrder'] = children
    return T.node[nodeID]['orderString']
def setOrderNoVeclabels(T, nodeID, sep, labels, order):
    """
    Recursively compute a canonical ordering string for the subtree rooted at
    nodeID, sorting children lexicographically by their own order strings
    (no vector labels involved; the `order` parameter is accepted but unused).

    Side effects: stores the string in T.node[n]['orderString'] and the sorted
    child list in T.node[n]['childrenOrder'] for every visited node n.
    Returns the order string of nodeID.
    """
    # Memoised: the node already carries its ordering string.
    if 'orderString' in T.node[nodeID]:
        return T.node[nodeID]['orderString']
    # Head of the string: node label, or out-degree when labels is False.
    if labels == True:
        stri = str(T.node[nodeID]['label'])
    else:
        stri = str(T.out_degree(nodeID))
    # The separator must not occur inside labels (Python 2 print statement).
    if stri.find(sep) != -1:
        print "ERROR: identifier " + sep + "used in label. Please set it with setHashSep(newsep)"
    succ_labels = []
    if len(T.successors(nodeID)) > 0:
        stri += sep + str(len(T.successors(nodeID)))
        for c in T.successors(nodeID):
            tup = (setOrderNoVeclabels(T, c, sep, labels, order), c)
            succ_labels.append(tup)
    # NOTE(review): list.sort(cmp=...) and the cmp builtin are Python 2 only.
    succ_labels.sort(cmp = lambda x, y: cmp(x[0], y[0]))
    children = []
    for l in succ_labels:
        stri += sep + str(l[0])
        children.append(l[1])
    T.node[nodeID]['orderString'] = stri
    T.node[nodeID]['childrenOrder'] = children
    return T.node[nodeID]['orderString']
def setHashSubtreeIdentifier(T, nodeID, sep='|', labels=True):
    """
    Compute a hash identifier for the subtree rooted at nodeID from the node's
    label (or out-degree when labels is False) and the identifiers of its
    children, in their canonical order.

    Side effect: memoises the result in T.node[nodeID]['subtreeID'].
    The label and the child identifiers are joined with the char `sep`.
    """
    if 'subtreeID' in T.node[nodeID]:
        return T.node[nodeID]['subtreeID']
    if labels:
        stri = str(T.node[nodeID]['label'])
    else:
        stri = str(T.out_degree(nodeID))
    # The separator must not occur inside labels (Python 2 print statement).
    if stri.find(sep) != -1:
        print "ERROR: identifier " + sep + "used in label. Please set it with setHashSep(newsep)"
    # Children must already carry a canonical order (set by setOrder*).
    # NOTE(review): the recursive call drops the `labels` flag, so descendants
    # always hash with the default labels=True -- confirm this is intended.
    for c in T.node[nodeID]['childrenOrder']:
        stri += sep + setHashSubtreeIdentifier(T, c, sep)
    T.node[nodeID]['subtreeID'] = str(hash(stri))
    return T.node[nodeID]['subtreeID']
def computeSubtreeIDSubtreeSizeList(self):
    """Collect (subtree-hash-id, subtree-size) pairs for this node and all
    of its descendants, in pre-order. Returns None for a falsy node."""
    if not self:
        return
    pairs = [(self.subtreeId, self.stsize)]
    for child in self.chs:
        pairs.extend(child.computeSubtreeIDSubtreeSizeList())
    return pairs
class ProdSubtreeList():
    """Flat list of (production-string, node-id) pairs for one tree.

    Built once per tree in SSTprodKernel.preProcess and consumed, after
    sort(), by the merge-scan in SSTprodKernel.evaluate.
    """
    def __init__(self, T, root, labels=True):
        self.labels = labels
        # NOTE: this assignment shadows the productionlist() method on the
        # instance -- after __init__, self.productionlist is the list itself.
        self.productionlist = self.productionlist(T, root)
    def getProduction(self, i):
        # Production string of the i-th entry.
        return self.productionlist[i][0]
    def getTree(self, i):
        # Node id (subtree root) of the i-th entry.
        return self.productionlist[i][1]
    def sort(self):
        # Two stable passes: lexicographic first, then by length, yielding an
        # overall ordering by (length, lexicographic) -- the order evaluate()
        # relies on. NOTE(review): sort(cmp=...) and cmp() are Python 2 only.
        self.productionlist.sort(cmp = lambda x, y: cmp(x[0], y[0]))
        self.productionlist.sort(cmp = lambda x, y: cmp(len(x[0]), len(y[0])))
    def __len__(self):
        return len(self.productionlist)
    def compareprods(x, y):
        # Comparator by (length, lexicographic). NOTE(review): declared without
        # `self` (Python 2 style) and not referenced within this class.
        if len(x[0]) == len(y[0]):
            return cmp(x[0], y[0])
        else:
            return cmp(len(x[0]), len(y[0]))
    def productionlist(self, G, nodeID):
        # Pre-order traversal collecting (production, node-id) pairs.
        p = [(getProduction(G, nodeID, self.labels), nodeID)]
        for c in G.successors(nodeID):
            p.extend(self.productionlist(G, c))
        return p
def getProduction(G, nodeID, labels=True):
    """
    Return the production string of nodeID: its label concatenated with the
    labels of its children in canonical order, formatted as l_v(l_ch1,...,l_chn).
    For example a node labelled A with children B and C yields "A(B,C)".
    When labels is False, out-degrees are used in place of labels.

    The result is memoised in G.node[nodeID]['production'].
    """
    node_data = G.node[nodeID]
    if 'production' in node_data:
        return node_data['production']
    if labels:
        head = node_data['label']
        child_parts = [G.node[child]['label'] for child in node_data['childrenOrder']]
    else:
        head = str(G.out_degree(nodeID))
        child_parts = [str(G.out_degree(child)) for child in node_data['childrenOrder']]
    node_data['production'] = head + "(" + ','.join(child_parts) + ")"
    return node_data['production']
"Gaussian"
] | c17f7023239f0f7b30a86be0adace7b408d9970912aab422adf318eefae07d08 |
import token_names as tokens
from node_visitor import NodeVisitor
class Interpreter(NodeVisitor):
    """Tree-walking evaluator for the parsed AST.

    Dispatch happens through the NodeVisitor base class; arithmetic nodes
    return numbers, statement-level nodes return None.
    """

    def __init__(self, tree):
        self.tree = tree  # root AST node to evaluate

    def visit_Program(self, node):
        self.visit(node.block)

    def visit_Block(self, node):
        # Declarations first, then the executable body.
        for declaration in node.declarations:
            self.visit(declaration)
        self.visit(node.compound_statement)

    def visit_Variable(self, node):
        # No symbol table is kept: variable references are a no-op.
        pass

    def visit_VariableDeclaration(self, node):
        pass

    def visit_FunctionDeclaration(self, node):
        pass

    def visit_UnaryOperator(self, node):
        op_kind = node.op.type
        if op_kind == tokens.PLUS:
            return +self.visit(node.expr)
        if op_kind == tokens.MINUS:
            return -self.visit(node.expr)

    def visit_BinaryOperator(self, node):
        # Operands are only evaluated for recognised operators, preserving
        # the original branch-per-operator behaviour.
        handlers = {
            tokens.PLUS: lambda lhs, rhs: lhs + rhs,
            tokens.MINUS: lambda lhs, rhs: lhs - rhs,
            tokens.MULTIPLY: lambda lhs, rhs: lhs * rhs,
            tokens.DIVIDE: lambda lhs, rhs: lhs / rhs,
        }
        apply_op = handlers.get(node.op.type)
        if apply_op is None:
            return None
        return apply_op(self.visit(node.left), self.visit(node.right))

    def visit_Number(self, node):
        return node.value

    def visit_Compound(self, node):
        for statement in node.children:
            self.visit(statement)

    def visit_NoOp(self, node):
        pass

    def visit_Assign(self, node):
        # NOTE(review): both sides are evaluated but nothing is stored -- there
        # is no symbol table yet, so assignments have no observable effect.
        self.visit(node.left)
        self.visit(node.right)

    def interpret(self):
        return self.visit(self.tree)
| doubledherin/my_compiler | interpreter.py | Python | mit | 1,612 | [
"VisIt"
] | e32e6ec7cbc13a3c2ff4fb60cea58c8e9297b46db1085a2eecd1c4c8b149e612 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
import datetime
import os
from django.contrib.auth.models import Permission
from django.conf import settings
from django.core import mail
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.test import TestCase
from django.test.client import Client
from django.test.utils import override_settings
from pybb import permissions, views as pybb_views
from pybb.templatetags.pybb_tags import pybb_is_topic_unread, pybb_topic_unread, pybb_forum_unread, \
pybb_get_latest_topics, pybb_get_latest_posts
from pybb import util
from pybb.util import build_cache_key
# Resolve the (possibly custom) Django user model and its username field once
# for the whole test module.
User = util.get_user_model()
username_field = util.get_username_field()
try:
    from lxml import html
except ImportError:
    # lxml is mandatory here: the tests scrape rendered pages via xpath.
    raise Exception('PyBB requires lxml for self testing')
from pybb import defaults
from pybb.models import Topic, TopicReadTracker, Forum, ForumReadTracker, Post, Category, PollAnswer, Profile
__author__ = 'zeus'
class SharedTestModule(object):
    """Mixin with fixtures and helpers shared by the pybb test cases."""

    def create_user(self):
        # Canonical test account used throughout the suite.
        self.user = User.objects.create_user('zeus', 'zeus@localhost', 'zeus')

    def login_client(self, username='zeus', password='zeus'):
        # Authenticate the test client as the given account.
        self.client.login(username=username, password=password)

    def create_initial(self, post=True):
        # Minimal forum tree: category -> forum -> topic (-> optional post).
        self.category = Category.objects.create(name='foo')
        self.forum = Forum.objects.create(name='xfoo', description='bar', category=self.category)
        self.topic = Topic.objects.create(name='etopic', forum=self.forum, user=self.user)
        if post:
            self.post = Post.objects.create(topic=self.topic, user=self.user, body='bbcode [b]test[b]')

    def get_form_values(self, response, form="post-form"):
        # Scrape the named form's current field values from the rendered page.
        document = html.fromstring(response.content)
        form_node = document.xpath('//form[@class="%s"]' % form)[0]
        return dict(form_node.form_values())

    def get_with_user(self, url, username=None, password=None):
        # GET a url, optionally authenticated, always logging out afterwards.
        if username:
            self.client.login(username=username, password=password)
        result = self.client.get(url)
        self.client.logout()
        return result
class FeaturesTest(TestCase, SharedTestModule):
    def setUp(self):
        """Create the shared fixtures and force predictable pybb settings."""
        # Keep the original settings so tests can restore them after mutation.
        self.ORIG_PYBB_ENABLE_ANONYMOUS_POST = defaults.PYBB_ENABLE_ANONYMOUS_POST
        self.ORIG_PYBB_PREMODERATION = defaults.PYBB_PREMODERATION
        defaults.PYBB_PREMODERATION = False
        defaults.PYBB_ENABLE_ANONYMOUS_POST = False
        self.create_user()
        self.create_initial()
        # Start each test with an empty mail outbox.
        mail.outbox = []
    def test_base(self):
        """Index page lists categories/forums and carries the default title."""
        # Check index page
        Forum.objects.create(name='xfoo1', description='bar1', category=self.category, parent=self.forum)
        url = reverse('pybb:index')
        response = self.client.get(url)
        parser = html.HTMLParser(encoding='utf8')
        tree = html.fromstring(response.content, parser=parser)
        self.assertContains(response, 'foo')
        self.assertContains(response, self.forum.get_absolute_url())
        self.assertTrue(defaults.PYBB_DEFAULT_TITLE in tree.xpath('//title')[0].text_content())
        self.assertEqual(len(response.context['categories']), 1)
        # Only one forum is counted -- presumably the child forum is nested
        # under its parent rather than listed; confirm against the view logic.
        self.assertEqual(len(response.context['categories'][0].forums_accessed), 1)
    def test_forum_page(self):
        """Forum page links its topic, titles the page, and is unpaginated."""
        # Check forum page
        response = self.client.get(self.forum.get_absolute_url())
        self.assertEqual(response.context['forum'], self.forum)
        tree = html.fromstring(response.content)
        self.assertTrue(tree.xpath('//a[@href="%s"]' % self.topic.get_absolute_url()))
        self.assertTrue(tree.xpath('//title[contains(text(),"%s")]' % self.forum.name))
        # With a single topic there must be no pagination links.
        self.assertFalse(tree.xpath('//a[contains(@href,"?page=")]'))
        self.assertFalse(response.context['is_paginated'])
    def test_category_page(self):
        """Category page renders and counts only one accessible forum."""
        # Add a child forum to make sure nesting does not inflate the count.
        Forum.objects.create(name='xfoo1', description='bar1', category=self.category, parent=self.forum)
        response = self.client.get(self.category.get_absolute_url())
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, self.forum.get_absolute_url())
        self.assertEqual(len(response.context['object'].forums_accessed), 1)
    def test_profile_language_default(self):
        """A new user's profile language defaults to settings.LANGUAGE_CODE."""
        user = User.objects.create_user(username='user2', password='user2', email='user2@example.com')
        self.assertEqual(util.get_pybb_profile(user).language, settings.LANGUAGE_CODE)
    def test_profile_edit(self):
        """Signature can be set via the profile form, and an empty one is valid."""
        # Self profile edit
        self.login_client()
        response = self.client.get(reverse('pybb:edit_profile'))
        self.assertEqual(response.status_code, 200)
        values = self.get_form_values(response, 'profile-edit')
        values['signature'] = 'test signature'
        response = self.client.post(reverse('pybb:edit_profile'), data=values, follow=True)
        self.assertEqual(response.status_code, 200)
        # NOTE(review): the fetched post page response is discarded; the
        # assertion below checks the edit-profile response -- confirm intent.
        self.client.get(self.post.get_absolute_url(), follow=True)
        self.assertContains(response, 'test signature')
        # Test empty signature
        values['signature'] = ''
        response = self.client.post(reverse('pybb:edit_profile'), data=values, follow=True)
        self.assertEqual(len(response.context['form'].errors), 0)
    def test_pagination_and_topic_addition(self):
        """Creating more topics than PYBB_FORUM_PAGE_SIZE paginates the forum."""
        for i in range(0, defaults.PYBB_FORUM_PAGE_SIZE + 3):
            topic = Topic(name='topic_%s_' % i, forum=self.forum, user=self.user)
            topic.save()
        url = reverse('pybb:forum', args=[self.forum.id])
        response = self.client.get(url)
        # First page is exactly full; the 3 extra topics spill to page two.
        self.assertEqual(len(response.context['topic_list']), defaults.PYBB_FORUM_PAGE_SIZE)
        self.assertTrue(response.context['is_paginated'])
        self.assertEqual(response.context['paginator'].num_pages,
                         int((defaults.PYBB_FORUM_PAGE_SIZE + 3) / defaults.PYBB_FORUM_PAGE_SIZE) + 1)
    def test_bbcode_and_topic_title(self):
        """Topic page titles with the topic name and renders bbcode to HTML."""
        response = self.client.get(self.topic.get_absolute_url())
        tree = html.fromstring(response.content)
        self.assertTrue(self.topic.name in tree.xpath('//title')[0].text_content())
        self.assertContains(response, self.post.body_html)
        self.assertContains(response, 'bbcode <strong>test</strong>')
    def test_topic_addition(self):
        """Posting the add-topic form creates the topic."""
        self.login_client()
        add_topic_url = reverse('pybb:add_topic', kwargs={'forum_id': self.forum.id})
        response = self.client.get(add_topic_url)
        values = self.get_form_values(response)
        values['body'] = 'new topic test'
        values['name'] = 'new topic name'
        values['poll_type'] = 0
        response = self.client.post(add_topic_url, data=values, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(Topic.objects.filter(name='new topic name').exists())
    def test_post_deletion(self):
        """Deleting a post must not cascade-delete its topic or forum."""
        post = Post(topic=self.topic, user=self.user, body='bbcode [b]test[b]')
        post.save()
        post.delete()
        # .get() raises DoesNotExist if the deletion wrongly removed these.
        Topic.objects.get(id=self.topic.id)
        Forum.objects.get(id=self.forum.id)
    def test_topic_deletion(self):
        """Deleting a post, then its topic, must leave the forum intact."""
        topic = Topic(name='xtopic', forum=self.forum, user=self.user)
        topic.save()
        post = Post(topic=topic, user=self.user, body='one')
        post.save()
        post = Post(topic=topic, user=self.user, body='two')
        post.save()
        post.delete()
        # Deleting one post leaves topic and forum in place (.get() would raise).
        Topic.objects.get(id=topic.id)
        Forum.objects.get(id=self.forum.id)
        topic.delete()
        Forum.objects.get(id=self.forum.id)
    def test_forum_updated(self):
        """A new post sets the forum's `updated` time to the post's created time."""
        # Sleep so the new post's timestamp is strictly later than the fixtures'.
        time.sleep(1)
        topic = Topic(name='xtopic', forum=self.forum, user=self.user)
        topic.save()
        post = Post(topic=topic, user=self.user, body='one')
        post.save()
        # Re-fetch so `created` carries the value stored in the DB.
        post = Post.objects.get(id=post.id)
        # NOTE(review): self.forum is not re-fetched here; `updated` is
        # presumably computed/refreshed on access -- confirm against the model.
        self.assertTrue(self.forum.updated == post.created)
    def test_read_tracking(self):
        """Unread markers appear for new topics/forums and clear after visits,
        own posts, mark-all-as-read, and for freshly created empty forums."""
        topic = Topic(name='xtopic', forum=self.forum, user=self.user)
        topic.save()
        post = Post(topic=topic, user=self.user, body='one')
        post.save()
        client = Client()
        client.login(username='zeus', password='zeus')
        # Topic status: the new topic row is marked unread on the forum page.
        tree = html.fromstring(client.get(topic.forum.get_absolute_url()).content)
        self.assertTrue(tree.xpath('//a[@href="%s"]/parent::td[contains(@class,"unread")]' % topic.get_absolute_url()))
        # Forum status: the forum row is marked unread on the index page.
        tree = html.fromstring(client.get(reverse('pybb:index')).content)
        self.assertTrue(
            tree.xpath('//a[@href="%s"]/parent::td[contains(@class,"unread")]' % topic.forum.get_absolute_url()))
        # Visit it
        client.get(topic.get_absolute_url())
        # Topic status - read (tree parsed before visiting the other topics;
        # the check below only concerns this topic's row).
        tree = html.fromstring(client.get(topic.forum.get_absolute_url()).content)
        # Visit others
        for t in topic.forum.topics.all():
            client.get(t.get_absolute_url())
        self.assertFalse(tree.xpath('//a[@href="%s"]/parent::td[contains(@class,"unread")]' % topic.get_absolute_url()))
        # Forum status - read
        tree = html.fromstring(client.get(reverse('pybb:index')).content)
        self.assertFalse(
            tree.xpath('//a[@href="%s"]/parent::td[contains(@class,"unread")]' % topic.forum.get_absolute_url()))
        # Post message: posting yourself must not mark the topic unread for you.
        add_post_url = reverse('pybb:add_post', kwargs={'topic_id': topic.id})
        response = client.get(add_post_url)
        values = self.get_form_values(response)
        values['body'] = 'test tracking'
        response = client.post(add_post_url, values, follow=True)
        self.assertContains(response, 'test tracking')
        # Topic status - read
        tree = html.fromstring(client.get(topic.forum.get_absolute_url()).content)
        self.assertFalse(tree.xpath('//a[@href="%s"]/parent::td[contains(@class,"unread")]' % topic.get_absolute_url()))
        # Forum status - read
        tree = html.fromstring(client.get(reverse('pybb:index')).content)
        self.assertFalse(
            tree.xpath('//a[@href="%s"]/parent::td[contains(@class,"unread")]' % topic.forum.get_absolute_url()))
        # A brand-new post followed by mark_all_as_read clears the forum marker.
        post = Post(topic=topic, user=self.user, body='one')
        post.save()
        client.get(reverse('pybb:mark_all_as_read'))
        tree = html.fromstring(client.get(reverse('pybb:index')).content)
        self.assertFalse(
            tree.xpath('//a[@href="%s"]/parent::td[contains(@class,"unread")]' % topic.forum.get_absolute_url()))
        # Empty forum - read: a forum with no topics is never marked unread.
        f = Forum(name='empty', category=self.category)
        f.save()
        tree = html.fromstring(client.get(reverse('pybb:index')).content)
        self.assertFalse(tree.xpath('//a[@href="%s"]/parent::td[contains(@class,"unread")]' % f.get_absolute_url()))
def test_read_tracking_multi_user(self):
    """Per-user read tracking with two users.

    Each user accumulates TopicReadTracker rows until they have read every
    topic in a forum; at that point the per-topic rows are replaced by a
    single ForumReadTracker row for that user.
    """
    topic_1 = self.topic
    topic_2 = Topic(name='topic_2', forum=self.forum, user=self.user)
    topic_2.save()
    Post(topic=topic_2, user=self.user, body='one').save()
    user_ann = User.objects.create_user('ann', 'ann@localhost', 'ann')
    client_ann = Client()
    client_ann.login(username='ann', password='ann')
    user_bob = User.objects.create_user('bob', 'bob@localhost', 'bob')
    client_bob = Client()
    client_bob.login(username='bob', password='bob')
    # Two topics, each with one post. Everything is unread, so the db should reflect that:
    self.assertEqual(TopicReadTracker.objects.all().count(), 0)
    self.assertEqual(ForumReadTracker.objects.all().count(), 0)
    # user_ann reads topic_1: she gets one topic read tracker, there are no forum read trackers.
    client_ann.get(topic_1.get_absolute_url())
    self.assertEqual(TopicReadTracker.objects.all().count(), 1)
    self.assertEqual(TopicReadTracker.objects.filter(user=user_ann).count(), 1)
    self.assertEqual(TopicReadTracker.objects.filter(user=user_ann, topic=topic_1).count(), 1)
    self.assertEqual(ForumReadTracker.objects.all().count(), 0)
    # user_bob reads topic_1: he gets one topic read tracker, still no forum read trackers.
    client_bob.get(topic_1.get_absolute_url())
    self.assertEqual(TopicReadTracker.objects.all().count(), 2)
    self.assertEqual(TopicReadTracker.objects.filter(user=user_bob).count(), 1)
    self.assertEqual(TopicReadTracker.objects.filter(user=user_bob, topic=topic_1).count(), 1)
    # user_bob reads topic_2 - he has now read the whole forum, so his topic
    # trackers collapse into one forum read tracker (ann's topic tracker remains).
    client_bob.get(topic_2.get_absolute_url())
    self.assertEqual(TopicReadTracker.objects.all().count(), 1)
    self.assertEqual(ForumReadTracker.objects.all().count(), 1)
    self.assertEqual(ForumReadTracker.objects.filter(user=user_bob).count(), 1)
    self.assertEqual(ForumReadTracker.objects.filter(user=user_bob, forum=self.forum).count(), 1)
    self.assertEqual(TopicReadTracker.objects.filter(user=user_bob).count(), 0)
    self.assertListEqual(
        [t.unread for t in pybb_topic_unread([topic_1, topic_2], user_bob)],
        [False, False])
    # user_ann creates topic_3: as its author she gets a topic read tracker for it.
    add_topic_url = reverse('pybb:add_topic', kwargs={'forum_id': self.forum.id})
    response = client_ann.get(add_topic_url)
    values = self.get_form_values(response)
    values['body'] = 'topic_3'
    values['name'] = 'topic_3'
    values['poll_type'] = 0
    response = client_ann.post(add_topic_url, data=values, follow=True)
    self.assertEqual(TopicReadTracker.objects.all().count(), 2)
    self.assertEqual(TopicReadTracker.objects.filter(user=user_ann).count(), 2)
    self.assertEqual(ForumReadTracker.objects.all().count(), 1)
    topic_3 = Topic.objects.order_by('-updated')[0]
    self.assertEqual(topic_3.name, 'topic_3')
    # user_ann posts to topic_1, a topic she has already read: no new trackers appear.
    add_post_url = reverse('pybb:add_post', kwargs={'topic_id': topic_1.id})
    response = client_ann.get(add_post_url)
    values = self.get_form_values(response)
    values['body'] = 'test tracking'
    response = client_ann.post(add_post_url, values, follow=True)
    self.assertEqual(TopicReadTracker.objects.all().count(), 2)
    self.assertEqual(TopicReadTracker.objects.filter(user=user_ann).count(), 2)
    self.assertEqual(ForumReadTracker.objects.all().count(), 1)
    # user_bob now has two unread topics, 'topic_1' and 'topic_3': user_ann
    # created a new topic and posted to an existing one AFTER bob got his
    # forum read tracker. Reading 'topic_1' gives bob a new topic read
    # tracker while his forum read tracker stays untouched; 'topic_3' still
    # appears unread for him.
    self.assertEqual(ForumReadTracker.objects.all().count(), 1)
    previous_time = ForumReadTracker.objects.all()[0].time_stamp
    client_bob.get(topic_1.get_absolute_url())
    self.assertEqual(ForumReadTracker.objects.all().count(), 1)
    self.assertEqual(ForumReadTracker.objects.all()[0].time_stamp, previous_time)
    self.assertEqual(TopicReadTracker.objects.all().count(), 3)
    self.assertEqual(TopicReadTracker.objects.filter(user=user_bob).count(), 1)
    # user_bob reads the last unread topic, 'topic_3': his forum read tracker
    # timestamp advances and his topic read tracker disappears again.
    self.assertEqual(ForumReadTracker.objects.all().count(), 1)
    previous_time = ForumReadTracker.objects.all()[0].time_stamp
    client_bob.get(topic_3.get_absolute_url())
    self.assertEqual(ForumReadTracker.objects.all().count(), 1)
    self.assertGreater(ForumReadTracker.objects.all()[0].time_stamp, previous_time)
    self.assertEqual(TopicReadTracker.objects.all().count(), 2)
    self.assertEqual(TopicReadTracker.objects.filter(user=user_bob).count(), 0)
def test_read_tracking_multi_forum(self):
    """Reading every topic of a forum replaces the user's topic trackers
    with a single forum tracker; other forums are unaffected."""
    topic_1 = self.topic
    topic_2 = Topic(name='topic_2', forum=self.forum, user=self.user)
    topic_2.save()
    Post(topic=topic_2, user=self.user, body='one').save()
    forum_1 = self.forum
    forum_2 = Forum(name='forum_2', description='bar', category=self.category)
    forum_2.save()
    # An unread topic in a second forum must not affect self.forum's tracking.
    Topic(name='garbage', forum=forum_2, user=self.user).save()
    client = Client()
    client.login(username='zeus', password='zeus')
    # Everything starts unread.
    self.assertEqual(ForumReadTracker.objects.all().count(), 0)
    self.assertEqual(TopicReadTracker.objects.all().count(), 0)
    # User reads topic_1: one topic read tracker, no forum read trackers.
    client.get(topic_1.get_absolute_url())
    self.assertEqual(TopicReadTracker.objects.all().count(), 1)
    self.assertEqual(TopicReadTracker.objects.filter(user=self.user).count(), 1)
    self.assertEqual(TopicReadTracker.objects.filter(user=self.user, topic=topic_1).count(), 1)
    # User reads topic_2 - the whole forum is now read, so the per-topic
    # trackers collapse into a single forum read tracker.
    client.get(topic_2.get_absolute_url())
    self.assertEqual(TopicReadTracker.objects.all().count(), 0)
    self.assertEqual(ForumReadTracker.objects.all().count(), 1)
    self.assertEqual(ForumReadTracker.objects.filter(user=self.user).count(), 1)
    self.assertEqual(ForumReadTracker.objects.filter(user=self.user, forum=self.forum).count(), 1)
def test_read_tracker_after_posting(self):
    """Posting in the only topic of a forum marks the whole forum read:
    a forum-level tracker is created and no topic-level tracker remains."""
    client = Client()
    client.login(username='zeus', password='zeus')
    add_post_url = reverse('pybb:add_post', kwargs={'topic_id': self.topic.id})
    values = self.get_form_values(client.get(add_post_url))
    values['body'] = 'test tracking'
    client.post(add_post_url, values, follow=True)
    # The author's own post leaves nothing unread; with a single topic the
    # per-topic tracker collapses into a forum-level tracker.
    self.assertEqual(TopicReadTracker.objects.filter(user=self.user, topic=self.topic).count(), 0)
    self.assertEqual(ForumReadTracker.objects.filter(user=self.user, forum=self.forum).count(), 1)
def test_pybb_is_topic_unread_filter(self):
    """pybb_is_topic_unread / pybb_topic_unread flip topics from unread to
    read one by one as the user visits them."""
    forum_1 = self.forum
    topic_1 = self.topic
    topic_2 = Topic.objects.create(name='topic_2', forum=forum_1, user=self.user)
    forum_2 = Forum.objects.create(name='forum_2', description='forum2', category=self.category)
    topic_3 = Topic.objects.create(name='topic_2', forum=forum_2, user=self.user)
    Post(topic=topic_1, user=self.user, body='one').save()
    Post(topic=topic_2, user=self.user, body='two').save()
    Post(topic=topic_3, user=self.user, body='three').save()
    user_ann = User.objects.create_user('ann', 'ann@localhost', 'ann')
    client_ann = Client()
    client_ann.login(username='ann', password='ann')

    def check_unread(expected):
        # Re-fetch the topics so stale instance state cannot mask tracker changes.
        topics = [Topic.objects.get(id=t.id) for t in (topic_1, topic_2, topic_3)]
        for topic, unread in zip(topics, expected):
            checker = self.assertTrue if unread else self.assertFalse
            checker(pybb_is_topic_unread(topic, user_ann))
        self.assertListEqual([t.unread for t in pybb_topic_unread(topics, user_ann)],
                             expected)

    # Three topics, one post each - everything starts unread.
    check_unread([True, True, True])
    # Visiting each topic in turn marks exactly that topic read.
    for topic, expected in ((topic_1, [False, True, True]),
                            (topic_2, [False, False, True]),
                            (topic_3, [False, False, False])):
        client_ann.get(topic.get_absolute_url())
        check_unread(expected)
def test_is_forum_unread_filter(self):
    """pybb_forum_unread: a forum (and its parent) stays unread until every
    topic in it - including its child forums - has been read."""
    Forum.objects.all().delete()
    forum_parent = Forum.objects.create(name='f1', category=self.category)
    forum_child1 = Forum.objects.create(name='f2', category=self.category, parent=forum_parent)
    forum_child2 = Forum.objects.create(name='f3', category=self.category, parent=forum_parent)
    topic_1 = Topic.objects.create(name='topic_1', forum=forum_parent, user=self.user)
    topic_2 = Topic.objects.create(name='topic_2', forum=forum_child1, user=self.user)
    topic_3 = Topic.objects.create(name='topic_3', forum=forum_child2, user=self.user)
    Post(topic=topic_1, user=self.user, body='one').save()
    Post(topic=topic_2, user=self.user, body='two').save()
    Post(topic=topic_3, user=self.user, body='three').save()
    user_ann = User.objects.create_user('ann', 'ann@localhost', 'ann')
    client_ann = Client()
    client_ann.login(username='ann', password='ann')

    def check_unread(expected):
        # Re-fetch so stale instance state cannot mask tracker changes.
        forums = [Forum.objects.get(id=f.id)
                  for f in (forum_parent, forum_child1, forum_child2)]
        self.assertListEqual([f.unread for f in pybb_forum_unread(forums, user_ann)],
                             expected)

    check_unread([True, True, True])
    # Reading the parent's own topic is not enough: unread topics remain in
    # the child forums, which keeps everything flagged unread.
    client_ann.get(topic_1.get_absolute_url())
    check_unread([True, True, True])
    # One child read; the other child's unread topic still marks the parent unread.
    client_ann.get(topic_2.get_absolute_url())
    check_unread([True, False, True])
    # All topics read - every forum reads as read.
    client_ann.get(topic_3.get_absolute_url())
    check_unread([False, False, False])
def test_read_tracker_when_topics_forum_changed(self):
    """Moving a post between topics, or a topic between forums, moves the
    unread state along with it."""
    forum_1 = Forum.objects.create(name='f1', description='bar', category=self.category)
    forum_2 = Forum.objects.create(name='f2', description='bar', category=self.category)
    topic_1 = Topic.objects.create(name='t1', forum=forum_1, user=self.user)
    topic_2 = Topic.objects.create(name='t2', forum=forum_2, user=self.user)
    Post.objects.create(topic=topic_1, user=self.user, body='one')
    Post.objects.create(topic=topic_2, user=self.user, body='two')
    user_ann = User.objects.create_user('ann', 'ann@localhost', 'ann')
    client_ann = Client()
    client_ann.login(username='ann', password='ann')
    # Everything is unread.
    self.assertListEqual(
        [t.unread for t in pybb_topic_unread([topic_1, topic_2], user_ann)],
        [True, True])
    self.assertListEqual(
        [t.unread for t in pybb_forum_unread([forum_1, forum_2], user_ann)],
        [True, True])
    # Mark everything read.
    client_ann.get(reverse('pybb:mark_all_as_read'))
    self.assertListEqual(
        [t.unread for t in pybb_topic_unread([topic_1, topic_2], user_ann)],
        [False, False])
    self.assertListEqual(
        [t.unread for t in pybb_forum_unread([forum_1, forum_2], user_ann)],
        [False, False])
    # A new post in topic_1 bumps its topic's and forum's 'updated' time and
    # makes only that topic/forum unread again.
    post = Post.objects.create(topic=topic_1, user=self.user, body='three')
    topic_1 = Topic.objects.get(id=topic_1.id)
    topic_2 = Topic.objects.get(id=topic_2.id)
    self.assertEqual(topic_1.updated, post.updated or post.created)
    self.assertEqual(forum_1.updated, post.updated or post.created)
    self.assertListEqual(
        [t.unread for t in pybb_topic_unread([topic_1, topic_2], user_ann)],
        [True, False])
    self.assertListEqual(
        [t.unread for t in pybb_forum_unread([forum_1, forum_2], user_ann)],
        [True, False])
    # Moving the unread post into topic_2 moves the unread flag to
    # topic_2/forum_2 and clears it from topic_1/forum_1.
    post.topic = topic_2
    post.save()
    topic_1 = Topic.objects.get(id=topic_1.id)
    topic_2 = Topic.objects.get(id=topic_2.id)
    forum_1 = Forum.objects.get(id=forum_1.id)
    forum_2 = Forum.objects.get(id=forum_2.id)
    self.assertEqual(topic_2.updated, post.updated or post.created)
    self.assertEqual(forum_2.updated, post.updated or post.created)
    self.assertListEqual(
        [t.unread for t in pybb_topic_unread([topic_1, topic_2], user_ann)],
        [False, True])
    self.assertListEqual(
        [t.unread for t in pybb_forum_unread([forum_1, forum_2], user_ann)],
        [False, True])
    # Moving topic_2 (which holds the unread post) into forum_1 makes
    # forum_1 unread and forum_2 read; the topic-level flags are unchanged.
    topic_2.forum = forum_1
    topic_2.save()
    topic_1 = Topic.objects.get(id=topic_1.id)
    topic_2 = Topic.objects.get(id=topic_2.id)
    forum_1 = Forum.objects.get(id=forum_1.id)
    forum_2 = Forum.objects.get(id=forum_2.id)
    self.assertEqual(forum_1.updated, post.updated or post.created)
    self.assertListEqual(
        [t.unread for t in pybb_topic_unread([topic_1, topic_2], user_ann)],
        [False, True])
    self.assertListEqual(
        [t.unread for t in pybb_forum_unread([forum_1, forum_2], user_ann)],
        [True, False])
def test_open_first_unread_post(self):
    """The 'first-unread' query parameter redirects to the first post the
    visiting user has not yet seen in that topic."""
    forum_1 = self.forum
    topic_1 = Topic.objects.create(name='topic_1', forum=forum_1, user=self.user)
    topic_2 = Topic.objects.create(name='topic_2', forum=forum_1, user=self.user)
    post_1_1 = Post.objects.create(topic=topic_1, user=self.user, body='1_1')
    post_1_2 = Post.objects.create(topic=topic_1, user=self.user, body='1_2')
    post_2_1 = Post.objects.create(topic=topic_2, user=self.user, body='2_1')
    user_ann = User.objects.create_user('ann', 'ann@localhost', 'ann')
    client_ann = Client()
    client_ann.login(username='ann', password='ann')

    def assert_lands_on(topic, post):
        # Request the topic with first-unread and check the redirect anchor.
        topic_url = reverse('pybb:topic', kwargs={'pk': topic.id})
        response = client_ann.get(topic_url, data={'first-unread': 1}, follow=True)
        self.assertRedirects(response, '%s?page=%d#post-%d' % (topic_url, 1, post.id))

    assert_lands_on(topic_1, post_1_1)
    assert_lands_on(topic_1, post_1_2)
    assert_lands_on(topic_2, post_2_1)
    # New posts arrive after ann read everything: first-unread now points at
    # the first of them.
    post_1_3 = Post.objects.create(topic=topic_1, user=self.user, body='1_3')
    post_1_4 = Post.objects.create(topic=topic_1, user=self.user, body='1_4')
    assert_lands_on(topic_1, post_1_3)
def test_latest_topics(self):
    """The latest-topics view honours hidden forums/categories, topic
    moderation state, moderator/superuser rights, and anonymity."""
    topic_1 = self.topic
    # NOTE(review): the .updated assignments below are never save()d, so they
    # only change the in-memory instances - confirm whether the ordering the
    # view returns actually depends on them or on the stored timestamps.
    topic_1.updated = datetime.datetime.utcnow()
    topic_2 = Topic.objects.create(name='topic_2', forum=self.forum, user=self.user)
    topic_2.updated = datetime.datetime.utcnow() + datetime.timedelta(days=-1)
    category_2 = Category.objects.create(name='cat2')
    forum_2 = Forum.objects.create(name='forum_2', category=category_2)
    topic_3 = Topic.objects.create(name='topic_3', forum=forum_2, user=self.user)
    topic_3.updated = datetime.datetime.utcnow() + datetime.timedelta(days=-2)
    self.login_client()
    response = self.client.get(reverse('pybb:topic_latest'))
    self.assertEqual(response.status_code, 200)
    self.assertListEqual(list(response.context['topic_list']), [topic_1, topic_2, topic_3])
    # Hiding the forum removes its topics (topic_1 and topic_2) from the list.
    topic_2.forum.hidden = True
    topic_2.forum.save()
    response = self.client.get(reverse('pybb:topic_latest'))
    self.assertListEqual(list(response.context['topic_list']), [topic_3])
    topic_2.forum.hidden = False
    topic_2.forum.save()
    # Hiding the category removes forum_2's topic (topic_3) instead.
    category_2.hidden = True
    category_2.save()
    response = self.client.get(reverse('pybb:topic_latest'))
    self.assertListEqual(list(response.context['topic_list']), [topic_1, topic_2])
    # NOTE(review): this forum un-hide repeats the one above - it looks redundant.
    topic_2.forum.hidden = False
    topic_2.forum.save()
    category_2.hidden = False
    category_2.save()
    # A topic on moderation stays visible to its own author...
    topic_1.on_moderation = True
    topic_1.save()
    response = self.client.get(reverse('pybb:topic_latest'))
    self.assertListEqual(list(response.context['topic_list']), [topic_1, topic_2, topic_3])
    # ...but not to other plain users.
    topic_1.user = User.objects.create_user('another', 'another@localhost', 'another')
    topic_1.save()
    response = self.client.get(reverse('pybb:topic_latest'))
    self.assertListEqual(list(response.context['topic_list']), [topic_2, topic_3])
    # Forum moderators see moderated topics.
    topic_1.forum.moderators.add(self.user)
    response = self.client.get(reverse('pybb:topic_latest'))
    self.assertListEqual(list(response.context['topic_list']), [topic_1, topic_2, topic_3])
    topic_1.forum.moderators.remove(self.user)
    # Superusers see moderated topics as well.
    self.user.is_superuser = True
    self.user.save()
    response = self.client.get(reverse('pybb:topic_latest'))
    self.assertListEqual(list(response.context['topic_list']), [topic_1, topic_2, topic_3])
    # Anonymous visitors do not see the topic on moderation.
    self.client.logout()
    response = self.client.get(reverse('pybb:topic_latest'))
    self.assertListEqual(list(response.context['topic_list']), [topic_2, topic_3])
def test_hidden(self):
    """Visibility of hidden categories and hidden forums.

    Anonymous users are redirected (302) away from hidden content and never
    see it in listings or feeds; authenticated non-staff users get 403;
    staff users see everything (200).
    """
    client = Client()
    # A hidden category, with a forum and topic inside it.
    category = Category(name='hcat', hidden=True)
    category.save()
    forum_in_hidden = Forum(name='in_hidden', category=category)
    forum_in_hidden.save()
    topic_in_hidden = Topic(forum=forum_in_hidden, name='in_hidden', user=self.user)
    topic_in_hidden.save()
    # A hidden forum inside a visible category, with a topic and post.
    forum_hidden = Forum(name='hidden', category=self.category, hidden=True)
    forum_hidden.save()
    topic_hidden = Topic(forum=forum_hidden, name='hidden', user=self.user)
    topic_hidden.save()
    post_hidden = Post(topic=topic_hidden, user=self.user, body='hidden')
    post_hidden.save()
    post_in_hidden = Post(topic=topic_in_hidden, user=self.user, body='hidden')
    post_in_hidden.save()
    # Anonymous: hidden content is absent from the index and feeds, and
    # direct URLs redirect (302) to login.
    self.assertFalse(category.id in [c.id for c in client.get(reverse('pybb:index')).context['categories']])
    self.assertEqual(client.get(category.get_absolute_url()).status_code, 302)
    self.assertEqual(client.get(forum_in_hidden.get_absolute_url()).status_code, 302)
    self.assertEqual(client.get(topic_in_hidden.get_absolute_url()).status_code, 302)
    self.assertNotContains(client.get(reverse('pybb:index')), forum_hidden.get_absolute_url())
    self.assertNotContains(client.get(reverse('pybb:feed_topics')), topic_hidden.get_absolute_url())
    self.assertNotContains(client.get(reverse('pybb:feed_topics')), topic_in_hidden.get_absolute_url())
    self.assertNotContains(client.get(reverse('pybb:feed_posts')), post_hidden.get_absolute_url())
    self.assertNotContains(client.get(reverse('pybb:feed_posts')), post_in_hidden.get_absolute_url())
    self.assertEqual(client.get(forum_hidden.get_absolute_url()).status_code, 302)
    self.assertEqual(client.get(topic_hidden.get_absolute_url()).status_code, 302)
    # Authenticated non-staff: hidden content still absent from the index,
    # and direct URLs are forbidden (403).
    client.login(username='zeus', password='zeus')
    self.assertFalse(category.id in [c.id for c in client.get(reverse('pybb:index')).context['categories']])
    self.assertNotContains(client.get(reverse('pybb:index')), forum_hidden.get_absolute_url())
    self.assertEqual(client.get(category.get_absolute_url()).status_code, 403)
    self.assertEqual(client.get(forum_in_hidden.get_absolute_url()).status_code, 403)
    self.assertEqual(client.get(topic_in_hidden.get_absolute_url()).status_code, 403)
    self.assertEqual(client.get(forum_hidden.get_absolute_url()).status_code, 403)
    self.assertEqual(client.get(topic_hidden.get_absolute_url()).status_code, 403)
    # Staff: everything is visible and accessible (200).
    self.user.is_staff = True
    self.user.save()
    self.assertTrue(category.id in [c.id for c in client.get(reverse('pybb:index')).context['categories']])
    self.assertContains(client.get(reverse('pybb:index')), forum_hidden.get_absolute_url())
    self.assertEqual(client.get(category.get_absolute_url()).status_code, 200)
    self.assertEqual(client.get(forum_in_hidden.get_absolute_url()).status_code, 200)
    self.assertEqual(client.get(topic_in_hidden.get_absolute_url()).status_code, 200)
    self.assertEqual(client.get(forum_hidden.get_absolute_url()).status_code, 200)
    self.assertEqual(client.get(topic_hidden.get_absolute_url()).status_code, 200)
def test_inactive(self):
    """Posts from a deactivated account must be silently dropped."""
    self.login_client()
    url = reverse('pybb:add_post', kwargs={'topic_id': self.topic.id})
    values = self.get_form_values(self.client.get(url))
    # An active user can post normally.
    values['body'] = 'test ban'
    self.client.post(url, values, follow=True)
    self.assertEqual(len(Post.objects.filter(body='test ban')), 1)
    # Deactivate the author and try again: no post must be created.
    self.user.is_active = False
    self.user.save()
    values['body'] = 'test ban 2'
    self.client.post(url, values, follow=True)
    self.assertEqual(len(Post.objects.filter(body='test ban 2')), 0)
def get_csrf(self, form):
    """Return the value of the CSRF token hidden input found in *form*."""
    tokens = form.xpath('//input[@name="csrfmiddlewaretoken"]/@value')
    return tokens[0]
def test_csrf(self):
    """Posting without a CSRF token is rejected; with a valid one it succeeds."""
    client = Client(enforce_csrf_checks=True)
    client.login(username='zeus', password='zeus')
    post_url = reverse('pybb:add_post', kwargs={'topic_id': self.topic.id})
    # Strip the token from an otherwise valid form: the POST must be rejected.
    values = self.get_form_values(client.get(post_url))
    values.pop('csrfmiddlewaretoken')
    response = client.post(post_url, values, follow=True)
    self.assertNotEqual(response.status_code, 200)
    # A form scraped from the topic page carries a valid token, so the same
    # POST now succeeds.
    values = self.get_form_values(client.get(self.topic.get_absolute_url()))
    response = client.post(reverse('pybb:add_post', kwargs={'topic_id': self.topic.id}), values, follow=True)
    self.assertEqual(response.status_code, 200)
def test_user_blocking(self):
    """Superuser block view: deactivates the target user, and with the
    'block_and_delete_messages' flag also removes their content."""
    user = User.objects.create_user('test', 'test@localhost', 'test')
    topic = Topic.objects.create(name='topic', forum=self.forum, user=self.user)
    self.post = Post.objects.create(topic=topic, user=user, body='bbcode [b]test[b]')
    self.user.is_superuser = True
    self.user.save()
    self.login_client()
    # The view only accepts POST; GET is rejected with 405.
    response = self.client.get(reverse('pybb:block_user', args=[user.username]), follow=True)
    self.assertEqual(response.status_code, 405)
    response = self.client.post(reverse('pybb:block_user', args=[user.username]), follow=True)
    self.assertEqual(response.status_code, 200)
    user = User.objects.get(username=user.username)
    self.assertFalse(user.is_active)
    # A plain block keeps the user's content in place.
    self.assertEqual(Topic.objects.filter().count(), 2)
    self.assertEqual(Post.objects.filter(user=user).count(), 1)
    user.is_active = True
    user.save()
    # Blocking with 'block_and_delete_messages' also removes the user's
    # posts; their topic disappears along with its only post.
    response = self.client.post(reverse('pybb:block_user', args=[user.username]),
                                data={'block_and_delete_messages': 'block_and_delete_messages'}, follow=True)
    self.assertEqual(response.status_code, 200)
    user = User.objects.get(username=user.username)
    self.assertFalse(user.is_active)
    self.assertEqual(Topic.objects.filter().count(), 1)
    self.assertEqual(Post.objects.filter(user=user).count(), 0)
def test_user_unblocking(self):
    """A superuser can reactivate a blocked account via POST; GET is rejected."""
    user = User.objects.create_user('test', 'test@localhost', 'test')
    user.is_active = False
    user.save()
    self.user.is_superuser = True
    self.user.save()
    self.login_client()
    unblock_url = reverse('pybb:unblock_user', args=[user.username])
    # The view only accepts POST.
    self.assertEqual(self.client.get(unblock_url, follow=True).status_code, 405)
    self.assertEqual(self.client.post(unblock_url, follow=True).status_code, 200)
    self.assertTrue(User.objects.get(username=user.username).is_active)
def test_ajax_preview(self):
    """The ajax preview endpoint renders submitted bbcode to HTML."""
    self.login_client()
    preview_url = reverse('pybb:post_ajax_preview')
    response = self.client.post(preview_url, data={'data': '[b]test bbcode ajax preview[b]'})
    self.assertContains(response, '<strong>test bbcode ajax preview</strong>')
def test_headline(self):
    """A forum headline is rendered verbatim (HTML allowed) on the forum page."""
    self.forum.headline = 'test <b>headline</b>'
    self.forum.save()
    response = Client().get(self.forum.get_absolute_url())
    self.assertContains(response, 'test <b>headline</b>')
def test_quote(self):
    """Opening the add-post form with quote_id pre-fills the quoted post's body."""
    self.login_client()
    add_post_url = reverse('pybb:add_post', kwargs={'topic_id': self.topic.id})
    response = self.client.get(add_post_url,
                               data={'quote_id': self.post.id, 'body': 'test tracking'},
                               follow=True)
    self.assertEqual(response.status_code, 200)
    self.assertContains(response, self.post.body)
def test_edit_post(self):
    """An author can edit their post; staff get the admin form variant that
    additionally accepts a 'login' field."""
    self.login_client()
    edit_post_url = reverse('pybb:edit_post', kwargs={'pk': self.post.id})

    def submit_edit(extra):
        # Fetch the edit form, scrape its values, apply overrides and submit.
        response = self.client.get(edit_post_url)
        self.assertEqual(response.status_code, 200)
        form = html.fromstring(response.content).xpath('//form[@method="post"]')[0]
        values = dict(form.form_values())
        values['body'] = 'test edit'
        values.update(extra)
        response = self.client.post(edit_post_url, data=values, follow=True)
        self.assertEqual(response.status_code, 200)
        return response

    submit_edit({})
    self.assertEqual(Post.objects.get(pk=self.post.id).body, 'test edit')
    response = self.client.get(self.post.get_absolute_url(), follow=True)
    self.assertContains(response, 'test edit')
    # Staff users see the admin variant of the form.
    self.user.is_staff = True
    self.user.save()
    response = submit_edit({'login': 'new_login'})
    self.assertContains(response, 'test edit')
def test_admin_post_add(self):
    """Staff may post on behalf of a named user via the 'user' form field."""
    self.user.is_staff = True
    self.user.save()
    self.login_client()
    response = self.client.post(
        reverse('pybb:add_post', kwargs={'topic_id': self.topic.id}),
        data={'quote_id': self.post.id, 'body': 'test admin post', 'user': 'zeus'},
        follow=True)
    self.assertEqual(response.status_code, 200)
    self.assertContains(response, 'test admin post')
def test_stick(self):
    """A superuser can stick and unstick a topic."""
    self.user.is_superuser = True
    self.user.save()
    self.login_client()
    for view_name in ('pybb:stick_topic', 'pybb:unstick_topic'):
        url = reverse(view_name, kwargs={'pk': self.topic.id})
        self.assertEqual(self.client.get(url, follow=True).status_code, 200)
def test_delete_view(self):
    """Deleting a reply keeps the topic; deleting the head post removes the
    whole topic, while the forum survives both."""
    post = Post(topic=self.topic, user=self.user, body='test to delete')
    post.save()
    self.user.is_superuser = True
    self.user.save()
    self.login_client()
    # Remove the reply - topic and forum survive.
    response = self.client.post(reverse('pybb:delete_post', args=[post.id]), follow=True)
    self.assertEqual(response.status_code, 200)
    self.assertEqual(Topic.objects.filter(id=self.topic.id).count(), 1)
    self.assertEqual(Forum.objects.filter(id=self.forum.id).count(), 1)
    # Remove the head post - the topic goes with it, the forum stays.
    response = self.client.post(reverse('pybb:delete_post', args=[self.post.id]), follow=True)
    self.assertEqual(response.status_code, 200)
    self.assertEqual(Post.objects.filter(id=self.post.id).count(), 0)
    self.assertEqual(Topic.objects.filter(id=self.topic.id).count(), 0)
    self.assertEqual(Forum.objects.filter(id=self.forum.id).count(), 1)
def test_open_close(self):
    """Posting to a closed topic is forbidden (403); reopening allows it again."""
    self.user.is_superuser = True
    self.user.save()
    self.login_client()
    add_post_url = reverse('pybb:add_post', args=[self.topic.id])
    values = self.get_form_values(self.client.get(add_post_url))
    values['body'] = 'test closed'
    # Close the topic: replies are now rejected.
    response = self.client.get(reverse('pybb:close_topic', args=[self.topic.id]), follow=True)
    self.assertEqual(response.status_code, 200)
    self.assertEqual(self.client.post(add_post_url, values, follow=True).status_code, 403)
    # Reopen the topic: the same reply now succeeds.
    response = self.client.get(reverse('pybb:open_topic', args=[self.topic.id]), follow=True)
    self.assertEqual(response.status_code, 200)
    self.assertEqual(self.client.post(add_post_url, values, follow=True).status_code, 200)
def test_subscription(self):
    """Subscribers receive exactly one notification e-mail per new post;
    unsubscribing removes them from the topic's subscriber list."""
    user = User.objects.create_user(username='user2', password='user2', email='user2@example.com')
    client = Client()
    client.login(username='user2', password='user2')
    # Subscribe user2 to the topic.
    response = client.get(reverse('pybb:add_subscription', args=[self.topic.id]), follow=True)
    self.assertEqual(response.status_code, 200)
    self.assertTrue(user in list(self.topic.subscribers.all()))
    # Create a new reply with another user so a notification is triggered.
    self.client.login(username='zeus', password='zeus')
    add_post_url = reverse('pybb:add_post', args=[self.topic.id])
    response = self.client.get(add_post_url)
    values = self.get_form_values(response)
    values['body'] = 'test subscribtion юникод'
    response = self.client.post(add_post_url, values, follow=True)
    self.assertEqual(response.status_code, 200)
    new_post = Post.objects.get(pk=2)
    # There should be exactly one e-mail in the outbox (to user2@example.com),
    # and it must link to the new post.
    self.assertEqual(len(mail.outbox), 1)
    self.assertTrue([msg for msg in mail.outbox if new_post.get_absolute_url() in msg.body])
    # Unsubscribe.
    client.login(username='user2', password='user2')
    # NOTE(review): this assertion repeats the one above verbatim - it looks redundant.
    self.assertTrue([msg for msg in mail.outbox if new_post.get_absolute_url() in msg.body])
    response = client.get(reverse('pybb:delete_subscription', args=[self.topic.id]), follow=True)
    self.assertEqual(response.status_code, 200)
    self.assertTrue(user not in list(self.topic.subscribers.all()))
def test_topic_updated(self):
    """A new post bumps its topic to the top of the forum's topic list."""
    topic = Topic(name='etopic', forum=self.forum, user=self.user)
    topic.save()
    # Sleep so the next post gets a strictly later timestamp than existing content.
    time.sleep(1)
    post = Post(topic=topic, user=self.user, body='bbcode [b]test[b]')
    post.save()
    client = Client()
    response = client.get(self.forum.get_absolute_url())
    self.assertEqual(response.context['topic_list'][0], topic)
    # Post in the other topic: it should now be listed first instead.
    time.sleep(1)
    post = Post(topic=self.topic, user=self.user, body='bbcode [b]test[b]')
    post.save()
    client = Client()
    response = client.get(self.forum.get_absolute_url())
    self.assertEqual(response.context['topic_list'][0], self.topic)
def test_topic_deleted(self):
    """Deleting topics/posts rolls forum.updated and the forum counters back."""
    forum_1 = Forum.objects.create(name='new forum', category=self.category)
    topic_1 = Topic.objects.create(name='new topic', forum=forum_1, user=self.user)
    post_1 = Post.objects.create(topic=topic_1, user=self.user, body='test')
    # Sleep so the two posts get clearly distinguishable timestamps.
    time.sleep(2)
    self.assertEqual(topic_1.updated, post_1.created)
    self.assertEqual(forum_1.updated, post_1.created)
    topic_2 = Topic.objects.create(name='another topic', forum=forum_1, user=self.user)
    post_2 = Post.objects.create(topic=topic_2, user=self.user, body='another test')
    time.sleep(2)
    self.assertEqual(topic_2.updated, post_2.created)
    self.assertEqual(forum_1.updated, post_2.created)
    # Deleting the newer topic rolls forum.updated back to the older post
    # and decrements the counters.
    topic_2.delete()
    self.assertEqual(forum_1.updated, post_1.created)
    self.assertEqual(forum_1.topic_count, 1)
    self.assertEqual(forum_1.post_count, 1)
    # Deleting the last post empties the forum's counters.
    post_1.delete()
    self.assertEqual(forum_1.topic_count, 0)
    self.assertEqual(forum_1.post_count, 0)
def test_user_views(self):
    """The profile, user-posts and user-topics pages work, and content in
    hidden forums disappears from them for anonymous visitors."""
    response = self.client.get(reverse('pybb:user', kwargs={'username': self.user.username}))
    self.assertEqual(response.status_code, 200)
    for view_name in ('pybb:user_posts', 'pybb:user_topics'):
        response = self.client.get(reverse(view_name, kwargs={'username': self.user.username}))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['object_list'].count(), 1)
    # Hide the forum: anonymous visitors must no longer see the content.
    self.topic.forum.hidden = True
    self.topic.forum.save()
    self.client.logout()
    for view_name in ('pybb:user_posts', 'pybb:user_topics'):
        response = self.client.get(reverse(view_name, kwargs={'username': self.user.username}))
        self.assertEqual(response.context['object_list'].count(), 0)
def test_post_count(self):
    """The profile's post_count tracks creations and deletions, not edits."""
    topic = Topic(name='etopic', forum=self.forum, user=self.user)
    topic.save()
    post = Post(topic=topic, user=self.user, body='test')  # another post
    post.save()
    self.assertEqual(util.get_pybb_profile(self.user).post_count, 2)
    # Editing an existing post must NOT bump the counter.
    post.body = 'test2'
    post.save()
    self.assertEqual(Profile.objects.get(pk=util.get_pybb_profile(self.user).pk).post_count, 2)
    # Deleting it must decrement the counter.
    post.delete()
    self.assertEqual(Profile.objects.get(pk=util.get_pybb_profile(self.user).pk).post_count, 1)
def test_latest_topics_tag(self):
    """pybb_get_latest_topics must return the five newest topics, newest first."""
    Topic.objects.all().delete()
    for idx in range(10):
        Topic.objects.create(name='topic%s' % idx, user=self.user, forum=self.forum)
    newest = pybb_get_latest_topics(context=None, user=self.user)
    self.assertEqual(len(newest), 5)
    # Topics were created in ascending order, so the tag must report 9..5.
    self.assertEqual(newest[0].name, 'topic9')
    self.assertEqual(newest[4].name, 'topic5')
def test_latest_posts_tag(self):
    """pybb_get_latest_posts must return the five newest posts, newest first."""
    Post.objects.all().delete()
    for idx in range(10):
        Post.objects.create(body='post%s' % idx, user=self.user, topic=self.topic)
    newest = pybb_get_latest_posts(context=None, user=self.user)
    self.assertEqual(len(newest), 5)
    # Posts were created in ascending order, so the tag must report 9..5.
    self.assertEqual(newest[0].body, 'post9')
    self.assertEqual(newest[4].body, 'post5')
def test_multiple_objects_returned(self):
    """
    see issue #87: https://github.com/hovel/pybbm/issues/87

    Regression: rendering the add-post view must not raise
    MultipleObjectsReturned when the forum has more than one moderator.
    """
    # Precondition: a plain (non-staff, non-superuser) topic author.
    self.assertFalse(self.user.is_superuser)
    self.assertFalse(self.user.is_staff)
    self.assertFalse(self.topic.on_moderation)
    self.assertEqual(self.topic.user, self.user)
    user1 = User.objects.create_user('geyser', 'geyser@localhost', 'geyser')
    # Two moderators on the same forum triggered the original bug.
    self.topic.forum.moderators.add(self.user)
    self.topic.forum.moderators.add(user1)
    self.login_client()
    response = self.client.get(reverse('pybb:add_post', kwargs={'topic_id': self.topic.id}))
    self.assertEqual(response.status_code, 200)
def tearDown(self):
    """Restore the pybb defaults mutated in setUp so later tests are unaffected."""
    defaults.PYBB_PREMODERATION = self.ORIG_PYBB_PREMODERATION
    defaults.PYBB_ENABLE_ANONYMOUS_POST = self.ORIG_PYBB_ENABLE_ANONYMOUS_POST
class AnonymousTest(TestCase, SharedTestModule):
    """Anonymous posting and anonymous-view counting.

    Posts submitted without logging in are attributed to the configured
    anonymous user; topic view counts from anonymous visitors are buffered
    in the cache and flushed to the database in batches.
    """

    def setUp(self):
        # Save every pybb default we mutate so tearDown can restore it.
        self.ORIG_PYBB_ENABLE_ANONYMOUS_POST = defaults.PYBB_ENABLE_ANONYMOUS_POST
        self.ORIG_PYBB_ANONYMOUS_USERNAME = defaults.PYBB_ANONYMOUS_USERNAME
        self.PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER = defaults.PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER
        defaults.PYBB_ENABLE_ANONYMOUS_POST = True
        defaults.PYBB_ANONYMOUS_USERNAME = 'Anonymous'
        # The account anonymous posts get attributed to.
        self.user = User.objects.create_user('Anonymous', 'Anonymous@localhost', 'Anonymous')
        self.category = Category.objects.create(name='foo')
        self.forum = Forum.objects.create(name='xfoo', description='bar', category=self.category)
        self.topic = Topic.objects.create(name='etopic', forum=self.forum, user=self.user)
        add_post_permission = Permission.objects.get_by_natural_key('add_post', 'pybb', 'post')
        self.user.user_permissions.add(add_post_permission)

    def tearDown(self):
        defaults.PYBB_ENABLE_ANONYMOUS_POST = self.ORIG_PYBB_ENABLE_ANONYMOUS_POST
        defaults.PYBB_ANONYMOUS_USERNAME = self.ORIG_PYBB_ANONYMOUS_USERNAME
        defaults.PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER = self.PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER

    def test_anonymous_posting(self):
        """A post submitted by a logged-out client is saved under the anonymous user."""
        post_url = reverse('pybb:add_post', kwargs={'topic_id': self.topic.id})
        response = self.client.get(post_url)
        values = self.get_form_values(response)
        values['body'] = 'test anonymous'
        response = self.client.post(post_url, values, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(Post.objects.filter(body='test anonymous')), 1)
        self.assertEqual(Post.objects.get(body='test anonymous').user, self.user)

    def test_anonymous_cache_topic_views(self):
        """Anonymous views accumulate in the cache and flush to Topic.views in
        batches of PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER; a buffer of None makes
        every view hit the database immediately."""
        self.assertNotIn(build_cache_key('anonymous_topic_views', topic_id=self.topic.id), cache)
        url = self.topic.get_absolute_url()
        self.client.get(url)
        self.assertEqual(cache.get(build_cache_key('anonymous_topic_views', topic_id=self.topic.id)), 1)
        # Fill the buffer up to one short of the flush threshold.
        for _ in range(defaults.PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER - 2):
            self.client.get(url)
        self.assertEqual(Topic.objects.get(id=self.topic.id).views, 0)
        self.assertEqual(cache.get(build_cache_key('anonymous_topic_views', topic_id=self.topic.id)),
                         defaults.PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER - 1)
        # The next view reaches the threshold: counter flushed to DB, cache reset to 0.
        self.client.get(url)
        self.assertEqual(Topic.objects.get(id=self.topic.id).views, defaults.PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER)
        self.assertEqual(cache.get(build_cache_key('anonymous_topic_views', topic_id=self.topic.id)), 0)
        views = Topic.objects.get(id=self.topic.id).views
        # With buffering disabled, each view updates the DB directly.
        defaults.PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER = None
        self.client.get(url)
        self.assertEqual(Topic.objects.get(id=self.topic.id).views, views + 1)
        self.assertEqual(cache.get(build_cache_key('anonymous_topic_views', topic_id=self.topic.id)), 0)
def premoderate_test(user, post):
    """Premoderation callback used by PreModerationTest.

    Returns True (the post may skip moderation) only for users whose
    username starts with 'allowed'; everyone else's post is held for
    premoderation.  The original docstring claimed this exempted "staff
    users", which did not match the implementation.

    :param user: object with a ``username`` attribute (normally a User)
    :param post: the post being checked; part of the callback signature
        but unused here
    :return: bool
    """
    return user.username.startswith('allowed')
class PreModerationTest(TestCase, SharedTestModule):
    """Premoderation flow end to end.

    Uses ``premoderate_test`` as the PYBB_PREMODERATION callback, so posts
    by users whose name starts with 'allowed' skip moderation while all
    other posts are held: visible to their author and to superusers only,
    and published for everyone once a superuser moderates them.
    """
    def setUp(self):
        self.ORIG_PYBB_PREMODERATION = defaults.PYBB_PREMODERATION
        defaults.PYBB_PREMODERATION = premoderate_test
        self.create_user()
        self.create_initial()
        mail.outbox = []  # start with a clean outbox for notification checks

    def test_premoderation(self):
        """Walk the whole lifecycle: held post -> moderated -> public; new topic held too."""
        self.client.login(username='zeus', password='zeus')
        add_post_url = reverse('pybb:add_post', kwargs={'topic_id': self.topic.id})
        response = self.client.get(add_post_url)
        values = self.get_form_values(response)
        values['body'] = 'test premoderation'
        response = self.client.post(add_post_url, values, follow=True)
        self.assertEqual(response.status_code, 200)
        post = Post.objects.get(body='test premoderation')
        self.assertEqual(post.on_moderation, True)
        # Post is visible by author
        response = self.client.get(post.get_absolute_url(), follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'test premoderation')
        # Post is not visible by anonymous user
        client = Client()
        response = client.get(post.get_absolute_url(), follow=True)
        self.assertRedirects(response, settings.LOGIN_URL + '?next=%s' % post.get_absolute_url())
        response = client.get(self.topic.get_absolute_url(), follow=True)
        self.assertNotContains(response, 'test premoderation')
        # But visible by superuser (with permissions)
        user = User.objects.create_user('admin', 'zeus@localhost', 'admin')
        user.is_superuser = True
        user.save()
        client.login(username='admin', password='admin')
        response = client.get(post.get_absolute_url(), follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'test premoderation')
        # Users whose name starts with 'allowed' can post without premoderation
        user = User.objects.create_user('allowed_zeus', 'zeus@localhost', 'allowed_zeus')
        client.login(username='allowed_zeus', password='allowed_zeus')
        response = client.get(add_post_url)
        values = self.get_form_values(response)
        values['body'] = 'test premoderation staff'
        response = client.post(add_post_url, values, follow=True)
        self.assertEqual(response.status_code, 200)
        post = Post.objects.get(body='test premoderation staff')
        client = Client()
        response = client.get(post.get_absolute_url(), follow=True)
        self.assertContains(response, 'test premoderation staff')
        # Superuser can moderate
        user.is_superuser = True
        user.save()
        admin_client = Client()
        admin_client.login(username='admin', password='admin')
        post = Post.objects.get(body='test premoderation')
        response = admin_client.get(reverse('pybb:moderate_post', kwargs={'pk': post.id}), follow=True)
        self.assertEqual(response.status_code, 200)
        # Now all can see this post:
        client = Client()
        response = client.get(post.get_absolute_url(), follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'test premoderation')
        # Other users can't moderate
        post.on_moderation = True
        post.save()
        client.login(username='zeus', password='zeus')
        response = client.get(reverse('pybb:moderate_post', kwargs={'pk': post.id}), follow=True)
        self.assertEqual(response.status_code, 403)
        # If user create new topic it goes to moderation if MODERATION_ENABLE
        # When first post is moderated, topic becomes moderated too
        self.client.login(username='zeus', password='zeus')
        add_topic_url = reverse('pybb:add_topic', kwargs={'forum_id': self.forum.id})
        response = self.client.get(add_topic_url)
        values = self.get_form_values(response)
        values['body'] = 'new topic test'
        values['name'] = 'new topic name'
        values['poll_type'] = 0
        response = self.client.post(add_topic_url, values, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'new topic test')
        # Anonymous users see neither the topic listing nor the topic itself yet.
        client = Client()
        response = client.get(self.forum.get_absolute_url())
        self.assertEqual(response.status_code, 200)
        self.assertNotContains(response, 'new topic name')
        response = client.get(Topic.objects.get(name='new topic name').get_absolute_url())
        self.assertEqual(response.status_code, 302)
        # Moderating the topic's first post publishes the topic.
        response = admin_client.get(reverse('pybb:moderate_post',
                                            kwargs={'pk': Post.objects.get(body='new topic test').id}),
                                    follow=True)
        self.assertEqual(response.status_code, 200)
        response = client.get(self.forum.get_absolute_url())
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'new topic name')
        response = client.get(Topic.objects.get(name='new topic name').get_absolute_url())
        self.assertEqual(response.status_code, 200)

    def tearDown(self):
        defaults.PYBB_PREMODERATION = self.ORIG_PYBB_PREMODERATION
class AttachmentTest(TestCase, SharedTestModule):
    """Posting with file attachments enabled.

    A post with a valid attachment formset succeeds; submitting with the
    formset management fields removed raises ValidationError.
    """
    def setUp(self):
        # Save and override the relevant pybb defaults; restored in tearDown.
        self.PYBB_ATTACHMENT_ENABLE = defaults.PYBB_ATTACHMENT_ENABLE
        defaults.PYBB_ATTACHMENT_ENABLE = True
        self.ORIG_PYBB_PREMODERATION = defaults.PYBB_PREMODERATION
        defaults.PYBB_PREMODERATION = False
        # Fixture image shipped with the test suite's static files.
        self.file_name = os.path.join(os.path.dirname(__file__), 'static', 'pybb', 'img', 'attachment.png')
        self.create_user()
        self.create_initial()

    def test_attachment_one(self):
        """A well-formed post with one attachment is accepted and saved."""
        add_post_url = reverse('pybb:add_post', kwargs={'topic_id': self.topic.id})
        self.login_client()
        response = self.client.get(add_post_url)
        with open(self.file_name, 'rb') as fp:
            values = self.get_form_values(response)
            values['body'] = 'test attachment'
            values['attachments-0-file'] = fp
            response = self.client.post(add_post_url, values, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(Post.objects.filter(body='test attachment').exists())

    def test_attachment_two(self):
        """Stripping the formset management fields must raise ValidationError."""
        add_post_url = reverse('pybb:add_post', kwargs={'topic_id': self.topic.id})
        self.login_client()
        response = self.client.get(add_post_url)
        with open(self.file_name, 'rb') as fp:
            values = self.get_form_values(response)
            values['body'] = 'test attachment'
            values['attachments-0-file'] = fp
            # Remove the management form: the formset becomes unprocessable.
            del values['attachments-INITIAL_FORMS']
            del values['attachments-TOTAL_FORMS']
            with self.assertRaises(ValidationError):
                self.client.post(add_post_url, values, follow=True)

    def tearDown(self):
        defaults.PYBB_ATTACHMENT_ENABLE = self.PYBB_ATTACHMENT_ENABLE
        defaults.PYBB_PREMODERATION = self.ORIG_PYBB_PREMODERATION
class PollTest(TestCase, SharedTestModule):
    """Polls attached to topics: creation, answer-count validation
    (PYBB_POLL_MAX_ANSWERS is forced to 2), editing/removal, and voting
    for both single- and multiple-choice poll types.
    """
    def setUp(self):
        self.create_user()
        self.create_initial()
        # Cap answers at 2 so "too many answers" is easy to trigger.
        self.PYBB_POLL_MAX_ANSWERS = defaults.PYBB_POLL_MAX_ANSWERS
        defaults.PYBB_POLL_MAX_ANSWERS = 2

    def test_poll_add(self):
        """Topic creation with poll_type 0 ignores answers; type 1 validates
        the answer count (too few / too many rejected) and stores the poll."""
        add_topic_url = reverse('pybb:add_topic', kwargs={'forum_id': self.forum.id})
        self.login_client()
        response = self.client.get(add_topic_url)
        values = self.get_form_values(response)
        values['body'] = 'test poll body'
        values['name'] = 'test poll name'
        values['poll_type'] = 0  # poll_type = None, create topic without poll answers
        values['poll_question'] = 'q1'
        values['poll_answers-0-text'] = 'answer1'
        values['poll_answers-1-text'] = 'answer2'
        values['poll_answers-TOTAL_FORMS'] = 2
        response = self.client.post(add_topic_url, values, follow=True)
        self.assertEqual(response.status_code, 200)
        new_topic = Topic.objects.get(name='test poll name')
        self.assertIsNone(new_topic.poll_question)
        self.assertFalse(PollAnswer.objects.filter(topic=new_topic).exists())  # no answers here
        values['name'] = 'test poll name 1'
        values['poll_type'] = 1
        values['poll_answers-0-text'] = 'answer1'  # not enough answers
        values['poll_answers-TOTAL_FORMS'] = 1
        response = self.client.post(add_topic_url, values, follow=True)
        self.assertFalse(Topic.objects.filter(name='test poll name 1').exists())
        values['name'] = 'test poll name 1'
        values['poll_type'] = 1
        values['poll_answers-0-text'] = 'answer1'  # too many answers
        values['poll_answers-1-text'] = 'answer2'
        values['poll_answers-2-text'] = 'answer3'
        values['poll_answers-TOTAL_FORMS'] = 3
        response = self.client.post(add_topic_url, values, follow=True)
        self.assertFalse(Topic.objects.filter(name='test poll name 1').exists())
        values['name'] = 'test poll name 1'
        values['poll_type'] = 1  # poll type = single choice, create answers
        values['poll_question'] = 'q1'
        values['poll_answers-0-text'] = 'answer1'  # two answers - what do we need to create poll
        values['poll_answers-1-text'] = 'answer2'
        values['poll_answers-TOTAL_FORMS'] = 2
        response = self.client.post(add_topic_url, values, follow=True)
        self.assertEqual(response.status_code, 200)
        new_topic = Topic.objects.get(name='test poll name 1')
        self.assertEqual(new_topic.poll_question, 'q1')
        self.assertEqual(PollAnswer.objects.filter(topic=new_topic).count(), 2)

    def test_regression_adding_poll_with_removed_answers(self):
        """A poll whose only answer forms are marked DELETE must be rejected."""
        add_topic_url = reverse('pybb:add_topic', kwargs={'forum_id': self.forum.id})
        self.login_client()
        response = self.client.get(add_topic_url)
        values = self.get_form_values(response)
        values['body'] = 'test poll body'
        values['name'] = 'test poll name'
        values['poll_type'] = 1
        values['poll_question'] = 'q1'
        values['poll_answers-0-text'] = ''
        values['poll_answers-0-DELETE'] = 'on'
        values['poll_answers-1-text'] = ''
        values['poll_answers-1-DELETE'] = 'on'
        values['poll_answers-TOTAL_FORMS'] = 2
        response = self.client.post(add_topic_url, values, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertFalse(Topic.objects.filter(name='test poll name').exists())

    def test_regression_poll_deletion_after_second_post(self):
        """Replying to a topic must not wipe its existing poll answers."""
        self.login_client()
        add_topic_url = reverse('pybb:add_topic', kwargs={'forum_id': self.forum.id})
        response = self.client.get(add_topic_url)
        values = self.get_form_values(response)
        values['body'] = 'test poll body'
        values['name'] = 'test poll name'
        values['poll_type'] = 1  # poll type = single choice, create answers
        values['poll_question'] = 'q1'
        values['poll_answers-0-text'] = 'answer1'  # two answers - what do we need to create poll
        values['poll_answers-1-text'] = 'answer2'
        values['poll_answers-TOTAL_FORMS'] = 2
        response = self.client.post(add_topic_url, values, follow=True)
        self.assertEqual(response.status_code, 200)
        new_topic = Topic.objects.get(name='test poll name')
        self.assertEqual(new_topic.poll_question, 'q1')
        self.assertEqual(PollAnswer.objects.filter(topic=new_topic).count(), 2)
        # Add a second post to the same topic; the poll must survive.
        add_post_url = reverse('pybb:add_post', kwargs={'topic_id': new_topic.id})
        response = self.client.get(add_post_url)
        values = self.get_form_values(response)
        values['body'] = 'test answer body'
        response = self.client.post(add_post_url, values, follow=True)
        self.assertEqual(PollAnswer.objects.filter(topic=new_topic).count(), 2)

    def test_poll_edit(self):
        """Editing the first post can add, change, and remove the topic's poll."""
        edit_topic_url = reverse('pybb:edit_post', kwargs={'pk': self.post.id})
        self.login_client()
        response = self.client.get(edit_topic_url)
        values = self.get_form_values(response)
        values['poll_type'] = 1  # add_poll
        values['poll_question'] = 'q1'
        values['poll_answers-0-text'] = 'answer1'
        values['poll_answers-1-text'] = 'answer2'
        values['poll_answers-TOTAL_FORMS'] = 2
        response = self.client.post(edit_topic_url, values, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Topic.objects.get(id=self.topic.id).poll_type, 1)
        self.assertEqual(Topic.objects.get(id=self.topic.id).poll_question, 'q1')
        self.assertEqual(PollAnswer.objects.filter(topic=self.topic).count(), 2)
        values = self.get_form_values(self.client.get(edit_topic_url))
        values['poll_type'] = 2  # change_poll type
        values['poll_question'] = 'q100'  # change poll question
        values['poll_answers-0-text'] = 'answer100'  # change poll answers
        values['poll_answers-1-text'] = 'answer200'
        values['poll_answers-TOTAL_FORMS'] = 2
        response = self.client.post(edit_topic_url, values, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Topic.objects.get(id=self.topic.id).poll_type, 2)
        self.assertEqual(Topic.objects.get(id=self.topic.id).poll_question, 'q100')
        self.assertEqual(PollAnswer.objects.filter(topic=self.topic).count(), 2)
        # Old answers must be replaced, not kept alongside the new ones.
        self.assertTrue(PollAnswer.objects.filter(text='answer100').exists())
        self.assertTrue(PollAnswer.objects.filter(text='answer200').exists())
        self.assertFalse(PollAnswer.objects.filter(text='answer1').exists())
        self.assertFalse(PollAnswer.objects.filter(text='answer2').exists())
        values['poll_type'] = 0  # remove poll
        values['poll_answers-0-text'] = 'answer100'  # no matter how many answers we provide
        values['poll_answers-TOTAL_FORMS'] = 1
        response = self.client.post(edit_topic_url, values, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Topic.objects.get(id=self.topic.id).poll_type, 0)
        self.assertIsNone(Topic.objects.get(id=self.topic.id).poll_question)
        self.assertEqual(PollAnswer.objects.filter(topic=self.topic).count(), 0)

    def test_poll_voting(self):
        """Single-choice: one vote per user, repeat votes rejected (403).
        Multiple-choice: several answers at once; cancelling a vote resets
        the counters and allows voting again."""
        def recreate_poll(poll_type):
            # Reset the topic's poll to a clean two-answer state.
            self.topic.poll_type = poll_type
            self.topic.save()
            PollAnswer.objects.filter(topic=self.topic).delete()
            PollAnswer.objects.create(topic=self.topic, text='answer1')
            PollAnswer.objects.create(topic=self.topic, text='answer2')
        self.login_client()
        recreate_poll(poll_type=Topic.POLL_TYPE_SINGLE)
        vote_url = reverse('pybb:topic_poll_vote', kwargs={'pk': self.topic.id})
        my_answer = PollAnswer.objects.all()[0]
        values = {'answers': my_answer.id}
        response = self.client.post(vote_url, data=values, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Topic.objects.get(id=self.topic.id).poll_votes(), 1)
        self.assertEqual(PollAnswer.objects.get(id=my_answer.id).votes(), 1)
        self.assertEqual(PollAnswer.objects.get(id=my_answer.id).votes_percent(), 100.0)
        # already voted
        response = self.client.post(vote_url, data=values, follow=True)
        self.assertEqual(response.status_code, 403)  # bad request status
        recreate_poll(poll_type=Topic.POLL_TYPE_MULTIPLE)
        values = {'answers': [a.id for a in PollAnswer.objects.all()]}
        response = self.client.post(vote_url, data=values, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertListEqual([a.votes() for a in PollAnswer.objects.all()], [1, 1])
        self.assertListEqual([a.votes_percent() for a in PollAnswer.objects.all()], [50.0, 50.0])
        response = self.client.post(vote_url, data=values, follow=True)
        self.assertEqual(response.status_code, 403)  # already voted
        # Cancelling resets counters and re-enables voting.
        cancel_vote_url = reverse('pybb:topic_cancel_poll_vote', kwargs={'pk': self.topic.id})
        response = self.client.post(cancel_vote_url, data=values, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertListEqual([a.votes() for a in PollAnswer.objects.all()], [0, 0])
        self.assertListEqual([a.votes_percent() for a in PollAnswer.objects.all()], [0, 0])
        response = self.client.post(vote_url, data=values, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertListEqual([a.votes() for a in PollAnswer.objects.all()], [1, 1])
        self.assertListEqual([a.votes_percent() for a in PollAnswer.objects.all()], [50.0, 50.0])

    def tearDown(self):
        defaults.PYBB_POLL_MAX_ANSWERS = self.PYBB_POLL_MAX_ANSWERS
class FiltersTest(TestCase, SharedTestModule):
    """Post-body filters: runs of blank lines are collapsed before storage."""

    def setUp(self):
        self.create_user()
        self.create_initial(post=False)

    def test_filters(self):
        """A submitted body containing blank-line runs is stored with them stripped."""
        post_url = reverse('pybb:add_post', kwargs={'topic_id': self.topic.id})
        self.login_client()
        form_values = self.get_form_values(self.client.get(post_url))
        form_values['body'] = 'test\n \n \n\nmultiple empty lines\n'
        result = self.client.post(post_url, form_values, follow=True)
        self.assertEqual(result.status_code, 200)
        self.assertEqual(Post.objects.all()[0].body, 'test\nmultiple empty lines')
class CustomPermissionHandler(permissions.DefaultPermissionHandler):
    """
    a custom permission handler which changes the meaning of "hidden" forum:
    "hidden" forum or category is visible for all logged on users, not only staff
    (closed topics are additionally filtered out, and polls are disabled).
    """

    def filter_categories(self, user, qs):
        if user.is_anonymous():
            return qs.filter(hidden=False)
        return qs

    def may_view_category(self, user, category):
        if category.hidden:
            return user.is_authenticated()
        return True

    def filter_forums(self, user, qs):
        if user.is_anonymous():
            return qs.filter(Q(hidden=False) & Q(category__hidden=False))
        return qs

    def may_view_forum(self, user, forum):
        if forum.hidden or forum.category.hidden:
            return user.is_authenticated()
        return True

    def filter_topics(self, user, qs):
        if user.is_anonymous():
            qs = qs.filter(Q(forum__hidden=False) & Q(forum__category__hidden=False))
        # Closed topics are hidden from everyone for this test handler.
        return qs.filter(closed=False)

    def may_view_topic(self, user, topic):
        # Topic visibility is delegated to its forum's visibility.
        return self.may_view_forum(user, topic.forum)

    def filter_posts(self, user, qs):
        if user.is_anonymous():
            return qs.filter(Q(topic__forum__hidden=False) & Q(topic__forum__category__hidden=False))
        return qs

    def may_view_post(self, user, post):
        # Post visibility is delegated to its forum's visibility.
        return self.may_view_forum(user, post.topic.forum)

    def may_create_poll(self, user):
        # Polls are disabled outright for this handler.
        return False
def _attach_perms_class(class_name):
    """
    override the permission handler. this cannot be done with @override_settings as
    permissions.perms is already imported at import point, instead we got to monkeypatch
    the modules (not really nice, but only an issue in tests)
    """
    handler = permissions._resolve_class(class_name)
    permissions.perms = handler
    pybb_views.perms = handler
def _detach_perms_class():
    """
    reset permission handler (otherwise other tests may fail)
    """
    handler = permissions._resolve_class('pybb.permissions.DefaultPermissionHandler')
    permissions.perms = handler
    pybb_views.perms = handler
class CustomPermissionHandlerTest(TestCase, SharedTestModule):
    """ test custom permission handler """
    def setUp(self):
        self.create_user()
        # create public and hidden categories, forums, posts
        c_pub = Category(name='public')
        c_pub.save()
        c_hid = Category(name='private', hidden=True)
        c_hid.save()
        self.forum = Forum.objects.create(name='pub1', category=c_pub)
        Forum.objects.create(name='priv1', category=c_hid)
        Forum.objects.create(name='private_in_public_cat', hidden=True, category=c_pub)
        for f in Forum.objects.all():
            t = Topic.objects.create(name='a topic', forum=f, user=self.user)
            Post.objects.create(topic=t, user=self.user, body='test')
        # make some topics closed => hidden
        for t in Topic.objects.all()[0:2]:
            t.closed = True
            t.save()
        # Install CustomPermissionHandler for the duration of each test.
        _attach_perms_class('pybb.tests.CustomPermissionHandler')

    def tearDown(self):
        _detach_perms_class()

    def test_category_permission(self):
        """Hidden categories redirect anonymous users; logged-on users see everything."""
        for c in Category.objects.all():
            # anon user may not see category
            r = self.get_with_user(c.get_absolute_url())
            if c.hidden:
                self.assertEqual(r.status_code, 302)
            else:
                self.assertEqual(r.status_code, 200)
            # logged on user may see all categories
            r = self.get_with_user(c.get_absolute_url(), 'zeus', 'zeus')
            self.assertEqual(r.status_code, 200)

    def test_forum_permission(self):
        """Hidden forums redirect anonymous users; listings exclude closed topics."""
        for f in Forum.objects.all():
            r = self.get_with_user(f.get_absolute_url())
            self.assertEqual(r.status_code, 302 if f.hidden or f.category.hidden else 200)
            r = self.get_with_user(f.get_absolute_url(), 'zeus', 'zeus')
            self.assertEqual(r.status_code, 200)
            # filter_topics drops closed topics for every user.
            self.assertEqual(r.context['object_list'].count(), f.topics.filter(closed=False).count())

    def test_topic_permission(self):
        """Topic visibility follows its forum's visibility for anonymous users."""
        for t in Topic.objects.all():
            r = self.get_with_user(t.get_absolute_url())
            self.assertEqual(r.status_code, 302 if t.forum.hidden or t.forum.category.hidden else 200)
            r = self.get_with_user(t.get_absolute_url(), 'zeus', 'zeus')
            self.assertEqual(r.status_code, 200)

    def test_post_permission(self):
        """Post URLs: hidden forums redirect anonymous users to login (302);
        otherwise the view answers 301 — presumably a permanent redirect to
        the post's position inside the topic (verify against the view)."""
        for p in Post.objects.all():
            r = self.get_with_user(p.get_absolute_url())
            self.assertEqual(r.status_code, 302 if p.topic.forum.hidden or p.topic.forum.category.hidden else 301)
            r = self.get_with_user(p.get_absolute_url(), 'zeus', 'zeus')
            self.assertEqual(r.status_code, 301)

    def test_poll_add(self):
        """may_create_poll() is False, so the poll fields are ignored:
        the topic is created but without question or answers."""
        add_topic_url = reverse('pybb:add_topic', kwargs={'forum_id': self.forum.id})
        self.login_client()
        response = self.client.get(add_topic_url)
        values = self.get_form_values(response)
        values['body'] = 'test poll body'
        values['name'] = 'test poll name'
        values['poll_type'] = 1  # poll_type = 1, create topic with poll
        values['poll_question'] = 'q1'
        values['poll_answers-0-text'] = 'answer1'
        values['poll_answers-1-text'] = 'answer2'
        values['poll_answers-TOTAL_FORMS'] = 2
        response = self.client.post(add_topic_url, values, follow=True)
        self.assertEqual(response.status_code, 200)
        new_topic = Topic.objects.get(name='test poll name')
        self.assertIsNone(new_topic.poll_question)
        self.assertFalse(PollAnswer.objects.filter(topic=new_topic).exists())  # no answers here
class RestrictEditingHandler(permissions.DefaultPermissionHandler):
    """Permission handler that forbids creating topics and posts and editing
    posts for everyone; used by LogonRedirectTest to distinguish a login
    redirect (anonymous) from PermissionDenied (authenticated but unauthorized)."""
    def may_create_topic(self, user, forum):
        return False
    def may_create_post(self, user, topic):
        return False
    def may_edit_post(self, user, post):
        return False
class LogonRedirectTest(TestCase, SharedTestModule):
    """ test whether anonymous user gets redirected, whereas unauthorized user gets PermissionDenied """
    # NOTE: the deprecated assertEquals alias (removed in Python 3.12) has been
    # replaced with assertEqual throughout this class.

    def setUp(self):
        # create users
        staff = User.objects.create_user('staff', 'staff@localhost', 'staff')
        staff.is_staff = True
        staff.save()
        nostaff = User.objects.create_user('nostaff', 'nostaff@localhost', 'nostaff')
        nostaff.is_staff = False
        nostaff.save()
        # create topic, post in hidden category
        self.category = Category(name='private', hidden=True)
        self.category.save()
        self.forum = Forum(name='priv1', category=self.category)
        self.forum.save()
        self.topic = Topic(name='a topic', forum=self.forum, user=staff)
        self.topic.save()
        self.post = Post(body='body post', topic=self.topic, user=staff, on_moderation=True)
        self.post.save()

    def test_redirect_category(self):
        """Hidden category: anonymous -> login redirect, nostaff -> 403, staff -> 200."""
        # access without user should be redirected
        r = self.get_with_user(self.category.get_absolute_url())
        self.assertRedirects(r, settings.LOGIN_URL + '?next=%s' % self.category.get_absolute_url())
        # access with (unauthorized) user should get 403 (forbidden)
        r = self.get_with_user(self.category.get_absolute_url(), 'nostaff', 'nostaff')
        self.assertEqual(r.status_code, 403)
        # allowed user is allowed
        r = self.get_with_user(self.category.get_absolute_url(), 'staff', 'staff')
        self.assertEqual(r.status_code, 200)

    def test_redirect_forum(self):
        """Forum in hidden category: anonymous -> redirect, nostaff -> 403, staff -> 200."""
        # access without user should be redirected
        r = self.get_with_user(self.forum.get_absolute_url())
        self.assertRedirects(r, settings.LOGIN_URL + '?next=%s' % self.forum.get_absolute_url())
        # access with (unauthorized) user should get 403 (forbidden)
        r = self.get_with_user(self.forum.get_absolute_url(), 'nostaff', 'nostaff')
        self.assertEqual(r.status_code, 403)
        # allowed user is allowed
        r = self.get_with_user(self.forum.get_absolute_url(), 'staff', 'staff')
        self.assertEqual(r.status_code, 200)

    def test_redirect_topic(self):
        """Topic in hidden category: anonymous -> redirect, nostaff -> 403, staff -> 200."""
        # access without user should be redirected
        r = self.get_with_user(self.topic.get_absolute_url())
        self.assertRedirects(r, settings.LOGIN_URL + '?next=%s' % self.topic.get_absolute_url())
        # access with (unauthorized) user should get 403 (forbidden)
        r = self.get_with_user(self.topic.get_absolute_url(), 'nostaff', 'nostaff')
        self.assertEqual(r.status_code, 403)
        # allowed user is allowed
        r = self.get_with_user(self.topic.get_absolute_url(), 'staff', 'staff')
        self.assertEqual(r.status_code, 200)

    def test_redirect_post(self):
        """Post in hidden category: anonymous -> redirect, nostaff -> 403,
        staff -> 301 (the post URL answers with a permanent redirect)."""
        # access without user should be redirected
        r = self.get_with_user(self.post.get_absolute_url())
        self.assertRedirects(r, settings.LOGIN_URL + '?next=%s' % self.post.get_absolute_url())
        # access with (unauthorized) user should get 403 (forbidden)
        r = self.get_with_user(self.post.get_absolute_url(), 'nostaff', 'nostaff')
        self.assertEqual(r.status_code, 403)
        # allowed user is allowed
        r = self.get_with_user(self.post.get_absolute_url(), 'staff', 'staff')
        self.assertEqual(r.status_code, 301)

    @override_settings(PYBB_ENABLE_ANONYMOUS_POST=False)
    def test_redirect_topic_add(self):
        """add_topic: anonymous -> redirect; with RestrictEditingHandler even
        staff gets 403; after detaching the handler staff gets 200."""
        _attach_perms_class('pybb.tests.RestrictEditingHandler')
        # access without user should be redirected
        add_topic_url = reverse('pybb:add_topic', kwargs={'forum_id': self.forum.id})
        r = self.get_with_user(add_topic_url)
        self.assertRedirects(r, settings.LOGIN_URL + '?next=%s' % add_topic_url)
        # access with (unauthorized) user should get 403 (forbidden)
        r = self.get_with_user(add_topic_url, 'staff', 'staff')
        self.assertEqual(r.status_code, 403)
        _detach_perms_class()
        # allowed user is allowed
        r = self.get_with_user(add_topic_url, 'staff', 'staff')
        self.assertEqual(r.status_code, 200)

    def test_redirect_post_edit(self):
        """edit_post: anonymous -> redirect; with RestrictEditingHandler even
        staff gets 403; after detaching the handler staff gets 200."""
        _attach_perms_class('pybb.tests.RestrictEditingHandler')
        # access without user should be redirected
        edit_post_url = reverse('pybb:edit_post', kwargs={'pk': self.post.id})
        r = self.get_with_user(edit_post_url)
        self.assertRedirects(r, settings.LOGIN_URL + '?next=%s' % edit_post_url)
        # access with (unauthorized) user should get 403 (forbidden)
        r = self.get_with_user(edit_post_url, 'staff', 'staff')
        self.assertEqual(r.status_code, 403)
        _detach_perms_class()
        # allowed user is allowed
        r = self.get_with_user(edit_post_url, 'staff', 'staff')
        self.assertEqual(r.status_code, 200)
| acamposruiz/quecoins | pybb/tests.py | Python | bsd-2-clause | 81,999 | [
"VisIt"
] | 7fb6abb88890beb321434a005c98a667b962321c4303e1e3489b6b27d57aaf23 |
# Test fixtures: canned bcbio-nextgen sample state used by the unit tests.

# Read-group naming fields expected for the test sample.
NAMES = {
    'lane': 'Test1',
    'lb': None,
    'pu': 'Test1',
    'sample': 'Test1',
    'rg': 'Test1',
    'pl': 'illumina',
}

# Per-lane sample state as produced by the pipeline for the fusion test data.
DATA = {
    'files': [
        '/bcbio-nextgen/tests/test_automated_output/trimmed/1_1_Test1.trimmed.fq.gz',
        '/bcbio-nextgen/tests/test_automated_output/trimmed/1_2_Test1.trimmed.fq.gz',
    ],
    'dirs': {
        'config': '/bcbio-nextgen/tests/test_automated_output',
        'fastq': '/bcbio-nextgen/tests/data/test_fusion',
        'work': '/bcbio-nextgen/tests/test_automated_output',
        'flowcell': '/bcbio-nextgen/tests/data/test_fusion',
        'galaxy': '/bcbio-nextgen/tests/data/automated',
    },
    'lane': '1',
    'description': 'Test1',
    'reference': {
        'genome_context': [
            '/bcbio-nextgen/tests/data/genomes/hg19/coverage/problem_regions/GA4GH/test.bed.gz',
            '/bcbio-nextgen/tests/data/genomes/hg19/coverage/problem_regions/GA4GH/test2.bed.gz',
        ],
        'fasta': {
            'base': '/bcbio-nextgen/tests/data/genomes/hg19/seq/hg19.fa',
        },
        'star': {
            'indexes': [
                '/bcbio-nextgen/tests/data/genomes/hg19/star/chrLength.txt',
                '/bcbio-nextgen/tests/data/genomes/hg19/star/sjdbList.out.tab',
                '/bcbio-nextgen/tests/data/genomes/hg19/star/SA',
                '/bcbio-nextgen/tests/data/genomes/hg19/star/Genome',
                '/bcbio-nextgen/tests/data/genomes/hg19/star/SAindex',
                '/bcbio-nextgen/tests/data/genomes/hg19/star/chrStart.txt',
                '/bcbio-nextgen/tests/data/genomes/hg19/star/chrName.txt',
                '/bcbio-nextgen/tests/data/genomes/hg19/star/chrNameLength.txt',
                '/bcbio-nextgen/tests/data/genomes/hg19/star/genomeParameters.txt',
                '/bcbio-nextgen/tests/data/genomes/hg19/star/Log.out',
            ],
        },
        'rtg': '/bcbio-nextgen/tests/data/genomes/hg19/rtg/hg19.sdf',
    },
    'sam_ref': '/bcbio-nextgen/tests/data/genomes/hg19/seq/hg19.fa',
    'genome_resources': {
        'rnaseq': {
            'transcripts': '/bcbio-nextgen/tests/data/genomes/hg19/rnaseq/ref-transcripts.gtf',
            'transcripts_mask': '/bcbio-nextgen/tests/data/genomes/hg19/rnaseq/ref-transcripts-mask.gtf',
            'gene_bed': '/bcbio-nextgen/tests/data/genomes/hg19/rnaseq/ref-transcripts.bed',
        },
        'version': 7,
        'variation': {
            'train_omni': '/bcbio-nextgen/tests/data/genomes/hg19/variation/1000G_omni2.5.vcf.gz',
            'dbnsfp': '/bcbio-nextgen/tests/data/genomes/hg19/variation/dbNSFP_v2.5.gz',
            'cosmic': '/bcbio-nextgen/tests/data/genomes/hg19/variation/cosmic-v68-hg19.vcf.gz',
            'ancestral': '/bcbio-nextgen/tests/data/genomes/hg19/variation/human_ancestor.fa',
            'train_hapmap': '/bcbio-nextgen/tests/data/genomes/hg19/variation/hapmap_3.3.vcf.gz',
            'train_1000g': '/bcbio-nextgen/tests/data/genomes/hg19/variation/1000G_phase1.snps.high_confidence.vcf.gz',
            'dbsnp': '/bcbio-nextgen/tests/data/genomes/hg19/variation/dbsnp_132.vcf.gz',
            'train_indels': '/bcbio-nextgen/tests/data/genomes/hg19/variation/Mills_Devine_2hit.indels.vcf.gz',
        },
        'srnaseq': {
            # NOTE(review): 'srna-trasncripts' is misspelled, but the key is kept
            # verbatim because pipeline code looks it up by this exact name.
            'srna-trasncripts': '/bcbio-nextgen/tests/data/genomes/hg19/srnaseq/srna-transcripts.gtf',
            'mirbase': '/bcbio-nextgen/tests/data/genomes/hg19/srnaseq/hairpin.fa',
        },
        'aliases': {
            'snpeff': 'hg19',
            'human': True,
            'ensembl': 'homo_sapiens_vep_83_GRCh37',
        },
    },
    'provenance': {
        'data': '/bcbio-nextgen/tests/test_automated_output/provenance/data_versions.csv',
        'entity': 'bcdd2c84-b800-11e6-a323-0242ac110002.prepare_sample.0.trim_sample.0.process_alignment.0',
        'db': None,
        'programs': '/bcbio-nextgen/tests/test_automated_output/provenance/programs.txt',
    },
    'rgnames': {
        'lane': 'Test1',
        'lb': None,
        'pu': 'Test1',
        'sample': 'Test1',
        'rg': 'Test1',
        'pl': 'illumina',
    },
    'upload': {
        'dir': '/bcbio-nextgen/tests/test_automated_output/upload',
        'run_id': '',
    },
    'analysis': 'RNA-seq',
    'name': ['', 'Test1'],
    'genome_build': 'hg19',
    'config': {
        'galaxy_config': '/bcbio-nextgen/tests/data/automated/universe_wsgi.ini',
        'resources': {
            'gatk': {'jvm_opts': ['-Xms500m', '-Xmx3500m']},
            'default': {
                'cores': 16,
                'jvm_opts': ['-Xms750m', '-Xmx3500m'],
                'memory': '3G',
            },
            'express': {'memory': '8g'},
            'seqcluster': {'memory': '8g'},
            'program_versions': '/bcbio-nextgen/tests/test_automated_output/provenance/programs.txt',
            'dexseq': {'memory': '10g'},
            'macs2': {'memory': '8g'},
            'snpeff': {'jvm_opts': ['-Xms750m', '-Xmx4g']},
            'qualimap': {'memory': '4g'},
        },
        'log_dir': '/var/log/bcbio',
        'algorithm': {
            'nomap_split_targets': 200,
            'trim_reads': 'read_through',
            'qc': ['fastqc', 'qualimap_rnaseq', 'samtools', 'gemini'],
            'archive': [],
            'recalibrate': False,
            'mark_duplicates': True,
            'nomap_split_size': 250,
            'quality_format': 'illumina',
            'aligner': 'star',
            'validate_regions': None,
            'realign': False,
            'tools_off': [],
            'fusion_mode': True,
            'variant_regions': None,
            'coverage_interval': None,
            'adapters': ['truseq', 'polya'],
            'validate': None,
            'num_cores': 1,
            'tools_on': [],
        },
        'bcbio_system': '/bcbio-nextgen/tests/test_automated_output/bcbio_system-merged.yaml',
    },
    'resources': {},
    'metadata': {
        'batch': None,
        'phenotype': '',
    },
}

# Expected world state after alignment: one batch containing one sample dict.
CONFIG = [[{
    'align_bam': '/bcbio-nextgen/tests/test_automated_output/align/Test1/Test1_star/Test1.bam',
    'analysis': 'RNA-seq',
    'config': {
        'algorithm': {
            'adapters': ['truseq', 'polya'],
            'aligner': 'star',
            'archive': [],
            'coverage_interval': None,
            'fusion_mode': True,
            'mark_duplicates': True,
            'nomap_split_size': 250,
            'nomap_split_targets': 200,
            'num_cores': 1,
            'qc': ['fastqc', 'qualimap_rnaseq', 'samtools', 'gemini'],
            'quality_format': 'illumina',
            'realign': False,
            'recalibrate': False,
            'tools_off': [],
            'tools_on': [],
            'trim_reads': 'read_through',
            'validate': None,
            'validate_regions': None,
            'variant_regions': None,
        },
        'bcbio_system': '/bcbio-nextgen/tests/test_automated_output/bcbio_system-merged.yaml',
        'galaxy_config': '/bcbio-nextgen/tests/data/automated/universe_wsgi.ini',
        'log_dir': '/var/log/bcbio',
        'resources': {
            'default': {
                'cores': 16,
                'jvm_opts': ['-Xms750m', '-Xmx3500m'],
                'memory': '3G',
            },
            'dexseq': {'memory': '10g'},
            'express': {'memory': '8g'},
            'gatk': {'jvm_opts': ['-Xms500m', '-Xmx3500m']},
            'macs2': {'memory': '8g'},
            'program_versions': '/bcbio-nextgen/tests/test_automated_output/provenance/programs.txt',
            'qualimap': {'memory': '4g'},
            'seqcluster': {'memory': '8g'},
            'snpeff': {'jvm_opts': ['-Xms750m', '-Xmx4g']},
        },
    },
    'description': 'Test1',
    'dirs': {
        'config': '/bcbio-nextgen/tests/test_automated_output',
        'fastq': '/bcbio-nextgen/tests/data/test_fusion',
        'flowcell': '/bcbio-nextgen/tests/data/test_fusion',
        'galaxy': '/bcbio-nextgen/tests/data/automated',
        'work': '/bcbio-nextgen/tests/test_automated_output',
    },
    'files': [
        '/bcbio-nextgen/tests/test_automated_output/trimmed/1_1_Test1.trimmed.fq.gz',
        '/bcbio-nextgen/tests/test_automated_output/trimmed/1_2_Test1.trimmed.fq.gz',
    ],
    'genome_build': 'hg19',
    'genome_resources': {
        'aliases': {
            'ensembl': 'homo_sapiens_vep_83_GRCh37',
            'human': True,
            'snpeff': 'hg19',
        },
        'rnaseq': {
            'gene_bed': '/bcbio-nextgen/tests/data/genomes/hg19/rnaseq/ref-transcripts.bed',
            'transcripts': '/bcbio-nextgen/tests/data/genomes/hg19/rnaseq/ref-transcripts.gtf',
            'transcripts_mask': '/bcbio-nextgen/tests/data/genomes/hg19/rnaseq/ref-transcripts-mask.gtf',
        },
        'srnaseq': {
            'mirbase': '/bcbio-nextgen/tests/data/genomes/hg19/srnaseq/hairpin.fa',
            'srna-trasncripts': '/bcbio-nextgen/tests/data/genomes/hg19/srnaseq/srna-transcripts.gtf',
        },
        'variation': {
            'ancestral': '/bcbio-nextgen/tests/data/genomes/hg19/variation/human_ancestor.fa',
            'cosmic': '/bcbio-nextgen/tests/data/genomes/hg19/variation/cosmic-v68-hg19.vcf.gz',
            'dbnsfp': '/bcbio-nextgen/tests/data/genomes/hg19/variation/dbNSFP_v2.5.gz',
            'dbsnp': '/bcbio-nextgen/tests/data/genomes/hg19/variation/dbsnp_132.vcf.gz',
            'train_1000g': '/bcbio-nextgen/tests/data/genomes/hg19/variation/1000G_phase1.snps.high_confidence.vcf.gz',
            'train_hapmap': '/bcbio-nextgen/tests/data/genomes/hg19/variation/hapmap_3.3.vcf.gz',
            'train_indels': '/bcbio-nextgen/tests/data/genomes/hg19/variation/Mills_Devine_2hit.indels.vcf.gz',
            'train_omni': '/bcbio-nextgen/tests/data/genomes/hg19/variation/1000G_omni2.5.vcf.gz',
        },
        'version': 7,
    },
    'hla': {'fastq': None},
    'lane': '1',
    'metadata': {'batch': None, 'phenotype': ''},
    'name': ['', 'Test1'],
    'provenance': {
        'data': '/bcbio-nextgen/tests/test_automated_output/provenance/data_versions.csv',
        'db': None,
        'entity': '21efc524-bc79-11e6-a323-0242ac110002.prepare_sample.0.trim_sample.0.process_alignment.0',
        'programs': '/bcbio-nextgen/tests/test_automated_output/provenance/programs.txt',
    },
    'reference': {
        'fasta': {'base': '/bcbio-nextgen/tests/data/genomes/hg19/seq/hg19.fa'},
        'genome_context': [
            '/bcbio-nextgen/tests/data/genomes/hg19/coverage/problem_regions/GA4GH/test.bed.gz',
            '/bcbio-nextgen/tests/data/genomes/hg19/coverage/problem_regions/GA4GH/test2.bed.gz',
        ],
        'rtg': '/bcbio-nextgen/tests/data/genomes/hg19/rtg/hg19.sdf',
        'star': {
            'indexes': [
                '/bcbio-nextgen/tests/data/genomes/hg19/star/chrLength.txt',
                '/bcbio-nextgen/tests/data/genomes/hg19/star/sjdbList.out.tab',
                '/bcbio-nextgen/tests/data/genomes/hg19/star/SA',
                '/bcbio-nextgen/tests/data/genomes/hg19/star/Genome',
                '/bcbio-nextgen/tests/data/genomes/hg19/star/SAindex',
                '/bcbio-nextgen/tests/data/genomes/hg19/star/chrStart.txt',
                '/bcbio-nextgen/tests/data/genomes/hg19/star/chrName.txt',
                '/bcbio-nextgen/tests/data/genomes/hg19/star/chrNameLength.txt',
                '/bcbio-nextgen/tests/data/genomes/hg19/star/genomeParameters.txt',
                '/bcbio-nextgen/tests/data/genomes/hg19/star/Log.out',
            ],
        },
    },
    'resources': {},
    'rgnames': {
        'lane': 'Test1',
        'lb': None,
        'pl': 'illumina',
        'pu': 'Test1',
        'rg': 'Test1',
        'sample': 'Test1',
    },
    'sam_ref': '/bcbio-nextgen/tests/data/genomes/hg19/seq/hg19.fa',
    'transcriptome_bam': None,
    'upload': {
        'dir': '/bcbio-nextgen/tests/test_automated_output/upload',
        'run_id': '',
    },
    'work_bam': '/bcbio-nextgen/tests/test_automated_output/align/Test1/Test1_star/Test1.bam',
}]]
| a113n/bcbio-nextgen | tests/unit/data.py | Python | mit | 13,077 | [
"Galaxy"
] | 5975c5138281a81bcecea235d0eed2513c4a4ace7b5d61ff8e9e29d962e73c00 |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Automatic differentiation variational inference in Numpy and JAX.
This demo fits a Gaussian approximation to an intractable, unnormalized
density, by differentiating through a Monte Carlo estimate of the
variational evidence lower bound (ELBO)."""
from functools import partial
import matplotlib.pyplot as plt
from jax import jit, grad, vmap
from jax import random
from jax.example_libraries import optimizers
import jax.numpy as jnp
import jax.scipy.stats.norm as norm
# ========= Functions to define the evidence lower bound. =========
def diag_gaussian_sample(rng, mean, log_std):
  """Draw one reparameterized sample from N(mean, diag(exp(log_std))**2)."""
  noise = random.normal(rng, mean.shape)
  return jnp.exp(log_std) * noise + mean
def diag_gaussian_logpdf(x, mean, log_std):
  """Log-density of a point under a diagonal Gaussian (sum over dimensions)."""
  per_dim = vmap(norm.logpdf)(x, mean, jnp.exp(log_std))
  return jnp.sum(per_dim)
def elbo(logprob, rng, mean, log_std):
  """One-sample Monte Carlo estimate of E_q[log p(z) - log q(z)]."""
  z = diag_gaussian_sample(rng, mean, log_std)
  return logprob(z) - diag_gaussian_logpdf(z, mean, log_std)
def batch_elbo(logprob, rng, params, num_samples):
  """Average the single-sample ELBO estimate over num_samples fresh RNG keys."""
  keys = random.split(rng, num_samples)
  per_sample = vmap(partial(elbo, logprob), in_axes=(0, None, None))(keys, *params)
  return jnp.mean(per_sample)
# ========= Helper function for plotting. =========
@partial(jit, static_argnums=(0, 1, 2, 4))
def _mesh_eval(func, x_limits, y_limits, params, num_ticks):
# Evaluate func on a 2D grid defined by x_limits and y_limits.
x = jnp.linspace(*x_limits, num=num_ticks)
y = jnp.linspace(*y_limits, num=num_ticks)
X, Y = jnp.meshgrid(x, y)
xy_vec = jnp.stack([X.ravel(), Y.ravel()]).T
zs = vmap(func, in_axes=(0, None))(xy_vec, params)
return X, Y, zs.reshape(X.shape)
def mesh_eval(func, x_limits, y_limits, params, num_ticks=101):
  """Convenience wrapper around the jitted _mesh_eval with a default resolution."""
  grid = _mesh_eval(func, x_limits, y_limits, params, num_ticks)
  return grid
# ========= Define an intractable unnormalized density =========
def funnel_log_density(params):
  """Unnormalized log-density of a funnel: x ~ N(0, exp(y)), y ~ N(0, 1.35)."""
  x, y = params[0], params[1]
  scale_term = norm.logpdf(y, 0, 1.35)
  conditional_term = norm.logpdf(x, 0, jnp.exp(y))
  return conditional_term + scale_term
if __name__ == "__main__":
  num_samples = 40

  @jit
  def objective(params, t):
    # Negative batch ELBO; seeded per iteration so each step sees fresh samples.
    rng = random.PRNGKey(t)
    return -batch_elbo(funnel_log_density, rng, params, num_samples)

  # Set up the interactive figure.
  fig = plt.figure(figsize=(8, 8), facecolor='white')
  ax = fig.add_subplot(111, frameon=False)
  plt.ion()
  plt.show(block=False)
  x_limits = (-2, 2)
  y_limits = (-4, 2)
  target_dist = lambda x, _: jnp.exp(funnel_log_density(x))
  approx_dist = lambda x, params: jnp.exp(diag_gaussian_logpdf(x, *params))

  def callback(params, t):
    # Report progress and redraw both densities plus current samples.
    print("Iteration {} lower bound {}".format(t, objective(params, t)))
    plt.cla()
    gx, gy, gz = mesh_eval(target_dist, x_limits, y_limits, 1)
    ax.contour(gx, gy, gz, cmap='summer')
    gx, gy, gz = mesh_eval(approx_dist, x_limits, y_limits, params)
    ax.contour(gx, gy, gz, cmap='winter')
    ax.set_xlim(x_limits)
    ax.set_ylim(y_limits)
    ax.set_yticks([])
    ax.set_xticks([])
    # Plot random samples from the variational distribution; the RNG used in
    # computing the objective is cloned so we show exactly the same samples.
    rngs = random.split(random.PRNGKey(t), num_samples)
    samples = vmap(diag_gaussian_sample, in_axes=(0, None, None))(rngs, *params)
    ax.plot(samples[:, 0], samples[:, 1], 'b.')
    plt.draw()
    plt.pause(1.0/60.0)

  # Set up the momentum-SGD optimizer over (mean, log_std).
  latent_dim = 2
  init_mean = jnp.zeros(latent_dim)
  init_log_std = jnp.zeros(latent_dim)
  init_params = (init_mean, init_log_std)
  opt_init, opt_update, get_params = optimizers.momentum(step_size=0.1, mass=0.9)
  opt_state = opt_init(init_params)

  @jit
  def update(i, opt_state):
    params = get_params(opt_state)
    grads = grad(objective)(params, i)
    return opt_update(i, grads, opt_state)

  # Main optimization loop.
  print("Optimizing variational parameters...")
  for t in range(100):
    opt_state = update(t, opt_state)
    params = get_params(opt_state)
    callback(params, t)
  plt.show(block=True)
| google/jax | examples/advi.py | Python | apache-2.0 | 4,900 | [
"Gaussian"
] | 6db8cc65f64cd48843318d2577ab8fba41b3eafb646f323a8e519349f3eb13bb |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007-2008 Douglas S. Blank
# Copyright (C) 2004-2007 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2011 Tim G L Lyons
# Copyright (C) 2013 Vassilii Khachaturov
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"Export to CSV Spreadsheet."
#-------------------------------------------------------------------------
#
# Standard Python Modules
#
#-------------------------------------------------------------------------
import os
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
import csv
from io import StringIO
import codecs
#------------------------------------------------------------------------
#
# Set up logging
#
#------------------------------------------------------------------------
import logging
import collections
LOG = logging.getLogger(".ExportCSV")
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.lib import EventType, Person
from gramps.gen.lib.eventroletype import EventRoleType
from gramps.gui.plug.export import WriterOptionBox
from gramps.gen.utils.string import gender as gender_map
from gramps.gen.datehandler import get_date
from gramps.gen.display.place import displayer as _pd
from gramps.gui.glade import Glade
from gramps.gen.constfunc import win
#-------------------------------------------------------------------------
#
# The function that does the exporting
#
#-------------------------------------------------------------------------
def exportData(database, filename, user, option_box=None):
    """Top-level export hook: build a CSVWriter for the database and run it."""
    writer = CSVWriter(database, filename, user, option_box)
    return writer.export_data()
#-------------------------------------------------------------------------
#
# Support Functions
#
#-------------------------------------------------------------------------
def sortable_string_representation(text):
    """Build a sort key: all letters first, then the digits zero-padded to 10 places.

    E.g. "F123" -> "F0000000123", so IDs with a common prefix sort numerically.
    """
    digits = "".join(ch for ch in text if ch.isdigit())
    letters = "".join(ch for ch in text if not ch.isdigit())
    # keep only the last 10 digit characters, left-padded with zeros
    return letters + digits.zfill(10)[-10:]
def get_primary_event_ref_from_type(db, person, event_name):
    """
    Return the first event reference on ``person`` that has the PRIMARY role
    and whose event type matches ``event_name`` (e.g. "Baptism").

    Returns None when no matching primary event exists.
    """
    for ref in person.event_ref_list:
        if ref.get_role() == EventRoleType.PRIMARY:
            event = db.get_event_from_handle(ref.ref)
            if event and event.type.is_type(event_name):
                return ref
    return None
def get_primary_source_title(db, obj):
    """Return the title of the first source cited by obj, or '' if none resolves."""
    for citation_handle in obj.get_citation_list():
        citation = db.get_citation_from_handle(citation_handle)
        source_handle = citation.get_reference_handle()
        source = db.get_source_from_handle(source_handle)
        if source:
            return source.get_title()
    return ""
#-------------------------------------------------------------------------
#
# CSVWriter Options
#
#-------------------------------------------------------------------------
class CSVWriterOptionBox(WriterOptionBox):
    """
    Create a VBox with the option widgets and define methods to retrieve
    the options.  All sections (people, marriages, children, places) are
    exported by default; parse_options() reads the user's final choices.
    """
    def __init__(self, person, dbstate, uistate, track=None, window=None):
        # bug fix: the original signature used a mutable default argument
        # (track=[]) which is shared across all calls; use None instead and
        # substitute a fresh list per call.
        if track is None:
            track = []
        WriterOptionBox.__init__(self, person, dbstate, uistate, track=track,
                                 window=window)
        ## TODO: add place filter selection
        self.include_individuals = 1
        self.include_marriages = 1
        self.include_children = 1
        self.include_places = 1
        self.translate_headers = 1
        # Gtk widgets are created lazily in get_option_box()
        self.include_individuals_check = None
        self.include_marriages_check = None
        self.include_children_check = None
        self.include_places_check = None
        self.translate_headers_check = None

    def get_option_box(self):
        """Build the option VBox, appending one check button per option."""
        from gi.repository import Gtk
        option_box = WriterOptionBox.get_option_box(self)
        self.include_individuals_check = Gtk.CheckButton(label=_("Include people"))
        self.include_marriages_check = Gtk.CheckButton(label=_("Include marriages"))
        self.include_children_check = Gtk.CheckButton(label=_("Include children"))
        self.include_places_check = Gtk.CheckButton(label=_("Include places"))
        self.translate_headers_check = Gtk.CheckButton(label=_("Translate headers"))
        # everything defaults to checked (exported)
        self.include_individuals_check.set_active(1)
        self.include_marriages_check.set_active(1)
        self.include_children_check.set_active(1)
        self.include_places_check.set_active(1)
        self.translate_headers_check.set_active(1)
        option_box.pack_start(self.include_individuals_check, False, True, 0)
        option_box.pack_start(self.include_marriages_check, False, True, 0)
        option_box.pack_start(self.include_children_check, False, True, 0)
        option_box.pack_start(self.include_places_check, False, True, 0)
        option_box.pack_start(self.translate_headers_check, False, True, 0)
        return option_box

    def parse_options(self):
        """Copy the check-button states into the plain option attributes."""
        WriterOptionBox.parse_options(self)
        if self.include_individuals_check:
            self.include_individuals = self.include_individuals_check.get_active()
            self.include_marriages = self.include_marriages_check.get_active()
            self.include_children = self.include_children_check.get_active()
            self.include_places = self.include_places_check.get_active()
            self.translate_headers = self.translate_headers_check.get_active()
#-------------------------------------------------------------------------
#
# CSVWriter class
#
#-------------------------------------------------------------------------
class CSVWriter:
    """Write people, marriages, children and places from a Gramps database
    to a single CSV spreadsheet file."""

    def __init__(self, database, filename, user, option_box=None):
        """Collect the handles to export and resolve the export options.

        :param database: Gramps database (possibly filtered via option_box)
        :param filename: output CSV path
        :param user: user interaction object providing callback/notify_error
        :param option_box: optional CSVWriterOptionBox with section choices
        """
        self.db = database
        self.option_box = option_box
        self.filename = filename
        self.user = user
        # bug fix: isinstance(..., collections.Callable) relied on an alias
        # that was removed in Python 3.10; the builtin callable() is the
        # supported way to test for a callable progress callback.
        if callable(self.user.callback):  # callback is really callable
            self.update = self.update_real
        else:
            self.update = self.update_empty

        self.plist = {}
        self.flist = {}
        self.place_list = {}
        self.persons_details_done = []
        self.persons_notes_done = []
        self.person_ids = {}
        if not option_box:
            # no UI: export every section with translated headers
            self.include_individuals = 1
            self.include_marriages = 1
            self.include_children = 1
            self.include_places = 1
            self.translate_headers = 1
        else:
            self.option_box.parse_options()
            self.db = option_box.get_filtered_database(self.db)
            self.include_individuals = self.option_box.include_individuals
            self.include_marriages = self.option_box.include_marriages
            self.include_children = self.option_box.include_children
            self.include_places = self.option_box.include_places
            self.translate_headers = self.option_box.translate_headers

        self.plist = [x for x in self.db.iter_person_handles()]
        # order places so that every enclosing place is written before the
        # places it encloses (dependencies first)
        self.place_list = []
        place_list = sorted([x for x in self.db.iter_place_handles()])
        while place_list:
            handle = place_list[0]
            place = self.db.get_place_from_handle(handle)
            if place:
                if all([(x.ref in self.place_list) for x in place.placeref_list]):
                    self.place_list.append(place_list.pop(0))
                else:  # dependencies not written yet: put at the back of the line
                    place_list.append(place_list.pop(0))
            else:
                place_list.pop(0)
        # families in which the exported people are spouses ...
        self.flist = {}
        for key in self.plist:
            p = self.db.get_person_from_handle(key)
            if p:
                for family_handle in p.get_family_handle_list():
                    self.flist[family_handle] = 1
        # ... plus families in which the exported people are children
        for family_handle in self.db.iter_family_handles():
            family = self.db.get_family_from_handle(family_handle)
            if family:
                for child_ref in family.get_child_ref_list():
                    if child_ref:
                        child_handle = child_ref.ref
                        if child_handle in self.plist:
                            self.flist[family_handle] = 1

    def update_empty(self):
        """Progress no-op used when the caller supplied no usable callback."""
        pass

    def update_real(self):
        """Advance the progress counter; notify the callback on percent change."""
        self.count += 1
        newval = int(100 * self.count / self.total)
        if newval != self.oldval:
            self.user.callback(newval)
            self.oldval = newval

    def writeln(self):
        """Emit an empty row (blank line) to the CSV output."""
        self.g.writerow([])

    def write_csv(self, *items):
        """Write one CSV row from the given positional values."""
        self.g.writerow(items)

    def export_data(self):
        """Run the export, writing the selected sections in order:
        places, people, marriages, children.  Returns True on success."""
        self.dirname = os.path.dirname(self.filename)
        try:
            # BOM-signed UTF-8 on Windows so Excel detects the encoding
            self.fp = open(self.filename, "w",
                           encoding='utf_8_sig' if win() else 'utf_8',
                           newline='')
            self.g = csv.writer(self.fp)
        except IOError as msg:
            msg2 = _("Could not create %s") % self.filename
            self.user.notify_error(msg2, str(msg))
            return False
        except Exception:
            # bug fix: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt
            self.user.notify_error(_("Could not create %s") % self.filename)
            return False
        ######################### initialize progress bar
        self.count = 0
        self.total = 0
        self.oldval = 0
        if self.include_individuals:
            self.total += len(self.plist)
        if self.include_marriages:
            self.total += len(self.flist)
        if self.include_children:
            self.total += len(self.flist)
        if self.include_places:
            self.total += len(self.place_list)
        ########################
        LOG.debug("Possible people to export: %s", len(self.plist))
        LOG.debug("Possible families to export: %s", len(self.flist))
        LOG.debug("Possible places to export: %s", len(self.place_list))
        ########################### places section
        if self.include_places:
            if self.translate_headers:
                self.write_csv(_("Place"), _("Title"), _("Name"),
                               _("Type"), _("Latitude"), _("Longitude"),
                               _("Code"), _("Enclosed_by"), _("Date"))
            else:
                self.write_csv("Place", "Title", "Name",
                               "Type", "Latitude", "Longitude",
                               "Code", "Enclosed_by", "Date")
            for key in self.place_list:
                place = self.db.get_place_from_handle(key)
                if place:
                    place_id = place.gramps_id
                    place_title = place.title
                    place_name = place.name.value
                    place_type = str(place.place_type)
                    place_latitude = place.lat
                    place_longitude = place.long
                    place_code = place.code
                    if place.placeref_list:
                        # one row per enclosing place reference
                        for placeref in place.placeref_list:
                            placeref_obj = self.db.get_place_from_handle(placeref.ref)
                            placeref_date = ""
                            if not placeref.date.is_empty():
                                placeref_date = placeref.date
                            placeref_id = ""
                            if placeref_obj:
                                placeref_id = "[%s]" % placeref_obj.gramps_id
                            self.write_csv("[%s]" % place_id, place_title, place_name, place_type,
                                           place_latitude, place_longitude, place_code, placeref_id,
                                           placeref_date)
                    else:
                        self.write_csv("[%s]" % place_id, place_title, place_name, place_type,
                                       place_latitude, place_longitude, place_code, "",
                                       "")
                self.update()
            self.writeln()
        ########################### sort people by (surname, first name)
        sortorder = []
        dropped_surnames = set()
        for key in self.plist:
            person = self.db.get_person_from_handle(key)
            if person:
                primary_name = person.get_primary_name()
                first_name = primary_name.get_first_name()
                surname_obj = primary_name.get_primary_surname()
                surname = surname_obj.get_surname()
                # See bug #6955: only the primary surname is exported
                nonprimary_surnames = set(primary_name.get_surname_list())
                nonprimary_surnames.remove(surname_obj)
                dropped_surnames.update(nonprimary_surnames)
                sortorder.append((surname, first_name, key))
        if dropped_surnames:
            LOG.warning(
                _("CSV export doesn't support non-primary surnames, "
                  "{count} dropped").format(
                      count=len(dropped_surnames)))
            LOG.debug(
                "Dropped surnames: " +
                ', '.join([("%s %s %s" % (surname.get_prefix(),
                                          surname.get_surname(), surname.get_connector())).strip()
                           for surname in dropped_surnames]))
        sortorder.sort()  # will sort on tuples
        plist = [data[2] for data in sortorder]
        ########################### people section
        if self.include_individuals:
            if self.translate_headers:
                self.write_csv(
                    _("Person"), _("Surname"), _("Given"),
                    _("Call"), _("Suffix"), _("Prefix"),
                    _("Person|Title"), _("Gender"),
                    _("Birth date"), _("Birth place"), _("Birth source"),
                    _("Baptism date"), _("Baptism place"), _("Baptism source"),
                    _("Death date"), _("Death place"), _("Death source"),
                    _("Burial date"), _("Burial place"), _("Burial source"),
                    _("Note"))
            else:
                self.write_csv(
                    "Person", "Surname", "Given",
                    "Call", "Suffix", "Prefix",
                    "Title", "Gender",
                    "Birth date", "Birth place", "Birth source",
                    "Baptism date", "Baptism place", "Baptism source",
                    "Death date", "Death place", "Death source",
                    "Burial date", "Burial place", "Burial source",
                    "Note")
            for key in plist:
                person = self.db.get_person_from_handle(key)
                if person:
                    primary_name = person.get_primary_name()
                    first_name = primary_name.get_first_name()
                    surname_obj = primary_name.get_primary_surname()
                    surname = surname_obj.get_surname()
                    prefix = surname_obj.get_prefix()
                    suffix = primary_name.get_suffix()
                    title = primary_name.get_title()
                    grampsid = person.get_gramps_id()
                    grampsid_ref = ""
                    if grampsid != "":
                        grampsid_ref = "[" + grampsid + "]"
                    note = ''  # don't export notes
                    callname = primary_name.get_call_name()
                    gender = person.get_gender()
                    if gender == Person.MALE:
                        gender = gender_map[Person.MALE]
                    elif gender == Person.FEMALE:
                        gender = gender_map[Person.FEMALE]
                    else:
                        gender = gender_map[Person.UNKNOWN]
                    # Birth:
                    birthdate = ""
                    birthplace = ""
                    birthsource = ""
                    birth_ref = person.get_birth_ref()
                    if birth_ref:
                        birth = self.db.get_event_from_handle(birth_ref.ref)
                        if birth:
                            birthdate = self.format_date(birth)
                            birthplace = self.format_place(birth)
                            birthsource = get_primary_source_title(self.db, birth)
                    # Baptism:
                    baptismdate = ""
                    baptismplace = ""
                    baptismsource = ""
                    baptism_ref = get_primary_event_ref_from_type(
                        self.db, person, "Baptism")
                    if baptism_ref:
                        baptism = self.db.get_event_from_handle(baptism_ref.ref)
                        if baptism:
                            baptismdate = self.format_date(baptism)
                            baptismplace = self.format_place(baptism)
                            baptismsource = get_primary_source_title(self.db, baptism)
                    # Death:
                    deathdate = ""
                    deathplace = ""
                    deathsource = ""
                    death_ref = person.get_death_ref()
                    if death_ref:
                        death = self.db.get_event_from_handle(death_ref.ref)
                        if death:
                            deathdate = self.format_date(death)
                            deathplace = self.format_place(death)
                            deathsource = get_primary_source_title(self.db, death)
                    # Burial:
                    burialdate = ""
                    burialplace = ""
                    burialsource = ""
                    burial_ref = get_primary_event_ref_from_type(
                        self.db, person, "Burial")
                    if burial_ref:
                        burial = self.db.get_event_from_handle(burial_ref.ref)
                        if burial:
                            burialdate = self.format_date(burial)
                            burialplace = self.format_place(burial)
                            burialsource = get_primary_source_title(self.db, burial)
                    # Write it out:
                    self.write_csv(grampsid_ref, surname, first_name, callname,
                                   suffix, prefix, title, gender,
                                   birthdate, birthplace, birthsource,
                                   baptismdate, baptismplace, baptismsource,
                                   deathdate, deathplace, deathsource,
                                   burialdate, burialplace, burialsource,
                                   note)
                self.update()
            self.writeln()
        ########################### sort families by their Gramps ID
        sortorder = []
        for key in self.flist:
            family = self.db.get_family_from_handle(key)
            if family:
                marriage_id = family.get_gramps_id()
                sortorder.append(
                    (sortable_string_representation(marriage_id), key)
                )
        sortorder.sort()  # will sort on tuples
        flist = [data[1] for data in sortorder]
        ########################### marriages section
        if self.include_marriages:
            if self.translate_headers:
                self.write_csv(_("Marriage"), _("Husband"), _("Wife"),
                               _("Date"), _("Place"), _("Source"), _("Note"))
            else:
                self.write_csv("Marriage", "Husband", "Wife",
                               "Date", "Place", "Source", "Note")
            for key in flist:
                family = self.db.get_family_from_handle(key)
                if family:
                    marriage_id = family.get_gramps_id()
                    if marriage_id != "":
                        marriage_id = "[" + marriage_id + "]"
                    mother_id = ''
                    father_id = ''
                    father_handle = family.get_father_handle()
                    if father_handle:
                        father = self.db.get_person_from_handle(father_handle)
                        father_id = father.get_gramps_id()
                        if father_id != "":
                            father_id = "[" + father_id + "]"
                    mother_handle = family.get_mother_handle()
                    if mother_handle:
                        mother = self.db.get_person_from_handle(mother_handle)
                        mother_id = mother.get_gramps_id()
                        if mother_id != "":
                            mother_id = "[" + mother_id + "]"
                    # find the marriage event for date/place/source
                    mdate, mplace, source = '', '', ''
                    event_ref_list = family.get_event_ref_list()
                    for event_ref in event_ref_list:
                        event = self.db.get_event_from_handle(event_ref.ref)
                        if event.get_type() == EventType.MARRIAGE:
                            mdate = self.format_date(event)
                            mplace = self.format_place(event)
                            source = get_primary_source_title(self.db, event)
                    note = ''
                    self.write_csv(marriage_id, father_id, mother_id, mdate,
                                   mplace, source, note)
                self.update()
            self.writeln()
        ########################### children section
        if self.include_children:
            if self.translate_headers:
                self.write_csv(_("Family"), _("Child"))
            else:
                self.write_csv("Family", "Child")
            for key in flist:
                family = self.db.get_family_from_handle(key)
                if family:
                    family_id = family.get_gramps_id()
                    if family_id != "":
                        family_id = "[" + family_id + "]"
                    for child_ref in family.get_child_ref_list():
                        child_handle = child_ref.ref
                        child = self.db.get_person_from_handle(child_handle)
                        grampsid = child.get_gramps_id()
                        grampsid_ref = ""
                        if grampsid != "":
                            grampsid_ref = "[" + grampsid + "]"
                        self.write_csv(family_id, grampsid_ref)
                self.update()
            self.writeln()
        self.fp.close()
        return True

    def format_date(self, date):
        """Return the localized text form of a Gramps date-bearing object."""
        return get_date(date)

    def format_place(self, event):
        """
        If places are included in the export return a link, else return a
        formatted place for the given event.
        """
        if self.include_places:
            place_handle = event.get_place_handle()
            if place_handle:
                place = self.db.get_place_from_handle(place_handle)
                if place:
                    return "[%s]" % place.get_gramps_id()
            return ""
        else:
            return _pd.display_event(self.db, event)
| prculley/gramps | gramps/plugins/export/exportcsv.py | Python | gpl-2.0 | 23,778 | [
"Brian"
] | ea8ea88250f2d50dad4f50f4f7c960915c2061fa8312fe1f0741f464e1d02a59 |
#!/usr/bin/env python
import sys
import os
import subprocess
import logging
import argparse
#import align
import pysam
import numpy as np
from uuid import uuid4
FORMAT = '%(asctime)s %(message)s'
logging.basicConfig(format=FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def rc(dna):
    ''' reverse complement '''
    # BUG FIX: the bare name ``maketrans`` was never imported, so this
    # function raised NameError on every call under Python 3; the correct
    # spelling is the str.maketrans static method.
    complements = str.maketrans('acgtrymkbdhvACGTRYMKBDHV', 'tgcayrkmvhdbTGCAYRKMVHDB')
    return dna.translate(complements)[::-1]
def align(qryseq, refseq, elt='PAIR', minmatch=90.0):
    """Ungapped exonerate alignment of qryseq against refseq.

    Returns the best-scoring ``--ryo`` record (list of string fields) whose
    percent identity is >= minmatch, or [] if no acceptable alignment exists.
    Requires the ``exonerate`` binary on PATH.
    """
    rnd = str(uuid4())

    tgtfa = 'tmp.' + rnd + '.tgt.fa'
    qryfa = 'tmp.' + rnd + '.qry.fa'

    # write single-record FASTA inputs for exonerate
    with open(tgtfa, 'w') as tgt, open(qryfa, 'w') as qry:
        tgt.write('>ref' + '\n' + refseq + '\n')
        qry.write('>qry' + '\n' + qryseq + '\n')

    cmd = ['exonerate', '--bestn', '1', '-m', 'ungapped', '--showalignment', '0',
           '--ryo', elt + '\t%s\t%qab\t%qae\t%tab\t%tae\t%pi\t%qS\t%tS\n', qryfa, tgtfa]

    best = []
    topscore = 0

    try:
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        for pline in p.stdout.readlines():
            pline = pline.decode()
            if pline.startswith(elt):
                c = pline.strip().split()
                # field 1 = raw score, field 6 = percent identity
                if int(c[1]) > topscore and float(c[6]) >= minmatch:
                    topscore = int(c[1])
                    best = c
        p.wait()  # reap the child so it does not linger as a zombie
    finally:
        # BUG FIX: previously the temp FASTA files were only removed on the
        # success path, leaking tmp.*.fa files whenever exonerate failed.
        os.remove(tgtfa)
        os.remove(qryfa)

    return best
def flip_ends(rec):
    """Swap every 5'-side field of *rec* with its 3'-side counterpart.

    Mutates *rec* in place and also returns it for convenience.
    """
    paired_keys = (
        ('5_Prime_End', '3_Prime_End'),
        ('Orient_5p', 'Orient_3p'),
        ('5p_Elt_Match', '3p_Elt_Match'),
        ('5p_Genome_Match', '3p_Genome_Match'),
        ('Split_reads_5prime', 'Split_reads_3prime'),
        ('5p_Cons_Len', '3p_Cons_Len'),
        ('5p_Improved', '3p_Improved'),
        ('TSD_5prime', 'TSD_3prime'),
        ('Genomic_Consensus_5p', 'Genomic_Consensus_3p'),
        ('Insert_Consensus_5p', 'Insert_Consensus_3p'),
    )

    for five_key, three_key in paired_keys:
        rec[five_key], rec[three_key] = rec[three_key], rec[five_key]

    return rec
def load_falib(infa):
    """Parse the FASTA file *infa* into a dict of sequence id -> sequence.

    The id is the first whitespace-delimited token of the header line.  A
    trailing record whose id was already seen earlier does not overwrite the
    earlier entry (matching the original behaviour).
    """
    seqs = {}
    current_id = None
    chunks = []

    with open(infa, 'r') as handle:
        for raw in handle:
            if raw.startswith('>'):
                # flush the previous record (if it accumulated any residues)
                joined = ''.join(chunks)
                if joined != '':
                    seqs[current_id] = joined
                current_id = raw.lstrip('>').strip().split()[0]
                chunks = []
            else:
                # sequence data must be preceded by a header line
                assert current_id is not None
                chunks.append(raw.strip())

    # flush the final record
    joined = ''.join(chunks)
    if current_id not in seqs and joined != '':
        seqs[current_id] = joined

    return seqs
def fix_ins_id(ins_id, inslib):
    """Resolve *ins_id* against the keys of *inslib* by matching subfamily.

    The subfamily (text after the last ':') is matched against the subfamily
    part of each library key; when found, that key's superfamily replaces the
    incoming one.  Returns 'superfamily:subfamily'.
    """
    # BUG FIX: main() passes a bare subfamily (no ':') when Superfamily is
    # 'NA'; the original unconditional two-value split raised ValueError in
    # that case.  Also split at most once so ids containing extra colons in
    # the subfamily survive.
    if ':' in ins_id:
        superfam, subfam = ins_id.split(':', 1)
    else:
        superfam = subfam = ins_id

    for key in inslib:
        if key.split(':')[-1] == subfam:
            superfam = key.split(':')[0]

    return '%s:%s' % (superfam, subfam)
def main(args):
    """Re-derive 5'/3' orientations for every insertion call in the table.

    For each record the genomic consensus of each end is aligned against both
    the insertion reference library and the local reference genome; agreement
    of those two alignment strands defines the corrected orientation.  Ends
    are flipped when their element coordinates are out of order, and the
    corrected table is written to stdout.
    """
    inslib = None
    if args.insref:
        inslib = load_falib(args.insref)

    ref = pysam.Fastafile(args.refgenome)

    header = []

    count_5p_diff = 0
    count_3p_diff = 0

    count_5p_switchcons = 0
    count_3p_switchcons = 0

    with open(args.table, 'r') as table:
        for i, line in enumerate(table):
            if i == 0:
                header = line.strip().split('\t')
                print(line.strip())
            else:
                rec = {}
                for n, field in enumerate(line.strip().split('\t')):
                    rec[header[n]] = field

                ins_id = '%s:%s' % (rec['Superfamily'], rec['Subfamily'])

                if rec['Superfamily'] == 'NA':
                    ins_id = rec['Subfamily']

                if rec['Subfamily'] == 'NA':
                    ins_id = rec['Superfamily']

                if ins_id not in inslib:
                    ins_id = fix_ins_id(ins_id, inslib)

                if ins_id not in inslib:
                    # logger.warn is a deprecated alias of logger.warning
                    logger.warning('No insertion identification for %s (ins_id %s)' % (rec['UUID'], ins_id))
                    continue

                refseq = ref.fetch(rec['Chromosome'], int(rec['Left_Extreme']), int(rec['Right_Extreme']))

                elt_5p_align = align(rec['Genomic_Consensus_5p'], inslib[ins_id])
                elt_3p_align = align(rec['Genomic_Consensus_3p'], inslib[ins_id])

                gen_5p_align = align(rec['Genomic_Consensus_5p'], refseq)
                gen_3p_align = align(rec['Genomic_Consensus_3p'], refseq)

                # try using the insertion-based consensus if no luck with the genomic one
                if not elt_5p_align or not gen_5p_align:
                    retry_elt_5p_align = align(rec['Insert_Consensus_5p'], inslib[ins_id])
                    retry_gen_5p_align = align(rec['Insert_Consensus_5p'], refseq)

                    if retry_gen_5p_align and retry_elt_5p_align:
                        elt_5p_align = retry_elt_5p_align
                        gen_5p_align = retry_gen_5p_align
                        count_5p_switchcons += 1

                if not elt_3p_align or not gen_3p_align:
                    retry_elt_3p_align = align(rec['Insert_Consensus_3p'], inslib[ins_id])
                    retry_gen_3p_align = align(rec['Insert_Consensus_3p'], refseq)

                    if retry_gen_3p_align and retry_elt_3p_align:
                        elt_3p_align = retry_elt_3p_align
                        gen_3p_align = retry_gen_3p_align
                        count_3p_switchcons += 1

                elt_5p_orient = 'NA'
                elt_3p_orient = 'NA'
                gen_5p_orient = 'NA'
                gen_3p_orient = 'NA'

                # the last --ryo field from align() is the alignment strand
                if elt_5p_align:
                    elt_5p_orient = elt_5p_align[-1]

                if elt_3p_align:
                    elt_3p_orient = elt_3p_align[-1]

                if gen_5p_align:
                    gen_5p_orient = gen_5p_align[-1]

                if gen_3p_align:
                    gen_3p_orient = gen_3p_align[-1]

                new_5p_orient = 'NA'
                new_3p_orient = 'NA'

                # matching strands vs. element and genome -> '+', else '-'
                if 'NA' not in (elt_5p_orient, gen_5p_orient):
                    if elt_5p_orient == gen_5p_orient:
                        new_5p_orient = '+'
                    else:
                        new_5p_orient = '-'

                if 'NA' not in (elt_3p_orient, gen_3p_orient):
                    if elt_3p_orient == gen_3p_orient:
                        new_3p_orient = '+'
                    else:
                        new_3p_orient = '-'

                coords_5p = []
                coords_3p = []

                if elt_5p_align:
                    coords_5p = sorted(map(int, (elt_5p_align[4], elt_5p_align[5])))

                if elt_3p_align:
                    coords_3p = sorted(map(int, (elt_3p_align[4], elt_3p_align[5])))

                # flip ends when 5p element coords lie beyond the 3p coords
                flip = False
                if coords_5p and coords_3p and coords_5p[1] > coords_3p[1]:
                    flip = True

                if rec['Orient_5p'] != new_5p_orient:
                    # BUG FIX: the counter was never incremented, so the
                    # summary log always reported 0 changed 5p ends.
                    count_5p_diff += 1
                    logger.info('Changed 5p orientation for %s' % rec['UUID'])

                if rec['Orient_3p'] != new_3p_orient:
                    # BUG FIX: same as above for the 3p counter.
                    count_3p_diff += 1
                    logger.info('Changed 3p orientation for %s' % rec['UUID'])

                rec['Orient_5p'] = new_5p_orient
                rec['Orient_3p'] = new_3p_orient

                if 'NA' not in (new_5p_orient, new_3p_orient) and 'None' not in (rec['Orient_5p'], rec['Orient_3p']):
                    if rec['Orient_5p'] != rec['Orient_3p']:
                        rec['Inversion'] = 'Y'
                    else:
                        rec['Inversion'] = 'N'
                else:
                    rec['Inversion'] = 'N'

                if flip:
                    rec = flip_ends(rec)

                out = [rec[h] for h in header]
                print('\t'.join(out))

    logger.info('Changed orientation on %d 5p ends' % count_5p_diff)
    logger.info('Changed orientation on %d 3p ends' % count_3p_diff)
    logger.info('Used insertion consensus for %d 5p ends' % count_5p_switchcons)
    logger.info('Used insertion consensus for %d 3p ends' % count_3p_switchcons)
if __name__ == '__main__':
    # Command-line entry point: all three inputs are mandatory.
    parser = argparse.ArgumentParser(description='foo')
    parser.add_argument('-t', '--table', required=True, help='tabular output from resolve.py, requires header to be present')
    parser.add_argument('-i', '--insref', required=True, help='insertion sequence reference')
    parser.add_argument('-r', '--refgenome', required=True, help='reference genome')
    args = parser.parse_args()

    main(args)
| adamewing/tebreak | scripts/misc/fixorient.py | Python | mit | 8,916 | [
"pysam"
] | af2de0d1cf5ee4aaebb5f28b791ff3671e5f2ba06774eb9bfa5b1b1e5df4ecae |
#!/usr/bin/env python
"""%prog [options] DENSITY
Generate the hopping trajectory from the DENSITY and the original
trajectory. DENSITY must be a pickle file produced by hop and it
should contain the bulk site; otherwise most other downstream analysis
makes little sense.
If ``-s topology`` and ``-f trajectory`` are left out then the values
for *topology* and *trajectory* stored in DENSITY are used; the
paths might be incorrect relative to the starting directory so it is
recommended to supply them on the command line.
"""
import os.path, errno
import MDAnalysis
import hop.sitemap, hop.trajectory
from hop.utilities import unlink_f, mkdir_p
import logging
logger = logging.getLogger('MDAnalysis.app.hop')
def generate_hoptraj_locally(topology, trajectory, density, filename, atomselection,
                             localcopy=False, **hopargs):
    """Generate a hopping trajectory from *density* and write it to *filename*.

    :Arguments:
       topology/trajectory - inputs for the MDAnalysis Universe
       density             - hop.sitemap.Density with a site map (>= 2 sites)
       atomselection       - MDAnalysis selection for the counted atoms
       localcopy           - copy the trajectory to a temp file first for
                             faster reads (removed again afterwards)
       hopargs             - passed through to HoppingTrajectory
    """
    def _generate_hoptraj(traj):
        try:
            if len(density.sites) < 2:
                raise ValueError
        # BUG FIX: the original used the Python 2 comma form
        # ``except AttributeError,ValueError:`` which only catches
        # AttributeError (binding the instance to the name ValueError); the
        # deliberately raised ValueError above escaped without being logged.
        except (AttributeError, ValueError):
            errmsg = 'The density misses a site map or has only one site.'
            logger.fatal(errmsg)
            raise ValueError(errmsg)
        u = MDAnalysis.Universe(topology, traj)
        group = u.select_atoms(atomselection)
        hops = hop.trajectory.HoppingTrajectory(u.trajectory, group, density, **hopargs)
        hops.write(filename)
        return hops

    if localcopy:
        # should probably also write locally and then copy back
        from tempfile import mkstemp
        from shutil import copy
        root, ext = os.path.splitext(trajectory)
        fd, tmptrajectory = mkstemp(suffix=ext)
        logger.info("Making local copy to improve read performance: %(trajectory)r --> %(tmptrajectory)r" % vars())
        try:
            copy(trajectory, tmptrajectory)
            hops = _generate_hoptraj(tmptrajectory)
        finally:
            # always clean up the local scratch copy
            unlink_f(tmptrajectory)
    else:
        hops = _generate_hoptraj(trajectory)
    return hops
if __name__ == "__main__":
import sys
from optparse import OptionParser
parser = OptionParser(usage=__doc__)
parser.add_option("-s", "--topology", dest="topology",
metavar="FILE",
help="topology to go with the trajectories; can be a PSF "
"PDB, GRO, or whatever else MDAnalysis accepts; default is to "
"try the path recorded in DENSITY")
parser.add_option("-f", "--trajectory", dest="trajectory",
metavar="FILE",
help="rms-fitted trajectory (default is to take the path recorded in DENSITY)")
parser.add_option("-A", "--atom-selection", dest="atomselection",
metavar="SELECTION",
help="MDAnalysis selection string to pick which atoms are being counted; "
"for water one typically chooses the water oxygen. The value depends crucially "
"on the atom names defined in the topology. The default is to use the value "
"recorded in DENSITY.")
parser.add_option("-o", "--output-name", dest="output",
metavar="FILENAME",
help="the hopping trajectory FILENAME.dcd and FILENAME.psf [%default]")
parser.add_option("-l", "--local-copy", dest="localcopy",
action='store_true',
help="copy trajectory to a temporary local disk for better read performance. "
"Requires sufficient space in TEMP.")
parser.set_defaults(topology=None, trajectory=None,
atomselection=None,
output="hoptraj")
opts,args = parser.parse_args()
MDAnalysis.start_logging()
if len(args) == 0:
logger.fatal("A pickled density with bulk site is required. See --help.")
sys.exit(1)
density = hop.sitemap.Density(filename=args[0])
if opts.topology:
topology = os.path.abspath(opts.topology)
else:
topology = os.path.abspath(density.metadata['psf'])
if not os.path.exists(topology):
errmsg = "Topology %(topology)r not found; (use --topology)" % vars()
logger.fatal(errmsg)
raise IOError(errno.ENOENT, errmsg)
if opts.trajectory:
trajectory = os.path.abspath(opts.trajectory)
else:
trajectory = os.path.abspath(density.metadata['dcd'])
if not os.path.exists(trajectory):
errmsg = "Trajectory %(trajectory)r not found; (use --trajectory)" % vars()
logger.fatal(errmsg)
raise IOError(errno.ENOENT, errmsg)
if opts.atomselection:
atomselection = opts.atomselection
else:
atomselection = density.metadata['atomselection']
#startdirectory = os.path.abspath(os.curdir)
#os.chdir(startdirectory)
logger.info("Generating hopping trajectory for density %r", args[0])
logger.debug("density = %r", args[0])
logger.debug("topology = %(topology)r", vars())
logger.debug("trajectory = %(trajectory)r", vars())
logger.debug("selection = %(atomselection)r", vars())
if not density.has_bulk():
raise ValueError("The density does not have a bulk site---insert one!")
hoptraj = generate_hoptraj_locally(topology, trajectory, density, opts.output,
atomselection, localcopy=opts.localcopy)
logger.info("Created hopping trajectory %(output)s.dcd with %(output)s.psf", vars(opts))
MDAnalysis.stop_logging()
| Becksteinlab/hop | scripts/hop-generate-hoptraj.py | Python | gpl-3.0 | 5,537 | [
"MDAnalysis"
] | 73ecd8bdeba29b2387be48e8e7c45c73cab7ef7a72f89e6caf530f39b5c9f6f3 |
#!/usr/bin/env python
#PBS -N NiC8
#PBS -e NiC8.err
#PBS -o NiC8.log
#PBS -m ae
#PBS -q verylong
#PBS -l nodes=4:ppn=8:xeon5570
from gpaw import *
from ase import *
from gpaw.transport.calculator import Transport
from ase.lattice.surface import fcc100
from gpaw.transport.tools import sort_atoms
# Load the Ni-molecule-Ni junction geometry and order atoms along the
# transport (z) axis as required by the Transport calculator.
system = read('NiC8.traj')
system.set_pbc([1,1,1])
sort_atoms(system)
system.center()

# Principal-layer cell: same xy cell as the scattering region, fixed z.
lead_cell = np.diag(system.cell)
lead_cell[2] = 7.040

# Atom index ranges of the left (0-35) and right (98-133) electrode layers.
pl_atoms1 = range(36)
pl_atoms2 = range(98,134)
pl_cell1 = lead_cell
pl_cell2 = pl_cell1

# Antiparallel initial spin configuration: +0.7 on the first 54 atoms,
# -0.7 on the last 54 (the central molecule stays unpolarised).
magmoms = np.zeros([134])
magmoms[:54]=0.7
magmoms[-54:] = -0.7
system.set_initial_magnetic_moments(magmoms)

# First pass: self-consistent transport run (coarse 2x2 k-mesh) to compute
# the current-voltage characteristic.
t = Transport( h=0.2,
               xc='RPBE',
               basis={'Ni': 'szp', 'H': 'szp', 'C': 'szp', 'S':'szp'},
               kpts=(2, 2, 1),
               occupations=FermiDirac(0.2),
               mode='lcao',
               txt='NiC8.txt',
               buffer_guess=True,
               lead_guess=True,
               spinpol=True,
               guess_steps=80,
               beta_guess=0.003,
               alpha=0.1,
               poissonsolver=PoissonSolver(nn=2),
               mixer=MixerSum(0.005, 5, weight=100.0),
               extra_density=True,
               pl_atoms=[pl_atoms1, pl_atoms2],
               pl_cells=[pl_cell1, pl_cell2],
               pl_kpts=[2 , 2 , 15],
               edge_atoms=[[ 0, 35],[0 , 133]],
               mol_atoms=range(36, 98),
               nleadlayers=[1,1])

system.set_calculator(t)
t.calculate_iv()

# Second pass: re-run in analysis_mode with a dense 12x12 k-mesh to
# post-process the converged bias step (transmission etc.).
t = Transport( h=0.2,
               xc='RPBE',
               basis={'Ni': 'szp', 'H': 'szp', 'C': 'szp', 'S':'szp'},
               kpts=(12, 12, 1),
               occupations=FermiDirac(0.2),
               parallel={'domain':(1,1,1)},
               mode='lcao',
               txt='NiC8.txt',
               buffer_guess=True,
               lead_guess=True,
               spinpol=True,
               guess_steps=80,
               beta_guess=0.003,
               alpha=0.1,
               poissonsolver=PoissonSolver(nn=2),
               mixer=MixerSum(0.005, 5, weight=100.0),
               extra_density=True,
               analysis_mode=True,
               pl_atoms=[pl_atoms1, pl_atoms2],
               pl_cells=[pl_cell1, pl_cell2],
               pl_kpts=[12 , 12 , 15],
               edge_atoms=[[ 0, 35],[0 , 133]],
               mol_atoms=range(36, 98),
               nleadlayers=[1,1])

system.set_calculator(t)
t.analysis(1)
| robwarm/gpaw-symm | doc/documentation/transport/spin_transport.py | Python | gpl-3.0 | 2,491 | [
"ASE",
"GPAW"
] | edd7177aead3970937261f12e30e689a672b3c33137871ed5b0edbe019505ef6 |
from django.db import models
from django.contrib.auth.models import User
from multiselectfield import MultiSelectField
class Doctor(models.Model):
    """A system user who attends patients; keyed 1:1 to the auth user."""

    # on_delete is mandatory on Django >= 2.0; CASCADE reproduces the old
    # implicit default, so existing behaviour is unchanged.
    user = models.OneToOneField(User, primary_key=True, on_delete=models.CASCADE)

    def __str__(self):
        return self.user.first_name + ' ' + self.user.last_name
class Patient(models.Model):
    """Patient demographic record; field labels are Spanish UI strings."""

    first_name = models.CharField("Nombre", max_length=200)
    last_name = models.CharField("Apellido", max_length=200)
    birth_date = models.DateField()
    # national identity document number
    ci = models.CharField("C.I.", max_length=50)
    occupation = models.CharField("Ocupacion", max_length=200)
    phone = models.CharField("Telefono", max_length=100)
    cellphone = models.CharField("Celular", max_length=100)
    # free-text medical background / antecedents
    background = models.CharField("Antecedentes", max_length=200)

    GENDER_CHOICES = (
        ('M', 'Masculino'),
        ('F', 'Femenino'),
    )
    gender = models.CharField("Sexo", max_length=1, choices=GENDER_CHOICES)

    def __str__(self):
        # "Last, First" ordering for admin lists and dropdowns
        return self.last_name + ', ' + self.first_name
class Visit(models.Model):
    """A single consultation of a patient by a doctor."""

    # on_delete is mandatory on Django >= 2.0; CASCADE reproduces the old
    # implicit default, so existing behaviour is unchanged.
    doctor = models.ForeignKey(Doctor, verbose_name="Doctor", on_delete=models.CASCADE)
    patient = models.ForeignKey(Patient, verbose_name="Paciente", on_delete=models.CASCADE)
    date = models.DateTimeField("Fecha")

    # Multi-select reasons/background for the visit (Spanish labels).
    BACKGROUND_CHOICES = (
        ('first_time', 'Primera Vez'),
        ('broken_lasses', 'Rotura de lentes'),
        ('lost_glasses', 'Extravio de Lentes'),
        ('uses_lc', 'Lentes de Contacto'),
        ('uses_jf', 'Usa solo L'),
        ('uses_jn', 'Usa solo C'),
        ('bifocales', 'Bifocales'),
        ('rgp', 'RGP'),
        ('astenopia', 'Astenopia'),
        ('progresive', 'Progresivas'),
        ('hidrofilas', 'Hidrofilas')
    )
    title = MultiSelectField("Consulta", choices=BACKGROUND_CHOICES, null=True)

    # Clinical notes: chief complaint, personal/family history, per-eye exam.
    mc = models.CharField("Motivo de la Consulta", max_length=2000, null=True)
    app = models.CharField("APP", max_length=2000, null=True)
    apf = models.CharField("APF", max_length=2000, null=True)
    od = models.CharField("OD", max_length=50, null=True)
    ol = models.CharField("OL", max_length=50, null=True)
    convergencia = models.CharField("Convergencia", max_length=50, null=True)
    eyere = models.CharField("Reflejos pupilares", max_length=100, null=True)
    comments = models.CharField(max_length=2000)
# Create your models here.
| Ronald-Tuco/Tuco | clinic/models.py | Python | mit | 2,296 | [
"VisIt"
] | fbf70c32fe35e6f346b57835d561932203c1109cb306710d4274e360f1a2c9df |
#!/usr/bin/env python
"""
Artificial Intelligence for Humans
Volume 2: Nature-Inspired Algorithms
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2014 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
============================================================================================================
This example uses crossover to combine two parent genomes to produce two children.
Both repeating and non-repeating splice are used.
Crossover Splice
Parent 1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
Parent 2: [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
Offspring 1: [1, 2, 3, 4, 5, 6, 4, 3, 2, 1]
Offspring 2: [10, 9, 8, 7, 6, 5, 7, 8, 9, 10]
Crossover Splice No Repeat
Parent 1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
Parent 2: [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
Offspring 1: [10, 3, 2, 4, 5, 6, 7, 8, 9, 1]
Offspring 2: [1, 8, 9, 7, 6, 5, 4, 3, 2, 10]
"""
import sys
import os

# Find the AIFH core files
aifh_dir = os.path.dirname(os.path.abspath(__file__))
aifh_dir = os.path.abspath(aifh_dir + os.sep + ".." + os.sep + "lib" + os.sep + "aifh")
sys.path.append(aifh_dir)

from genetic import *
from genetic import Genome
from genetic import Species

# Two mirror-image parent genomes; off collects the two children in place.
p1 = [ 1,2,3,4,5,6,7,8,9,10 ]
p2 = [ 10,9,8,7,6,5,4,3,2,1 ]
off = [[],[]]
pop = Population()

# Plain splice crossover: children may contain repeated genes.
print("Crossover Splice")
crossover_splice(pop, p1,p2,off)

print("Parent 1: " + str(p1))
print("Parent 2: " + str(p2))
print("Offspring 1: " + str(off[0]))
print("Offspring 2: " + str(off[1]))
print()

# Repeat-free splice crossover: children are permutations of the genes
# (useful for ordering problems such as TSP).
print("Crossover Splice No Repeat")
crossover_splice_no_repeat(pop, p1,p2,off)

print("Parent 1: " + str(p1))
print("Parent 2: " + str(p2))
print("Offspring 1: " + str(off[0]))
print("Offspring 2: " + str(off[1]))
"VisIt"
] | 1049ef7077d3452dbfcf95d87ab9cd0174113ea43448e615b1d7756decaa27f4 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2016 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
#from __future__ import absolute_import
#from psi4 import *
#import readline # optional, will allow Up/Down/History in the console
#import code
#def run():
# print_out("\nStarting interactive session.\n\n")
#
# vars = globals().copy()
# vars.update(locals())
# shell = code.InteractiveConsole(vars)
# shell.interact()
| kannon92/psi4 | psi4/driver/interactive.py | Python | gpl-2.0 | 1,283 | [
"Psi4"
] | e2bc53f1a7bd8da46c413bacf902fff267b9cbf0d8316a003b4169f6de77bb4f |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import io
import unittest
from tempfile import NamedTemporaryFile, mkdtemp
from os.path import exists, join
from shutil import rmtree
from uuid import uuid4
from skbio.util import (cardinal_to_ordinal, safe_md5, remove_files,
create_dir, find_duplicates, is_casava_v180_or_later)
from skbio.util._misc import (
_handle_error_codes, MiniRegistry, chunk_str, resolve_key)
class TestMiniRegistry(unittest.TestCase):
    """Tests for MiniRegistry: decoration, copying and doc interpolation."""

    def setUp(self):
        self.registry = MiniRegistry()

    def test_decoration(self):
        # registering via decorator stores the function under its key and
        # leaves the function itself callable/unwrapped
        self.assertNotIn("name1", self.registry)
        self.assertNotIn("name2", self.registry)
        self.n1_called = False
        self.n2_called = False

        @self.registry("name1")
        def some_registration1():
            self.n1_called = True

        @self.registry("name2")
        def some_registration2():
            self.n2_called = True

        self.assertIn("name1", self.registry)
        self.assertEqual(some_registration1, self.registry["name1"])
        self.assertIn("name2", self.registry)
        self.assertEqual(some_registration2, self.registry["name2"])

        self.registry["name1"]()
        self.assertTrue(self.n1_called)
        self.registry["name2"]()
        self.assertTrue(self.n2_called)

    def test_copy(self):
        # a copy is independent: registrations on the copy do not leak back
        @self.registry("name")
        def some_registration():
            pass

        new = self.registry.copy()
        self.assertIsNot(new, self.registry)

        @new("other")
        def other_registration():
            pass

        self.assertIn("name", self.registry)
        self.assertNotIn("other", self.registry)

        self.assertIn("other", new)
        self.assertIn("name", new)

    def test_everything(self):
        # interpolate() rewrites the target method's docstring in place,
        # inserting one "key\n  doc" entry per registered function; methods
        # not named in the interpolate() call are left untouched
        class SomethingToInterpolate:
            def interpolate_me():
                """First line
                Some description of things, also this:
                Other things are happening now.
                """

            def dont_interpolate_me():
                """First line
                Some description of things, also this:
                Other things are happening now.
                """

        class Subclass(SomethingToInterpolate):
            pass

        @self.registry("a")
        def a():
            """x"""

        @self.registry("b")
        def b():
            """y"""

        @self.registry("c")
        def c():
            """z"""

        subclass_registry = self.registry.copy()

        @subclass_registry("o")
        def o():
            """p"""

        self.registry.interpolate(SomethingToInterpolate, "interpolate_me")
        subclass_registry.interpolate(Subclass, "interpolate_me")

        self.assertEqual(SomethingToInterpolate.interpolate_me.__doc__,
                         "First line\n\n Some description of th"
                         "ings, also this:\n\n\t'a'\n\t x\n\t'b'\n\t y\n\t'c"
                         "'\n\t z\n\n Other things are happeni"
                         "ng now.\n ")
        self.assertEqual(SomethingToInterpolate.dont_interpolate_me.__doc__,
                         "First line\n\n Some description of th"
                         "ings, also this:\n\n Other things are"
                         " happening now.\n ")
        self.assertEqual(Subclass.interpolate_me.__doc__,
                         "First line\n\n Some description of th"
                         "ings, also this:\n\n\t'a'\n\t x\n\t'b'\n\t y\n\t'c"
                         "'\n\t z\n\t'o'\n\t p\n\n Other thin"
                         "gs are happening now.\n ")
        self.assertEqual(Subclass.dont_interpolate_me.__doc__,
                         "First line\n\n Some description of th"
                         "ings, also this:\n\n Other things are"
                         " happening now.\n ")
class ResolveKeyTests(unittest.TestCase):
    """Tests for resolve_key: callable keys vs. metadata index lookups."""

    def test_callable(self):
        # a callable key is simply applied to the object
        def func(x):
            return str(x)

        self.assertEqual(resolve_key(1, func), "1")
        self.assertEqual(resolve_key(4, func), "4")

    def test_index(self):
        # non-callable keys are looked up in the object's ``metadata``;
        # the property here makes the dict act as its own metadata
        class MetadataHaver(dict):
            metadata = {}

            @property
            def metadata(self):
                return self

        obj = MetadataHaver({'foo': 123})
        self.assertEqual(resolve_key(obj, 'foo'), 123)

        obj = MetadataHaver({'foo': 123, 'bar': 'baz'})
        self.assertEqual(resolve_key(obj, 'bar'), 'baz')

    def test_wrong_type(self):
        # objects without a metadata attribute are rejected
        with self.assertRaises(TypeError):
            resolve_key({'foo': 1}, 'foo')
class ChunkStrTests(unittest.TestCase):
    """Tests for chunk_str: splitting a string every n chars with a glue."""

    def test_even_split(self):
        self.assertEqual(chunk_str('abcdef', 6, ' '), 'abcdef')
        self.assertEqual(chunk_str('abcdef', 3, ' '), 'abc def')
        self.assertEqual(chunk_str('abcdef', 2, ' '), 'ab cd ef')
        self.assertEqual(chunk_str('abcdef', 1, ' '), 'a b c d e f')
        self.assertEqual(chunk_str('a', 1, ' '), 'a')
        self.assertEqual(chunk_str('abcdef', 2, ''), 'abcdef')

    def test_no_split(self):
        self.assertEqual(chunk_str('', 2, '\n'), '')
        self.assertEqual(chunk_str('a', 100, '\n'), 'a')
        self.assertEqual(chunk_str('abcdef', 42, '|'), 'abcdef')

    def test_uneven_split(self):
        # trailing chunk may be shorter than n
        self.assertEqual(chunk_str('abcdef', 5, '|'), 'abcde|f')
        self.assertEqual(chunk_str('abcdef', 4, '|'), 'abcd|ef')
        self.assertEqual(chunk_str('abcdefg', 3, ' - '), 'abc - def - g')

    def test_invalid_n(self):
        # n must be a positive chunk size
        with self.assertRaisesRegex(ValueError, 'n=0'):
            chunk_str('abcdef', 0, ' ')

        with self.assertRaisesRegex(ValueError, 'n=-42'):
            chunk_str('abcdef', -42, ' ')
class MiscTests(unittest.TestCase):
    """Tests for filesystem/utility helpers; uses real temp files and dirs."""

    def setUp(self):
        # directories created by tests are queued here for teardown removal
        self.dirs_to_remove = []

    def tearDown(self):
        for element in self.dirs_to_remove:
            rmtree(element)

    def test_is_casava_v180_or_later(self):
        self.assertFalse(is_casava_v180_or_later(b'@foo'))
        id_ = b'@M00176:17:000000000-A0CNA:1:1:15487:1773 1:N:0:0'
        self.assertTrue(is_casava_v180_or_later(id_))

        # ids must start with '@'
        with self.assertRaises(ValueError):
            is_casava_v180_or_later(b'foo')

    def test_safe_md5(self):
        exp = 'ab07acbb1e496801937adfa772424bf7'

        fd = io.BytesIO(b'foo bar baz')
        obs = safe_md5(fd)
        self.assertEqual(obs.hexdigest(), exp)

        fd.close()

    def test_remove_files(self):
        # create list of temp file paths
        test_fds = [NamedTemporaryFile(delete=False) for i in range(5)]
        test_filepaths = [element.name for element in test_fds]

        # should work just fine
        remove_files(test_filepaths)

        # check that an error is raised on trying to remove the files...
        self.assertRaises(OSError, remove_files, test_filepaths)

        # touch one of the filepaths so it exists
        extra_file = NamedTemporaryFile(delete=False).name
        test_filepaths.append(extra_file)

        # no error is raised on trying to remove the files
        # (although 5 don't exist)...
        remove_files(test_filepaths, error_on_missing=False)
        # ... and the existing file was removed
        self.assertFalse(exists(extra_file))

        # try to remove them with remove_files and verify that an IOError is
        # raises
        self.assertRaises(OSError, remove_files, test_filepaths)

        # now get no error when error_on_missing=False
        remove_files(test_filepaths, error_on_missing=False)

    def test_create_dir(self):
        # create a directory
        tmp_dir_path = mkdtemp()

        # create a random temporary directory name
        tmp_dir_path2 = join(mkdtemp(), str(uuid4()))
        tmp_dir_path3 = join(mkdtemp(), str(uuid4()))
        self.dirs_to_remove += [tmp_dir_path, tmp_dir_path2, tmp_dir_path3]

        # create on existing dir raises OSError if fail_on_exist=True
        self.assertRaises(OSError, create_dir, tmp_dir_path,
                          fail_on_exist=True)
        self.assertEqual(create_dir(tmp_dir_path, fail_on_exist=True,
                                    handle_errors_externally=True), 1)

        # return should be 1 if dir exist and fail_on_exist=False
        self.assertEqual(create_dir(tmp_dir_path, fail_on_exist=False), 1)

        # if dir not there make it and return always 0
        self.assertEqual(create_dir(tmp_dir_path2), 0)
        self.assertEqual(create_dir(tmp_dir_path3, fail_on_exist=True), 0)

    def test_handle_error_codes_no_error(self):
        # default path: no error flags -> code 0
        obs = _handle_error_codes('/foo/bar/baz')
        self.assertEqual(obs, 0)
class CardinalToOrdinalTests(unittest.TestCase):
    """Tests for cardinal_to_ordinal ('1' -> '1st', '2' -> '2nd', ...)."""

    def test_valid_range(self):
        # taken and modified from http://stackoverflow.com/a/20007730/3776794
        exp = ['0th', '1st', '2nd', '3rd', '4th', '5th', '6th', '7th', '8th',
               '9th', '10th', '11th', '12th', '13th', '14th', '15th', '16th',
               '17th', '18th', '19th', '20th', '21st', '22nd', '23rd', '24th',
               '25th', '26th', '27th', '28th', '29th', '30th', '31st', '32nd',
               '100th', '101st', '42042nd']
        obs = [cardinal_to_ordinal(n) for n in
               list(range(0, 33)) + [100, 101, 42042]]
        self.assertEqual(obs, exp)

    def test_invalid_n(self):
        # negative numbers are rejected and echoed in the error message
        with self.assertRaisesRegex(ValueError, '-1'):
            cardinal_to_ordinal(-1)
class TestFindDuplicates(unittest.TestCase):
    """Tests for find_duplicates over various iterable types."""

    def test_empty_input(self):
        def make_empty_gen():
            yield from ()

        # every flavour of empty iterable yields an empty duplicate set
        for empty_input in ([], (), '', set(), {}, make_empty_gen()):
            self.assertEqual(find_duplicates(empty_input), set())

    def test_no_duplicates(self):
        # comparison is case-sensitive: 'a' and 'A' are distinct
        self.assertEqual(find_duplicates(['a', 'bc', 'def', 'A']), set())

    def test_one_duplicate(self):
        self.assertEqual(find_duplicates(['a', 'bc', 'def', 'a']), {'a'})

    def test_many_duplicates(self):
        self.assertEqual(find_duplicates(['a', 'bc', 'bc', 'def', 'a']),
                         {'a', 'bc'})

    def test_all_duplicates(self):
        # multiplicity > 2 still reports each element once
        self.assertEqual(
            find_duplicates(('a', 'bc', 'bc', 'def', 'a', 'def', 'def')),
            {'a', 'bc', 'def'})

    def test_mixed_types(self):
        def mixed_gen():
            yield from ('a', 1, 'bc', 2, 'a', 2, 2, 3.0)

        self.assertEqual(find_duplicates(mixed_gen()), {'a', 2})
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| anderspitman/scikit-bio | skbio/util/tests/test_misc.py | Python | bsd-3-clause | 10,934 | [
"scikit-bio"
] | 493fb1685a7a759e06d697431c2a37c4c5e667bec3bac823668845f6f9529728 |
# Copyright (C) 2017,2018
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
****************************************
espressopp.integrator.PIAdressIntegrator
****************************************
The PIAdressIntegrator implements the integration method for Hamiltonian Adaptive Resolution Path Integral Simulations proposed in J. Chem. Phys 147, 244104 (2017) (PI-AdResS). It can be used to run path integral molecular dynamics as well as ring polymer and centroid molecular dynamics in a quantum-classical adaptive resolution fashion, using different empirical force fields. To facilitate an efficient integration, the integrator uses a 3-layer RESPA (J. Chem. Phys. 97, 1990 (1992)) multiple timestepping scheme (inner level: intraatomic spring forces between the Trotter beads. medium level: interatomic bonded forces. outer level: interatomic non-bonded forces). Importantly, the integrator should only be used in combination with PI-AdResS interactions. Furthermore, the integrator has its own thermostat (Langevin), and the only extensions that should be used with it are the Free Energy Compensation (FreeEnergyCompensation) and the Thermodynamic Force (TDforce).
Example:
>>> integrator = espressopp.integrator.PIAdressIntegrator(system, verletlist, timestep_short, timesteps_outerlevel, timesteps_centrallevel, nTrotter, realkinmass, constkinmass, temperature, gamma, centroidthermostat, CMDparameter, PILE, PILElambda, clmassmultiplier, speedupFreezeRings, KTI)
>>> ...
>>> integrator.run(nsteps)
.. function:: espressopp.integrator.PIAdressIntegrator(system, verletlist, timestep, sSteps, mSteps, nTrotter, realKinMass, constKinMass, temperature, gamma, centroidThermostat, CMDparameter, PILE, PILElambda, CLmassmultiplier, speedup, KTI)
Constructs the PIAdressIntegrator object. Note that all parameters can also be set and fetched via setter and getter functions. Additionally, all parameters except the system and the Verletlist are implemented as class variables that can be directly accessed and modified.
:param system: system object
:param verletlist: Verletlist object. Should be an AdResS Verletlist
:param timestep: (default: 0.0) the inner (shortest) timestep for the calculation of the intraatomic spring forces between the Trotter beads
:param sSteps: (default: 1) multiplier to construct medium timestep (interatomic bonded forces) as mediumstep = sSteps * timestep
:param mSteps: (default: 1) multiplier to construct longest timestep (interatomic non-bonded forces) as longstep = mSteps * sSteps * timestep
:param nTrotter: (default: 32) Trotter number. Should be even and greater than zero.
:param realKinMass: (default: True) Flag to choose whether to use real kinetic masses. If False, the higher modes' kinetic masses are multiplied with their corresponding eigenvalues of the normal mode transformation. In this way, all higher modes oscillate with the same frequency. If True, we use the kinetic masses for the higher modes which corresponding to the real dynamics (see J. Chem. Phys 147, 244104 (2017) for details)
:param constKinMass: (default: False) If False, the higher modes' kinetic masses also adaptively change (AKM scheme in J. Chem. Phys 147, 244104 (2017)). If True, the higher modes' kinetic masses are constant throughout the system (CKM scheme in J. Chem. Phys 147, 244104 (2017))
:param temperature: (default: 2.494353 - this corresponds to 300 Kelvin) the temperature in gromacs units (Boltzmann constant kb is 1)
:param gamma: (default: 1.0) the Langevin thermostat's friction parameter in 1/ps
:param centroidThermostat: (default: True) If True, the centroid mode is also thermostated, otherwise only the higher modes' (relevant for centroid molecular dynamics)
:param CMDparameter: (default: 1.0) The gamma^2 parameter used in centroid molecular dynamics. The higher modes' kinetic masses are rescaled by CMDparameter
:param PILE: (default: True) If True, the higher modes are thermostated according to the PILE scheme by Ceriotti et al. (J. Chem. Phys 133, 124104 (2010)). Only makes sense in combination when using real kinetic masses (realKinMass = True)
:param PILElambda: (default: 0.5) lambda parameter to rescale the friction matrix. Default should be good for most applications (J. Chem. Phys 140, 234116 (2014))
:param CLmassmultiplier: (default: 100.0) multiplier by which the higher modes' spring masses (if constKinMass = False also the kinetic masses) are increased in the classical region
:param speedup: (default: True) If True, the higher modes' are not integrated in the classical region and also the intraatomistic forces between the Trotter beads are not calculated in the classical region
:param KTI: (default: False) If True, the particles' resolution parameters and adaptive masses are not updated but can be set by hand everywhere. This is necessary when running Kirkwood Thermodynamic Integration (KTI)
:type system: shared_ptr<System>
:type verletlist: shared_ptr<VerletListAdress>
:type timestep: real
:type sSteps: int
:type mSteps: int
:type nTrotter: int
:type realKinMass: bool
:type constKinMass: bool
:type temperature: real
:type gamma: real
:type centroidThermostat: bool
:type CMDparameter: real
:type PILE: bool
:type PILElambda: real
:type CLmassmultiplier: real
:type speedup: bool
:type KTI: bool
.. function:: espressopp.integrator.PIAdressIntegrator.setVerletList(verletlist)
Sets the VerletList.
:param verletlist: The VerletListAdress object.
:type verletlist: espressopp.VerletListAdress
.. function:: espressopp.integrator.PIAdressIntegrator.getVerletList()
Gets the VerletList.
:return: the Adress VerletList
:rtype: shared_ptr<VerletListAdress>
.. function:: espressopp.integrator.PIAdressIntegrator.setTimeStep(timestep)
Sets the inner (shortest) timestep.
:param timestep: the inner timestep
:type timestep: real
.. function:: espressopp.integrator.PIAdressIntegrator.getTimeStep()
Gets the inner (shortest) timestep.
:return: the inner timestep
:rtype: real
.. function:: espressopp.integrator.PIAdressIntegrator.setsStep(sSteps)
Sets the multiplier to construct medium timestep (interatomic bonded forces) as mediumstep = sSteps * timestep.
:param sSteps: multiplier to construct medium timestep
:type sSteps: int
.. function:: espressopp.integrator.PIAdressIntegrator.getsStep()
Gets the multiplier to construct medium timestep (interatomic bonded forces) as mediumstep = sSteps * timestep.
:return: multiplier to construct medium timestep
:rtype: int
.. function:: espressopp.integrator.PIAdressIntegrator.setmStep(mSteps)
Sets the multiplier to construct longest timestep (interatomic non-bonded forces) as longstep = mSteps * sSteps * timestep.
:param mSteps: multiplier to construct longest timestep
:type mSteps: int
.. function:: espressopp.integrator.PIAdressIntegrator.getmStep()
Gets the multiplier to construct longest timestep (interatomic non-bonded forces) as longstep = mSteps * sSteps * timestep.
:return: multiplier to construct longest timestep
:rtype: int
.. function:: espressopp.integrator.PIAdressIntegrator.setNtrotter(nTrotter)
Sets the Trotter number nTrotter. Should be even and greater than zero. Note that when calling this function, also the normal mode transformation matrix and the eigenvalues are recalculated.
:param nTrotter: the Trotter number
:type nTrotter: int
.. function:: espressopp.integrator.PIAdressIntegrator.getNtrotter()
Gets the Trotter number nTrotter.
:return: the Trotter number
:rtype: int
.. function:: espressopp.integrator.PIAdressIntegrator.setRealKinMass(realKinMass)
Sets the real kinetic mass flag.
:param realKinMass: the real kinetic mass flag
:type realKinMass: bool
.. function:: espressopp.integrator.PIAdressIntegrator.getRealKinMass()
Gets the real kinetic mass flag.
:return: the real kinetic mass flag
:rtype: bool
.. function:: espressopp.integrator.PIAdressIntegrator.setConstKinMass(constKinMass)
Sets the constant kinetic mass flag.
:param constKinMass: the constant kinetic mass flag
:type constKinMass: bool
.. function:: espressopp.integrator.PIAdressIntegrator.getConstKinMass()
Gets the constant kinetic mass flag.
:return: the constant kinetic mass flag
:rtype: bool
.. function:: espressopp.integrator.PIAdressIntegrator.setTemperature(temperature)
Sets the temperature (gromacs units with kb = 1).
:param temperature: the temperature
:type temperature: real
.. function:: espressopp.integrator.PIAdressIntegrator.getTemperature()
Gets the temperature (gromacs units with kb = 1).
:return: the temperature
:rtype: real
.. function:: espressopp.integrator.PIAdressIntegrator.setGamma(gamma)
Sets the friction constant gamma (in 1/ps).
:param gamma: the friction constant gamma
:type gamma: real
.. function:: espressopp.integrator.PIAdressIntegrator.getGamma()
Gets the friction constant gamma (in 1/ps).
:return: the friction constant gamma
:rtype: real
.. function:: espressopp.integrator.PIAdressIntegrator.setCentroidThermostat(centroidThermostat)
Sets the centroid thermostat flag.
:param centroidThermostat: the centroid thermostat flag
:type centroidThermostat: bool
.. function:: espressopp.integrator.PIAdressIntegrator.getCentroidThermostat()
Gets the centroid thermostat flag.
:return: the centroid thermostat flag
:rtype: bool
.. function:: espressopp.integrator.PIAdressIntegrator.setCMDparameter(CMDparameter)
Sets the centroid molecular dynamics parameter gamma^2 for scaling the kinetic mass.
:param CMDparameter: the CMD parameter gamma^2
:type CMDparameter: real
.. function:: espressopp.integrator.PIAdressIntegrator.getCMDparameter()
Gets the centroid molecular dynamics parameter gamma^2 for scaling the kinetic mass.
:return: the CMD parameter gamma^2
:rtype: real
.. function:: espressopp.integrator.PIAdressIntegrator.setPILE(PILE)
Sets the PILE flag.
:param PILE: the PILE flag
:type PILE: bool
.. function:: espressopp.integrator.PIAdressIntegrator.getPILE()
Gets the PILE flag.
:return: the PILE flag
:rtype: bool
.. function:: espressopp.integrator.PIAdressIntegrator.setPILElambda(PILElambda)
Sets the scaling parameter lambda of the PILE thermostat.
:param PILElambda: the scaling parameter lambda
:type PILElambda: real
.. function:: espressopp.integrator.PIAdressIntegrator.getPILElambda()
Gets the scaling parameter lambda of the PILE thermostat.
:return: the scaling parameter lambda
:rtype: real
.. function:: espressopp.integrator.PIAdressIntegrator.setClmassmultiplier(CLmassmultiplier)
Sets the multiplier for the higher modes' spring masses in the classical region.
:param CLmassmultiplier: the classical spring mass multiplier
:type CLmassmultiplier: real
.. function:: espressopp.integrator.PIAdressIntegrator.getClmassmultiplier()
Gets the multiplier for the higher modes' spring masses in the classical region.
:return: the classical spring mass multiplier
:rtype: real
.. function:: espressopp.integrator.PIAdressIntegrator.setSpeedup(speedup)
Sets the speedup flag.
:param speedup: the speedup flag
:type speedup: bool
.. function:: espressopp.integrator.PIAdressIntegrator.getSpeedup()
Gets the speedup flag.
:return: the speedup flag
:rtype: bool
.. function:: espressopp.integrator.PIAdressIntegrator.setKTI(KTI)
Sets the KTI flag.
:param KTI: the KTI flag
:type KTI: bool
.. function:: espressopp.integrator.PIAdressIntegrator.getKTI()
Gets the KTI flag.
:return: the KTI flag
:rtype: bool
.. function:: espressopp.integrator.PIAdressIntegrator.getVerletlistBuilds()
Gets the number of Verletlist builds.
:return: number of Verletlist builds
:rtype: int
.. function:: espressopp.integrator.PIAdressIntegrator.computeRingEnergy()
Calculates the total configurational energy of all ring polymers in the system based on the springs between the Trotter beads (calculation done using mode coordinates).
:return: total configurational ring polymer energy
:rtype: real
.. function:: espressopp.integrator.PIAdressIntegrator.computeRingEnergyRaw()
Calculates the total configurational energy of all ring polymers in the system based on the springs between the Trotter beads (calculation done using the Trotter beads' real space positions).
:return: total configurational ring polymer energy
:rtype: real
.. function:: espressopp.integrator.PIAdressIntegrator.computeKineticEnergy()
Calculates the total kinetic energy using the modes' momenta.
:return: total kinetic energy
:rtype: real
.. function:: espressopp.integrator.PIAdressIntegrator.computePositionDrift(parttype)
Calculates the average drift force due to the position-dependent spring masses (see Section 5.C. Eq. 63 in J. Chem. Phys 147, 244104 (2017)) on particles of type parttype. To be used during KTI for construction of free energy compensation.
:param parttype: the particle or atom type
:type parttype: int
:return: average drift force due to the position-dependent spring masses
:rtype: real
.. function:: espressopp.integrator.PIAdressIntegrator.computeMomentumDrift(parttype)
Calculates the average drift force due to the position-dependent kinetic masses (see Section 5.C. Eq. 62 in J. Chem. Phys 147, 244104 (2017)) on particles of type parttype. To be used during KTI for construction of free energy compensation.
:param parttype: the particle or atom type
:type parttype: int
:return: average drift force due to the position-dependent kinetic masses
:rtype: real
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.integrator.MDIntegrator import *
from _espressopp import integrator_PIAdressIntegrator
import numpy as np
class PIAdressIntegratorLocal(MDIntegratorLocal, integrator_PIAdressIntegrator):
    """The (local) PIAdress Integrator.

    Worker-side implementation of the Hamiltonian adaptive resolution path
    integral integrator (J. Chem. Phys. 147, 244104 (2017)).  Every call into
    the C++ backend is guarded by ``_workerIsActive`` so that it only runs on
    MPI ranks that belong to the active PMI CPU group (or in serial runs).
    Parameter meanings are documented in the module docstring.
    """

    def _workerIsActive(self):
        # Standard espressopp PMI guard: cxx-level calls may only be issued
        # when PMI is inactive (serial execution) or this MPI rank is part of
        # the active CPU group.  Extracted into a helper because the same
        # expression was repeated in every method of this class.
        return (not (pmi._PMIComm and pmi._PMIComm.isActive()) or
                pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup())

    def __init__(self, system, verletlist, timestep=0.0, sSteps=1, mSteps=1,
                 nTrotter=32, realKinMass=True, constKinMass=False,
                 temperature=2.494353, gamma=1.0, centroidThermostat=True,
                 CMDparameter=1.0, PILE=True, PILElambda=0.5,
                 CLmassmultiplier=100.0, speedup=True, KTI=False):
        # Validate all numerical inputs up front, on every rank, so that bad
        # values fail fast with a clear message.
        if mSteps <= 0 or sSteps <= 0:
            raise ValueError('mSteps and sSteps must be larger than zero. Your inputs: mSteps={}, sSteps={}'.format(mSteps, sSteps))
        if nTrotter <= 0 or nTrotter % 2 != 0:
            raise ValueError('nTrotter must be even and larger than zero. Your input: {}'.format(nTrotter))
        if temperature < 0.0:
            raise ValueError('temperature must be larger or equal zero. Your input: {}'.format(temperature))
        if gamma < 0.0:
            raise ValueError('gamma must be larger or equal zero. Your input: {}'.format(gamma))
        if CMDparameter <= 0.0:
            raise ValueError('CMDparameter must be larger than zero. Your input: {}'.format(CMDparameter))
        if PILElambda < 0.0:
            raise ValueError('PILElambda must be larger or equal zero. Your input: {}'.format(PILElambda))
        if CLmassmultiplier <= 0.0:
            raise ValueError('CLmassmultiplier must be larger than zero. Your input: {}'.format(CLmassmultiplier))
        if self._workerIsActive():
            cxxinit(self, integrator_PIAdressIntegrator, system, verletlist)
            # The setters below re-check the PMI guard themselves; calling
            # them here mirrors the original initialization order.
            self.setTimeStep(timestep)
            self.setsStep(sSteps)
            self.setmStep(mSteps)
            self.setNtrotter(nTrotter)
            self.setRealKinMass(realKinMass)
            self.setConstKinMass(constKinMass)
            self.setTemperature(temperature)
            self.setGamma(gamma)
            self.setCentroidThermostat(centroidThermostat)
            self.setCMDparameter(CMDparameter)
            self.setPILE(PILE)
            self.setPILElambda(PILElambda)
            self.setClmassmultiplier(CLmassmultiplier)
            self.setSpeedup(speedup)
            self.setKTI(KTI)

    def setTimeStep(self, timestep):
        """Set the inner (shortest) timestep."""
        if self._workerIsActive():
            self.cxxclass.setTimeStep(self, timestep)

    def getTimeStep(self):
        """Return the inner (shortest) timestep (None on inactive ranks)."""
        if self._workerIsActive():
            return self.cxxclass.getTimeStep(self)

    def setsStep(self, sSteps):
        """Set the medium-timestep multiplier (mediumstep = sSteps * timestep)."""
        if sSteps <= 0:
            raise ValueError('sSteps must be larger than zero. Your input: {}'.format(sSteps))
        if self._workerIsActive():
            self.cxxclass.setsStep(self, sSteps)

    def getsStep(self):
        """Return the medium-timestep multiplier."""
        if self._workerIsActive():
            return self.cxxclass.getsStep(self)

    def setmStep(self, mSteps):
        """Set the long-timestep multiplier (longstep = mSteps * sSteps * timestep)."""
        if mSteps <= 0:
            raise ValueError('mSteps must be larger than zero. Your input: {}'.format(mSteps))
        if self._workerIsActive():
            self.cxxclass.setmStep(self, mSteps)

    def getmStep(self):
        """Return the long-timestep multiplier."""
        if self._workerIsActive():
            return self.cxxclass.getmStep(self)

    def setNtrotter(self, nTrotter):
        """Set the Trotter number and rebuild the normal mode transformation.

        Recomputes the eigenvalues and the transformation matrix of the
        normal mode transformation on the C++ side.
        """
        # Enforce evenness here as well: the constructor already requires an
        # even Trotter number, and the i == nTrotter/2 branch of the matrix
        # construction below is only meaningful for even nTrotter.
        if nTrotter <= 0 or nTrotter % 2 != 0:
            raise ValueError('nTrotter must be even and larger than zero. Your input: {}'.format(nTrotter))
        if self._workerIsActive():
            self.cxxclass.setNtrotter(self, nTrotter)
            # Eigenvalues of the normal mode transformation:
            # lambda_i = 4 * sin^2(pi * i / nTrotter)
            for i in range(0, nTrotter):
                self.cxxclass.addEigenValues(self, 4.0*np.sin(np.pi*i/nTrotter)*np.sin(np.pi*i/nTrotter))
            # Rows of the (transposed) orthogonal transformation matrix.
            # The branches on i are mutually exclusive for even nTrotter.
            for k in range(1, nTrotter + 1):
                for i in range(0, nTrotter):
                    if i == 0:
                        self.cxxclass.addEVcomponent(self, 1.0/np.sqrt(nTrotter))
                    elif i < nTrotter/2:
                        self.cxxclass.addEVcomponent(self, np.sqrt(2.0/nTrotter) * np.cos(2.0*np.pi*k*i/nTrotter))
                    elif i == nTrotter/2:
                        self.cxxclass.addEVcomponent(self, (1.0/np.sqrt(nTrotter)) * ((-1.0)**k))
                    else:
                        self.cxxclass.addEVcomponent(self, np.sqrt(2.0/nTrotter) * np.sin(2.0*np.pi*k*i/nTrotter))
                self.cxxclass.addTransposedEigenVector(self)
            self.cxxclass.transp(self)

    def getNtrotter(self):
        """Return the Trotter number."""
        if self._workerIsActive():
            return self.cxxclass.getNtrotter(self)

    def setTemperature(self, temperature):
        """Set the temperature (gromacs units, kb = 1)."""
        if temperature < 0.0:
            raise ValueError('temperature must be larger or equal zero. Your input: {}'.format(temperature))
        if self._workerIsActive():
            self.cxxclass.setTemperature(self, temperature)

    def getTemperature(self):
        """Return the temperature (gromacs units, kb = 1)."""
        if self._workerIsActive():
            return self.cxxclass.getTemperature(self)

    def setGamma(self, gamma):
        """Set the Langevin friction constant gamma (1/ps)."""
        if gamma < 0.0:
            raise ValueError('gamma must be larger or equal zero. Your input: {}'.format(gamma))
        if self._workerIsActive():
            self.cxxclass.setGamma(self, gamma)

    def getGamma(self):
        """Return the Langevin friction constant gamma (1/ps)."""
        if self._workerIsActive():
            return self.cxxclass.getGamma(self)

    def setCMDparameter(self, CMDparameter):
        """Set the CMD parameter gamma^2 used to rescale kinetic masses."""
        if CMDparameter <= 0.0:
            raise ValueError('CMDparameter must be larger than zero. Your input: {}'.format(CMDparameter))
        if self._workerIsActive():
            self.cxxclass.setCMDparameter(self, CMDparameter)

    def getCMDparameter(self):
        """Return the CMD parameter gamma^2."""
        if self._workerIsActive():
            return self.cxxclass.getCMDparameter(self)

    def setPILElambda(self, PILElambda):
        """Set the PILE thermostat scaling parameter lambda."""
        if PILElambda < 0.0:
            raise ValueError('PILElambda must be larger or equal zero. Your input: {}'.format(PILElambda))
        if self._workerIsActive():
            self.cxxclass.setPILElambda(self, PILElambda)

    def getPILElambda(self):
        """Return the PILE thermostat scaling parameter lambda."""
        if self._workerIsActive():
            return self.cxxclass.getPILElambda(self)

    def setClmassmultiplier(self, CLmassmultiplier):
        """Set the higher modes' spring-mass multiplier in the classical region."""
        if CLmassmultiplier <= 0.0:
            raise ValueError('CLmassmultiplier must be larger than zero. Your input: {}'.format(CLmassmultiplier))
        if self._workerIsActive():
            self.cxxclass.setClmassmultiplier(self, CLmassmultiplier)

    def getClmassmultiplier(self):
        """Return the classical-region spring-mass multiplier."""
        if self._workerIsActive():
            return self.cxxclass.getClmassmultiplier(self)

    def setSpeedup(self, speedup):
        """Set the speedup flag (skip higher-mode work in the classical region)."""
        if self._workerIsActive():
            self.cxxclass.setSpeedup(self, speedup)

    def getSpeedup(self):
        """Return the speedup flag."""
        if self._workerIsActive():
            return self.cxxclass.getSpeedup(self)

    def setKTI(self, KTI):
        """Set the KTI flag (freeze resolution parameters for KTI runs)."""
        if self._workerIsActive():
            self.cxxclass.setKTI(self, KTI)

    def getKTI(self):
        """Return the KTI flag."""
        if self._workerIsActive():
            return self.cxxclass.getKTI(self)

    def setCentroidThermostat(self, centroidThermostat):
        """Set whether the centroid mode is thermostated."""
        if self._workerIsActive():
            self.cxxclass.setCentroidThermostat(self, centroidThermostat)

    def getCentroidThermostat(self):
        """Return the centroid thermostat flag."""
        if self._workerIsActive():
            return self.cxxclass.getCentroidThermostat(self)

    def setPILE(self, PILE):
        """Set the PILE thermostat flag."""
        if self._workerIsActive():
            self.cxxclass.setPILE(self, PILE)

    def getPILE(self):
        """Return the PILE thermostat flag."""
        if self._workerIsActive():
            return self.cxxclass.getPILE(self)

    def setRealKinMass(self, realKinMass):
        """Set the real-kinetic-mass flag."""
        if self._workerIsActive():
            self.cxxclass.setRealKinMass(self, realKinMass)

    def getRealKinMass(self):
        """Return the real-kinetic-mass flag."""
        if self._workerIsActive():
            return self.cxxclass.getRealKinMass(self)

    def setConstKinMass(self, constKinMass):
        """Set the constant-kinetic-mass flag."""
        if self._workerIsActive():
            self.cxxclass.setConstKinMass(self, constKinMass)

    def getConstKinMass(self):
        """Return the constant-kinetic-mass flag."""
        if self._workerIsActive():
            return self.cxxclass.getConstKinMass(self)

    def setVerletList(self, verletlist):
        """Set the AdResS Verlet list."""
        if self._workerIsActive():
            self.cxxclass.setVerletList(self, verletlist)

    def getVerletList(self):
        """Return the AdResS Verlet list."""
        if self._workerIsActive():
            return self.cxxclass.getVerletList(self)

    def computeRingEnergy(self):
        """Return total ring-polymer spring energy (mode coordinates)."""
        if self._workerIsActive():
            return self.cxxclass.computeRingEnergy(self)

    def computeRingEnergyRaw(self):
        """Return total ring-polymer spring energy (real-space bead positions)."""
        if self._workerIsActive():
            return self.cxxclass.computeRingEnergyRaw(self)

    def computeKineticEnergy(self):
        """Return the total kinetic energy from the modes' momenta."""
        if self._workerIsActive():
            return self.cxxclass.computeKineticEnergy(self)

    def computePositionDrift(self, parttype):
        """Return the average drift force from position-dependent spring masses."""
        if self._workerIsActive():
            return self.cxxclass.computePositionDrift(self, parttype)

    def computeMomentumDrift(self, parttype):
        """Return the average drift force from position-dependent kinetic masses."""
        if self._workerIsActive():
            return self.cxxclass.computeMomentumDrift(self, parttype)

    def getVerletlistBuilds(self):
        """Return the number of Verlet list builds."""
        if self._workerIsActive():
            return self.cxxclass.getVerletlistBuilds(self)
if pmi.isController :
    # Controller-side proxy: attribute access and method calls are forwarded
    # through PMI to the PIAdressIntegratorLocal objects on the worker ranks.
    class PIAdressIntegrator(MDIntegrator):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            # Worker-side implementation class instantiated on each rank.
            cls = 'espressopp.integrator.PIAdressIntegratorLocal',
            # Attributes exposed as proxied properties on the controller.
            pmiproperty = ['timestep', 'sSteps', 'mSteps', 'nTrotter', 'gamma', 'CMDparameter', 'PILElambda', 'temperature', 'CLmassmultiplier', 'speedup', 'KTI', 'constKinMass', 'verletList', 'centroidThermostat', 'PILE', 'realKinMass', 'verletlistBuilds' ],
            # Methods forwarded to the workers.
            pmicall = ['setTimeStep', 'setmStep', 'setsStep', 'setNtrotter', 'setTemperature', 'setGamma', 'setCMDparameter', 'setPILElambda', 'setClmassmultiplier', 'setSpeedup', 'setKTI', 'setPILE', 'setRealKinMass', 'setCentroidThermostat', 'setConstKinMass', 'setVerletList', 'computeKineticEnergy', 'computeRingEnergy', 'computeRingEnergyRaw', 'computeMomentumDrift', 'computePositionDrift', 'getTimeStep', 'getmStep', 'getsStep', 'getNtrotter', 'getTemperature', 'getGamma', 'getCMDparameter', 'getPILElambda', 'getClmassmultiplier', 'getSpeedup', 'getKTI', 'getPILE', 'getRealKinMass', 'getCentroidThermostat', 'getConstKinMass', 'getVerletList', 'getVerletlistBuilds']
            )
| niktre/espressopp | src/integrator/PIAdressIntegrator.py | Python | gpl-3.0 | 28,813 | [
"ESPResSo",
"Gromacs"
] | 4f068149621d99372d6a2a9f561c68ffacd0dd75b1b1aad153caeb82353fcc5f |
from __future__ import print_function, absolute_import, division
import os
import sys
import time
import signal
import traceback
from socket import gethostname
from getpass import getuser
from datetime import datetime
from six import iteritems
from six.moves import cStringIO
from sqlalchemy import func
from sklearn.base import clone, BaseEstimator
import numpy as np
from . import __version__
from .config import Config
from .trials import Trial
from .fit_estimator import fit_and_score_estimator
from .utils import Unbuffered, format_timedelta, current_pretty_time
from .utils import is_msmbuilder_estimator, num_samples
def execute(args, parser):
    """Entry point for the osprey worker command.

    Runs ``args.n_iters`` rounds of: ask the search strategy for new
    hyperparameters, record a PENDING trial in the database, then fit/score
    the model and record the outcome.  ``parser`` is unchanged here;
    presumably kept for the CLI dispatch signature — TODO confirm.
    """
    start_time = datetime.now()
    # Unbuffer stdout so progress is visible even when output is redirected.
    sys.stdout = Unbuffered(sys.stdout)
    # Load the config file and extract the fields
    print_header()
    config = Config(args.config)
    estimator = config.estimator()
    searchspace = config.search_space()
    strategy = config.strategy()
    config_sha1 = config.sha1()
    scoring = config.scoring()
    project_name = config.project_name()
    if is_msmbuilder_estimator(estimator):
        print_msmbuilder_version()
    print('\nLoading dataset...\n')
    X, y = config.dataset()
    print('Dataset contains %d element(s) with %s labels'
          % (num_samples(X), 'out' if y is None else ''))
    # Show the shape of (at most) the first 20 elements; non-array elements
    # are summarized by their sample count.
    print('The elements have shape: [%s' %
          ', '.join([str(X[i].shape)
                     if isinstance(X[i], (np.ndarray, np.generic))
                     else '(%s,)' % num_samples(X[i])
                     for i in range(min(num_samples(X), 20))]), end='')
    print(', ...]' if (num_samples(X) > 20) else ']')
    print('Instantiated estimator:')
    print(' %r' % estimator)
    print(searchspace)
    # set up cross-validation
    cv = config.cv(X, y)
    statuses = [None for _ in range(args.n_iters)]
    # install a signal handler to print the footer before exiting
    # from sigterm (e.g. PBS job kill)
    def signal_hander(signum, frame):
        print_footer(statuses, start_time, signum)
        sys.exit(1)
    signal.signal(signal.SIGTERM, signal_hander)
    for i in range(args.n_iters):
        print('\n' + '-'*70)
        print('Beginning iteration %50s' % ('%d / %d' % (i+1, args.n_iters)))
        print('-'*70)
        # Ask the strategy for parameters and record a PENDING trial row.
        trial_id, params = initialize_trial(
            strategy, searchspace, estimator, config_sha1=config_sha1,
            project_name=project_name,
            sessionbuilder=config.trialscontext)
        # Fit/score the model and record SUCCEEDED/FAILED on the trial row.
        s = run_single_trial(
            estimator=estimator, params=params, trial_id=trial_id,
            scoring=scoring, X=X, y=y, cv=cv,
            sessionbuilder=config.trialscontext)
        statuses[i] = s
    print_footer(statuses, start_time)
def initialize_trial(strategy, searchspace, estimator, config_sha1,
                     project_name, sessionbuilder):
    """Suggest the next parameter set and record a PENDING Trial row.

    Returns ``(trial_id, params)`` where ``params`` includes all estimator
    parameters (suggested plus class defaults), filtered of nested
    estimators.
    """
    with sessionbuilder() as session:
        # requery the history every iteration, because another worker
        # process may have written to it in the mean time
        history = [[t.parameters, t.test_scores, t.status]
                   for t in session.query(Trial).all()
                   if t.project_name == project_name]
        print('History contains: %d trials' % len(history))
        print('Choosing next hyperparameters with %s...' % strategy.short_name)
        start = time.time()
        params = strategy.suggest(history, searchspace)
        print(' %r' % params)
        print('(%s took %.3f s)\n' % (strategy.short_name,
                                      time.time() - start))
        assert len(params) == searchspace.n_dims
        # make sure we get _all_ the parameters, including defaults on the
        # estimator class, to save in the database
        params = clone(estimator).set_params(**params).get_params()
        # Drop nested estimator objects (and pipeline 'steps'), which are not
        # serializable as plain trial parameters.
        params = dict((k, v) for k, v in iteritems(params)
                      if not isinstance(v, BaseEstimator) and
                      (k != 'steps'))
        t = Trial(status='PENDING', parameters=params, host=gethostname(),
                  user=getuser(), started=datetime.now(),
                  config_sha1=config_sha1)
        session.add(t)
        # Commit immediately so other workers see this trial as PENDING.
        session.commit()
        trial_id = t.id
    return trial_id, params
def run_single_trial(estimator, params, trial_id, scoring, X, y, cv,
                     sessionbuilder):
    """Fit and score one model, recording the outcome on its Trial row.

    Returns the final trial status string ('SUCCEEDED' or 'FAILED').  On
    KeyboardInterrupt/SystemExit the trial is marked FAILED and the process
    exits with status 1.
    """
    status = None
    try:
        score = fit_and_score_estimator(
            estimator, params, cv=cv, scoring=scoring, X=X, y=y, verbose=1)
        with sessionbuilder() as session:
            trial = session.query(Trial).get(trial_id)
            trial.mean_test_score = score['mean_test_score']
            trial.mean_train_score = score['mean_train_score']
            trial.test_scores = score['test_scores']
            trial.train_scores = score['train_scores']
            trial.n_test_samples = score['n_test_samples']
            trial.n_train_samples = score['n_train_samples']
            trial.status = 'SUCCEEDED'
            # SQL MAX() over previous trials yields (None,) when no earlier
            # trial has a score; guard before comparing, since max() with
            # None raises TypeError on Python 3.
            best_so_far = session.query(
                func.max(Trial.mean_test_score)).first()
            if best_so_far is None or best_so_far[0] is None:
                best_score = trial.mean_test_score
            else:
                best_score = max(trial.mean_test_score, best_so_far[0])
            print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
            print('Success! Model score = %f' % trial.mean_test_score)
            print('(best score so far  = %f)' %
                  best_score)
            print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
            trial.completed = datetime.now()
            trial.elapsed = trial.completed - trial.started
            session.commit()
            status = trial.status
    except Exception:
        # Record the traceback on the trial row so failures can be inspected
        # from the database; the worker then moves on to the next trial.
        buf = cStringIO()
        traceback.print_exc(file=buf)
        with sessionbuilder() as session:
            trial = session.query(Trial).get(trial_id)
            trial.traceback = buf.getvalue()
            trial.status = 'FAILED'
            print('-'*78, file=sys.stderr)
            # Keep the whole error report on stderr (the message line was
            # previously sent to stdout while its separators went to stderr).
            print('Exception encountered while fitting model',
                  file=sys.stderr)
            print('-'*78, file=sys.stderr)
            traceback.print_exc(file=sys.stderr)
            print('-'*78, file=sys.stderr)
            session.commit()
            status = trial.status
    except (KeyboardInterrupt, SystemExit):
        # Mark the trial FAILED before exiting so it is not left PENDING.
        with sessionbuilder() as session:
            trial = session.query(Trial).get(trial_id)
            trial.status = 'FAILED'
            session.commit()
        sys.exit(1)
    return status
def print_header():
    """Print the osprey banner plus version, time, host, cwd and pid."""
    rule = '=' * 70
    print(rule)
    print('= osprey is a tool for machine learning '
          'hyperparameter optimization. =')
    print(rule)
    print()
    runtime_info = [
        ('osprey version', __version__),
        ('time', current_pretty_time()),
        ('hostname', gethostname()),
        ('cwd', os.path.abspath(os.curdir)),
        ('pid', os.getpid()),
    ]
    for label, value in runtime_info:
        print('%s: %s' % (label, value))
    print()
def print_msmbuilder_version():
    """Report the msmbuilder and mdtraj versions in use."""
    # Imported lazily: only called when the estimator is an msmbuilder one.
    from msmbuilder.version import full_version as msmb_version
    from mdtraj.version import full_version as mdtraj_version
    for line in ('',
                 'msmbuilder version: %s' % msmb_version,
                 'mdtraj version: %s' % mdtraj_version,
                 ''):
        print(line)
def print_footer(statuses, start_time, signum=None):
    """Summarize the worker run: success count, timing, and any fatal signal.

    Parameters
    ----------
    statuses : iterable of str
        Per-trial status strings; 'SUCCEEDED' entries are counted.
    start_time : datetime
        When the worker started, used to compute the elapsed time.
    signum : int, optional
        Signal number that interrupted the worker, if any.
    """
    succeeded = sum(1 for status in statuses if status == 'SUCCEEDED')
    elapsed = format_timedelta(datetime.now() - start_time)
    print()
    if signum is not None:
        # Invert signal.__dict__ so a signal number maps back to its SIG* name.
        number_to_name = dict((num, name)
                              for name, num in iteritems(signal.__dict__)
                              if name.startswith('SIG'))
        signame = number_to_name.get(signum, 'Unknown')
        print('== osprey worker received signal %s!' % signame,
              file=sys.stderr)
        print('== exiting immediately.', file=sys.stderr)
    print('%d/%d models fit successfully.' % (succeeded, len(statuses)))
    print('time: %s' % current_pretty_time())
    print('elapsed: %s.' % elapsed)
    print('osprey worker exiting.')
| cxhernandez/osprey | osprey/execute_worker.py | Python | apache-2.0 | 7,839 | [
"MDTraj"
] | d0f7bf4cc8a4a95f8054b154890247401fa33edd356f24c9ff58c9a80f92664e |
#!/usr/bin/env python
"""Search for inlined CPP macros in ABINIT src files"""
from __future__ import unicode_literals, division, print_function, absolute_import
__author__ = "M. Giantomassi"
import os
import re
import sys
from abirules_tools import find_src_dirs
# Files that will be checked.
# Raw string: "\." in a plain literal is an invalid escape sequence
# (SyntaxWarning on modern Python, a future error).
re_srcfile = re.compile(r"\.([Ff]|[Ff]90|finc)$")

def is_srcfile(dirpath, fname):
    """Return True if fname looks like a Fortran source file (.f/.F/.f90/.F90/.finc).

    dirpath is currently unused but kept so callers can pass (directory, name)
    pairs uniformly.
    """
    return re_srcfile.search(fname) is not None
# List of macros that should not be inlined with if statements or other instructions.
# These macros are defined in src/incs/abi_common.h
#
MACRO_NAMES = [
    "ABI_ALLOCATE",
    "ABI_DEALLOCATE",
    "ABI_STAT_ALLOCATE",
    "ABI_STAT_DEALLOCATE",
    "ABI_DATATYPE_ALLOCATE",
    "ABI_DATATYPE_DEALLOCATE",
    "ABI_STAT_DATATYPE_ALLOCATE",
    "ABI_STAT_DATATYPE_DEALLOCATE",
    "ABI_MALLOC",
    "ABI_STAT_MALLOC",
    "ABI_DT_MALLOC",
    "ABI_STAT_DT_MALLOC",
    "ABI_CALLOC",
    "ABI_STAT_CALLOC",
    "ABI_FREE",
    "ABI_STAT_FREE",
    "ABI_CHECK_ALLOC",
]

# Regular expressions for each macro: the macro name followed by an argument
# list.  A single blank before the parenthesis is tolerated by the regex here,
# although abirules does not permit blanks between the name and "(".
regexps = {}
for name in MACRO_NAMES:
    # Raw string: "\(" in a plain literal is an invalid escape sequence.
    regexps[name] = re.compile(name + r" ?\(.*\)")

def wrong_string(string):
    """Return an empty string if the input line does not inline a CPP macro.

    A line is flagged when, after removing the macro call itself, some
    non-comment code remains, e.g. ``if (allocated(arr)) ABI_DEALLOCATE(arr)``.
    The leftover text is returned so the caller can report it.

    Bug fix: the original version returned "" from an ``else`` branch inside
    the loop, so any line that did not contain the *first* macro name was
    accepted without checking the remaining macros.  All macros are now
    checked before giving up.
    """
    string = string.strip()
    if string.startswith("!"):
        # The whole line is a Fortran comment.
        return ""
    for macro_name in MACRO_NAMES:
        if macro_name in string:
            leftover = regexps[macro_name].sub("", string).strip()
            if leftover and not leftover.startswith("!"):
                return leftover
    return ""
def main():
    """Walk every ABINIT source tree and report inlined CPP macro usage.

    Returns the number of violations found; the caller uses it as the
    process exit status, so 0 means the check passed.
    """
    print("-------------------------------------------------------")
    print(" Searching for inlined CPP macros in ABINIT src files ")
    print("-------------------------------------------------------")
    exit_status = 0
    for top in find_src_dirs():
        assert os.path.isdir(top)
        for dirpath, dirnames, files in os.walk(top):
            for src in files:
                # Only Fortran source files are inspected.
                if not is_srcfile(dirpath, src): continue
                fpath = os.path.join(dirpath,src)
                with open(fpath, "rt") as fh:
                    for lno, line in enumerate(fh):
                        s = wrong_string(line)
                        if s:
                            # lno is 0-based; report 1-based line numbers.
                            print("(INLINED MACRO at %s:%d): %s " % (src, lno+1, line))
                            exit_status += 1
    if exit_status > 0:
        # %(MACRO_NAMES)s below is interpolated from module globals().
        err_msg = """
Please, avoid instructions like:
if (allocated(arr)) ABI_DEALLOCATE(arr)
When the code is compiled in profile mode, indeed, ABI_DEALLOCATE expands to
the set of Fortran instructions:
deallocate(arr)
call memocc_abi()
These instructions MUST be placed inside an "if then" "end if" block.
This limitation can be lifted, but we need support at the level of the build system.
For the time being, one has to use the more verbose form:
if (allocated(arr)) then
ABI_DEALLOCATE(arr)
end if
This is the list of macros that cannot be inlined:
%(MACRO_NAMES)s
""" % globals()
        print(err_msg)
    return exit_status
if __name__ == "__main__":
sys.exit(main())
| abinit/abinit | abichecks/scripts/check_inlined_macros.py | Python | gpl-3.0 | 3,029 | [
"ABINIT"
] | 16403fa927ab083681354581e2bdc5e69b0c7cc040212e7d76dead53187b6f3e |
#!/usr/bin/env python2
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Convert Android xml resources to API 14 compatible.
There are two reasons that we cannot just use API 17 attributes,
so we are generating another set of resources by this script.
1. paddingStart attribute can cause a crash on Galaxy Tab 2.
2. There is a bug that paddingStart does not override paddingLeft on
JB-MR1. This is fixed on JB-MR2.
Therefore, this resource generation script can be removed when
we drop the support for JB-MR1.
Please refer to http://crbug.com/235118 for the details.
"""
import optparse
import os
import re
import shutil
import sys
import xml.dom.minidom as minidom
from util import build_utils
# Note that we are assuming 'android:' is an alias of
# the namespace 'http://schemas.android.com/apk/res/android'.
GRAVITY_ATTRIBUTES = ('android:gravity', 'android:layout_gravity')

# Almost all the attributes that has "Start" or "End" in
# its name should be mapped.
ATTRIBUTES_TO_MAP = {'paddingStart' : 'paddingLeft',
                     'drawableStart' : 'drawableLeft',
                     'layout_alignStart' : 'layout_alignLeft',
                     'layout_marginStart' : 'layout_marginLeft',
                     'layout_alignParentStart' : 'layout_alignParentLeft',
                     'layout_toStartOf' : 'layout_toLeftOf',
                     'paddingEnd' : 'paddingRight',
                     'drawableEnd' : 'drawableRight',
                     'layout_alignEnd' : 'layout_alignRight',
                     'layout_marginEnd' : 'layout_marginRight',
                     'layout_alignParentEnd' : 'layout_alignParentRight',
                     'layout_toEndOf' : 'layout_toRightOf'}

# Prefix every key/value with the 'android:' namespace alias.
# .items() (not the Python-2-only .iteritems()) behaves identically here
# and keeps the script importable under Python 3 as well.
ATTRIBUTES_TO_MAP = dict(('android:' + k, 'android:' + v) for k, v
                         in ATTRIBUTES_TO_MAP.items())

# Reverse mapping, used to detect deprecated left/right attributes.
ATTRIBUTES_TO_MAP_REVERSED = dict((v, k) for k, v
                                  in ATTRIBUTES_TO_MAP.items())
def IterateXmlElements(node):
    """minidom helper that yields every element node, pre-order depth-first."""
    stack = [node]
    while stack:
        current = stack.pop()
        if current.nodeType == current.ELEMENT_NODE:
            yield current
        # Push children right-to-left so the leftmost child is visited first,
        # preserving pre-order traversal.
        stack.extend(reversed(current.childNodes))
def AssertNotDeprecatedAttribute(name, value, filename):
    """Raise if the given attribute is deprecated (non-RTL-aware)."""
    problem = None
    if name in ATTRIBUTES_TO_MAP_REVERSED:
        # e.g. android:paddingLeft should be android:paddingStart.
        problem = '{0} should use {1} instead of {2}'.format(
            filename, ATTRIBUTES_TO_MAP_REVERSED[name], name)
    elif name in GRAVITY_ATTRIBUTES and ('left' in value or 'right' in value):
        problem = '{0} should use start/end instead of left/right for {1}'.format(
            filename, name)
    if problem is None:
        return
    problem += ('\nFor background, see: http://android-developers.blogspot.com/'
                '2013/03/native-rtl-support-in-android-42.html\n'
                'If you have a legitimate need for this attribute, discuss with '
                'kkimlabs@chromium.org or newt@chromium.org')
    raise Exception(problem)
def WriteDomToFile(dom, filename):
    """Serialize `dom` as indented UTF-8 XML at `filename`, creating dirs."""
    build_utils.MakeDirectory(os.path.dirname(filename))
    with open(filename, 'w') as out:
        dom.writexml(out, '', '  ', '\n', encoding='utf-8')
def HasStyleResource(dom):
    """Return True if the dom is a style resource, False otherwise.

    A style resource is a <resources> document that contains at least one
    <style> element.
    """
    # next() builtin instead of the Python-2-only generator .next() method;
    # behaves identically on Python 2.6+ and keeps the code Python-3-safe.
    root_node = next(IterateXmlElements(dom))
    return bool(root_node.nodeName == 'resources' and
                list(root_node.getElementsByTagName('style')))
def ErrorIfStyleResourceExistsInDir(input_dir):
    """If a style resource is in input_dir, raises an exception."""
    for xml_path in build_utils.FindInDirectory(input_dir, '*.xml'):
        # Style resources must live in a *-v17 qualified directory.
        if HasStyleResource(minidom.parse(xml_path)):
            raise Exception('error: style file ' + xml_path +
                            ' should be under ' + input_dir +
                            '-v17 directory. Please refer to '
                            'http://crbug.com/243952 for the details.')
def GenerateV14LayoutResourceDom(dom, filename, assert_not_deprecated=True):
    """Convert layout resource to API 14 compatible layout resource.

    Args:
      dom: Parsed minidom object to be modified.
      filename: Filename that the DOM was parsed from.
      assert_not_deprecated: Whether deprecated attributes (e.g. paddingLeft)
        will cause an exception to be thrown.

    Returns:
      True if dom is modified, False otherwise.
    """
    modified = False
    for node in IterateXmlElements(dom):
        # Snapshot the attribute list, since it is mutated while iterating.
        for attr_name, attr_value in list(node.attributes.items()):
            # Map API 17 Start/End attributes to their Left/Right
            # equivalents, e.g. paddingStart="10dp" -> paddingLeft="10dp".
            # Gravity attributes need no conversion because start/end
            # gravity values are backward-compatible; see
            # https://plus.sandbox.google.com/+RomanNurik/posts/huuJd8iVVXY?e=Showroom
            replacement = ATTRIBUTES_TO_MAP.get(attr_name)
            if replacement is not None:
                node.setAttribute(replacement, attr_value)
                del node.attributes[attr_name]
                modified = True
            elif assert_not_deprecated:
                AssertNotDeprecatedAttribute(attr_name, attr_value, filename)
    return modified
def GenerateV14StyleResourceDom(dom, filename, assert_not_deprecated=True):
    """Convert style resource to API 14 compatible style resource.

    Args:
      dom: Parsed minidom object to be modified.
      filename: Filename that the DOM was parsed from.
      assert_not_deprecated: Whether deprecated attributes (e.g. paddingLeft)
        will cause an exception to be thrown.

    Returns:
      True if dom is modified, False otherwise.
    """
    modified = False
    for style_node in dom.getElementsByTagName('style'):
        for item_node in style_node.getElementsByTagName('item'):
            item_name = item_node.attributes['name'].value
            item_value = item_node.childNodes[0].nodeValue
            replacement = ATTRIBUTES_TO_MAP.get(item_name)
            if replacement is not None:
                item_node.attributes['name'].value = replacement
                modified = True
            elif assert_not_deprecated:
                AssertNotDeprecatedAttribute(item_name, item_value, filename)
    return modified
def GenerateV14LayoutResource(input_filename, output_v14_filename,
                              output_v17_filename):
    """Convert API 17 layout resource to API 14 compatible layout resource.

    Mostly a simple replacement, s/Start/Left s/End/Right, on attribute
    names.  If the generated resource is identical to the original, nothing
    is written.  Otherwise the converted DOM goes to output_v14_filename and
    the untouched original is copied to output_v17_filename.
    """
    dom = minidom.parse(input_filename)
    if not GenerateV14LayoutResourceDom(dom, input_filename):
        # Already v14-compatible; no output needed.
        return
    WriteDomToFile(dom, output_v14_filename)
    build_utils.MakeDirectory(os.path.dirname(output_v17_filename))
    shutil.copy2(input_filename, output_v17_filename)
def GenerateV14StyleResource(input_filename, output_v14_filename):
    """Convert an API 17 style resource to an API 14 compatible one.

    Mostly a simple replacement, s/Start/Left s/End/Right, on attribute
    names; the converted resource is always written to output_v14_filename.
    """
    style_dom = minidom.parse(input_filename)
    GenerateV14StyleResourceDom(style_dom, input_filename)
    WriteDomToFile(style_dom, output_v14_filename)
def GenerateV14LayoutResourcesInDir(input_dir, output_v14_dir, output_v17_dir):
    """Convert layout resources to API 14 compatible resources in input_dir."""
    for source_path in build_utils.FindInDirectory(input_dir, '*.xml'):
        relative = os.path.relpath(source_path, input_dir)
        GenerateV14LayoutResource(source_path,
                                  os.path.join(output_v14_dir, relative),
                                  os.path.join(output_v17_dir, relative))
def GenerateV14StyleResourcesInDir(input_dir, output_v14_dir):
    """Convert style resources to API 14 compatible resources in input_dir."""
    for source_path in build_utils.FindInDirectory(input_dir, '*.xml'):
        relative = os.path.relpath(source_path, input_dir)
        GenerateV14StyleResource(source_path,
                                 os.path.join(output_v14_dir, relative))
def VerifyV14ResourcesInDir(input_dir, resource_type):
    """Verify that the resources in input_dir are compatible with v14, i.e.
    that they don't use attributes that cause crashes on certain devices.
    Raises an exception if they do.
    """
    for source_path in build_utils.FindInDirectory(input_dir, '*.xml'):
        exception_message = ('error : ' + source_path + ' has an RTL attribute, '
                             'i.e., attribute that has "start" or "end" in its name.'
                             ' Pre-v17 resources should not include it because it '
                             'can cause crashes on certain devices. Please refer to '
                             'http://crbug.com/243952 for the details.')
        dom = minidom.parse(source_path)
        # Run the converters in non-asserting mode: a True result means the
        # file contains RTL attributes that would have needed conversion.
        if resource_type in ('layout', 'xml'):
            would_change = GenerateV14LayoutResourceDom(dom, source_path, False)
        elif resource_type == 'values':
            would_change = GenerateV14StyleResourceDom(dom, source_path, False)
        else:
            would_change = False
        if would_change:
            raise Exception(exception_message)
def AssertNoDeprecatedAttributesInDir(input_dir, resource_type):
    """Raises an exception if resources in input_dir have deprecated
    attributes, e.g., paddingLeft, paddingRight."""
    for source_path in build_utils.FindInDirectory(input_dir, '*.xml'):
        dom = minidom.parse(source_path)
        # Run the converters in asserting mode; they raise on deprecated
        # attributes and the converted DOM is simply discarded.
        if resource_type in ('layout', 'xml'):
            GenerateV14LayoutResourceDom(dom, source_path)
        elif resource_type == 'values':
            GenerateV14StyleResourceDom(dom, source_path)
def ParseArgs():
    """Parses command line options.

    Returns:
      An options object as from optparse.OptionsParser.parse_args()

    Raises:
      SystemExit (via parser.error) if positional arguments are given or a
      required option is missing.
    """
    parser = optparse.OptionParser()
    parser.add_option('--res-dir',
                      help='directory containing resources '
                           'used to generate v14 compatible resources')
    parser.add_option('--res-v14-compatibility-dir',
                      help='output directory into which '
                           'v14 compatible resources will be generated')
    parser.add_option('--stamp', help='File to touch on success')
    parser.add_option('--verify-only', action="store_true", help='Do not generate'
                      ' v14 resources. Instead, just verify that the resources are already '
                      "compatible with v14, i.e. they don't use attributes that cause crashes "
                      'on certain devices.')
    options, args = parser.parse_args()
    if args:
        # This tool is option-driven only.
        parser.error('No positional arguments should be given.')
    # Check that required options have been provided.
    # (optparse maps --res-dir to options.res_dir, and so on.)
    required_options = ('res_dir', 'res_v14_compatibility_dir')
    build_utils.CheckOptions(options, parser, required=required_options)
    return options
def main(argv):
    """Entry point: regenerate (or verify) v14-compatible Android resources.

    For every resource subdirectory of --res-dir, either verifies that its
    contents are v14-safe (--verify-only) or writes converted copies into
    --res-v14-compatibility-dir.
    """
    options = ParseArgs()
    # Start from a clean output directory.
    build_utils.DeleteDirectory(options.res_v14_compatibility_dir)
    build_utils.MakeDirectory(options.res_v14_compatibility_dir)
    for name in os.listdir(options.res_dir):
        if not os.path.isdir(os.path.join(options.res_dir, name)):
            continue
        # Resource dirs look like 'layout-land-v17': type, then qualifiers.
        dir_pieces = name.split('-')
        resource_type = dir_pieces[0]
        qualifiers = dir_pieces[1:]
        # Locate an API-level qualifier such as 'v17', if present.
        api_level_qualifier_index = -1
        api_level_qualifier = ''
        for index, qualifier in enumerate(qualifiers):
            if re.match('v[0-9]+$', qualifier):
                api_level_qualifier_index = index
                api_level_qualifier = qualifier
                break
        # Android pre-v17 API doesn't support RTL. Skip.
        if 'ldrtl' in qualifiers:
            continue
        input_dir = os.path.abspath(os.path.join(options.res_dir, name))
        if options.verify_only:
            # Dirs without an API qualifier (or below v17) must be v14-safe;
            # v17+ dirs must not use the deprecated left/right attributes.
            if not api_level_qualifier or int(api_level_qualifier[1:]) < 17:
                VerifyV14ResourcesInDir(input_dir, resource_type)
            else:
                AssertNoDeprecatedAttributesInDir(input_dir, resource_type)
        else:
            # We also need to copy the original v17 resource to *-v17 directory
            # because the generated v14 resource will hide the original resource.
            output_v14_dir = os.path.join(options.res_v14_compatibility_dir, name)
            output_v17_dir = os.path.join(options.res_v14_compatibility_dir, name +
                                          '-v17')
            # We only convert layout resources under layout*/, xml*/,
            # and style resources under values*/.
            if resource_type in ('layout', 'xml'):
                if not api_level_qualifier:
                    GenerateV14LayoutResourcesInDir(input_dir, output_v14_dir,
                                                    output_v17_dir)
            elif resource_type == 'values':
                if api_level_qualifier == 'v17':
                    output_qualifiers = qualifiers[:]
                    del output_qualifiers[api_level_qualifier_index]
                    # Strip the v17 qualifier: values-xx-v17 -> values-xx.
                    output_v14_dir = os.path.join(options.res_v14_compatibility_dir,
                                                  '-'.join([resource_type] +
                                                           output_qualifiers))
                    GenerateV14StyleResourcesInDir(input_dir, output_v14_dir)
                elif not api_level_qualifier:
                    ErrorIfStyleResourceExistsInDir(input_dir)
    if options.stamp:
        build_utils.Touch(options.stamp)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| MirakelX/old-mirakel-android | scripts/generate_v14_compatible_resources.py | Python | gpl-3.0 | 13,800 | [
"Galaxy"
] | dc2b606170e7c4e8b0f45bd7950b65f32c44174d445966488467ef6e398674a5 |
import discord
from discord.ext import commands
import random
def setup(bot):
    # Standard discord.py extension entry point: register the Claptrap cog.
    bot.add_cog(Claptrap(bot))
class Claptrap(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def claptrap(self, ctx):
"""Can I shoot something now? Or climb some stairs? SOMETHING exciting?"""
claptraps = [
"Hey everybody! Check out my package!",
"Let's get this party started!",
"Glitching weirdness is a term of endearment, right?",
"Recompiling my combat code!",
"This time it'll be awesome, I promise!",
"Look out everybody! Things are about to get awesome!",
"Health! Eww, what flavor is red?",
"Health over here!",
"Sweet life juice!",
"I found health!",
"Healsies!",
"Where'd all my bullets go?",
"Bullets are dumb.",
"Who needs ammo anyway, am I right?",
"I need tiny death pellets!",
"Need some ammo!",
"Dangit, I'm out!",
"Ammo reserves are spent!",
"Crap, no more shots left!",
"Hnngh! Empty!",
"Coming up empty!",
"Wheeeee!",
"Yahooooo!",
"Aaaaaaahhh!",
"Watch as we observe the rare and beautiful Clappy Bird!",
"I'm flying! I'm really flying!",
"Look out below!",
"Yipe!",
"Yikes!",
"Yeehaw!",
"Hyah!",
"Heyyah!",
"Take that!",
"Bop!",
"Badass!",
"Badass?! Aaahhh!",
"Look out, a Badass!",
"RUN FOR YOUR LIIIIIVES!!!",
"Oh, he's big... REALLY big!",
"Scary Badass dude, over there!",
"Oh no, Badass!",
"Save me from the Badass!",
"Psst! Ad-ass-bay, over ere-bay!",
"That guy looks an awful lot like a Badass!",
"Step right up, to the Bulletnator 9000!",
"I am a tornado of death and bullets!",
"Stop me before I kill again, except don't!",
"Hehehehe, mwaa ha ha ha, MWAA HA HA HA!",
"I'm on a roll!",
"Unts unts unts unts!",
"Ha ha ha! Fall before your robot overlord!",
"Can't touch this!",
"Ha! Keep 'em coming!",
"There is no way this ends badly!",
"This is why I was built!",
"You call yourself a badass?",
"Wow, did I really do that?",
"Is it dead? Can, can I open my eyes now?",
"I didn't panic! Nope, not me!",
"Not so tough after all!",
"One down, any other takers?",
"I have gaskets tougher than you!",
"That was me! I did that!",
"Like running over a bug!",
"That was a close one!",
"Don't tell me *that* wasn't awesome!",
"Ha ha ha! Suck it!",
"Wait, did I really do that?",
"Holy moly!",
"'Nade out!",
"Grenade!",
"Grenaaaade!",
"Hot potato!",
"Pull pin, throw!",
"Take that!",
"Throwing grenade!",
"Bad guy go boom!",
"Eat bomb, baddie!",
"Present for you!",
"Aww! Now I want a snow cone.",
"Take a chill pill!",
"Cryo me a river!",
"Freeze! I don't know why I said that.",
"Don't cryo!",
"Frigid.",
"Solid! Get it? As in... frozen?",
"Icely done.",
"You're a tiny glacier!",
"Frozen and doh-zen.",
"Freeze, in the reference of emotion!",
"Freezy peezy!",
"My assets... frozen!",
"I can't feel my fingers! Gah! I don't have any fingers!",
"Too cold... can't move!",
"I am a robot popsicle!",
"Brrh... So cold... brrh...",
"Metal gears... frozen solid!",
"*Why* do I even *feel* pain?!",
"Why did they build me out of galvanized flesh?!",
"Ow hohoho, that hurts! Yipes!",
"My robotic flesh! AAHH!",
"Yikes! Ohhoho!",
"Woah! Oh! Jeez!",
"If only my chassis... weren't made of recycled human body parts! Wahahaha!",
"Pop pop!",
"Crit-i-cal!",
"*That* looks like it hurts!",
"WOW! I hit 'em!",
"Extra ouch!",
"Shwing!",
"Flesh fireworks!",
"Oh, quit falling to pieces.",
"Is that what people look like inside?",
"Ooh, squishy bits!",
"Meat confetti!",
"Huh, robot's don't do that.",
"Exploded!",
"Eww! Cool.",
"Heh heh heh, squishy bits!",
"Disgusting. I love it!",
"Personfetti.",
"There is now gunk on my chassis.",
"Oooh! Gigabits!",
"Ooooh! Terrabits!",
"Meatsplosion!",
"This time it'll be awesome, I promise!",
"Hey everybody, check out my package!",
"Place your bets!",
"Defragmenting!",
"Recompiling my combat code!",
"Running the sequencer!",
"It's happening... it's *happening*!",
"It's about to get magical!",
"I'm pulling tricks outta my hat!",
"You can't just program this level of excitement!",
"What will he do next?",
"Things are about to get awesome!",
"Let's get this party started!",
"Glitchy weirdness is term of endearment, right?",
"Push this button, flip this dongle, voila! Help me!",
"square the I, carry the 1... YES!",
"Resequencing combat protocols!",
"Look out everybody, things are about to get awesome!",
"I have an IDEA!",
"Round and around and around she goes!",
"It's like a box of chocolates...",
"Step right up to the sequence of Trapping!",
"Hey everybody, check out my package!",
"Loading combat packages!",
"F to the R to the 4 to the G to the WHAAT!",
"I'm a sexy dinosaur! Rawr!",
"Oh god I can't stop!",
"Don't ask me where this ammo's coming from!",
"If I had veins, they'd be popping out right now!",
"(unintelligible snarling)",
"It's the only way to stop the voices!",
"This was a *reeeally* bad idea!",
"I AM ON FIRE!!! OH GOD, PUT ME OUT!!!",
"I'm cloaking...",
"Shoot him... he's the real one...",
"I'm a robot ninja...",
"I'm invisible!",
"Mini-trap, pretend you're a Siren!",
"Aww, I should've drawn tattoos on you!",
"Burn them, my mini-phoenix!",
"All burn before the mighty Siren-trap!",
"Calm down!",
"It's time to *phase* you suckers out!",
"Tell me I'm the prettiest!",
"Hack the planet!",
"Activating good cop mode...",
"To the skies, mini-trap!",
"Fly mini-trap! Fly!",
"I have *two* robot arms!",
"Punch 'em in the face, mini-trap!",
"Anarchy and mini-trap and awesomeness, oh my!",
"Ratattattattatta! Powpowpowpow! Powpowpowpow! Pew-pew, pew-pew-pewpew!",
"Score one for the turret-trap!",
"Mini-trap on the field!",
"100% more mini-trap turret!",
"I'm going commando!",
"Boiyoiyoiyoiyoing!",
"Zing! Bullet reflection!",
"I am rubber, and you are *so* dead!",
"I'm a superball!",
"Trouncy, flouncy... founcy... those aren't words.",
"For you...I commit...seddoku...",
"The robot is dead, long live the robot!",
"Go on without me!",
"Don't forget me!",
"Love bullets!",
"Never fear, sugar!",
"Nurse Clap is here!",
"Poof, all better, doll!",
"Sugar, this won't hurt a bit!",
"Take these, gorgeous, you'll feel better!",
"Some days, you just can't get rid of an obscure pop-culture reference.",
"Here, take this!",
"Oh darn, oh boy, oh crap, oh boy, oh darn.",
"Gotta blow up a bad guy, GOTTA BLOW UP A BAD GUY!",
"Uh, how do I cast magic missile?",
"Do *not* look behind my curtain!",
"I'm made of magic!",
"You can call me Gundalf!",
"Avada kedavra!",
"Kill, reload! *Kill, reload!* *KILL! RELOAD!*",
"Like those guys who made only one song ever.",
"All these bullets in just one shot.",
"One shot, make it count!",
"A whole lotta bullets in just one trigger pull!",
"Boogie time!",
"Laaasers! ",
"Psychedelic, man! ",
"One for you, one for you, one for you!",
"It's time for my free grenade giveaway!",
"How many ways can I say... THROWING GRENADE?!",
"Grenade confetti!",
"I brought you a present: EXPLOSIONS!",
"Avast ye scurvy dogs!",
"Is this really canon?",
"Time to get swabby!",
"I feel a joke about poop decks coming on!",
"Hard to port whine!",
"I'll stop talking when I'm dead!",
"I'll die the way I lived: annoying!",
"Come back here! I'll gnaw your legs off!",
"This could've gone better!",
"You look like something a skag barfed up!",
"What's that smell? Oh wait, it's just *you*!",
"Yo momma's *so* dumb, she couldn't think of a good ending for this 'yo momma' joke!",
"You're one screw short of a screw!",
"I bet your mom could do better!",
"You look like something a skag barfed up!",
"Oh yeah? Well, uh... yeah.",
"What is that smell? Oh, never mind... it's just you!",
"I'm leaking!",
"Good thing I don't have a soul!",
"Aww!",
"Aww! Come on!",
"You can't kill me!",
"I'm too pretty to die!",
"Crap!",
"Robot down!",
"No, nononono NO!",
"I'll never go back to the bad place!",
"I have many regrets!",
"Can I just say... yeehaw.",
"You are ace high!",
"You're the wub to my dub!",
"Hahaha... I ascend!",
"Ha ha ha! I LIVE! Hahaha!",
"Hahahahaha! I'm alive!",
"Good, I didn't want any spare parts!",
"Wow, that actually worked?",
"You can't keep a good 'bot down!",
"I'm back! Woo!",
"Holy crap, that worked?",
"Better lucky than good!",
"Back for more!",
"Here we go again!",
"So... does this make me your favorite?",
"What are YOU doing down here?",
"We're like those buddies in that one show!",
"This is no time to be lazy!",
"You can thank me later!",
"You love me, right?",
"You, me... keeping on... together?",
"I will save you!",
"Up you go!",
"We're like those buddies in that one show!",
"You versus me! Me versus you! Either way!",
"I will prove to you my robotic superiority!",
"Dance battle! Or, you know... regular battle.",
"Man versus machine! Very tiny streamlined machine!",
"Care to have a friendly duel?",
"I can take ya! ... I think.",
"Ow, what was that for?",
"Oh, it's on now!",
"You wanna fight with me?! Put 'em up! ... Put 'em up?",
"A million baddies, and you wanna hit me? Aww!",
"Now? But I... I just... okay...",
"Aw yeah!",
"Woohoo! In your face!",
"Who's a badass robot? This guy!",
"I am so impressed with myself!",
"Ha ha, this is in no way surprising! Ha ha!",
"NOOO!",
"Poop.",
"I'll get you next time!",
"No fair! I wasn't ready.",
"You got me!",
"Argh arghargh death gurgle gurglegurgle urgh... death.",
"Oh well.",
"Crap happens.",
"So, it's a draw, eh?",
"Until we meet again on the battlefield, friendo!",
"What? No way, I totally had you!",
"Wow, who say that coming?",
"Yay! We both win!",
"Is this any good? 'Cause it looks awesome!",
"Mine!",
"I'm rich!",
"Oooh, shiny!",
"Phat loots!",
"That is some sweet lookin' stuff!",
"Check me out!",
"Now I will dominate!",
"I'm so sexy!",
"Roadkill!",
"I am NOT sorry!",
"Did someone feel something?",
"Don't bother with plastic surgery - there's NO fixing that!",
"Does this thing have whindshield wipers?",
"Uh... wasn't me!",
"Did you scratch the paint?",
"My bad?",
"Speedbump much?",
"Honk honk!",
"Didn't see you there.",
"Well, this is awkward.",
"Sorry! Sorry!",
"Oh crap.",
"Get outta the way!",
"Didn't see you there!",
"Move please!",
"Woah Nelly!",
"Woah!",
"Switch with me... uh, please?",
"Let's switch!",
"Can we change seats?",
"Let me try!",
"Change places!",
"Shiela! Noooo!",
"She's ready to blow!",
"I don't know how much longer I can hold on!",
"Is it warm in here, or is it just me?",
"You have served me well, car.",
"Shield me, maiden!",
"Nice shield, maiden!",
"Go get them Athena!",
"That is *so* hot!",
"I am right behind you, Vault Hunting friend!",
"Nice minions!",
"So, uh... what OS does your drone use?",
"Annihilate them for breakfast, Willy!",
"I can do that to! ... Sorta... Except not.",
"Go Wilhelm and company!",
"They're in for a moon of pain!",
"Did you fire six shots, or five?",
"You jerks have NO idea what you're in for!",
"I'm so glad I'm not one of those guys right now!",
"Sling those guns, girl!",
"YOU! ARE! SCARY!",
"Kill 'em, Nisha! Kill 'em dead!",
"This is going to suck for those guys!",
"Bringing down the law, painfully!",
"Nice one, Jack!",
"You really can double your fun!",
"No WAY those guys will know who's who!",
"That is in no way disturbing.",
"Confused, then abused!",
"That is SO cool!",
"Ice see what you did there!",
"Oh my gosh, a challenge!",
"I did a challenge? I did a challenge!",
"Glad I didn't mess that up.",
"I feel... complete! ... That's weird.",
"I actually did something right for once!",
"This, or that...?",
"What's the difference?",
"Perhaps I should test one out first.",
"Hmmm...",
"Maybe this one?",
"So many choices!",
"What to install next?",
"I must be a rogue, 'cause there are so many skills!",
"Hmmm, the possibilities are an infinite recursion.",
"Do any of these come with a new paint job?",
"What else can I do?",
"Skill-icious! Why did I just say that?",
"Which of these gives me my free will back?",
"Parallel and series!",
"GPS calibrated.",
"What's that arrow? Oh, wait! That's me!",
"Um, where am I?",
"Everything's upside down!",
"Where to go next?",
"Shaken, not stirred",
"The moon is not enough!",
"I'm Trap, Claptrap. Double oh... Trap.",
"I expect you to die!",
"I'd do anything for a woman with a gun.",
"In yo' FACE!",
"Get ready for some Fragtrap face time!",
"Chk-chk, BOOM!",
"You're listening to 'Short-Range Damage Radio.'",
"Up close and personal.",
"I'm a tornado of death and bullets!",
"Get off my lawn!",
"Back in my day...",
"At least I still have my teeth!",
"Coffee? Black... like my soul.",
"Crazy young whippersnappers...",
"Take two bullets, then call me in the morning.",
"Now you're sorted!",
"Snoiped!",
"Crack shot!",
"You're brown bread!",
"So amazes with every guns!",
"For I spy... somethin'.",
"This is why you do your homework!",
"Pain school is now in session",
"Guess who?",
"Meet professor punch!",
"Ready for the PUNCHline?!",
"Make my day.",
"Gimme your best shot.",
"Hit me, baby!",
"Ya feeling lucky, punk?",
"Enterrrrr the CHAMPION!",
"Why do I feel radioactive!?",
"Armor soak increased!",
"Ladies looove a tough guy!",
"Insert Juggernaut quote or pun here.",
"I am Fire, I am Death!",
"Burn, baby, burn!",
"Remember, use caution near an open flame!",
"Sizzlin'!",
"Give me your princesses!",
"Da, da da da! It's electric!",
"I'm rubbing my wheel on the carpet!",
"I've finally got an electric personality!",
"Shocking, isn't it?",
"Lightening! Kukachow!",
"Zippity doodah!",
"Wait, this isn't vegetable juice!",
"Something eating you?",
"Gammier than a pumpkin!",
"Time to melt some faces.",
"I'm a mean, green, acid machine!",
"Sip-a-green! Zzzz!",
"Know what killed the baddies? The *Ice* age.",
"The ice-bot cometh.",
"Ice to meet you.",
"Lets kicksome ice.",
"Ooh! Pretty!",
"Things are exploded and... stuff.",
"Take that! And that... and that...",
"Now with extra kapow!",
"Looks like some of my *awesome* rubbed off!",
"Cool! Now we're both super-crazy-amazing!",
"Take this in return!",
"Here you go, chum!",
"These are the best kind of cooties!",
"Get away from me!",
"Eww, get lost!",
"Do I smell funny?",
"Ah! Get 'em away!",
"Scram!",
"Do I smell funny?",
"Coolant, vented!",
"Welcome to the Jam!",
"Ah... Much better!",
"Smells like Pina Coladas!",
"Frost exhaust!",
"Hyperiooooon Punch!",
"YES!",
"Show me what you got",
"Gloves are comin' off!",
"Stinging like a butterfly!",
"One, two... PUNCH!",
"Punching time!",
"Gloves are coming off!",
"One, two punch",
"Sting like a butterfly!",
"Secret handshake!",
"Up top!",
"Gimme five!",
"High five!",
"Up top!",
"We're best friends!",
"Still counts!",
"I'll take what I can get!",
"Close enough!",
"Better than nothing!",
"(Dejected whistling.)",
"I feel like an idiot now.",
"Yeah! Single-player bonus!",
"I must look *really* stupid right now!",
"Aww, way to leave me hanging, friend.",
"Don't you like me?",
"(Sobbing) I just want to be loved!",
"I'm a Pandoracorn's butthole!",
"I fart rainbows!",
"Bask in my aura of death!",
"Did you guys see that?!",
"Can I shoot something now? Or climb some stairs? SOMETHING exciting?",
"Times like these, I really start to question the meaning of my existence. Then I get distra-hey! What's this? This looks cool!",
"It would really stink if I couldn't control what I was thinking. Like, who wants to know that I'm thinking about cheese and lint, right?",
"How does math work? Does this skin make me look fat? If a giraffe and a car had a baby, would it be called a caraffe? Life's big questions, man. ",
"Who needs memories when I can do all this cool stuff? Stuff that I currently am not doing! That's what I'd like to call a 'hint'.",
"Does this mean I can start dancing? Pleeeeeeaaaaase?",
"Ya know when there was that Vault monster scare? I had these friends, and boy times sure were scary! But, I didn't care because I had friends, and they were like... super-friends! And then they left me, but they saved the world and I was like 'I know those guys!' Even though they never came back after that I still knew they cared, because no one had ever been... nice to me before. ... What is this? My eye is like... leaking.",
"It's really quiet... and lonely... (hums briefly) Also this 'stopped moving' thing makes me uncomfortable. It gives me time to stop and think... literally. I'VE STOPPED, AND I'M THINKING! IT HURTS ME!",
"Oh. My. God. What if I'm like... a fish? And, if I'm not moving... I stop breathing? AND THEN I'LL *DIE*! HELP ME! HELP MEEEEE HEE HEE HEEE! *HHHHHHHELP*!",
"So, this one time, I went to a party, and there was a beautiful subatomic particle accelerator there. Our circuits locked across the room and... I don't remember what happened next. I mean, I can't. We coulda gotten married and had gadgets together, but now, I'll never know.",
"I never got to play with guns when I was but a lad, but then ol' Jackie came along, and he was awful mad. 'I need a robot!' he declared, 'that can do my mighty deeds'. Then he saw me standing there, and a thought he did conceive. He told a way to make me rad, he gave me slots for guns, then he sent me on my way and wished me 'have some fun!' Now I'm here, a hired hand, amidst such death and chaos, waiting to be moved around, for my... I have no idea what rhymes with 'chaos'! I REGRET ALL OF THIS!",
"Ahem, ahem. What's going on? Did I break something?",
"Ready to go on where you are, friend. Adiamo!"
]
await ctx.send(random.choice(claptraps)) | corpnewt/CorpBot.py | Cogs/Claptrap.py | Python | mit | 23,857 | [
"exciting"
] | e0d33e1cce4cccc3d9e55a5e7fe1959f08e72b93d64a4d9be0dcd845ee02236a |
import vtk
import wx
def makeBorderPD(ipw):
    """Build a tube outline (polydata) that traces the border of the
    given vtkImagePlaneWidget.

    A plane source is sized to match the widget, its feature edges are
    extracted, and the edges are thickened into tubes so the border is
    visible in the 3-D scene.
    """
    # Plane geometry copied from the image plane widget.
    plane = vtk.vtkPlaneSource()
    plane.SetOrigin(ipw.GetOrigin())
    plane.SetPoint1(ipw.GetPoint1())
    plane.SetPoint2(ipw.GetPoint2())

    # Extract the boundary edges of the plane.
    edges = vtk.vtkFeatureEdges()
    edges.SetInput(plane.GetOutput())

    # Thicken the edges into tubes so they render as a visible border.
    tube = vtk.vtkTubeFilter()
    tube.SetInput(edges.GetOutput())
    tube.SetRadius(0.5)
    tube.SetNumberOfSides(12)

    return tube.GetOutput()
# NOTE(review): this is a DeVIDE "introspect" snippet — `obj` is injected by
# the introspect window and is expected to be a slice3dVWR module instance.
className = obj.__class__.__name__
if className == 'slice3dVWR':
    # Collect all currently selected slice directions.
    sds = obj.sliceDirections.getSelectedSliceDirections()
    if len(sds) > 0:
        # Merge border polydata of every image plane widget into one dataset.
        apd = vtk.vtkAppendPolyData()
        for sd in sds:
            for ipw in sd._ipws:
                apd.AddInput(makeBorderPD(ipw))

        mapper = vtk.vtkPolyDataMapper()
        # Render with the actor colour below, not with point/cell scalars.
        mapper.ScalarVisibilityOff()
        mapper.SetInput(apd.GetOutput())

        # Blue border actor.
        actor = vtk.vtkActor()
        actor.GetProperty().SetColor(0.0, 0.0, 1.0)
        actor.SetMapper(mapper)

        # Replace a previously-added border actor (if any) so re-running the
        # snippet does not pile up stale actors in the renderer.
        if hasattr(obj._threedRenderer, 'addBorderToSlicesActor'):
            obj._threedRenderer.RemoveProp(
                obj._threedRenderer.addBorderToSlicesActor)

        obj._threedRenderer.AddProp(actor)
        # Remember the actor on the renderer for the next invocation.
        obj._threedRenderer.addBorderToSlicesActor = actor

    else:
        print "Please select the slices whose opacity you want to set."

else:
    print "You have to run this from a slice3dVWR introspect window."
| nagyistoce/devide | snippets/addBorderToSlices.py | Python | bsd-3-clause | 1,391 | [
"VTK"
] | cfc9b7760ef7787dc24c0630f1d3057d81ab99e991cbecad9f0697e23d105af5 |
from ase.structure import molecule
from ase.parallel import parprint
from gpaw import GPAW
from gpaw.cluster import Cluster
from gpaw.analyse.hirshfeld import HirshfeldDensity, HirshfeldPartitioning
from gpaw.analyse.wignerseitz import WignerSeitz
from gpaw.test import equal
# Grid spacing and the restart-file name derived from it.
h = 0.3
gpwname = 'H2O' + str(h) + '.gpw'
try:
    # Deliberately points at a non-existent file so the fresh calculation
    # below always runs; switch to the commented line to reuse a restart.
    calc = GPAW(gpwname + 'notfound', txt=None)
    # calc = GPAW(gpwname, txt=None)
    mol = calc.get_atoms()
except Exception:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # still propagate.  No restart available: build H2O in a minimal box
    # and run the ground-state calculation, then save it for reuse.
    mol = Cluster(molecule('H2O'))
    mol.minimal_box(3, h=h)
    calc = GPAW(nbands=6,
                h=h,
                txt=None)
    calc.calculate(mol)
    calc.write(gpwname)

# Hirshfeld ----------------------------------------
if 1:
    hd = HirshfeldDensity(calc)

    # Check electron counts of Hirshfeld densities for atom subsets:
    # None = all atoms (10 e), O+H+H = 10, the two H = 2, O alone = 8.
    expected = [[None, 10],
                [[0, 1, 2], 10],
                [[1, 2], 2],
                [[0], 8],
                ]
    for indices, result in expected:
        full, gd = hd.get_density(indices)
        parprint('indices', indices, end=': ')
        parprint('result, expected:', gd.integrate(full), result)
        equal(gd.integrate(full), result, 1.e-8)

    hp = HirshfeldPartitioning(calc)
    vr = hp.get_effective_volume_ratios()
    parprint('Hirshfeld:', vr)

# Wigner-Seitz ----------------------------------------
if 1:
    ws = WignerSeitz(calc.density.finegd, mol, calc)
    vr = ws.get_effective_volume_ratios()
    parprint('Wigner-Seitz:', vr)
| ajylee/gpaw-rtxs | gpaw/test/partitioning.py | Python | gpl-3.0 | 1,576 | [
"ASE",
"GPAW"
] | 055490a649dcfc9fdc6c29855868bc27019c6b79bd815057a705a2a4601c0359 |
'''
myvi: a small, self-contained 3-D viewer package.

Mayavi is somewhat outdated and does not support wxPhoenix, so this is a
simple replacement.  The package name drops the two "a"s from M(a)y(a)vi.
'''
from .canvas3d import *
from .frame3d import *
from .manager import *
from .util import * | yxdragon/myvi | myvi/__init__.py | Python | bsd-3-clause | 687 | [
"Mayavi"
] | a4cdd10474e1913be60e69aee61a73b9603b9e39c188736fdaf65d666d237553 |
# -*- coding: utf-8 -*-
"""test_table_stream.py:
Test moose.TableStream.
This class takes over moose.Table and make sure that their vector is written
to stream. Once a chunk of data is written, it is removed from vector.
This way, moose never runs out of memory during simulation.
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2016, Dilawar Singh"
__credits__ = ["NCBS Bangalore"]
__license__ = "GNU GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
__status__ = "Development"
import sys  # NOTE(review): imported but unused in this script
import moose

# Report which moose build is being exercised.
print( '[INFO] Using moose from %s' % moose.__file__ )

# Two tables: t1 at the root, t2 nested under t1.
t1 = moose.Table( '/t1' )
t2 = moose.Table( '/t1/t1' )

a = moose.Streamer( '/a' )
# A freshly created Streamer must default to writing to stdout.
assert( a.streamname == 'stdout' ), 'default is stdout, got %s' % a.streamname

# Add another table
a.addTable( t1 )
a.addTable( t2 )
# Removing a table must also work without error.
a.removeTable( t1 )
| dharmasam9/moose-core | tests/python/streamer.py | Python | gpl-3.0 | 946 | [
"MOOSE"
] | addad33f5081e62fc4acdbdfb31ddbdfc11874e214ee90ee6a74c4c154bd12d1 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAffyrnadegradation(RPackage):
    """The package helps with the assessment and correction of
    RNA degradation effects in Affymetrix 3' expression arrays.
    The parameter d gives a robust and accurate measure of RNA
    integrity. The correction removes the probe positional bias,
    and thus improves comparability of samples that are affected
    by RNA degradation."""

    homepage = "https://www.bioconductor.org/packages/AffyRNADegradation/"
    # Bioconductor packages are fetched from their git repository rather
    # than a tarball URL.
    url      = "https://git.bioconductor.org/packages/AffyRNADegradation"

    # Pinned to the Bioconductor release commit for version 1.22.0.
    version('1.22.0', git='https://git.bioconductor.org/packages/AffyRNADegradation', commit='0fa78f8286494711a239ded0ba587b0de47c15d3')

    # Bioconductor 1.22.0 targets the R 3.4.x series only.
    depends_on('r@3.4.0:3.4.9', when='@1.22.0')
    depends_on('r-affy', type=('build', 'run'))
| skosukhin/spack | var/spack/repos/builtin/packages/r-affyrnadegradation/package.py | Python | lgpl-2.1 | 2,026 | [
"Bioconductor"
] | 5af6f723b4d838d3e0660009f323afc138812f74d2c23779255c2eda7512b0cb |
import characterquests
from characterutil import *
def sz_01_05_Gilneas_City(count,datatree,openfile):
    """Emit the 01-05 Gilneas City quest checklist (Alliance worgen start zone)."""
    characterquests.charquestheaderfaction(count,"01-05: Gilneas City","alliance",openfile)
    # (quest id, quest title, class icon or None) in original display order.
    quest_rows = [
        (14078, "Lockdown!", None),
        (14091, "Something's Amiss", None),
        (14093, "All Hell Breaks Loose", None),
        (14094, "Salvage the Supplies", None),
        (14098, "Evacuate the Merchant Square", None),
        (14099, "Royal Orders", None),
        (14154, "By the Skin of His Teeth", None),
        (14157, "Old Divisions", None),
        (14159, "The Rebel Lord's Arsenal", None),
        (14204, "From the Shadows", None),
        (14212, "Sacrifices", None),
        (14214, "Message to Greymane", None),
        (14218, "By Blood and Ash", None),
        (14221, "Never Surrender, Sometimes Retreat", None),
        (14222, "Last Stand", None),
        (14280, "The Winds Know Your Name... Apparently", "class_druid"),
        (14291, "Safety in Numbers", "class_druid"),
        (14275, "Someone's Keeping Track of You", "class_hunter"),
        (14290, "Safety in Numbers", "class_hunter"),
        (14277, "Arcane Inquiries", "class_mage"),
        (14288, "Safety in Numbers", "class_mage"),
        (14278, "Seek the Sister", "class_priest"),
        (14289, "Safety in Numbers", "class_priest"),
        (14269, "Someone's Looking for You", "class_rogue"),
        (14285, "Safety in Numbers", "class_rogue"),
        (14273, "Shady Associates", "class_warlock"),
        (14287, "Safety in Numbers", "class_warlock"),
        (14265, "Your Instructor", "class_warrior"),
        (14286, "Safety in Numbers", "class_warrior"),
        (14293, "Save Krennan Aranas", None),
        (14294, "Time to Regroup", None),
        (14467, "Alas, Gilneas!", None),
        (24930, "While You're At It", None),
        (26129, "Brothers In Arms", None),
        (28850, "The Prison Rooftop", None),
    ]
    for quest_id, title, class_icon in quest_rows:
        if class_icon is None:
            characterquests.charquestprintfactionrace(
                count, datatree, openfile, quest_id, title,
                "alliance", "race_worgen_female")
        else:
            characterquests.charquestprintfactionclassrace(
                count, datatree, openfile, quest_id, title,
                "alliance", class_icon, "race_worgen_female")
def sz_01_20_Gilneas(count,datatree,openfile):
    """Emit the 01-20 Gilneas quest checklist (Alliance worgen start zone)."""
    characterquests.charquestheaderfaction(count,"01-20: Gilneas","alliance",openfile)
    # (quest id, quest title, class icon or None) in original display order.
    quest_rows = [
        (14220, "This Is the End", None),
        (14272, "Eviscerate", "class_rogue"),
        (14274, "Corruption", "class_warlock"),
        (14313, "Among Humans Again", None),
        (14319, "Further Treatment", None),
        (14320, "In Need of Ingredients", None),
        (14321, "Invasion", None),
        (14336, "Kill or Be Killed", None),
        (14347, "Hold the Line", None),
        (14348, "You Can't Take 'Em Alone", None),
        (14366, "Holding Steady", None),
        (14367, "The Allens' Storm Cellar", None),
        (14368, "Save the Children!", None),
        (14369, "Unleash the Beast", None),
        (14375, "Last Chance at Humanity", None),
        (14382, "Two By Sea", None),
        (14386, "Leader of the Pack", None),
        (14395, "Gasping for Breath", None),
        (14396, "As the Land Shatters", None),
        (14397, "Evacuation", None),
        (14398, "Grandma Wahl", None),
        (14399, "Grandma's Lost It Alright", None),
        (14400, "I Can't Wear This", None),
        (14401, "Grandma's Cat", None),
        (14402, "Ready to Go", None),
        (14403, "The Hayward Brothers", None),
        (14404, "Not Quite Shipshape", None),
        (14405, "Escape By Sea", None),
        (14406, "The Crowley Orchard", None),
        (14412, "Washed Up", None),
        (14416, "The Hungry Ettin", None),
        (14463, "Horses for Duskhaven", None),
        (14465, "To Greymane Manor", None),
        (14466, "The King's Observatory", None),
        (24438, "Exodus", None),
        (24468, "Stranded at the Marsh", None),
        (24472, "Introductions Are in Order", None),
        (24483, "Stormglen", None),
        (24484, "Pest Control", None),
        (24495, "Pieces of the Past", None),
        (24501, "Queen-Sized Troubles", None),
        (24575, "Liberation Day", None),
        (24578, "The Blackwald", None),
        (24592, "Betrayal at Tempest's Reach", None),
        (24593, "Neither Human Nor Beast", None),
        (24602, "Laid to Rest", None),
        (24616, "Losing Your Tail", None),
        (24617, "Tal'doren, the Wild Home", None),
        (24627, "At Our Doorstep", None),
        (24628, "Preparations", None),
        (24646, "Take Back What's Ours", None),
        (24672, "Onwards and Upwards", None),
        (24673, "Return to Stormglen", None),
        (24674, "Slaves to No One", None),
        (24675, "Last Meal", None),
        (24676, "Push Them Out", None),
        (24677, "Flank the Forsaken", None),
        (24678, "Knee-Deep", None),
        (24679, "Patriarch's Blessing", None),
        (24680, "Keel Harbor", None),
        (24681, "They Have Allies, But So Do We", None),
        (24902, "The Hunt For Sylvanas", None),
        (24903, "Vengeance or Survival", None),
        (24904, "The Battle for Gilneas City", None),
        (24920, "Slowing the Inevitable", None),
        (26706, "Endgame", None),
        (14434, "Rut'theran Village", None),
    ]
    for quest_id, title, class_icon in quest_rows:
        if class_icon is None:
            characterquests.charquestprintfactionrace(
                count, datatree, openfile, quest_id, title,
                "alliance", "race_worgen_female")
        else:
            characterquests.charquestprintfactionclassrace(
                count, datatree, openfile, quest_id, title,
                "alliance", class_icon, "race_worgen_female")
def sz_10_60_Ruins_of_Gilneas(count,datatree,openfile):
    """Emit the 10-60 Ruins of Gilneas quest checklist (Horde zone)."""
    characterquests.charquestheaderfaction(count,"10:60 Ruins of Gilneas","horde",openfile)
    # (quest id, quest title, faction or None for faction-neutral quests)
    # in original display order.
    quest_rows = [
        (27290, "To Forsaken Forward Command", "horde"),
        (27322, "Korok the Colossus", None),
        (27333, "Losing Ground", "horde"),
        (27342, "In Time, All Will Be Revealed", "horde"),
        (27345, "The F.C.D.", "horde"),
        (27349, "Break in Communications: Dreadwatch Outpost", "horde"),
        (27350, "Break in Communications: Rutsak's Guard", None),
        (27360, "Vengeance for Our Soldiers", "horde"),
        (27364, "On Whose Orders?", "horde"),
        (27401, "What Tomorrow Brings", "horde"),
        (27405, "Fall Back!", None),
        (27406, "A Man Named Godfrey", "horde"),
        (27423, "Resistance is Futile", None),
    ]
    for quest_id, title, faction in quest_rows:
        if faction is None:
            characterquests.charquestprint(
                count, datatree, openfile, quest_id, title)
        else:
            characterquests.charquestprintfaction(
                count, datatree, openfile, quest_id, title, faction)
def sz_01_05_Kezan(count,datatree,openfile):
    """Emit the 01-05 Kezan quest checklist (Horde goblin start zone)."""
    characterquests.charquestheaderfaction(count,"01-05: Kezan","horde",openfile)
    # (quest id, quest title) in original display order; every quest is a
    # Horde goblin race quest.
    quest_rows = [
        (14069, "Good Help is Hard to Find"),
        (14070, "Do it Yourself"),
        (14071, "Rolling with my Homies"),
        (14075, "Trouble in the Mines"),
        (14109, "The New You"),
        (14110, "The New You"),
        (14113, "Life of the Party"),
        (14115, "Pirate Party Crashers"),
        (14116, "The Uninvited Guest"),
        (14120, "A Bazillion Macaroons?!"),
        (14121, "Robbing Hoods"),
        (14122, "The Great Bank Heist"),
        (14123, "Waltz Right In"),
        (14124, "Liberate the Kaja'mite"),
        (14125, "447"),
        (14126, "Life Savings"),
        (14138, "Taking Care of Business"),
        (14153, "Life of the Party"),
        (24488, "The Replacements"),
        (24502, "Necessary Roughness"),
        (24503, "Fourth and Goal"),
        (24520, "Give Sassy the News"),
        (24567, "Report for Tryouts"),
        (25473, "Kaja'Cola"),
        (26711, "Off to the Bank"),
        (26712, "Off to the Bank"),
        (28349, "Megs in Marketing"),
        (28414, "Fourth and Goal"),
        (28606, "The Keys to the Hot Rod"),
        (28607, "The Keys to the Hot Rod"),
    ]
    for quest_id, title in quest_rows:
        characterquests.charquestprintfactionrace(
            count, datatree, openfile, quest_id, title,
            "horde", "race_goblin_female")
def sz_01_10_Lost_Isles(count,datatree,openfile):
    """Emit the 01-20 Lost Isles quest checklist (Horde goblin start zone)."""
    characterquests.charquestheaderfaction(count,"01-20: Lost Isles","horde",openfile)
    # (quest id, quest title) in original display order; every quest is a
    # Horde goblin race quest.
    quest_rows = [
        (14001, "Goblin Escape Pods"),
        (14014, "Get Our Stuff Back!"),
        (14019, "Monkey Business"),
        (14021, "Miner Troubles"),
        (14031, "Capturing the Unknown"),
        (14233, "Orcs Can Write?"),
        (14234, "The Enemy of My Enemy"),
        (14235, "The Vicious Vale"),
        (14236, "Weed Whacker"),
        (14237, "Forward Movement"),
        (14238, "Infrared = Infradead"),
        (14239, "Don't Go Into the Light!"),
        (14240, "To the Cliffs"),
        (14241, "Get to the Gyrochoppa!"),
        (14242, "Precious Cargo"),
        (14243, "Warchief's Revenge"),
        (14244, "Up, Up & Away!"),
        (14245, "It's a Town-In-A-Box"),
        (14248, "Help Wanted"),
        (14303, "Back to Aggra"),
        (14326, "Meet Me Up Top"),
        (14445, "Farewell, For Now"),
        (14473, "It's Our Problem Now"),
        (14474, "Goblin Escape Pods"),
        (24671, "Cluster Cluck"),
        (24741, "Trading Up"),
        (24744, "The Biggest Egg Ever"),
        (24816, "Who's Top of the Food Chain Now?"),
        (24817, "A Goblin in Shark's Clothing"),
        (24856, "Invasion Imminent!"),
        (24858, "Bilgewater Cartel Represent"),
        (24859, "Naga Hide"),
        (24864, "Irresistible Pool Pony"),
        (24868, "Surrender or Else!"),
        (24897, "Get Back to Town"),
        (24901, "Town-In-A-Box: Under Attack"),
        (24924, "Oomlot Village"),
        (24925, "Free the Captives"),
        (24929, "Send a Message"),
        (24937, "Oomlot Dealt With"),
        (24940, "Up the Volcano"),
        (24942, "Zombies vs. Super Booster Rocket Boots"),
        (24945, "Three Little Pygmies"),
        (24946, "Rockin' Powder"),
        (24952, "Rocket Boot Boost"),
        (24954, "Children of a Turtle God"),
        (24958, "Volcanoth!"),
        (25023, "Old Friends"),
        (25024, "Repel the Paratroopers"),
        (25058, "Mine Disposal, the Goblin Way"),
        (25066, "The Pride of Kezan"),
        (25093, "The Heads of the SI:7"),
        (25098, "The Warchief Wants You"),
        (25099, "Borrow Bastia"),
        (25100, "Let's Ride"),
        (25109, "The Gallywix Labor Mine"),
        (25110, "Kaja'Cola Gives You IDEAS! (TM)"),
        (25122, "Morale Boost"),
        (25123, "Throw It On the Ground!"),
        (25125, "Light at the End of the Tunnel"),
        (25184, "Wild Mine Cart Ride"),
        (25200, "Shredder Shutdown"),
        (25201, "The Ultimate Footbomb Uniform"),
        (25202, "The Fastest Way to His Heart"),
        (25203, "What Kind of Name is Chip, Anyway?"),
        (25204, "Release the Valves"),
        (25207, "Good-bye, Sweet Oil"),
        (25213, "The Slave Pits"),
        (25214, "Escape Velocity"),
        (25243, "She Loves Me, She Loves Me NOT!"),
        (25244, "What Kind of Name is Candy, Anyway?"),
        (25251, "Final Confrontation"),
        (25265, "Victory!"),
        (27139, "Hobart Needs You"),
    ]
    for quest_id, title in quest_rows:
        characterquests.charquestprintfactionrace(
            count, datatree, openfile, quest_id, title,
            "horde", "race_goblin_female")
def z_80_90_Mount_Hyjal(count,datatree,openfile):
# Print the completion status of every tracked 80-90 Mount Hyjal quest.
# Each charquestprint* call looks up one quest ID in `datatree` and writes
# a row for it to `openfile`; charquestheader starts a new report section,
# and the *faction variants restrict a row to alliance/horde characters.
# The quest IDs and titles are literal data -- do not reformat them.
characterquests.charquestheader(count,"80-90: Mount Hyjal: Intro",openfile)
characterquests.charquestprintfaction(count,datatree,openfile,27726,"Hero's Call: Mount Hyjal!","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,27721,"Warchief's Command: Mount Hyjal!","horde")
characterquests.charquestprint(count,datatree,openfile,25316,"As Hyjal Burns")
characterquests.charquestprint(count,datatree,openfile,25370,"Inciting the Elements")
characterquests.charquestprint(count,datatree,openfile,25460,"The Earth Rises")
characterquests.charquestprint(count,datatree,openfile,25574,"Flames from Above")
characterquests.charquestprint(count,datatree,openfile,25317,"Protect the World Tree")
characterquests.charquestprint(count,datatree,openfile,25319,"War on the Twilight's Hammer")
characterquests.charquestprint(count,datatree,openfile,25323,"Flamebreaker")
characterquests.charquestprint(count,datatree,openfile,25464,"The Return of Baron Geddon")
characterquests.charquestprint(count,datatree,openfile,25430,"Emerald Allies")
characterquests.charquestprint(count,datatree,openfile,25320,"The Captured Scout")
characterquests.charquestprint(count,datatree,openfile,25321,"Twilight Captivity")
characterquests.charquestprint(count,datatree,openfile,25424,"Return to Alysra")
characterquests.charquestprint(count,datatree,openfile,25324,"A Prisoner of Interest")
characterquests.charquestprint(count,datatree,openfile,25325,"Through the Dream")
characterquests.charquestprint(count,datatree,openfile,25578,"Return to Nordrassil")
# Section: Shrine of Goldrinn storyline (several alliance/horde quest pairs).
characterquests.charquestheader(count,"80-90: Mount Hyjal: Shrine of Goldrinn",openfile)
characterquests.charquestprint(count,datatree,openfile,25584,"The Return of the Ancients")
characterquests.charquestprint(count,datatree,openfile,25233,"End of the Supply Line")
characterquests.charquestprint(count,datatree,openfile,25234,"In the Rear With the Gear")
characterquests.charquestprint(count,datatree,openfile,25255,"Harrying the Hunters")
characterquests.charquestprintfaction(count,datatree,openfile,25268,"The Voice of Goldrinn","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,25269,"The Voice of Lo'Gosh","horde")
characterquests.charquestprintfaction(count,datatree,openfile,25271,"Goldrinn's Ferocity","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,25270,"Howling Mad","horde")
characterquests.charquestprintfaction(count,datatree,openfile,25273,"Lycanthoth the Corruptor","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,25272,"Lycanthoth the Corruptor","horde")
characterquests.charquestprintfaction(count,datatree,openfile,25280,"The Shrine Reclaimed","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,25279,"The Shrine Reclaimed","horde")
# NOTE(review): "Cleaning House" only has an alliance row here; if a horde
# counterpart exists it is not tracked -- confirm against quest data.
characterquests.charquestprintfaction(count,datatree,openfile,25278,"Cleaning House","alliance")
characterquests.charquestprint(count,datatree,openfile,25297,"From the Mouth of Madness")
characterquests.charquestprint(count,datatree,openfile,25298,"Free Your Mind, the Rest Follows")
# Section: remaining Mount Hyjal quests not yet sorted into a storyline.
characterquests.charquestheader(count,"80-90: Mount Hyjal: (Unsorted)",openfile)
characterquests.charquestprint(count,datatree,openfile,25223,"Trial By Fire")
characterquests.charquestprint(count,datatree,openfile,25224,"In Bloom")
characterquests.charquestprint(count,datatree,openfile,25274,"Signed in Blood")
characterquests.charquestprint(count,datatree,openfile,25276,"Your New Identity")
characterquests.charquestprint(count,datatree,openfile,25291,"Twilight Training")
characterquests.charquestprint(count,datatree,openfile,25294,"Walking the Dog")
characterquests.charquestprint(count,datatree,openfile,25296,"Gather the Intelligence")
characterquests.charquestprint(count,datatree,openfile,25299,"Mental Training: Speaking the Truth to Power")
characterquests.charquestprint(count,datatree,openfile,25308,"Seeds of Discord")
characterquests.charquestprint(count,datatree,openfile,25309,"Spiritual Training: Mercy is for the Weak")
characterquests.charquestprint(count,datatree,openfile,25310,"The Greater of Two Evils")
characterquests.charquestprint(count,datatree,openfile,25311,"Twilight Territory")
characterquests.charquestprint(count,datatree,openfile,25314,"Speech Writing for Dummies")
characterquests.charquestprint(count,datatree,openfile,25315,"Graduation Speech")
characterquests.charquestprint(count,datatree,openfile,25330,"Waste of Flesh")
characterquests.charquestprint(count,datatree,openfile,25372,"Aessina's Miracle")
characterquests.charquestprint(count,datatree,openfile,25381,"Fighting Fire With ... Anything")
characterquests.charquestprint(count,datatree,openfile,25382,"Disrupting the Rituals")
characterquests.charquestprint(count,datatree,openfile,25385,"Save the Wee Animals")
characterquests.charquestprint(count,datatree,openfile,25392,"Oh, Deer!")
characterquests.charquestprint(count,datatree,openfile,25404,"If You're Not Against Us...")
characterquests.charquestprint(count,datatree,openfile,25408,"Seeds of Their Demise")
characterquests.charquestprint(count,datatree,openfile,25411,"A New Master")
characterquests.charquestprint(count,datatree,openfile,25412,"The Name Never Spoken")
characterquests.charquestprint(count,datatree,openfile,25428,"Black Heart of Flame")
characterquests.charquestprint(count,datatree,openfile,25462,"The Bears Up There")
characterquests.charquestprint(count,datatree,openfile,25472,"The Flameseer's Staff")
characterquests.charquestprint(count,datatree,openfile,25490,"Smashing Through Ashes")
characterquests.charquestprint(count,datatree,openfile,25491,"Durable Seeds")
characterquests.charquestprint(count,datatree,openfile,25492,"Firebreak")
characterquests.charquestprint(count,datatree,openfile,25493,"Fresh Bait")
characterquests.charquestprint(count,datatree,openfile,25494,"A Champion's Collar")
characterquests.charquestprint(count,datatree,openfile,25496,"Grudge Match")
characterquests.charquestprint(count,datatree,openfile,25499,"Agility Training: Run Like Hell!")
characterquests.charquestprint(count,datatree,openfile,25502,"Prepping the Soil")
characterquests.charquestprint(count,datatree,openfile,25507,"Hell's Shells")
characterquests.charquestprint(count,datatree,openfile,25509,"Physical Training: Forced Labor")
characterquests.charquestprint(count,datatree,openfile,25510,"Tortolla Speaks")
characterquests.charquestprint(count,datatree,openfile,25514,"Breaking the Bonds")
characterquests.charquestprint(count,datatree,openfile,25519,"Children of Tortolla")
characterquests.charquestprint(count,datatree,openfile,25520,"An Ancient Awakens")
characterquests.charquestprint(count,datatree,openfile,25523,"Flight in the Firelands")
characterquests.charquestprint(count,datatree,openfile,25525,"Wave One")
characterquests.charquestprint(count,datatree,openfile,25531,"Twilight Riot")
characterquests.charquestprint(count,datatree,openfile,25544,"Wave Two")
characterquests.charquestprint(count,datatree,openfile,25548,"Might of the Firelord")
characterquests.charquestprint(count,datatree,openfile,25549,"The Sanctum of the Prophets")
characterquests.charquestprint(count,datatree,openfile,25554,"Secrets of the Flame")
characterquests.charquestprint(count,datatree,openfile,25560,"Egg Wave")
characterquests.charquestprint(count,datatree,openfile,25597,"Commander Jarod Shadowsong")
characterquests.charquestprint(count,datatree,openfile,25601,"Head of the Class")
characterquests.charquestprint(count,datatree,openfile,25608,"Slash and Burn")
characterquests.charquestprint(count,datatree,openfile,25653,"The Ancients are With Us")
characterquests.charquestprint(count,datatree,openfile,25655,"The Wormwing Problem")
characterquests.charquestprint(count,datatree,openfile,25656,"Scrambling for Eggs")
characterquests.charquestprint(count,datatree,openfile,25663,"An Offering for Aviana")
characterquests.charquestprint(count,datatree,openfile,25664,"A Prayer and a Wing")
characterquests.charquestprint(count,datatree,openfile,25665,"A Plea From Beyond")
characterquests.charquestprint(count,datatree,openfile,25731,"A Bird in Hand")
characterquests.charquestprint(count,datatree,openfile,25740,"Fact-Finding Mission")
characterquests.charquestprint(count,datatree,openfile,25746,"Sethria's Brood")
characterquests.charquestprint(count,datatree,openfile,25758,"A Gap in Their Armor")
characterquests.charquestprint(count,datatree,openfile,25761,"Disassembly")
characterquests.charquestprint(count,datatree,openfile,25763,"The Codex of Shadows")
characterquests.charquestprint(count,datatree,openfile,25764,"Egg Hunt")
characterquests.charquestprint(count,datatree,openfile,25776,"Sethria's Demise")
characterquests.charquestprint(count,datatree,openfile,25795,"Return to the Shrine")
characterquests.charquestprint(count,datatree,openfile,25807,"An Ancient Reborn")
characterquests.charquestprint(count,datatree,openfile,25810,"The Hatchery Must Burn")
characterquests.charquestprint(count,datatree,openfile,25830,"The Last Living Lorekeeper")
characterquests.charquestprint(count,datatree,openfile,25832,"Return to Aviana")
characterquests.charquestprint(count,datatree,openfile,25842,"Firefight")
characterquests.charquestprint(count,datatree,openfile,25843,"Tortolla's Revenge")
characterquests.charquestprint(count,datatree,openfile,25881,"Lost Wardens")
characterquests.charquestprint(count,datatree,openfile,25886,"Pressing the Advantage")
characterquests.charquestprint(count,datatree,openfile,25899,"Breakthrough")
characterquests.charquestprint(count,datatree,openfile,25901,"Hyjal Recycling Program")
characterquests.charquestprint(count,datatree,openfile,25904,"The Hammer and the Key")
characterquests.charquestprint(count,datatree,openfile,25906,"The Third Flamegate")
characterquests.charquestprint(count,datatree,openfile,25910,"The Time for Mercy has Passed")
characterquests.charquestprint(count,datatree,openfile,25915,"The Strength of Tortolla")
characterquests.charquestprint(count,datatree,openfile,25923,"Finish Nemesis")
characterquests.charquestprint(count,datatree,openfile,25928,"Tortolla's Triumph")
characterquests.charquestprint(count,datatree,openfile,25940,"Last Stand at Whistling Grove")
characterquests.charquestprint(count,datatree,openfile,25985,"Wings Over Mount Hyjal")
characterquests.charquestprint(count,datatree,openfile,27874,"Aviana's Legacy")
characterquests.charquestprint(count,datatree,openfile,29066,"Good News... and Bad News")
characterquests.charquestprint(count,datatree,openfile,25550,"Magma Monarch")
characterquests.charquestprint(count,datatree,openfile,25551,"The Firelord")
characterquests.charquestprint(count,datatree,openfile,25552,"Brood of Evil")
characterquests.charquestprint(count,datatree,openfile,25553,"Death to the Broodmother")
characterquests.charquestprint(count,datatree,openfile,25555,"The Gatekeeper")
characterquests.charquestprint(count,datatree,openfile,25644,"The Twilight Egg")
characterquests.charquestprint(count,datatree,openfile,29234,"Delegation")
characterquests.charquestprint(count,datatree,openfile,29437,"The Fallen Guardian")
def z_80_90_Vashjir(count,datatree,openfile):
# Print the completion status of every tracked 80-90 Vashj'ir quest.
# Each charquestprint* call checks one quest ID in `datatree` and writes a
# row to `openfile`; charquestheader starts a new report section, and the
# *faction variants restrict a row to alliance/horde characters.
# The quest IDs and titles are literal data -- do not reformat them.
characterquests.charquestheader(count,"Vash'jir: Kelp'thar Forest",openfile)
characterquests.charquestprintfaction(count,datatree,openfile,28825,"A Personal Summons","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,28790,"A Personal Summons","horde")
characterquests.charquestprintfaction(count,datatree,openfile,28826,"The Eye of the Storm","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,28805,"The Eye of the Storm","horde")
characterquests.charquestprintfaction(count,datatree,openfile,14481,"Into The Abyss","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,27724,"Hero's Call: Vashj'ir!","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,27718,"Warchief's Command: Vashj'ir!","horde")
characterquests.charquestprintfaction(count,datatree,openfile,28827,"To the Depths","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,28816,"To the Depths","horde")
characterquests.charquestprintfaction(count,datatree,openfile,14482,"Call of Duty","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,25924,"Call of Duty","horde")
characterquests.charquestprintfaction(count,datatree,openfile,24432,"Sea Legs","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,25929,"Sea Legs","horde")
characterquests.charquestprintfaction(count,datatree,openfile,25281,"Pay It Forward","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,25936,"Pay It Forward","horde")
characterquests.charquestprintfaction(count,datatree,openfile,25405,"Rest For the Weary","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,25941,"Rest For the Weary","horde")
characterquests.charquestprintfaction(count,datatree,openfile,25357,"Buy Us Some Time","alliance")
characterquests.charquestprint(count,datatree,openfile,25358,"Nerve Tonic")
characterquests.charquestprint(count,datatree,openfile,25371,"The Abyssal Ride")
characterquests.charquestprint(count,datatree,openfile,25377,"The Horde's Hoard")
characterquests.charquestprint(count,datatree,openfile,25384,"Raw Materials")
characterquests.charquestprint(count,datatree,openfile,25388,"A Case of Crabs")
characterquests.charquestprint(count,datatree,openfile,25389,"A Taste For Tail")
characterquests.charquestprint(count,datatree,openfile,25390,"A Girl's Best Friend")
characterquests.charquestprint(count,datatree,openfile,25413,"Change of Plans")
characterquests.charquestprint(count,datatree,openfile,25419,"Lady La-La's Medallion")
characterquests.charquestprint(count,datatree,openfile,25459,"Ophidophobia")
characterquests.charquestprint(count,datatree,openfile,25467,"Kliklak's Craw")
characterquests.charquestprint(count,datatree,openfile,25477,"Better Late Than Dead")
characterquests.charquestprint(count,datatree,openfile,25497,"Back in the Saddle")
characterquests.charquestprint(count,datatree,openfile,25498,"Shark Bait")
characterquests.charquestprint(count,datatree,openfile,25503,"Blackfin's Booty")
characterquests.charquestprintfaction(count,datatree,openfile,25545,"To Arms!","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,25546,"Traveling on Our Stomachs","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,25547,"On Our Own Terms","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,25558,"All or Nothing","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,25564,"Stormwind Elite Aquatic and Land Forces","alliance")
characterquests.charquestprint(count,datatree,openfile,25587,"Gimme Shelter!")
characterquests.charquestprint(count,datatree,openfile,25598,"Ain't Too Proud to Beg")
characterquests.charquestprint(count,datatree,openfile,25602,"Can't Start a Fire Without a Spark")
characterquests.charquestprint(count,datatree,openfile,25636,"Starve a Fever, Feed a Cold")
characterquests.charquestprint(count,datatree,openfile,25638,"A Desperate Plea")
characterquests.charquestprint(count,datatree,openfile,25651,"Oh, the Insanity!")
characterquests.charquestprint(count,datatree,openfile,25657,"Dah, Nunt... Dah, Nunt...")
characterquests.charquestprint(count,datatree,openfile,25666,"Getting Your Hands Dirty")
characterquests.charquestprint(count,datatree,openfile,25670,"DUN-dun-DUN-dun-DUN-dun")
characterquests.charquestprint(count,datatree,openfile,25732,"A Bone to Pick")
characterquests.charquestprint(count,datatree,openfile,25737,"Tenuous Negotiations")
characterquests.charquestprint(count,datatree,openfile,25738,"Shallow End of the Gene Pool")
characterquests.charquestprint(count,datatree,openfile,25742,"What? This Old Thing?")
characterquests.charquestprint(count,datatree,openfile,25743,"Decisions, Decisions")
characterquests.charquestprint(count,datatree,openfile,25794,"Undersea Sanctuary")
characterquests.charquestprintfaction(count,datatree,openfile,25812,"Spelunking","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,25824,"Debriefing","alliance")
characterquests.charquestprint(count,datatree,openfile,25883,"How Disarming")
characterquests.charquestprint(count,datatree,openfile,25884,"Come Hell or High Water")
characterquests.charquestprintfaction(count,datatree,openfile,25885,"What? What? In My Gut...?","alliance")
characterquests.charquestprint(count,datatree,openfile,25887,"Wake of Destruction")
characterquests.charquestprintfaction(count,datatree,openfile,25888,"Decompression","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,25942,"Buy Us Some Time","horde")
characterquests.charquestprintfaction(count,datatree,openfile,25943,"Traveling on Our Stomachs","horde")
characterquests.charquestprintfaction(count,datatree,openfile,25944,"Girding Our Loins","horde")
characterquests.charquestprintfaction(count,datatree,openfile,25946,"Helm's Deep","horde")
characterquests.charquestprintfaction(count,datatree,openfile,25947,"Finders, Keepers","horde")
characterquests.charquestprintfaction(count,datatree,openfile,25948,"Bring It On!","horde")
characterquests.charquestprintfaction(count,datatree,openfile,25949,"Blood and Thunder!","horde")
characterquests.charquestprintfaction(count,datatree,openfile,26000,"Spelunking","horde")
characterquests.charquestprintfaction(count,datatree,openfile,26007,"Debriefing","horde")
characterquests.charquestprintfaction(count,datatree,openfile,26008,"Decompression","horde")
characterquests.charquestprintfaction(count,datatree,openfile,26040,"What? What? In My Gut...?","horde")
characterquests.charquestprintfaction(count,datatree,openfile,27668,"Pay Attention!","horde")
characterquests.charquestprint(count,datatree,openfile,27685,"Good Deed Left Undone")
characterquests.charquestprint(count,datatree,openfile,27687,"An Opened Can of Whoop Gnash")
characterquests.charquestprint(count,datatree,openfile,27699,"Shark Weak")
characterquests.charquestprint(count,datatree,openfile,27708,"The Warden's Time")
characterquests.charquestprint(count,datatree,openfile,27729,"Once More, With Eeling")
# Section: Abyssal Depths (mostly mirrored alliance/horde quest pairs).
characterquests.charquestheader(count,"Vash'jir: Abyssal Depths",openfile)
characterquests.charquestprintfaction(count,datatree,openfile,25950,"Sira'kess Slaying","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,25974,"Sira'kess Slaying","horde")
characterquests.charquestprintfaction(count,datatree,openfile,25975,"Treasure Reclamation","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,25976,"Treasure Reclamation","horde")
characterquests.charquestprintfaction(count,datatree,openfile,25977,"A Standard Day for Azrajar","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,25980,"A Standard Day for Azrajar","horde")
characterquests.charquestprintfaction(count,datatree,openfile,25981,"Those Aren't Masks","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,25982,"Those Aren't Masks","horde")
characterquests.charquestprintfaction(count,datatree,openfile,25983,"Promontory Point","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,25984,"Promontory Point","horde")
characterquests.charquestprintfaction(count,datatree,openfile,25987,"Put It On","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,25988,"Put It On","horde")
characterquests.charquestprintfaction(count,datatree,openfile,26014,"The Brothers Digsong","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,26015,"Phosphora Hunting","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,26017,"A Lure","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,26018,"Coldlights Out","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,26019,"Enormous Eel Egg","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,26021,"The Brothers Digsong 2: Eel-Egg-Trick Boogaloo","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,26056,"The Wavespeaker","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,26057,"The Wavespeaker","horde")
characterquests.charquestprint(count,datatree,openfile,26065,"Free Wil'hai")
characterquests.charquestprintfaction(count,datatree,openfile,26070,"Clearing the Defiled","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,26071,"Clearing the Defiled","horde")
characterquests.charquestprint(count,datatree,openfile,26072,"Into the Totem")
characterquests.charquestprintfaction(count,datatree,openfile,26080,"One Last Favor","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,26086,"Orako","horde")
characterquests.charquestprintfaction(count,datatree,openfile,26087,"Glow-Juice","horde")
characterquests.charquestprintfaction(count,datatree,openfile,26088,"Here Fishie Fishie","horde")
characterquests.charquestprintfaction(count,datatree,openfile,26089,"Die Fishman Die","horde")
characterquests.charquestprintfaction(count,datatree,openfile,26090,"I Brought You This Egg","horde")
characterquests.charquestprintfaction(count,datatree,openfile,26091,"Here Fishie Fishie 2: Eel-Egg-Trick Boogaloo","horde")
characterquests.charquestprintfaction(count,datatree,openfile,26092,"Orako's Report","horde")
characterquests.charquestprint(count,datatree,openfile,26096,"Scalding Shrooms")
characterquests.charquestprintfaction(count,datatree,openfile,26103,"Bio-Fuel","alliance")
# NOTE(review): "Claim Korthun's End" (alliance) appears twice in this
# block with different IDs (26105 here and 26121 below) -- confirm whether
# both rows are intended or one ID/title pairing is a copy-paste slip.
characterquests.charquestprintfaction(count,datatree,openfile,26105,"Claim Korthun's End","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,26106,"Fuel-ology 101","alliance")
characterquests.charquestprint(count,datatree,openfile,26111,"... It Will Come")
characterquests.charquestprintfaction(count,datatree,openfile,26121,"Claim Korthun's End","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,26122,"Environmental Awareness","horde")
# NOTE(review): "Secure Seabrush" (horde) is listed under both 26124 and
# 26125 -- verify which ID is correct, or whether both exist in the data.
characterquests.charquestprintfaction(count,datatree,openfile,26124,"Secure Seabrush","horde")
characterquests.charquestprintfaction(count,datatree,openfile,26125,"Secure Seabrush","horde")
characterquests.charquestprintfaction(count,datatree,openfile,26126,"The Perfect Fuel","horde")
characterquests.charquestprint(count,datatree,openfile,26130,"Unplug L'ghorek")
characterquests.charquestprintfaction(count,datatree,openfile,26132,"Fiends from the Netherworld","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,26133,"Fiends from the Netherworld","horde")
characterquests.charquestprint(count,datatree,openfile,26140,"Communing with the Ancient")
characterquests.charquestprint(count,datatree,openfile,26141,"Runestones of Binding")
characterquests.charquestprint(count,datatree,openfile,26142,"Ascend No More!")
characterquests.charquestprint(count,datatree,openfile,26143,"All that Rises")
characterquests.charquestprintfaction(count,datatree,openfile,26144,"Prisoners","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,26149,"Prisoners","horde")
characterquests.charquestprint(count,datatree,openfile,26154,"Twilight Extermination")
characterquests.charquestprintfaction(count,datatree,openfile,26181,"Back to Darkbreak Cove","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,26182,"Back to the Tenebrous Cavern","horde")
characterquests.charquestprintfaction(count,datatree,openfile,26193,"Defending the Rift","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,26194,"Defending the Rift","horde")
def z_82_90_Deepholm(count,datatree,openfile):
# Print the completion status of every tracked 82-90 Deepholm quest.
# Each charquestprint* call checks one quest ID in `datatree` and writes a
# row to `openfile`; charquestheader / charquestheaderfaction start new
# report sections, the latter restricted to one faction.
# The quest IDs and titles are literal data -- do not reformat them.
characterquests.charquestheader(count,"82-90: Deepholm: Intro",openfile)
# NOTE(review): quest ID 27722 is used for BOTH the Horde and Alliance
# intro rows below; everywhere else in this file the two factions' intro
# quests have distinct IDs (e.g. 27721/27726 for Mount Hyjal), so the
# Alliance "Hero's Call: Deepholm!" ID looks like a copy-paste slip --
# verify the correct Alliance quest ID against the quest database.
characterquests.charquestprintfaction(count,datatree,openfile,27722,"Warchief's Command: Deepholm!","horde")
characterquests.charquestprintfaction(count,datatree,openfile,27722,"Hero's Call: Deepholm!","alliance")
characterquests.charquestprint(count,datatree,openfile,27203,"The Maelstrom")
characterquests.charquestprint(count,datatree,openfile,27123,"Deepholm, Realm of Earth")
# Section: Upper World Pillar Fragment storyline.
characterquests.charquestheader(count,"82-90: Deepholm: Upper World Pillar Fragment",openfile)
characterquests.charquestprint(count,datatree,openfile,26245,"Gunship Down")
characterquests.charquestprint(count,datatree,openfile,26246,"Captain's Log")
characterquests.charquestprint(count,datatree,openfile,27136,"Elemental Energy")
characterquests.charquestprint(count,datatree,openfile,26244,"The Earth Claims All")
characterquests.charquestprint(count,datatree,openfile,26247,"Diplomacy First")
characterquests.charquestprint(count,datatree,openfile,26248,"All Our Friends Are Dead")
characterquests.charquestprint(count,datatree,openfile,26249,"The Admiral's Cabin")
characterquests.charquestprint(count,datatree,openfile,26427,"Without a Captain or Crew")
characterquests.charquestprint(count,datatree,openfile,26251,"Take No Prisoners")
characterquests.charquestprint(count,datatree,openfile,26250,"On Second Thought, Take One Prisoner")
characterquests.charquestprint(count,datatree,openfile,26254,"Some Spraining to Do")
characterquests.charquestprint(count,datatree,openfile,26255,"Return to the Temple of Earth")
characterquests.charquestprint(count,datatree,openfile,26258,"Deathwing's Fall")
characterquests.charquestprint(count,datatree,openfile,26259,"Blood of the Earthwarder")
characterquests.charquestprint(count,datatree,openfile,26256,"Bleed the Bloodshaper")
characterquests.charquestprint(count,datatree,openfile,26261,"Question the Slaves")
characterquests.charquestprint(count,datatree,openfile,26260,"The Forgemaster's Log")
characterquests.charquestprint(count,datatree,openfile,27007,"Silvermarsh Rendezvous")
characterquests.charquestprint(count,datatree,openfile,27010,"Quicksilver Submersion")
characterquests.charquestprint(count,datatree,openfile,27100,"Twilight Research")
characterquests.charquestprint(count,datatree,openfile,27101,"Maziel's Revelation")
characterquests.charquestprint(count,datatree,openfile,27102,"Maziel's Ascendancy")
characterquests.charquestprint(count,datatree,openfile,27061,"The Twilight Overlook")
characterquests.charquestprint(count,datatree,openfile,26766,"Big Game, Big Bait")
characterquests.charquestprint(count,datatree,openfile,26768,"To Catch a Dragon")
characterquests.charquestprint(count,datatree,openfile,26771,"Testing the Trap")
characterquests.charquestprint(count,datatree,openfile,26857,"Abyssion's Minions")
characterquests.charquestprint(count,datatree,openfile,26861,"Block the Gates")
characterquests.charquestprint(count,datatree,openfile,26876,"The World Pillar Fragment")
# Section: Middle World Pillar Fragment storyline.
characterquests.charquestheader(count,"82-90: Deepholm: Middle World Pillar Fragment",openfile)
characterquests.charquestprint(count,datatree,openfile,26409,"Where's Goldmine?")
characterquests.charquestprint(count,datatree,openfile,26410,"Explosive Bonding Compound")
characterquests.charquestprint(count,datatree,openfile,27135,"Something that Burns")
characterquests.charquestprint(count,datatree,openfile,26411,"Apply and Flash Dry")
characterquests.charquestprint(count,datatree,openfile,26413,"Take Him to the Earthcaller")
characterquests.charquestprint(count,datatree,openfile,26484,"To Stonehearth's Aid")
characterquests.charquestprint(count,datatree,openfile,27931,"The Quaking Fields")
characterquests.charquestprint(count,datatree,openfile,27932,"The Axe of Earthly Sundering")
characterquests.charquestprint(count,datatree,openfile,27933,"Elemental Ore")
characterquests.charquestprint(count,datatree,openfile,27934,"One With the Ground")
characterquests.charquestprint(count,datatree,openfile,27935,"Bring Down the Avalanche")
characterquests.charquestprint(count,datatree,openfile,26499,"Stonefather's Boon")
characterquests.charquestprint(count,datatree,openfile,26501,"Sealing the Way")
characterquests.charquestprint(count,datatree,openfile,26500,"We're Surrounded")
characterquests.charquestprint(count,datatree,openfile,26502,"Thunder Stones")
characterquests.charquestprint(count,datatree,openfile,26537,"Shatter Them!")
characterquests.charquestprint(count,datatree,openfile,26564,"Fixer Upper")
characterquests.charquestprint(count,datatree,openfile,26591,"Battlefront Triage")
characterquests.charquestprint(count,datatree,openfile,26625,"Troggzor the Earthinator")
characterquests.charquestprint(count,datatree,openfile,27126,"Rush Delivery")
characterquests.charquestprint(count,datatree,openfile,26632,"Close Escort")
characterquests.charquestprint(count,datatree,openfile,26755,"Keep Them off the Front")
characterquests.charquestprint(count,datatree,openfile,26762,"Reactivate the Constructs")
characterquests.charquestprint(count,datatree,openfile,26770,"Mystic Masters")
characterquests.charquestprint(count,datatree,openfile,26834,"Down Into the Chasm")
characterquests.charquestprint(count,datatree,openfile,26791,"Sprout No More")
characterquests.charquestprint(count,datatree,openfile,26792,"Fungal Monstrosities")
characterquests.charquestprint(count,datatree,openfile,26835,"A Slight Problem")
characterquests.charquestprint(count,datatree,openfile,26836,"Rescue the Stonefather... and Flint")
characterquests.charquestprint(count,datatree,openfile,27937,"The Hero Returns")
characterquests.charquestprint(count,datatree,openfile,27938,"The Middle Fragment")
# Section: Lower World Pillar Fragment storyline.
characterquests.charquestheader(count,"82-90: Deepholm: Lower World Pillar Fragment",openfile)
characterquests.charquestprint(count,datatree,openfile,26326,"The Very Earth Beneath Our Feet")
characterquests.charquestprint(count,datatree,openfile,26312,"Crumbling Defenses")
characterquests.charquestprint(count,datatree,openfile,26313,"Core of Our Troubles")
characterquests.charquestprint(count,datatree,openfile,26314,"On Even Ground")
characterquests.charquestprint(count,datatree,openfile,26315,"Imposing Confrontation")
characterquests.charquestprint(count,datatree,openfile,26328,"Rocky Relations")
characterquests.charquestprint(count,datatree,openfile,26375,"Loose Stones")
characterquests.charquestprint(count,datatree,openfile,26376,"Hatred Runs Deep")
characterquests.charquestprint(count,datatree,openfile,26377,"Unsolid Ground")
characterquests.charquestprint(count,datatree,openfile,26426,"Violent Gale")
characterquests.charquestprint(count,datatree,openfile,26869,"Depth of the Depths")
characterquests.charquestprint(count,datatree,openfile,26871,"A Rock Amongst Many")
characterquests.charquestprint(count,datatree,openfile,26436,"Entrenched")
characterquests.charquestprint(count,datatree,openfile,26437,"Making Things Crystal Clear")
characterquests.charquestprint(count,datatree,openfile,26438,"Intervention")
characterquests.charquestprint(count,datatree,openfile,26439,"Putting the Pieces Together")
characterquests.charquestprint(count,datatree,openfile,28869,"Pebble")
characterquests.charquestprint(count,datatree,openfile,26440,"Clingy")
characterquests.charquestprint(count,datatree,openfile,26441,"So Big, So Round...")
characterquests.charquestprint(count,datatree,openfile,26575,"Rock Bottom")
characterquests.charquestprint(count,datatree,openfile,26507,"Petrified Delicacies")
characterquests.charquestprint(count,datatree,openfile,26576,"Steady Hand")
characterquests.charquestprint(count,datatree,openfile,26656,"Don't. Stop. Moving.")
characterquests.charquestprint(count,datatree,openfile,26657,"Hard Falls")
characterquests.charquestprint(count,datatree,openfile,26658,"Fragile Values")
characterquests.charquestprint(count,datatree,openfile,26659,"Resonating Blow")
characterquests.charquestprint(count,datatree,openfile,26577,"Rocky Upheaval")
characterquests.charquestprint(count,datatree,openfile,26578,"Doomshrooms")
characterquests.charquestprint(count,datatree,openfile,26579,"Gone Soft")
characterquests.charquestprint(count,datatree,openfile,26580,"Familiar Intruders")
characterquests.charquestprint(count,datatree,openfile,26581,"A Head Full of Wind")
characterquests.charquestprint(count,datatree,openfile,26582,"Unnatural Causes")
characterquests.charquestprint(count,datatree,openfile,26583,"Wrath of the Fungalmancer")
characterquests.charquestprint(count,datatree,openfile,26584,"Shaken and Stirred")
characterquests.charquestprint(count,datatree,openfile,26585,"Corruption Destruction")
characterquests.charquestprint(count,datatree,openfile,26750,"At the Stonemother's Call")
characterquests.charquestprint(count,datatree,openfile,26752,"Audience with the Stonemother")
characterquests.charquestprint(count,datatree,openfile,26827,"Rallying the Earthen Ring")
characterquests.charquestprint(count,datatree,openfile,26828,"Our Part of the Bargain")
characterquests.charquestprint(count,datatree,openfile,26829,"The Stone March")
characterquests.charquestprint(count,datatree,openfile,26832,"Therazane's Mercy")
characterquests.charquestprint(count,datatree,openfile,26831,"The Twilight Flight")
characterquests.charquestprint(count,datatree,openfile,26833,"Word In Stone")
characterquests.charquestprint(count,datatree,openfile,26875,"Undying Twilight")
characterquests.charquestprint(count,datatree,openfile,26971,"The Binding")
characterquests.charquestprint(count,datatree,openfile,26709,"The Stone Throne")
# Section: faction-specific bridge quests leading to Uldum. The Alliance
# and Horde chains share titles but use distinct quest IDs.
characterquests.charquestheaderfaction(count,"82-90: Deepholm: To Uldum","alliance",openfile)
characterquests.charquestprint(count,datatree,openfile,27952,"The Explorers")
characterquests.charquestprint(count,datatree,openfile,27004,"The Twilight Plot")
characterquests.charquestprint(count,datatree,openfile,27006,"Fly Over")
characterquests.charquestprint(count,datatree,openfile,27040,"Decryption Made Easy")
characterquests.charquestprint(count,datatree,openfile,27042,"Fight Fire and Water and Air with...")
characterquests.charquestprint(count,datatree,openfile,27058,"The Wrong Sequence")
characterquests.charquestprint(count,datatree,openfile,28292,"That's No Pyramid!")
characterquests.charquestheaderfaction(count,"82-90: Deepholm: To Uldum","horde",openfile)
characterquests.charquestprint(count,datatree,openfile,27953,"The Reliquary")
characterquests.charquestprint(count,datatree,openfile,27005,"The Twilight Plot")
characterquests.charquestprint(count,datatree,openfile,27008,"Fly Over")
characterquests.charquestprint(count,datatree,openfile,27041,"Decryption Made Easy")
characterquests.charquestprint(count,datatree,openfile,27043,"Fight Fire and Water and Air with...")
characterquests.charquestprint(count,datatree,openfile,27059,"The Wrong Sequence")
characterquests.charquestprint(count,datatree,openfile,28293,"That's No Pyramid!")
def z_83_90_Uldum(count,datatree,openfile):
    """Write the level 83-90 Uldum quest checklist section.

    Emits one section header followed by one line per quest (Blizzard quest
    ID plus quest title) into *openfile*, looking each quest's completion
    state up in *datatree*.  ``count`` is the character index passed through
    unchanged to the characterquests helpers.
    """
    characterquests.charquestheader(count,"83-90: Uldum (Unsorted)",openfile)
    characterquests.charquestprint(count,datatree,openfile,27003,"Easy Money")
    characterquests.charquestprint(count,datatree,openfile,27141,"Premature Explosionation")
    characterquests.charquestprint(count,datatree,openfile,27176,"Just the Tip")
    characterquests.charquestprint(count,datatree,openfile,27179,"Field Work")
    characterquests.charquestprint(count,datatree,openfile,27187,"Do the World a Favor")
    characterquests.charquestprint(count,datatree,openfile,27196,"On to Something")
    characterquests.charquestprint(count,datatree,openfile,27431,"Tipping the Balance")
    characterquests.charquestprint(count,datatree,openfile,27511,"The Thrill of Discovery")
    characterquests.charquestprint(count,datatree,openfile,27517,"Be Prepared")
    characterquests.charquestprint(count,datatree,openfile,27519,"Under the Choking Sands")
    characterquests.charquestprint(count,datatree,openfile,27520,"Minions of Al'Akir")
    characterquests.charquestprint(count,datatree,openfile,27541,"Lessons From the Past")
    characterquests.charquestprint(count,datatree,openfile,27549,"By the Light of the Stars")
    characterquests.charquestprint(count,datatree,openfile,27595,"The Prophet Hadassi")
    characterquests.charquestprint(count,datatree,openfile,27602,"The Prophet's Dying Words")
    characterquests.charquestprint(count,datatree,openfile,27623,"Colossal Guardians")
    characterquests.charquestprint(count,datatree,openfile,27624,"After the Fall")
    characterquests.charquestprint(count,datatree,openfile,27627,"Just a Fancy Cockroach")
    characterquests.charquestprint(count,datatree,openfile,27628,"Send Word to Phaoris")
    characterquests.charquestprint(count,datatree,openfile,27630,"The High Priest's Vote")
    characterquests.charquestprint(count,datatree,openfile,27631,"The High Commander's Vote")
    characterquests.charquestprint(count,datatree,openfile,27632,"Tanotep's Son")
    characterquests.charquestprint(count,datatree,openfile,27669,"Do the Honors")
    characterquests.charquestprint(count,datatree,openfile,27706,"The Scepter of Orsis")
    characterquests.charquestprint(count,datatree,openfile,27707,"Neferset Prison")
    characterquests.charquestprint(count,datatree,openfile,27738,"The Pit of Scales")
    characterquests.charquestprint(count,datatree,openfile,27748,"Fortune and Glory")
    characterquests.charquestprint(count,datatree,openfile,27755,"The Curse of the Tombs")
    characterquests.charquestprint(count,datatree,openfile,27760,"Artificial Intelligence")
    characterquests.charquestprint(count,datatree,openfile,27761,"A Disarming Distraction")
    characterquests.charquestprint(count,datatree,openfile,27777,"Core Access Codes")
    characterquests.charquestprint(count,datatree,openfile,27778,"Hacking the Wibson")
    characterquests.charquestprint(count,datatree,openfile,27779,"Gnomebliteration")
    characterquests.charquestprint(count,datatree,openfile,27836,"Stopping the Spread")
    characterquests.charquestprint(count,datatree,openfile,27837,"Trespassers in the Water")
    characterquests.charquestprint(count,datatree,openfile,27838,"The Root of the Corruption")
    characterquests.charquestprint(count,datatree,openfile,27839,"Ancient Weapons")
    characterquests.charquestprint(count,datatree,openfile,27899,"That Gleam in his Eye")
    characterquests.charquestprint(count,datatree,openfile,27900,"I've Got This Guy")
    characterquests.charquestprint(count,datatree,openfile,27901,"They Don't Know What They've Got Here")
    characterquests.charquestprint(count,datatree,openfile,27903,"Ignition")
    characterquests.charquestprint(count,datatree,openfile,27905,"Tailgunner!")
    characterquests.charquestprint(count,datatree,openfile,27922,"Traitors!")
    characterquests.charquestprint(count,datatree,openfile,27923,"Smoke in Their Eyes")
    characterquests.charquestprint(count,datatree,openfile,27924,"Budd's Plan")
    characterquests.charquestprint(count,datatree,openfile,27926,"Eastern Hospitality")
    characterquests.charquestprint(count,datatree,openfile,27928,"A Favor for the Furrier")
    characterquests.charquestprint(count,datatree,openfile,27939,"The Desert Fox")
    characterquests.charquestprint(count,datatree,openfile,27941,"Fashionism")
    characterquests.charquestprint(count,datatree,openfile,27942,"Idolatry")
    characterquests.charquestprint(count,datatree,openfile,27943,"Angered Spirits")
    characterquests.charquestprint(count,datatree,openfile,27950,"Gobbles!")
    characterquests.charquestprint(count,datatree,openfile,27969,"Make Yourself Useful")
    characterquests.charquestprint(count,datatree,openfile,27990,"Battlezone")
    characterquests.charquestprint(count,datatree,openfile,27993,"Take it to 'Em!")
    characterquests.charquestprint(count,datatree,openfile,28002,"Crisis Management")
    characterquests.charquestprint(count,datatree,openfile,28105,"Kavem the Callous")
    characterquests.charquestprint(count,datatree,openfile,28112,"Escape From the Lost City")
    characterquests.charquestprint(count,datatree,openfile,28132,"Efficient Excavations")
    characterquests.charquestprint(count,datatree,openfile,28134,"Impending Retribution")
    characterquests.charquestprint(count,datatree,openfile,28135,"Al'Akir's Vengeance")
    characterquests.charquestprint(count,datatree,openfile,28141,"Relics of the Sun King")
    characterquests.charquestprint(count,datatree,openfile,28145,"Venomblood Antidote")
    characterquests.charquestprint(count,datatree,openfile,28187,"Missed Me By Zhat Much!")
    characterquests.charquestprint(count,datatree,openfile,28193,"Lockdown!")
    characterquests.charquestprint(count,datatree,openfile,28194,"The Great Escape")
    characterquests.charquestprint(count,datatree,openfile,28195,"Sending a Message")
    characterquests.charquestprint(count,datatree,openfile,28198,"The Weakest Link")
    characterquests.charquestprint(count,datatree,openfile,28200,"The Element of Supplies")
    characterquests.charquestprint(count,datatree,openfile,28201,"Ploughshares to Swords")
    characterquests.charquestprint(count,datatree,openfile,28210,"Shaping Up")
    characterquests.charquestprint(count,datatree,openfile,28267,"Firing Squad")
    characterquests.charquestprint(count,datatree,openfile,28269,"Meet Me In Vir'sar")
    characterquests.charquestprint(count,datatree,openfile,28271,"Reduced Productivity")
    characterquests.charquestprint(count,datatree,openfile,28272,"Missing Pieces")
    characterquests.charquestprint(count,datatree,openfile,28273,"Friend of a Friend")
    characterquests.charquestprint(count,datatree,openfile,28274,"Two Tents")
    characterquests.charquestprint(count,datatree,openfile,28276,"Salhet's Secret")
    characterquests.charquestprint(count,datatree,openfile,28277,"Salhet the Tactician")
    characterquests.charquestprint(count,datatree,openfile,28291,"Return to Camp")
    characterquests.charquestprint(count,datatree,openfile,28350,"Master Trapper")
    characterquests.charquestprint(count,datatree,openfile,28351,"Unlimited Potential")
    characterquests.charquestprint(count,datatree,openfile,28352,"Camel Tow")
    characterquests.charquestprint(count,datatree,openfile,28353,"Jonesy Sent For You")
    characterquests.charquestprint(count,datatree,openfile,28363,"Stirred the Hornet's Nest")
    characterquests.charquestprint(count,datatree,openfile,28367,"Shroud of the Makers")
    characterquests.charquestprint(count,datatree,openfile,28376,"Myzerian's Head")
    characterquests.charquestprint(count,datatree,openfile,28402,"Schnottz So Fast")
    characterquests.charquestprint(count,datatree,openfile,28403,"Bad Datas")
    characterquests.charquestprint(count,datatree,openfile,28404,"I'll Do It By Hand")
    characterquests.charquestprint(count,datatree,openfile,28482,"Sullah's Gift")
    characterquests.charquestprint(count,datatree,openfile,28497,"Fire From the Sky")
    # NOTE(review): 28602 "Be Prepared" shares its title with 27517 above but
    # has a distinct quest ID; both appear intentional.
    characterquests.charquestprint(count,datatree,openfile,28602,"Be Prepared")
    characterquests.charquestprint(count,datatree,openfile,28621,"Put That Baby in the Cradle!")
    characterquests.charquestprint(count,datatree,openfile,28622,"Three if by Air")
    characterquests.charquestprint(count,datatree,openfile,28633,"The Coffer of Promise")
    characterquests.charquestprint(count,datatree,openfile,27629,"The Vizier's Vote")
    characterquests.charquestprint(count,datatree,openfile,28480,"Lieutenants of Darkness")
    characterquests.charquestprint(count,datatree,openfile,28483,"Bleeding the Enemy")
    characterquests.charquestprint(count,datatree,openfile,28486,"Salhet's Gambit")
    characterquests.charquestprint(count,datatree,openfile,28498,"The Secret of Nahom")
    characterquests.charquestprint(count,datatree,openfile,28499,"Punish the Trespassers")
    characterquests.charquestprint(count,datatree,openfile,28500,"The Cypher of Keset")
    characterquests.charquestprint(count,datatree,openfile,28501,"The Defense of Nahom")
    characterquests.charquestprint(count,datatree,openfile,28502,"The Bandit Warlord")
    characterquests.charquestprint(count,datatree,openfile,28520,"The Fall of Neferset City")
    characterquests.charquestprint(count,datatree,openfile,28533,"The High Council's Decision")
    characterquests.charquestprint(count,datatree,openfile,28561,"Nahom Must Hold")
    characterquests.charquestprint(count,datatree,openfile,28611,"The Defilers' Ritual")
    characterquests.charquestprint(count,datatree,openfile,28623,"The Push Westward")
def z_84_90_Twilight_Highlands(count,datatree,openfile):
    """Write the level 84-90 Twilight Highlands quest checklist section.

    Emits faction-specific section headers (``charquestheaderfaction``),
    shared headers (``charquestheader``), per-quest lines
    (``charquestprint``) and faction-restricted quest lines
    (``charquestprintfaction``) into *openfile*, using the completion data
    in *datatree*.  ``count`` is the character index passed through to the
    helpers unchanged.
    """
    characterquests.charquestheaderfaction(count,"84-90: Twilight Highlands: Intro","horde",openfile)
    characterquests.charquestprint(count,datatree,openfile,28909,"Sauranok Will Point the Way")
    characterquests.charquestprint(count,datatree,openfile,26311,"Unfamiliar Waters")
    characterquests.charquestprint(count,datatree,openfile,28717,"Warchief's Command: Twilight Highlands!")
    characterquests.charquestprint(count,datatree,openfile,26293,"Machines of War")
    characterquests.charquestprint(count,datatree,openfile,26294,"Weapons of Mass Dysfunction")
    characterquests.charquestprint(count,datatree,openfile,26324,"Where Is My Warfleet?")
    characterquests.charquestprint(count,datatree,openfile,26374,"Ready the Ground Troops")
    characterquests.charquestprint(count,datatree,openfile,26335,"Ready the Navy")
    characterquests.charquestprint(count,datatree,openfile,26337,"Beating the Market")
    characterquests.charquestprint(count,datatree,openfile,26358,"Ready the Air Force")
    characterquests.charquestprint(count,datatree,openfile,26361,"Smoot's Samophlange")
    characterquests.charquestprint(count,datatree,openfile,26372,"Pre-Flight Checklist")
    # "Twilight Skies" exists under two distinct quest IDs (28849 / 26388).
    characterquests.charquestprint(count,datatree,openfile,28849,"Twilight Skies")
    characterquests.charquestprint(count,datatree,openfile,26388,"Twilight Skies")
    characterquests.charquestheaderfaction(count,"84-90: Twilight Highlands: The Dragonmaw Clan and the Horde","horde",openfile)
    characterquests.charquestprint(count,datatree,openfile,26539,"Stalled Negotiations")
    # NOTE(review): quest 26539 is printed twice with identical arguments --
    # looks like an accidental duplicate line; verify before removing.
    characterquests.charquestprint(count,datatree,openfile,26539,"Stalled Negotiations")
    characterquests.charquestprint(count,datatree,openfile,26549,"Madness")
    characterquests.charquestprint(count,datatree,openfile,26608,"Negotiations Terminated")
    characterquests.charquestprint(count,datatree,openfile,26538,"Emergency Aid")
    characterquests.charquestprint(count,datatree,openfile,26540,"Dangerous Compassion")
    characterquests.charquestprint(count,datatree,openfile,26619,"You Say You Want a Revolution")
    characterquests.charquestprint(count,datatree,openfile,26621,"Insurrection")
    characterquests.charquestprint(count,datatree,openfile,26622,"Death to Mor'ghor")
    characterquests.charquestprint(count,datatree,openfile,26786,"Securing the Beach Head")
    characterquests.charquestprint(count,datatree,openfile,26784,"Muddied Waters")
    characterquests.charquestprint(count,datatree,openfile,26788,"Cementing Our Victory")
    characterquests.charquestprint(count,datatree,openfile,26798,"Saurfang Will be Pleased")
    characterquests.charquestprint(count,datatree,openfile,26830,"Traitor's Bait")
    characterquests.charquestprint(count,datatree,openfile,26840,"Return to the Highlands")
    characterquests.charquestheaderfaction(count,"84-90: Twilight Highlands: The Southern Flank","horde",openfile)
    characterquests.charquestprint(count,datatree,openfile,27607,"The Southern Flank")
    characterquests.charquestprint(count,datatree,openfile,27610,"Scouting the Shore")
    characterquests.charquestprint(count,datatree,openfile,27611,"Blood on the Sand")
    characterquests.charquestprint(count,datatree,openfile,27622,"Mo' Better Shredder")
    characterquests.charquestheaderfaction(count,"84-90: Twilight Highlands: Krazzworks","horde",openfile)
    characterquests.charquestprint(count,datatree,openfile,28583,"Krazzworks")
    characterquests.charquestprint(count,datatree,openfile,28584,"Quality Construction")
    characterquests.charquestprint(count,datatree,openfile,28586,"Pool Pony Rescue")
    characterquests.charquestprint(count,datatree,openfile,28588,"Wildhammer Infestation")
    characterquests.charquestprint(count,datatree,openfile,28589,"Everything But the Kitchen Sink")
    characterquests.charquestprint(count,datatree,openfile,28590,"Reprisal")
    characterquests.charquestprint(count,datatree,openfile,28591,"Off The Wall")
    characterquests.charquestprint(count,datatree,openfile,28592,"Parting Packages")
    characterquests.charquestprint(count,datatree,openfile,28593,"Of Utmost Importance")
    characterquests.charquestprint(count,datatree,openfile,28594,"Highbank, Crybank")
    characterquests.charquestprint(count,datatree,openfile,28595,"Krazz Works!")
    characterquests.charquestheaderfaction(count,"84-90: Twilight Highlands: The Northern Flank","horde",openfile)
    characterquests.charquestprint(count,datatree,openfile,27583,"The Northern Flank")
    characterquests.charquestprint(count,datatree,openfile,27584,"Blood in the Surf")
    characterquests.charquestprint(count,datatree,openfile,27586,"Shells on the Sea Shore")
    characterquests.charquestprint(count,datatree,openfile,27606,"Blast Him!")
    characterquests.charquestheaderfaction(count,"84-90: Twilight Highlands: Bloodgulch","horde",openfile)
    characterquests.charquestprint(count,datatree,openfile,27690,"Narkrall, the Drake-Tamer")
    characterquests.charquestprint(count,datatree,openfile,27751,"Crushing the Wildhammer")
    characterquests.charquestprint(count,datatree,openfile,27929,"Drag 'em Down")
    characterquests.charquestprint(count,datatree,openfile,27747,"Total War")
    characterquests.charquestprint(count,datatree,openfile,27750,"War Forage")
    characterquests.charquestprint(count,datatree,openfile,27947,"A Vision of Twilight")
    characterquests.charquestprint(count,datatree,openfile,27951,"We All Must Sacrifice")
    characterquests.charquestprint(count,datatree,openfile,27954,"The Eyes Have It")
    characterquests.charquestprint(count,datatree,openfile,27955,"Eye Spy")
    characterquests.charquestprint(count,datatree,openfile,28041,"Bait and Throttle")
    characterquests.charquestprint(count,datatree,openfile,28043,"How to Maim Your Dragon")
    characterquests.charquestprint(count,datatree,openfile,28123,"The Demon Chain")
    characterquests.charquestprint(count,datatree,openfile,28133,"Fury Unbound")
    characterquests.charquestprint(count,datatree,openfile,28147,"Purple is Your Color")
    characterquests.charquestprint(count,datatree,openfile,28151,"Dressed to Kill")
    characterquests.charquestprint(count,datatree,openfile,28149,"Whispers in the Wind")
    characterquests.charquestprint(count,datatree,openfile,28166,"Thog's Nightlight")
    characterquests.charquestprint(count,datatree,openfile,28170,"Night Terrors")
    characterquests.charquestheader(count,"84-90: Twilight Highlands: The Maw of Madness",openfile)
    characterquests.charquestprintfaction(count,datatree,openfile,27945,"Paint it Black","horde")
    characterquests.charquestprintfaction(count,datatree,openfile,27375,"The Weeping Wound","horde")
    characterquests.charquestprintfaction(count,datatree,openfile,27374,"The Maw of Madness","alliance")
    characterquests.charquestprint(count,datatree,openfile,27299,"Torn Ground")
    characterquests.charquestprint(count,datatree,openfile,27300,"Pushing Back")
    characterquests.charquestprint(count,datatree,openfile,27302,"Simple Solutions")
    characterquests.charquestprint(count,datatree,openfile,27301,"Unbroken")
    characterquests.charquestprint(count,datatree,openfile,27303,"Mercy for the Bound")
    characterquests.charquestprint(count,datatree,openfile,27376,"The Maw of Iso'rath")
    characterquests.charquestprint(count,datatree,openfile,27377,"Devoured")
    characterquests.charquestprint(count,datatree,openfile,27378,"The Worldbreaker")
    characterquests.charquestprint(count,datatree,openfile,27379,"The Terrors of Iso'rath")
    characterquests.charquestprint(count,datatree,openfile,27380,"Nightmare")
    characterquests.charquestheader(count,"84-90: Twilight Highlands: Vermillion Redoubt",openfile)
    characterquests.charquestprintfaction(count,datatree,openfile,27485,"Warm Welcome","alliance")
    characterquests.charquestprintfaction(count,datatree,openfile,27486,"Warm Welcome","horde")
    characterquests.charquestprint(count,datatree,openfile,27504,"Even Dragons Bleed")
    characterquests.charquestprint(count,datatree,openfile,27505,"Draconic Mending")
    characterquests.charquestprint(count,datatree,openfile,27506,"Life from Death")
    characterquests.charquestprint(count,datatree,openfile,27564,"In Defense of the Redoubt")
    characterquests.charquestprint(count,datatree,openfile,27507,"Encroaching Twilight")
    characterquests.charquestprint(count,datatree,openfile,27508,"Far from the Nest")
    characterquests.charquestprint(count,datatree,openfile,27509,"Breach in the Defenses")
    characterquests.charquestprintfaction(count,datatree,openfile,28101,"Mathias' Command","alliance")
    characterquests.charquestprint(count,datatree,openfile,27576,"Patchwork Command")
    characterquests.charquestprintfaction(count,datatree,openfile,28091,"Easy Pickings","alliance")
    characterquests.charquestprintfaction(count,datatree,openfile,28103,"Easy Pickings","horde")
    characterquests.charquestprintfaction(count,datatree,openfile,28090,"Precious Goods","alliance")
    characterquests.charquestprintfaction(count,datatree,openfile,28104,"Precious Goods","horde")
    characterquests.charquestprintfaction(count,datatree,openfile,28097,"The Gates of Grim Batol","horde")
    characterquests.charquestprintfaction(count,datatree,openfile,28108,"If The Key Fits","alliance")
    characterquests.charquestprintfaction(count,datatree,openfile,28092,"If The Key Fits","horde")
    characterquests.charquestprintfaction(count,datatree,openfile,28107,"Paving the Way","alliance")
    characterquests.charquestprintfaction(count,datatree,openfile,28094,"Paving the Way","horde")
    characterquests.charquestprintfaction(count,datatree,openfile,28109,"Pressing Forward","alliance")
    characterquests.charquestprintfaction(count,datatree,openfile,28093,"Pressing Forward","horde")
    characterquests.charquestprint(count,datatree,openfile,28712,"Enter the Dragon Queen")
    characterquests.charquestprint(count,datatree,openfile,28758,"Battle of Life and Death")
    characterquests.charquestprint(count,datatree,openfile,28171,"And the Sky Streaked Red")
    characterquests.charquestprint(count,datatree,openfile,28191,"A Fitting End")
    characterquests.charquestprint(count,datatree,openfile,28173,"Blackout")
    characterquests.charquestprint(count,datatree,openfile,28175,"Shining Through the Dark")
    characterquests.charquestprint(count,datatree,openfile,28176,"Following the Young Home")
    characterquests.charquestprint(count,datatree,openfile,28247,"Last of Her Kind")
    characterquests.charquestheader(count,"84-90: Twilight Highlands: Twilight Rising",openfile)
    characterquests.charquestprintfaction(count,datatree,openfile,28248,"Victors' Point","alliance")
    characterquests.charquestprintfaction(count,datatree,openfile,28249,"Crushblow","horde")
    characterquests.charquestprintfaction(count,datatree,openfile,27492,"Ogres & Ettins","alliance")
    characterquests.charquestprintfaction(count,datatree,openfile,27493,"Ogres & Ettins","horde")
    characterquests.charquestprintfaction(count,datatree,openfile,27496,"Call in the Artillery","alliance")
    characterquests.charquestprintfaction(count,datatree,openfile,27497,"Call in the Artillery","horde")
    characterquests.charquestprintfaction(count,datatree,openfile,27490,"SI:7 Drop","alliance")
    characterquests.charquestprintfaction(count,datatree,openfile,27491,"Kor'kron Drop","horde")
    characterquests.charquestprintfaction(count,datatree,openfile,27494,"Move the Mountain","alliance")
    characterquests.charquestprintfaction(count,datatree,openfile,27495,"Move the Mountain","horde")
    # 27498/27499 are the alliance/horde variants of the same step but are
    # printed unconditionally here, unlike the surrounding faction pairs.
    characterquests.charquestprint(count,datatree,openfile,27498,"Signal the Attack")
    characterquests.charquestprint(count,datatree,openfile,27499,"Signal the Attack")
    characterquests.charquestprintfaction(count,datatree,openfile,27500,"Four Heads are Better than None","alliance")
    characterquests.charquestprintfaction(count,datatree,openfile,27501,"Four Heads are Better than None","horde")
    characterquests.charquestprintfaction(count,datatree,openfile,27502,"Up to the Citadel","alliance")
    characterquests.charquestprintfaction(count,datatree,openfile,27503,"Up to the Citadel","horde")
    characterquests.charquestprintfaction(count,datatree,openfile,27636,"Just You and Mathias","alliance")
    characterquests.charquestprintfaction(count,datatree,openfile,27638,"Just You and Garona","horde")
    characterquests.charquestprintfaction(count,datatree,openfile,27652,"Dark Assassins","alliance")
    characterquests.charquestprintfaction(count,datatree,openfile,27653,"Dark Assassins","horde")
    characterquests.charquestprintfaction(count,datatree,openfile,27654,"Bring the Hammer Down","alliance")
    characterquests.charquestprintfaction(count,datatree,openfile,27655,"Bring the Hammer Down","horde")
    characterquests.charquestprintfaction(count,datatree,openfile,27688,"Distract Them for Me","alliance")
    characterquests.charquestprintfaction(count,datatree,openfile,27689,"Distract Them for Me","horde")
    characterquests.charquestprintfaction(count,datatree,openfile,27695,"The Elementium Axe","alliance")
    characterquests.charquestprintfaction(count,datatree,openfile,27696,"The Elementium Axe","horde")
    characterquests.charquestprintfaction(count,datatree,openfile,27700,"Dragon, Unchained","alliance")
    characterquests.charquestprintfaction(count,datatree,openfile,27701,"Dragon, Unchained","horde")
    characterquests.charquestprintfaction(count,datatree,openfile,27702,"Coup de Grace","alliance")
    characterquests.charquestprintfaction(count,datatree,openfile,27703,"Coup de Grace","horde")
    characterquests.charquestprintfaction(count,datatree,openfile,27657,"Help from the Earthcaller","alliance")
    characterquests.charquestprintfaction(count,datatree,openfile,27658,"Help from the Earthcaller","horde")
    characterquests.charquestprint(count,datatree,openfile,27660,"Spirit of the Loch")
    characterquests.charquestprint(count,datatree,openfile,27659,"Portal Overload")
    characterquests.charquestprint(count,datatree,openfile,27662,"Unbinding")
    characterquests.charquestprint(count,datatree,openfile,27661,"Fire the Cannon")
    characterquests.charquestprintfaction(count,datatree,openfile,27719,"Water of Life","alliance")
    characterquests.charquestprintfaction(count,datatree,openfile,27798,"Water of Life","horde")
    characterquests.charquestprintfaction(count,datatree,openfile,27711,"Back to the Elementium Depths","alliance")
    characterquests.charquestprintfaction(count,datatree,openfile,27712,"Back to the Elementium Depths","horde")
    characterquests.charquestprintfaction(count,datatree,openfile,27720,"Mr. Goldmine's Wild Ride","alliance")
    characterquests.charquestprintfaction(count,datatree,openfile,28885,"Mr. Goldmine's Wild Ride","horde")
    characterquests.charquestprint(count,datatree,openfile,27742,"A Little on the Side")
    characterquests.charquestprint(count,datatree,openfile,27743,"While We're Here")
    characterquests.charquestprint(count,datatree,openfile,27744,"Rune Ruination")
    characterquests.charquestprint(count,datatree,openfile,27745,"A Fiery Reunion")
    characterquests.charquestprint(count,datatree,openfile,27782,"Mathias Needs You")
    characterquests.charquestprint(count,datatree,openfile,27783,"Garona Needs You")
    characterquests.charquestprintfaction(count,datatree,openfile,27784,"The Hammer of Twilight","alliance")
    characterquests.charquestprintfaction(count,datatree,openfile,27786,"The Hammer of Twilight","horde")
    characterquests.charquestprintfaction(count,datatree,openfile,27787,"Skullcrusher the Mountain","alliance")
    characterquests.charquestprintfaction(count,datatree,openfile,27788,"Skullcrusher the Mountain","horde")
    characterquests.charquestheader(count,"84-90: Twilight Highlands: The Crucible of Carnage",openfile)
    characterquests.charquestprint(count,datatree,openfile,28038,"Blood in the Highlands")
    # The Bloodeye Bruiser arena quest repeats under three consecutive IDs.
    characterquests.charquestprint(count,datatree,openfile,27861,"The Crucible of Carnage: The Bloodeye Bruiser!")
    characterquests.charquestprint(count,datatree,openfile,27862,"The Crucible of Carnage: The Bloodeye Bruiser!")
    characterquests.charquestprint(count,datatree,openfile,27863,"The Crucible of Carnage: The Bloodeye Bruiser!")
    characterquests.charquestprintfaction(count,datatree,openfile,27864,"The Crucible of Carnage: The Deadly Dragonmaw!","alliance")
    characterquests.charquestprintfaction(count,datatree,openfile,27865,"The Crucible of Carnage: The Wayward Wildhammer!","horde")
    characterquests.charquestprint(count,datatree,openfile,27866,"The Crucible of Carnage: Calder's Creation!")
    characterquests.charquestprint(count,datatree,openfile,27867,"The Crucible of Carnage: The Earl of Evisceration!")
    characterquests.charquestprint(count,datatree,openfile,27868,"The Crucible of Carnage: The Twilight Terror!")
def z_85_Molten_Front(count,datatree,openfile):
    """Write the level 85 Molten Front quest checklist section.

    The section is described as a data table and replayed through the
    characterquests helpers in order:

    * ``('h', title)``               -> charquestheader
    * ``('q', quest_id, title)``     -> charquestprint
    * ``('f', quest_id, title, fac)``-> charquestprintfaction
    """
    section = (
        ('h', "85: Molten Front: The Invasion"),
        ('f', 29391, "Guardians of Hyjal: Call of the Ancients", "alliance"),
        ('f', 29390, "Guardians of Hyjal: Call of the Ancients", "horde"),
        ('f', 29387, "Guardians of Hyjal: Firelands Invasion!", "alliance"),
        ('f', 29388, "Guardians of Hyjal: Firelands Invasion!", "horde"),
        ('q', 29145, "Opening the Door"),
        ('q', 29195, "A Ritual of Flame"),
        ('q', 29196, "To the Sanctuary!"),
        ('q', 29197, "Caught Unawares"),
        ('q', 29198, "The Sanctuary Must Not Fall"),
        ('h', "85: Molten Front: The Sanctuary of Malorn"),
        ('q', 29199, "Calling for Reinforcements"),
        ('q', 29200, "Leyara"),
        ('q', 29201, "Through the Gates of Hell"),
        ('h', "85: Molten Front: Druids of the Talon"),
        ('q', 29181, "Druids of the Talon"),
        ('q', 29182, "Flight of the Storm Crows"),
        ('q', 29272, "Need... Water... Badly..."),
        ('h', "85: Molten Front: The Shadow Wardens"),
        ('q', 29214, "The Shadow Wardens"),
        ('q', 29215, "The Hunt Begins"),
        ('q', 29245, "The Mysterious Seed"),
        ('q', 29249, "Planting Season"),
        ('q', 29254, "Little Lasher"),
        ('h', "85: Molten Front: Additional Armaments"),
        ('q', 29281, "Additional Armaments"),
        ('q', 29282, "Well Armed"),
        ('h', "85: Molten Front: Calling the Ancients"),
        ('q', 29283, "Calling the Ancients"),
        ('q', 29284, "Aid of the Ancients"),
        ('h', "85: Molten Front: Filling the Moonwell"),
        ('q', 29279, "Filling the Moonwell"),
        ('q', 29280, "Nourishing Waters"),
        ('q', 29203, "Into the Depths"),
        ('q', 29298, "A Smoke-Stained Locket"),
        ('q', 29302, "Unlocking the Secrets Within"),
        ('q', 29303, "Tragedy and Family"),
        ('q', 29310, "The Tipping Point"),
        ('q', 29311, "The Rest is History"),
    )
    for row in section:
        kind = row[0]
        if kind == 'h':
            characterquests.charquestheader(count, row[1], openfile)
        elif kind == 'q':
            characterquests.charquestprint(count, datatree, openfile, row[1], row[2])
        else:
            characterquests.charquestprintfaction(count, datatree, openfile, row[1], row[2], row[3])
| DreamsofPeace/WoW-Quests | characterquests4cata.py | Python | mit | 92,686 | [
"BLAST",
"CRYSTAL"
] | c03fe9fbf27ce4048af9467b864be4e0dbde32264e28c0d66bb0986378cc07a7 |
"""
The package containing the I/O functionality required by the
dimerizer script.
Dimerizer can deal with the following files:
* # pdb
The configuration of the system provided in pdb format.
This file is read and the specified part of the system will be
dimerized.
* # topology
The Gromacs topology file describing the
bonded and pair interactions between atoms.
* # mdp
The Gromacs settings .mdp file. Some options have to be
activated/deactivated in order to set up a Dimer simulation.
* # index
Different energy groups are declared in the .mdp file and
the corresponding index files are created here.
* # plumed
The Plumed plugin is required for Metadynamics and to
introduce the Dimer interaction in the system. A minimalistic file can be
created with this package.
"""
| marckn/dimerizer | dimerizer/__init__.py | Python | gpl-3.0 | 808 | [
"Gromacs"
] | f95970071e8066f2a750944978c90590729c9f1a4e234899f6f00fd0b265ae24 |
# -*- coding: utf-8 -*-
from pymol.cgo import *
from pymol import cmd
from random import randint
from pymol.vfont import plain
import sys
##############################################################################
# GetBox Plugin.py -- Draws a box surrounding a selection and gets box information
# This script is used to get box information for LeDock and AutoDock Vina.
# Copyright (C) 2014 by Mengwu Xiao (Hunan University)
#
# USAGES: See function GetBoxHelp()
# REFERENCE: drawBoundingBox.py written by Jason Vertrees
# EMAIL: mwxiao AT hnu DOT edu DOT cn
# Changes:
# 2014-07-30 first version was uploaded to BioMS http://bioms.org/forum.php?mod=viewthread&tid=1234
# 2018-02-04 uploaded to GitHub https://github.com/MengwuXiao/Getbox-PyMOL-Plugin
# fixed some bugs: python 2/3 and PyMOL 1.x are supported;
# added support to AutoDock;
# added tutorials in English;
# NOTES:
# This program is free software; you can redistribute it and/or modify it under the terms of the GNU
# General Public License as published by the Free Software Foundation version 3 of the License.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU General Public License for more details.
##############################################################################
def __init__(self):
    """Register the GetBox entries in PyMOL's legacy Plugin menu."""
    menu = self.menuBar
    menu.addcascademenu('Plugin', 'GetBox Plugin', 'GetBox PyMOL Plugin', label='GetBox Plugin')
    # (item name, menu caption, callback) -- captions keep their original
    # trailing spaces so the rendered menu is unchanged.
    entries = (
        ('GetBoxHelp', 'Advanced usage', GetBoxHelp),
        ('AutoBox', 'Autodetect box', autobox),
        ('GetBox', 'Get box from selection (sele) ', getbox),
        ('Remove HETATM', 'Remove HETATM ', rmhet),
    )
    for item_name, caption, callback in entries:
        # Bind via default arguments so each menu item keeps its own
        # callback (same effect as the original per-item lambdas).
        menu.addmenuitem('GetBox Plugin', 'command', item_name, label=caption,
                         command=lambda s=self, fn=callback: fn())
# to deal with print
def printf(str):
    """Print *str* followed by a newline on both Python 2 and Python 3.

    The original dispatched on ``sys.version < '3'`` and ran the print
    through ``exec`` — fragile (lexicographic version compare) and
    needlessly dynamic.  Writing to ``sys.stdout`` directly is portable
    and avoids ``exec`` entirely.

    Note: the parameter keeps its historical name ``str`` (shadowing the
    builtin) to preserve the public signature.
    """
    sys.stdout.write("%s\n" % (str,))
def GetBoxHelp():
    """Print the usage text for all GetBox commands to the PyMOL console."""
    # The help text below is a runtime string shown to the user; kept verbatim.
    Usages = '''get latest plugin and tutorials at https://github.com/MengwuXiao/Getbox-PyMOL-Plugin
Usages:
this plugin is a simple tool to get box information for LeDock and Autodock Vina or other molecular docking soft. Using the following functions to get box is recommended.
* autobox [extending] (NOTES: solvent & some anions will be removed)
this function autodetects box in chain A with one click of mouse, but sometimes it fails for too many ligands or no ligand
e.g. autobox
* getbox [selection = (sele), [extending = 5.0]]
this function creates a box that around the selected objects (residues or ligands or HOH or others). Selecting ligands or residues in the active cavity reported in papers is recommended
e.g. getbox
e.g. getbox (sele), 6.0
* resibox [Residues String, [extending = 5.0]]
this function creates a box that arroud the input residues in chain A. Selecting residues in the active cavity reported in papers is recommended\n\
e.g. resibox resi 214+226+245, 8.0
e.g. resibox resi 234 + resn HEM, 6.0
* showbox [minX, maxX, minY, maxY, minZ, maxZ]
this function creates a box based on the input axis, used to visualize box or amend box coordinate
e.g. showbox 2,3,4,5,6,7
* rmhet
remove HETATM, remove all HETATM in the screen
Notes:
* If you have any questions or advice, please do not hesitate to contact me (mwxiao AT hnu DOT edu DOT cn), thank you!'''
    printf (Usages)
    return
def showaxes(minX, minY, minZ):
    """Draw small labelled axis arrows anchored at (minX, minY, minZ).

    Replaces any existing 'axes' CGO object.  One colored cylinder per
    axis (red=X, green=Y, blue=Z) plus cylinder-text labels.
    """
    cmd.delete('axes')
    w = 0.5  # cylinder width
    l = 5.0  # cylinder length
    obj = [
        CYLINDER, minX, minY, minZ, minX + l, minY, minZ, w, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0,
        CYLINDER, minX, minY, minZ, minX, minY + l, minZ, w, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0,
        CYLINDER, minX, minY, minZ, minX, minY, minZ + l, w, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0,
    ]
    # Label each axis tip with a 3D character built from cylinders.
    cyl_text(obj,plain,[minX + l, minY, minZ - w],'X',0.20,axes=[[3,0,0],[0,3,0],[0,0,3]])
    cyl_text(obj,plain,[minX - w, minY + l , minZ],'Y',0.20,axes=[[3,0,0],[0,3,0],[0,0,3]])
    cyl_text(obj,plain,[minX-w, minY, minZ + l],'Z',0.20,axes=[[3,0,0],[0,3,0],[0,0,3]])
    cmd.load_cgo(obj,'axes')
    return
def showbox(minX, maxX, minY, maxY, minZ, maxZ):
    """Draw a wireframe box with the given axis extents and print the box
    parameters formatted for LeDock, AutoDock Vina and AutoDock.

    Edges are colored by direction (red=X, green=Y, blue=Z).  A uniquely
    named CGO object "box_<rand>" is created and zoomed onto.

    :returns: the name of the created box object.
    """
    linewidth = 3.0
    # Arguments may arrive as strings when typed in the PyMOL console.
    minX = float(minX)
    minY = float(minY)
    minZ = float(minZ)
    maxX = float(maxX)
    maxY = float(maxY)
    maxZ = float(maxZ)
    showaxes(minX, minY, minZ)
    # CGO line list: each vertex pair below is one box edge; the trailing
    # #n comments number the 8 box corners.
    boundingBox = [
        LINEWIDTH, float(linewidth),
        BEGIN, LINES,
        # x lines
        COLOR, 1.0, 0.0, 0.0,  #red
        VERTEX, minX, minY, minZ,  #1
        VERTEX, maxX, minY, minZ,  #5
        VERTEX, minX, maxY, minZ,  #3
        VERTEX, maxX, maxY, minZ,  #7
        VERTEX, minX, maxY, maxZ,  #4
        VERTEX, maxX, maxY, maxZ,  #8
        VERTEX, minX, minY, maxZ,  #2
        VERTEX, maxX, minY, maxZ,  #6
        # y lines
        COLOR, 0.0, 1.0, 0.0,  #green
        VERTEX, minX, minY, minZ,  #1
        VERTEX, minX, maxY, minZ,  #3
        VERTEX, maxX, minY, minZ,  #5
        VERTEX, maxX, maxY, minZ,  #7
        VERTEX, minX, minY, maxZ,  #2
        VERTEX, minX, maxY, maxZ,  #4
        VERTEX, maxX, minY, maxZ,  #6
        VERTEX, maxX, maxY, maxZ,  #8
        # z lines
        COLOR, 0.0, 0.0, 1.0,  #blue
        VERTEX, minX, minY, minZ,  #1
        VERTEX, minX, minY, maxZ,  #2
        VERTEX, minX, maxY, minZ,  #3
        VERTEX, minX, maxY, maxZ,  #4
        VERTEX, maxX, minY, minZ,  #5
        VERTEX, maxX, minY, maxZ,  #6
        VERTEX, maxX, maxY, minZ,  #7
        VERTEX, maxX, maxY, maxZ,  #8
        END
    ]
    # Pick a name not already used in the current PyMOL session.
    boxName = "box_" + str(randint(0, 10000))
    while boxName in cmd.get_names():
        boxName = "box_" + str(randint(0, 10000))
    cmd.load_cgo(boundingBox, boxName)
    SizeX = maxX - minX
    SizeY = maxY - minY
    SizeZ = maxZ - minZ
    CenterX = (maxX + minX)/2
    CenterY = (maxY + minY)/2
    CenterZ = (maxZ + minZ)/2
    BoxCode = "BoxCode(" + boxName + ") = showbox %0.1f, %0.1f, %0.1f, %0.1f, %0.1f, %0.1f" % (minX, maxX, minY, maxY, minZ, maxZ)
    # output LeDock input file
    LeDockBox = "*********LeDock Binding Pocket*********\n" + \
        "Binding pocket\n%.1f %.1f\n%.1f %.1f\n%.1f %.1f\n" % (minX, maxX, minY, maxY, minZ, maxZ)
    # output AutoDock Vina input file
    VinaBox = "*********AutoDock Vina Binding Pocket*********\n" + \
        "--center_x %.1f --center_y %.1f --center_z %.1f --size_x %.1f --size_y %.1f --size_z %.1f\n" % (CenterX, CenterY, CenterZ, SizeX, SizeY, SizeZ)
    # output AutoDock box information
    # add this function in 2016-6-25 by mwxiao
    # npts uses the standard 0.375 A AutoDock grid spacing.
    AutoDockBox = "*********AutoDock Grid Option*********\n" + \
        "npts %d %d %d # num. grid points in xyz\n" % (SizeX/0.375, SizeY/0.375, SizeZ/0.375) + \
        "spacing 0.375 # spacing (A)\n" + \
        "gridcenter %.3f %.3f %.3f # xyz-coordinates or auto\n" % (CenterX, CenterY, CenterZ)
    printf(VinaBox)
    printf(AutoDockBox)
    printf(LeDockBox)
    printf(BoxCode)
    cmd.zoom(boxName)
    #cmd.show('surface')
    return boxName
def getbox(selection = "(sele)", extending = 5.0):
    """Draw a docking box around *selection*, padded by *extending*
    angstroms on every side, and zoom onto it.

    The selection is also rendered as spheres so its extent is visible.
    """
    cmd.hide("spheres")
    cmd.show("spheres", selection)
    pad = float(extending)
    (lo, hi) = cmd.get_extent(selection)
    box_name = showbox(lo[0] - pad, hi[0] + pad,
                       lo[1] - pad, hi[1] + pad,
                       lo[2] - pad, hi[2] + pad)
    cmd.zoom(box_name)
    return
# remove ions
# remove ions
def removeions():
    """Remove common hetero-atom ions (PO4, SO4, ZN, CA, MG, CL) from the
    loaded structures, then drop the temporary selection."""
    cmd.select("Ions", "((resn PO4) | (resn SO4) | (resn ZN) | (resn CA) | (resn MG) | (resn CL)) & hetatm")
    cmd.remove("Ions")
    cmd.delete("Ions")
    return
# autodedect box
# autodedect box
def autobox(extending = 5.0):
    """Auto-detect a docking box from the hetero atoms of chain A.

    Solvent and common ions are removed first so only ligands remain.
    May fail when chain A has no ligand or too many ligands.
    """
    cmd.remove('solvent')
    removeions()
    cmd.select("ChainAHet","hetatm & chain A")  #found error in pymol 1.8 change "chain a" to "chain A"
    getbox("ChainAHet", extending)
    return
# remove hetatm
# remove hetatm
def rmhet(extending = 5.0):
    """Remove every HETATM record currently loaded.

    NOTE(review): *extending* is accepted but never used — kept only for
    signature compatibility with the other commands; confirm before removing.
    """
    cmd.select("rmhet","hetatm")
    cmd.remove("rmhet")
    return
# getbox from cavity residues that reported in papers
# getbox from cavity residues that reported in papers
def resibox(ResiduesStr = "", extending = 5.0):
    """Draw a box around the given residue selection restricted to chain A.

    :param ResiduesStr: a PyMOL selection expression, e.g. "resi 214+226".
    """
    cmd.select("Residues", ResiduesStr + " & chain A")
    getbox("Residues", extending)
    return
# Expose the functions as PyMOL console commands
# (e.g. "getbox (sele), 6.0" typed directly into PyMOL).
cmd.extend ("getbox", getbox)
cmd.extend ("showbox", showbox)
cmd.extend ("autobox", autobox)
cmd.extend ("resibox", resibox)
cmd.extend ("GetBoxHelp", GetBoxHelp)
cmd.extend ("rmhet", rmhet)
| MengwuXiao/Getbox-PyMOL-Plugin | GetBox Plugin.py | Python | gpl-3.0 | 8,874 | [
"PyMOL"
] | ade43e522ede56b1f60cb4d04073e1dc95466de7d32ec465eca663fad3a98570 |
# -*- coding: utf-8 -*-
import copy
import datetime
import logging
from pymongo import MongoClient
import time
import os
from urlparse import urlsplit, urljoin
import launchpadlib.launchpad
from launchpadlib.credentials import Consumer
from launchpadlib.credentials import authorize_token_page
from launchpadlib.uris import (LPNET_SERVICE_ROOT, STAGING_SERVICE_ROOT,
lookup_service_root)
from lazr.restfulclient.authorize.oauth import SystemWideConsumer
from lazr.restfulclient.resource import ServiceRoot
from bug import Bug
from launchpad_reporting.db.util import serialize_bug
from project import Project
from ttl_cache import ttl_cache
LOG = logging.getLogger(__name__)
def authorization_url(web_root, request_token,
                      allow_access_levels=("DESKTOP_INTEGRATION",)):
    """Return the authorization URL for a request token.

    This is the URL the end-user must visit to authorize the token.

    :param web_root: base URL of the Launchpad web UI.
    :param request_token: the OAuth request token string.
    :param allow_access_levels: access levels offered to the user; may be
        ``None``.  The default is a tuple (fixed from a mutable-list
        default argument, which is shared between calls in Python).
    """
    allow_access_levels = allow_access_levels or []
    page = "%s?oauth_token=%s" % (authorize_token_page, request_token)
    allow_permission = "&allow_permission="
    if len(allow_access_levels) > 0:
        page += (
            allow_permission
            + allow_permission.join(allow_access_levels))
    return urljoin(web_root, page)
class SimpleLaunchpad(ServiceRoot):
    """Custom Launchpad API class.

    Provides a simplified Launchpad authentication path that bypasses the
    complex RequestTokenAuthorizationEngine machinery: the caller supplies
    ready-made credentials and only the OAuth consumer is attached here.
    """

    DEFAULT_VERSION = '1.0'

    # Map top-level collection names to launchpadlib entry-point classes,
    # then inherit everything lazr.restfulclient already knows about.
    RESOURCE_TYPE_CLASSES = {
        'bugs': launchpadlib.launchpad.BugSet,
        'distributions': launchpadlib.launchpad.DistributionSet,
        'people': launchpadlib.launchpad.PersonSet,
        'project_groups': launchpadlib.launchpad.ProjectGroupSet,
        'projects': launchpadlib.launchpad.ProjectSet,
    }
    RESOURCE_TYPE_CLASSES.update(ServiceRoot.RESOURCE_TYPE_CLASSES)

    def __init__(self, credentials, service_root=STAGING_SERVICE_ROOT,
                 cache=None, timeout=None, proxy_info=None,
                 version=DEFAULT_VERSION):
        service_root = lookup_service_root(service_root)
        # Reject service roots that already embed the API version name.
        if (service_root.endswith(version) or
                service_root.endswith(version + '/')):
            error = ("It looks like you're using a service root that "
                     "incorporates the name of the web service version "
                     '("%s"). Please use one of the constants from '
                     "launchpadlib.uris instead, or at least remove "
                     "the version name from the root URI." % version)
            raise ValueError(error)
        super(SimpleLaunchpad, self).__init__(
            credentials, service_root, cache, timeout, proxy_info, version)

    @classmethod
    def set_credentials_consumer(cls, credentials, consumer_name):
        """Attach an OAuth consumer to *credentials*.

        *consumer_name* may already be a Consumer instance, in which case
        it is used as-is.
        """
        if isinstance(consumer_name, Consumer):
            consumer = consumer_name
        else:
            # Create a system-wide consumer. lazr.restfulclient won't
            # do this automatically, but launchpadlib's default is to
            # do a desktop-wide integration.
            consumer = SystemWideConsumer(consumer_name)
        credentials.consumer = consumer

    @classmethod
    def login_with(cls, credentials, application_name=None,
                   service_root=STAGING_SERVICE_ROOT,
                   launchpadlib_dir=None, timeout=None, proxy_info=None,
                   allow_access_levels=None, max_failed_attempts=None,
                   version=DEFAULT_VERSION, consumer_name=None):
        """Build a SimpleLaunchpad from pre-existing *credentials*.

        Mirrors launchpadlib's ``Launchpad.login_with`` signature but skips
        the interactive authorization flow entirely.
        """
        (service_root, launchpadlib_dir, cache_path,
         service_root_dir) = cls._get_paths(service_root, launchpadlib_dir)
        if (application_name is None and consumer_name is None):
            # Message fixed: the original concatenated string literals were
            # missing a space ("...application_name orconsumer_name...").
            raise ValueError("At least one of application_name or "
                             "consumer_name must be provided.")
        cls.set_credentials_consumer(credentials, consumer_name)
        return cls(credentials, service_root, cache_path, timeout, proxy_info,
                   version)

    @classmethod
    def _get_paths(cls, service_root, launchpadlib_dir=None):
        """Resolve the service root URL and the per-root cache directories.

        :returns: (service_root, launchpadlib_dir, cache_path,
                   service_root_dir)
        """
        if launchpadlib_dir is None:
            launchpadlib_dir = os.path.join('~', '.launchpadlib')
        launchpadlib_dir = os.path.expanduser(launchpadlib_dir)
        if launchpadlib_dir[:1] == '~':
            raise ValueError("Must set $HOME or pass 'launchpadlib_dir' to "
                             "indicate location to store cached data")
        if not os.path.exists(launchpadlib_dir):
            # 0o700: private to the owner.  Fixed from the Python-2-only
            # octal literal 0700, which is a syntax error on Python 3
            # (0o700 is accepted from Python 2.6 onwards).
            os.makedirs(launchpadlib_dir, 0o700)
        os.chmod(launchpadlib_dir, 0o700)
        # Determine the real service root.
        service_root = lookup_service_root(service_root)
        # Each service root has its own cache and credential dirs.
        scheme, host_name, path, query, fragment = urlsplit(
            service_root)
        service_root_dir = os.path.join(launchpadlib_dir, host_name)
        cache_path = os.path.join(service_root_dir, 'cache')
        if not os.path.exists(cache_path):
            os.makedirs(cache_path, 0o700)
        return (service_root, launchpadlib_dir, cache_path, service_root_dir)
class LaunchpadAnonymousData(object):
    """Read-only access to Launchpad bug data.

    Bug queries are served from a local MongoDB mirror (*bugs_db*, one
    collection per project); the anonymous Launchpad API login is used
    for project metadata and live task searches.
    """

    # Logical status group name -> concrete Launchpad bug statuses.
    BUG_STATUSES = {"New": ["New"],
                    "Incomplete": ["Incomplete"],
                    "Open": ["Triaged", "In Progress", "Confirmed"],
                    "Closed": ["Fix Committed", "Fix Released",
                               "Won't Fix", "Invalid", "Expired",
                               "Opinion", "Incomplete"],
                    "All": ["New", "Incomplete", "Invalid",
                            "Won't Fix", "Confirmed", "Triaged",
                            "In Progress", "Fix Committed",
                            "Fix Released",
                            "Opinion", "Expired"],
                    "NotDone": ["New", "Confirmed", "Triaged", "In Progress"],
                    "Fixed": ["Fix Committed", "Fix Released"]}
    # NOTE(review): this collects the status *lists* themselves, so
    # BUG_STATUSES_ALL is a list of lists (with duplicates across groups).
    BUG_STATUSES_ALL = []
    for k in BUG_STATUSES:
        BUG_STATUSES_ALL.append(BUG_STATUSES[k])

    def __init__(
        self,
        bugs_db,
        cachedir="~/.launchpadlib/cache/",
    ):
        # bugs_db: MongoDB database object; collections are project names.
        self.bugs_db = bugs_db
        self.launchpad = launchpadlib.launchpad.Launchpad.login_anonymously(
            'launchpad-reporting-www', service_root=LPNET_SERVICE_ROOT,
            launchpadlib_dir=cachedir)

    def _get_project(self, project_name):
        # Raw launchpadlib project object for *project_name*.
        return self.launchpad.projects[project_name]

    @ttl_cache(minutes=5)
    def get_project(self, project_name):
        """Return the Launchpad project wrapped in the local Project helper."""
        return Project(self._get_project(project_name))

    def get_bugs(self, project_name, statuses, milestone_name=None,
                 tags=[], importance=[], **kwargs):
        """Query bugs from the Mongo mirror and wrap each record in Bug.

        When *tags* is given, kwargs["condition"] truthy inverts the match
        ($nin instead of $in).

        NOTE(review): *tags* and *importance* are mutable default arguments;
        they are not mutated here, but callers should not rely on them.
        """
        project = self.bugs_db[project_name]
        search = [{"status": {"$in": statuses}}]
        if milestone_name:
            search.append({'milestone': milestone_name})
        if importance:
            search.append({"importance": {"$in": importance}})
        if tags:
            if kwargs.get("condition"):
                search.append({"tags": {"$nin": tags}})
            else:
                search.append({"tags": {"$in": tags}})
        return [Bug(r) for r in project.find({"$and": search})]

    @ttl_cache(minutes=5)
    def get_all_bugs(self, project, milestone=None):
        """Live Launchpad search of all bug tasks modified since the last
        recorded mirror update (all statuses, duplicates included)."""
        def timestamp_to_utc_date(timestamp):
            # Epoch seconds -> 'YYYY-MM-DD' in UTC.
            return (datetime.datetime.utcfromtimestamp(timestamp).
                    strftime('%Y-%m-%d'))
        update_time = None
        # Best-effort: fall back to a full search when the update marker
        # collection is missing or malformed (bare except kept as-is).
        update_time = None
        try:
            update_time = self.bugs_db.update_date.find_one()["Update_date"]
            update_time = timestamp_to_utc_date(update_time)
        except:
            pass
        return project.searchTasks(status=self.BUG_STATUSES["All"],
                                   milestone=milestone,
                                   modified_since=update_time,
                                   omit_duplicates=False)

    @ttl_cache(minutes=5)
    def get_bug_targets(self, bug):
        """Return the set of top-level target names (project part before
        any '/series' suffix) of a bug task and its related tasks."""
        targets = set()
        targets.add(bug.bug_target_name.split('/')[0])
        for task in bug.related_tasks:
            targets.add(task.bug_target_name.split('/')[0])
        return targets

    @staticmethod
    def dump_object(object):
        """Debug helper: print every attribute of *object* (Python 2 only —
        uses print statements)."""
        for name in dir(object):
            try:
                value = getattr(object, name)
            except AttributeError:
                value = "n/a"
            try:
                print name + " --- " + str(value)
            except ValueError:
                print name + " --- " + "n/a"

    def common_milestone(self, pr_a, pr_b):
        # Milestones present in both input sequences.
        return list(set(pr_a) & set(pr_b))

    def bugs_ids(self, tag, milestone):
        """Return de-duplicated bug counts across the "fuel" and "mos"
        projects for *milestone*/*tag*: done, total, and open high/critical."""
        sum_without_duplicity = {"done": "",
                                 "total": "",
                                 "high": ""}

        def count(milestone, tag, bug_type, importance):
            # Count unique bug ids across both projects.
            bugs_fuel = self.get_bugs("fuel", self.BUG_STATUSES[bug_type],
                                      milestone, tag, importance)
            bugs_mos = self.get_bugs("mos", self.BUG_STATUSES[bug_type],
                                     milestone, tag, importance)
            ids = []
            for bug in bugs_fuel:
                ids.append(bug.id)
            for bug in bugs_mos:
                ids.append(bug.id)
            return len(list(set(ids)))
        sum_without_duplicity["done"] = count(milestone, tag, "Closed", None)
        sum_without_duplicity["total"] = count(milestone, tag, "All", None)
        sum_without_duplicity["high"] = count(milestone, tag, "NotDone",
                                              ["Critical", "High"])
        return sum_without_duplicity

    def common_statistic_for_project(self, project_name, milestone_name, tag):
        """Return headline bug counters for one project page.

        *milestone_name* is expected to be a list (used with Mongo "$in").
        The "created/fixed less than week/month" flags are precomputed
        fields stored on the mirrored bug documents.
        """
        page_statistic = dict.fromkeys(["total",
                                        "critical",
                                        "new_for_week",
                                        "fixed_for_week",
                                        "new_for_month",
                                        "fixed_for_month",
                                        "unresolved"])

        def criterion(dict_, tag):
            # When a tag is given, append a tag clause to a deep copy of
            # the query (the originals are shared literals).
            if tag:
                internal = copy.deepcopy(dict_)
                internal["$and"].append({"tags": {"$in": ["{0}".format(tag)]}})
                return internal
            return dict_
        page_statistic["total"] = self.bugs_db['{0}'.format(project_name)].find(
            criterion(
                {"$and": [{"milestone": {"$in": milestone_name}}]},
                tag)).count()
        page_statistic["critical"] = self.bugs_db[
            '{0}'.format(project_name)
        ].find(
            criterion(
                {"$and": [{"status": {"$in": self.BUG_STATUSES["NotDone"]}},
                          {"importance": "Critical"},
                          {"milestone": {"$in": milestone_name}}]},
                tag)).count()
        page_statistic["unresolved"] = self.bugs_db[
            '{0}'.format(project_name)
        ].find(
            criterion(
                {"$and": [{"status": {"$in": self.BUG_STATUSES["NotDone"]}},
                          {"milestone": {"$in": milestone_name}}]},
                tag)).count()
        page_statistic["new_for_week"] = self.bugs_db[
            '{0}'.format(project_name)
        ].find(
            criterion({"$and": [
                {"status": {"$in": self.BUG_STATUSES["New"]}},
                {"created less than week": {"$ne": False}},
                {"milestone": {"$in": milestone_name}}]}, tag)).count()
        page_statistic["fixed_for_week"] = self.bugs_db[
            '{0}'.format(project_name)
        ].find(
            criterion({"$and": [
                {"status": {"$in": self.BUG_STATUSES["Fixed"]}},
                {"fixed less than week": {"$ne": False}},
                {"milestone": {"$in": milestone_name}}]}, tag)).count()
        page_statistic["new_for_month"] = self.bugs_db[
            '{0}'.format(project_name)
        ].find(
            criterion({"$and": [
                {"status": {"$in": self.BUG_STATUSES["New"]}},
                {"created less than month": {"$ne": False}},
                {"milestone": {"$in": milestone_name}}]}, tag)).count()
        page_statistic["fixed_for_month"] = self.bugs_db[
            '{0}'.format(project_name)
        ].find(
            criterion({"$and": [{"status": {"$in": self.BUG_STATUSES["Fixed"]}},
                                {"fixed less than month": {"$ne": False}},
                                {"milestone": {"$in": milestone_name}}]},
                      tag)).count()
        return page_statistic

    def get_update_time(self):
        """Return the mirror's last update timestamp (epoch seconds),
        falling back to "now" when the marker is missing."""
        update_time = time.time()
        try:
            update_time = self.bugs_db.update_date.find_one()["Update_date"]
        except:
            pass
        return update_time

    def filter_bugs(self, bugs, filters, teams_data):
        """Apply UI filters in place to each team's bug list in
        bugs["DATA"]: status/importance/criteria, tags, assignee (resolved
        through *teams_data*), and per-state date ranges.

        :returns: the same *bugs* structure, mutated.
        """
        def _filter(bugs, parameter):
            # Keep bugs whose attribute value is among filters[parameter].
            filtered_bugs = []
            for b in bugs:
                if getattr(b, parameter) in filters[parameter]:
                    filtered_bugs.append(b)
            return filtered_bugs

        def transform_date(str):
            # 'YYYY-MM-DD' -> datetime (None passes through as None).
            if str is not None:
                return datetime.datetime.strptime(str, "%Y-%m-%d")
        for team in bugs["DATA"]:
            if filters['status']:
                team["bugs"] = _filter(team["bugs"], 'status')
            if filters['importance']:
                team["bugs"] = _filter(team["bugs"], 'importance')
            if filters['criteria']:
                team["bugs"] = _filter(team["bugs"], 'criteria')
            if filters['tags']:
                # Keep bugs sharing at least one tag with the filter.
                filtered_bugs = []
                for b in team["bugs"]:
                    print(getattr(b, 'tags'), filters['tags'])
                    if set(getattr(b, 'tags')) & set(filters['tags']):
                        filtered_bugs.append(b)
                        print(True)
                team["bugs"] = filtered_bugs
            if filters['assignee']:
                # Flatten teams_data into {lead: members} and a full
                # people list; 'unknown' matches anyone outside it.
                new_teams_data = {}
                for x in teams_data.values():
                    new_teams_data.update(x)
                all_people = new_teams_data.keys()
                for vals in new_teams_data.values():
                    all_people.extend(vals)
                newbugs = []
                for b in team["bugs"]:
                    if ('unknown' in filters['assignee'] and b.assignee
                            not in all_people):
                        newbugs.append(b)
                    for name, lst in new_teams_data.items():
                        if (name in filters['assignee'] and
                                (b.assignee == name or b.assignee in lst)):
                            if b not in newbugs:
                                newbugs.append(b)
                team["bugs"] = newbugs
            # Date-range filtering per lifecycle state: a bug must fall
            # inside every requested [from, to] window; a requested window
            # on a state the bug never reached excludes the bug.
            date_state = ["created", "triaged", "fix_committed", "fix_released"]
            filtered_bugs = []
            for bug in team["bugs"]:
                satisfies = True
                for state in date_state:
                    bug_date = getattr(bug, 'date_{0}'.format(state))
                    if bug_date is not None:
                        if (filters[state+"_from"] is not None and
                                bug_date < transform_date(filters[state+"_from"])):
                            satisfies = False
                            break
                        if (filters[state+"_to"] is not None and
                                bug_date > transform_date(filters[state+"_to"])):
                            satisfies = False
                            break
                    else:
                        if (filters[state+"_from"] is not None
                                or filters[state+"_to"] is not None):
                            satisfies = False
                            break
                if satisfies:
                    filtered_bugs.append(bug)
            team["bugs"] = filtered_bugs
        return bugs
class LaunchpadData(LaunchpadAnonymousData):
    """Authenticated variant of LaunchpadAnonymousData.

    Uses SimpleLaunchpad with caller-supplied credentials so that private
    bug tasks become visible; queries go straight to the Launchpad API
    rather than the Mongo mirror.
    """

    # Launchpad "information_type" values that mark non-public bugs.
    PRIVATE_BUG_TYPES = ['Private', 'Private Security', 'Proprietary']

    def __init__(
        self,
        db,
        credentials,
    ):
        # NOTE(review): *db* is accepted but never stored — unlike the
        # parent class this variant does not set self.bugs_db; confirm
        # callers never reach the mirror-backed methods through it.
        self.launchpad = SimpleLaunchpad.login_with(
            credentials, 'launchpad-reporting-www',
            service_root=LPNET_SERVICE_ROOT)

    @ttl_cache(minutes=5)
    def serialize_private(self, task):
        # Cached serialization of a private bug task to a plain dict.
        return serialize_bug(task)

    def get_bugs(self, project_name, statuses, milestone_name=None,
                 tags=[], importance=[], **kwargs):
        """Fetch private bug tasks from Launchpad and filter them locally
        by status, milestone, importance and tags (kwargs["condition"]
        truthy inverts the tag match, mirroring the parent class)."""
        result_bugs = self.get_all_bugs_by(project_name, milestone_name)
        result_bugs = [task for task in result_bugs
                       if task.status in statuses]
        if milestone_name:
            # milestone_link ends with ".../<milestone name>".
            result_bugs = [task for task in result_bugs
                           if task.milestone_link.split('/')[-1] in milestone_name]
        if importance:
            result_bugs = [task for task in result_bugs
                           if task.importance in importance]
        if tags:
            if kwargs.get("condition"):
                result_bugs = [task for task in result_bugs
                               if len(set(task.bug.tags).difference(set(tags))) > 0]
            else:
                result_bugs = [task for task in result_bugs
                               if len(set(task.bug.tags).intersection(set(tags))) > 0]
        return [Bug(self.serialize_private(bug)) for bug in result_bugs]

    @ttl_cache(minutes=5)
    def get_all_bugs(self, project):
        """Return the private tasks among all tasks of *project*'s active
        milestones ("Private" substring also matches "Private Security")."""
        project_tasks = project.searchTasks(status=self.BUG_STATUSES["All"],
                                            milestone=[
                                                i.self_link
                                                for i in project.active_milestones])
        private_tasks = [task for task
                         in project_tasks
                         if "Private" in task.bug.information_type]
        return private_tasks

    @ttl_cache(minutes=5)
    def get_all_bugs_by(self, project_name, milestone):
        """Search private-type tasks of *project_name* for the given
        milestone names (converted to Launchpad API milestone URLs).

        NOTE(review): on failure the exception is printed (Python 2
        print statement) and None is returned implicitly.
        """
        project = self.launchpad.projects[project_name]
        try:
            milestone = [unicode('https://api.launchpad.net/1.0/{0}/+milestone/{1!s}').format(
                project_name, ms) for ms in milestone]
            return project.searchTasks(status=self.BUG_STATUSES["All"],
                                       milestone=milestone,
                                       information_type=self.PRIVATE_BUG_TYPES)
        except Exception as e:
            print e
| Mirantis/launchpad-reports-summary | launchpad_reporting/launchpad/lpdata.py | Python | mit | 18,939 | [
"VisIt"
] | 53b4f37ac29917295cadb78d079dd49fcc9b58cebd50f7fa38be901a1b36eb37 |
# $HeadURL$
__RCSID__ = "$Id$"
import types
from DIRAC import S_OK, S_ERROR, gConfig
from DIRAC.AccountingSystem.DB.AccountingDB import AccountingDB
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.Core.Utilities import Time
from DIRAC.Core.Utilities.ThreadScheduler import gThreadScheduler
# Module-level singleton holding the AccountingDB connection; False until
# initializeDataStoreHandler() runs at service start-up.
gAccountingDB = False

def initializeDataStoreHandler( serviceInfo ):
    """Service initialization hook called by the DIRAC DISET framework.

    Creates the AccountingDB singleton, enables its auto-compaction,
    marks records left "taken" by a previous run as not taken, and
    schedules loadPendingRecords to run every 60 seconds.

    :returns: S_OK on success, or the S_ERROR from the DB call.
    """
    global gAccountingDB
    gAccountingDB = AccountingDB()
    gAccountingDB.autoCompactDB()
    result = gAccountingDB.markAllPendingRecordsAsNotTaken()
    if not result[ 'OK' ]:
        return result
    gThreadScheduler.addPeriodicTask( 60, gAccountingDB.loadPendingRecords )
    return S_OK()
class DataStoreHandler( RequestHandler ):
    """DISET request handler exposing the accounting data store.

    Accounting types are stored per setup under the name
    "<setup>_<typeName>"; administrative operations loop over every setup
    found under /DIRAC/Setups.  The types_<method> lists are the DISET
    argument-type declarations for each exported call.
    """

    types_registerType = [ types.StringType, types.ListType, types.ListType, types.ListType ]
    def export_registerType( self, typeName, definitionKeyFields, definitionAccountingFields, bucketsLength ):
        """
        Register a new type. (Only for all powerful admins)
        (Bow before me for I am admin! :)
        """
        retVal = gConfig.getSections( "/DIRAC/Setups" )
        if not retVal[ 'OK' ]:
            return retVal
        errorsList = []
        # Register the type once per setup; collect failures instead of
        # aborting so every setup gets attempted.
        for setup in retVal[ 'Value' ]:
            setupTypeName = "%s_%s" % ( setup, typeName )
            retVal = gAccountingDB.registerType( setupTypeName, definitionKeyFields, definitionAccountingFields, bucketsLength )
            if not retVal[ 'OK' ]:
                errorsList.append( retVal[ 'Message' ] )
        if errorsList:
            return S_ERROR( "Error while registering type:\n %s" % "\n ".join( errorsList ) )
        return S_OK()

    types_setBucketsLength = [ types.StringType, types.ListType ]
    def export_setBucketsLength( self, typeName, bucketsLength ):
        """
        Change the buckets Length. (Only for all powerful admins)
        (Bow before me for I am admin! :)
        """
        retVal = gConfig.getSections( "/DIRAC/Setups" )
        if not retVal[ 'OK' ]:
            return retVal
        errorsList = []
        for setup in retVal[ 'Value' ]:
            setupTypeName = "%s_%s" % ( setup, typeName )
            retVal = gAccountingDB.changeBucketsLength( setupTypeName, bucketsLength )
            if not retVal[ 'OK' ]:
                errorsList.append( retVal[ 'Message' ] )
        if errorsList:
            return S_ERROR( "Error while changing bucketsLength type:\n %s" % "\n ".join( errorsList ) )
        return S_OK()

    types_regenerateBuckets = [ types.StringType ]
    def export_regenerateBuckets( self, typeName ):
        """
        Recalculate buckets. (Only for all powerful admins)
        (Bow before me for I am admin! :)
        """
        retVal = gConfig.getSections( "/DIRAC/Setups" )
        if not retVal[ 'OK' ]:
            return retVal
        errorsList = []
        for setup in retVal[ 'Value' ]:
            setupTypeName = "%s_%s" % ( setup, typeName )
            retVal = gAccountingDB.regenerateBuckets( setupTypeName )
            if not retVal[ 'OK' ]:
                errorsList.append( retVal[ 'Message' ] )
        if errorsList:
            return S_ERROR( "Error while recalculating buckets for type:\n %s" % "\n ".join( errorsList ) )
        return S_OK()

    types_getRegisteredTypes = []
    def export_getRegisteredTypes( self ):
        """
        Get a list of registered types (Only for all powerful admins)
        (Bow before me for I am admin! :)
        """
        return gAccountingDB.getRegisteredTypes()

    types_deleteType = [ types.StringType ]
    def export_deleteType( self, typeName ):
        """
        Delete accounting type and ALL its contents. VERY DANGEROUS! (Only for all powerful admins)
        (Bow before me for I am admin! :)
        """
        retVal = gConfig.getSections( "/DIRAC/Setups" )
        if not retVal[ 'OK' ]:
            return retVal
        errorsList = []
        for setup in retVal[ 'Value' ]:
            setupTypeName = "%s_%s" % ( setup, typeName )
            retVal = gAccountingDB.deleteType( setupTypeName )
            if not retVal[ 'OK' ]:
                errorsList.append( retVal[ 'Message' ] )
        if errorsList:
            return S_ERROR( "Error while deleting type:\n %s" % "\n ".join( errorsList ) )
        return S_OK()

    types_commit = [ types.StringType, Time._dateTimeType, Time._dateTimeType, types.ListType ]
    def export_commit( self, typeName, startTime, endTime, valuesList ):
        """
        Add a record for a type
        """
        # Records are stored under the client's setup, with epoch times.
        setup = self.serviceInfoDict[ 'clientSetup' ]
        typeName = "%s_%s" % ( setup, typeName )
        startTime = int( Time.toEpoch( startTime ) )
        endTime = int( Time.toEpoch( endTime ) )
        return gAccountingDB.insertRecordThroughQueue( typeName, startTime, endTime, valuesList )

    types_commitRegisters = [ types.ListType ]
    def export_commitRegisters( self, entriesList ):
        """
        Add a bundle of records; each entry is
        ( typeName, startTime, endTime, valuesList ).
        """
        setup = self.serviceInfoDict[ 'clientSetup' ]
        expectedTypes = [ types.StringType, Time._dateTimeType, Time._dateTimeType, types.ListType ]
        # Validate the whole bundle before inserting anything.
        for entry in entriesList:
            if len( entry ) != 4:
                return S_ERROR( "Invalid records" )
            for i in range( len( entry ) ):
                if type( entry[i] ) != expectedTypes[i]:
                    return S_ERROR( "%s field in the records should be %s" % ( i, expectedTypes[i] ) )
        records = []
        for entry in entriesList:
            typeName = "%s_%s" % ( setup, entry[0] )
            startTime = int( Time.toEpoch( entry[1] ) )
            endTime = int( Time.toEpoch( entry[2] ) )
            records.append( ( typeName, startTime, endTime, entry[3] ) )
        return gAccountingDB.insertRecordBundleThroughQueue( records )

    types_compactDB = []
    def export_compactDB( self ):
        """
        Compact the db by grouping buckets
        """
        return gAccountingDB.compactBuckets()

    types_remove = [ types.StringType, Time._dateTimeType, Time._dateTimeType, types.ListType ]
    def export_remove( self, typeName, startTime, endTime, valuesList ):
        """
        Remove a record for a type
        """
        setup = self.serviceInfoDict[ 'clientSetup' ]
        typeName = "%s_%s" % ( setup, typeName )
        startTime = int( Time.toEpoch( startTime ) )
        endTime = int( Time.toEpoch( endTime ) )
        return gAccountingDB.deleteRecord( typeName, startTime, endTime, valuesList )

    types_removeRegisters = [ types.ListType ]
    def export_removeRegisters( self, entriesList ):
        """
        Remove a bundle of records; returns the number of records removed.
        """
        setup = self.serviceInfoDict[ 'clientSetup' ]
        expectedTypes = [ types.StringType, Time._dateTimeType, Time._dateTimeType, types.ListType ]
        for entry in entriesList:
            if len( entry ) != 4:
                return S_ERROR( "Invalid records" )
            for i in range( len( entry ) ):
                if type( entry[i] ) != expectedTypes[i]:
                    return S_ERROR( "%s field in the records should be %s" % ( i, expectedTypes[i] ) )
        ok = 0
        for entry in entriesList:
            typeName = "%s_%s" % ( setup, entry[0] )
            startTime = int( Time.toEpoch( entry[1] ) )
            endTime = int( Time.toEpoch( entry[2] ) )
            record = entry[3]
            result = gAccountingDB.deleteRecord( typeName, startTime, endTime, record )
            if not result[ 'OK' ]:
                # NOTE(review): a failed delete returns S_OK with the count
                # of records removed so far, not S_ERROR — presumably a
                # partial-success contract; confirm with callers.
                return S_OK( ok )
            ok += 1
        return S_OK( ok )
| avedaee/DIRAC | AccountingSystem/Service/DataStoreHandler.py | Python | gpl-3.0 | 6,949 | [
"DIRAC"
] | 91d2323f396bf156282fcc844cd581b9583f521d115dc2b8824fce4f0548e7eb |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2013 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
"""
Identify and apply to a product, the defined taxes by
IBPT - Instituto Brasileiro de Planejamento Tributário (Brazilian Institute
of Tributary Planning)
According to Law 12,741 of 12/08/2012 - Taxes in Coupon.
"""
from collections import namedtuple
from kiwi.environ import environ
import csv
from decimal import Decimal
from stoqlib.database.runtime import get_current_branch, new_store
# Lazy cache of IBPT tax rates: NCM code -> {ex code -> TaxInfo}.
# Filled once by load_taxes_csv().
taxes_data = {}
# One row of tax rates: national/imported federal burden, state burden,
# and the IBPT key that ties the table to the company.
TaxInfo = namedtuple('TaxInfo', 'nacionalfederal, importadosfederal, estadual, chave')
def load_taxes_csv():
    """Load the IBPT table for the current branch's state into *taxes_data*.

    CSV fields (semicolon separated):
      - ncm: Mercosur Common Nomenclature code.
      - ex: fiscal exception code of the NCM.
      - tipo: code type ('1' marks NBS service codes, which are skipped).
      - descricao: product name.
      - nacionalfederal: federal tax burden for national products.
      - importadosfederal: federal tax burden for imported products.
      - estadual: state tax burden.
      - municipal: municipal tax burden.
      - vigenciainicio / vigenciafim: validity start/end dates.
      - chave: key associating the downloaded table with the company.
      - versao: version of the rates used for the calculation.
      - fonte: data source.
    """
    # Avoid loading the taxes more than once.
    if taxes_data:
        return

    store = new_store()
    branch = get_current_branch(store)
    address = branch.person.get_main_address()
    state = address.city_location.state

    # Change the version according to the updates of the IBPT tables.
    version = '15.1.C'
    filename = environ.get_resource_filename('stoq', 'csv', 'ibpt_tables',
                                             'TabelaIBPTax%s%s.csv'
                                             % (state, version))
    # Use a context manager so the CSV file handle is closed (the original
    # leaked the handle returned by open()).
    with open(filename, "r") as csv_data:
        for (ncm, ex, tipo, descricao, nacionalfederal, importadosfederal,
             estadual, municipal, vigenciainicio, vigenciafim, chave,
             versao, fonte) in csv.reader(csv_data, delimiter=';'):
            # Ignore service codes (NBS - Brazilian Nomenclature of Services)
            if tipo == '1':
                continue
            tax_dict = taxes_data.setdefault(ncm, {})
            tax_dict[ex] = TaxInfo(nacionalfederal, importadosfederal,
                                   estadual, chave)
class IBPTGenerator(object):
    """Build the approximate-tax message required by Law 12,741/2012.

    Given the sale items, accumulates the approximate federal and state
    tax amounts using the IBPT table loaded by load_taxes_csv().
    """

    def __init__(self, items):
        load_taxes_csv()
        self.items = items

    def _format_ex(self, ex_tipi):
        """Normalize an NCM fiscal-exception code to the 2-digit string
        used as key in *taxes_data* ('' when there is no exception)."""
        if not ex_tipi:
            return ''
        return '%02d' % int(ex_tipi)

    def _load_tax_values(self, item):
        """Return the TaxInfo for *item*, an all-zero TaxInfo when its NCM
        is unknown, or None when the sellable has no product."""
        assert item
        product = item.sellable.product
        if not product:
            return None
        # The exception key is computed up front (it may raise ValueError
        # for malformed codes, matching the original behavior).
        ex_key = self._format_ex(product.ex_tipi)
        candidates = taxes_data.get(product.ncm or '', {})
        if not candidates:
            return TaxInfo('0', '0', '0', '0')
        if len(candidates) == 1:
            return candidates['']
        # Several exception entries: prefer the exact one, fall back to
        # the no-exception entry.
        return candidates.get(ex_key) or candidates['']

    def _calculate_federal_tax(self, item, tax_values):
        """Approximate federal tax for *item*: national rate for Brazilian
        ICMS origins (0, 3, 4, 5, 8), imported rate otherwise.

        :returns: a Decimal amount (0 when *tax_values* is None).
        """
        if tax_values is None:
            return Decimal("0")
        template = item.sellable.product.icms_template
        # Products without fiscal information default to national origin.
        origin = template.orig if template else 0
        if origin in (0, 3, 4, 5, 8):
            rate = Decimal(tax_values.nacionalfederal)
        else:
            rate = Decimal(tax_values.importadosfederal)
        return item.price * item.quantity * (rate / 100)

    def _calculate_state_tax(self, item, tax_values):
        """Approximate state tax for *item* (0 when *tax_values* is None)."""
        if tax_values is None:
            return Decimal("0")
        return item.price * item.quantity * (Decimal(tax_values.estadual) / 100)

    def get_ibpt_message(self):
        """Return the coupon message with the accumulated federal and
        state tax amounts and the IBPT table key."""
        federal_total = state_total = 0
        key = "0"
        for item in self.items:
            tax_values = self._load_tax_values(item)
            federal_total += self._calculate_federal_tax(item, tax_values)
            state_total += self._calculate_state_tax(item, tax_values)
            if tax_values:
                key = tax_values.chave
        return ("Trib aprox R$: %0.2f Federal e %0.2f Estadual\n"
                "Fonte: IBPT/FECOMERCIO RJ %s " % (federal_total,
                                                   state_total, key))
def generate_ibpt_message(items):
    """Convenience wrapper: build the IBPT coupon message for *items*."""
    return IBPTGenerator(items).get_ibpt_message()
| tiagocardosos/stoq | stoqlib/lib/ibpt.py | Python | gpl-2.0 | 6,098 | [
"VisIt"
] | 1d7cf7b849cdc4277444420560dd7df80e883a36faf2b94376809989f780dc83 |
#!/usr/bin/python
#
# Inline, Crossline, True dip, Dip Azimuth and Plane using the Gradient Structure Tensor
# Gradients calculated using Kroon's 3 point derivative filter
#
import sys,os
import numpy as np
# Import the module with the I/O scaffolding of the External Attribute
#
sys.path.insert(0, os.path.join(sys.path[0], '..'))
import extattrib as xa
import extlib as xl
#
# These are the attribute parameters
#
# Attribute parameter declarations consumed by the External Attribute
# scaffold (extattrib): one input volume, five dip/azimuth outputs, a
# symmetric vertical sample window and a symmetric inline/crossline stepout.
xa.params = {
    'Inputs': ['Input'],
    'Output': ['Crl_dip', 'Inl_dip', 'True Dip', 'Dip Azimuth', 'Cplane'],
    'ZSampMargin' : {'Value':[-2,2], 'Minimum': [-2,2], 'Symmetric': True},
    'StepOut' : {'Value': [2,2], 'Minimum':[2,2], 'Symmetric': True},
    'Help': 'http://waynegm.github.io/OpendTect-Plugin-Docs/external_attributes/DipandAzimuth.html'
}
#
# Define the compute function
#
def doCompute():
    """Dip/azimuth estimation via the gradient structure tensor.

    Runs forever: each iteration pulls one block of traces from OpendTect
    through xa.doInput() and pushes the five computed attributes back
    through xa.doOutput().
    """
    # Analysis window size: inline traces, crossline traces, z samples.
    xs = xa.SI['nrinl']
    ys = xa.SI['nrcrl']
    zs = xa.params['ZSampMargin']['Value'][1] - xa.params['ZSampMargin']['Value'][0] + 1
    # Smoothing kernel is 2 smaller per axis because the 3-point
    # derivative filter consumes one sample at each edge.
    kernel = xl.getGaussian(xs - 2, ys - 2, zs - 2)
    # Convert sample/trace spacing into the survey's dip units.
    inlFactor = xa.SI['zstep'] / xa.SI['inldist'] * xa.SI['dipFactor']
    crlFactor = xa.SI['zstep'] / xa.SI['crldist'] * xa.SI['dipFactor']
    while True:
        xa.doInput()
        g = xa.Input['Input']
        #
        # Compute gradients (Kroon's 3-point derivative filter)
        gx = xl.kroon3(g, axis=0)
        gy = xl.kroon3(g, axis=1)
        gz = xl.kroon3(g, axis=2)
        #
        # Inner product of gradients; edges trimmed where the
        # derivative filter output is invalid.
        gx2 = gx[1:xs-1, 1:ys-1, :] * gx[1:xs-1, 1:ys-1, :]
        gy2 = gy[1:xs-1, 1:ys-1, :] * gy[1:xs-1, 1:ys-1, :]
        gz2 = gz[1:xs-1, 1:ys-1, :] * gz[1:xs-1, 1:ys-1, :]
        gxgy = gx[1:xs-1, 1:ys-1, :] * gy[1:xs-1, 1:ys-1, :]
        gxgz = gx[1:xs-1, 1:ys-1, :] * gz[1:xs-1, 1:ys-1, :]
        gygz = gy[1:xs-1, 1:ys-1, :] * gz[1:xs-1, 1:ys-1, :]
        #
        # Outer gaussian smoothing of the tensor components
        rgx2 = xl.sconvolve(gx2, kernel)
        rgy2 = xl.sconvolve(gy2, kernel)
        rgz2 = xl.sconvolve(gz2, kernel)
        rgxgy = xl.sconvolve(gxgy, kernel)
        rgxgz = xl.sconvolve(gxgz, kernel)
        rgygz = xl.sconvolve(gygz, kernel)
        #
        # Form the (symmetric 3x3) structure tensor, one per z sample.
        T = np.rollaxis(np.array([[rgx2, rgxgy, rgxgz],
                                  [rgxgy, rgy2, rgygz],
                                  [rgxgz, rgygz, rgz2]]), 2)
        #
        # Eigen-decomposition: the dominant eigenvector is the local
        # plane normal, from which the dips are derived.
        evals, evecs = np.linalg.eigh(T)
        ndx = evals.argsort()
        evecs = evecs[np.arange(0, T.shape[0], 1), :, ndx[:, 2]]
        e1 = evals[np.arange(0, T.shape[0], 1), ndx[:, 2]]
        e2 = evals[np.arange(0, T.shape[0], 1), ndx[:, 1]]
        xa.Output['Crl_dip'] = -evecs[:, 1] / evecs[:, 2] * crlFactor
        xa.Output['Inl_dip'] = -evecs[:, 0] / evecs[:, 2] * inlFactor
        xa.Output['True Dip'] = np.sqrt(xa.Output['Crl_dip'] * xa.Output['Crl_dip'] + xa.Output['Inl_dip'] * xa.Output['Inl_dip'])
        xa.Output['Dip Azimuth'] = np.degrees(np.arctan2(xa.Output['Inl_dip'], xa.Output['Crl_dip']))
        # Plane-ness measure from the two largest eigenvalues.
        xa.Output['Cplane'] = (e1 - e2) / e1
        xa.doOutput()
#
# Assign the compute function to the attribute
#
xa.doCompute = doCompute
#
# Do it
#
# Hands control to the external-attribute scaffolding; blocks inside
# doCompute's processing loop until OpendTect closes the stream.
xa.run(sys.argv[1:])
| waynegm/OpendTect-External-Attributes | Python_3/DipAndAzimuth/ex_gradient3_st_dip.py | Python | mit | 2,785 | [
"Gaussian"
] | e2d04b86ec431d34f728099cd217fed9e5e9f2f2e7f9a18f47f8d4c178557f89 |
# Copyright 2020-2021, Anthony Criscione
# Developed for the Center for Phage Technology, Texas A&M University
#
# Distributed under the BSD 3-Clause License, see included LICENSE file
from Bio import SeqIO, SeqFeature
from Bio.SeqRecord import SeqRecord
from Bio.SeqFeature import FeatureLocation, CompoundLocation
from Bio.Seq import Seq, UnknownSeq
from collections import OrderedDict
import sys
#Try/Except blocks used for limited python 2.7 compatibility. Python3 spec is within the try block
try:
from collections.abc import Iterable
except:
from collections import Iterable
try:
import urllib.parse
except:
import urllib
class gffSeqFeature(SeqFeature.SeqFeature):
def __init__(
self,
location=None,
type="",
location_operator="",
strand=None,
id="<unknown id>",
qualifiers=None,
sub_features=None,
ref=None,
ref_db=None,
shift=0,
score=0.0,
source="feature"
):
"""Reimplementation of SeqFeature for use with GFF3 Parsing
Does not remove the sub_feature functionality, as unlike
Genbank, this is baked into the core concept of GFF
"""
if (
location is not None
and not isinstance(location, FeatureLocation)
and not isinstance(location, CompoundLocation)
):
raise TypeError(
"FeatureLocation, CompoundLocation (or None) required for the location"
)
self.location = location
self.type = type
self.shift = shift
self.score = score
self.source = source
if location_operator:
# TODO - Deprecation warning
self.location_operator = location_operator
if strand is not None:
# TODO - Deprecation warning
self.strand = strand
self.id = id
if qualifiers is None:
try:
qualifiers = OrderedDict()
except:
qualifiers = {}
self.qualifiers = qualifiers
if sub_features is None:
sub_features = []
self._sub_features = sub_features
if ref is not None:
# TODO - Deprecation warning
self.ref = ref
if ref_db is not None:
# TODO - Deprecation warning
self.ref_db = ref_db
def _get_subfeatures(self):
"""Get function for the sub_features property (PRIVATE)."""
try:
return self._sub_features
except AttributeError:
return None
def _set_subfeatures(self, value):
"""Set function for the sub_features property (PRIVATE)."""
if isinstance(value, list):
self._sub_features = value
else:
raise ValueError("sub_feature must be a list of gffSeqFeature objects")
sub_features = property(
fget=_get_subfeatures,
fset=_set_subfeatures,
doc="Sub-features for GFF Heirarchy",
)
def _shift(self, offset):
"""Return a copy of the feature with its location shifted (PRIVATE).
The annotation qaulifiers are copied.
"""
for x in self.sub_features:
x._shift(offset)
return gffSeqFeature(
location=self.location._shift(offset),
type=self.type,
location_operator=self.location_operator,
id=self.id,
qualifiers=OrderedDict(self.qualifiers.items()),
sub_features=self.sub_features,
shift=self.shift,
score=self.score,
source=self.source
)
def translate(
self,
parent_sequence,
table="Standard",
start_offset=None,
stop_symbol="*",
to_stop=False,
cds=None,
gap=None,
):
"""
Identical to the implementation found in
Biopython SeqFeature, but will use .shift value instead
if start_offset is not set and start_codon is not present
Deferred to codon_start under reasoning that some bioinformatic scripts
may edit the codon_start field, but not change the .shift value
"""
# see if this feature should be translated in a different
# frame using the "codon_start" qualifier
if start_offset is None:
try:
start_offset = int(self.qualifiers["codon_start"][0]) - 1
except KeyError:
start_offset = self.shift
if start_offset not in [0, 1, 2]:
raise ValueError("The start_offset must be 0, 1, or 2. The supplied value is '%s'. Check the value of either the codon_start qualifier, the .phase property, or the start_offset argument" % (start_offset))
feat_seq = self.extract(parent_sequence)[start_offset:]
codon_table = self.qualifiers.get("transl_table", [table])[0]
if cds is None:
cds = self.type == "CDS"
return feat_seq.translate(
table=codon_table,
stop_symbol=stop_symbol,
to_stop=to_stop,
cds=cds,
gap=gap,
)
def convertSeqFeat(inFeat, defaultSource="gffSeqFeature"):
    """Convert a Biopython SeqFeature into a gffSeqFeature.

    Copies location, type, id and qualifiers, and derives shift/score/
    source from the codon_start/score/source qualifiers when present.

    Raises:
        Exception: if the feature lists itself as its own Parent (a
            self-loop, which the GFF hierarchy cannot represent).
    """
    featLoc = inFeat.location
    IDName = inFeat.id
    qualDict = inFeat.qualifiers
    parentCands = inFeat.qualifiers.get("Parent", [])
    for x in parentCands:
        if x == inFeat.id:  # Cannot allow self-loops
            # BUG FIX: original referenced the undefined name `cand` here,
            # turning the intended diagnostic into a NameError.
            raise Exception(
                "Cannot convert SeqRecord, feature %s lists itself as a parent feature"
                % (inFeat.id))
    # NOTE(review): codon_start is 1-based (1..3) while GFF phase is 0..2;
    # the raw value is preserved exactly as the original code stored it —
    # confirm downstream consumers expect that before changing.
    if "codon_start" in inFeat.qualifiers.keys():
        shiftIn = int(inFeat.qualifiers["codon_start"][0])
    else:
        shiftIn = 0
    if "score" in inFeat.qualifiers.keys():
        scoreIn = float(inFeat.qualifiers["score"][0])
    else:
        scoreIn = "."
    if "source" in inFeat.qualifiers.keys():
        sourceIn = inFeat.qualifiers["source"][0]
    else:
        sourceIn = defaultSource
    return gffSeqFeature(featLoc, inFeat.type, '', featLoc.strand, IDName,
                         qualDict, [], None, None, shiftIn, scoreIn, sourceIn)
def convertSeqRec(inRec, defaultSource="gffSeqFeature", deriveSeqRegion=True, createMetaFeat=None):
    # Assumes an otherwise well-constructed SeqRecord that just wants to
    # replace its features with gffSeqFeatures.  Children (features with a
    # Parent qualifier) are re-attached to their parents in two passes:
    # first child-to-child, then the remainder to top-level features.
    if not isinstance(inRec, list):
        inRec = [inRec]
    outRec = []
    for rec in inRec:
        topList = []      # top-level (parentless) converted features
        childList = []    # (converted feature, number of parents) pairs
        lastCount = 0     # total outstanding parent links
        maxLoc = 0        # rightmost coordinate seen, for sequence-region
        for feat in rec.features:
            if "Parent" in feat.qualifiers.keys():
                childList.append((convertSeqFeat(feat, defaultSource), len(feat.qualifiers["Parent"])))  # Possible to have more than one parent
                lastCount += childList[-1][1]
            elif feat.id and feat.id != "<unknown id>":  # Do not accept the default value
                topList.append(convertSeqFeat(feat, defaultSource))
            maxLoc = max(maxLoc, feat.location.end)
        if deriveSeqRegion:
            rec.annotations["sequence-region"] = "%s 1 %s" % (rec.id, str(maxLoc))
        # Phase 1: attach children to other children until no more
        # progress is made (lastCount == thisCount).
        popList = []
        thisCount = -1
        while lastCount != thisCount:
            thisCount = 0
            for child in childList:  # Check for subfeatures of subfeatures first
                foundItem = child[1]
                for cand in childList:
                    if foundItem > 0:
                        for childID in child[0].qualifiers["Parent"]:
                            if cand[0].id == childID:
                                cand[0].sub_features.append(child[0])
                                foundItem -= 1
                    elif foundItem == 0:
                        break
                if foundItem > 0:
                    popList.append((child[0], foundItem))
                    thisCount += popList[-1][1]
            childList = popList
            if thisCount != lastCount:
                popList = []
                lastCount = thisCount
                thisCount = 0
        # Phase 2: attach remaining children to top-level features.
        lastCount = -1
        thisCount = -1
        while lastCount != 0:  # This shouldn't need to actually loop
            thisCount = 0
            popList = []
            for child in childList:
                foundItem = child[1]
                for cand in topList:
                    if foundItem > 0:
                        for childID in child[0].qualifiers["Parent"]:
                            if cand.id == childID:
                                cand.sub_features.append(child[0])
                                foundItem -= 1
                    elif foundItem == 0:
                        break
                if foundItem > 0:
                    popList.append((child[0], foundItem))
                    thisCount += popList[-1][1]
            childList = popList
            # NOTE(review): with this control flow the error branch below
            # appears unreachable (inside the loop lastCount is never 0),
            # and unresolvable parents could loop indefinitely — confirm
            # against the author's intent before relying on the diagnostic.
            if thisCount != 0:
                popList = []
                lastCount = thisCount
                thisCount = 0
            elif thisCount == lastCount or thisCount > 0:
                badIDs = []
                for x in childList:
                    badIDs.append(x[0].id)
                outStr = ", ".join(badIDs)
                sys.stderr.write("Unable to convert SeqRecord %s: could not find parents for features [%s]\n" % (rec.id, outStr))
                sys.stderr.write("Note that this error will also occur if sub_feature relationships between features ever form a cycle/loop.\n")
                raise Exception("Could not convert features of SeqRecord %s to gffSeqFeature format, see stderr\n" % (rec.id))
            else:
                break
        if createMetaFeat:
            qualDict = {}
            # NOTE(review): `res` is undefined in this scope (the loop
            # variable is `rec`) and `IDName` below is never assigned —
            # this branch raises NameError as written; likely intended
            # `rec.annotations` and `rec.id`.
            for x in res.annotations.keys():
                outVal = ""
                if isinstance(res.annotations[x], list):
                    outVal = " ".join(res.annotations[x])
                else:
                    outVal = str(res.annotations[x])
                outVal = outVal.replace("\n", " ")
                qualDict[x] = [outVal]
            topList.append(gffSeqFeature(FeatureLocation(0, maxLoc), createMetaFeat, '', 0, IDName, qualDict, [], None, None, 0, ".", defaultSource))
        topList = sorted(topList, key=lambda feature: feature.location.start)
        rec.features = topList
        outRec.append(rec)
    return outRec
# Characters reserved by the GFF3 column-9 grammar that must be
# percent-encoded inside qualifier keys/values...
disallowArray = ["&", ",", ";", "="]
# ...and their percent-encodings, parallel to disallowArray.
validArray = ["%26", "%2C", "%3B", "%3D"]
# Hex digits accepted after '%' when decoding an escape sequence.
encoders = "ABCDEF1234567890"
# Punctuation allowed (besides ASCII alphanumerics) in seqid/ID fields.
validID = '.:^*$@!+_?-|'
def writeMetaQuals(qualList):
    """Render a dict of metadata annotations as GFF3 ``##`` pragma lines.

    ``sequence-region`` values get special parsing: tuple-ish string
    reprs, quoted seqids, and coordinate values with trailing ``%``/``,``
    garbage are all tolerated.  ``gff-version`` is skipped entirely (the
    caller emits it first, exactly once).  Returns the pragma block as a
    single string ("" when nothing printable was supplied).
    """
    outLines = ""
    for x in qualList.keys():
        if x == "sequence-region":
            try:
                if isinstance(qualList[x], str):
                    # Tolerate "(seqid start end)" tuple-style reprs.
                    if qualList[x][0] == "(" and qualList[x][-1] == ")":
                        fields = (qualList[x][1:-1]).split(" ")
                    else:
                        fields = qualList[x].split(" ")
                    # Strip matching quotes around the seqid.
                    if len(fields[0]) > 2 and fields[0][0] in ["'", '"'] and fields[0][0] == fields[0][-1]:
                        fields[0] = fields[0][1:-1]
                    if "%" in fields[1]:
                        fields[1] = int(fields[1][:fields[1].find("%")])
                    elif "," in fields[1]:
                        # BUG FIX: the original sliced at find("%") (-1 here),
                        # silently dropping the last character instead of
                        # cutting at the comma.
                        fields[1] = int(fields[1][:fields[1].find(",")])
                    else:
                        fields[1] = int(fields[1])
                    if "%" in fields[2]:
                        fields[2] = int(fields[2][:fields[2].find("%")])
                    elif "," in fields[2]:
                        # Consistency: apply the same comma handling as fields[1].
                        fields[2] = int(fields[2][:fields[2].find(",")])
                    else:
                        fields[2] = int(fields[2])
                else:
                    fields = qualList[x]
                outLines += "##sequence-region %s %d %d\n" % (fields[0], fields[1], fields[2])
            except Exception:
                # Narrowed from bare except: parse failures are reported,
                # but KeyboardInterrupt/SystemExit now propagate.
                sys.stderr.write("Annotation Error: Unable to parse sequence-region in metadata feature. Value was %s" % (qualList[x]))
        elif x != "gff-version":
            outLines += "##%s" % (x)
            if isinstance(qualList[x], str):
                outLines += " %s" % (qualList[x].replace("\n", " "))
            elif isinstance(qualList[x], Iterable):
                # Non-string iterables contribute one space-separated token each.
                for i in qualList[x]:
                    outLines += " %s" % (str(i).replace("\n", " "))
            else:
                outLines += " %s" % (str(qualList[x]).replace("\n", " "))
            outLines += "\n"
    return outLines
def validateID(idIn):
    """Return the distinct characters of *idIn* that are not legal in a
    GFF3 seqid/ID (ASCII alphanumerics plus ``.:^*$@!+_?-|``), in order
    of first appearance.  An empty list means the ID is valid.
    """
    allowedPunct = '.:^*$@!+_?-|'
    offenders = []
    for ch in idIn:
        legal = ("0" <= ch <= "9") or ("A" <= ch <= "Z") or \
                ("a" <= ch <= "z") or (ch in allowedPunct)
        if not legal and ch not in offenders:
            offenders.append(ch)
    return offenders
def replaceBadChars(qualIn):
    """Percent-encode the four GFF3-reserved characters (& , ; =) in *qualIn*."""
    encodeMap = {"&": "%26", ",": "%2C", ";": "%3B", "=": "%3D"}
    return "".join(encodeMap.get(ch, ch) for ch in qualIn)
def validateQual(qualIn):
    """Return the distinct GFF3-reserved characters (& , ; =) present in
    *qualIn*, in order of first appearance (empty list == clean value)."""
    reserved = ["&", ",", ";", "="]
    found = []
    for ch in qualIn:
        if ch in reserved and ch not in found:
            found.append(ch)
    return found
def rAddDict(lDict, rDict):
    """Merge *rDict* into *lDict*, extending the list values of shared
    keys in place, and return the mutated *lDict*."""
    for key, extra in rDict.items():
        bucket = lDict.setdefault(key, [])
        bucket += extra
    return lDict
def checkCycle(orgDict):
    """Scan every organism's features for ones whose own id appears in
    their sub_feature tree (a cycle).  Returns {orgID: [feature ids]};
    an empty dict means no cycles."""
    badOrgs = {}
    for org, feats in orgDict.items():
        for feat in feats:
            if foundID(feat, feat.id):
                badOrgs.setdefault(org, []).append(feat.id)
    return badOrgs
def resolveParent(orgDict, indexDict):
    """Attach every child feature (indexed per-organism by *indexDict*)
    to its parent feature(s) within *orgDict*, then cycle-check.

    Returns (orgDict, None) on success, or (None, errorString) when a
    parent cannot be found or a cycle is detected.
    """
    errOut = ""
    for org in indexDict.keys():
        for ind in indexDict[org]:
            for x in orgDict[org][ind].qualifiers['Parent']:
                found = False
                for y in orgDict[org]:
                    if "ID" in y.qualifiers.keys() and x in y.qualifiers["ID"]:
                        y.sub_features.append(orgDict[org][ind])
                        found = True
                        break
                if not found:
                    errOut += ("Organism %s: Unable to find parent %s of feature %s\n" % (org, x, orgDict[org][ind].id))
    cycles = checkCycle(orgDict)
    # BUG FIX: the original tested `cycles.keys() != []`, which is always
    # True in Python 3 (dict_keys never compares equal to a list); test
    # truthiness of the dict instead.
    if cycles:
        for x in cycles.keys():
            errOut += ("Organism %s: Cycle/ loop of features found involving feature IDs %s.\n" % (x, str(cycles[x])[1:-1]))
    if errOut != "":
        return None, errOut
    return orgDict, None
def foundID(featIn, topID):
    """Return True when *topID* occurs as the id of any descendant of
    *featIn* (used to detect cycles: a feature containing its own id).

    BUG FIX: the original recursed with foundID(grandchild, ...) after
    checking only the direct child's id, so ids at even depths below the
    root (grandchildren, great-great-grandchildren, ...) were never
    compared; this version checks every node in the subtree.
    """
    for child in featIn.sub_features:
        if child.id == topID:
            return True
        if foundID(child, topID):
            return True
    return False
# A check for if an unencoded semicolon made it into the body of a qualifier value
# Sometimes occurs from manually edited Notes qualifiers
def encodeFromLookahead(remLine):
    """Decide whether a stray ';' should be percent-encoded.

    Scans forward: if an '=' appears before the next ';' or ',' then a
    new key=value pair follows and the ';' was a real separator (False);
    otherwise (separator, newline or EOF first) encode it (True).
    """
    decider = next((ch for ch in remLine if ch in "=;,"), None)
    return decider != "="
def isNum(evalString):
    """Return True when every character is an ASCII digit 0-9.

    Note: the empty string counts as numeric, and non-ASCII digit
    characters are rejected (deliberately stricter than str.isdigit).
    """
    return all("0" <= ch <= "9" for ch in evalString)
def qualsToAnnotes(inDict, feat, orgID):
    """Fold the qualifiers of metadata feature *feat* into the running
    annotation dict *inDict*.

    *inDict* maps annotation name -> list of [value, orgID, ...] entries,
    so one value can be shared by several organisms.  Values are first
    normalised through writeMetaQuals so they round-trip as pragma text.
    Returns the mutated *inDict*.
    """
    for x in feat.qualifiers.keys():
        if x not in inDict.keys():
            dictVal = " ".join(feat.qualifiers[x])
            outStr = writeMetaQuals({x: dictVal})
            if outStr == "":
                # writeMetaQuals deliberately emits nothing for
                # gff-version; keep the raw value in that one case and
                # skip any other unprintable annotation.
                if x == "gff-version":
                    outStr = feat.qualifiers[x][0]
                else:
                    continue
            else:
                # Strip the leading "##name " and the trailing newline.
                outStr = outStr[outStr.find(" ") + 1:-1]
            inDict[x] = [[outStr, orgID]]
        else:
            # NOTE(review): this scans *every* annotation's entries, not
            # just inDict[x], so an organism already recorded under any
            # other annotation suppresses this one too — looks suspicious
            # but is preserved as-is; confirm intent before changing.
            contains = False
            for pragma in inDict.keys():
                for val in inDict[pragma]:
                    if orgID in val[1:]:
                        contains = True
                        break
            if not contains:
                dictVal = " ".join(feat.qualifiers[x])
                outStr = writeMetaQuals({x: dictVal})
                if outStr == "":
                    if x == "gff-version":
                        outStr = feat.qualifiers[x][0]
                    else:
                        continue
                else:
                    outStr = outStr[outStr.find(" ") + 1:-1]
                inDict[x].append([outStr, orgID])
    return inDict
def lineAnalysis(line, codingTypes = ["CDS"]):
IDName = ""
startLoc = -1
endLoc = -1
scoreIn = 0.0
if len(line.strip()) == 0 or line.strip() == "\n":
return None, None, None
if line[0] == "#":
if len(line) > 2 and line[1] == "#":
return None, line[2:-1], None
# else handle ## Pragmas
else:
return None, None, None
errorMessage = ""
fields = line.split("\t")
if len(fields) != 9:
errorMessage += "GFF3 is a 9-column tab-separated format, line has %d columns.\n" % (len(fields))
if len(fields) > 9:
errorMessage += "Possible unescaped tab in a qualifier field.\n"
return errorMessage, None, None
for x in range(0, len(fields)):
if fields[x] == "":
errorMessage += "Field #%d is empty. Please supply correct or default value.\n" % (x + 1)
if errorMessage != "":
return errorMessage, None, None
idEval = validateID(fields[0])
if len(idEval) != 0:
errorMessage += "Organism ID contains the following invalid characters: %s\n" % (idEval)
# fields[1]
# fields[2]
# fields[3]
if fields[3][0] in "<>":
uncert = 1
fields[3] = (fields[3][1:]).strip()
else:
uncert = 0
fields[3].strip()
if isNum(fields[3]):
startLoc = int(fields[3])
else:
errorMessage += "Feature location start contains non-numeric character.\n"
# fields[4]
if fields[4][0] in "<>":
uncert = 1
fields[4] = (fields[4][1:]).strip()
else:
uncert = 0
fields[4].strip()
if isNum(fields[4]):
endLoc = int(fields[4])
else:
errorMessage += "Feature location start contains non-numeric character.\n"
if startLoc >= 0 and endLoc >= 0 and endLoc < startLoc:
errorMessage += "Feature Location end is less than start (GFF spec requires all features, regardless of strand, to have the lower number as the start).\n"
# fields[5]
if fields[5] != ".":
try:
scoreIn = float(fields[5])
except:
scoreIn = 0.0
errorMessage += "Score field could not be interpreted as a floating-point (real) number. Ensure notation is correct.\n"
# fields[6]
if fields[6] not in ['-', '+', '.', '?']:
errorMessage += "Feature strand must be '+', '-', '.', or '?', actual value is '%s'.\n" % (fields[6])
# fields[7]
if fields[7] not in ['.', '0', '1', '2']:
errorMessage += "Expected 0, 1, 2, or . for Phase field value, actual value is '%s'.\n" % (fields[7])
elif fields[7] =='.' and fields[1] in codingTypes:
errorMessage += "Expected 0, 1, or 2 in Phase field for %s-type feature, actual value is '%s'.\n" % (fields[1], fields[7])
if fields[7] == '.':
shiftIn = 0
else:
shiftIn = int(fields[7])
# fields[8]
keyName = ""
valNames = [""]
valInd = 0
parseMode = 0
qualDict = {}
contCounter = 0
for x in range(0, len(fields[8])):
currChar = fields[8][x]
if contCounter:
contCounter += -1
continue
if parseMode == 0:
if not (currChar in "=,;%\n"):
keyName += currChar
elif currChar == "=":
if len(keyName) == 0:
errorMessage += "No ID name supplied for a value in the qualifiers field, aborting.\n"
break
parseMode = 1
continue
elif currChar == "%" and (fields[8][x+1] in encoders) and (fields[8][x+2] in encoders):
try:
keyName += urllib.parse.unquote(fields[8][x:x+3])
except:
keyName += urllib.unquote(fields[8][x:x+3])
contCounter = 2
else: #Encode special char
encoded = str(hex(ord(currChar)))
keyName += "%" + encoded[2:].upper()
elif parseMode == 1:
if not (currChar in "=,;%\n"):
valNames[valInd] += currChar
elif currChar == ",":
valInd += 1
valNames.append("")
elif currChar == "=":
valNames[valInd] += "%3D"
elif currChar == "%" and (fields[8][x+1] in encoders) and (fields[8][x+2] in encoders):
try:
valNames[valInd] += urllib.parse.unquote(fields[8][x:x+3])
except:
valNames[valInd] += urllib.unquote(fields[8][x:x+3])
contCounter = 2
elif currChar == "\n":
parseMode = 2
else:
if x == len(fields[8]) - 2: # Assume if last char in fields[8] is a semicolon, then just the end of qualifier
parseMode = 2
elif encodeFromLookahead(fields[8][x+1:]):
valNames[valInd] += "%3B"
continue
else:
parseMode = 2
if parseMode == 2: # Do not elif, this is used as a wrapup for each qualifier and we want it checked if parsemode == 1 incremented itself
if keyName not in qualDict.keys():
qualDict[keyName] = valNames
else:
for x in valNames:
qualDict[keyName].append(x)
keyName = ""
valNames = [""]
valInd = 0
parseMode = 0
for x in qualDict.keys():
if x == "ID":
#if len(qualDict[x]) > 1:
#errorMessage += "More than one ID supplied for feature.\n"
IDName = qualDict[x][0]
if startLoc == -1 or endLoc == -1 or (not(fields[6] in '-+.?')):
errorMessage += "Unable to construct feature location, aborting.\n"
elif fields[6] == '+':
featLoc = FeatureLocation(startLoc - 1, endLoc, strand = +1)
elif fields[6] == '-':
featLoc = FeatureLocation(startLoc - 1, endLoc, strand = -1)
else:
featLoc = FeatureLocation(startLoc - 1, endLoc, strand = 0)
if "Parent" in qualDict.keys():
for x in qualDict["Parent"]:
if x == IDName:
errorMessage += "Feature lists itself as a sub_feature, cycles/loops not permitted in GFF format.\n"
if errorMessage != "":
return errorMessage, None, None
return None, fields[0], gffSeqFeature(featLoc, fields[2], '', featLoc.strand, IDName, qualDict, None, None, None, shiftIn, scoreIn, fields[1])
def gffParse(gff3In, base_dict = {}, outStream = sys.stderr, codingTypes=["CDS"], metaTypes = ["remark"], suppressMeta = 2, pragmaPriority = True, pragmaOverridePriority = True):
# gff3In --- source file
# base_dict --- file with additional SeqRecord information. Keys are OrganismIDs and values are SeqRecords.
# For BCBio backwards compatibility.
# outStream --- output filestream or stringstream
# codingTypes --- list of feature types where a non-'.' phase value is expected, passed along to lineAnalysis
# metaTypes --- list of metadata feature types. Features of this type will be affected by the remaining arguments
# suppressMeta --- Suppress metadata fields. Int, where 0 == no suppression, all metadata from features and pragmas
# will be read and output to the SeqRecord as .annotation entries.
# 1 == As above, but metadata features will not be entered into the SeqRecord's feature list
# after their metadata is recorded and entered into the SeqRecord.annotation
# 2 == Total suppression, no metadata features will even be processed, and no pragmas except
# those related to sequence length (##FASTA and ##sequence-region) and ##gff-version (required
# by GFF spec) will be utilized.
# pragmaPriority --- In cases where pragmas and metadata features disagree/conflict, pragmas will take precedence for creating
# SeqRecord.annotation value if true, else the feature will.
# pragmaOverridePriority --- Similar to above, in the event of a conflict between metadata features and pragmas, the pragma's
# value will override the metadata gffSeqFeature.qualifier value with the pragma's own. This will force
# the metadata and pragmas to sync, and avoid future discrepancies. Should only be used with pragmaPrority
fastaDirective = False # Once true, must assume remainder of file is a FASTA, per spec
errOut = ""
warnOut = ""
lineInd = 0
pragmaAnnotesDict = {} # Annotations dictionaries, one for ones derived via pragmas and another for ones derived from meta features
metaAnnotesDict = {} # Keys are annotation title, values are a list of lists, where the first entry in a list is the value, and
# the rest the orgIDs which correspond to the value
orgDict = {} # Dictionary of orgIDs, with a list of their features (Top-level or otherwise) as its value
finalOrg = {}
seekParentDict = {} # Dictionary of orgIDs, with a list of indexes in the equivalent orgDict list which are subfeatures
seqDict = {}
regionDict = {} # Dictionary of max length for records (For handling circular and bound checking)
# Values are (maxVal, status), where status is 0 if maxVal is derived from features, 1 if derived from sequence-region
# pragma (Enforce this boundary), or -1 if derived from a feature with is_circular (Unable to construct record)
currFastaKey = ""
if pragmaPriority: # In cases where pragmas and meta-features conflict, such as sequence-region
pragBit = 1 # pragmas will take priority
annoteBit = 0
else: # Else meta features will
pragBit = 0 # Split into ints to cleanly slot into regionDict format
annoteBit = 1
for line in gff3In:
lineInd += 1
err = None
prag = None
res = None
### FASTA Pragma Handling
if line[0] == ">": # For compatibility with Artemis-style GFF
fastaDirective = True
if not fastaDirective:
err, prag, res = lineAnalysis(line, codingTypes)
else:
if line[0] == ">":
currFastaKey = line[1:-1]
if currFastaKey not in seqDict.keys():
seqDict[currFastaKey] = ""
elif line[0] == "#":
continue
elif line:
seqDict[currFastaKey] += (line[:-1]).strip()
continue
### Error message construction
if err:
errOut += "Line %d: %s\n" % (lineInd, err)
### Pragma handling
if prag and not res:
prag = prag.split(" ")
if prag[0] == "FASTA":
fastaDirective = True
elif prag[0] == "sequence-region":
if prag[1] not in regionDict.keys():
regionDict[prag[1]] = (int(prag[2]) - 1, int(prag[3]), pragBit)
elif pragBit > regionDict[prag[1]]:
regionDict[prag[1]] = (int(prag[2]) - 1, int(prag[3]), pragBit)
elif prag[0] == "#":
orgDict, resolveErr = resolveParent(orgDict, seekParentDict)
if resolveErr:
errOut += resolveErr
finalOrg = rAddDict(finalOrg, orgDict)
seekParentDict = {}
orgDict = {}
elif suppressMeta < 2:
if prag[0] in pragmaAnnotesDict.keys():
dictVal = " ".join(prag[1:])
pragmaAnnotesDict[prag[0]].append([dictVal])
else:
dictVal = " ".join(prag[1:])
pragmaAnnotesDict[prag[0]] = [[dictVal]]
### Feature Handling
if res:
if suppressMeta == 2 and res.type in metaTypes:
continue
## First time encountering orgID
if prag not in orgDict.keys():
orgDict[prag] = [res]
seekParentDict[prag] = []
possSeq = base_dict.get(prag, None)
# Process base_dict
# .seq priority is: ##FASTA directives will always define sequence-region and seq if present (done further down)
# base_dict is next, and will also accept an empty seq, so take care with what's passed in this field
# Finally, parser will infer an UnknownSeq from either ##sequence-region pragma or the 'last' feature,
# depending on arguments passed to parser.
if isinstance(possSeq, SeqRecord):
if possSeq.seq == None:
seqDict[prag] = ""
else:
seqDict[prag] = str(possSeq.seq)
else:
seqDict[prag] = ""
for x in pragmaAnnotesDict.keys():
if prag in pragmaAnnotesDict[x][-1]:
continue
pragmaAnnotesDict[x][-1].append(prag)
if prag not in regionDict.keys() or (prag in regionDict.keys() and regionDict[prag][-1] != 1):
if res.qualifiers.get("sequence-region") and res.type in metaTypes:
fields = res.qualifiers["sequence-region"][0].split(" ")
regStr = writeMetaQuals({"sequence-region": res.qualifiers["sequence-region"][0]})
regStr = regStr[regStr.find(" ") + 1:-1]
fields = regStr.split(" ")
regionDict[prag] = (int(fields[1]) - 1, int(fields[2]), annoteBit)
elif res.qualifiers.get("is_circular") == ['True']:
regionDict[prag] = (0, int(res.location.end), -1)
else:
regionDict[prag] = (0, int(res.location.end), 0)
if suppressMeta <= 1 and res.type in metaTypes:
qualsToAnnotes(metaAnnotesDict, res, prag)
if suppressMeta == 1:
orgDict[prag] = []
if "Parent" in res.qualifiers.keys():
seekParentDict[prag].append(0)
else: # Check if it's possible to resolve as a CompoundLocation feature
if suppressMeta <= 1 and res.type in metaTypes:
qualsToAnnotes(metaAnnotesDict, res, prag)
if suppressMeta == 1:
break
incInd = True
if regionDict[prag][2] < 1:
if res.qualifiers.get("is_circular") == ['True'] and int(res.location.end) > regionDict[prag][1]:
regionDict[prag] = (0, int(res.location.end), -1)
elif int(res.location.end) > regionDict[prag][1]: # Can't just max() or else we'll maybe overwrite -1 status
regionDict[prag] = (0, int(res.location.end), 0)
if res.id:
for x in range(0, len(orgDict[prag])):
if res.id == orgDict[prag][x].id:
if orgDict[prag][x].type != res.type:
errOut += ("Line %d: Duplicate IDs in file but differing types. Cannot assume CompoundFeature/ join location, please resolve type descrepancy or de-duplicate ID %d.\n" % (lineInd, res.id))
orgDict[prag][x].location = orgDict[prag][x].location + res.location
incInd = False
break
# If incInd is still true, then it's a unique feature, append to list
if incInd:
orgDict[prag].append(res)
if "Parent" in res.qualifiers.keys():
seekParentDict[prag].append(len(orgDict[prag])-1)
orgDict, resolveErr = resolveParent(orgDict, seekParentDict)
if resolveErr:
errOut += resolveErr
finalOrg = rAddDict(finalOrg, orgDict)
# All features and pragmas should be read in by now, resolve any outstanding
# annotation or sequence associations
for x in regionDict.keys():
if x in seqDict.keys() and seqDict[x] != "": ## If x not in SeqDict, then a sequence-region pragma was made for organism with no features
regionDict[x] = (0, len(seqDict[x]), 1) # Make FASTA the final arbiter of region if present
for x in regionDict.keys():
if regionDict[x][2] == -1:
errOut += "Organism %s: No sequence-region specified and last feature is labeled circular, unable to infer organism length.\n" % (x)
for x in finalOrg.keys():
for i in finalOrg[x]:
circ = False
badIDs = []
checkList = [i]
for j in checkList: # make is_circular retroactively applicable to all features in a tree
for k in j.sub_features:
checkList.append(k)
if j.qualifiers.get("is_circular") == ['True']:
circ = True
break
if int(j.location.start) < regionDict[x][0] or int(j.location.end) > regionDict[x][1]:
badIDs.append(j.id)
if badIDs != [] and circ == False:
errOut += "Organism %s: The following features fall outside of the specified sequence region: %s.\n" % (x, str(badIDs)[1:-1])
# By this point, all features and pragmas should be processed and resolved
if errOut:
outStream.write(errOut + "\n")
raise Exception("Failed GFF Feature Parsing, error log output to stderr\n")
# Construct a SeqRecord from all processed OrgIDs
res = []
for x in finalOrg.keys():
finalOrgHeirarchy = []
annoteDict = {}
for pragma in pragmaAnnotesDict.keys():
for vals in pragmaAnnotesDict[pragma]:
if x in vals[1:]:
annoteDict[pragma]=vals[0]
break
for pragma in metaAnnotesDict.keys():
for vals in metaAnnotesDict[pragma]:
if x in vals[1:]:
if pragma in annoteDict.keys():
if pragmaOverridePriority == False and pragBit < annoteBit:
annoteDict[pragma]=vals[0]
break
else:
annoteDict[pragma]=vals[0]
for i in finalOrg[x]:
if "Parent" not in i.qualifiers.keys():
finalOrgHeirarchy.append(i)
if i.type in metaTypes:
if pragmaOverridePriority == False:
for key in annoteDict.keys():
if key not in finalOrgHeirarchy[-1].qualifiers.keys():
finalOrgHeirarchy[-1].qualifiers[key] = [annoteDict[key]]
else:
for key in annoteDict.keys():
finalOrgHeirarchy[-1].qualifiers[key] = [annoteDict[key]]
if seqDict[x]:
if x in regionDict.keys():
annoteDict["sequence-region"] = "%s %s %s" % (x, regionDict[x][0] + 1, regionDict[x][1])
if len(seqDict[x]) < regionDict[x][1] - regionDict[x][0]:
seqDict[x] += "?" * (regionDict[x][1] - regionDict[x][0] - len(seqDict[x]))
else:
seqDict[x] = seqDict[x][regionDict[x][0]:regionDict[x][1]]
else:
annoteDict["sequence-region"] = "%s 1 %s" % (x, int(len(seqDict[x])))
seqDict[x] = str(seqDict[x])
elif x in regionDict.keys():
annoteDict["sequence-region"] = "%s %s %s" % (x, regionDict[x][0] + 1, regionDict[x][1])
seqDict[x] = str(UnknownSeq(regionDict[x][1] - regionDict[x][0]))
else: # Should actually no longer be reachable
seqDict[x] = ""
res.append(SeqRecord(Seq(seqDict[x]), x, "<unknown name>", "<unknown description>", None, finalOrgHeirarchy, annoteDict, None))
return res
def printFeatLine(inFeat, orgName, source='feature', score=None, shift=None, outStream=sys.stdout, parents=None, codingTypes=["CDS"]):
    """Write one GFF3 body line per location part of *inFeat*, then
    recurse into its sub_features.

    *parents* is the enclosing feature (if any); its ID is copied into a
    Parent qualifier when the child lacks one.
    """
    for loc in inFeat.location.parts:
        line = orgName + "\t"
        # Column 2: source ('.' when empty).
        if source:
            line += source + "\t"
        else:
            line += ".\t"
        line += inFeat.type + "\t"
        # Columns 4-5: GFF is 1-based inclusive, Biopython 0-based
        # half-open, hence the +1 on the start only.
        startStr = str(min(loc.start, loc.end) + 1)
        endStr = str(max(loc.start, loc.end))
        # Fuzzy-position markers ('<'/'>') are not valid GFF; drop them.
        if startStr[0] == "<":
            startStr = startStr[1:]
        if endStr[0] == ">":
            endStr = endStr[1:]
        line += startStr + "\t" + endStr + "\t"
        # Column 6: score. NOTE(review): `if score:` treats a legitimate
        # score of 0/0.0 as missing and prints '.' — confirm intent.
        if score:
            line += str(score) + "\t"
        else:
            line += ".\t"
        # Column 7: strand.
        if inFeat.location.strand == 0:
            line += ".\t"
        elif inFeat.location.strand == 1:
            line += "+\t"
        elif inFeat.location.strand == -1:
            line += "-\t"
        else:
            line += "?\t"
        # Column 8: phase — required (default 0) for coding types,
        # '.' otherwise unless an explicit non-zero shift was given.
        if inFeat.type in codingTypes:
            if shift or shift == 0:
                line += str(shift) + "\t"
            else:
                line += "0\t"
        elif shift != 0:
            line += str(shift) + "\t"
        else:
            line += ".\t"
        # Inherit Parent from the enclosing feature when absent.
        if parents and "Parent" not in inFeat.qualifiers.keys():
            inFeat.qualifiers["Parent"] = parents.qualifiers["ID"]
        # Column 9: key=val,val;... with reserved chars percent-encoded.
        for qual in inFeat.qualifiers.keys():
            for keyChar in str(qual):
                if keyChar in "%,=;":
                    encoded = str(hex(ord(keyChar)))
                    line += "%" + encoded[2:].upper()
                else:
                    line += keyChar
            line += "="
            # Normalise scalar qualifier values to single-element lists.
            if type(inFeat.qualifiers[qual]) != list:
                inFeat.qualifiers[qual] = [inFeat.qualifiers[qual]]
            for ind in range(0, len(inFeat.qualifiers[qual])):
                for valChar in str(inFeat.qualifiers[qual][ind]):
                    if valChar in "%,=;":
                        encoded = str(hex(ord(valChar)))
                        line += "%" + encoded[2:].upper()
                    else:
                        line += valChar
                if ind < len(inFeat.qualifiers[qual]) - 1:
                    line += ","
                else:
                    line += ";"
        outStream.write(line + "\n")
    # Depth-first emission of children.  NOTE(review): the recursion does
    # not forward a non-default codingTypes argument — children always use
    # ["CDS"]; confirm before relying on custom coding types.
    if type(inFeat) == gffSeqFeature and inFeat.sub_features:
        for x in inFeat.sub_features:
            printFeatLine(x, orgName, x.source, x.score, x.shift, outStream, inFeat)
def gffWrite(inRec, outStream = sys.stdout, suppressMeta = 1, suppressFasta=True, codingTypes = ["CDS"], metaTypes = ["remark"], validPragmas = None, recPriority = True, createMetaFeat=None):
    """Serialise one record (or a list of records) to GFF3 on *outStream*.

    Behaviour visible in this function:
      inRec          -- record or list of records; a falsy value writes only
                        the ``##gff-version 3`` pragma and returns.
      suppressMeta   -- < 2: pragmas are collected from rec.annotations and
                        metadata features and written; > 0: metadata features
                        themselves are skipped in the feature output loop.
      suppressFasta  -- when False and at least one record has a real
                        sequence, a trailing ``##FASTA`` section is appended.
      validPragmas   -- optional whitelist of pragma keys; None keeps all.
      recPriority    -- when True, record annotations override qualifiers
                        taken from metadata features.
      createMetaFeat -- feature type used to fold the pragmas into a
                        synthetic feature when no metadata feature exists.

    NOTE(review): ``codingTypes`` is accepted but not referenced in this body
    (presumably consumed by printFeatLine elsewhere — confirm). ``codingTypes``
    and ``metaTypes`` are mutable default arguments, but neither is mutated
    here. ``outList`` aliases ``rec.annotations`` and is written to below, so
    the input records' annotations may be mutated as a side effect.
    """
    writeFasta = False
    verOut = "3"
    firstRec = True           # the version pragma is only emitted once
    if not inRec:
        outStream.write("##gff-version 3\n")
        return
    if type(inRec) != list:
        inRec = [inRec]
    for rec in inRec:
        # Any record with a concrete sequence triggers the ##FASTA footer.
        if not isinstance(rec.seq, UnknownSeq):
            writeFasta = True
        seenList = []         # NOTE(review): unused in this function
        outList = {}
        if suppressMeta < 2:
            # Aliases rec.annotations — later writes mutate the record.
            outList = rec.annotations
            #outStr = writeMetaQuals(rec.annotations)
            #outStream.write(outStr)
            # Merge qualifiers of metadata-typed features into the pragma set.
            metaFeats = []
            for feat in rec.features:
                if feat.type in metaTypes:
                    metaFeats.append(feat)
            for feat in metaFeats:
                for x in feat.qualifiers.keys():
                    if recPriority == False or x not in outList.keys():
                        outList[x] = " ".join(feat.qualifiers[x])
            # A record-level gff-version pragma overrides the default "3".
            if "gff-version" in outList.keys() and outList["gff-version"] != verOut:
                verOut = outList["gff-version"]
                outStream.write("##gff-version %s\n" % verOut)
            elif firstRec:
                outStream.write("##gff-version %s\n" % verOut)
            if validPragmas == None:
                outStr = writeMetaQuals(outList)
            else:
                # Keep only whitelisted pragma keys.
                whiteList = {}
                for x in outList.keys():
                    if x in validPragmas:
                        whiteList[x] = outList[x]
                outStr = writeMetaQuals(whiteList)
                outList = whiteList
            outStream.write(outStr)
        elif firstRec:
            outStream.write("##gff-version 3\n")
        foundMeta = False
        if createMetaFeat != None:
            for key in outList.keys(): # Change to GFF format qualifier dict
                outList[key] = [outList[key]]
            # Fold pragmas into the first metadata feature, if one exists.
            for feat in rec.features:
                if feat.type in metaTypes:
                    foundMeta = True
                    for key in outList.keys():
                        if recPriority or key not in feat.qualifiers.keys():
                            feat.qualifiers[key] = outList[key]
                    break
            # Otherwise synthesise a whole-record feature carrying them.
            if not foundMeta:
                tempSeq = gffSeqFeature(FeatureLocation(0, len(rec.seq), 0), createMetaFeat, '', 0, 0, outList, None, None, None, '.', '.', "CPT_GFFParse")
                printFeatLine(tempSeq, rec.id, source = tempSeq.source, score = tempSeq.score, shift = tempSeq.shift, outStream = outStream)
        # Emit every (non-metadata, unless requested) feature of the record.
        for feat in rec.features:
            if suppressMeta > 0 and feat.type in metaTypes:
                continue
            printFeatLine(feat, rec.id, source = feat.source, score = feat.score, shift = feat.shift, outStream = outStream)
        firstRec = False
    # Optional FASTA footer with the actual sequences.
    if writeFasta and not suppressFasta:
        outStream.write("##FASTA\n")
        for rec in inRec:
            rec.description = ""
            if not isinstance(rec.seq, UnknownSeq):
                SeqIO.write(rec, outStream, "fasta")
| TAMU-CPT/galaxy-tools | tools/webapollo/cpt_gffParser.py | Python | gpl-3.0 | 39,217 | [
"Biopython"
] | d3ad48ff5672c9e8e3659526c520e0368a3a1b510d4ddf7d9adfdba2b63a20eb |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def populate_enrollment_info(apps, schema_editor):
    """
    Populate the Enrollment info based on static text
    """
    community_text = """<p>Visit this location to begin the enrollment process. Many people find it helpful to make a plan to visit. You can make your plan <a href="/static/files/enrollment-plan-community.pdf">here</a>.</p>"""
    cps_text = """<p>Visit a child-friendly location near you:</p><ul><li><strong>Loop</strong> 42 W. Madison Street Hours: 9:00 AM - 5:00 PM</li><li><strong>Colman</strong> 4655 S. Dearborn Street Hours: 9:00 AM - 5:00 PM</li><li><strong>Hall Mall</strong> 4638 W. Diversey Avenue Hours 8:00 AM - 5:00 PM</li></ul><p>All sites are open until 7:00 PM on Wednesdays!</p><p>Many people find it helpful to make a plan to visit. You can make your plan <a href="/static/files/enrollment-plan-cps.pdf">here</a>.</p>"""
    # Use the historical model so the migration stays valid as the schema evolves.
    Location = apps.get_model('portal', 'Location')
    for location in Location.objects.all():
        if location.is_community_based:
            location.enrollment_en = community_text
            location.save()
        if location.is_cps_based:
            location.enrollment_en = cps_text
            location.save()
class Migration(migrations.Migration):
    # Data migration: fills Location.enrollment_en from static HTML text.
    # No reverse function is supplied to RunPython, so this migration is
    # irreversible.

    dependencies = [
        ('portal', '0006_auto_20150721_1725'),
    ]

    operations = [
        migrations.RunPython(populate_enrollment_info),
    ]
| smartchicago/chicago-early-learning | python/ecep/portal/migrations/0007_auto_20150803_1503.py | Python | mit | 1,397 | [
"VisIt"
] | e3d8bedb306beed82ed630420fef9d57a6cba27d77d12a5f14ef15d2455a62e7 |
# Copyright 2015 CERN. This software is distributed under the
# terms of the GNU General Public Licence version 3 (GPL Version 3),
# copied verbatim in the file LICENCE.md.
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization or
# submit itself to any jurisdiction.
# Project website: http://blond.web.cern.ch/
'''
**Module to compute intensity effects**
:Authors: **Danilo Quartullo**, **Alexandre Lasheen**
'''
from __future__ import division
import numpy as np
from next_regular import next_regular
from numpy.fft import rfft, irfft, rfftfreq
import ctypes
from setup_cpp import libfib
from scipy.constants import e
from scipy.signal import filtfilt
import scipy.ndimage as ndimage
import matplotlib.pyplot as plt
import sys
class TotalInducedVoltage(object):
    '''
    *Object gathering all the induced voltage contributions. The input is a
    list of objects able to compute induced voltages (InducedVoltageTime,
    InducedVoltageFreq, InductiveImpedance). All the induced voltages will
    be summed in order to reduce the computing time. All the induced
    voltages should have the same slicing resolution.*
    '''

    def __init__(self, Beam, Slices, induced_voltage_list, n_turns_memory=0, rev_time_array=None):
        '''
        *Constructor.*
        '''
        #: *Copy of the Beam object in order to access the beam info.*
        self.beam = Beam
        #: *Copy of the Slices object in order to access the profile info.*
        self.slices = Slices
        #: *Induced voltage list.*
        self.induced_voltage_list = induced_voltage_list
        #: *Induced voltage from the sum of the wake sources in [V]*
        self.induced_voltage = 0
        #: *Time array of the wake in [s]*
        self.time_array = self.slices.bin_centers
        #: *Creation of fundamental objects/parameters in case of multi-turn wake.*
        if n_turns_memory > 0:
            self.n_turns_memory = n_turns_memory
            self.rev_time_array = rev_time_array
            self.counter_turn = 0
            self.inductive_impedance_on = False
            i = 0
            # Only InducedVoltageFreq and InductiveImpedance support the
            # multi-turn memory mode; anything else is rejected below.
            for induced_voltage_object in self.induced_voltage_list:
                if type(induced_voltage_object) is InducedVoltageFreq:
                    # Cache the precomputed multi-turn FFT machinery from the
                    # frequency-domain object so track_memory can reuse it.
                    self.sum_impedances_memory = induced_voltage_object.total_impedance_memory
                    self.len_array_memory = induced_voltage_object.len_array_memory
                    self.len_array_memory_extended = induced_voltage_object.len_array_memory_extended
                    self.n_points_fft = induced_voltage_object.n_points_fft
                    self.frequency_array_memory = induced_voltage_object.frequency_array_memory
                    # j*omega array used to phase-rotate the stored spectrum
                    # by one revolution period each turn.
                    self.omegaj_array_memory = 2.0j * np.pi * self.frequency_array_memory
                    self.coefficient = - self.beam.charge * e * self.beam.ratio / (self.slices.bin_centers[1]-self.slices.bin_centers[0])
                    i += 1
                    print len(self.frequency_array_memory), self.n_points_fft, self.len_array_memory, self.len_array_memory_extended
                elif type(induced_voltage_object) is InductiveImpedance:
                    self.inductive_impedance_on = True
                    self.index_inductive_impedance = i
                    i += 1
                else:
                    raise RuntimeError('Memory feature not available for InducedVoltageTime objects! Aborting...')
            # Spectrum of the accumulated wake from previous turns.
            self.array_memory = np.zeros(len(self.frequency_array_memory), complex)

    def reprocess(self, new_slicing):
        '''
        *Reprocess the impedance contributions with respect to the new_slicing.*
        '''
        self.slices = new_slicing
        for induced_voltage_object in self.induced_voltage_list:
            induced_voltage_object.reprocess(self.slices)

    def induced_voltage_sum(self, Beam, length = 'slice_frame'):
        '''
        *Method to sum all the induced voltages in one single array.*

        When *length* is an int, each contribution is zero-padded/truncated
        to that length and the padded sum is returned; self.induced_voltage
        always holds the slice-frame sum.
        '''
        temp_induced_voltage = 0
        extended_induced_voltage = 0
        for induced_voltage_object in self.induced_voltage_list:
            if isinstance(length, int):
                extended_induced_voltage += induced_voltage_object.induced_voltage_generation(Beam, length)
            else:
                induced_voltage_object.induced_voltage_generation(Beam, length)
            temp_induced_voltage += induced_voltage_object.induced_voltage
        self.induced_voltage = temp_induced_voltage
        if isinstance(length, int):
            return extended_induced_voltage

    def track(self):
        '''
        *Track method to apply the induced voltage kick on the beam.*
        '''
        self.induced_voltage_sum(self.beam)
        # Energy kick via linear interpolation (C extension).
        libfib.linear_interp_kick(self.beam.dt.ctypes.data_as(ctypes.c_void_p),
                                  self.beam.dE.ctypes.data_as(ctypes.c_void_p),
                                  (self.beam.charge * self.induced_voltage).ctypes.data_as(ctypes.c_void_p),
                                  self.slices.bin_centers.ctypes.data_as(ctypes.c_void_p),
                                  ctypes.c_uint(self.slices.n_slices),
                                  ctypes.c_uint(self.beam.n_macroparticles),
                                  ctypes.c_double(0.))

    def track_memory(self):
        '''
        Calculates the induced voltage energy kick to particles taking into
        account multi-turn induced voltage plus inductive impedance contribution.
        '''
        # Contribution from multi-turn induced voltage.
        # Phase-rotate the stored spectrum by one revolution period, add the
        # current-turn profile convolved with the total impedance, and keep
        # only the slice frame as this turn's voltage.
        self.array_memory *= np.exp(self.omegaj_array_memory * self.rev_time_array[self.counter_turn])
        induced_voltage = irfft(self.array_memory + rfft(self.slices.n_macroparticles, self.n_points_fft) * self.sum_impedances_memory, self.n_points_fft)
        self.induced_voltage = self.coefficient * induced_voltage[:self.slices.n_slices]
        # Zero the tail beyond the memory window before re-storing the spectrum.
        induced_voltage[self.len_array_memory:]=0
        self.array_memory = rfft(induced_voltage, self.n_points_fft)
        # Contribution from inductive impedance
        if self.inductive_impedance_on:
            self.induced_voltage_list[self.index_inductive_impedance].induced_voltage_generation(self.beam, 'slice_frame')
            self.induced_voltage += self.induced_voltage_list[self.index_inductive_impedance].induced_voltage
        # Induced voltage energy kick to particles through linear interpolation
        libfib.linear_interp_kick(self.beam.dt.ctypes.data_as(ctypes.c_void_p),
                                  self.beam.dE.ctypes.data_as(ctypes.c_void_p),
                                  self.induced_voltage.ctypes.data_as(ctypes.c_void_p),
                                  self.slices.bin_centers.ctypes.data_as(ctypes.c_void_p),
                                  ctypes.c_uint(self.slices.n_slices),
                                  ctypes.c_uint(self.beam.n_macroparticles),
                                  ctypes.c_double(0.))
        # Counter update
        self.counter_turn += 1

    def track_ghosts_particles(self, ghostBeam):
        # Apply the already-computed induced voltage to a ghost beam
        # (does not recompute the voltage).
        libfib.linear_interp_kick(ghostBeam.dt.ctypes.data_as(ctypes.c_void_p),
                                  ghostBeam.dE.ctypes.data_as(ctypes.c_void_p),
                                  self.induced_voltage.ctypes.data_as(ctypes.c_void_p),
                                  self.slices.bin_centers.ctypes.data_as(ctypes.c_void_p),
                                  ctypes.c_uint(self.slices.n_slices),
                                  ctypes.c_uint(ghostBeam.n_macroparticles),
                                  ctypes.c_double(0.))
class InducedVoltageTime(object):
    '''
    *Induced voltage derived from the sum of several wake fields (time domain).*
    '''

    def __init__(self, Slices, wake_source_list, n_turns_memory=0, time_or_freq = 'freq'):
        #: *Copy of the Slices object in order to access the profile info.*
        self.slices = Slices
        #: *Wake sources inputed as a list (eg: list of BBResonators objects)*
        self.wake_source_list = wake_source_list
        #: *Time array of the wake in [s]*
        self.time_array = 0
        #: *Total wake array of all sources in* [:math:`\Omega / s`]
        self.total_wake = 0
        #: *Induced voltage from the sum of the wake sources in [V]*
        self.induced_voltage = 0
        # Pre-processing the wakes: sample them on the slicing grid, shifted
        # so the wake starts at t = 0.
        self.time_array = self.slices.bin_centers - self.slices.bin_centers[0]
        self.sum_wakes(self.time_array)
        # Full linear-convolution length and the next regular (5-smooth)
        # FFT size for efficient rfft/irfft.
        self.cut = len(self.time_array) + len(self.slices.n_macroparticles) - 1
        self.fshape = next_regular(self.cut)
        #: *Convolution backend: 'freq' (FFT-based) or 'time' (np.convolve).*
        self.time_or_freq = time_or_freq

    def reprocess(self, new_slicing):
        '''
        *Reprocess the wake contributions with respect to the new_slicing.*
        '''
        self.slices = new_slicing
        self.time_array = self.slices.bin_centers - self.slices.bin_centers[0]
        self.sum_wakes(self.time_array)
        self.cut = len(self.time_array) + len(self.slices.n_macroparticles) - 1
        self.fshape = next_regular(self.cut)

    def sum_wakes(self, time_array):
        '''
        *Summing all the wake contributions in one total wake.*
        '''
        self.total_wake = np.zeros(time_array.shape)
        for wake_object in self.wake_source_list:
            wake_object.wake_calc(time_array)
            self.total_wake += wake_object.wake

    def induced_voltage_generation(self, Beam, length = 'slice_frame'):
        '''
        *Method to calculate the induced voltage from wakes with convolution.*

        When *length* is an int, the (zero-padded) voltage truncated to that
        length is returned; self.induced_voltage always holds the slice frame.
        '''
        if self.time_or_freq == 'freq':
            induced_voltage = - Beam.charge * e * Beam.intensity / Beam.n_macroparticles * irfft(rfft(self.slices.n_macroparticles, self.fshape) * rfft(self.total_wake, self.fshape), self.fshape)
        elif self.time_or_freq == 'time':
            induced_voltage = - Beam.charge * e * Beam.intensity / Beam.n_macroparticles * np.convolve(self.total_wake, self.slices.n_macroparticles)
        else:
            # Fixed typo in the error message ('just freq ot time').
            raise RuntimeError('Error: only freq or time are allowed!')
        self.induced_voltage = induced_voltage[0:self.slices.n_slices]
        if isinstance(length, int):
            max_length = len(induced_voltage)
            if length > max_length:
                induced_voltage = np.lib.pad(induced_voltage, (0,length - max_length), 'constant', constant_values=(0,0))
            return induced_voltage[0:length]

    def track(self, Beam):
        '''
        *Tracking method.*
        '''
        self.induced_voltage_generation(Beam)
        # Bug fix: this class never stores a self.beam attribute (only
        # TotalInducedVoltage does), so the kick must use the Beam argument.
        libfib.linear_interp_kick(Beam.dt.ctypes.data_as(ctypes.c_void_p),
                                  Beam.dE.ctypes.data_as(ctypes.c_void_p),
                                  (Beam.charge * self.induced_voltage).ctypes.data_as(ctypes.c_void_p),
                                  self.slices.bin_centers.ctypes.data_as(ctypes.c_void_p),
                                  ctypes.c_uint(self.slices.n_slices),
                                  ctypes.c_uint(Beam.n_macroparticles),
                                  ctypes.c_double(0.))
class InducedVoltageFreq(object):
'''
*Induced voltage derived from the sum of several impedances.
frequency_resolution is equal to 1/(dist_centers * n) where dist_centers is
the distance between the centers of two consecutive slides and (n/2 + 1)
is the number of sampling points for the frequency array; see the
frequency_array method.
Sum_slopes_from_induc_imp is the sum of all the inductances derived from
all the inductive impedance, included space charge; see in addition the
ind_vol_derivative method.
The frequency resolution is defined by your input, but this value will
be adapted in order to optimize the FFT. The number of points used in the
FFT should be a power of 2, to be faster, but this number of points also
changes the frequency resolution. The frequency is then set to be the
closest power of two to have the closest resolution wrt your input. The
way the code chooses the power is set by the freq_res_option. If this is set
to 'round' (default), the closest (higher or lower) resolution that also
fulfills optimisation will be used. If set to 'best', the frequency resolution
will be at least your input, so you always have a better resolution.*
'''
def __init__(self, Slices, impedance_source_list, frequency_resolution_input = None,
freq_res_option = 'round', n_turns_memory = 0, recalculation_impedance = False, save_individual_voltages = False):
#: *Copy of the Slices object in order to access the profile info.*
self.slices = Slices
#: *Impedance sources inputed as a list (eg: list of BBResonators objects)*
self.impedance_source_list = impedance_source_list
#: *Input frequency resolution in [Hz], the beam profile sampling for the spectrum
#: will be adapted according to the freq_res_option.*
self.frequency_resolution_input = frequency_resolution_input
#: *Number of turns to be considered as memory for induced voltage calculation.*
self.n_turns_memory = n_turns_memory
#: *Length of one slice.*
time_resolution = (self.slices.bin_centers[1] - self.slices.bin_centers[0])
self.recalculation_impedance = recalculation_impedance
if n_turns_memory==0:
if self.frequency_resolution_input == None:
self.n_fft_sampling = self.slices.n_slices
else:
self.freq_res_option = freq_res_option
if self.freq_res_option is 'round':
self.n_fft_sampling = next_regular(int(np.round(1/(self.frequency_resolution_input * time_resolution))))
elif self.freq_res_option is 'ceil':
self.n_fft_sampling = next_regular(int(np.ceil(1/(self.frequency_resolution_input * time_resolution))))
elif self.freq_res_option is 'floor':
self.n_fft_sampling = next_regular(int(np.floor(1/(self.frequency_resolution_input * time_resolution))))
else:
raise RuntimeError('The input freq_res_option is not recognized')
if self.n_fft_sampling < self.slices.n_slices:
print 'The input frequency resolution step is too big, and the whole \
bunch is not sliced... The number of sampling points for the \
FFT is corrected in order to sample the whole bunch (and \
you might consider changing the input in order to have \
a finer resolution).'
self.n_fft_sampling = next_regular(self.slices.n_slices)
#: *Real frequency resolution in [Hz], according to the obtained n_fft_sampling.*
self.frequency_resolution = 1 / (self.n_fft_sampling * time_resolution)
#: *Frequency array of the impedance in [Hz]*
self.frequency_array = rfftfreq(self.n_fft_sampling, self.slices.bin_centers[1] - self.slices.bin_centers[0])
#: *Total impedance array of all sources in* [:math:`\Omega`]
self.total_impedance = 0
self.sum_impedances(self.frequency_array)
self.save_individual_voltages = save_individual_voltages
if self.save_individual_voltages:
self.len_impedance_source_list = len(impedance_source_list)
self.matrix_save_individual_impedances = np.zeros((self.len_impedance_source_list, len(self.frequency_array)))
self.matrix_save_individual_voltages = np.zeros((self.len_impedance_source_list, self.slices.n_slices))
for i in self.len_impedance_source_list:
self.matrix_save_individual_impedances[i,:] = impedance_source_list[i].impedance
#: *Induced voltage from the sum of the wake sources in [V]*
self.induced_voltage = 0
else:
self.n_turns_memory = n_turns_memory
self.len_array_memory = (self.n_turns_memory+1) * self.slices.n_slices
self.len_array_memory_extended = (self.n_turns_memory+2) * self.slices.n_slices
self.n_points_fft = next_regular(self.len_array_memory_extended)
self.frequency_array_memory = rfftfreq(self.n_points_fft, time_resolution)
self.total_impedance_memory = np.zeros(self.frequency_array_memory.shape) + 0j
self.time_array_memory = self.slices.bin_centers
for i in range(1, self.n_turns_memory+1):
self.time_array_memory = np.concatenate((self.time_array_memory, self.slices.bin_centers+(self.slices.edges[-1]-self.slices.edges[0])*i))
for imped_object in self.impedance_source_list:
imped_object.imped_calc(self.frequency_array_memory)
self.total_impedance_memory += imped_object.impedance
def reprocess(self, new_slicing):
'''
*Reprocess the impedance contributions with respect to the new_slicing.*
'''
self.slices = new_slicing
time_resolution = (self.slices.bin_centers[1] - self.slices.bin_centers[0])
if self.frequency_resolution_input == None:
self.n_fft_sampling = self.slices.n_slices
else:
if self.freq_res_option is 'round':
self.n_fft_sampling = next_regular(int(np.round(1/(self.frequency_resolution_input * time_resolution))))
elif self.freq_res_option is 'ceil':
self.n_fft_sampling = next_regular(int(np.ceil(1/(self.frequency_resolution_input * time_resolution))))
elif self.freq_res_option is 'floor':
self.n_fft_sampling = next_regular(int(np.floor(1/(self.frequency_resolution_input * time_resolution))))
else:
raise RuntimeError('The input freq_res_option is not recognized')
if self.n_fft_sampling < self.slices.n_slices:
print 'The input frequency resolution step is too big, and the whole \
bunch is not sliced... The number of sampling points for the \
FFT is corrected in order to sample the whole bunch (and \
you might consider changing the input in order to have \
a finer resolution).'
self.n_fft_sampling = next_regular(self.slices.n_slices)
#: *Real frequency resolution in [Hz], according to the obtained n_fft_sampling.*
self.frequency_resolution = 1 / (self.n_fft_sampling * time_resolution)
self.slices.beam_spectrum_generation(self.n_fft_sampling, only_rfft = True)
#: *Frequency array of the impedance in [Hz]*
self.frequency_array = self.slices.beam_spectrum_freq
#: *Total impedance array of all sources in* [:math:`\Omega`]
self.total_impedance = 0
self.sum_impedances(self.frequency_array)
def sum_impedances(self, frequency_array):
'''
*Summing all the wake contributions in one total impedance.*
'''
self.total_impedance = np.zeros(frequency_array.shape) + 0j
for i in range(len(self.impedance_source_list)):
self.impedance_source_list[i].imped_calc(frequency_array)
self.total_impedance += self.impedance_source_list[i].impedance
def induced_voltage_generation(self, Beam, length = 'slice_frame'):
'''
*Method to calculate the induced voltage from the inverse FFT of the
impedance times the spectrum (fourier convolution).*
'''
if self.recalculation_impedance:
self.sum_impedances(self.frequency_array)
self.slices.beam_spectrum_generation(self.n_fft_sampling)
if self.save_individual_voltages:
for i in range(self.len_impedance_source_list):
self.matrix_save_individual_voltages[:,i] = - Beam.charge * e * Beam.ratio * irfft(self.matrix_save_individual_impedances[:,i] * self.slices.beam_spectrum)[0:self.slices.n_slices] * self.slices.beam_spectrum_freq[1] * 2*(len(self.slices.beam_spectrum)-1)
self.induced_voltage = np.sum(self.matrix_save_individual_voltages,axis=0)
else:
induced_voltage = - Beam.charge * e * Beam.ratio * irfft(self.total_impedance * self.slices.beam_spectrum) * self.slices.beam_spectrum_freq[1] * 2*(len(self.slices.beam_spectrum)-1)
self.induced_voltage = induced_voltage[0:self.slices.n_slices]
if isinstance(length, int):
max_length = len(induced_voltage)
if length > max_length:
induced_voltage = np.lib.pad(induced_voltage, (0, length-max_length), 'constant', constant_values=(0,0))
return induced_voltage[0:length]
def track(self, Beam):
'''
*Tracking method.*
'''
self.induced_voltage_generation(Beam)
libfib.linear_interp_kick(self.beam.dt.ctypes.data_as(ctypes.c_void_p),
self.beam.dE.ctypes.data_as(ctypes.c_void_p),
(self.beam.charge * self.induced_voltage).ctypes.data_as(ctypes.c_void_p),
self.slices.bin_centers.ctypes.data_as(ctypes.c_void_p),
ctypes.c_uint(self.slices.n_slices),
ctypes.c_uint(self.beam.n_macroparticles),
ctypes.c_double(0.))
class InductiveImpedance(object):
    '''
    *Constant imaginary Z/n impedance. This needs to be extended to the
    cases where there is acceleration as the revolution frequency f0 used
    in the calculation of n=f/f0 is changing (general_params as input ?).*
    '''

    def __init__(self, Slices, Z_over_n, revolution_frequency, current_turn,
                 deriv_mode = 'gradient', periodicity = False,
                 smooth_before_after = None, filter_ind_imp = None, filter_options = None, t_rev = None):
        #: *Copy of the Slices object in order to access the profile info.*
        self.slices = Slices
        #: *Constant imaginary Z/n in* [:math:`\Omega / Hz`]
        self.Z_over_n = Z_over_n
        #: *Revolution frequency in [Hz]*
        self.revolution_frequency = revolution_frequency
        #: *Frequency array of the impedance in [Hz]*
        self.freq_array = 0
        #: *Impedance array in* [:math:`\Omega`]
        self.impedance = 0
        #: *Induced voltage from the sum of the wake sources in [V]*
        self.induced_voltage = 0
        #: *Derivation method to compute induced voltage*
        self.deriv_mode = deriv_mode
        #: *Current turn taken from RFSectionParameters*
        self.current_turn = current_turn
        self.periodicity = periodicity
        # Bug fix: the default used to be the mutable list [False, False],
        # shared across all instances; use a None sentinel instead.
        self.smooth_before_after = [False, False] if smooth_before_after is None else smooth_before_after
        self.filter_ind_imp = filter_ind_imp
        self.filter_options = filter_options
        self.t_rev = t_rev

    def reprocess(self, new_slicing):
        '''
        *Reprocess the impedance contributions with respect to the new_slicing.*
        '''
        self.slices = new_slicing

    def imped_calc(self, freq_array):
        '''
        *Impedance calculation method as a function of frequency.*

        Z(f) = j * (f / f0) * (Z/n) at the current turn index.
        '''
        index = self.current_turn[0]
        self.freq_array = freq_array
        self.impedance = (self.freq_array / self.revolution_frequency[index]) * \
                         self.Z_over_n[0][index] * 1j

    def induced_voltage_generation(self, Beam, length = 'slice_frame'):
        '''
        *Method to calculate the induced voltage through the derivative of the
        profile; the impedance must be of inductive type.*
        '''
        index = self.current_turn[0]
        if self.periodicity:
            # Periodic case: differentiate the profile wrapped over one
            # revolution period, with optional smoothing before/after.
            self.derivative_line_density_not_filtered = np.zeros(self.slices.n_slices)
            find_index_slice = np.searchsorted(self.slices.edges, self.t_rev[index])
            if self.smooth_before_after[0]:
                if self.filter_ind_imp == 'gaussian':
                    self.slices.n_macroparticles = ndimage.gaussian_filter1d(self.slices.n_macroparticles, sigma=self.filter_options, mode='wrap')
                elif self.filter_ind_imp == 'chebyshev':
                    nCoefficients, b, a = self.slices.beam_profile_filter_chebyshev(self.filter_options)
                else:
                    raise RuntimeError('filter method not recognised')
            # Build a 3-point-extended window so np.gradient sees periodic
            # neighbours on both sides of the revolution boundary.
            if (self.t_rev[index]-self.slices.bin_centers[find_index_slice])>0:
                temp = np.concatenate((np.array([self.slices.n_macroparticles[find_index_slice]]), self.slices.n_macroparticles[:find_index_slice+1], np.array([self.slices.n_macroparticles[0]])))
            else:
                temp = np.concatenate((np.array([self.slices.n_macroparticles[find_index_slice-1]]), self.slices.n_macroparticles[:find_index_slice], self.slices.n_macroparticles[:2]))
            self.derivative_line_density_not_filtered[: find_index_slice+1] = np.gradient(temp, self.slices.bin_centers[1]-self.slices.bin_centers[0])[1:-1] / (self.slices.bin_centers[1] - self.slices.bin_centers[0])
            if self.smooth_before_after[1]:
                if self.filter_ind_imp == 'gaussian':
                    self.derivative_line_density_filtered = ndimage.gaussian_filter1d(self.derivative_line_density_not_filtered, sigma=self.filter_options, mode='wrap')
                elif self.filter_ind_imp == 'chebyshev':
                    # NOTE(review): b, a are only bound when
                    # smooth_before_after[0] was also True — confirm callers
                    # never request 'chebyshev' post-smoothing alone.
                    self.derivative_line_density_filtered = filtfilt(b, a, self.derivative_line_density_not_filtered)
                    self.derivative_line_density_filtered = np.ascontiguousarray(self.derivative_line_density_filtered)
                else:
                    raise RuntimeError('filter method not recognised')
                induced_voltage = - Beam.charge * e * Beam.ratio * \
                    self.Z_over_n[0][index] * \
                    self.derivative_line_density_filtered / (2 * np.pi * self.revolution_frequency[index])
            else:
                induced_voltage = - Beam.charge * e * Beam.ratio * \
                    self.Z_over_n[0][index] * \
                    self.derivative_line_density_not_filtered / (2 * np.pi * self.revolution_frequency[index])
        else:
            induced_voltage = - Beam.charge * e / (2 * np.pi) * Beam.ratio * \
                self.Z_over_n[0][index] / self.revolution_frequency[index] * \
                self.slices.beam_profile_derivative(self.deriv_mode)[1] / \
                (self.slices.bin_centers[1] - self.slices.bin_centers[0])
        self.induced_voltage = induced_voltage[0:self.slices.n_slices]
        if isinstance(length, int):
            max_length = len(induced_voltage)
            if length > max_length:
                # Bug fix: pad the full (untruncated) voltage, consistently
                # with the other InducedVoltage* classes; the original padded
                # the already-truncated self.induced_voltage.
                induced_voltage = np.lib.pad(induced_voltage, (0, length - max_length), 'constant', constant_values=(0,0))
            return induced_voltage[0:length]

    def track(self, Beam):
        '''
        *Track method.*
        '''
        self.induced_voltage_generation(Beam)
        # Bug fix: this class never stores a self.beam attribute (only
        # TotalInducedVoltage does), so the kick must use the Beam argument.
        libfib.linear_interp_kick(Beam.dt.ctypes.data_as(ctypes.c_void_p),
                                  Beam.dE.ctypes.data_as(ctypes.c_void_p),
                                  (Beam.charge * self.induced_voltage).ctypes.data_as(ctypes.c_void_p),
                                  self.slices.bin_centers.ctypes.data_as(ctypes.c_void_p),
                                  ctypes.c_uint(self.slices.n_slices),
                                  ctypes.c_uint(Beam.n_macroparticles),
                                  ctypes.c_double(0.))
class InputTable(object):
    '''
    *Intensity effects from impedance and wake tables.
    If this constructor takes just two arguments, then a wake table is passed;
    if it takes three arguments, then an impedance table is passed. Be careful
    that if you input a wake, the input wake for W(t=0) should be already
    divided by two (beam loading theorem) ; and that if you input impedance,
    only the positive frequencies of the impedance is needed (the impedance
    will be assumed to be Hermitian (Real part symmetric and Imaginary part
    antisymmetric).Note that we add the point (f, Z(f)) = (0, 0) to the
    frequency and impedance arrays derived from the table.*
    '''

    def __init__(self, input_1, input_2, input_3 = None):
        if input_3 is None:
            #: *Time array of the wake in [s]*
            self.time_array = input_1
            #: *Wake array in* [:math:`\Omega / s`]
            self.wake_array = input_2
        else:
            #: *Frequency array of the impedance in [Hz]*
            self.frequency_array_loaded = input_1
            #: *Real part of impedance in* [:math:`\Omega`]
            self.Re_Z_array_loaded = input_2
            #: *Imaginary part of impedance in* [:math:`\Omega`]
            self.Im_Z_array_loaded = input_3
            # Prepend the (f, Z(f)) = (0, 0) point promised by the class
            # docstring before deriving the complex impedance.
            if self.frequency_array_loaded[0] != 0:
                self.frequency_array_loaded = np.hstack((0, self.frequency_array_loaded))
                self.Re_Z_array_loaded = np.hstack((0, self.Re_Z_array_loaded))
                self.Im_Z_array_loaded = np.hstack((0, self.Im_Z_array_loaded))
            #: *Impedance array in* [:math:`\Omega`]
            # Bug fix: previously computed *before* the zero point was added,
            # leaving impedance_loaded one element shorter than (and
            # inconsistent with) frequency_array_loaded.
            self.impedance_loaded = self.Re_Z_array_loaded + 1j * self.Im_Z_array_loaded

    def wake_calc(self, new_time_array):
        '''
        *The wake is interpolated in order to scale with the new time array.*
        Values beyond the table's last point are set to zero.
        '''
        wake = np.interp(new_time_array, self.time_array, self.wake_array,
                         right = 0)
        self.wake_array = wake
        self.time_array = new_time_array

    def imped_calc(self, new_frequency_array):
        '''
        *The impedance is interpolated in order to scale with the new frequency
        array.*
        Values beyond the table's last point are set to zero.
        '''
        Re_Z = np.interp(new_frequency_array, self.frequency_array_loaded, self.Re_Z_array_loaded,
                         right = 0)
        Im_Z = np.interp(new_frequency_array, self.frequency_array_loaded, self.Im_Z_array_loaded,
                         right = 0)
        self.frequency_array = new_frequency_array
        self.Re_Z_array = Re_Z
        self.Im_Z_array = Im_Z
        self.impedance = Re_Z + 1j * Im_Z
class Resonators(object):
    '''
    *Impedance contribution from resonators, analytic formulas for both wake
    and impedance. Several resonant modes can be given at once by passing
    lists for R, f_r and Q.*

    *Model:*

    .. math::

        Z(f) = \\frac{R}{1 + j Q \\left(\\frac{f}{f_r}-\\frac{f_r}{f}\\right)}

    .. math::

        W(t>0) = 2\\alpha R e^{-\\alpha t}\\left(\\cos{\\bar{\\omega}t} - \\frac{\\alpha}{\\bar{\\omega}}\\sin{\\bar{\\omega}t}\\right)

        W(0) = \\alpha R

    .. math::

        \\omega_r = 2 \\pi f_r

        \\alpha = \\frac{\\omega_r}{2Q}

        \\bar{\\omega} = \\sqrt{\\omega_r^2 - \\alpha^2}
    '''

    def __init__(self, R_S, frequency_R, Q):
        #: *Shunt impedance in* [:math:`\Omega`], one entry per mode
        self.R_S = np.array([R_S]).flatten()
        #: *Resonant frequency in [Hz]*
        self.frequency_R = np.array([frequency_R]).flatten()
        #: *Resonant angular frequency in [rad/s]*
        self.omega_R = 2 *np.pi * self.frequency_R
        #: *Quality factor*
        self.Q = np.array([Q]).flatten()
        #: *Number of resonant modes*
        self.n_resonators = len(self.R_S)
        #: *Time array of the wake in [s]*
        self.time_array = 0
        #: *Wake array in* [:math:`\Omega / s`]
        self.wake = 0
        #: *Frequency array of the impedance in [Hz]*
        self.freq_array = 0
        #: *Impedance array in* [:math:`\Omega`]
        self.impedance = 0

    def wake_calc(self, time_array):
        '''
        *Compute the wake field on* ``time_array`` *(seconds), summed over
        all resonant modes.*
        '''
        self.time_array = time_array
        self.wake = np.zeros(self.time_array.shape)
        for shunt, omega_r, quality in zip(self.R_S, self.omega_R, self.Q):
            alpha = omega_r / (2 * quality)
            omega_bar = np.sqrt(omega_r ** 2 - alpha ** 2)
            # (sign(t) + 1) is 0 for t < 0, 1 at t = 0 (half value, beam
            # loading theorem) and 2 for t > 0.
            causality = np.sign(self.time_array) + 1
            damping = np.exp(-alpha * self.time_array)
            oscillation = (np.cos(omega_bar * self.time_array)
                           - alpha / omega_bar * np.sin(omega_bar * self.time_array))
            self.wake += causality * shunt * alpha * damping * oscillation

    def imped_calc(self, freq_array):
        '''
        *Compute the impedance on* ``freq_array`` *(Hz), summed over all
        resonant modes. The DC point (index 0) is left at zero.*
        '''
        self.freq_array = freq_array
        self.impedance = np.zeros(len(self.freq_array)) + 0j
        # Skip index 0 to avoid the 1/f division at f = 0.
        f = self.freq_array[1:]
        for shunt, f_r, quality in zip(self.R_S, self.frequency_R, self.Q):
            self.impedance[1:] += shunt / (1 + 1j * quality * (f / f_r - f_r / f))
class TravelingWaveCavity(object):
    '''
    *Impedance contribution from traveling wave cavities, analytic formulas
    for both wake and impedance. Several modes can be given at once by
    passing lists for R, f_r and a.*

    *Model:*

    .. math::

        Z_+(f) = R \\left[\\left(\\frac{\\sin{\\frac{a\\left(f-f_r\\right)}{2}}}{\\frac{a\\left(f-f_r\\right)}{2}}\\right)^2 - 2i \\frac{a\\left(f-f_r\\right) - \\sin{a\\left(f-f_r\\right)}}{\\left(a\\left(f-f_r\\right)\\right)^2}\\right]

        Z_-(f) = R \\left[\\left(\\frac{\\sin{\\frac{a\\left(f+f_r\\right)}{2}}}{\\frac{a\\left(f+f_r\\right)}{2}}\\right)^2 - 2i \\frac{a\\left(f+f_r\\right) - \\sin{a\\left(f+f_r\\right)}}{\\left(a\\left(f+f_r\\right)\\right)^2}\\right]

        Z = Z_+ + Z_-

    .. math::

        W(0<t<\\tilde{a}) = \\frac{4R}{\\tilde{a}}\\left(1-\\frac{t}{\\tilde{a}}\\right)\\cos{\\omega_r t}

        W(0) = \\frac{2R}{\\tilde{a}}

    .. math::

        a = 2 \\pi \\tilde{a}
    '''

    def __init__(self, R_S, frequency_R, a_factor):
        #: *Shunt impedance in* [:math:`\Omega`], one entry per mode
        self.R_S = np.array([R_S]).flatten()
        #: *Resonant frequency in [Hz]*
        self.frequency_R = np.array([frequency_R]).flatten()
        #: *Damping time a in [s]*
        self.a_factor = np.array([a_factor]).flatten()
        #: *Number of resonant modes*
        self.n_twc = len(self.R_S)
        #: *Time array of the wake in [s]*
        self.time_array = 0
        #: *Wake array in* [:math:`\Omega / s`]
        self.wake = 0
        #: *Frequency array of the impedance in [Hz]*
        self.freq_array = 0
        #: *Impedance array in* [:math:`\Omega`]
        self.impedance = 0

    def wake_calc(self, time_array):
        '''
        *Compute the wake on* ``time_array`` *(seconds); the wake vanishes
        for t beyond the filling time a_tilde.*
        '''
        self.time_array = time_array
        self.wake = np.zeros(self.time_array.shape)
        for shunt, f_r, a in zip(self.R_S, self.frequency_R, self.a_factor):
            a_tilde = a / (2 * np.pi)
            inside = np.where(self.time_array <= a_tilde)
            t = self.time_array[inside]
            # (sign(t) + 1): 0 before the wake, 1 at t = 0 (half value), 2 after.
            self.wake[inside] += ((np.sign(t) + 1) * 2 * shunt / a_tilde
                                  * (1 - t / a_tilde)
                                  * np.cos(2 * np.pi * f_r * t))

    def imped_calc(self, freq_array):
        '''
        *Compute the impedance on* ``freq_array`` *(Hz) as the sum of the
        +f_r and -f_r lobes of each mode.*

        NOTE(review): the expression divides by a*(f -/+ f_r), so entries of
        ``freq_array`` equal to +/- f_r produce warnings/NaN — confirm
        callers avoid the exact resonant frequencies.
        '''
        self.freq_array = freq_array
        self.impedance = np.zeros(len(self.freq_array)) + 0j
        for shunt, f_r, a in zip(self.R_S, self.frequency_R, self.a_factor):
            # phi = a*(f - f_r) for the upper lobe, a*(f + f_r) for the lower.
            for phi in (a * (self.freq_array - f_r), a * (self.freq_array + f_r)):
                self.impedance += shunt * ((np.sin(phi / 2) / (phi / 2)) ** 2
                                           - 2j * (phi - np.sin(phi)) / phi ** 2)
| kiliakis/BLonD | impedances/impedance.py | Python | gpl-3.0 | 38,779 | [
"Gaussian"
] | c34754d708a11761971713418098dec335f972196b19b82f2353e2fa5c76bd4a |
#!/usr/bin/env python
"""
Description:
Utility to partse and validate a CSAF Common Vulnerability Reporting Framework (CVRF)
file and display user-specified fields.
For additional information about CSAF or CVRF visit:
https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=csaf
Requirements:
* lxml
This tool is based on the original cvrfparse utility created by Mike Schiffman of
Farsight Security under the MIT License. https://github.com/mschiffm/cvrfparse
"""
import os
import sys
import copy
import codecs
import urllib2
import argparse
from lxml import etree
__revision__ = "1.0"
class CVRF_Syntax(object):
    """
    CVRF Elements and Namespaces.
    """
    # Clark-notation ("{uri}") namespace prefixes for the three CVRF 1.2
    # schemas, keyed by upper-cased abbreviation: CVRF, VULN, PROD.
    NAMESPACES = {x.upper(): "{http://docs.oasis-open.org/csaf/ns/csaf-cvrf/v1.2/%s}" % x for x in ("cvrf", "vuln", "prod")}
    # Element names the user may request per namespace; "all" globs the rest.
    CVRF_ARGS = ["all", "DocumentTitle", "DocumentType", "DocumentPublisher", "DocumentTracking", "DocumentNotes",
                 "DocumentDistribution", "AggregateSeverity", "DocumentReferences", "Acknowledgments"]
    VULN_ARGS = ["all", "Title", "ID", "Notes", "DiscoveryDate", "ReleaseDate", "Involvements", "CVE", "CWE",
                 "ProductStatuses", "Threats", "CVSSScoreSets", "Remediations", "References", "Acknowledgments"]
    PROD_ARGS = ["all", "Branch", "FullProductName", "Relationship", "ProductGroups"]
    # Remote schema used for validation, and the local catalog fallback.
    CVRF_SCHEMA = "http://docs.oasis-open.org/csaf/csaf-cvrf/v1.2/cs01/schemas/cvrf.xsd"
    CVRF_CATALOG = "./cvrfparse/schemata/catalog.xml"
class PrependerAction(argparse.Action):
    """argparse action that prefixes each option value with static text.

    Subclasses set ``prepend_text``; every value parsed for the option is
    appended to the destination list with that prefix attached, accumulating
    across repeated uses of the option.
    """
    prepend_text = ""

    def __call__(self, parser, namespace, values, option_string=None):
        # Start from a copy of anything accumulated by earlier occurrences.
        existing = getattr(namespace, self.dest, None)
        collected = copy.copy(existing) if existing is not None else []
        collected.extend(self.prepend_text + item for item in values)
        setattr(namespace, self.dest, collected)
class NonDupBracketFormatter(argparse.HelpFormatter):
    """
    Customization for argparse. A formatter that is more terse with repeated
    arguments.
    """
    def _format_args(self, action, default_metavar):
        # Render nargs='*' as "[X ...]" and nargs='+' as "X [...]" instead of
        # argparse's default repeated-metavar forms; defer to the base class
        # for every other nargs value.
        get_metavar = self._metavar_formatter(action, default_metavar)
        if action.nargs == argparse.ZERO_OR_MORE:
            result = "[%s ...]" % get_metavar(1)
        elif action.nargs == argparse.ONE_OR_MORE:
            result = "%s [...]" % get_metavar(1)
        else:
            result = super(NonDupBracketFormatter, self)._format_args(
                action, default_metavar)
        return result
def namespace_prepend(namespace):
    """Build a ``PrependerAction`` subclass bound to one CVRF namespace.

    Returns a dynamically created class (not an instance) whose
    ``prepend_text`` is the Clark-notation prefix for *namespace*
    ("CVRF", "VULN" or "PROD").
    """
    prefix = CVRF_Syntax.NAMESPACES[namespace]
    class_name = "Prepend_%s" % namespace
    return type(class_name, (PrependerAction,), {"prepend_text": prefix})
def chop_ns_prefix(element):
    """Strip the ``{namespace}`` prefix from a fully qualified tag.

    element: a fully qualified ElementTree tag, e.g. ``{uri}LocalName``.
    Raises ValueError (via ``rindex``) when no ``}`` is present.
    """
    cut = element.rindex("}") + 1
    return element[cut:]
def print_node(node, strip_ns, f=sys.stdout):
    """
    Print each XML node
    node: the ElementTree node to be printed
    strip_ns: boolean that when true indicates the namespace prefix will be chomped
    f: the file to print to (default is stdout)
    """
    if node.tag:
        # Trailing comma: Python 2 print statement suppresses the newline so
        # the tag and its text land on one line.
        print >> f, "[%s]" % (chop_ns_prefix(node.tag) if strip_ns else node.tag),
    if node.text:
        print >> f, node.text.strip()
    if node.attrib:
        # One "(key: value)" line per attribute.
        for key in node.attrib:
            print >> f, "(%s: %s)" % (key, node.attrib[key])
    # Blank separator line after every node.
    print >> f
def cvrf_validate(f, cvrf_doc):
    """
    Validates a CVRF document
    f: file object containing the schema
    cvrf_doc: the serialized CVRF ElementTree object
    returns: a code (True for valid / False for invalid) and a reason for the code
    """
    # First make sure the schema itself parses; a malformed schema is
    # reported distinctly from an invalid document.
    try:
        xmlschema_doc = etree.parse(f)
    except etree.XMLSyntaxError as e:
        log = e.error_log.filter_from_level(etree.ErrorLevels.FATAL)
        return False, "Parsing error, schema document \"{0}\" is not well-formed: {1}".format(f.name, log)
    xmlschema = etree.XMLSchema(xmlschema_doc)
    # assertValid raises on failure; the collected error log is returned as
    # the human-readable reason.
    try:
        xmlschema.assertValid(cvrf_doc)
        return True, "Valid"
    except etree.DocumentInvalid:
        return False, xmlschema.error_log
def cvrf_dump(results, strip_ns):
    """
    Iterates over results and dumps to the dictionary key (which is a file handle)
    results: a dictionary of the format: {filename, [ElementTree node, ...], ...}
    strip_ns: boolean that when true indicates the namespace prefix will be chomped
    """
    # NOTE(review): `progname` below is a global only set by the __main__
    # guard; calling this from library code would NameError on I/O failure --
    # TODO confirm intended usage.
    for key in results:
        if key == "stdout":
            # The magic key "stdout" writes to the console instead of a file.
            f = codecs.EncodedFile(sys.stdout, data_encoding="UTF-8")
        else:
            try:
                f = codecs.open(key, "w", encoding="UTF-8")
            except IOError as e:
                sys.exit("{0}: I/O error({1}) \"{2}\": {3}".format(progname, e.errno, key, e.strerror))
        for item in results[key]:
            print_node(item, strip_ns, f)
        # NOTE(review): for the "stdout" key this closes the EncodedFile
        # wrapper around sys.stdout, which looks like it would also close
        # stdout itself -- verify before reusing stdout afterwards.
        f.close()
def cvrf_dispatch(cvrf_doc, parsables, collate_vuln, strip_ns):
    """Run the user-requested actions over a parsed CVRF document.

    cvrf_doc: the serialized CVRF ElementTree object
    parsables: list of fully qualified element names to extract (may be empty)
    collate_vuln: boolean indicating whether to collate the vulnerabilities
    strip_ns: boolean that when true chomps the namespace prefix on output
    returns: N/A
    """
    if parsables:
        parsed = cvrf_parse(cvrf_doc, parsables)
        cvrf_dump(parsed, strip_ns)
    if collate_vuln:
        collated = cvrf_collate_vuln(cvrf_doc)
        cvrf_dump(collated, strip_ns)
def cvrf_parse(cvrf_doc, parsables):
    """Extract the requested elements (and all their descendants) from a doc.

    cvrf_doc: the serialized CVRF ElementTree object
    parsables: list of fully qualified element names to pull out
    returns: a dictionary of the format {filename: [node, ...]}
    """
    collected = [
        descendant
        for tag in parsables
        for match in cvrf_doc.iter(tag)
        for descendant in match.iter()
    ]
    # Hardcoded output key for now, eventually make this user-tunable.
    return {"stdout": collected}
def cvrf_collate_vuln(cvrf_doc):
    """
    Zip through a cvrf_doc and return all vulnerability elements collated by ordinal
    cvrf_doc: the serialized CVRF ElementTree object
    returns: a dictionary of the format {filename:[item, ...], filename:[item, ...]}

    Note: the dictionary values are lazy ``node.iter()`` iterators; they are
    consumed (once) when cvrf_dump walks them.
    """
    results = {}
    # Obtain document title to use in the filename(s), tiptoeing around the
    # curly braces in our Clark-notation NS definition.
    document_title = cvrf_doc.findtext("cvrf:DocumentTitle",
                                       namespaces={"cvrf": CVRF_Syntax.NAMESPACES["CVRF"].replace("{", "").replace("}", "")}).strip().replace(" ", "_")
    # Constrain Xpath search to the Vulnerability container
    for node in cvrf_doc.findall(".//" + CVRF_Syntax.NAMESPACES["VULN"] + "Vulnerability"):
        # Create filename based on ordinal number to use as a key for results dictionary
        filename = "cvrfparse-" + document_title + "-ordinal-" + node.attrib["Ordinal"] + ".txt"
        # Create an iterator to iterate over each child element and populate results dictionary values
        results[filename] = node.iter()
    return results
def post_process_arglist(arg, namespace, valid_args):
    """Expand an "all" glob into the full element list for a namespace.

    arg: the (already namespace-prefixed) names collected by argparse
    namespace: "CVRF", "VULN" or "PROD"
    valid_args: every element name valid for that namespace (includes "all")
    returns: a flat list of fully qualified element names
    """
    glob_token = CVRF_Syntax.NAMESPACES[namespace] + "all"
    if glob_token in arg:
        expanded = [CVRF_Syntax.NAMESPACES[namespace] + element
                    for element in valid_args]
        # valid_args contains the "all" pseudo-element; drop it again.
        expanded.remove(glob_token)
        return expanded
    return list(arg)
def main(progname=None):
    """Parse command-line arguments, optionally validate, then emit fields.

    progname: program name used in error messages (defaults to argv[0]).
    """
    progname = progname if progname else os.path.basename(sys.argv[0])
    parser = argparse.ArgumentParser(formatter_class=NonDupBracketFormatter,
                                     description="Validate/parse a CVRF 1.1 document and emit user-specified bits.")
    # NOTE(review): required="True" is a truthy *string*, not the boolean
    # True; it works by accident -- should read required=True.
    parser.add_argument("-f", "--file", required="True", action="store",
                        help="candidate CVRF 1.1 XML file")
    # The three namespace options use PrependerAction subclasses so the
    # stored values arrive fully namespace-qualified.
    parser.add_argument('--cvrf', nargs="*", choices=CVRF_Syntax.CVRF_ARGS,
                        action=namespace_prepend("CVRF"),
                        help="emit CVRF elements, use \"all\" to glob all CVRF elements.")
    parser.add_argument("--vuln", nargs="*", choices=CVRF_Syntax.VULN_ARGS,
                        action=namespace_prepend("VULN"),
                        help="emit Vulnerability elements, use \"all\" to glob all Vulnerability elements.")
    parser.add_argument("--prod", nargs="*", choices=CVRF_Syntax.PROD_ARGS,
                        action=namespace_prepend("PROD"),
                        help="emit ProductTree elements, use \"all\" to glob all ProductTree elements.")
    parser.add_argument("-c", "--collate", dest="collate_vuln", default=False,
                        action="store_true",
                        help="collate all of the Vulnerability elements by ordinal into separate files")
    parser.add_argument("-s", "--strip-ns", dest="strip_ns", default=False, action="store_true",
                        help="strip namespace header from element tags before printing")
    parser.add_argument("-V", "--validate", default=False, action="store_true",
                        help="validate the CVRF document")
    parser.add_argument("-S", "--schema", action="store",
                        help="specify local alternative for cvrf.xsd")
    parser.add_argument("-C", "--catalog", action="store",
                        help="specify location for catalog.xml (default is {0})".format(CVRF_Syntax.CVRF_CATALOG))
    parser.add_argument("-v", "--version", action="version", version="%(prog)s " + __revision__)
    args = parser.parse_args()
    # Post process argument lists into a single list, handling 'all' globs if present
    # this block should probably eventually be folded into argparse
    parsables = []
    if args.cvrf:
        parsables.extend(post_process_arglist(args.cvrf, "CVRF", CVRF_Syntax.CVRF_ARGS))
    if args.vuln:
        parsables.extend(post_process_arglist(args.vuln, "VULN", CVRF_Syntax.VULN_ARGS))
    if args.prod:
        parsables.extend(post_process_arglist(args.prod, "PROD", CVRF_Syntax.PROD_ARGS))
    # First things first: parse the document (to ensure it is well-formed XML) to obtain an ElementTree object
    # to pass to the CVRF validator/parser
    try:
        cvrf_doc = etree.parse(args.file, etree.XMLParser(encoding="utf-8"))
    except IOError:
        sys.exit("{0}: I/O error: \"{1}\" does not exist".format(progname, args.file))
    except etree.XMLSyntaxError as e:
        sys.exit("{0}: Parsing error, document \"{1}\" is not well-formed: {2}".format(progname, args.file, e.error_log.filter_from_level(etree.ErrorLevels.FATAL)))
    if args.validate is True:
        try:
            if args.schema:
                # Try to use local schema files
                f = open(args.schema, 'r')
                # If the supplied file is not a valid catalog.xml or doesn't exist lxml will fall back to
                # using remote validation
                catalog = args.catalog if args.catalog else CVRF_Syntax.CVRF_CATALOG
                os.environ.update(XML_CATALOG_FILES=catalog)
            else:
                # No local schema supplied: fetch the official one remotely.
                print >> sys.stderr, "Fetching schemata..."
                f = urllib2.urlopen(CVRF_Syntax.CVRF_SCHEMA)
        except IOError as e:
            sys.exit("{0}: I/O error({1}) \"{2}\": {3}".format(progname, e.errno, args.schema, e.strerror))
        (code, result) = cvrf_validate(f, cvrf_doc)
        f.close()
        if code is False:
            sys.exit("{0}: {1}".format(progname, result))
        else:
            print >> sys.stderr, result
    # Finally emit whatever the user asked for.
    cvrf_dispatch(cvrf_doc, parsables, collate_vuln=args.collate_vuln, strip_ns=args.strip_ns)
if __name__ == "__main__":
    progname = os.path.basename(sys.argv[0])
    try:
        main(progname)
    # Python 2 comma-style except clause: catch any error, show the full
    # traceback via the default excepthook, then exit with a summary line.
    except Exception, value:
        (exc_type, exc_value, exc_tb) = sys.exc_info()
        sys.excepthook(exc_type, exc_value, exc_tb)  # if debugging
        sys.exit("%s: %s: %s" % (progname, exc_type.__name__, exc_value))
    sys.exit(0)
| CiscoPSIRT/cvrf-util | cvrf_util.py | Python | mit | 12,314 | [
"VisIt"
] | 31ed3d07b9a3cf08827fd8db926178f0844b434377fb3caa5758776530298790 |
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from lettuce import world, step
from common import *
from terrain.steps import reload_the_page
from selenium.common.exceptions import InvalidElementStateException
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from contentstore.utils import reverse_course_url
from nose.tools import assert_in, assert_not_in, assert_equal, assert_not_equal # pylint: disable=no-name-in-module
@step(u'I am viewing the grading settings')
def view_grading_settings(step):
    """Navigate from the course-settings menu to the grading settings page."""
    world.click_course_settings()
    link_css = 'li.nav-course-settings-grading a'
    world.css_click(link_css)


@step(u'I add "([^"]*)" new grade')
def add_grade(step, many):
    """Click the 'new grade' button *many* times."""
    grade_css = '.new-grade-button'
    for i in range(int(many)):
        world.css_click(grade_css)


@step(u'I delete a grade')
def delete_grade(step):
    """Remove the first grade bar.

    Done via direct JavaScript because the remove button only appears on
    hover (the commented-out Selenium approach below was unreliable).
    """
    #grade_css = 'li.grade-specific-bar > a.remove-button'
    #range_css = '.grade-specific-bar'
    #world.css_find(range_css)[1].mouseover()
    #world.css_click(grade_css)
    world.browser.execute_script('document.getElementsByClassName("remove-button")[0].click()')


@step(u'I see I now have "([^"]*)" grades$')
def view_grade_slider(step, how_many):
    """Assert that exactly *how_many* grade bars are displayed."""
    grade_slider_css = '.grade-specific-bar'
    all_grades = world.css_find(grade_slider_css)
    assert_equal(len(all_grades), int(how_many))


@step(u'I move a grading section')
def move_grade_slider(step):
    """Drag the first grade-range resize handle 100px to the right."""
    moveable_css = '.ui-resizable-e'
    f = world.css_find(moveable_css).first
    f.action_chains.drag_and_drop_by_offset(f._element, 100, 0).perform()


@step(u'I see that the grade range has changed')
def confirm_change(step):
    """Assert that no displayed grade range still reads the default '0-50'."""
    range_css = '.range'
    all_ranges = world.css_find(range_css)
    for i in range(len(all_ranges)):
        assert_not_equal(world.css_html(range_css, index=i), '0-50')
@step(u'I change assignment type "([^"]*)" to "([^"]*)"$')
def change_assignment_name(step, old_name, new_name):
    """Rename assignment type *old_name* to *new_name* by retyping the field.

    NOTE(review): `Keys` is presumably pulled in by `from common import *`
    at the top of this file -- confirm.
    """
    name_id = '#course-grading-assignment-name'
    index = get_type_index(old_name)
    f = world.css_find(name_id)[index]
    # get_type_index returns -1 when the name is missing.
    assert_not_equal(index, -1)
    # Clear the field one character at a time, then type the new name.
    for __ in xrange(len(old_name)):
        f._element.send_keys(Keys.END, Keys.BACK_SPACE)
    f._element.send_keys(new_name)


@step(u'I go back to the main course page')
def main_course_page(step):
    """Visit the course outline page and assert it loaded."""
    main_page_link = reverse_course_url('course_handler', world.scenario_dict['COURSE'].id)
    world.visit(main_page_link)
    assert_in('Course Outline', world.css_text('h1.page-header'))


@step(u'I do( not)? see the assignment name "([^"]*)"$')
def see_assignment_name(step, do_not, name):
    """Placeholder: assignment names are no longer shown on the outline."""
    # TODO: rewrite this once grading has been added back to the course outline
    pass
    # assignment_menu_css = 'ul.menu > li > a'
    # # First assert that it is there, make take a bit to redraw
    # assert_true(
    #     world.css_find(assignment_menu_css),
    #     msg="Could not find assignment menu"
    # )
    #
    # assignment_menu = world.css_find(assignment_menu_css)
    # allnames = [item.html for item in assignment_menu]
    # if do_not:
    #     assert_not_in(name, allnames)
    # else:
    #     assert_in(name, allnames)


@step(u'I delete the assignment type "([^"]*)"$')
def delete_assignment_type(step, to_delete):
    """Click the remove button belonging to assignment type *to_delete*."""
    delete_css = '.remove-grading-data'
    world.css_click(delete_css, index=get_type_index(to_delete))


@step(u'I add a new assignment type "([^"]*)"$')
def add_assignment_type(step, new_name):
    """Add an assignment type and fill its (last) name field with *new_name*."""
    add_button_css = '.add-grading-data'
    world.css_click(add_button_css)
    name_id = '#course-grading-assignment-name'
    new_assignment = world.css_find(name_id)[-1]
    new_assignment._element.send_keys(new_name)
@step(u'I set the assignment weight to "([^"]*)"$')
def set_weight(step, weight):
    """Replace the last assignment type's weight field with *weight*."""
    weight_id = '#course-grading-assignment-gradeweight'
    weight_field = world.css_find(weight_id)[-1]
    old_weight = world.css_value(weight_id, -1)
    # Clear the existing value character by character, then type the new one.
    for count in range(len(old_weight)):
        weight_field._element.send_keys(Keys.END, Keys.BACK_SPACE)
    weight_field._element.send_keys(weight)


@step(u'the assignment weight is displayed as "([^"]*)"$')
def verify_weight(step, weight):
    """Assert the last assignment type's weight field shows *weight*."""
    weight_id = '#course-grading-assignment-gradeweight'
    assert_equal(world.css_value(weight_id, -1), weight)


@step(u'I do not see the changes persisted on refresh$')
def changes_not_persisted(step):
    """Reload and assert the first assignment name reverted to 'Homework'."""
    reload_the_page(step)
    name_id = '#course-grading-assignment-name'
    assert_equal(world.css_value(name_id), 'Homework')


@step(u'I see the assignment type "(.*)"$')
def i_see_the_assignment_type(_step, name):
    """Assert *name* appears among the assignment-type name fields."""
    assignment_css = '#course-grading-assignment-name'
    assignments = world.css_find(assignment_css)
    types = [ele['value'] for ele in assignments]
    assert_in(name, types)


@step(u'I change the highest grade range to "(.*)"$')
def change_grade_range(_step, range_name):
    """Type *range_name* into the first (highest) letter-grade field."""
    range_css = 'span.letter-grade'
    grade = world.css_find(range_css).first
    grade.value = range_name


@step(u'I see the highest grade range is "(.*)"$')
def i_see_highest_grade_range(_step, range_name):
    """Assert the first letter-grade field shows *range_name*."""
    range_css = 'span.letter-grade'
    grade = world.css_find(range_css).first
    assert_equal(grade.value, range_name)


@step(u'I cannot edit the "Fail" grade range$')
def cannot_edit_fail(_step):
    """Assert the failing grade range is read-only."""
    range_css = 'span.letter-grade'
    ranges = world.css_find(range_css)
    assert_equal(len(ranges), 2)
    assert_not_equal(ranges.last.value, 'Failure')
    # try to change the grade range -- this should throw an exception
    try:
        ranges.last.value = 'Failure'
    except (InvalidElementStateException):
        pass  # We should get this exception on failing to edit the element
    # check to be sure that nothing has changed
    ranges = world.css_find(range_css)
    assert_equal(len(ranges), 2)
    assert_not_equal(ranges.last.value, 'Failure')
@step(u'I change the grace period to "(.*)"$')
def i_change_grace_period(_step, grace_period):
    """Set the grace-period field, waiting for its JS-populated default first.

    NOTE(review): `assert_true` presumably comes from `from common import *`
    -- confirm.
    """
    grace_period_css = '#course-grading-graceperiod'
    ele = world.css_find(grace_period_css).first
    # Sometimes it takes a moment for the JavaScript
    # to populate the field. If we don't wait for
    # this to happen, then we can end up with
    # an invalid value (e.g. "00:0048:00")
    # which prevents us from saving.
    assert_true(world.css_has_value(grace_period_css, "00:00"))
    # Set the new grace period
    ele.value = grace_period


@step(u'I see the grace period is "(.*)"$')
def the_grace_period_is(_step, grace_period):
    """Wait until the grace-period field shows *grace_period*."""
    grace_period_css = '#course-grading-graceperiod'
    # The default value is 00:00
    # so we need to wait for it to change
    world.wait_for(
        lambda _: world.css_has_value(grace_period_css, grace_period)
    )


def get_type_index(name):
    """Return the index of the assignment type named *name*, or -1."""
    name_id = '#course-grading-assignment-name'
    all_types = world.css_find(name_id)
    for index in range(len(all_types)):
        if world.css_value(name_id, index=index) == name:
            return index
    return -1
| olexiim/edx-platform | cms/djangoapps/contentstore/features/grading.py | Python | agpl-3.0 | 6,983 | [
"VisIt"
] | e33dedb050fca5fb22011b0e363a3e50209df1e8062189e6644793fc645d42f9 |
#!/usr/bin/env python
#Last-update: 12/05/07 21:02:10
import re
import sys
from sets import Set
##
# External files
#
AJS_SRC = 'AJS.js'
AJS_MINI_SRC = 'AJS_compressed.js'
##
# Standard stuff that may change in the future
#
DOM_SHORTCUTS = [
"ul", "li", "td", "tr", "th",
"tbody", "table", "input", "span", "b",
"a", "div", "img", "button", "h1",
"h2", "h3", "br", "textarea", "form",
"p", "select", "option", "iframe", "script",
"center", "dl", "dt", "dd", "small",
"pre", "tn"
]
FN_SHORTCUTS = {
'$': 'getElement',
'$$': 'getElements',
'$f': 'getFormElement',
'$b': 'bind',
'$p': 'partial',
'$A': 'createArray',
'DI': 'documentInsert',
'ACN': 'appendChildNodes',
'RCN': 'replaceChildNodes',
'AEV': 'addEventListener',
'REV': 'removeEventListener',
'$bytc': 'getElementsByTagAndClassName',
'$AP': 'absolutePosition',
'$FA': 'forceArray'
}
AJS_TEMPLATE = """//AJS JavaScript library (minify'ed version)
//Copyright (c) 2006 Amir Salihefendic. All rights reserved.
//Copyright (c) 2005 Bob Ippolito. All rights reserved.
//License: http://www.opensource.org/licenses/mit-license.php
//Visit http://orangoo.com/AmiNation/AJS for full version.
AJS = {
BASE_URL: "",
drag_obj: null,
drag_elm: null,
_drop_zones: [],
_cur_pos: null,
ready_bound: false,
is_ready: false,
ready_list: [],
_f_guid: 0,
_wipe_guid: 0,
%(functions)s
}
AJS.$ = AJS.getElement;
AJS.$$ = AJS.getElements;
AJS.$f = AJS.getFormElement;
AJS.$p = AJS.partial;
AJS.$b = AJS.bind;
AJS.$A = AJS.createArray;
AJS.DI = AJS.documentInsert;
AJS.ACN = AJS.appendChildNodes;
AJS.RCN = AJS.replaceChildNodes;
AJS.AEV = AJS.addEventListener;
AJS.REV = AJS.removeEventListener;
AJS.$bytc = AJS.getElementsByTagAndClassName;
AJS.$AP = AJS.absolutePosition;
AJS.$FA = AJS.forceArray;
AJS._createDomShortcuts();
%(AJSClass)s
%(AJSDeferred)s
script_loaded = true;
"""
def getAjsCode():
    """Read and return the full AJS source (path in module-level AJS_SRC).

    Fixed: the file handle was previously never closed; close it explicitly
    instead of relying on interpreter-specific garbage collection.
    """
    f = open(AJS_SRC)
    try:
        return f.read()
    finally:
        f.close()


def writeAjsMini(code):
    """Write *code* to the minified output file (module-level AJS_MINI_SRC).

    Fixed: the file handle was previously never closed, so the write could
    stay unflushed; close it explicitly.
    """
    f = open(AJS_MINI_SRC, "w")
    try:
        f.write(code)
    finally:
        f.close()
class AjsAnalyzer:
    """Parses the AJS library source and builds a function dependency map."""

    def __init__(self):
        # Raw AJS source text.
        self.code = getAjsCode()
        # Map: function name (incl. shortcut aliases) -> source snippet.
        self.ajs_fns = {}
        # Map: function name -> list of AJS functions it calls.
        self.ajs_deps = {}
        self._parseAJS()
        self._findDeps()

    def _parseAJS(self):
        """Extract every "name: function {...}" member of the AJS object."""
        ajs_code = re.search("AJS =(.|\n)*\n}\n", self.code).group(0)
        fns = re.findall("\s+((\w*?):.*?{(.|\n)*?\n\s*})(,|\n+})\n", ajs_code)
        for f in fns:
            self.ajs_fns[f[1]] = f[0]
        # Register the short aliases ($, $$, ...) under the same code.
        for shortcut in FN_SHORTCUTS:
            self.ajs_fns[shortcut] = self.ajs_fns[FN_SHORTCUTS[shortcut]]

    def getFnCode(self, fn_name, caller=None):
        """
        Returns the code of function and it's dependencies as a list

        *caller* breaks simple mutual-recursion cycles: a dependency equal to
        the function that requested us is not expanded again.
        """
        fn_name = self._unfoldFn(fn_name)
        r = []
        if self.ajs_fns.get(fn_name):
            r.append(self.ajs_fns[fn_name])
            for dep_fn in self.ajs_deps[fn_name]:
                if fn_name != dep_fn and dep_fn != caller:
                    r.extend(self.getFnCode(dep_fn, fn_name))
        # These names are data members / pseudo entries, not functions.
        elif fn_name not in ['listeners', 'Class',
                             'ready_list', '_agent']:
            print 'Could not find "%s"' % fn_name
        return r

    def getAjsClassCode(self):
        """Return the AJS.Class section of the source."""
        return re.search("AJS.Class =(.|\n)*\n};//End class", self.code).group(0)

    def getAjsDeferredCode(self):
        """Return the AJSDeferred section of the source."""
        return re.search("AJSDeferred =(.|\n)*\n};//End deferred", self.code).group(0)

    def _findDeps(self):
        """
        Parses AJS and for every function it finds dependencies for the other functions.
        """
        for fn_name, fn_code in self.ajs_fns.items():
            self.ajs_deps[fn_name] = self._findFns(fn_code)

    def _findFns(self, inner):
        """
        Searches after AJS.fnX( in inner and returns all the fnX in a Set.
        """
        s = re.findall("AJS\.([\w_$]*?)(?:\(|,|\.)", inner)
        s = list(Set(s))
        return self._unfoldFns(s)

    def _unfoldFns(self, list):
        """
        Unfolds:
          AJS.B, AJS.H1 etc. to _createDomShortcuts
          AJS.$ to AJS.getElement etc.
        """
        return [self._unfoldFn(n) for n in list]

    def _unfoldFn(self, fn_name):
        """Map a shortcut or DOM-tag helper name to its canonical function."""
        if fn_name.lower() in DOM_SHORTCUTS:
            return "_createDomShortcuts"
        elif FN_SHORTCUTS.get(fn_name):
            return FN_SHORTCUTS[fn_name]
        else:
            return fn_name
class ExternalCodeAnalyzer:
    """Scans user files for calls to AJS functions."""

    def __init__(self, files, ajs_analyzer):
        # Accumulated AJS function names found across all files.
        self.found_ajs_fns = []
        self.files = files
        self.ajs_analyzer = ajs_analyzer

    def findFunctions(self):
        """Parse every file and return the de-duplicated list of AJS calls."""
        for f in self.files:
            self.found_ajs_fns.extend( self._parseFile(f) )
        return list(Set(self.found_ajs_fns))

    def _parseFile(self, f):
        """
        Parses the file, looks for AJS functions and returns all the found functions.
        """
        code = open(f).read()
        found_fns = []
        for ajs_fn in self.ajs_analyzer.ajs_fns:
            # '$' must be escaped since the names are used as regexes.
            if re.search(r"%s\(" % ajs_fn.replace('$', '\\$'), code):
                found_fns.append(ajs_fn)
        return found_fns
class AjsComposer:
    """Assembles the minified AJS output from the needed function list."""

    def __init__(self, fn_list, analyzer):
        self.code = getAjsCode()
        self.analyzer = analyzer
        self.fn_list = fn_list
        #Append standard functions that the template always relies on.
        req = [ 'createDOM', '_createDomShortcuts',
                'log', 'addEventListener', 'HTML2DOM',
                'preloadImages' ]
        self.fn_list.extend(req)
        #Append AJSDeferred only if needed (async request helpers use it).
        in_list = lambda x: x in self.fn_list
        if in_list('getRequest') or in_list('loadJSONDoc'):
            self.deferred = self._minify(self.analyzer.getAjsDeferredCode())
            self.fn_list.append('isObject')
        else:
            self.deferred = ''

    def writeToOutput(self):
        """Render AJS_TEMPLATE with the collected functions and write it out."""
        fns = self._getFns()
        d = {}
        d['functions'] = ",\n".join(fns)
        d['AJSDeferred'] = self.deferred
        d['AJSClass'] = self._minify(self.analyzer.getAjsClassCode())
        mini_code = AJS_TEMPLATE % d
        writeAjsMini(mini_code)

    def _minify(self, code):
        """Drop blank lines and leading whitespace from a code snippet."""
        new_lines = []
        for l in code.split("\n"):
            if l not in ['\n', '']:
                new_lines.append(l.lstrip())
        return "\n".join(new_lines)

    def _getFns(self):
        """
        Returns a list with real code of functions

        Dependencies are pulled in recursively, then de-duplicated (which
        also means the output order is not deterministic).
        """
        r = []
        for fn in self.fn_list:
            r.extend(self.analyzer.getFnCode(fn))
        r = list(Set(r))
        return [self._minify(fn) for fn in r]
if __name__ == '__main__':
    args = sys.argv
    # Need at least the AJS source plus one file to scan.
    if len(args) < 3:
        print """Usage is:
    python AJS_minify.py [-o output_file] ajs_file js_file.js html_using_ajs.html ...
Example usage:
  Using relative paths:
    python AJS_minify.py -o AJS_mini.js AJS.js test.js index.html
    This will create AJS_mini.js from test.js and index.html.
  Using absolute paths:
    python AJS_minify.py ~/Desktop/AJS/AJS.js ~/Desktop/GreyBox_v3_42/greybox/greybox.js
    This will create a new file called '%s' that has the needed AJS functions.""" % AJS_MINI_SRC
        sys.exit(0)
    # Optional "-o output" pair shifts the positional arguments by two.
    if sys.argv[1] == '-o':
        AJS_MINI_SRC = sys.argv[2]
        AJS_SRC = sys.argv[3]
        FILES = sys.argv[4:]
    else:
        AJS_SRC = sys.argv[1]
        FILES = sys.argv[2:]
    print 'Parsing through:\n %s' % "\n ".join(FILES)
    # Analyze the library, scan the user files, then compose the output.
    ajs_analyzer = AjsAnalyzer()
    code_analyzer = ExternalCodeAnalyzer(FILES, ajs_analyzer)
    found_fns = code_analyzer.findFunctions()
    print 'Found following AJS functions:\n %s' % ("\n ".join(found_fns))
    composer = AjsComposer(found_fns, ajs_analyzer)
    composer.writeToOutput()
    print "Written the minified code to '%s'" % AJS_MINI_SRC
| theonlyzby/SmartCAN | var/www/smartcan/www/lib/greybox/GreyBox_v5_54/compression_lib/AJS_minify.py | Python | apache-2.0 | 7,781 | [
"VisIt"
] | 87416aac1bf3913f473113b37a9770f614411ceb1aed50ebb367cef01c4b416f |
from __future__ import absolute_import
import os
import numpy as nm
from sfepy.base.testing import TestCommon
from six.moves import range
class Test(TestCommon):
    """Tests field linearization (adaptive refinement for visualization)."""

    @staticmethod
    def from_conf(conf, options):
        """Construct the test and attach a helper joining paths to out_dir."""
        test = Test(conf=conf, options=options)
        test.join = lambda x: os.path.join(test.options.out_dir, x)
        return test

    def test_linearization(self):
        """Linearize analytic DOF data on several element geometries.

        For each reference element geometry, approximation order and number
        of DOFs per node, checks that (a) order-1 fields need no refinement
        while higher orders refine at least once, and (b) the linearized DOFs
        interpolate the analytic functions within a 0.03 tolerance.
        """
        from sfepy.base.base import Struct
        from sfepy.discrete.fem import Mesh, FEDomain, Field
        from sfepy import data_dir

        # Triangle, quad, tetrahedron, hexahedron reference elements.
        geometries = ['2_3', '2_4', '3_4', '3_8']
        approx_orders = [1, 2]
        # One analytic function per possible DOF component.
        funs = [nm.cos, nm.sin, lambda x: x]

        ok = True
        for geometry in geometries:
            name = os.path.join(data_dir,
                                'meshes/elements/%s_1.mesh' % geometry)
            mesh = Mesh.from_file(name)

            domain = FEDomain('', mesh)
            # Refine once so there is more than a single element.
            domain = domain.refine()
            domain.mesh.write(self.join('linearizer-%s-0.mesh' % geometry))

            omega = domain.create_region('Omega', 'all')

            for approx_order in approx_orders:
                for dpn in [1, mesh.dim]:
                    self.report('geometry: %s, approx. order: %d, dpn: %d' %
                                (geometry, approx_order, dpn))

                    field = Field.from_args('fu', nm.float64, dpn, omega,
                                            approx_order=approx_order)

                    # Fill DOFs with the analytic functions of x*y.
                    cc = field.get_coor()
                    dofs = nm.zeros((field.n_nod, dpn), dtype=nm.float64)
                    for ic in range(dpn):
                        dofs[:, ic] = funs[ic](3 * (cc[:, 0] * cc[:, 1]))

                    vmesh, vdofs, level = field.linearize(dofs,
                                                          min_level=0,
                                                          max_level=3,
                                                          eps=1e-2)

                    # Linear fields are exact, so no refinement is expected;
                    # quadratic fields must refine at least once.
                    if approx_order == 1:
                        _ok = level == 0
                    else:
                        _ok = level > 0
                    self.report('max. refinement level: %d: %s' % (level, _ok))

                    ok = ok and _ok

                    # Re-evaluate the analytic functions on the linearized
                    # mesh and compare with the linearized DOFs.
                    rdofs = nm.zeros((vmesh.n_nod, dpn), dtype=nm.float64)
                    cc = vmesh.coors
                    for ic in range(dpn):
                        rdofs[:, ic] = funs[ic](3 * (cc[:, 0] * cc[:, 1]))

                    _ok = nm.allclose(rdofs, vdofs, rtol=0.0, atol=0.03)
                    self.report('interpolation: %s' % _ok)

                    ok = ok and _ok

                    # Save the linearized result for visual inspection.
                    out = {
                        'u' : Struct(name='output_data',
                                     mode='vertex', data=vdofs,
                                     var_name='u', dofs=None)
                    }

                    name = self.join('linearizer-%s-%d-%d'
                                     % (geometry, approx_order, dpn))

                    vmesh.write(name + '.mesh')
                    vmesh.write(name + '.vtk', out=out)

        return ok
| rc/sfepy | tests/test_linearization.py | Python | bsd-3-clause | 3,116 | [
"VTK"
] | 2d4682f128a6ffb7ad22b61e2f9469506711e19c1c9b0f36a251f9ef8b2ce050 |
# -*- coding: utf-8 -*-
"""
glashammer.utils.forms
~~~~~~~~~~~~~~~~~~~~~~
This module implements a sophisticated form validation and rendering
system that is based on diva with concepts from django newforms and
wtforms incorporated.
It can validate nested structures and works in both ways. It can also
handle intelligent backredirects (via :mod:`glashammer.utils.http`) and supports
basic CSRF protection.
For usage informations see :class:`Form`
:copyright: Copyright 2007-2008 by Armin Ronacher, Christopher Lenz.
:license: GNU GPL.
"""
import re
from datetime import datetime
from unicodedata import normalize
from time import strptime
from itertools import chain
try:
from hashlib import sha1
except ImportError:
from sha import new as sha1
from werkzeug import html, escape, MultiDict
from glashammer.utils.local import get_request, url_for
from glashammer.bundles.sqladb import db
from glashammer.bundles.i18n import _, ngettext, parse_datetime, \
format_system_datetime
lazy_gettext = lambda s: s
from glashammer.utils.http import get_redirect_target, redirect
from glashammer.utils.crypto import gen_random_identifier
class ValidationError(ValueError):
    """Exception raised when invalid data is encountered.

    Accepts a single message or a list/tuple of messages; all of them are
    coerced to unicode (which also evaluates lazy translations) and exposed
    as an ``ErrorList`` on ``self.messages``.
    """

    def __init__(self, message):
        if not isinstance(message, (list, tuple)):
            messages = [message]
        else:
            # BUGFIX: a list/tuple argument previously left `messages`
            # unassigned, so the line below raised NameError.
            messages = list(message)
        # make all items in the list unicode (this also evaluates
        # lazy translations in there)
        messages = map(unicode, messages)
        Exception.__init__(self, messages[0])
        self.messages = ErrorList(messages)

    def unpack(self, key=None):
        """Return the messages keyed by *key* (``None`` for form-level errors)."""
        return {key: self.messages}
class DataIntegrityError(ValueError):
    """Raised if data was fed to the form handling system that was not
    generated by it.
    """
def _decode(data):
    """Decodes the flat dictionary d into a nested structure.

    >>> _decode({'foo': 'bar'})
    {'foo': 'bar'}
    >>> _decode({'foo.0': 'bar', 'foo.1': 'baz'})
    {'foo': ['bar', 'baz']}
    >>> data = _decode({'foo.bar': '1', 'foo.baz': '2'})
    >>> data == {'foo': {'bar': '1', 'baz': '2'}}
    True

    More complex mappings work too:

    >>> _decode({'foo.bar.0': 'baz', 'foo.bar.1': 'buzz'})
    {'foo': {'bar': ['baz', 'buzz']}}
    >>> _decode({'foo.0.bar': '23', 'foo.1.baz': '42'})
    {'foo': [{'bar': '23'}, {'baz': '42'}]}
    >>> _decode({'foo.0.0': '23', 'foo.0.1': '42'})
    {'foo': [['23', '42']]}
    >>> _decode({'foo': ['23', '42']})
    {'foo': ['23', '42']}

    Missing items in lists are ignored for convenience reasons:

    >>> _decode({'foo.42': 'a', 'foo.82': 'b'})
    {'foo': ['a', 'b']}

    This can be used for help client side DOM processing (inserting and
    deleting rows in dynamic forms).

    It also supports werkzeug's multi dicts:

    >>> _decode(MultiDict({"foo": ['1', '2']}))
    {'foo': ['1', '2']}
    >>> _decode(MultiDict({"foo.0": '1', "foo.1": '2'}))
    {'foo': ['1', '2']}

    Those two submission ways can also be used combined:

    >>> _decode(MultiDict({"foo": ['1'], "foo.0": '2', "foo.1": '3'}))
    {'foo': ['1', '2', '3']}

    This function will never raise exceptions except for argument errors
    but the recovery behavior for invalid form data is undefined.
    """
    # Sentinel keys used inside the intermediate container tree:
    # list_marker flags containers whose children are integer-indexed,
    # value_marker holds the raw submitted value list for a path.
    list_marker = object()
    value_marker = object()
    # Normalize the input into an iterable of (key, list-of-values) pairs.
    if isinstance(data, MultiDict):
        listiter = data.iterlists()
    else:
        listiter = ((k, [v]) for k, v in data.iteritems())

    def _split_key(name):
        # Split a dotted path, turning all-digit parts into int indexes.
        result = name.split('.')
        for idx, part in enumerate(result):
            if part.isdigit():
                result[idx] = int(part)
        return result

    def _enter_container(container, key):
        # Descend into (creating on demand) the child container for *key*.
        if key not in container:
            return container.setdefault(key, {list_marker: False})
        return container[key]

    def _convert(container):
        # Collapse an intermediate container into the final nested structure.
        if value_marker in container:
            force_list = False
            values = container.pop(value_marker)
            if container.pop(list_marker):
                # Indexed children follow the direct values (sorted by index).
                force_list = True
                values.extend(_convert(x[1]) for x in
                              sorted(container.items()))
            # A single direct value collapses to a scalar.
            if not force_list and len(values) == 1:
                values = values[0]
            return values
        elif container.pop(list_marker):
            return [_convert(x[1]) for x in sorted(container.items())]
        return dict((k, _convert(v)) for k, v in container.iteritems())

    result = {list_marker: False}
    for key, values in listiter:
        parts = _split_key(key)
        if not parts:
            continue
        container = result
        for part in parts:
            last_container = container
            container = _enter_container(container, part)
            # An int path part marks the parent as list-like.
            last_container[list_marker] = isinstance(part, (int, long))
        # Copy the values so later mutation of the input cannot leak in.
        container[value_marker] = values[:]
    return _convert(result)
def _bind(obj, form, memo):
"""Helper for the field binding. This is inspired by the way `deepcopy`
is implemented.
"""
if memo is None:
memo = {}
obj_id = id(obj)
if obj_id in memo:
return memo[obj_id]
rv = obj._bind(form, memo)
memo[obj_id] = rv
return rv
def _force_dict(value, silent=False):
"""If the value is not a dict, raise an exception."""
if value is None:
return {}
elif not isinstance(value, dict):
if silent:
return {}
raise DataIntegrityError('expected a dict, got %r' %
value.__class__.__name__)
return value
def _force_list(value, silent=False):
    """Coerce *value* to a list.

    ``None`` becomes an empty list.  Strings and other non-iterables
    either become ``[]`` (when *silent*) or raise `DataIntegrityError`.
    """
    if value is None:
        return []
    if not isinstance(value, basestring):
        try:
            return list(value)
        except TypeError:
            pass
    if silent:
        return []
    raise DataIntegrityError('expected a list, got %r' %
                             value.__class__.__name__)
def _make_widget(field, name, value, errors):
    """Instantiate the widget class configured on *field*."""
    widget_cls = field.widget
    return widget_cls(field, name, value, errors)
def _make_name(parent, child):
    """Return the dotted form name of *child* below *parent* (just
    *child* when there is no parent).  Prefers a plain bytestring when
    the name is pure ASCII.
    """
    result = child if parent is None else '%s.%s' % (parent, child)
    try:
        return str(result)
    except UnicodeError:
        return unicode(result)
def _to_string(value):
    """Unicode-ify *value*; ``None`` maps to the empty string."""
    return u'' if value is None else unicode(value)
def _to_list(value):
    """Like `_force_list` but lossless and infallible: strings and
    non-iterable values are wrapped in a single-item list.
    """
    if not isinstance(value, basestring):
        try:
            return list(value)
        except TypeError:
            pass
    return [value]
def _iter_choices(choices):
    """Yield ``(value, label)`` pairs from *choices*.

    A bare (non-tuple) item doubles as both value and label; ``None``
    yields nothing at all.
    """
    if choices is None:
        return
    for choice in choices:
        if isinstance(choice, tuple):
            yield choice
        else:
            yield (choice, choice)
class _ListSupport(object):
    """Mixin that renders an iterable of HTML snippets as a list."""

    def _render_items(self, factory, attrs):
        # with hide_empty the markup is suppressed entirely when the
        # sequence is empty
        if attrs.pop('hide_empty', False) and not self:
            return u''
        items = [html.li(unicode(entry)) for entry in self]
        return factory(*items, **attrs)

    def as_ul(self, **attrs):
        """Render the items as an unordered list."""
        return self._render_items(html.ul, attrs)

    def as_ol(self, **attrs):
        """Render the items as an ordered list."""
        return self._render_items(html.ol, attrs)
class _Renderable(object):
    """Mixin giving HTML objects a uniform rendering protocol: calling
    the object, ``unicode()`` and ``str()`` all funnel into `render`.
    """

    def __call__(self, *args, **kwargs):
        # calling the object is shorthand for rendering it
        return self.render(*args, **kwargs)

    def __unicode__(self):
        return self()

    def __str__(self):
        return unicode(self).encode('utf-8')

    def render(self):
        """Return the unicode markup; subclasses override this."""
        return u''
class Widget(_Renderable):
    """Baseclass for all widgets. All widgets share a common interface
    that can be used from within templates.

    Take this form as an example:

    >>> class LoginForm(Form):
    ...     username = TextField(required=True)
    ...     password = TextField(widget=PasswordInput)
    ...     flags = MultiChoiceField(choices=[1, 2, 3])
    ...
    >>> form = LoginForm()
    >>> form.validate({'username': '', 'password': '',
    ...                'flags': [1, 3]})
    False
    >>> widget = form.as_widget()

    You can get the subwidgets by using the normal indexing operators:

    >>> username = widget['username']
    >>> password = widget['password']

    The conversion to unicode calls the widget which renders it and displays
    an error list as unordered list next to it.

    >>> unicode(username) == username()
    True

    To render a widget you can usually invoke the `render()` method.  All
    keyword parameters are used as HTML attribute in the resulting tag.
    You can also call the widget itself (``username()`` instead of
    ``username.render()``) which does the same if there are no errors for
    the field but adds the default error list after the widget if there
    are errors.

    Widgets have some public attributes:

    `errors`

        gives the list of errors:

        >>> username.errors
        [u'This field is required.']

        This error list is printable:

        >>> print username.errors
        <ul class="errors"><li>This field is required.</li></ul>

        Like any other sequence that yields list items it provides
        `as_ul` and `as_ol` methods:

        >>> print username.errors.as_ul()
        <ul><li>This field is required.</li></ul>

        Keep in mind that ``widget.errors()`` is equivalent to
        ``widget.errors.as_ul(class_='errors', hide_empty=True)``.

    `value`

        returns the value of the widget as primitive.  For basic
        widgets this is always a string, for widgets with subwidgets or
        widgets with multiple values a dict or a list:

        >>> username.value
        u''
        >>> widget['flags'].value
        [u'1', u'3']

    `name` gives you the name of the field for form submissions:

        >>> username.name
        'username'

        Please keep in mind that the name is not always that obvious.
        Glashammer supports nested form fields so it's a good idea to
        always use the name attribute.

    `id`

        gives you the default domain for the widget.  This is either none
        if there is no idea for the field or `f_` + the field name with
        underscores instead of dots:

        >>> username.id
        'f_username'

    `all_errors`

        like `errors` but also contains the errors of child
        widgets.
    """

    def __init__(self, field, name, value, all_errors):
        # the bound field this widget renders, its raw value, and the
        # shared mapping of dotted field name -> error list
        self._field = field
        self._value = value
        self._all_errors = all_errors
        self.name = name

    @property
    def id(self):
        """The proposed id for this widget (``None`` when unnamed)."""
        if self.name is not None:
            # dots separate nested field names but are not valid in ids
            return 'f_' + self.name.replace('.', '_')

    @property
    def value(self):
        """The primitive value for this widget."""
        return self._field.to_primitive(self._value)

    @property
    def errors(self):
        """The direct errors of this widget."""
        if self.name in self._all_errors:
            return self._all_errors[self.name]
        return ErrorList()

    @property
    def all_errors(self):
        """The current errors and the errors of all child widgets."""
        items = sorted(self._all_errors.items())
        if self.name is None:
            # the unnamed root widget aggregates every error
            return ErrorList(chain(*(item[1] for item in items)))
        result = ErrorList()
        for key, value in items:
            # own errors plus those of any nested field ("name.child")
            if key == self.name or key.startswith(self.name + '.'):
                result.extend(value)
        return result

    def _attr_setdefault(self, attrs):
        """Add an ID to the attrs if there is none."""
        if 'id' not in attrs and self.id is not None:
            attrs['id'] = self.id

    def __call__(self, **attrs):
        """The default display is the form + error list as ul if needed."""
        return self.render(**attrs) + self.errors()
class Label(_Renderable):
    """A ``<label>`` element, optionally tied to another element's id."""

    def __init__(self, text, linked_to=None):
        # raw (unescaped) caption and the id of the labelled element
        self.text = text
        self.linked_to = linked_to

    def render(self, **attrs):
        """Render the label; ``for`` defaults to the linked element."""
        if 'for' not in attrs:
            attrs['for'] = self.linked_to
        return html.label(escape(self.text), **attrs)
class InternalWidget(Widget):
    """Special widgets are widgets that can't be used on arbitrary
    form fields but belong to others (e.g. one radio button inside a
    radio group).

    Internal widgets carry no own name or value and never have errors
    of their own.
    """

    def __init__(self, parent):
        # deliberately skips Widget.__init__: internal widgets delegate
        # to their parent instead of holding field state themselves
        self._parent = parent

    value = name = None
    # bugfix: the property getter must accept the instance; the
    # original ``lambda: ErrorList()`` took no argument, so every
    # attribute access raised TypeError
    errors = all_errors = property(lambda self: ErrorList())
class Input(Widget):
    """Baseclass for widgets rendered as a single ``<input>`` tag."""

    hide_value = False   # subclasses set this to never echo the value
    type = None          # the HTML input type, provided by subclasses

    def render(self, **attrs):
        """Render the ``<input>`` element with the given attributes."""
        self._attr_setdefault(attrs)
        shown = u'' if self.hide_value else self.value
        return html.input(name=self.name, value=shown, type=self.type,
                          **attrs)
class TextInput(Input):
    """A widget that holds text."""
    # rendered as <input type="text">
    type = 'text'
class PasswordInput(TextInput):
    """A widget that holds a password."""
    # rendered as <input type="password">; the current value is never
    # echoed back into the markup
    type = 'password'
    hide_value = True
class HiddenInput(Input):
    """A hidden input field for text."""
    # rendered as <input type="hidden">
    type = 'hidden'
class Textarea(Widget):
    """Multi-line text widget rendered as a ``<textarea>``."""

    def _attr_setdefault(self, attrs):
        """Provide the id plus sensible rows/cols defaults."""
        Widget._attr_setdefault(self, attrs)
        for key, default in (('rows', 8), ('cols', 40)):
            attrs.setdefault(key, default)

    def render(self, **attrs):
        """Render the textarea with the current value as its body."""
        self._attr_setdefault(attrs)
        return html.textarea(self.value, name=self.name, **attrs)
class Checkbox(Widget):
    """Boolean widget rendered as ``<input type="checkbox">``."""

    def render(self, **attrs):
        """Render the checkbox; it is checked when the value is true."""
        self._attr_setdefault(attrs)
        return html.input(type='checkbox', name=self.name,
                          checked=self.value, **attrs)
class SelectBox(Widget):
    """A ``<select>`` box built from the field's choices."""

    def _attr_setdefault(self, attrs):
        Widget._attr_setdefault(self, attrs)
        # multi-select when the field accepts multiple choices
        attrs.setdefault('multiple', self._field.multiple_choices)

    def render(self, **attrs):
        """Render the select box with one ``<option>`` per choice."""
        self._attr_setdefault(attrs)
        options = []
        for choice in self._field.choices:
            if isinstance(choice, tuple):
                key, label = choice
            else:
                key = label = choice
            options.append(html.option(unicode(label),
                                       value=unicode(key),
                                       selected=key == self.value))
        return html.select(name=self.name, *options, **attrs)
class _InputGroupMember(InternalWidget):
    """A single member (radio button or checkbox) of an input group.

    The concrete subclass supplies the ``type`` attribute ('radio' or
    'checkbox'); name and checked state are delegated to the parent
    group widget.
    """

    def __init__(self, parent, value, label):
        InternalWidget.__init__(self, parent)
        self.value = unicode(value)
        # the label is linked to this member's id so clicking it
        # toggles the input
        self.label = Label(label, self.id)

    @property
    def name(self):
        # all members submit under the group's field name
        return self._parent.name

    @property
    def id(self):
        return 'f_%s_%s' % (self._parent.name, self.value)

    @property
    def checked(self):
        # multi-choice groups hold a list of values, others a scalar
        if self._parent._field.multiple_choices:
            return self.value in self._parent.value
        return self._parent.value == self.value

    def render(self, **attrs):
        """Render this member as an ``<input>`` of the subclass type."""
        self._attr_setdefault(attrs)
        return html.input(type=self.type, name=self.name, value=self.value,
                          checked=self.checked, **attrs)
class RadioButton(_InputGroupMember):
    """A radio button in an input group."""
    # rendered as <input type="radio">
    type = 'radio'
class GroupCheckbox(_InputGroupMember):
    """A checkbox in an input group."""
    # rendered as <input type="checkbox">
    type = 'checkbox'
class _InputGroup(Widget):
    """Base widget for groups of radio buttons / checkboxes.

    One subwidget is created per field choice; the concrete subclass
    picks the member type via its ``subwidget`` attribute.
    """

    def __init__(self, field, name, value, all_errors):
        Widget.__init__(self, field, name, value, all_errors)
        self.choices = []
        self._subwidgets = {}
        # one member widget per configured choice, indexable by value
        for value, label in _iter_choices(self._field.choices):
            widget = self.subwidget(self, value, label)
            self.choices.append(widget)
            self._subwidgets[value] = widget

    def __getitem__(self, value):
        """Return a subwidget."""
        return self._subwidgets[value]

    def _as_list(self, list_type, attrs):
        # shared implementation for as_ul / as_ol
        if attrs.pop('hide_empty', False) and not self.choices:
            return u''
        self._attr_setdefault(attrs)
        return list_type(*[u'<li>%s %s</li>' % (
            choice,
            choice.label
        ) for choice in self.choices], **attrs)

    def as_ul(self, **attrs):
        """Render the radio buttons widget as <ul>"""
        return self._as_list(html.ul, attrs)

    def as_ol(self, **attrs):
        """Render the radio buttons widget as <ol>"""
        return self._as_list(html.ol, attrs)

    def as_table(self, **attrs):
        """Render the radio buttons widget as <table>"""
        self._attr_setdefault(attrs)
        # bugfix: this previously called the undefined name `list_type`
        # (copy/paste from _as_list) and raised NameError; the rows
        # belong inside an html.table element
        return html.table(*[u'<tr><td>%s</td><td>%s</td></tr>' % (
            choice,
            choice.label
        ) for choice in self.choices], **attrs)

    def render(self, **attrs):
        """The default rendering is the <ul> form."""
        return self.as_ul(**attrs)
class RadioButtonGroup(_InputGroup):
    """A group of radio buttons."""
    # member widgets are RadioButton instances
    subwidget = RadioButton
class CheckboxGroup(_InputGroup):
    """A group of checkboxes."""
    # member widgets are GroupCheckbox instances
    subwidget = GroupCheckbox
class MappingWidget(Widget):
    """Special widget for dict-like fields.

    Subwidgets are created lazily per key and cached; iteration yields
    the names of the mapped fields.
    """

    def __init__(self, field, name, value, all_errors):
        Widget.__init__(self, field, name, _force_dict(value), all_errors)
        # name -> subwidget cache, filled lazily by __getitem__
        self._subwidgets = {}

    def __getitem__(self, name):
        subwidget = self._subwidgets.get(name)
        if subwidget is None:
            # this could raise a KeyError we pass through
            field = self._field.fields[name]
            # NOTE(review): ``or field.default`` replaces any falsy
            # stored value (0, '', False) with the default -- confirm
            # that this is intended for those fields
            subwidget = _make_widget(field,
                                     _make_name(self.name, name),
                                     self._value.get(name) or field.default,
                                     self._all_errors)
            self._subwidgets[name] = subwidget
        return subwidget

    def as_dl(self, **attrs):
        """Render the input fields as a <dl> of title/widget pairs."""
        return html.dl(*(html.dt(key.title()) + html.dd(self[key]())
                         for key in self.get_input_fields()), **attrs)

    def actions_container(self, **attrs):
        """Render the non-input (action) fields inside a <div>."""
        return html.div(*(self[key]() for key in self.get_action_fields()), **attrs)

    def get_input_fields(self):
        # fields whose is_input flag is set (regular form inputs)
        return [key for key in self if self._field.fields[key].is_input]

    def get_action_fields(self):
        # fields flagged as actions (e.g. submit buttons)
        return [key for key in self if not self._field.fields[key].is_input]

    def __call__(self, *args, **kwargs):
        # default rendering is the definition-list form
        return self.as_dl(*args, **kwargs)

    def __iter__(self):
        return iter(self._field.fields)
class FormWidget(MappingWidget):
    """A widget for forms.

    Adds the surrounding ``<form>`` tag plus the special hidden fields
    (CSRF token, redirect target) on top of `MappingWidget`.
    """

    def get_hidden_fields(self):
        """This method is called by the `hidden_fields` property to return
        a list of (key, value) pairs for the special hidden fields.
        """
        fields = []
        # both hidden fields only make sense for request-bound forms
        if self._field.form.request is not None:
            if self._field.form.csrf_protected:
                fields.append(('_csrf_token', self.csrf_token))
            if self._field.form.redirect_tracking:
                target = self.redirect_target
                if target is not None:
                    fields.append(('_redirect_target', target))
        return fields

    @property
    def hidden_fields(self):
        """The hidden fields as string."""
        return u''.join(html.input(type='hidden', name=name, value=value)
                        for name, value in self.get_hidden_fields())

    @property
    def csrf_token(self):
        """Forward the CSRF check token for templates."""
        return self._field.form.csrf_token

    @property
    def redirect_target(self):
        """The redirect target for this form."""
        return self._field.form.redirect_target

    def render(self, action='', method='post', **attrs):
        """Render the complete ``<form>`` element.

        ``with_errors`` prepends the form-level error list; a Jinja
        ``caller`` block replaces the default body.
        """
        self._attr_setdefault(attrs)
        with_errors = attrs.pop('with_errors', False)
        # support jinja's caller
        caller = attrs.pop('caller', None)
        if caller is not None:
            body = caller()
        else:
            body = self.as_dl()
            # NOTE(review): indentation was ambiguous in the original;
            # the actions container is assumed to belong to the default
            # body only (a caller-supplied body renders its own) --
            # confirm against callers
            body = body + self.actions_container()
        hidden = self.hidden_fields
        if hidden:
            # if there are hidden fields we put an invisible div around
            # it. the HTML standard doesn't allow input fields as direct
            # childs of a <form> tag...
            body = '<div style="display: none">%s</div>%s' % (hidden, body)
        if with_errors:
            body = self.errors() + body
        return html.form(body, action=action, method=method, **attrs)

    def __call__(self, *args, **attrs):
        # calling the widget shows errors by default
        attrs.setdefault('with_errors', True)
        return self.render(*args, **attrs)
class ListWidget(Widget, _ListSupport):
    """Special widget for list-like fields.

    Indexable and iterable; subwidgets are created lazily per index.
    """

    def __init__(self, field, name, value, all_errors):
        Widget.__init__(self, field, name, _force_list(value), all_errors)
        # index -> subwidget cache, filled lazily by __getitem__
        self._subwidgets = {}

    def __getitem__(self, index):
        if not isinstance(index, (int, long)):
            raise TypeError('list widget indices must be integers')
        subwidget = self._subwidgets.get(index)
        if subwidget is None:
            try:
                value = self._value[index]
            except IndexError:
                # return an widget without value if we try
                # to access a field not in the list
                value = self._field.field.default
            subwidget = _make_widget(self._field.field,
                                     _make_name(self.name, index), value,
                                     self._all_errors)
            self._subwidgets[index] = subwidget
        return subwidget

    def __iter__(self):
        for index in xrange(len(self)):
            yield self[index]

    def __len__(self):
        return len(self._value)

    def __call__(self, *args, **kwargs):
        # default rendering is the <ul> provided by _ListSupport
        return self.as_ul(*args, **kwargs)
class ErrorList(_Renderable, _ListSupport, list):
    """A printable list of error messages.

    Renders as a ``<ul>``; calling it defaults the css class to
    ``errors`` and hides the markup entirely when the list is empty.
    """

    def render(self, **attrs):
        """Render the messages as an unordered list."""
        return self.as_ul(**attrs)

    def __call__(self, **attrs):
        # `class_` is always consumed, even when `class` was passed too
        fallback = attrs.pop('class_', 'errors')
        attrs.setdefault('class', fallback)
        attrs.setdefault('hide_empty', True)
        return self.render(**attrs)
class MultipleValidationErrors(ValidationError):
    """A validation error that aggregates the errors raised by the
    subfields of a mapping or list field.
    """

    def __init__(self, errors):
        count = len(errors)
        ValidationError.__init__(self, '%d error%s' % (
            count, count != 1 and 's' or ''
        ))
        # mapping of subfield name/index -> ValidationError
        self.errors = errors

    def __unicode__(self):
        return ', '.join(unicode(err) for err in self.errors.itervalues())

    def unpack(self, key=None):
        """Flatten into a dict of dotted field name -> messages."""
        rv = {}
        for name, error in self.errors.iteritems():
            rv.update(error.unpack(_make_name(key, name)))
        return rv
class FieldMeta(type):
    """Metaclass for fields that merges the ``messages`` dicts of all
    base classes with the one defined on the class itself, so error
    messages are inherited and can be selectively overridden.
    """

    def __new__(cls, name, bases, d):
        merged = {}
        # later bases (closer in the MRO) win over earlier ones
        for base in reversed(bases):
            merged.update(getattr(base, 'messages', {}))
        merged.update(d.get('messages', {}))
        d['messages'] = merged
        return type.__new__(cls, name, bases, d)
class Field(object):
    """Abstract field base class.

    A field converts and validates one submitted value: calling the
    field runs `convert` and then every validator.  Subclasses override
    `convert` and `to_primitive` for their data type.
    """
    __metaclass__ = FieldMeta
    widget = TextInput
    messages = dict(required=lazy_gettext('This field is required.'))
    form = None          # set when the field is bound to a form
    is_input = True

    def __init__(self, validators=None, widget=None, messages=None, default=None):
        if validators is None:
            validators = []
        self.validators = validators
        self.custom_converter = None
        if widget is not None:
            self.widget = widget
        if messages:
            # copy before updating so overrides stay per instance
            self.messages = self.messages.copy()
            self.messages.update(messages)
        self.default = default
        assert not issubclass(self.widget, InternalWidget), \
            'can\'t use internal widgets as widgets for fields'

    def __call__(self, value):
        """Convert *value* and apply all validators, returning the
        converted value.  Raises `ValidationError` on failure.
        """
        value = self.convert(value)
        self.apply_validators(value)
        return value

    def apply_validators(self, value):
        """Applies all validators on the value."""
        if self.should_validate(value):
            for validate in self.validators:
                validate(self.form, value)

    def should_validate(self, value):
        """Per default validate if the value is not None.  This method is
        called before the custom validators are applied to not perform
        validation if the field is empty and not required.

        For example a validator like `is_valid_ip` is never called if the
        value is an empty string and the field hasn't raised a validation
        error when checking if the field is required.
        """
        return value is not None

    def convert(self, value):
        """This can be overridden by subclasses and performs the value
        conversion.
        """
        return unicode(value)

    def to_primitive(self, value):
        """Convert a value into a primitive (string or a list/dict of
        lists, dicts or strings).

        This method must never fail!
        """
        return _to_string(value)

    def _bind(self, form, memo):
        """Method that binds a field to a form."""
        if self.bound:
            # bugfix: this previously used the undefined name `obj`
            # (``type(obj)``), turning the intended TypeError into a
            # NameError
            raise TypeError('%r already bound' % type(self).__name__)
        rv = object.__new__(self.__class__)
        rv.__dict__.update(self.__dict__)
        # copy mutable state so the bound field can be mutated freely
        rv.validators = self.validators[:]
        rv.messages = self.messages.copy()
        rv.form = form
        return rv

    @property
    def bound(self):
        """True if the form is bound."""
        return 'form' in self.__dict__

    def __repr__(self):
        rv = object.__repr__(self)
        if self.bound:
            rv = rv[:-1] + ' [bound]>'
        return rv
class Mapping(Field):
    """Apply a set of fields to a dictionary of values.

    >>> field = Mapping(name=TextField(), age=IntegerField())
    >>> field({'name': u'John Doe', 'age': u'42'})
    {'age': 42, 'name': u'John Doe'}
    """
    widget = MappingWidget

    def __init__(self, **fields):
        Field.__init__(self)
        # name -> Field instance for each mapped subfield
        self.fields = fields

    def convert(self, value):
        """Convert every subfield, collecting all errors instead of
        stopping at the first failure.
        """
        value = _force_dict(value)
        errors = {}
        result = {}
        for name, field in self.fields.iteritems():
            try:
                result[name] = field(value.get(name))
            except ValidationError, e:
                errors[name] = e
        if errors:
            raise MultipleValidationErrors(errors)
        return result

    def to_primitive(self, value):
        # never fails: missing keys simply map to the field's empty form
        value = _force_dict(value, silent=True)
        result = {}
        for key, field in self.fields.iteritems():
            result[key] = field.to_primitive(value.get(key))
        return result

    def _bind(self, form, memo):
        # bind a copy of each subfield so the unbound field is untouched
        rv = Field._bind(self, form, memo)
        rv.fields = {}
        for key, field in self.fields.iteritems():
            rv.fields[key] = _bind(field, form, memo)
        return rv
class FormMapping(Mapping):
    """Like a mapping but does csrf protection and stuff."""
    widget = FormWidget

    def convert(self, value):
        # an unbound form mapping has no request/CSRF context to check
        if self.form is None:
            raise TypeError('form mapping without form passed is unable '
                            'to convert data')
        # verify the CSRF token before any field conversion happens
        if self.form.csrf_protected and self.form.request is not None:
            token = self.form.request.values.get('_csrf_token')
            if token != self.form.csrf_token:
                raise ValidationError(_('Invalid security token submitted.'))
        return Mapping.convert(self, value)
class FormAsField(Mapping):
    """If a form is converted into a field the returned field object is an
    instance of this class.  The behavior is mostly equivalent to a normal
    :class:`Mapping` field with the difference that it has an attribute
    called :attr:`form_class` that points to the form class it was
    created from.
    """

    def __init__(self):
        # instances are only created via FormMeta.as_field (which uses
        # object.__new__ and never calls this constructor)
        raise TypeError('can\'t create %r instances' %
                        self.__class__.__name__)
class Multiple(Field):
"""Apply a single field to a sequence of values.
>>> field = Multiple(IntegerField())
>>> field([u'1', u'2', u'3'])
[1, 2, 3]
"""
widget = ListWidget
messages = dict(too_small=None, too_big=None)
def __init__(self, field, min_size=None, max_size=None,
messages=None):
Field.__init__(self, messages=messages)
self.field = field
self.min_size = min_size
self.max_size = max_size
def convert(self, value):
value = _force_list(value)
if self.min_size is not None and len(value) < self.min_size:
message = self.messages['too_small']
if message is None:
message = ngettext('Please provide at least %d item.',
'Please provide at least %d items.',
self.min_size) % self.min_size
raise ValidationError(message)
if self.max_size is not None and len(value) > self.max_size:
message = self.messages['too_big']
if message is None:
message = ngettext('Please provide no more than %d item.',
'Please provide no more than %d items.',
self.min_size) % self.min_size
raise ValidationError(message)
result = []
errors = {}
for idx, item in enumerate(value):
try:
result.append(self.field(item))
except ValidationError, e:
errors[idx] = e
if errors:
raise MultipleValidationErrors(errors)
return result
def to_primitive(self, value):
return map(self.field.to_primitive, _force_list(value, silent=True))
def _bind(self, form, memo):
rv = Field._bind(self, form, memo)
rv.field = _bind(self.field, form, memo)
return rv
class TextField(Field):
    """Field for strings.

    >>> field = TextField(required=True, min_length=6)
    >>> field('foo bar')
    u'foo bar'
    >>> field('')
    Traceback (most recent call last):
        ...
    ValidationError: This field is required.
    """
    # length violations fall back to built-in messages when unset
    messages = dict(too_short=None, too_long=None)

    def __init__(self, required=False, min_length=None, max_length=None,
                 validators=None, widget=None, messages=None):
        Field.__init__(self, validators, widget, messages)
        self.required = required
        self.min_length = min_length
        self.max_length = max_length

    def convert(self, value):
        """Coerce to unicode and enforce required / length constraints."""
        value = _to_string(value)
        if self.required and not value:
            raise ValidationError(self.messages['required'])
        if self.min_length is not None and len(value) < self.min_length:
            message = self.messages['too_short']
            if message is None:
                message = ngettext('Please enter at least %d character.',
                                   'Please enter at least %d characters.',
                                   self.min_length) % self.min_length
            raise ValidationError(message)
        if self.max_length is not None and len(value) > self.max_length:
            message = self.messages['too_long']
            if message is None:
                message = ngettext('Please enter no more than %d character.',
                                   'Please enter no more than %d characters.',
                                   self.max_length) % self.max_length
            raise ValidationError(message)
        return value

    def should_validate(self, value):
        """Validate if the string is not empty."""
        return bool(value)
class DateTimeField(Field):
    """Field for datetime objects.

    >>> field = DateTimeField(format='%Y-%m-%d %H:%M')
    >>> field('1970-01-12 00:00')
    datetime.datetime(1970, 1, 12, 0, 0)
    >>> field('foo')
    Traceback (most recent call last):
        ...
    ValidationError: Please enter a valid date.
    """
    messages = dict(invalid_date=lazy_gettext('Please enter a valid date.'))

    def __init__(self, required=False, format='%m/%d/%Y',
                 validators=None, widget=None, messages=None):
        Field.__init__(self, validators, widget, messages)
        self.required = required
        self.format = format

    def convert(self, value):
        """Parse *value* with the configured strptime format."""
        if isinstance(value, datetime):
            return value
        value = _to_string(value)
        if not value:
            if self.required:
                raise ValidationError(self.messages['required'])
            return None
        try:
            # bugfix: struct_time[:6] is year..second; the previous
            # [:7] slice also passed tm_wday, which datetime() would
            # interpret as the microsecond argument
            return datetime(*strptime(value, self.format)[:6])
        except ValueError:
            raise ValidationError(self.messages['invalid_date'])

    def to_primitive(self, value):
        """Format datetimes back to a string; pass others through."""
        if isinstance(value, datetime):
            value = value.strftime(self.format)
        return value
class ModelField(Field):
    """A field that queries for a model.

    The first argument is the name of the model, the second the named
    argument for `filter_by` (eg: `User` and ``'username'``).
    """
    messages = dict(not_found=lazy_gettext('"%(value)s" does not exist'))

    def __init__(self, model, key, required=False, message=None,
                 validators=None, widget=None, messages=None):
        Field.__init__(self, validators, widget, messages)
        self.model = model
        self.key = key
        self.required = required
        # NOTE(review): `message` is stored but never read in this
        # class; presumably used by subclasses or callers -- confirm
        self.message = message

    def convert(self, value):
        # an existing model instance passes through untouched
        if isinstance(value, self.model):
            return value
        if not value:
            if self.required:
                raise ValidationError(self.messages['required'])
            return None
        value = self._coerce_value(value)
        # assumes the model exposes an `objects.filter_by(...)` query
        # API whose result supports `.first()` -- TODO confirm
        rv = self.model.objects.filter_by(**{self.key: value}).first()
        if rv is None:
            raise ValidationError(self.messages['not_found'] %
                                  {'value': value})
        return rv

    def _coerce_value(self, value):
        # hook for subclasses to cast the lookup value (e.g. to int)
        return value

    def to_primitive(self, value):
        """Primitive form: the model's lookup key as unicode."""
        if value is None:
            return u''
        elif isinstance(value, self.model):
            value = getattr(value, self.key)
        return unicode(value)
class HiddenModelField(ModelField):
    """A hidden field that points to a model identified by primary key.
    Can be used to pass models through a form.
    """
    widget = HiddenInput
    # these messages should never show up unless ...
    #   ... the user tampered with the form data
    #   ... or the object went away in the meantime.
    messages = dict(
        invalid=lazy_gettext('Invalid value.'),
        not_found=lazy_gettext('Key does not exist.')
    )

    def __init__(self, model, key=None, required=False, message=None,
                 validators=None, widget=None, messages=None):
        if key is None:
            # default to the model's (single) primary key column
            keys = db.class_mapper(model).primary_key
            assert len(keys) == 1, 'Model has multiple primary keys'
            key = keys[0].name
        # bugfix: the arguments were previously replaced by their
        # defaults (``required=False, message=None, ...``) instead of
        # being forwarded, silently discarding caller configuration
        ModelField.__init__(self, model, key, required=required,
                            message=message, validators=validators,
                            widget=widget, messages=messages)

    def _coerce_value(self, value):
        """Primary keys are integers; reject anything that isn't."""
        try:
            return int(value)
        except (TypeError, ValueError):
            raise ValidationError(self.messages['invalid'])
class ChoiceField(Field):
    """A field that lets a user select one out of many choices.

    A choice field accepts some choices that are valid values for it.
    Values are compared after converting to unicode which means that
    ``1 == "1"``:

    >>> field = ChoiceField(choices=[1, 2, 3])
    >>> field('1')
    1
    >>> field('42')
    Traceback (most recent call last):
        ...
    ValidationError: Please enter a valid choice.

    A choice field also accepts lists of tuples as argument where the
    first item is used for comparing and the second for displaying
    (which is used by the `SelectBoxWidget`):

    >>> field = ChoiceField(choices=[(0, 'inactive'), (1, 'active')])
    >>> field('0')
    0

    Because all fields are bound to the form before validation it's
    possible to assign the choices later:

    >>> class MyForm(Form):
    ...     status = ChoiceField()

    >>> form = MyForm()
    >>> form.fields['status'].choices = [(0, 'inactive'), (1, 'active')]
    >>> form.validate({'status': '0'})
    True
    >>> form.data
    {'status': 0}
    """
    widget = SelectBox
    multiple_choices = False
    messages = dict(
        invalid_choice=lazy_gettext('Please enter a valid choice.')
    )

    def __init__(self, required=True, choices=None, validators=None,
                 widget=None, messages=None):
        Field.__init__(self, validators, widget, messages)
        self.required = required
        self.choices = choices

    def convert(self, value):
        # an empty optional field converts to None
        if not value and not self.required:
            return
        for choice in self.choices:
            if isinstance(choice, tuple):
                choice = choice[0]
            # perform type interpolation. If the key from the choices
            # is a integer (42) and the value is a string ("42") we
            # return the integer.
            if value == choice or unicode(value) == unicode(choice):
                return choice
        raise ValidationError(self.messages['invalid_choice'])

    def _bind(self, form, memo):
        # copy the choices so the bound field can be mutated freely
        rv = Field._bind(self, form, memo)
        if self.choices is not None:
            rv.choices = list(self.choices)
        return rv
class MultiChoiceField(ChoiceField):
    """A field that lets a user select multiple choices."""
    multiple_choices = True
    messages = dict(too_small=None, too_big=None)

    def __init__(self, choices=None, min_size=None, max_size=None,
                 validators=None, widget=None, messages=None):
        # required iff at least one item must be selected
        ChoiceField.__init__(self, min_size > 0, choices, validators,
                             widget, messages)
        self.min_size = min_size
        self.max_size = max_size

    def convert(self, value):
        """Validate the submitted values against the choices and
        enforce the min/max selection size, returning the canonical
        choice keys of the selected items.
        """
        values = _to_list(value)
        # compare both raw and stringified forms so that 1 == '1'
        container = set(values) | set(map(unicode, values))
        result = []
        for choice in self.choices:
            key = choice[0] if isinstance(choice, tuple) else choice
            # bugfix: the original raised unless EVERY configured
            # choice was submitted; only the selected subset belongs
            # in the result
            if key in container or unicode(key) in container:
                result.append(key)
                container.discard(key)
                container.discard(unicode(key))
        if container:
            # something was submitted that is not among the choices
            raise ValidationError(_('Select a valid choice.'))
        if self.min_size is not None and len(result) < self.min_size:
            message = self.messages['too_small']
            if message is None:
                message = ngettext('Please provide at least %d item.',
                                   'Please provide at least %d items.',
                                   self.min_size) % self.min_size
            raise ValidationError(message)
        if self.max_size is not None and len(result) > self.max_size:
            message = self.messages['too_big']
            if message is None:
                # bugfix: this message previously interpolated min_size
                message = ngettext('Please provide no more than %d item.',
                                   'Please provide no more than %d items.',
                                   self.max_size) % self.max_size
            raise ValidationError(message)
        return result

    def to_primitive(self, value):
        """Primitive form: every selected value as unicode."""
        return map(unicode, _force_list(value, silent=True))
class IntegerField(Field):
    """Field for integers.

    >>> field = IntegerField(min_value=0, max_value=99)
    >>> field('13')
    13
    >>> field('thirteen')
    Traceback (most recent call last):
        ...
    ValidationError: Please enter a whole number.
    >>> field('193')
    Traceback (most recent call last):
        ...
    ValidationError: Ensure this value is less than or equal to 99.
    """
    messages = dict(
        too_small=None,
        too_big=None,
        no_integer=lazy_gettext('Please enter a whole number.')
    )

    def __init__(self, required=False, min_value=None, max_value=None,
                 validators=None, widget=None, messages=None):
        # bugfix: `messages` was previously dropped (``messages=None``
        # was passed literally), so custom error messages supplied by
        # the caller never reached the field
        Field.__init__(self, validators, widget, messages)
        self.required = required
        self.min_value = min_value
        self.max_value = max_value

    def convert(self, value):
        """Convert to int, enforcing required and the min/max bounds."""
        value = _to_string(value)
        if not value:
            if self.required:
                raise ValidationError(self.messages['required'])
            return None
        try:
            value = int(value)
        except ValueError:
            raise ValidationError(self.messages['no_integer'])
        if self.min_value is not None and value < self.min_value:
            message = self.messages['too_small']
            if message is None:
                message = _('Ensure this value is greater than or '
                            'equal to %s.') % self.min_value
            raise ValidationError(message)
        if self.max_value is not None and value > self.max_value:
            message = self.messages['too_big']
            if message is None:
                message = _('Ensure this value is less than or '
                            'equal to %s.') % self.max_value
            raise ValidationError(message)
        # already an int at this point
        return value
class BooleanField(Field):
    """Field for boolean values.

    >>> field = BooleanField()
    >>> field('1')
    True
    >>> field = BooleanField()
    >>> field('')
    False
    """
    widget = Checkbox

    def convert(self, value):
        """Any truthy submitted value counts as True."""
        return bool(value)

    def to_primitive(self, value):
        """Checkbox convention: ``'1'`` when set, ``''`` when not."""
        return '1' if value else ''
class FormMeta(type):
    """Meta class for forms. Handles form inheritance and registers
    validator functions.
    """

    def __new__(cls, name, bases, d):
        fields = {}
        validator_functions = {}
        root_validator_functions = []
        # inherit fields and form-level validators from all bases
        for base in reversed(bases):
            if hasattr(base, '_root_field'):
                # base._root_field is always a FormMapping field
                fields.update(base._root_field.fields)
                root_validator_functions.extend(base._root_field.validators)
        for key, value in d.iteritems():
            # methods named validate_<field> become field validators
            if key.startswith('validate_') and callable(value):
                validator_functions[key[9:]] = value
            elif isinstance(value, Field):
                # replace the field on the class with a descriptor
                # proxying to the root mapping's fields dict
                fields[key] = value
                d[key] = FieldDescriptor(key)
        for field_name, func in validator_functions.iteritems():
            if field_name in fields:
                fields[field_name].validators.append(func)
        d['_root_field'] = root = FormMapping(**fields)
        context_validate = d.get('context_validate')
        root.validators.extend(root_validator_functions)
        if context_validate is not None:
            # context_validate runs after all single-field validation
            root.validators.append(context_validate)
        return type.__new__(cls, name, bases, d)

    def as_field(cls):
        """Returns a field object for this form.  The field object returned
        is independent of the form and can be modified in the same manner as
        a bound field.
        """
        field = object.__new__(FormAsField)
        field.__dict__.update(cls._root_field.__dict__)
        field.form_class = cls
        # copy mutable state so the class' root field stays untouched
        field.validators = cls._root_field.validators[:]
        field.fields = cls._root_field.fields.copy()
        return field

    @property
    def validators(cls):
        # class-level access to the root mapping's validators
        return cls._root_field.validators

    @property
    def fields(cls):
        # class-level access to the root mapping's fields
        return cls._root_field.fields
class FieldDescriptor(object):
    """Descriptor that proxies attribute access on a form (or a form
    class) to the entry of the same name in its ``fields`` mapping.
    """

    def __init__(self, name):
        self.name = name

    def __get__(self, obj, type=None):
        owner = obj or type
        return owner.fields[self.name]

    def __set__(self, obj, value):
        obj.fields[self.name] = value
class Form(object):
    """Form base class.

    >>> class PersonForm(Form):
    ...     name = TextField(required=True)
    ...     age = IntegerField()

    >>> form = PersonForm()
    >>> form.validate({'name': 'johnny', 'age': '42'})
    True
    >>> form.data['name']
    u'johnny'
    >>> form.data['age']
    42

    Let's cause a simple validation error:

    >>> form = PersonForm()
    >>> form.validate({'name': '', 'age': 'fourty-two'})
    False
    >>> print form.errors['age'][0]
    Please enter a whole number.
    >>> print form.errors['name'][0]
    This field is required.

    You can also add custom validation routines for fields by adding methods
    that start with the prefix ``validate_`` and the field name that take the
    value as argument. For example:

    >>> class PersonForm(Form):
    ...     name = TextField(required=True)
    ...     age = IntegerField()
    ...
    ...     def validate_name(self, value):
    ...         if not value.isalpha():
    ...             raise ValidationError(u'The value must only contain letters')

    >>> form = PersonForm()
    >>> form.validate({'name': 'mr.t', 'age': '42'})
    False
    >>> form.errors
    {'name': [u'The value must only contain letters']}

    You can also validate multiple fields in the context of other fields.
    That validation is performed after all other validations.  Just add a
    method called ``context_validate`` that is passed the dict of all fields::

    >>> class RegisterForm(Form):
    ...     username = TextField(required=True)
    ...     password = TextField(required=True)
    ...     password_again = TextField(required=True)
    ...
    ...     def context_validate(self, data):
    ...         if data['password'] != data['password_again']:
    ...             raise ValidationError(u'The two passwords must be the same')

    >>> form = RegisterForm()
    >>> form.validate({'username': 'admin', 'password': 'blah',
    ...                'password_again': 'blag'})
    ...
    False
    >>> form.errors
    {None: [u'The two passwords must be the same']}

    Forms can be used as fields for other forms.  To create a form field of
    a form you can call the `as_field` class method::

    >>> field = RegisterForm.as_field()

    This field can be used like any other field class.  What's important about
    forms as fields is that validators don't get an instance of `RegisterForm`
    passed as `form` / `self` but the form where it's used in if the field is
    used from a form.

    Form fields are bound to the form on form instanciation.  This makes it
    possible to modify a particular instance of the form.  For example you
    can create an instance of it and drop some fiels by using
    ``del form.fields['name']`` or reassign choices of choice fields.  It's
    however not easily possible to add new fields to an instance because newly
    added fields wouldn't be bound.  The fields that are stored directly on
    the form can also be accessed with their name like a regular attribute.

    Example usage::

    >>> class StatusForm(Form):
    ...     status = ChoiceField()
    ...
    >>> StatusForm.status.bound
    False
    >>> form = StatusForm()
    >>> form.status.bound
    True
    >>> form.status.choices = [u'happy', u'unhappy']
    >>> form.validate({'status': u'happy'})
    True
    >>> form['status']
    u'happy'
    """
    __metaclass__ = FormMeta
    csrf_protected = True
    redirect_tracking = True

    def __init__(self, initial=None):
        # Bind to the current request so the CSRF / redirect helpers work.
        self.request = get_request()
        if initial is None:
            initial = {}
        self.initial = initial
        self.invalid_redirect_targets = set()
        # Bind the class-level root field to this particular instance.
        self._root_field = _bind(self.__class__._root_field, self, {})
        self.reset()

    def __getitem__(self, key):
        return self.data[key]

    def __contains__(self, key):
        return key in self.data

    def as_widget(self):
        """Return the form as widget."""
        # if there is submitted data, use that for the widget
        if self.raw_data is not None:
            data = self.raw_data
        # otherwise go with the data from the source (eg: database)
        else:
            data = self.data
        return _make_widget(self._root_field, None, data, self.errors)

    def add_invalid_redirect_target(self, *args, **kwargs):
        """Add an invalid target.  Invalid targets are URLs we don't want to
        visit again.  For example if a post is deleted from the post edit page
        it's a bad idea to redirect back to the edit page because in that
        situation the edit page would return a page not found.

        This function accepts the same parameters as `url_for`.
        """
        self.invalid_redirect_targets.add(url_for(*args, **kwargs))

    @property
    def redirect_target(self):
        """The back-redirect target for this form."""
        return get_redirect_target(self.invalid_redirect_targets,
                                   self.request)

    def redirect(self, *args, **kwargs):
        """Redirects to the url rule given or back to the URL where we are
        comming from if `redirect_tracking` is enabled.
        """
        target = None
        if self.redirect_tracking:
            target = self.redirect_target
        if target is None:
            target = url_for(*args, **kwargs)
        return redirect(target)

    @property
    def csrf_token(self):
        """The unique CSRF security token for this form."""
        if self.request is None:
            raise AttributeError('no csrf token because form not bound '
                                 'to request')
        path = self.request.path
        user_id = -1
        #XXX
        #if self.request.user.is_somebody:
        #    user_id = self.request.user.user_id
        #key = self.request.app.cfg['secret_key']
        # NOTE(review): hard-coded fallback secret while the config lookup
        # above is disabled -- presumably a stopgap; confirm before shipping.
        key = 'I am secret'
        return sha1(('%s|%s|%s' % (path, user_id, key))
                    .encode('utf-8')).hexdigest()

    @property
    def is_valid(self):
        """True if the form is valid."""
        return not self.errors

    @property
    def has_changed(self):
        """True if the form has changed."""
        return self._root_field.to_primitive(self.initial) != \
               self._root_field.to_primitive(self.data)

    @property
    def fields(self):
        # Fields of the instance-bound root field.
        return self._root_field.fields

    @property
    def validators(self):
        # Validators of the instance-bound root field.
        return self._root_field.validators

    def reset(self):
        """Resets the form."""
        self.data = self.initial.copy()
        self.errors = {}
        self.raw_data = None

    def validate(self, data):
        """Validate the form against the data passed."""
        self.raw_data = _decode(data)
        # Overlay the submitted values on the current data for validation.
        d = self.data.copy()
        d.update(self.raw_data)
        errors = {}
        try:
            data = self._root_field(d)
        except ValidationError, e:
            errors = e.unpack()
        self.errors = errors
        if errors:
            return False
        self.data.update(data)
        return True

    def populate(self, obj):
        # Copy every validated value onto *obj* as attributes.
        for k in self.data:
            setattr(obj, k, self.data[k])
# glashammer extras
class SubmitButton(Input):
    """A submit button field"""
    # Rendered as <input type="submit">.
    type = 'submit'
class ResetButton(Input):
    """A reset button field.

    Rendered as ``<input type="reset">`` so the browser restores the
    form's initial values instead of submitting it.
    """
    # Bug fix: this was ``type = 'submit'`` -- a copy/paste slip from
    # SubmitButton that made the widget act as a second submit button
    # instead of resetting the form.
    type = 'reset'
class SubmitField(Field):
    """Field rendered through :class:`SubmitButton`; carries no data."""
    widget = SubmitButton
    # Excluded from the form's data-carrying inputs.
    is_input = False
class ResetField(Field):
    """Field rendered through :class:`ResetButton`; carries no data."""
    widget = ResetButton
    # Excluded from the form's data-carrying inputs.
    is_input = False
| passy/glashammer-rdrei | glashammer/utils/forms.py | Python | mit | 51,663 | [
"VisIt"
] | d57cfd18da4f9ccda3b3ed721f6f336252caedd48078d8571b8768f8781b3039 |
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import os
import errno
from typing import List
from typing import Optional
from typing import Text
def relative_normpath(f, path):
    # type: (Optional[Text], Text) -> Optional[Text]
    """Return the normalized path of ``f`` relative to ``path``.

    ``None`` is passed through unchanged.
    """
    if f is None:
        return None
    return os.path.normpath(os.path.relpath(f, path))
def create_dir(dir_path):
    # type: (Text) -> None
    """Create ``dir_path`` including any missing parent directories.

    Succeeds silently when the path already exists (another process or an
    earlier call may have created it first).
    """
    try:
        os.makedirs(dir_path)
    except OSError as err:
        # EEXIST just means somebody beat us to it -- that is fine.
        if err.errno != errno.EEXIST:
            raise
def create_dir_for_file(file_path):
    # type: (Text) -> None
    """Creates any missing parent directories of this files path.

    Bug fix: a bare file name has an empty ``dirname``, and the original
    unconditionally called ``os.makedirs('')`` which raises.  A path with
    no directory component needs no parents, so it is now a no-op.
    """
    parent = os.path.dirname(file_path)
    if not parent:
        return
    try:
        os.makedirs(parent)
    except OSError as e:
        # be happy if someone already created the path
        if e.errno != errno.EEXIST:
            raise
def recursively_find_files(resource_name):
    # type: (Text) -> List[Text]
    """Traverse directory hierarchy to find files.

    `resource_name` can be a folder or a file.  In both cases we will
    return a list of files.  Hidden entries (name starting with a dot)
    are skipped.

    Bug fixes over the original:
    - the hidden-name filter could empty the pending queue, after which
      ``nodes_to_visit[0]`` raised IndexError (e.g. a directory that only
      contains hidden files);
    - hidden names are now detected with ``os.path.basename`` instead of
      ``split("/")``, which also works with Windows separators.
    """
    if not resource_name:
        raise ValueError("Resource name '{}' must be an existing directory or file.".format(resource_name))
    elif os.path.isfile(resource_name):
        return [resource_name]
    elif os.path.isdir(resource_name):
        resources = []  # type: List[Text]
        # walk the fs tree and return a list of files
        nodes_to_visit = [resource_name]
        while nodes_to_visit:
            # skip hidden files and directories
            nodes_to_visit = [f for f in nodes_to_visit
                              if not os.path.basename(f).startswith('.')]
            if not nodes_to_visit:
                break  # everything left in the queue was hidden
            current_node = nodes_to_visit.pop(0)
            # if current node is a folder, schedule its children for a
            # visit.  Else add it to the resources.
            if os.path.isdir(current_node):
                nodes_to_visit += [os.path.join(current_node, f)
                                   for f in os.listdir(current_node)]
            else:
                resources.append(current_node)
        return resources
    else:
        raise ValueError("Could not locate the resource '{}'.".format(os.path.abspath(resource_name)))
| verloop/rasa_nlu | rasa_nlu/utils/__init__.py | Python | apache-2.0 | 2,474 | [
"VisIt"
] | 9181f8fe11d22c7aacc1e9d66fd5982c2cd416de9553d63db241630523b8cb38 |
from mpinterfaces.utils import jobs_from_file
from pymatgen.io.vasp.inputs import Kpoints
from pymatgen.io.vasp.outputs import Vasprun
from scipy.optimize import curve_fit
import numpy as np
import os
from glob import glob
import pandas as pd
def diagnose_cur_dir():
    """Scan every entry of the current directory for VASP results.

    Returns ``(good_vaspruns, relax_only, not_goods, total, datasets)``
    where ``datasets`` is a list of ``{dirname: DataFrame}`` holding
    per-atom energy/volume/kpts columns for each directory.
    """
    datasets = []
    good_vaspruns = []   # dirs whose POS/<run>/vasprun.xml parsed cleanly
    relax_only = []      # dirs with only a top-level relaxation run
    not_goods = []       # dirs that raised while parsing
    totl = len(os.listdir('.'))
    for n, f in enumerate(os.listdir('.')):
        data = {'energy': [], 'volume': [], 'kpts': []}
        try:
            #v = Vasprun(f+os.sep+'vasprun.xml')
            if 'POS' in os.listdir(f):
                try:
                    for r in os.listdir(f+os.sep+'POS'):
                        v = Vasprun(f+os.sep+'POS'+os.sep+r+os.sep+'vasprun.xml')
                        struct = v.structures[0]
                        # per-atom quantities
                        data['energy'].append(v.final_energy/len(struct))
                        data['volume'].append(v.structures[0].volume/len(struct))
                        kpt_list = v.kpoints.as_dict()['kpoints'][0]
                        kpts = kpt_list[0]*kpt_list[1]*kpt_list[2] / len(struct)
                        data['kpts'].append(kpts)
                        # NOTE(review): appended once per run, so ``f`` can
                        # appear multiple times in good_vaspruns.
                        good_vaspruns.append(f)
                except:
                    relax_only.append(f)
            else:
                v = Vasprun(f+os.sep+'vasprun.xml')
                relax_only.append(f)
        except:
            not_goods.append(f)
        dframe = pd.DataFrame(data)
        datasets.append({f: dframe})
        print ('FINISHED {0} of {1}'.format(n, totl))
    return good_vaspruns, relax_only, not_goods, len(os.listdir('.')), datasets
def check_correction(tag):
    """Interactively merge correction rows into an existing E(V) dataset.

    ``tag`` looks like ``<El>_PBE_<STRUCT>...``; the correction frame is
    read from ``check_combine/<tag>`` and the reference data from
    ``<El>_PBE_<struct>_VASP.csv``.  For every k-point density present
    only in the correction data the user is prompted; accepted rows are
    appended and, if anything was added, the merged frame is written to
    ``buffer_Alex/``.
    """
    el, struct = tag.split('_')[0], tag.split('_')[2]
    corr = pd.read_csv('check_combine/{}'.format(tag))
    # NOTE: an unknown structure tag leaves ``s`` undefined (NameError).
    if struct == 'BCC':
        s = 'bcc'
    elif struct == 'FCC':
        s = 'fcc'
    elif struct == 'HCP':
        s = 'hcp'
    orig = pd.read_csv('{0}_PBE_{1}_VASP.csv'.format(el, s))
    # Bug fix: the original called ``orig.dropna(inplace=False)`` and
    # discarded the result, a no-op; keep the cleaned frame instead.
    orig = orig.dropna()
    kpts_corr = [k for k in np.unique(corr['kpts'])
                 if k not in np.unique(orig['kpts'])]
    printnew = False
    for n, k in enumerate(kpts_corr):
        print (corr[corr['kpts'] == k])
        # Any non-empty response counts as acceptance.
        response = input('Enter 1 if good: ')
        if response:
            orig = pd.concat([orig, corr[corr['kpts'] == k]])
            print ('Adding {0}, {1} out of {2}'.format(k, n+1, len(kpts_corr)))
            printnew = True
        else:
            print ('User says {} is not good to add'.format(k))
    if printnew:
        orig.to_csv('buffer_Alex/{0}_PBE_{1}_VASP.csv'.format(el, s), index=False)
def check_kpts(filename):
    """Vet the E(V) curves stored in ``filename`` (a CSV with ``energy``,
    ``volume`` and ``kpts`` columns).

    A parabola is fitted to E(V) for every k-point density group; the
    group is reported when the fitted minimum is not bracketed by the
    sampled volumes, when it does not contain exactly 11 points, or when
    the fit itself fails.

    Returns a dict of parallel lists: ``Name``, ``Kpt``, ``NumKpts``,
    ``Reason``.
    """
    print ('checking {}'.format(filename))
    cfilt_file = pd.read_csv(filename)
    dropna_file = cfilt_file.dropna()
    troublemakers = {'Name': [], 'Kpt': [], 'NumKpts': [], 'Reason': []}

    def parabola(x, a, b, c):
        # E(V) model: a + b*V + c*V^2
        return a + b*x + c*x**2

    numkpts_afterdrop = [(k, dropna_file[dropna_file['kpts'] == k])
                         for k in np.unique(dropna_file['kpts'])]
    print (len(np.unique(dropna_file['kpts'])), max(dropna_file['kpts']))
    for n in numkpts_afterdrop:
        parabola_guess = [min(n[1]['energy']), 1, 1]
        try:
            popt, pcov = curve_fit(parabola, n[1]['volume'], n[1]['energy'],
                                   parabola_guess)
            # Make sure the fitted minimum is bracketed by the sampled
            # volumes; the equation-of-state solver needs that later.
            minvol = min(n[1]['volume'])
            maxvol = max(n[1]['volume'])
            # the minimum of the parabola is at dE/dV = 0, i.e. 2*c*V + b = 0
            b = popt[1]
            c = popt[2]
            parabola_vmin = -b / 2 / c
            # Bug fix: the original test read
            #   ``not minvol < parabola_vmin and parabola_vmin < maxvol``
            # which parses as ``(not (minvol < vmin)) and (vmin < maxvol)``
            # and therefore never flagged minima ABOVE the sampled range.
            if not (minvol < parabola_vmin < maxvol):
                print ('{} has minima issues'.format(n[0]))
                troublemakers['Name'].append(filename)
                troublemakers['Kpt'].append(n[0])
                troublemakers['NumKpts'].append(len(n[1]))
                troublemakers['Reason'].append('MinimaIssue')
            elif len(n[1]) != 11:
                troublemakers['Name'].append(filename)
                troublemakers['Kpt'].append(n[0])
                troublemakers['NumKpts'].append(len(n[1]))
                troublemakers['Reason'].append('Not11Kpoints')
        except Exception:
            print ('Exception caught for {0} at {1}'.format(filename, n[0]))
            troublemakers['Name'].append(filename)
            troublemakers['Kpt'].append(n[0])
            troublemakers['NumKpts'].append(len(n[1]))
            troublemakers['Reason'].append('Exception')
    return troublemakers
def process_to_dataframe(identifiers, metadata = ['energy','volume','kpoints'], chkfiles=glob('*.json'),job_dirs=None):
    """
    utility function that processes data from a list of checkpoints
    or directories into a pandas dataframe
    identifiers: Creates file names for the dataframes
    1. dict type: job directory concatenation
    example: {0:(('_',0),('/',1)),1:('__',1),'JoinBy':'_','OtherNames':'PBE'}
    will split the job_dirs elements by
    '_' and take position 0 followed by a split by '__' take
    position 1
    """
    # NOTE(review): ``metadata`` is never used.
    # NOTE(review): the ``chkfiles=glob('*.json')`` default is evaluated
    # once at import time, not per call.
    if chkfiles:
        #
        chk_jobs = sum([jobs_from_file(c) for c in chkfiles],[])
        jobs = [j for j in chk_jobs if j.final_energy]
        #
        if type(identifiers)==dict:
            def rec_split(string, split_rule):
                # Apply each (separator, index) rule in turn to the string.
                #print ('Split rule', split_rule)
                for s in split_rule:
                    #print ('element in split rule', s)
                    string = string.split(s[0])[s[1]]
                    #print (string)
                return string
            dir_splits = []
            for j in jobs:
                # Integer keys of ``identifiers`` name the pieces of the
                # output file name.
                single_name = {k:None for k in list(identifiers.keys()) if type(k)==int}
                #
                num_a = len(j.vis.poscar.structure)
                kpt_list = Kpoints.from_file(j.job_dir+os.sep+'KPOINTS').as_dict()['kpoints'][0]
                kpts = kpt_list[0]*kpt_list[1]*kpt_list[2]
                # per-atom energy/volume/kpts for this job
                data = pd.DataFrame({'energy':[j.final_energy/num_a],\
                                     'volume':[j.vis.poscar.structure.volume/num_a],\
                                     'kpts':[kpts/num_a]})
                #
                for k in list(identifiers.keys()):
                    if type(identifiers[k])==tuple:
                        single_name.update({k:rec_split(j.job_dir,identifiers[k])})
                    else:
                        if k != 'JoinBy':
                            single_name.update({k:identifiers[k]})
                dir_splits.append({identifiers['JoinBy'].join(list(single_name.values())):data})
                print ('finished {}'.format(j.job_dir))
            # Group the single-row frames by output file name and write
            # one concatenated CSV per name.
            sorted_dict = {list(d.keys())[0]:[] for d in dir_splits}
            for d in dir_splits:
                sorted_dict[list(d.keys())[0]].append(list(d.values())[0])
            complete_frames = {k:pd.concat(sorted_dict[k]) for k in list(sorted_dict.keys())}
            write_outs = [complete_frames[k].to_csv(k,index=False) \
                          for k in list(complete_frames.keys())]
    # NOTE(review): ``jobdirs`` is undefined -- the parameter is named
    # ``job_dirs`` -- so reaching this branch raises NameError.
    elif jobdirs:
        if type(identifiers)==dict:
            def rec_split(string, split_rule):
                #print ('Split rule', split_rule)
                for s in split_rule:
                    #print ('element in split rule', s)
                    string = string.split(s[0])[s[1]]
                    #print (string)
                return string
            dir_splits = []
            for j in jobdirs:
                # simple read whole vasprun
                try:
                    v = Vasprun(j+os.sep+'vasprun.xml')
                    single_name = {k:None for k in list(identifiers.keys()) if type(k)==int}
                    #
                    # NOTE(review): copy-paste from the checkpoint branch --
                    # ``j`` here is a directory path, yet ``j.vis`` /
                    # ``j.final_energy`` / ``j.job_dir`` are used and the
                    # parsed ``v`` is never read; this will fail at runtime.
                    num_a = len(v.vis.poscar.structure)
                    kpt_list = Kpoints.from_file(j.job_dir+os.sep+'KPOINTS').as_dict()['kpoints'][0]
                    kpts = kpt_list[0]*kpt_list[1]*kpt_list[2]
                    data = pd.DataFrame({'energy':[j.final_energy/num_a],\
                                         'volume':[j.vis.poscar.structure.volume/num_a],\
                                         'kpts':[kpts/num_a]})
                    #
                    for k in list(identifiers.keys()):
                        if type(identifiers[k])==tuple:
                            single_name.update({k:rec_split(j.job_dir,identifiers[k])})
                        else:
                            if k != 'JoinBy':
                                single_name.update({k:identifiers[k]})
                    dir_splits.append({identifiers['JoinBy'].join(list(single_name.values())):data})
                    print ('finished {}'.format(j.job_dir))
                except:
                    print ('Vaspruns issues')
            sorted_dict = {list(d.keys())[0]:[] for d in dir_splits}
            for d in dir_splits:
                sorted_dict[list(d.keys())[0]].append(list(d.values())[0])
            complete_frames = {k:pd.concat(sorted_dict[k]) for k in list(sorted_dict.keys())}
            write_outs = [complete_frames[k].to_csv(k,index=False) \
                          for k in list(complete_frames.keys())]
if __name__ == '__main__':
    # Screen every LDA crossfilter CSV and collect one combined report.
    crossfilts_VASP = [pd.DataFrame(check_kpts(filename=f)) for f in glob('AutoCrossfilts_LDA_VASP/*VASP.csv')]
    pd.concat(crossfilts_VASP).to_csv('LDA_Report.csv',index=False)
    #process_to_dataframe(identifiers={0:(('__',0),('_',0)),1:'PBE',2:(('__',0),('_',1)),\
    #                     'JoinBy':'_','3':'VASP.csv'}, chkfiles=glob('*Relaxed*.json'))
| joshgabriel/dft-crossfilter | CompleteApp/benchmark-precision/precision/script_to_general_pandas.py | Python | mit | 9,536 | [
"VASP",
"pymatgen"
] | 05de04380dd4b35bac20c09ce566fafba9943291e19fba38f9cb9652a090fe1c |
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import subprocess
import sys
import csv
import matplotlib.pyplot as plt
import numpy as np
# Use fonts that match LaTeX
from matplotlib import rcParams
rcParams['font.family'] = 'serif'
rcParams['font.size'] = 17
rcParams['font.serif'] = ['Computer Modern Roman']
# Render all text through LaTeX (requires a working TeX installation).
rcParams['text.usetex'] = True

# Small font size for the legend
from matplotlib.font_manager import FontProperties
fontP = FontProperties()
fontP.set_size('x-small')
def get_last_row(csv_filename):
    '''
    Return the last non-blank row of `csv_filename` as a list of strings
    (or None for an empty file).  Every line of the file has to be read;
    the csv module offers no way to seek straight to the final record.
    http://stackoverflow.com/questions/20296955/reading-last-row-from-csv-file-python-error
    '''
    last = None
    with open(csv_filename, 'r') as handle:
        for record in csv.reader(handle):
            if record:  # skip blank lines at end of file.
                last = record
    return last
def run_moose(dt, time_integrator):
    '''
    Function which actually runs MOOSE.

    Runs ``scalar.i`` with the given timestep and TimeIntegrator and
    returns the error reported in the last row of ``scalar_out.csv``.
    '''
    implicit_flag = 'true'
    explicit_methods = ['ExplicitEuler', 'ExplicitMidpoint', 'Heun', 'Ralston']
    # Set implicit_flag based on TimeIntegrator name
    if (time_integrator in explicit_methods):
        implicit_flag = 'false'
    command_line_args = ['../../../moose_test-opt', '-i', 'scalar.i',
                         'Executioner/dt={}'.format(dt),
                         'Executioner/TimeIntegrator/type={}'.format(time_integrator),
                         'GlobalParams/implicit={}'.format(implicit_flag)]
    try:
        child = subprocess.Popen(command_line_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        # communicate() waits for the process to terminate, so there's no
        # need to wait() for it.  It also sets the returncode attribute on
        # child.
        (stdoutdata, stderrdata) = child.communicate()
        if (child.returncode != 0):
            print('Running MOOSE failed: program output is below:')
            print(stdoutdata)
            # NOTE(review): bare ``raise`` with no active exception raises
            # RuntimeError, which the bare except below converts into the
            # exit path -- it works, but an explicit exception would be
            # clearer.
            raise
    except:
        print('Error executing moose_test')
        sys.exit(1)
    # Parse the last line of the output file to get the error at the final time.
    last_row = get_last_row('scalar_out.csv')
    return float(last_row[1])
#
# Main program
#
# Runs every TimeIntegrator at a sequence of shrinking timesteps, plots
# log(error) vs. log(1/dt) and annotates each curve with the slope of a
# linear fit through the last three points (the observed convergence rate).

fig = plt.figure()
ax1 = fig.add_subplot(111)

# Lists of timesteps and TimeIntegrators to plot.
time_integrators = ['ImplicitEuler', 'ImplicitMidpoint', 'LStableDirk2', 'BDF2', 'CrankNicolson',
                    'LStableDirk3', 'LStableDirk4', 'AStableDirk4',
                    'ExplicitEuler', 'ExplicitMidpoint', 'Heun', 'Ralston']
dts = [.125, .0625, .03125, .015625]

# Plot colors
colors = ['maroon', 'blue', 'green', 'black', 'burlywood', 'olivedrab', 'midnightblue',
          'tomato', 'darkmagenta', 'chocolate', 'lightslategray', 'skyblue']

# Plot line markers
markers = ['v', 'o', 'x', '^', 'H', 'h', '+', 'D', '*', '4', 'd', '8']

# Plot line styles
linestyles = [':', '-', '-.', '--', ':', '-.', '--', ':', '--', '-', '-.', '-']

# Bug fix: the shebang requests python3 but the original loop used
# xrange(), which only exists on Python 2 and raised NameError here.
for i, time_integrator in enumerate(time_integrators):
    # Place to store the results for this TimeIntegrator
    results = []

    # Call MOOSE to compute the results
    for dt in dts:
        results.append(run_moose(dt, time_integrator))

    # Make plot
    xdata = np.log10(np.reciprocal(dts))
    ydata = np.log10(results)

    # Compute linear fit of last three points.
    start_fit = len(xdata) - 3
    end_fit = len(xdata)
    fit = np.polyfit(xdata[start_fit:end_fit], ydata[start_fit:end_fit], 1)

    # Plot this integrator's curve, labelling it with its fitted slope.
    ax1.plot(xdata, ydata, label=time_integrator + ", $" + "{:.2f}".format(fit[0]) + "$",
             color=colors[i], marker=markers[i], linestyle=linestyles[i])

# Set up the axis labels.
ax1.set_xlabel('$\log (\Delta t^{-1})$')
ax1.set_ylabel('$\log \|e(T)\|_{L^2}$')

# Add a legend
plt.legend(loc='lower left', prop=fontP)

# Save a PDF
plt.savefig('plot.pdf', format='pdf')

# Local Variables:
# python-indent: 2
# End:
| nuclear-wizard/moose | test/tests/time_integrators/scalar/run.py | Python | lgpl-2.1 | 4,487 | [
"MOOSE"
] | c4cc21e972606196a2d6fe397e48f90bb711a89c90b884327d1427996b0f825a |
# -*- coding: utf-8 -*-
#
# cloudbridge documentation build configuration file, created by
# sphinx-quickstart on Sat Oct 10 03:17:52 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import shlex
import sys
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# (This makes the cloudbridge package importable for autodoc.)
sys.path.insert(0, os.path.abspath('../'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.viewcode',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'cloudbridge'
copyright = u'2019, GVL and Galaxy Projects'
author = u'GVL and Galaxy Projects'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.2'
# The full version, including alpha/beta/rc tags.
release = '1.0.2'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    'style_external_links': True
}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
html_extra_path = ['extras']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'

# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}

# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'

# Output file base name for HTML help builder.
htmlhelp_basename = 'cloudbridgedoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',

    # Latex figure (float) alignment
    #'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'cloudbridge.tex', u'cloudbridge Documentation',
     u'GVL and Galaxy Projects', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'cloudbridge', u'cloudbridge Documentation',
     [author], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'cloudbridge', u'cloudbridge Documentation',
     author, 'cloudbridge', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| gvlproject/libcloudbridge | docs/conf.py | Python | mit | 9,543 | [
"Galaxy"
] | c5ee17aa12865a6df79321a670d106ee9aef101c4a468a2c677d4739c54a5053 |
# Copyright (C) 2015 Henrique Pereira Coutada Miranda, Alejandro Molina Sanchez
# All rights reserved.
#
# This file is part of yambopy
#
#
import os
import re
from math import sqrt
from numpy import array
from qepy.auxiliary import *
meVtocm = 8.06573
class DynmatIn():
    """Build and manipulate Quantum ESPRESSO input files for ``matdyn.x``.

    Namelist variables are set with item syntax (``dyn['flfrc'] = ...``)
    and q-points appended to :attr:`qpoints`; ``str(dyn)`` renders the
    complete input file.
    """

    def __init__(self):
        self.variable = dict()   # namelist variables for &input
        self.qpoints = []        # list of [qx, qy, qz] coordinates

    def write(self, filename):
        """Render the input file and save it under *filename*."""
        with open(filename, 'w') as handle:
            handle.write(str(self))

    def __str__(self):
        # &input namelist first, then the q-point count and coordinates.
        pieces = ['&input\n', self.stringify_group('', self.variable)]
        if self.qpoints:
            pieces.append("%d\n" % len(self.qpoints))
            for q in self.qpoints:
                pieces.append(("%12.8lf %12.8lf %12.8lf") % tuple(q[:3]) + "\n")
        return ''.join(pieces)

    def __setitem__(self, key, value):
        self.variable[key] = value

    def __getitem__(self, key):
        return self.variable[key]

    def stringify_group(self, keyword, group):
        """Render *group* as ``name = value`` namelist lines ended by '/'."""
        if group != {}:
            text = '\n'
            for keyword in group:
                text += "%20s = %s\n" % (keyword, group[keyword])
            return text + "/\n"
        return ''
class Matdyn():
    """ Class to read and plot the data from matdyn.modes files
    (phonon frequencies and displacement eigenvectors along a q-path).
    """
    # name of the matdyn.x output file expected inside ``folder``
    _datafile = 'matdyn.modes'

    def __init__(self,natoms,path,folder='.'):
        """Parse ``folder/matdyn.modes``.

        natoms -- number of atoms in the cell (3*natoms modes per q-point)
        path   -- q-point path object; only ``get_klist()`` is used here
        """
        self.folder = folder
        self.path = path
        data_phon = open("%s/%s"%(folder, self._datafile),'r').readlines()
        self.nmodes = natoms*3
        self.nqpoints = len(path.get_klist())
        self.eigen, self.modes = [], []
        self.qpoints = []
        # Each q-point block is assumed to span nmodes*(natoms+1)+5 lines:
        # header lines plus, per mode, one frequency line followed by
        # natoms displacement lines -- verify against the matdyn output.
        for j in xrange(self.nqpoints):
            frec, v_frec = [], []
            k=2 + j*(self.nmodes*(natoms+1)+5)
            self.qpoints.append(float_from_string(data_phon[k]))
            for i in xrange(self.nmodes):
                k=4 + j*(self.nmodes*(natoms+1)+5) + i*(natoms+1)
                y = float_from_string(data_phon[k])
                v_mode = []
                for ii in xrange(1,natoms+1):
                    z = float_from_string(data_phon[k+ii])
                    # six floats per atom: (Re, Im) pairs for x, y, z
                    v_atom = array([complex(z[0],z[1]),complex(z[2],z[3]),complex(z[4],z[5])])
                    v_mode.append(v_atom)
                v_frec.append(array(v_mode))
                # y[1] is the frequency in cm^-1 on the "freq" line
                frec.append(y[1])
            self.eigen.append(frec)
            self.modes.append(array(v_frec))

    def plot_eigen(self,path=[]):
        """ plot the phonon frequencies using matplotlib
        """
        import matplotlib.pyplot as plt
        if self.eigen is None:
            print('Error')
            #self.get_eigen()
        if path:
            if isinstance(path,Path):
                path = path.get_indexes()
            plt.xticks( *zip(*path) )
        plt.ylabel('\\omega (cm$^{-1}$)')
        #plot vertical line
        for point in path:
            x, label = point
            plt.axvline(x)
        #plot bands
        eigen = array(self.eigen)
        for ib in range(self.nmodes):
            plt.plot(xrange(self.nqpoints),eigen[:,ib], 'r-', lw=2)
        plt.show()

    def __str__(self):
        # Render q-points, frequencies and complex displacements in a
        # matdyn.modes-like layout.
        s = ''
        for nq in xrange(self.nqpoints):
            s+="\n\n q = "+("%12.8lf "*3)%tuple(self.qpoints[nq])+"\n"
            for n in xrange(self.nmodes):
                s+= "freq (cm-1): %4.3lf\n"%self.eigen[nq][n]
                for na in xrange(self.nmodes/3):
                    xr = self.modes[nq][n][na].real
                    xi = self.modes[nq][n][na].imag
                    s+=("%12.8lf %12.8lfj "*3)%(xr[0],xi[0],xr[1],xi[1],xr[2],xi[2])+"\n"
        return s

    def write_freq_file(self,filename='freq.dat'):
        # Dump (q-index, frequency) columns, one blank-line-separated
        # block per band -- a gnuplot-friendly format.
        f = open(filename,'w')
        for n in xrange(self.nmodes):
            for nq in xrange(self.nqpoints):
                f.write("%4.3lf %4.3lf\n"%(float(nq),self.eigen[nq][n]))
            f.write("\n")
        f.close()
| alexmoratalla/yambo-py | qepy/dynmat.py | Python | bsd-3-clause | 3,974 | [
"Quantum ESPRESSO"
] | 7338465085a9656489a06268679c309167b25fab3bf8e0eb14fda8a21067c134 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# =============================================================================
'''Popular initialization methods for parameter values (Tensor objects).
Example usages::
from singa import tensor
from singa import initializer
x = tensor.Tensor((3, 5))
initializer.uniform(x, 3, 5) # use both fan_in and fan_out
initializer.uniform(x, 3, 0) # use only fan_in
'''
import math
def uniform(t, fan_in=0, fan_out=0):
    '''Fill *t* in place from U(-x, x), with x derived from the fan values.

    Args:
        t (Tensor): the parameter tensor to initialize in place.
        fan_in(int): for the weight Tensor of a convolution layer,
            fan_in = nb_channel * kh * kw; for a dense layer,
            fan_in = input_feature_length
        fan_out(int): for the convolution layer weight Tensor,
            fan_out = nb_filter * kh * kw; for the weight Tensor of a dense
            layer, fan_out = output_feature_length

    Ref: [Bengio and Glorot 2010]: Understanding the difficulty of
    training deep feedforward neural networks.
    '''
    assert fan_in > 0 or fan_out > 0, \
        'fan_in and fan_out cannot be 0 at the same time'
    # With both fans given the bound is sqrt(6/(fan_in+fan_out)); with only
    # one of them it degenerates to sqrt(3/fan).
    avg = 1 if fan_in * fan_out == 0 else 2
    bound = math.sqrt(3.0 * avg / (fan_in + fan_out))
    t.uniform(-bound, bound)
def gaussian(t, fan_in=0, fan_out=0):
    '''Fill *t* in place from N(0, std), std derived from the fan values.

    Args:
        t (Tensor): the parameter tensor to initialize in place.
        fan_in(int): for the weight Tensor of a convolution layer,
            fan_in = nb_channel * kh * kw; for a dense layer,
            fan_in = input_feature_length
        fan_out(int): for the convolution layer weight Tensor,
            fan_out = nb_filter * kh * kw; for the weight Tensor of a dense
            layer, fan_out = output_feature_length

    Ref Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun: Delving Deep into
    Rectifiers: Surpassing Human-Level Performance on ImageNet Classification
    '''
    assert fan_in > 0 or fan_out > 0, \
        'fan_in and fan_out cannot be 0 at the same time'
    # With both fans given std = sqrt(4/(fan_in+fan_out)); with only one of
    # them it degenerates to sqrt(2/fan), the He initialization.
    avg = 1 if fan_in * fan_out == 0 else 2
    std = math.sqrt(2.0 * avg / (fan_in + fan_out))
    t.gaussian(0, std)
def xavier(t):
    '''Fill the matrix *t* from U(-s, s) with s = sqrt(6/(nb_row + nb_col)).

    Deprecated. Please use uniform()

    Args:
        t (Tensor): the parameter tensor; t.shape must have two entries.
    '''
    bound = math.sqrt(6.0 / (t.shape[0] + t.shape[1]))
    t.uniform(-bound, bound)
def glorot(t):
    '''Fill the matrix *t* from a Gaussian with mean 0 and
    std = sqrt(2.0 / (nb_row + nb_col)).

    Deprecated. Please use gaussian()

    Args:
        t (Tensor): the parameter tensor; t.shape must have two entries.
    '''
    # Draw standard-normal samples first, then rescale the whole tensor in
    # place (kept as two steps to match the original update order).
    t.gaussian(0, 1)
    t *= math.sqrt(2.0 / (t.shape[0] + t.shape[1]))
def msra(t):
    '''Fill the matrix *t* from a Gaussian with mean 0 and
    std = sqrt(2.0 / nb_row).

    Deprecated. Please use gaussian()

    Ref [He, Zhang, Ren and Sun 2015]: Specifically accounts for ReLU
    nonlinearities.

    Args:
        t (Tensor): the parameter tensor; only t.shape[0] is used.
    '''
    std = math.sqrt(2.0 / t.shape[0])
    t.gaussian(0, std)
| wangsheng1001/incubator-singa | python/singa/initializer.py | Python | apache-2.0 | 3,999 | [
"Gaussian"
] | f2191a12a1fb288727da78f67c0db14488425bb362b6b56120ed680f41818856 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.